Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig5
-rw-r--r--drivers/acpi/ac.c5
-rw-r--r--drivers/acpi/acpi_amba.c6
-rw-r--r--drivers/acpi/acpi_apd.c9
-rw-r--r--drivers/acpi/acpi_fpdt.c22
-rw-r--r--drivers/acpi/acpi_lpss.c69
-rw-r--r--drivers/acpi/acpi_pcc.c28
-rw-r--r--drivers/acpi/acpi_platform.c22
-rw-r--r--drivers/acpi/acpi_video.c114
-rw-r--r--drivers/acpi/apei/apei-base.c5
-rw-r--r--drivers/acpi/apei/bert.c3
-rw-r--r--drivers/acpi/apei/erst.c6
-rw-r--r--drivers/acpi/arm64/dma.c28
-rw-r--r--drivers/acpi/bus.c37
-rw-r--r--drivers/acpi/cppc_acpi.c45
-rw-r--r--drivers/acpi/device_pm.c38
-rw-r--r--drivers/acpi/dptf/Kconfig3
-rw-r--r--drivers/acpi/ec.c6
-rw-r--r--drivers/acpi/fan_core.c58
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/irq.c4
-rw-r--r--drivers/acpi/numa/hmat.c25
-rw-r--r--drivers/acpi/osi.c24
-rw-r--r--drivers/acpi/pci_root.c75
-rw-r--r--drivers/acpi/power.c11
-rw-r--r--drivers/acpi/processor_idle.c31
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/property.c24
-rw-r--r--drivers/acpi/resource.c46
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/scan.c179
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/utils.c30
-rw-r--r--drivers/acpi/video_detect.c449
-rw-r--r--drivers/acpi/x86/apple.c1
-rw-r--r--drivers/acpi/x86/s2idle.c161
-rw-r--r--drivers/acpi/x86/utils.c33
-rw-r--r--drivers/amba/bus.c8
-rw-r--r--drivers/android/binder.c12
-rw-r--r--drivers/android/binder_alloc.c35
-rw-r--r--drivers/ata/ahci_imx.c15
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-sata.c24
-rw-r--r--drivers/ata/libata-scsi.c10
-rw-r--r--drivers/auxdisplay/ht16k33.c4
-rw-r--r--drivers/auxdisplay/lcd2s.c3
-rw-r--r--drivers/base/arch_topology.c6
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/dd.c40
-rw-r--r--drivers/base/driver.c6
-rw-r--r--drivers/base/firmware_loader/sysfs.c7
-rw-r--r--drivers/base/firmware_loader/sysfs.h5
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.c12
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/base/power/runtime.c7
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/regmap/regmap-mmio.c289
-rw-r--r--drivers/base/regmap/regmap-spi-avmm.c14
-rw-r--r--drivers/base/regmap/regmap-spi.c8
-rw-r--r--drivers/base/regmap/regmap.c167
-rw-r--r--drivers/base/regmap/trace.h61
-rw-r--r--drivers/bcma/driver_mips.c2
-rw-r--r--drivers/block/aoe/aoeblk.c15
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h1
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c3
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c12
-rw-r--r--drivers/block/nbd.c7
-rw-r--r--drivers/block/null_blk/main.c8
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rnbd/Makefile6
-rw-r--r--drivers/block/rnbd/rnbd-clt.c8
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c43
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h64
-rw-r--r--drivers/block/rnbd/rnbd-srv-trace.c17
-rw-r--r--drivers/block/rnbd/rnbd-srv-trace.h207
-rw-r--r--drivers/block/rnbd/rnbd-srv.c123
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/block/ublk_drv.c333
-rw-r--r--drivers/block/virtio_blk.c15
-rw-r--r--drivers/block/xen-blkback/common.h3
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/block/xen-blkfront.c20
-rw-r--r--drivers/block/zram/zram_drv.c48
-rw-r--r--drivers/block/zram/zram_drv.h1
-rw-r--r--drivers/bluetooth/btintel.c20
-rw-r--r--drivers/bluetooth/btusb.c38
-rw-r--r--drivers/bluetooth/hci_ldisc.c7
-rw-r--r--drivers/bluetooth/hci_serdev.c10
-rw-r--r--drivers/bus/hisi_lpc.c96
-rw-r--r--drivers/bus/mhi/host/main.c19
-rw-r--r--drivers/bus/mhi/host/pci_generic.c2
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c4
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c4
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c3
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c3
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c6
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c12
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c16
-rw-r--r--drivers/clk/clk-cdce706.c3
-rw-r--r--drivers/clk/clk-cs2000-cp.c4
-rw-r--r--drivers/clk/clk-si514.c3
-rw-r--r--drivers/clk/clk-si5341.c4
-rw-r--r--drivers/clk/clk-si5351.c4
-rw-r--r--drivers/clk/clk-si570.c3
-rw-r--r--drivers/clk/clk-tps68470.c13
-rw-r--r--drivers/clk/clk-versaclock5.c4
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/imx/clk-imx6sx.c4
-rw-r--r--drivers/clk/imx/clk-imx93.c2
-rw-r--r--drivers/clk/ingenic/tcu.c15
-rw-r--r--drivers/clk/microchip/clk-mpfs.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c8
-rw-r--r--drivers/clk/ti/clk.c1
-rw-r--r--drivers/counter/104-quad-8.c6
-rw-r--r--drivers/cpufreq/Kconfig.x8615
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c293
-rw-r--r--drivers/cpufreq/amd-pstate.c99
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c10
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c27
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c24
-rw-r--r--drivers/cpufreq/sti-cpufreq.c2
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c35
-rw-r--r--drivers/cpufreq/ti-cpufreq.c2
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c4
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c4
-rw-r--r--drivers/cpuidle/governor.c11
-rw-r--r--drivers/crypto/atmel-ecc.c6
-rw-r--r--drivers/crypto/atmel-sha204a.c6
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c4
-rw-r--r--drivers/dax/hmem/device.c1
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c7
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c8
-rw-r--r--drivers/dma-buf/dma-buf.c4
-rw-r--r--drivers/dma-buf/dma-fence.c22
-rw-r--r--drivers/dma-buf/dma-resv.c3
-rw-r--r--drivers/dma-buf/st-dma-fence-chain.c4
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c22
-rw-r--r--drivers/dma-buf/st-dma-fence.c16
-rw-r--r--drivers/dma-buf/st-dma-resv.c10
-rw-r--r--drivers/dma-buf/sync_file.c8
-rw-r--r--drivers/dma-buf/udmabuf.c11
-rw-r--r--drivers/dma/ti/k3-udma-private.c6
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c21
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c2
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/edac/edac_module.h4
-rw-r--r--drivers/edac/i10nm_base.c287
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/ie31200_edac.c28
-rw-r--r--drivers/edac/ppc4xx_edac.c23
-rw-r--r--drivers/edac/sb_edac.c148
-rw-r--r--drivers/edac/skx_base.c9
-rw-r--r--drivers/edac/skx_common.c26
-rw-r--r--drivers/edac/skx_common.h16
-rw-r--r--drivers/edac/wq.c1
-rw-r--r--drivers/extcon/extcon-rt8973a.c4
-rw-r--r--drivers/firmware/arm_ffa/bus.c4
-rw-r--r--drivers/firmware/arm_ffa/driver.c132
-rw-r--r--drivers/firmware/arm_scmi/clock.c6
-rw-r--r--drivers/firmware/arm_scmi/optee.c1
-rw-r--r--drivers/firmware/arm_scmi/reset.c10
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c46
-rw-r--r--drivers/firmware/arm_scmi/sensors.c25
-rw-r--r--drivers/firmware/dmi_scan.c2
-rw-r--r--drivers/firmware/efi/capsule-loader.c31
-rw-r--r--drivers/firmware/efi/dev-path-parser.c10
-rw-r--r--drivers/firmware/efi/efibc.c3
-rw-r--r--drivers/firmware/efi/libstub/Makefile9
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c4
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c8
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c8
-rw-r--r--drivers/firmware/psci/psci.c130
-rw-r--r--drivers/firmware/qcom_scm.h2
-rw-r--r--drivers/firmware/sysfb.c4
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c13
-rw-r--r--drivers/firmware/xilinx/zynqmp.c31
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c8
-rw-r--r--drivers/gpio/Kconfig3
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c10
-rw-r--r--drivers/gpio/gpio-104-idi-48.c10
-rw-r--r--drivers/gpio/gpio-104-idio-16.c18
-rw-r--r--drivers/gpio/gpio-adp5588.c4
-rw-r--r--drivers/gpio/gpio-f7188x.c275
-rw-r--r--drivers/gpio/gpio-ftgpio010.c22
-rw-r--r--drivers/gpio/gpio-ixp4xx.c17
-rw-r--r--drivers/gpio/gpio-max7300.c4
-rw-r--r--drivers/gpio/gpio-mockup.c15
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-mt7621.c21
-rw-r--r--drivers/gpio/gpio-mvebu.c15
-rw-r--r--drivers/gpio/gpio-pca953x.c21
-rw-r--r--drivers/gpio/gpio-pcf857x.c4
-rw-r--r--drivers/gpio/gpio-pxa.c11
-rw-r--r--drivers/gpio/gpio-realtek-otto.c166
-rw-r--r--drivers/gpio/gpio-rockchip.c4
-rw-r--r--drivers/gpio/gpio-tpic2810.c4
-rw-r--r--drivers/gpio/gpio-tqmx86.c4
-rw-r--r--drivers/gpio/gpio-ws16c48.c10
-rw-r--r--drivers/gpio/gpiolib-cdev.c5
-rw-r--r--drivers/gpu/drm/Kconfig39
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c1224
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c166
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c181
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c272
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v3_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c283
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c431
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c223
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c511
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c150
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c303
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h (renamed from drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c348
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h771
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c89
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c99
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c11
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c186
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h47
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c109
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c181
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c273
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c439
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c169
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c154
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/Makefile25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c359
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c415
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c779
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h1172
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c217
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c687
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c305
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c110
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c432
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c386
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c443
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c337
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c278
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c1175
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c1884
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h69
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h108
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h1469
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h12086
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h44640
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h58
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h4
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c45
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c25
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c34
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c66
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c236
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c79
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c132
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c50
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h3
-rw-r--r--drivers/gpu/drm/arm/Kconfig4
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c12
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c11
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c77
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c9
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c8
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c35
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c6
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c8
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c2
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c10
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c7
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c28
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h7
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c9
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c42
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.h6
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c48
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c4
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c4
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c31
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c42
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c7
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c4
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c2
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c12
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c4
-rw-r--r--drivers/gpu/drm/bridge/panel.c74
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c4
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c9
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c4
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c4
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c13
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c94
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c119
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c43
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c1149
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c13
-rw-r--r--drivers/gpu/drm/drm_aperture.c10
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c84
-rw-r--r--drivers/gpu/drm/drm_auth.c4
-rw-r--r--drivers/gpu/drm/drm_bridge.c4
-rw-r--r--drivers/gpu/drm/drm_client.c4
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c135
-rw-r--r--drivers/gpu/drm/drm_crtc.c94
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c11
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c76
-rw-r--r--drivers/gpu/drm/drm_encoder.c75
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c (renamed from drivers/gpu/drm/drm_fb_cma_helper.c)67
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c101
-rw-r--r--drivers/gpu/drm/drm_file.c143
-rw-r--r--drivers/gpu/drm/drm_format_helper.c702
-rw-r--r--drivers/gpu/drm/drm_fourcc.c55
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c70
-rw-r--r--drivers/gpu/drm/drm_gem.c187
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c (renamed from drivers/gpu/drm/drm_gem_cma_helper.c)302
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c12
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c6
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c49
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/drm_mode_config.c7
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c66
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c18
-rw-r--r--drivers/gpu/drm/drm_plane.c70
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c114
-rw-r--r--drivers/gpu/drm/drm_prime.c20
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c64
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c7
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c18
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/backlight.c102
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c54
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c4
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c30
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c70
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c6
-rw-r--r--drivers/gpu/drm/gma500/power.c166
-rw-r--r--drivers/gpu/drm/gma500/power.h18
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c73
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c52
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h25
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c62
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c30
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c34
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c15
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c3
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c10
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c9
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c7
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c4
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c4
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c7
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c102
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight_regs.h124
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c392
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c293
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c383
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h418
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c147
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c153
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c119
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c142
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c156
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c105
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c105
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c111
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c326
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus_regs.h81
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c163
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h270
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c116
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c1116
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h346
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c14
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c11
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c3562
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h80
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c30
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c141
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_regs.h188
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c39
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c62
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c86
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c10
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c16
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h3
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c158
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c106
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c191
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c174
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c35
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h63
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c50
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c104
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c88
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c85
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c12
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c115
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c94
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c113
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c45
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c229
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c98
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c69
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c476
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h39
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c40
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c159
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c63
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c147
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h533
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c59
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h43
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c84
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h21
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c100
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c308
-rw-r--r--drivers/gpu/drm/i915/i915_pci.h6
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h557
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h6
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c73
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c76
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h19
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c9
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h6
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h42
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c97
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h97
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c41
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c4
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c9
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h4
-rw-r--r--drivers/gpu/drm/i915/intel_pci_config.h7
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3708
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h65
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c112
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h40
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c19
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c14
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c70
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c9
-rw-r--r--drivers/gpu/drm/imx/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c8
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c18
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c33
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c58
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c17
-rw-r--r--drivers/gpu/drm/kmb/Kconfig2
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c6
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c15
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig4
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_crtc.c2
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c10
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_interface.c2
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.c11
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_mode.c3
-rw-r--r--drivers/gpu/drm/mcde/Kconfig2
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c8
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c10
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig11
-rw-r--r--drivers/gpu/drm/mediatek/Makefile2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c2663
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp_reg.h350
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1
-rw-r--r--drivers/gpu/drm/meson/Kconfig2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c29
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h7
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c13
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.h1
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.h1
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c19
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c15
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c2
-rw-r--r--drivers/gpu/drm/mgag200/Makefile4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_bmc.c99
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c27
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h208
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c254
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c277
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c181
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c315
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c316
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c192
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c431
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c326
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c726
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_pll.c997
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c83
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c37
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c78
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c74
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c94
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c65
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c150
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c37
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h31
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c172
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c299
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c288
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c162
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c185
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c87
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c14
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c145
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c102
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c188
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c25
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c50
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h89
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c179
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h123
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c164
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c78
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h14
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h36
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c22
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c101
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c3
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c1
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig4
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c27
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.h1
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c25
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c39
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c198
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c85
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c1
-rw-r--r--drivers/gpu/drm/panel/Kconfig4
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c4
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c4
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c4
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c4
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c45
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c4
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c4
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c7
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c4
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c4
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c4
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c4
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c4
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c4
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c4
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c4
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c9
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c4
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c4
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c90
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c547
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c4
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c3
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c3
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c4
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig1
-rw-r--r--drivers/gpu/drm/panfrost/Makefile3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c11
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c249
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.h12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c44
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h42
-rw-r--r--drivers/gpu/drm/pl111/Kconfig2
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c8
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c9
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c66
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c61
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c778
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h41
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c34
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c41
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c35
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.h10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c69
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h31
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c5
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c51
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c122
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c24
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h1
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c6
-rw-r--r--drivers/gpu/drm/selftests/Makefile8
-rw-r--r--drivers/gpu/drm/selftests/drm_buddy_selftests.h15
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h68
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h28
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h40
-rw-r--r--drivers/gpu/drm/selftests/drm_selftest.c109
-rw-r--r--drivers/gpu/drm/selftests/drm_selftest.h41
-rw-r--r--drivers/gpu/drm/selftests/test-drm_buddy.c994
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c1141
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c668
-rw-r--r--drivers/gpu/drm/selftests/test-drm_format.c280
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.c32
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h52
-rw-r--r--drivers/gpu/drm/selftests/test-drm_rect.c223
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.h2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c21
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-i2c.c4
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c21
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c263
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.h9
-rw-r--r--drivers/gpu/drm/sprd/Kconfig2
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c15
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c6
-rw-r--r--drivers/gpu/drm/sti/Kconfig2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c1
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c7
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c18
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c18
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c3
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h1
-rw-r--r--drivers/gpu/drm/stm/Kconfig2
-rw-r--r--drivers/gpu/drm/stm/drv.c11
-rw-r--r--drivers/gpu/drm/stm/ltdc.c17
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig28
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c26
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c64
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c27
-rw-r--r--drivers/gpu/drm/tegra/dc.c1
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/plane.c1
-rw-r--r--drivers/gpu/drm/tests/Makefile4
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c756
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c991
-rw-r--r--drivers/gpu/drm/tests/drm_damage_helper_test.c639
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c (renamed from drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c)89
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c370
-rw-r--r--drivers/gpu/drm/tests/drm_format_test.c359
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c (renamed from drivers/gpu/drm/selftests/test-drm_framebuffer.c)120
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c (renamed from drivers/gpu/drm/selftests/test-drm_mm.c)1253
-rw-r--r--drivers/gpu/drm/tests/drm_plane_helper_test.c (renamed from drivers/gpu/drm/selftests/test-drm_plane_helper.c)155
-rw-r--r--drivers/gpu/drm/tests/drm_rect_test.c214
-rw-r--r--drivers/gpu/drm/tidss/Kconfig2
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c28
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c6
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c3
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c1
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c10
-rw-r--r--drivers/gpu/drm/tiny/Kconfig22
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c14
-rw-r--r--drivers/gpu/drm/tiny/bochs.c14
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c19
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c12
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c7
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c7
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c7
-rw-r--r--drivers/gpu/drm/tiny/repaper.c42
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c594
-rw-r--r--drivers/gpu/drm/tiny/st7586.c17
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c174
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c24
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c64
-rw-r--r--drivers/gpu/drm/tve200/Kconfig2
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c12
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c10
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c19
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h5
-rw-r--r--drivers/gpu/drm/udl/udl_main.c128
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c49
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c46
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c12
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig3
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c79
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c149
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c72
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c131
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h65
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c131
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c982
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h15
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c145
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c71
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c40
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c63
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c73
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c273
-rw-r--r--drivers/gpu/drm/via/Makefile2
-rw-r--r--drivers/gpu/drm/via/via_3d_reg.h349
-rw-r--r--drivers/gpu/drm/via/via_dma.c744
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c807
-rw-r--r--drivers/gpu/drm/via/via_dmablit.h140
-rw-r--r--drivers/gpu/drm/via/via_dri1.c3630
-rw-r--r--drivers/gpu/drm/via/via_drv.c124
-rw-r--r--drivers/gpu/drm/via/via_drv.h229
-rw-r--r--drivers/gpu/drm/via/via_irq.c388
-rw-r--r--drivers/gpu/drm/via/via_map.c132
-rw-r--r--drivers/gpu/drm/via/via_mm.c241
-rw-r--r--drivers/gpu/drm/via/via_verifier.c1110
-rw-r--r--drivers/gpu/drm/via/via_verifier.h62
-rw-r--r--drivers/gpu/drm/via/via_video.c94
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c53
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c65
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c40
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c21
-rw-r--r--drivers/gpu/drm/vkms/Makefile1
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c314
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h33
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c286
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c56
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c13
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c8
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c18
-rw-r--r--drivers/hid/hid-asus.c7
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-nintendo.c6
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-steam.c10
-rw-r--r--drivers/hid/hid-thrustmaster.c3
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c68
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/vmbus_drv.c59
-rw-r--r--drivers/hwmon/Kconfig47
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/abituguru.c9
-rw-r--r--drivers/hwmon/abituguru3.c9
-rw-r--r--drivers/hwmon/acpi_power_meter.c11
-rw-r--r--drivers/hwmon/adc128d818.c6
-rw-r--r--drivers/hwmon/adm1021.c2
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c2
-rw-r--r--drivers/hwmon/adm1029.c2
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adm9240.c10
-rw-r--r--drivers/hwmon/adt7310.c2
-rw-r--r--drivers/hwmon/adt7410.c2
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/adt7470.c3
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/adt7x10.c7
-rw-r--r--drivers/hwmon/adt7x10.h5
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c181
-rw-r--r--drivers/hwmon/asb100.c8
-rw-r--r--drivers/hwmon/asc7621.c6
-rw-r--r--drivers/hwmon/asus-ec-sensors.c408
-rw-r--r--drivers/hwmon/asus_wmi_ec_sensors.c622
-rw-r--r--drivers/hwmon/axi-fan-control.c15
-rw-r--r--drivers/hwmon/corsair-psu.c32
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c14
-rw-r--r--drivers/hwmon/dme1737.c6
-rw-r--r--drivers/hwmon/emc1403.c12
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc2305.c620
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/f71882fg.c2453
-rw-r--r--drivers/hwmon/f75375s.c7
-rw-r--r--drivers/hwmon/fschmd.c8
-rw-r--r--drivers/hwmon/ftsteutates.c5
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/gl520sm.c2
-rw-r--r--drivers/hwmon/gpio-fan.c14
-rw-r--r--drivers/hwmon/gsc-hwmon.c1
-rw-r--r--drivers/hwmon/hwmon.c14
-rw-r--r--drivers/hwmon/iio_hwmon.c8
-rw-r--r--drivers/hwmon/ina209.c4
-rw-r--r--drivers/hwmon/ina3221.c17
-rw-r--r--drivers/hwmon/it87.c8
-rw-r--r--drivers/hwmon/jc42.c5
-rw-r--r--drivers/hwmon/lm63.c6
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm77.c2
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm80.c2
-rw-r--r--drivers/hwmon/lm83.c2
-rw-r--r--drivers/hwmon/lm85.c2
-rw-r--r--drivers/hwmon/lm87.c2
-rw-r--r--drivers/hwmon/lm90.c10
-rw-r--r--drivers/hwmon/lm92.c2
-rw-r--r--drivers/hwmon/lm93.c2
-rw-r--r--drivers/hwmon/lm95234.c2
-rw-r--r--drivers/hwmon/lm95241.c2
-rw-r--r--drivers/hwmon/lm95245.c2
-rw-r--r--drivers/hwmon/ltc2947-core.c24
-rw-r--r--drivers/hwmon/ltc2947-i2c.c2
-rw-r--r--drivers/hwmon/ltc2947-spi.c2
-rw-r--r--drivers/hwmon/max1619.c2
-rw-r--r--drivers/hwmon/max1668.c2
-rw-r--r--drivers/hwmon/max31722.c8
-rw-r--r--drivers/hwmon/max31730.c10
-rw-r--r--drivers/hwmon/max31760.c596
-rw-r--r--drivers/hwmon/max31790.c38
-rw-r--r--drivers/hwmon/max6639.c8
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/mr75203.c447
-rw-r--r--drivers/hwmon/nct6683.c4
-rw-r--r--drivers/hwmon/nct6775-platform.c8
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c2
-rw-r--r--drivers/hwmon/nzxt-smart2.c1
-rw-r--r--drivers/hwmon/occ/p8_i2c.c4
-rw-r--r--drivers/hwmon/pc87360.c1461
-rw-r--r--drivers/hwmon/pcf8591.c3
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/mp2888.c13
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c29
-rw-r--r--drivers/hwmon/pmbus/tps546d24.c71
-rw-r--r--drivers/hwmon/pwm-fan.c320
-rw-r--r--drivers/hwmon/scpi-hwmon.c14
-rw-r--r--drivers/hwmon/sht4x.c2
-rw-r--r--drivers/hwmon/sis5595.c187
-rw-r--r--drivers/hwmon/smm665.c3
-rw-r--r--drivers/hwmon/smsc47m192.c2
-rw-r--r--drivers/hwmon/sparx5-temp.c19
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/thmc50.c2
-rw-r--r--drivers/hwmon/tmp102.c6
-rw-r--r--drivers/hwmon/tmp103.c8
-rw-r--r--drivers/hwmon/tmp108.c8
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/tps23861.c107
-rw-r--r--drivers/hwmon/via686a.c206
-rw-r--r--drivers/hwmon/vt8231.c198
-rw-r--r--drivers/hwmon/w83627ehf.c8
-rw-r--r--drivers/hwmon/w83627hf.c1600
-rw-r--r--drivers/hwmon/w83781d.c6
-rw-r--r--drivers/hwmon/w83791d.c8
-rw-r--r--drivers/hwmon/w83792d.c8
-rw-r--r--drivers/hwmon/w83793.c8
-rw-r--r--drivers/hwmon/w83795.c6
-rw-r--r--drivers/hwmon/w83l785ts.c8
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.h3
-rw-r--r--drivers/i2c/busses/Kconfig14
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c27
-rw-r--r--drivers/i2c/busses/i2c-cadence.c20
-rw-r--r--drivers/i2c/busses/i2c-davinci.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-amdpsp.c67
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c37
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c24
-rw-r--r--drivers/i2c/busses/i2c-imx.c20
-rw-r--r--drivers/i2c/busses/i2c-ismt.c7
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c1210
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c926
-rw-r--r--drivers/i2c/busses/i2c-riic.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c9
-rw-r--r--drivers/i2c/busses/i2c-scmi.c41
-rw-r--r--drivers/i2c/busses/i2c-tegra.c11
-rw-r--r--drivers/i2c/i2c-core-acpi.c2
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-mux.c5
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/i2c/i2c-slave-testunit.c3
-rw-r--r--drivers/i2c/i2c-smbus.c3
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/idle/intel_idle.c53
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c4
-rw-r--r--drivers/iio/accel/kxcjk-1013.c4
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c4
-rw-r--r--drivers/iio/accel/mc3230.c4
-rw-r--r--drivers/iio/accel/mma7455_i2c.c4
-rw-r--r--drivers/iio/accel/mma7660.c4
-rw-r--r--drivers/iio/accel/mma8452.c4
-rw-r--r--drivers/iio/accel/mma9551.c4
-rw-r--r--drivers/iio/accel/mma9553.c4
-rw-r--r--drivers/iio/accel/stk8312.c4
-rw-r--r--drivers/iio/accel/stk8ba50.c4
-rw-r--r--drivers/iio/adc/ad7292.c4
-rw-r--r--drivers/iio/adc/ad799x.c4
-rw-r--r--drivers/iio/adc/ina2xx-adc.c4
-rw-r--r--drivers/iio/adc/ltc2497.c4
-rw-r--r--drivers/iio/adc/mcp3911.c28
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c14
-rw-r--r--drivers/iio/adc/ti-ads1015.c4
-rw-r--r--drivers/iio/chemical/atlas-sensor.c4
-rw-r--r--drivers/iio/chemical/ccs811.c4
-rw-r--r--drivers/iio/chemical/sgp30.c4
-rw-r--r--drivers/iio/dac/ad5380.c4
-rw-r--r--drivers/iio/dac/ad5446.c4
-rw-r--r--drivers/iio/dac/ad5593r.c4
-rw-r--r--drivers/iio/dac/ad5696-i2c.c4
-rw-r--r--drivers/iio/dac/ds4424.c4
-rw-r--r--drivers/iio/dac/m62332.c4
-rw-r--r--drivers/iio/dac/mcp4725.c4
-rw-r--r--drivers/iio/dac/ti-dac5571.c4
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c4
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c4
-rw-r--r--drivers/iio/gyro/itg3200_core.c4
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/health/afe4404.c4
-rw-r--r--drivers/iio/health/max30100.c4
-rw-r--r--drivers/iio/health/max30102.c4
-rw-r--r--drivers/iio/humidity/hdc2010.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c4
-rw-r--r--drivers/iio/imu/kmx61.c4
-rw-r--r--drivers/iio/light/apds9300.c4
-rw-r--r--drivers/iio/light/apds9960.c4
-rw-r--r--drivers/iio/light/bh1750.c4
-rw-r--r--drivers/iio/light/bh1780.c4
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm3232.c4
-rw-r--r--drivers/iio/light/cm3605.c6
-rw-r--r--drivers/iio/light/cm36651.c4
-rw-r--r--drivers/iio/light/gp2ap002.c4
-rw-r--r--drivers/iio/light/gp2ap020a00f.c4
-rw-r--r--drivers/iio/light/isl29028.c4
-rw-r--r--drivers/iio/light/isl29125.c4
-rw-r--r--drivers/iio/light/jsa1212.c4
-rw-r--r--drivers/iio/light/ltr501.c4
-rw-r--r--drivers/iio/light/opt3001.c6
-rw-r--r--drivers/iio/light/pa12203001.c4
-rw-r--r--drivers/iio/light/rpr0521.c4
-rw-r--r--drivers/iio/light/stk3310.c4
-rw-r--r--drivers/iio/light/tcs3472.c4
-rw-r--r--drivers/iio/light/tsl2563.c4
-rw-r--r--drivers/iio/light/tsl2583.c4
-rw-r--r--drivers/iio/light/tsl4531.c4
-rw-r--r--drivers/iio/light/us5182d.c4
-rw-r--r--drivers/iio/light/vcnl4000.c4
-rw-r--r--drivers/iio/light/vcnl4035.c4
-rw-r--r--drivers/iio/light/veml6070.c4
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/iio/magnetometer/ak8975.c4
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c4
-rw-r--r--drivers/iio/magnetometer/mag3110.c4
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c4
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/mpl3115.c4
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c4
-rw-r--r--drivers/iio/proximity/sx9500.c4
-rw-r--r--drivers/iio/temperature/mlx90614.c4
-rw-r--r--drivers/iio/temperature/mlx90632.c4
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c25
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c7
-rw-r--r--drivers/infiniband/hw/irdma/uk.c7
-rw-r--r--drivers/infiniband/hw/irdma/utils.c15
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c31
-rw-r--r--drivers/infiniband/hw/mlx5/main.c36
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h13
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c7
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c9
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c14
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c3
-rw-r--r--drivers/input/joystick/as5011.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c6
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c8
-rw-r--r--drivers/input/joystick/iforce/iforce.h6
-rw-r--r--drivers/input/keyboard/adp5588-keys.c3
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c3
-rw-r--r--drivers/input/keyboard/lm8323.c4
-rw-r--r--drivers/input/keyboard/lm8333.c4
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c4
-rw-r--r--drivers/input/keyboard/qt1070.c4
-rw-r--r--drivers/input/keyboard/qt2160.c4
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c2
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c4
-rw-r--r--drivers/input/misc/adxl34x-i2c.c4
-rw-r--r--drivers/input/misc/bma150.c4
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c4
-rw-r--r--drivers/input/misc/pcf8574_keypad.c4
-rw-r--r--drivers/input/misc/rk805-pwrkey.c1
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/synaptics_i2c.c4
-rw-r--r--drivers/input/rmi4/rmi_smbus.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c4
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c4
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c4
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c4
-rw-r--r--drivers/input/touchscreen/goodix.c6
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c2
-rw-r--r--drivers/input/touchscreen/migor_ts.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c4
-rw-r--r--drivers/input/touchscreen/stmfts.c4
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c10
-rw-r--r--drivers/input/touchscreen/tsc2004.c4
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/amd/iommu_v2.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c6
-rw-r--r--drivers/iommu/hyperv-iommu.c4
-rw-r--r--drivers/iommu/intel/iommu.c214
-rw-r--r--drivers/iommu/intel/iommu.h9
-rw-r--r--drivers/iommu/iommu.c21
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c11
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c14
-rw-r--r--drivers/irqchip/irq-gic-v4.c2
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c13
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c40
-rw-r--r--drivers/irqchip/irq-stm32-exti.c2
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/mISDN/l1oip.h1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c13
-rw-r--r--drivers/leds/flash/leds-as3645a.c4
-rw-r--r--drivers/leds/flash/leds-lm3601x.c13
-rw-r--r--drivers/leds/flash/leds-rt4505.c3
-rw-r--r--drivers/leds/leds-an30259a.c4
-rw-r--r--drivers/leds/leds-aw2013.c4
-rw-r--r--drivers/leds/leds-bd2802.c4
-rw-r--r--drivers/leds/leds-blinkm.c3
-rw-r--r--drivers/leds/leds-is31fl32xx.c4
-rw-r--r--drivers/leds/leds-lm3530.c3
-rw-r--r--drivers/leds/leds-lm3532.c4
-rw-r--r--drivers/leds/leds-lm355x.c4
-rw-r--r--drivers/leds/leds-lm3642.c3
-rw-r--r--drivers/leds/leds-lm3692x.c4
-rw-r--r--drivers/leds/leds-lm3697.c8
-rw-r--r--drivers/leds/leds-lp3944.c4
-rw-r--r--drivers/leds/leds-lp3952.c4
-rw-r--r--drivers/leds/leds-lp50xx.c4
-rw-r--r--drivers/leds/leds-lp5521.c4
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-lp5562.c4
-rw-r--r--drivers/leds/leds-lp8501.c4
-rw-r--r--drivers/leds/leds-lp8860.c4
-rw-r--r--drivers/leds/leds-pca9532.c6
-rw-r--r--drivers/leds/leds-tca6507.c4
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c42
-rw-r--r--drivers/macintosh/ams/ams-i2c.c6
-rw-r--r--drivers/macintosh/therm_adt746x.c4
-rw-r--r--drivers/macintosh/therm_windtunnel.c4
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c3
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c2
-rw-r--r--drivers/md/bcache/stats.h1
-rw-r--r--drivers/md/bcache/writeback.c78
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-table.c4
-rw-r--r--drivers/md/dm-verity-loadpin.c8
-rw-r--r--drivers/md/dm-verity-target.c16
-rw-r--r--drivers/md/dm-verity.h1
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c9
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid10.c164
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c11
-rw-r--r--drivers/md/raid5.c147
-rw-r--r--drivers/md/raid5.h32
-rw-r--r--drivers/media/cec/i2c/ch7322.c4
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c11
-rw-r--r--drivers/media/dvb-frontends/a8293.c3
-rw-r--r--drivers/media/dvb-frontends/af9013.c4
-rw-r--r--drivers/media/dvb-frontends/af9033.c4
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c3
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c3
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c3
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c4
-rw-r--r--drivers/media/dvb-frontends/mn88472.c4
-rw-r--r--drivers/media/dvb-frontends/mn88473.c4
-rw-r--r--drivers/media/dvb-frontends/mxl692.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c4
-rw-r--r--drivers/media/dvb-frontends/si2165.c3
-rw-r--r--drivers/media/dvb-frontends/si2168.c4
-rw-r--r--drivers/media/dvb-frontends/sp2.c3
-rw-r--r--drivers/media/dvb-frontends/stv090x.c3
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c3
-rw-r--r--drivers/media/dvb-frontends/tc90522.c3
-rw-r--r--drivers/media/dvb-frontends/tda10071.c3
-rw-r--r--drivers/media/dvb-frontends/ts2020.c3
-rw-r--r--drivers/media/i2c/ad5820.c3
-rw-r--r--drivers/media/i2c/ad9389b.c3
-rw-r--r--drivers/media/i2c/adp1653.c4
-rw-r--r--drivers/media/i2c/adv7170.c3
-rw-r--r--drivers/media/i2c/adv7175.c3
-rw-r--r--drivers/media/i2c/adv7180.c4
-rw-r--r--drivers/media/i2c/adv7183.c3
-rw-r--r--drivers/media/i2c/adv7343.c4
-rw-r--r--drivers/media/i2c/adv7393.c4
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c3
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/i2c/adv7842.c3
-rw-r--r--drivers/media/i2c/ak7375.c4
-rw-r--r--drivers/media/i2c/ak881x.c4
-rw-r--r--drivers/media/i2c/ar0521.c3
-rw-r--r--drivers/media/i2c/bt819.c3
-rw-r--r--drivers/media/i2c/bt856.c3
-rw-r--r--drivers/media/i2c/bt866.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c4
-rw-r--r--drivers/media/i2c/cs3308.c3
-rw-r--r--drivers/media/i2c/cs5345.c3
-rw-r--r--drivers/media/i2c/cs53l32a.c3
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c3
-rw-r--r--drivers/media/i2c/dw9714.c4
-rw-r--r--drivers/media/i2c/dw9768.c4
-rw-r--r--drivers/media/i2c/dw9807-vcm.c4
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c4
-rw-r--r--drivers/media/i2c/hi556.c4
-rw-r--r--drivers/media/i2c/hi846.c4
-rw-r--r--drivers/media/i2c/hi847.c4
-rw-r--r--drivers/media/i2c/imx208.c4
-rw-r--r--drivers/media/i2c/imx214.c4
-rw-r--r--drivers/media/i2c/imx219.c4
-rw-r--r--drivers/media/i2c/imx258.c4
-rw-r--r--drivers/media/i2c/imx274.c3
-rw-r--r--drivers/media/i2c/imx290.c4
-rw-r--r--drivers/media/i2c/imx319.c4
-rw-r--r--drivers/media/i2c/imx334.c4
-rw-r--r--drivers/media/i2c/imx335.c4
-rw-r--r--drivers/media/i2c/imx355.c4
-rw-r--r--drivers/media/i2c/imx412.c4
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c4
-rw-r--r--drivers/media/i2c/isl7998x.c4
-rw-r--r--drivers/media/i2c/ks0127.c3
-rw-r--r--drivers/media/i2c/lm3560.c4
-rw-r--r--drivers/media/i2c/lm3646.c4
-rw-r--r--drivers/media/i2c/m52790.c3
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c4
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max9286.c4
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/i2c/msp3400-driver.c3
-rw-r--r--drivers/media/i2c/mt9m001.c4
-rw-r--r--drivers/media/i2c/mt9m032.c3
-rw-r--r--drivers/media/i2c/mt9m111.c4
-rw-r--r--drivers/media/i2c/mt9p031.c4
-rw-r--r--drivers/media/i2c/mt9t001.c3
-rw-r--r--drivers/media/i2c/mt9t112.c4
-rw-r--r--drivers/media/i2c/mt9v011.c4
-rw-r--r--drivers/media/i2c/mt9v032.c4
-rw-r--r--drivers/media/i2c/mt9v111.c4
-rw-r--r--drivers/media/i2c/noon010pc30.c4
-rw-r--r--drivers/media/i2c/og01a1b.c4
-rw-r--r--drivers/media/i2c/ov02a10.c4
-rw-r--r--drivers/media/i2c/ov08d10.c4
-rw-r--r--drivers/media/i2c/ov13858.c4
-rw-r--r--drivers/media/i2c/ov13b10.c4
-rw-r--r--drivers/media/i2c/ov2640.c3
-rw-r--r--drivers/media/i2c/ov2659.c4
-rw-r--r--drivers/media/i2c/ov2680.c4
-rw-r--r--drivers/media/i2c/ov2685.c4
-rw-r--r--drivers/media/i2c/ov2740.c4
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5645.c4
-rw-r--r--drivers/media/i2c/ov5647.c4
-rw-r--r--drivers/media/i2c/ov5648.c4
-rw-r--r--drivers/media/i2c/ov5670.c4
-rw-r--r--drivers/media/i2c/ov5675.c4
-rw-r--r--drivers/media/i2c/ov5693.c4
-rw-r--r--drivers/media/i2c/ov5695.c4
-rw-r--r--drivers/media/i2c/ov6650.c3
-rw-r--r--drivers/media/i2c/ov7251.c4
-rw-r--r--drivers/media/i2c/ov7640.c4
-rw-r--r--drivers/media/i2c/ov7670.c3
-rw-r--r--drivers/media/i2c/ov772x.c4
-rw-r--r--drivers/media/i2c/ov7740.c3
-rw-r--r--drivers/media/i2c/ov8856.c4
-rw-r--r--drivers/media/i2c/ov8865.c4
-rw-r--r--drivers/media/i2c/ov9282.c4
-rw-r--r--drivers/media/i2c/ov9640.c4
-rw-r--r--drivers/media/i2c/ov9650.c4
-rw-r--r--drivers/media/i2c/ov9734.c4
-rw-r--r--drivers/media/i2c/rdacm20.c4
-rw-r--r--drivers/media/i2c/rdacm21.c4
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c4
-rw-r--r--drivers/media/i2c/s5k4ecgx.c4
-rw-r--r--drivers/media/i2c/s5k5baf.c4
-rw-r--r--drivers/media/i2c/s5k6a3.c3
-rw-r--r--drivers/media/i2c/s5k6aa.c4
-rw-r--r--drivers/media/i2c/saa6588.c4
-rw-r--r--drivers/media/i2c/saa6752hs.c3
-rw-r--r--drivers/media/i2c/saa7110.c3
-rw-r--r--drivers/media/i2c/saa7115.c3
-rw-r--r--drivers/media/i2c/saa7127.c3
-rw-r--r--drivers/media/i2c/saa717x.c3
-rw-r--r--drivers/media/i2c/saa7185.c3
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c4
-rw-r--r--drivers/media/i2c/sr030pc30.c3
-rw-r--r--drivers/media/i2c/st-mipid02.c4
-rw-r--r--drivers/media/i2c/tc358743.c4
-rw-r--r--drivers/media/i2c/tda1997x.c4
-rw-r--r--drivers/media/i2c/tda7432.c3
-rw-r--r--drivers/media/i2c/tda9840.c3
-rw-r--r--drivers/media/i2c/tea6415c.c3
-rw-r--r--drivers/media/i2c/tea6420.c3
-rw-r--r--drivers/media/i2c/ths7303.c4
-rw-r--r--drivers/media/i2c/ths8200.c4
-rw-r--r--drivers/media/i2c/tlv320aic23b.c3
-rw-r--r--drivers/media/i2c/tvaudio.c3
-rw-r--r--drivers/media/i2c/tvp514x.c3
-rw-r--r--drivers/media/i2c/tvp5150.c4
-rw-r--r--drivers/media/i2c/tvp7002.c3
-rw-r--r--drivers/media/i2c/tw2804.c3
-rw-r--r--drivers/media/i2c/tw9903.c3
-rw-r--r--drivers/media/i2c/tw9906.c3
-rw-r--r--drivers/media/i2c/tw9910.c4
-rw-r--r--drivers/media/i2c/uda1342.c3
-rw-r--r--drivers/media/i2c/upd64031a.c3
-rw-r--r--drivers/media/i2c/upd64083.c3
-rw-r--r--drivers/media/i2c/video-i2c.c4
-rw-r--r--drivers/media/i2c/vp27smpx.c3
-rw-r--r--drivers/media/i2c/vpx3220.c4
-rw-r--r--drivers/media/i2c/vs6624.c3
-rw-r--r--drivers/media/i2c/wm8739.c3
-rw-r--r--drivers/media/i2c/wm8775.c3
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c2
-rw-r--r--drivers/media/radio/radio-tea5764.c3
-rw-r--r--drivers/media/radio/saa7706h.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c3
-rw-r--r--drivers/media/radio/si4713/si4713.c4
-rw-r--r--drivers/media/radio/tef6862.c3
-rw-r--r--drivers/media/rc/mceusb.c35
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.c4
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.c4
-rw-r--r--drivers/media/tuners/e4000.c4
-rw-r--r--drivers/media/tuners/fc2580.c3
-rw-r--r--drivers/media/tuners/m88rs6000t.c4
-rw-r--r--drivers/media/tuners/mt2060.c4
-rw-r--r--drivers/media/tuners/mxl301rf.c3
-rw-r--r--drivers/media/tuners/qm1d1b0004.c3
-rw-r--r--drivers/media/tuners/qm1d1c0042.c3
-rw-r--r--drivers/media/tuners/si2157.c4
-rw-r--r--drivers/media/tuners/tda18212.c4
-rw-r--r--drivers/media/tuners/tda18250.c4
-rw-r--r--drivers/media/tuners/tua9001.c3
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/go7007/s2250-board.c3
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c2
-rw-r--r--drivers/media/v4l2-core/tuner-core.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c26
-rw-r--r--drivers/memory/Kconfig9
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/brcmstb_memc.c301
-rw-r--r--drivers/memory/dfl-emif.c62
-rw-r--r--drivers/memory/mtk-smi.c109
-rw-r--r--drivers/memory/of_memory.c2
-rw-r--r--drivers/memory/pl353-smc.c1
-rw-r--r--drivers/mfd/88pm800.c4
-rw-r--r--drivers/mfd/88pm805.c4
-rw-r--r--drivers/mfd/88pm860x-core.c3
-rw-r--r--drivers/mfd/Kconfig21
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/acer-ec-a500.c4
-rw-r--r--drivers/mfd/arizona-i2c.c4
-rw-r--r--drivers/mfd/axp20x-i2c.c4
-rw-r--r--drivers/mfd/da903x.c3
-rw-r--r--drivers/mfd/da9052-i2c.c3
-rw-r--r--drivers/mfd/da9055-i2c.c4
-rw-r--r--drivers/mfd/da9062-core.c4
-rw-r--r--drivers/mfd/da9150-core.c4
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/ene-kb3930.c4
-rw-r--r--drivers/mfd/gateworks-gsc.c4
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c4
-rw-r--r--drivers/mfd/iqs62x.c4
-rw-r--r--drivers/mfd/lm3533-core.c4
-rw-r--r--drivers/mfd/lp8788.c3
-rw-r--r--drivers/mfd/madera-i2c.c4
-rw-r--r--drivers/mfd/max14577.c4
-rw-r--r--drivers/mfd/max77693.c4
-rw-r--r--drivers/mfd/max8907.c4
-rw-r--r--drivers/mfd/max8925-i2c.c3
-rw-r--r--drivers/mfd/mc13xxx-i2c.c3
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/ntxec.c4
-rw-r--r--drivers/mfd/ocelot-core.c161
-rw-r--r--drivers/mfd/ocelot-spi.c299
-rw-r--r--drivers/mfd/ocelot.h49
-rw-r--r--drivers/mfd/palmas.c4
-rw-r--r--drivers/mfd/pcf50633-core.c4
-rw-r--r--drivers/mfd/retu-mfd.c4
-rw-r--r--drivers/mfd/rk808.c4
-rw-r--r--drivers/mfd/rn5t618.c4
-rw-r--r--drivers/mfd/rsmu_i2c.c4
-rw-r--r--drivers/mfd/rt4831.c4
-rw-r--r--drivers/mfd/si476x-i2c.c4
-rw-r--r--drivers/mfd/stmfx.c4
-rw-r--r--drivers/mfd/stmpe-i2c.c4
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/tps6105x.c4
-rw-r--r--drivers/mfd/tps65010.c3
-rw-r--r--drivers/mfd/tps65086.c4
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps6586x.c3
-rw-r--r--drivers/mfd/tps65912-i2c.c4
-rw-r--r--drivers/mfd/twl-core.c3
-rw-r--r--drivers/mfd/twl6040.c4
-rw-r--r--drivers/mfd/wm8994-core.c4
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c3
-rw-r--r--drivers/misc/apds9802als.c3
-rw-r--r--drivers/misc/apds990x.c3
-rw-r--r--drivers/misc/bh1770glc.c4
-rw-r--r--drivers/misc/ds1682.c3
-rw-r--r--drivers/misc/eeprom/at24.c4
-rw-r--r--drivers/misc/eeprom/ee1004.c4
-rw-r--r--drivers/misc/eeprom/eeprom.c4
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c4
-rw-r--r--drivers/misc/eeprom/max6875.c4
-rw-r--r--drivers/misc/fastrpc.c14
-rw-r--r--drivers/misc/hmc6352.c3
-rw-r--r--drivers/misc/ibmvmc.c6
-rw-r--r--drivers/misc/ics932s401.c5
-rw-r--r--drivers/misc/isl29003.c3
-rw-r--r--drivers/misc/isl29020.c3
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c3
-rw-r--r--drivers/misc/lkdtm/cfi.c15
-rw-r--r--drivers/misc/lkdtm/fortify.c96
-rw-r--r--drivers/misc/lkdtm/usercopy.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c106
-rw-r--r--drivers/misc/mei/client.c16
-rw-r--r--drivers/misc/mei/debugfs.c19
-rw-r--r--drivers/misc/mei/gsc-me.c77
-rw-r--r--drivers/misc/mei/hbm.c14
-rw-r--r--drivers/misc/mei/hw-me-regs.h9
-rw-r--r--drivers/misc/mei/hw-me.c138
-rw-r--r--drivers/misc/mei/hw-me.h17
-rw-r--r--drivers/misc/mei/hw-txe.c4
-rw-r--r--drivers/misc/mei/hw.h7
-rw-r--r--drivers/misc/mei/init.c35
-rw-r--r--drivers/misc/mei/main.c4
-rw-r--r--drivers/misc/mei/mei_dev.h35
-rw-r--r--drivers/misc/mei/mkhi.h55
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/tsl2550.c4
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/sd.c49
-rw-r--r--drivers/mmc/core/sdio.c4
-rw-r--r--drivers/mmc/core/sdio_irq.c4
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/au1xmmc.c3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c90
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c4
-rw-r--r--drivers/mmc/host/mmc_hsq.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c17
-rw-r--r--drivers/mmc/host/mtk-sd.c115
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c5
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c16
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c7
-rw-r--r--drivers/mmc/host/sdhci-sprd.c6
-rw-r--r--drivers/mmc/host/sdhci.c88
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c5
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c5
-rw-r--r--drivers/mtd/maps/pismo.c4
-rw-r--r--drivers/mtd/parsers/Kconfig6
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/Space.c2
-rw-r--r--drivers/net/amt.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c46
-rw-r--r--drivers/net/bonding/bond_main.c96
-rw-r--r--drivers/net/bonding/bond_sysfs.c106
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c28
-rw-r--r--drivers/net/can/c_can/c_can.h17
-rw-r--r--drivers/net/can/c_can/c_can_main.c11
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c3
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c1
-rw-r--r--drivers/net/can/dev/rx-offload.c4
-rw-r--r--drivers/net/can/dev/skb.c113
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c69
-rw-r--r--drivers/net/can/flexcan/flexcan.h20
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c7
-rw-r--r--drivers/net/can/m_can/m_can.c3
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c26
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c38
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.h2
-rw-r--r--drivers/net/can/usb/gs_usb.c682
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/can/vxcan.c8
-rw-r--r--drivers/net/dsa/Kconfig6
-rw-r--r--drivers/net/dsa/b53/b53_common.c2
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c2
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c2
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c136
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c99
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h7
-rw-r--r--drivers/net/dsa/lan9303-core.c34
-rw-r--r--drivers/net/dsa/lan9303_i2c.c8
-rw-r--r--drivers/net/dsa/lan9303_mdio.c3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c10
-rw-r--r--drivers/net/dsa/microchip/ksz8.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c111
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c113
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h5
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c10
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c1126
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h137
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c15
-rw-r--r--drivers/net/dsa/microchip/lan937x.h6
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c118
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h18
-rw-r--r--drivers/net/dsa/mt7530.c71
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c39
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c19
-rw-r--r--drivers/net/dsa/ocelot/felix.c255
-rw-r--r--drivers/net/dsa/ocelot/felix.h16
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c655
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c327
-rw-r--r--drivers/net/dsa/qca/ar9331.c2
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c4
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c23
-rw-r--r--drivers/net/dsa/qca/qca8k.h3
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c2
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c2
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c5
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c8
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c2
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c6
-rw-r--r--drivers/net/ethernet/3com/typhoon.c8
-rw-r--r--drivers/net/ethernet/8390/ax88796.c6
-rw-r--r--drivers/net/ethernet/8390/etherh.c6
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c4
-rw-r--r--drivers/net/ethernet/adi/Kconfig28
-rw-r--r--drivers/net/ethernet/adi/Makefile6
-rw-r--r--drivers/net/ethernet/adi/adin1110.c1697
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/agere/et131x.c6
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c4
-rw-r--r--drivers/net/ethernet/altera/Kconfig2
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h19
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c23
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c456
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c8
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c49
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c4
-rw-r--r--drivers/net/ethernet/amd/atarilance.c10
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c8
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c20
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c16
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c4
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c57
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c6
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c17
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c10
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c18
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c8
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c30
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c5
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c9
-rw-r--r--drivers/net/ethernet/cortina/gemini.c26
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c32
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c4
-rw-r--r--drivers/net/ethernet/dnet.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/engleder/Kconfig1
-rw-r--r--drivers/net/ethernet/engleder/Makefile2
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h48
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c40
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h16
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c465
-rw-r--r--drivers/net/ethernet/engleder/tsnep_rxnfc.c307
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c30
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c12
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.h12
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c68
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c84
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h26
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c239
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h116
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c59
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c117
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c17
-rw-r--r--drivers/net/ethernet/freescale/fec.h31
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c234
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c321
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h58
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.h29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h24
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c238
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h57
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h32
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.h28
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c164
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h54
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c497
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h45
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c2
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c15
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c16
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c15
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c103
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c89
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c66
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c327
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c415
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c35
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h25
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c12
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c303
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h23
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c93
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c20
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c224
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c372
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c288
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c370
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c807
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c98
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c251
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c242
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c236
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h15
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c17
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c131
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/jme.c8
-rw-r--r--drivers/net/ethernet/korina.c11
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c12
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c11
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c25
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h473
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.c1601
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.h246
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c214
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h1102
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c889
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c346
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c1668
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c60
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h131
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c300
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c84
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c103
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c110
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h14
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c51
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c54
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c179
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h18
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c54
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.c125
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.h17
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c1119
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c366
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h76
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c66
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.h12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c144
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h102
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c302
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h81
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c46
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c64
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c479
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h89
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h158
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c188
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c559
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c227
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1870
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c1384
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c440
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c373
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c518
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c379
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h163
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c46
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c5
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c6
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c68
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c70
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ets.c96
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c155
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c363
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c104
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h119
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c235
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c24
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h356
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c528
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c85
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c133
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c95
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h165
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c513
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c271
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c125
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c14
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c46
-rw-r--r--drivers/net/ethernet/mscc/Makefile11
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c796
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h12
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c88
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c481
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c458
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c143
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c85
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c242
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c262
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c61
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c12
-rw-r--r--drivers/net/ethernet/ni/nixge.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c16
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c107
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c19
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c6
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.h18
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c241
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c133
-rw-r--r--drivers/net/ethernet/renesas/ravb.h8
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c3
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c21
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c4
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c43
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.h22
-rw-r--r--drivers/net/ethernet/sfc/mae.c165
-rw-r--r--drivers/net/ethernet/sfc/mae.h14
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h10
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c128
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c4
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c430
-rw-r--r--drivers/net/ethernet/sfc/tc.h36
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.c228
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.h29
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c6
-rw-r--r--drivers/net/ethernet/smsc/epic100.c8
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c12
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c8
-rw-r--r--drivers/net/ethernet/socionext/netsec.c6
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/sun/cassini.c8
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c8
-rw-r--r--drivers/net/ethernet/sun/sunhme.c669
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c7
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c6
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c10
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h2
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c7
-rw-r--r--drivers/net/ethernet/ti/cpmac.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c15
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c242
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c6
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c25
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c10
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig13
-rw-r--r--drivers/net/ethernet/wangxun/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h24
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c170
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h50
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c6
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c6
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h181
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c81
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c51
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c6
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/fjes/fjes_ethtool.c6
-rw-r--r--drivers/net/fjes/fjes_main.c1152
-rw-r--r--drivers/net/geneve.c13
-rw-r--r--drivers/net/gtp.c1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hippi/rrunner.c1
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ieee802154/cc2520.c1
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c1
-rw-r--r--drivers/net/ipa/Makefile2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c10
-rw-r--r--drivers/net/ipa/gsi.c94
-rw-r--r--drivers/net/ipa/gsi.h26
-rw-r--r--drivers/net/ipa/gsi_private.h14
-rw-r--r--drivers/net/ipa/gsi_reg.h210
-rw-r--r--drivers/net/ipa/gsi_trans.c221
-rw-r--r--drivers/net/ipa/gsi_trans.h7
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_cmd.c11
-rw-r--r--drivers/net/ipa/ipa_cmd.h2
-rw-r--r--drivers/net/ipa/ipa_data.h4
-rw-r--r--drivers/net/ipa/ipa_endpoint.c494
-rw-r--r--drivers/net/ipa/ipa_endpoint.h2
-rw-r--r--drivers/net/ipa/ipa_interrupt.c47
-rw-r--r--drivers/net/ipa/ipa_interrupt.h2
-rw-r--r--drivers/net/ipa/ipa_main.c284
-rw-r--r--drivers/net/ipa/ipa_mem.c20
-rw-r--r--drivers/net/ipa/ipa_modem.c2
-rw-r--r--drivers/net/ipa/ipa_modem.h2
-rw-r--r--drivers/net/ipa/ipa_power.c2
-rw-r--r--drivers/net/ipa/ipa_power.h2
-rw-r--r--drivers/net/ipa/ipa_qmi.c10
-rw-r--r--drivers/net/ipa/ipa_qmi.h2
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c10
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h39
-rw-r--r--drivers/net/ipa/ipa_reg.c97
-rw-r--r--drivers/net/ipa/ipa_reg.h1121
-rw-r--r--drivers/net/ipa/ipa_resource.c65
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_smp2p.h2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c2
-rw-r--r--drivers/net/ipa/ipa_sysfs.h2
-rw-r--r--drivers/net/ipa/ipa_table.c31
-rw-r--r--drivers/net/ipa/ipa_table.h5
-rw-r--r--drivers/net/ipa/ipa_uc.c11
-rw-r--r--drivers/net/ipa/ipa_uc.h2
-rw-r--r--drivers/net/ipa/ipa_version.h30
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c478
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c456
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c512
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c456
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c533
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c509
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/macsec.c105
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/mctp/mctp-i2c.c4
-rw-r--r--drivers/net/mdio/fwnode_mdio.c62
-rw-r--r--drivers/net/mdio/mdio-i2c.c310
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c42
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c20
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c9
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c9
-rw-r--r--drivers/net/mdio/of_mdio.c1
-rw-r--r--drivers/net/net_failover.c4
-rw-r--r--drivers/net/netconsole.c10
-rw-r--r--drivers/net/netdevsim/dev.c20
-rw-r--r--drivers/net/netdevsim/hwstats.c6
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/ntb_netdev.c6
-rw-r--r--drivers/net/pcs/Kconfig6
-rw-r--r--drivers/net/pcs/Makefile1
-rw-r--r--drivers/net/pcs/pcs-altera-tse.c175
-rw-r--r--drivers/net/phy/adin.c2
-rw-r--r--drivers/net/phy/adin1100.c7
-rw-r--r--drivers/net/phy/aquantia_main.c121
-rw-r--r--drivers/net/phy/at803x.c28
-rw-r--r--drivers/net/phy/bcm-phy-lib.c2
-rw-r--r--drivers/net/phy/broadcom.c39
-rw-r--r--drivers/net/phy/marvell-88x2222.c3
-rw-r--r--drivers/net/phy/marvell.c5
-rw-r--r--drivers/net/phy/marvell10g.c133
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/meson-gxl.c8
-rw-r--r--drivers/net/phy/micrel.c221
-rw-r--r--drivers/net/phy/microchip_t1.c58
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c113
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/nxp-tja11xx.c83
-rw-r--r--drivers/net/phy/phy-core.c74
-rw-r--r--drivers/net/phy/phy.c28
-rw-r--r--drivers/net/phy/phy_device.c24
-rw-r--r--drivers/net/phy/phylink.c487
-rw-r--r--drivers/net/phy/realtek.c44
-rw-r--r--drivers/net/phy/sfp-bus.c175
-rw-r--r--drivers/net/phy/sfp.c397
-rw-r--r--drivers/net/phy/sfp.h11
-rw-r--r--drivers/net/phy/smsc.c30
-rw-r--r--drivers/net/phy/spi_ks8995.c69
-rw-r--r--drivers/net/pse-pd/Kconfig22
-rw-r--r--drivers/net/pse-pd/Makefile6
-rw-r--r--drivers/net/pse-pd/pse_core.c314
-rw-r--r--drivers/net/pse-pd/pse_regulator.c147
-rw-r--r--drivers/net/rionet.c8
-rw-r--r--drivers/net/team/team.c29
-rw-r--r--drivers/net/thunderbolt.c64
-rw-r--r--drivers/net/tun.c31
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/aqc111.c2
-rw-r--r--drivers/net/usb/asix.h3
-rw-r--r--drivers/net/usb/asix_common.c4
-rw-r--r--drivers/net/usb/asix_devices.c142
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/lan78xx.c2
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c62
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c52
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c6
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c13
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireguard/peer.c3
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c68
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c188
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c132
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c488
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c28
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h20
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c21
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c118
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c165
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c118
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c30
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h28
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c246
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h72
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c62
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c118
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c434
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h13
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c668
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c376
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c2
-rw-r--r--drivers/net/wireless/intersil/p54/main.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c552
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c11
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c256
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c198
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c22
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c39
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c18
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c1753
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c108
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c88
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c101
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c220
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h31
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c23
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c65
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c235
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h64
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1887
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c489
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h551
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c107
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c702
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h299
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c338
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h63
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c161
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c410
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h73
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c453
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c78
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h148
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c244
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c94
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c411
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c76
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c28868
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c17
-rw-r--r--drivers/net/wireless/rndis_wlan.c25
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c18
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c8
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c4
-rw-r--r--drivers/net/wireless/wl3501_cs.c8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c9
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c2
-rw-r--r--drivers/net/wwan/wwan_hwsim.c6
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c7
-rw-r--r--drivers/net/xen-netback/xenbus.c5
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c4
-rw-r--r--drivers/nfc/nxp-nci/i2c.c4
-rw-r--r--drivers/nfc/pn533/i2c.c4
-rw-r--r--drivers/nfc/pn533/uart.c1
-rw-r--r--drivers/nfc/pn544/i2c.c4
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c4
-rw-r--r--drivers/nfc/st-nci/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nvdimm/namespace_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvme/host/core.c167
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fc.c124
-rw-r--r--drivers/nvme/host/ioctl.c317
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/nvme.h48
-rw-r--r--drivers/nvme/host/pci.c96
-rw-r--r--drivers/nvme/host/rdma.c171
-rw-r--r--drivers/nvme/host/tcp.c176
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/auth.c1
-rw-r--r--drivers/nvme/target/configfs.c29
-rw-r--r--drivers/nvme/target/core.c7
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c23
-rw-r--r--drivers/nvme/target/fabrics-cmd.c19
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c19
-rw-r--r--drivers/nvme/target/loop.c91
-rw-r--r--drivers/nvme/target/nvmet.h7
-rw-r--r--drivers/nvme/target/passthru.c12
-rw-r--r--drivers/nvme/target/tcp.c94
-rw-r--r--drivers/nvme/target/zns.c20
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/unittest.c6
-rw-r--r--drivers/opp/core.c2
-rw-r--r--drivers/parisc/ccio-dma.c14
-rw-r--r--drivers/parisc/iosapic.c11
-rw-r--r--drivers/parisc/led.c2
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/pci/controller/Kconfig2
-rw-r--r--drivers/pcmcia/Kconfig13
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/at91_cf.c407
-rw-r--r--drivers/pcmcia/i82092.c4
-rw-r--r--drivers/pcmcia/omap_cf.c4
-rw-r--r--drivers/pcmcia/sa1100_generic.c10
-rw-r--r--drivers/pcmcia/vrc4171_card.c745
-rw-r--r--drivers/peci/controller/peci-aspeed.c2
-rw-r--r--drivers/peci/cpu.c3
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c810
-rw-r--r--drivers/perf/arm-cmn.c2
-rw-r--r--drivers/perf/arm_dsu_pmu.c4
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/arm_spe_pmu.c6
-rw-r--r--drivers/perf/qcom_l2_pmu.c10
-rw-r--r--drivers/perf/qcom_l3_pmu.c3
-rw-r--r--drivers/perf/riscv_pmu_legacy.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c2
-rw-r--r--drivers/phy/broadcom/Kconfig4
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c87
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c3
-rw-r--r--drivers/pinctrl/Kconfig5
-rw-r--r--drivers/pinctrl/bcm/Kconfig4
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c14
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c127
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c2
-rw-r--r--drivers/platform/chrome/Kconfig11
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c24
-rw-r--r--drivers/platform/chrome/cros_ec.c11
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c3
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c3
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c4
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c32
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c110
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c321
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c1
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c55
-rw-r--r--drivers/platform/surface/surface3_power.c6
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c3
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c47
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/acer-wmi.c77
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/amd/Kconfig2
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/hsmp.c2
-rw-r--r--drivers/platform/x86/amd/pmc.c131
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig16
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile9
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c304
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c305
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c395
-rw-r--r--drivers/platform/x86/amd/pmf/core.c412
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h417
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c146
-rw-r--r--drivers/platform/x86/amilo-rfkill.c3
-rw-r--r--drivers/platform/x86/apple-gmux.c3
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c59
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c667
-rw-r--r--drivers/platform/x86/asus-wmi.h12
-rw-r--r--drivers/platform/x86/compal-laptop.c153
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c2
-rw-r--r--drivers/platform/x86/dell/dcdbas.c2
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c2
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c4
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c3
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c12
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c2
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/eeepc-wmi.c25
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c2
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c33
-rw-r--r--drivers/platform/x86/huawei-wmi.c2
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c8
-rw-r--r--drivers/platform/x86/intel/int3472/common.c2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c34
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c80
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.h3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c54
-rw-r--r--drivers/platform/x86/intel/oaktrail.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c2
-rw-r--r--drivers/platform/x86/intel/wmi/thunderbolt.c24
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c106
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c82
-rw-r--r--drivers/platform/x86/p2sb.c18
-rw-r--r--drivers/platform/x86/panasonic-laptop.c2
-rw-r--r--drivers/platform/x86/pmc_atom.c44
-rw-r--r--drivers/platform/x86/samsung-laptop.c89
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c1
-rw-r--r--drivers/platform/x86/simatic-ipc.c10
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c15
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c337
-rw-r--r--drivers/platform/x86/winmate-fm07-keys.c2
-rw-r--r--drivers/platform/x86/wmi.c66
-rw-r--r--drivers/platform/x86/x86-android-tablets.c14
-rw-r--r--drivers/pnp/pnpbios/pnpbios.h1
-rw-r--r--drivers/power/supply/bq2415x_charger.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c4
-rw-r--r--drivers/power/supply/bq24257_charger.c4
-rw-r--r--drivers/power/supply/bq25890_charger.c4
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/cw2015_battery.c3
-rw-r--r--drivers/power/supply/ds2782_battery.c4
-rw-r--r--drivers/power/supply/lp8727_charger.c3
-rw-r--r--drivers/power/supply/rt5033_battery.c4
-rw-r--r--drivers/power/supply/rt9455_charger.c4
-rw-r--r--drivers/power/supply/smb347-charger.c4
-rw-r--r--drivers/power/supply/z2_battery.c4
-rw-r--r--drivers/powercap/intel_rapl_common.c5
-rw-r--r--drivers/ptp/ptp_clock.c6
-rw-r--r--drivers/ptp/ptp_ocp.c8
-rw-r--r--drivers/pwm/core.c35
-rw-r--r--drivers/pwm/pwm-pca9685.c4
-rw-r--r--drivers/ras/cec.c8
-rw-r--r--drivers/regulator/Kconfig27
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/bd71815-regulator.c7
-rw-r--r--drivers/regulator/bd9576-regulator.c17
-rw-r--r--drivers/regulator/core.c109
-rw-r--r--drivers/regulator/da9121-regulator.c3
-rw-r--r--drivers/regulator/devres.c164
-rw-r--r--drivers/regulator/gpio-regulator.c15
-rw-r--r--drivers/regulator/lp8755.c4
-rw-r--r--drivers/regulator/max597x-regulator.c5
-rw-r--r--drivers/regulator/max8973-regulator.c10
-rw-r--r--drivers/regulator/mt6331-regulator.c507
-rw-r--r--drivers/regulator/mt6332-regulator.c422
-rw-r--r--drivers/regulator/of_regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c71
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c24
-rw-r--r--drivers/regulator/qcom_smd-regulator.c400
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c378
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c4
-rw-r--r--drivers/regulator/ti-abb-regulator.c2
-rw-r--r--drivers/regulator/tps65219-regulator.c411
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c1
-rw-r--r--drivers/reset/Kconfig2
-rw-r--r--drivers/reset/reset-imx7.c1
-rw-r--r--drivers/reset/reset-microchip-sparx5.c22
-rw-r--r--drivers/reset/reset-npcm.c2
-rw-r--r--drivers/rtc/rtc-bq32k.c4
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-isl12026.c3
-rw-r--r--drivers/rtc/rtc-m41t80.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c3
-rw-r--r--drivers/rtc/rtc-x1205.c3
-rw-r--r--drivers/s390/block/dasd.c86
-rw-r--r--drivers/s390/block/dasd_3990_erp.c5
-rw-r--r--drivers/s390/block/dasd_alias.c9
-rw-r--r--drivers/s390/block/dasd_devmap.c609
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c294
-rw-r--r--drivers/s390/block/dasd_eckd.h9
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c29
-rw-r--r--drivers/s390/block/dasd_int.h75
-rw-r--r--drivers/s390/block/dasd_ioctl.c53
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c30
-rw-r--r--drivers/s390/net/qeth_l2_main.c6
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c5
-rw-r--r--drivers/scsi/hosts.c28
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c14
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c7
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_debug.c7
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_ioctl.c22
-rw-r--r--drivers/scsi/scsi_lib.c57
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c10
-rw-r--r--drivers/scsi/scsi_sysfs.c30
-rw-r--r--drivers/scsi/sd.c84
-rw-r--r--drivers/scsi/sd.h5
-rw-r--r--drivers/scsi/sg.c31
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c5
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c5
-rw-r--r--drivers/soc/apple/rtkit.c6
-rw-r--r--drivers/soc/bcm/bcm63xx/Kconfig4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c1
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c66
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/qbman/qman.c77
-rw-r--r--drivers/soc/imx/Kconfig8
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/gpcv2.c5
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c97
-rw-r--r--drivers/soc/imx/imx8mp-blk-ctrl.c89
-rw-r--r--drivers/soc/imx/imx93-blk-ctrl.c436
-rw-r--r--drivers/soc/imx/imx93-pd.c164
-rw-r--r--drivers/soc/imx/imx93-src.c33
-rw-r--r--drivers/soc/mediatek/Kconfig2
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h6
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c20
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c44
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c6
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c2
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c2
-rw-r--r--drivers/soc/mediatek/mtk-svs.c284
-rw-r--r--drivers/soc/pxa/ssp.c6
-rw-r--r--drivers/soc/qcom/Kconfig2
-rw-r--r--drivers/soc/qcom/icc-bwmon.c479
-rw-r--r--drivers/soc/qcom/llcc-qcom.c92
-rw-r--r--drivers/soc/qcom/qcom_stats.c9
-rw-r--r--drivers/soc/qcom/qmi_encdec.c50
-rw-r--r--drivers/soc/qcom/qmi_interface.c12
-rw-r--r--drivers/soc/qcom/rpmpd.c22
-rw-r--r--drivers/soc/qcom/smem_state.c3
-rw-r--r--drivers/soc/qcom/smsm.c20
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/renesas-soc.c14
-rw-r--r--drivers/soc/rockchip/io-domain.c20
-rw-r--r--drivers/soc/rockchip/pm_domains.c130
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c74
-rw-r--r--drivers/soc/tegra/Kconfig10
-rw-r--r--drivers/soc/tegra/Makefile1
-rw-r--r--drivers/soc/tegra/cbb/Makefile9
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c190
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c2364
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c1113
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c1
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c36
-rw-r--r--drivers/soc/tegra/pmc.c45
-rw-r--r--drivers/soundwire/bus.c32
-rw-r--r--drivers/soundwire/cadence_master.c8
-rw-r--r--drivers/soundwire/cadence_master.h2
-rw-r--r--drivers/soundwire/intel.c1
-rw-r--r--drivers/soundwire/qcom.c10
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-amd.c183
-rw-r--r--drivers/spi/spi-aspeed-smc.c4
-rw-r--r--drivers/spi/spi-bitbang-txrx.h6
-rw-r--r--drivers/spi/spi-cadence-quadspi.c41
-rw-r--r--drivers/spi/spi-cadence-xspi.c4
-rw-r--r--drivers/spi/spi-dw-bt1.c4
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c3
-rw-r--r--drivers/spi/spi-fsl-lpspi.c10
-rw-r--r--drivers/spi/spi-fsl-qspi.c3
-rw-r--r--drivers/spi/spi-fsl-spi.c157
-rw-r--r--drivers/spi/spi-gxp.c10
-rw-r--r--drivers/spi/spi-img-spfi.c6
-rw-r--r--drivers/spi/spi-intel.c164
-rw-r--r--drivers/spi/spi-loopback-test.c27
-rw-r--r--drivers/spi/spi-meson-spicc.c129
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c600
-rw-r--r--drivers/spi/spi-microchip-core.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c35
-rw-r--r--drivers/spi/spi-mt65xx.c5
-rw-r--r--drivers/spi/spi-mt7621.c42
-rw-r--r--drivers/spi/spi-mux.c1
-rw-r--r--drivers/spi/spi-npcm-pspi.c1
-rw-r--r--drivers/spi/spi-nxp-fspi.c8
-rw-r--r--drivers/spi/spi-omap-100k.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c41
-rw-r--r--drivers/spi/spi-qup.c21
-rw-r--r--drivers/spi/spi-s3c24xx.c24
-rw-r--r--drivers/spi/spi-s3c64xx.c13
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c125
-rw-r--r--drivers/spi/spi-xilinx.c20
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c16
-rw-r--r--drivers/spi/spi.c178
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c3
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c4
-rw-r--r--drivers/staging/media/max96712/max96712.c4
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c4
-rw-r--r--drivers/staging/most/i2c/i2c.c4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c4
-rw-r--r--drivers/staging/qlge/qlge_main.c4
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c1
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c36
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c13
-rw-r--r--drivers/staging/sm750fb/sm750.c15
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c12
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/target/target_core_user.c1
-rw-r--r--drivers/tee/optee/ffa_abi.c46
-rw-r--r--drivers/tee/optee/optee_private.h1
-rw-r--r--drivers/tee/tee_shm.c4
-rw-r--r--drivers/thermal/amlogic_thermal.c16
-rw-r--r--drivers/thermal/armada_thermal.c12
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c14
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c14
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c20
-rw-r--r--drivers/thermal/broadcom/ns-thermal.c50
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c16
-rw-r--r--drivers/thermal/cpufreq_cooling.c12
-rw-r--r--drivers/thermal/da9062-thermal.c5
-rw-r--r--drivers/thermal/db8500_thermal.c8
-rw-r--r--drivers/thermal/gov_bang_bang.c10
-rw-r--r--drivers/thermal/gov_fair_share.c3
-rw-r--r--drivers/thermal/gov_power_allocator.c20
-rw-r--r--drivers/thermal/gov_step_wise.c10
-rw-r--r--drivers/thermal/gov_user_space.c5
-rw-r--r--drivers/thermal/hisi_thermal.c14
-rw-r--r--drivers/thermal/imx8mm_thermal.c14
-rw-r--r--drivers/thermal/imx_sc_thermal.c14
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c8
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c13
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c13
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c27
-rw-r--r--drivers/thermal/k3_bandgap.c12
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c12
-rw-r--r--drivers/thermal/max77620_thermal.c8
-rw-r--r--drivers/thermal/mtk_thermal.c10
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c23
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c12
-rw-r--r--drivers/thermal/qcom/tsens.c16
-rw-r--r--drivers/thermal/qoriq_thermal.c12
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c16
-rw-r--r--drivers/thermal/rcar_thermal.c13
-rw-r--r--drivers/thermal/rockchip_thermal.c14
-rw-r--r--drivers/thermal/rzg2l_thermal.c10
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c24
-rw-r--r--drivers/thermal/sprd_thermal.c18
-rw-r--r--drivers/thermal/st/stm_thermal.c18
-rw-r--r--drivers/thermal/sun8i_thermal.c14
-rw-r--r--drivers/thermal/tegra/soctherm.c21
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c19
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c12
-rw-r--r--drivers/thermal/thermal-generic-adc.c10
-rw-r--r--drivers/thermal/thermal_core.c81
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/thermal/thermal_helpers.c73
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thermal/thermal_mmio.c19
-rw-r--r--drivers/thermal/thermal_netlink.c1
-rw-r--r--drivers/thermal/thermal_of.c1148
-rw-r--r--drivers/thermal/thermal_sysfs.c11
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c16
-rw-r--r--drivers/thermal/uniphier_thermal.c10
-rw-r--r--drivers/thunderbolt/Kconfig3
-rw-r--r--drivers/thunderbolt/acpi.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/icm.c1
-rw-r--r--drivers/thunderbolt/nhi.c49
-rw-r--r--drivers/thunderbolt/nhi.h1
-rw-r--r--drivers/thunderbolt/switch.c7
-rw-r--r--drivers/thunderbolt/tb.c8
-rw-r--r--drivers/thunderbolt/tb.h2
-rw-r--r--drivers/thunderbolt/usb4.c8
-rw-r--r--drivers/thunderbolt/usb4_port.c2
-rw-r--r--drivers/tty/n_gsm.c85
-rw-r--r--drivers/tty/serial/21285.c3
-rw-r--r--drivers/tty/serial/8250/8250_omap.c1
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/atmel_serial.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c14
-rw-r--r--drivers/tty/serial/max310x.c4
-rw-r--r--drivers/tty/serial/sc16is7xx.c4
-rw-r--r--drivers/tty/serial/serial-tegra.c5
-rw-r--r--drivers/tty/serial/sifive.c2
-rw-r--r--drivers/tty/serial/tegra-tcu.c2
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/vt/vt.c12
-rw-r--r--drivers/ufs/core/ufshcd.c18
-rw-r--r--drivers/ufs/core/ufshpb.c8
-rw-r--r--drivers/ufs/host/ufs-exynos.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c4
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/core.c37
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c96
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/gadget.c8
-rw-r--r--drivers/usb/dwc3/host.c11
-rw-r--r--drivers/usb/gadget/function/f_uac2.c16
-rw-r--r--drivers/usb/gadget/function/storage_common.c6
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.c4
-rw-r--r--drivers/usb/gadget/udc/core.c26
-rw-r--r--drivers/usb/host/xhci-hub.c13
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c15
-rw-r--r--drivers/usb/host/xhci-plat.c11
-rw-r--r--drivers/usb/host/xhci.c19
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c5
-rw-r--r--drivers/usb/misc/usb3503.c4
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c4
-rw-r--r--drivers/usb/phy/phy-isp1301.c4
-rw-r--r--drivers/usb/serial/ch341.c16
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c21
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h28
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/anx7411.c4
-rw-r--r--drivers/usb/typec/class.c1
-rw-r--r--drivers/usb/typec/hd3ss3220.c4
-rw-r--r--drivers/usb/typec/mux/fsa4480.c4
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c12
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c3
-rw-r--r--drivers/usb/typec/rt1719.c4
-rw-r--r--drivers/usb/typec/stusb160x.c4
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/tipd/core.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c55
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c4
-rw-r--r--drivers/usb/typec/wusb3801.c4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c4
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c17
-rw-r--r--drivers/vdpa/vdpa.c1
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c9
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c12
-rw-r--r--drivers/vhost/net.c15
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/aperture.c69
-rw-r--r--drivers/video/backlight/Kconfig13
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp8860_bl.c4
-rw-r--r--drivers/video/backlight/adp8870_bl.c4
-rw-r--r--drivers/video/backlight/arcxcnn_bl.c4
-rw-r--r--drivers/video/backlight/bd6107.c4
-rw-r--r--drivers/video/backlight/lm3630a_bl.c3
-rw-r--r--drivers/video/backlight/lm3639_bl.c3
-rw-r--r--drivers/video/backlight/lp855x_bl.c4
-rw-r--r--drivers/video/backlight/lv5207lp.c4
-rw-r--r--drivers/video/backlight/mt6370-backlight.c351
-rw-r--r--drivers/video/backlight/tosa_bl.c3
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/asiliantfb.c5
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c57
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c9
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c131
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/carminefb.c5
-rw-r--r--drivers/video/fbdev/chipsfb.c14
-rw-r--r--drivers/video/fbdev/cirrusfb.c7
-rw-r--r--drivers/video/fbdev/clps711x-fb.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c29
-rw-r--r--drivers/video/fbdev/core/fbmem.c219
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/cyber2000fb.c13
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c11
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c5
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c5
-rw-r--r--drivers/video/fbdev/gxt4500.c7
-rw-r--r--drivers/video/fbdev/hyperv_fb.c10
-rw-r--r--drivers/video/fbdev/i740fb.c7
-rw-r--r--drivers/video/fbdev/i810/i810_main.c315
-rw-r--r--drivers/video/fbdev/imsttfb.c36
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c5
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c5
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c11
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.c3
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c5
-rw-r--r--drivers/video/fbdev/neofb.c41
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c7
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c10
-rw-r--r--drivers/video/fbdev/pm3fb.c5
-rw-r--r--drivers/video/fbdev/pvr2fb.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c67
-rw-r--r--drivers/video/fbdev/s3fb.c7
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c5
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c283
-rw-r--r--drivers/video/fbdev/skeletonfb.c210
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/sm712fb.c5
-rw-r--r--drivers/video/fbdev/ssd1307fb.c6
-rw-r--r--drivers/video/fbdev/sstfb.c45
-rw-r--r--drivers/video/fbdev/sunxvr1000.c2
-rw-r--r--drivers/video/fbdev/sunxvr2500.c7
-rw-r--r--drivers/video/fbdev/sunxvr500.c7
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c9
-rw-r--r--drivers/video/fbdev/tgafb.c19
-rw-r--r--drivers/video/fbdev/tridentfb.c7
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c7
-rw-r--r--drivers/video/fbdev/vga16fb.c191
-rw-r--r--drivers/video/fbdev/via/via-core.c5
-rw-r--r--drivers/video/fbdev/vt8623fb.c5
-rw-r--r--drivers/video/hdmi.c82
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c9
-rw-r--r--drivers/virtio/virtio_pci_common.c20
-rw-r--r--drivers/virtio/virtio_pci_common.h3
-rw-r--r--drivers/virtio/virtio_pci_legacy.c6
-rw-r--r--drivers/virtio/virtio_pci_modern.c17
-rw-r--r--drivers/virtio/virtio_ring.c8
-rw-r--r--drivers/virtio/virtio_vdpa.c16
-rw-r--r--drivers/w1/masters/ds2482.c3
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/ziirave_wdt.c4
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--drivers/xen/privcmd.c21
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
3866 files changed, 224065 insertions, 77428 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7802d8846a8d..473241b5193f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -27,9 +27,6 @@ menuconfig ACPI
Management (APM) specification. If both ACPI and APM support
are configured, ACPI is used.
- The project home page for the Linux ACPI subsystem is here:
- <https://01.org/linux-acpi>
-
Linux support for ACPI is based on Intel Corporation's ACPI
Component Architecture (ACPI CA). For more information on the
ACPI CA, see:
@@ -212,6 +209,7 @@ config ACPI_VIDEO
tristate "Video"
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on ACPI_WMI || !X86
select THERMAL
help
This driver implements the ACPI Extensions For Display Adapters
@@ -347,7 +345,6 @@ config ACPI_CUSTOM_DSDT_FILE
depends on !STANDALONE
help
This option supports a custom DSDT by linking it into the kernel.
- See Documentation/admin-guide/acpi/dsdt-override.rst
Enter the full path name to the file which includes the AmlCode
or dsdt_aml_code declaration.
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index c29e41bfcf35..bb9fe7984b1a 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -36,11 +36,6 @@ static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
-struct acpi_ac_bl {
- const char *hid;
- int hrv;
-};
-
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
{"", 0},
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index ab8a4e0191b1..f5b443ab01c2 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -21,6 +21,7 @@
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
+ {"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */
{"ARMHC500", 0}, /* ARM CoreSight ETM4x */
{"ARMHC501", 0}, /* ARM CoreSight ETR */
{"ARMHC502", 0}, /* ARM CoreSight STM */
@@ -48,6 +49,7 @@ static void amba_register_dummy_clk(void)
static int amba_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
+ struct acpi_device *parent = acpi_dev_parent(adev);
struct amba_device *dev;
struct resource_entry *rentry;
struct list_head resource_list;
@@ -97,8 +99,8 @@ static int amba_handler_attach(struct acpi_device *adev,
* attached to it, that physical device should be the parent of
* the amba device we are about to create.
*/
- if (adev->parent)
- dev->dev.parent = acpi_get_first_physical_node(adev->parent);
+ if (parent)
+ dev->dev.parent = acpi_get_first_physical_node(parent);
ACPI_COMPANION_SET(&dev->dev, adev);
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ad245bbd965e..3bbe2276cac7 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -60,12 +60,6 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
}
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
-static int misc_check_res(struct acpi_resource *ares, void *data)
-{
- struct resource res;
-
- return !acpi_dev_resource_memory(ares, &res);
-}
static int fch_misc_setup(struct apd_private_data *pdata)
{
@@ -82,8 +76,7 @@ static int fch_misc_setup(struct apd_private_data *pdata)
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list, misc_check_res,
- NULL);
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
return -ENOENT;
diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
index 6922a44b3ce7..a2056c4c8cb7 100644
--- a/drivers/acpi/acpi_fpdt.c
+++ b/drivers/acpi/acpi_fpdt.c
@@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = {
static struct kobject *fpdt_kobj;
+#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT
+#include <linux/processor.h>
+static bool fpdt_address_valid(u64 address)
+{
+ /*
+ * On some systems the table contains invalid addresses
+ * with unsupported high address bits set, check for this.
+ */
+ return !(address >> boot_cpu_data.x86_phys_bits);
+}
+#else
+static bool fpdt_address_valid(u64 address)
+{
+ return true;
+}
+#endif
+
static int fpdt_process_subtable(u64 address, u32 subtable_type)
{
struct fpdt_subtable_header *subtable_header;
@@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
u32 length, offset;
int result;
+ if (!fpdt_address_valid(address)) {
+ pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address);
+ return -EINVAL;
+ }
+
subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
if (!subtable_header)
return -ENOMEM;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index c4d4d21391d7..f08ffa75f4a7 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -167,10 +167,10 @@ static struct pwm_lookup byt_pwm_lookup[] = {
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
- struct acpi_device *adev = pdata->adev;
+ u64 uid;
/* Only call pwm_add_table for the first PWM controller */
- if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
return;
pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
@@ -180,14 +180,13 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
- const char *uid_str = acpi_device_uid(pdata->adev);
acpi_handle handle = pdata->adev->handle;
unsigned long long shared_host = 0;
acpi_status status;
- long uid = 0;
+ u64 uid;
- /* Expected to always be true, but better safe then sorry */
- if (uid_str && !kstrtol(uid_str, 10, &uid) && uid) {
+ /* Expected to always be successful, but better safe than sorry */
+ if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
/* Detect I2C bus shared with PUNIT and ignore its d3 status */
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
if (ACPI_SUCCESS(status) && shared_host)
@@ -211,10 +210,10 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
- struct acpi_device *adev = pdata->adev;
+ u64 uid;
/* Only call pwm_add_table for the first PWM controller */
- if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
return;
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
@@ -392,13 +391,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
#ifdef CONFIG_X86_INTEL_LPSS
-static int is_memory(struct acpi_resource *res, void *not_used)
-{
- struct resource r;
-
- return !acpi_dev_resource_memory(res, &r);
-}
-
/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;
@@ -659,29 +651,25 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
goto err_out;
- list_for_each_entry(rentry, &resource_list, node)
- if (resource_type(rentry->res) == IORESOURCE_MEM) {
- if (dev_desc->prv_size_override)
- pdata->mmio_size = dev_desc->prv_size_override;
- else
- pdata->mmio_size = resource_size(rentry->res);
- pdata->mmio_base = ioremap(rentry->res->start,
- pdata->mmio_size);
- break;
- }
+ rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
+ if (rentry) {
+ if (dev_desc->prv_size_override)
+ pdata->mmio_size = dev_desc->prv_size_override;
+ else
+ pdata->mmio_size = resource_size(rentry->res);
+ pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
+ }
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
adev->pnp.type.platform_id = 0;
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
+ goto out_free;
}
pdata->adev = adev;
@@ -692,11 +680,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
if (dev_desc->flags & LPSS_CLK) {
ret = register_device_clock(adev, pdata);
- if (ret) {
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
- }
+ if (ret)
+ goto out_free;
}
/*
@@ -708,15 +693,19 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
- if (!IS_ERR_OR_NULL(pdev)) {
- acpi_lpss_create_device_links(adev, pdev);
- return 1;
+ if (IS_ERR_OR_NULL(pdev)) {
+ adev->driver_data = NULL;
+ ret = PTR_ERR(pdev);
+ goto err_out;
}
- ret = PTR_ERR(pdev);
- adev->driver_data = NULL;
+ acpi_lpss_create_device_links(adev, pdev);
+ return 1;
- err_out:
+out_free:
+ /* Skip the device, but continue the namespace scan */
+ ret = 0;
+err_out:
kfree(pdata);
return ret;
}
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index a12b55d81209..ee4ce5ba1fb2 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -23,6 +23,12 @@
#include <acpi/pcc.h>
+/*
+ * Arbitrary retries in case the remote processor is slow to respond
+ * to PCC commands
+ */
+#define PCC_CMD_WAIT_RETRIES_NUM 500
+
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
void __iomem *pcc_comm_addr;
@@ -63,6 +69,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
if (IS_ERR(data->pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
ctx->subspace_id);
+ kfree(data);
return AE_NOT_FOUND;
}
@@ -72,6 +79,8 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
if (!data->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
ctx->subspace_id);
+ pcc_mbox_free_channel(data->pcc_chan);
+ kfree(data);
return AE_NO_MEMORY;
}
@@ -86,6 +95,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
{
int ret;
struct pcc_data *data = region_context;
+ u64 usecs_lat;
reinit_completion(&data->done);
@@ -96,10 +106,22 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
if (ret < 0)
return AE_ERROR;
- if (data->pcc_chan->mchan->mbox->txdone_irq)
- wait_for_completion(&data->done);
+ if (data->pcc_chan->mchan->mbox->txdone_irq) {
+ /*
+ * pcc_chan->latency is just a Nominal value. In reality the remote
+ * processor could be much slower to reply. So add an arbitrary
+ * amount of wait on top of Nominal.
+ */
+ usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency;
+ ret = wait_for_completion_timeout(&data->done,
+ usecs_to_jiffies(usecs_lat));
+ if (ret == 0) {
+ pr_err("PCC command executed timeout!\n");
+ return AE_TIME;
+ }
+ }
- mbox_client_txdone(data->pcc_chan->mchan, ret);
+ mbox_chan_txdone(data->pcc_chan->mchan, ret);
memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index de3cbf152dee..fe00a5783f53 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -20,13 +20,13 @@
#include "internal.h"
static const struct acpi_device_id forbidden_id_list[] = {
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
{"PNP0000", 0}, /* PIC */
{"PNP0100", 0}, /* Timer */
{"PNP0200", 0}, /* AT DMA Controller */
- {"ACPI0009", 0}, /* IOxAPIC */
- {"ACPI000A", 0}, /* IOAPIC */
{"SMB0001", 0}, /* ACPI SMBUS virtual device */
- {"", 0},
+ { }
};
static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
@@ -78,7 +78,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
* If the device has parent we need to take its resources into
* account as well because this device might consume part of those.
*/
- parent = acpi_get_first_physical_node(adev->parent);
+ parent = acpi_get_first_physical_node(acpi_dev_parent(adev));
if (parent && dev_is_pci(parent))
dest->parent = pci_find_resource(to_pci_dev(parent), dest);
}
@@ -97,6 +97,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
const struct property_entry *properties)
{
+ struct acpi_device *parent = acpi_dev_parent(adev);
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
struct resource_entry *rentry;
@@ -113,13 +114,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
- if (count < 0) {
+ if (count < 0)
return NULL;
- } else if (count > 0) {
- resources = kcalloc(count, sizeof(struct resource),
- GFP_KERNEL);
+ if (count > 0) {
+ resources = kcalloc(count, sizeof(*resources), GFP_KERNEL);
if (!resources) {
- dev_err(&adev->dev, "No memory for resources\n");
acpi_dev_free_resource_list(&resource_list);
return ERR_PTR(-ENOMEM);
}
@@ -137,10 +136,9 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
* attached to it, that physical device should be the parent of the
* platform device we are about to create.
*/
- pdevinfo.parent = adev->parent ?
- acpi_get_first_physical_node(adev->parent) : NULL;
+ pdevinfo.parent = parent ? acpi_get_first_physical_node(parent) : NULL;
pdevinfo.name = dev_name(&adev->dev);
- pdevinfo.id = -1;
+ pdevinfo.id = PLATFORM_DEVID_NONE;
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 5cbe2196176d..32953646caeb 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -47,9 +47,6 @@ module_param(brightness_switch_enabled, bool, 0644);
static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
-static int disable_backlight_sysfs_if = -1;
-module_param(disable_backlight_sysfs_if, int, 0444);
-
#define REPORT_OUTPUT_KEY_EVENTS 0x01
#define REPORT_BRIGHTNESS_KEY_EVENTS 0x02
static int report_key_events = -1;
@@ -73,6 +70,16 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
+/*
+ * Display probing is known to take up to 5 seconds, so delay the fallback
+ * backlight registration by 5 seconds + 3 seconds for some extra margin.
+ */
+static int register_backlight_delay = 8;
+module_param(register_backlight_delay, int, 0444);
+MODULE_PARM_DESC(register_backlight_delay,
+ "Delay in seconds before doing fallback (non GPU driver triggered) "
+ "backlight registration, set to 0 to disable.");
+
static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -81,7 +88,9 @@ static LIST_HEAD(video_bus_head);
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
-void acpi_video_detect_exit(void);
+static void acpi_video_bus_register_backlight_work(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(video_bus_register_backlight_work,
+ acpi_video_bus_register_backlight_work);
/*
* Indices in the _BCL method response: the first two items are special,
@@ -382,14 +391,6 @@ static int video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
-static int video_disable_backlight_sysfs_if(
- const struct dmi_system_id *d)
-{
- if (disable_backlight_sysfs_if == -1)
- disable_backlight_sysfs_if = 1;
- return 0;
-}
-
static int video_set_device_id_scheme(const struct dmi_system_id *d)
{
device_id_scheme = true;
@@ -463,40 +464,6 @@ static const struct dmi_system_id video_dmi_table[] = {
},
/*
- * Some machines have a broken acpi-video interface for brightness
- * control, but still need an acpi_video_device_lcd_set_level() call
- * on resume to turn the backlight power on. We Enable backlight
- * control on these systems, but do not register a backlight sysfs
- * as brightness control does not work.
- */
- {
- /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
- .callback = video_disable_backlight_sysfs_if,
- .ident = "Toshiba Portege R700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
- },
- },
- {
- /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
- .callback = video_disable_backlight_sysfs_if,
- .ident = "Toshiba Portege R830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
- },
- },
- {
- /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
- .callback = video_disable_backlight_sysfs_if,
- .ident = "Toshiba Satellite R830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
- },
- },
- /*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.
*/
@@ -1758,9 +1725,6 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
if (result)
return;
- if (disable_backlight_sysfs_if > 0)
- return;
-
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
@@ -1859,8 +1823,6 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
if (video->backlight_registered)
return 0;
- acpi_video_run_bcl_for_osi(video);
-
if (acpi_video_get_backlight_type() != acpi_backlight_video)
return 0;
@@ -2030,7 +1992,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
- device->parent->handle, 1,
+ acpi_dev_parent(device)->handle, 1,
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
@@ -2086,7 +2048,11 @@ static int acpi_video_bus_add(struct acpi_device *device)
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
- acpi_video_bus_register_backlight(video);
+ /*
+ * The userspace visible backlight_device gets registered separately
+ * from acpi_video_register_backlight().
+ */
+ acpi_video_run_bcl_for_osi(video);
acpi_video_bus_add_notify_handler(video);
return 0;
@@ -2111,20 +2077,25 @@ static int acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device);
- acpi_video_bus_remove_notify_handler(video);
- acpi_video_bus_unregister_backlight(video);
- acpi_video_bus_put_devices(video);
-
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
+ acpi_video_bus_remove_notify_handler(video);
+ acpi_video_bus_unregister_backlight(video);
+ acpi_video_bus_put_devices(video);
+
kfree(video->attached_array);
kfree(video);
return 0;
}
+static void acpi_video_bus_register_backlight_work(struct work_struct *ignored)
+{
+ acpi_video_register_backlight();
+}
+
static int __init is_i740(struct pci_dev *dev)
{
if (dev->device == 0x00D1)
@@ -2235,6 +2206,18 @@ int acpi_video_register(void)
*/
register_count = 1;
+ /*
+ * acpi_video_bus_add() skips registering the userspace visible
+ * backlight_device. The intent is for this to be registered by the
+ * drm/kms driver calling acpi_video_register_backlight() *after* it is
+ * done setting up its own native backlight device. The delayed work
+ * ensures that acpi_video_register_backlight() always gets called
+ * eventually, in case there is no drm/kms driver or it is disabled.
+ */
+ if (register_backlight_delay)
+ schedule_delayed_work(&video_bus_register_backlight_work,
+ register_backlight_delay * HZ);
+
leave:
mutex_unlock(&register_count_mutex);
return ret;
@@ -2245,6 +2228,7 @@ void acpi_video_unregister(void)
{
mutex_lock(&register_count_mutex);
if (register_count) {
+ cancel_delayed_work_sync(&video_bus_register_backlight_work);
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
may_report_brightness_keys = false;
@@ -2253,19 +2237,16 @@ void acpi_video_unregister(void)
}
EXPORT_SYMBOL(acpi_video_unregister);
-void acpi_video_unregister_backlight(void)
+void acpi_video_register_backlight(void)
{
struct acpi_video_bus *video;
- mutex_lock(&register_count_mutex);
- if (register_count) {
- mutex_lock(&video_list_lock);
- list_for_each_entry(video, &video_bus_head, entry)
- acpi_video_bus_unregister_backlight(video);
- mutex_unlock(&video_list_lock);
- }
- mutex_unlock(&register_count_mutex);
+ mutex_lock(&video_list_lock);
+ list_for_each_entry(video, &video_bus_head, entry)
+ acpi_video_bus_register_backlight(video);
+ mutex_unlock(&video_list_lock);
}
+EXPORT_SYMBOL(acpi_video_register_backlight);
bool acpi_video_handles_brightness_key_presses(void)
{
@@ -2302,7 +2283,6 @@ static int __init acpi_video_init(void)
static void __exit acpi_video_exit(void)
{
- acpi_video_detect_exit();
acpi_video_unregister();
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 9f49272cad39..9b52482b4ed5 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -125,12 +125,9 @@ EXPORT_SYMBOL_GPL(apei_exec_write_register);
int apei_exec_write_register_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
- int rc;
-
ctx->value = entry->value;
- rc = apei_exec_write_register(ctx, entry);
- return rc;
+ return apei_exec_write_register(ctx, entry);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 45973aa6e06d..c23eb75866d0 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -90,6 +90,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
if (skipped)
pr_info(HW_ERR "Skipped %d error records\n", skipped);
+
+ if (printed + skipped)
+ pr_info("Total records found: %d\n", printed + skipped);
}
static int __init setup_bert_disable(char *str)
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 31b077eedb58..247989060e29 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1020,14 +1020,10 @@ static int reader_pos;
static int erst_open_pstore(struct pstore_info *psi)
{
- int rc;
-
if (erst_disable)
return -ENODEV;
- rc = erst_get_record_id_begin(&reader_pos);
-
- return rc;
+ return erst_get_record_id_begin(&reader_pos);
}
static int erst_close_pstore(struct pstore_info *psi)
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index f16739ad3cc0..93d796531af3 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -4,11 +4,12 @@
#include <linux/device.h>
#include <linux/dma-direct.h>
-void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
+void acpi_arch_dma_setup(struct device *dev)
{
int ret;
u64 end, mask;
- u64 dmaaddr = 0, size = 0, offset = 0;
+ u64 size = 0;
+ const struct bus_dma_region *map = NULL;
/*
* If @dev is expected to be DMA-capable then the bus code that created
@@ -26,7 +27,19 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
else
size = 1ULL << 32;
- ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ ret = acpi_dma_get_range(dev, &map);
+ if (!ret && map) {
+ const struct bus_dma_region *r = map;
+
+ for (end = 0; r->size; r++) {
+ if (r->dma_start + r->size - 1 > end)
+ end = r->dma_start + r->size - 1;
+ }
+
+ size = end + 1;
+ dev->dma_range_map = map;
+ }
+
if (ret == -ENODEV)
ret = iort_dma_get_ranges(dev, &size);
if (!ret) {
@@ -34,17 +47,10 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
* Limit coherent and dma mask based on size retrieved from
* firmware.
*/
- end = dmaaddr + size - 1;
+ end = size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
*dev->dma_mask = min(*dev->dma_mask, mask);
}
-
- *dma_addr = dmaaddr;
- *dma_size = size;
-
- ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
-
- dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c0d20d997891..d466c8195314 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -456,7 +456,7 @@ out_free:
Notification Handling
-------------------------------------------------------------------------- */
-/**
+/*
* acpi_bus_notify
* ---------------
* Callback for all 'system-level' device notifications (values 0x00-0x7F).
@@ -511,7 +511,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
}
- adev = acpi_bus_get_acpi_device(handle);
+ adev = acpi_get_acpi_dev(handle);
if (!adev)
goto err;
@@ -524,14 +524,14 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
}
if (!hotplug_event) {
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
return;
}
if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
err:
acpi_evaluate_ost(handle, type, ost_code, NULL);
@@ -802,7 +802,7 @@ static bool acpi_of_modalias(struct acpi_device *adev,
str = obj->string.pointer;
chr = strchr(str, ',');
- strlcpy(modalias, chr ? chr + 1 : str, len);
+ strscpy(modalias, chr ? chr + 1 : str, len);
return true;
}
@@ -822,7 +822,7 @@ void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len)
{
if (!acpi_of_modalias(adev, modalias, len))
- strlcpy(modalias, default_id, len);
+ strscpy(modalias, default_id, len);
}
EXPORT_SYMBOL_GPL(acpi_set_modalias);
@@ -925,12 +925,13 @@ static const void *acpi_of_device_get_match_data(const struct device *dev)
const void *acpi_device_get_match_data(const struct device *dev)
{
+ const struct acpi_device_id *acpi_ids = dev->driver->acpi_match_table;
const struct acpi_device_id *match;
- if (!dev->driver->acpi_match_table)
+ if (!acpi_ids)
return acpi_of_device_get_match_data(dev);
- match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ match = acpi_match_device(acpi_ids, dev);
if (!match)
return NULL;
@@ -948,14 +949,13 @@ EXPORT_SYMBOL(acpi_match_device_ids);
bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
- if (!drv->acpi_match_table)
- return acpi_of_match_device(ACPI_COMPANION(dev),
- drv->of_match_table,
- NULL);
-
- return __acpi_match_device(acpi_companion_match(dev),
- drv->acpi_match_table, drv->of_match_table,
- NULL, NULL);
+ const struct acpi_device_id *acpi_ids = drv->acpi_match_table;
+ const struct of_device_id *of_ids = drv->of_match_table;
+
+ if (!acpi_ids)
+ return acpi_of_match_device(ACPI_COMPANION(dev), of_ids, NULL);
+
+ return __acpi_match_device(acpi_companion_match(dev), acpi_ids, of_ids, NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
@@ -973,16 +973,13 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device);
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
- int ret;
-
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
driver->drv.owner = driver->owner;
- ret = driver_register(&driver->drv);
- return ret;
+ return driver_register(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_register_driver);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1e15a9f25ae9..093675b1a1ff 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -424,6 +424,9 @@ bool acpi_cpc_valid(void)
struct cpc_desc *cpc_ptr;
int cpu;
+ if (acpi_disabled)
+ return false;
+
for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
@@ -1241,6 +1244,48 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
+ * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
+ *
+ * CPPC has flexibility about how CPU performance counters are accessed.
+ * One of the choices is PCC regions, which can have a high access latency. This
+ * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
+ *
+ * Return: true if any of the counters are in PCC regions, false otherwise
+ */
+bool cppc_perf_ctrs_in_pcc(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ struct cpc_register_resource *ref_perf_reg;
+ struct cpc_desc *cpc_desc;
+
+ cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+
+ if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
+ CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
+ CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
+ return true;
+
+
+ ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
+
+ /*
+ * If reference perf register is not supported then we should
+ * use the nominal perf value
+ */
+ if (!CPC_SUPPORTED(ref_perf_reg))
+ ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
+
+ if (CPC_IN_PCC(ref_perf_reg))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
+
+/**
* cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters.
* @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 9dce1245689c..d594effe905f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -75,15 +75,17 @@ static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state)
int acpi_device_get_power(struct acpi_device *device, int *state)
{
int result = ACPI_STATE_UNKNOWN;
+ struct acpi_device *parent;
int error;
if (!device || !state)
return -EINVAL;
+ parent = acpi_dev_parent(device);
+
if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
- *state = device->parent ?
- device->parent->power.state : ACPI_STATE_D0;
+ *state = parent ? parent->power.state : ACPI_STATE_D0;
goto out;
}
@@ -122,10 +124,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
* point, the fact that the device is in D0 implies that the parent has
* to be in D0 too, except if ignore_parent is set.
*/
- if (!device->power.flags.ignore_parent && device->parent
- && device->parent->power.state == ACPI_STATE_UNKNOWN
- && result == ACPI_STATE_D0)
- device->parent->power.state = ACPI_STATE_D0;
+ if (!device->power.flags.ignore_parent && parent &&
+ parent->power.state == ACPI_STATE_UNKNOWN &&
+ result == ACPI_STATE_D0)
+ parent->power.state = ACPI_STATE_D0;
*state = result;
@@ -191,13 +193,17 @@ int acpi_device_set_power(struct acpi_device *device, int state)
return -ENODEV;
}
- if (!device->power.flags.ignore_parent && device->parent &&
- state < device->parent->power.state) {
- acpi_handle_debug(device->handle,
- "Cannot transition to %s for parent in %s\n",
- acpi_power_state_string(state),
- acpi_power_state_string(device->parent->power.state));
- return -ENODEV;
+ if (!device->power.flags.ignore_parent) {
+ struct acpi_device *parent;
+
+ parent = acpi_dev_parent(device);
+ if (parent && state < parent->power.state) {
+ acpi_handle_debug(device->handle,
+ "Cannot transition to %s for parent in %s\n",
+ acpi_power_state_string(state),
+ acpi_power_state_string(parent->power.state));
+ return -ENODEV;
+ }
}
/*
@@ -497,7 +503,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
acpi_handle_debug(handle, "Wake notify\n");
- adev = acpi_bus_get_acpi_device(handle);
+ adev = acpi_get_acpi_dev(handle);
if (!adev)
return;
@@ -515,7 +521,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
mutex_unlock(&acpi_pm_notifier_lock);
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
}
/**
@@ -1460,7 +1466,7 @@ EXPORT_SYMBOL_GPL(acpi_storage_d3);
* not valid to ask for the ACPI power state of the device in that time frame.
*
* This function is intended to be used in a driver's probe or remove
- * function. See Documentation/firmware-guide/acpi/low-power-probe.rst for
+ * function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for
* more information.
*/
bool acpi_dev_state_d0(struct device *dev)
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
index 1e8c7ce89bf1..4b3fdc03e4ed 100644
--- a/drivers/acpi/dptf/Kconfig
+++ b/drivers/acpi/dptf/Kconfig
@@ -11,9 +11,6 @@ menuconfig ACPI_DPTF
a coordinated approach for different policies to effect the hardware
state of a system.
- For more information see:
- <https://01.org/intel%C2%AE-dynamic-platform-and-thermal-framework-dptf-chromium-os/overview>
-
if ACPI_DPTF
config DPTF_POWER
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index c95e535035a0..9b42628cf21b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -917,14 +917,10 @@ EXPORT_SYMBOL(ec_read);
int ec_write(u8 addr, u8 val)
{
- int err;
-
if (!first_ec)
return -ENODEV;
- err = acpi_ec_write(first_ec, addr, val);
-
- return err;
+ return acpi_ec_write(first_ec, addr, val);
}
EXPORT_SYMBOL(ec_write);
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index b9a9a59ddcc1..52a0b303b70a 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -19,43 +19,12 @@
#include "fan.h"
-MODULE_AUTHOR("Paul Diefenbaugh");
-MODULE_DESCRIPTION("ACPI Fan Driver");
-MODULE_LICENSE("GPL");
-
-static int acpi_fan_probe(struct platform_device *pdev);
-static int acpi_fan_remove(struct platform_device *pdev);
-
static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS,
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
-#ifdef CONFIG_PM_SLEEP
-static int acpi_fan_suspend(struct device *dev);
-static int acpi_fan_resume(struct device *dev);
-static const struct dev_pm_ops acpi_fan_pm = {
- .resume = acpi_fan_resume,
- .freeze = acpi_fan_suspend,
- .thaw = acpi_fan_resume,
- .restore = acpi_fan_resume,
-};
-#define FAN_PM_OPS_PTR (&acpi_fan_pm)
-#else
-#define FAN_PM_OPS_PTR NULL
-#endif
-
-static struct platform_driver acpi_fan_driver = {
- .probe = acpi_fan_probe,
- .remove = acpi_fan_remove,
- .driver = {
- .name = "acpi-fan",
- .acpi_match_table = fan_device_ids,
- .pm = FAN_PM_OPS_PTR,
- },
-};
-
/* thermal cooling device callbacks */
static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
*state)
@@ -459,6 +428,33 @@ static int acpi_fan_resume(struct device *dev)
return result;
}
+
+static const struct dev_pm_ops acpi_fan_pm = {
+ .resume = acpi_fan_resume,
+ .freeze = acpi_fan_suspend,
+ .thaw = acpi_fan_resume,
+ .restore = acpi_fan_resume,
+};
+#define FAN_PM_OPS_PTR (&acpi_fan_pm)
+
+#else
+
+#define FAN_PM_OPS_PTR NULL
+
#endif
+static struct platform_driver acpi_fan_driver = {
+ .probe = acpi_fan_probe,
+ .remove = acpi_fan_remove,
+ .driver = {
+ .name = "acpi-fan",
+ .acpi_match_table = fan_device_ids,
+ .pm = FAN_PM_OPS_PTR,
+ },
+};
+
module_platform_driver(acpi_fan_driver);
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Fan Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 628bf8f18130..219c02df9a08 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -102,10 +102,10 @@ struct acpi_device_bus_id {
struct list_head node;
};
-int acpi_device_add(struct acpi_device *device,
- void (*release)(struct device *));
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
- int type);
+ int type, void (*release)(struct device *));
+int acpi_tie_acpi_dev(struct acpi_device *adev);
+int acpi_device_add(struct acpi_device *device);
int acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device);
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index dabe45eba055..4db5bb587599 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -118,12 +118,12 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
if (WARN_ON(ACPI_FAILURE(status)))
return NULL;
- device = acpi_bus_get_acpi_device(handle);
+ device = acpi_get_acpi_dev(handle);
if (WARN_ON(!device))
return NULL;
result = &device->fwnode;
- acpi_bus_put_acpi_device(device);
+ acpi_put_acpi_dev(device);
return result;
}
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index c3d783aca196..23f49a2f4d14 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -9,7 +9,6 @@
*/
#define pr_fmt(fmt) "acpi/hmat: " fmt
-#define dev_fmt(fmt) "acpi/hmat: " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
@@ -302,7 +301,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
u8 type, mem_hier;
if (hmat_loc->header.length < sizeof(*hmat_loc)) {
- pr_notice("HMAT: Unexpected locality header length: %u\n",
+ pr_notice("Unexpected locality header length: %u\n",
hmat_loc->header.length);
return -EINVAL;
}
@@ -314,12 +313,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
sizeof(*inits) * ipds + sizeof(*targs) * tpds;
if (hmat_loc->header.length < total_size) {
- pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
+ pr_notice("Unexpected locality header length:%u, minimum required:%u\n",
hmat_loc->header.length, total_size);
return -EINVAL;
}
- pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
hmat_loc->flags, hmat_data_type(type), ipds, tpds,
hmat_loc->entry_base_unit);
@@ -363,13 +362,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
- pr_notice("HMAT: Unexpected cache header length: %u\n",
+ pr_notice("Unexpected cache header length: %u\n",
cache->header.length);
return -EINVAL;
}
attrs = cache->cache_attributes;
- pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
@@ -424,24 +423,24 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
struct memory_target *target = NULL;
if (p->header.length != sizeof(*p)) {
- pr_notice("HMAT: Unexpected address range header length: %u\n",
+ pr_notice("Unexpected address range header length: %u\n",
p->header.length);
return -EINVAL;
}
if (hmat_revision == 1)
- pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->reserved3, p->reserved4, p->flags, p->processor_PD,
p->memory_PD);
else
- pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
hmat_revision > 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
- pr_debug("HMAT: Memory Domain missing from SRAT\n");
+ pr_debug("Memory Domain missing from SRAT\n");
return -EINVAL;
}
}
@@ -449,7 +448,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
int p_node = pxm_to_node(p->processor_PD);
if (p_node == NUMA_NO_NODE) {
- pr_debug("HMAT: Invalid Processor Domain\n");
+ pr_debug("Invalid Processor Domain\n");
return -EINVAL;
}
target->processor_pxm = p->processor_PD;
@@ -840,7 +839,7 @@ static __init int hmat_init(void)
case 2:
break;
default:
- pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
+ pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
goto out_put;
}
@@ -848,7 +847,7 @@ static __init int hmat_init(void)
if (acpi_table_parse_entries(ACPI_SIG_HMAT,
sizeof(struct acpi_table_hmat), i,
hmat_parse_subtable, 0) < 0) {
- pr_notice("Ignoring HMAT: Invalid table");
+ pr_notice("Ignoring: Invalid table");
goto out_put;
}
}
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 9f6853809138..d4405e1ca9b9 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -44,30 +44,6 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Processor Device", true},
{"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
- /*
- * Linux-Dell-Video is used by BIOS to disable RTD3 for NVidia graphics
- * cards as RTD3 is not supported by drivers now. Systems with NVidia
- * cards will hang without RTD3 disabled.
- *
- * Once NVidia drivers officially support RTD3, this _OSI strings can
- * be removed if both new and old graphics cards are supported.
- */
- {"Linux-Dell-Video", true},
- /*
- * Linux-Lenovo-NV-HDMI-Audio is used by BIOS to power on NVidia's HDMI
- * audio device which is turned off for power-saving in Windows OS.
- * This power management feature observed on some Lenovo Thinkpad
- * systems which will not be able to output audio via HDMI without
- * a BIOS workaround.
- */
- {"Linux-Lenovo-NV-HDMI-Audio", true},
- /*
- * Linux-HPI-Hybrid-Graphics is used by BIOS to enable dGPU to
- * output video directly to external monitors on HP Inc. mobile
- * workstations as Nvidia and AMD VGA drivers provide limited
- * hybrid graphics supports.
- */
- {"Linux-HPI-Hybrid-Graphics", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d57cf8454b93..c8385ef54c37 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -312,76 +312,25 @@ struct acpi_handle_node {
*/
struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
{
- int dev, fn;
- unsigned long long adr;
- acpi_status status;
- acpi_handle phandle;
- struct pci_bus *pbus;
- struct pci_dev *pdev = NULL;
- struct acpi_handle_node *node, *tmp;
- struct acpi_pci_root *root;
- LIST_HEAD(device_list);
-
- /*
- * Walk up the ACPI CA namespace until we reach a PCI root bridge.
- */
- phandle = handle;
- while (!acpi_is_root_bridge(phandle)) {
- node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
- if (!node)
- goto out;
-
- INIT_LIST_HEAD(&node->node);
- node->handle = phandle;
- list_add(&node->node, &device_list);
-
- status = acpi_get_parent(phandle, &phandle);
- if (ACPI_FAILURE(status))
- goto out;
- }
-
- root = acpi_pci_find_root(phandle);
- if (!root)
- goto out;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
+ struct acpi_device_physical_node *pn;
+ struct pci_dev *pci_dev = NULL;
- pbus = root->bus;
-
- /*
- * Now, walk back down the PCI device tree until we return to our
- * original handle. Assumes that everything between the PCI root
- * bridge and the device we're looking for must be a P2P bridge.
- */
- list_for_each_entry(node, &device_list, node) {
- acpi_handle hnd = node->handle;
- status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
- if (ACPI_FAILURE(status))
- goto out;
- dev = (adr >> 16) & 0xffff;
- fn = adr & 0xffff;
-
- pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
- if (!pdev || hnd == handle)
- break;
+ if (!adev)
+ return NULL;
- pbus = pdev->subordinate;
- pci_dev_put(pdev);
+ mutex_lock(&adev->physical_node_lock);
- /*
- * This function may be called for a non-PCI device that has a
- * PCI parent (eg. a disk under a PCI SATA controller). In that
- * case pdev->subordinate will be NULL for the parent.
- */
- if (!pbus) {
- dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
- pdev = NULL;
+ list_for_each_entry(pn, &adev->physical_node_list, node) {
+ if (dev_is_pci(pn->dev)) {
+ pci_dev = to_pci_dev(pn->dev);
break;
}
}
-out:
- list_for_each_entry_safe(node, tmp, &device_list, node)
- kfree(node);
- return pdev;
+ mutex_unlock(&adev->physical_node_lock);
+
+ return pci_dev;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 8c4a73a1351e..f2588aba8421 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -944,13 +944,15 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
return NULL;
device = &resource->device;
- acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
+ acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
+ acpi_release_power_resource);
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
device->power.state = ACPI_STATE_UNKNOWN;
+ device->flags.match_driver = true;
/* Evaluate the object to get the system level and resource order. */
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
@@ -967,8 +969,11 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
- device->flags.match_driver = true;
- result = acpi_device_add(device, acpi_release_power_resource);
+ result = acpi_tie_acpi_dev(device);
+ if (result)
+ goto err;
+
+ result = acpi_device_add(device);
if (result)
goto err;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 16a1663d02d4..acfabfe07c4f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -531,10 +531,27 @@ static void wait_for_freeze(void)
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
+ /*
+ * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+ * not this code. Assume that any Intel systems using this
+ * are ancient and may need the dummy wait. This also assumes
+ * that the motivating chipset issue was Intel-only.
+ */
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
#endif
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
+ /*
+ * Dummy wait op - must do something useless after P_LVL2 read
+ * because chipsets cannot guarantee that STPCLK# signal gets
+ * asserted in time to freeze execution properly
+ *
+ * This workaround has been in place since the original ACPI
+ * implementation was merged, circa 2002.
+ *
+ * If a profile is pointing to this instruction, please first
+ * consider moving your system to a more modern idle
+ * mechanism.
+ */
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
@@ -787,7 +804,7 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
- strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->enter = acpi_idle_enter;
@@ -956,7 +973,7 @@ static int acpi_processor_evaluate_lpi(acpi_handle handle,
obj = pkg_elem + 9;
if (obj->type == ACPI_TYPE_STRING)
- strlcpy(lpi_state->desc, obj->string.pointer,
+ strscpy(lpi_state->desc, obj->string.pointer,
ACPI_CX_DESC_LEN);
lpi_state->index = state_idx;
@@ -1022,7 +1039,7 @@ static bool combine_lpi_states(struct acpi_lpi_state *local,
result->arch_flags = parent->arch_flags;
result->index = parent->index;
- strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
+ strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
return true;
@@ -1196,7 +1213,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
state = &drv->states[i];
snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
- strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
+ strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
state->exit_latency = lpi->wake_latency;
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index db6ac540e924..e534fd49a67e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 7b3ad8ed2f4e..b8d9eb9a433e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -304,8 +304,10 @@ static void acpi_init_of_compatible(struct acpi_device *adev)
ret = acpi_dev_get_property(adev, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
- if (adev->parent
- && adev->parent->flags.of_compatible_ok)
+ struct acpi_device *parent;
+
+ parent = acpi_dev_parent(adev);
+ if (parent && parent->flags.of_compatible_ok)
goto out;
return;
@@ -370,7 +372,7 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
bool ret;
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
return false;
}
@@ -1043,11 +1045,10 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
break; \
} \
if (__items[i].integer.value > _Generic(__val, \
- u8: U8_MAX, \
- u16: U16_MAX, \
- u32: U32_MAX, \
- u64: U64_MAX, \
- default: 0U)) { \
+ u8 *: U8_MAX, \
+ u16 *: U16_MAX, \
+ u32 *: U32_MAX, \
+ u64 *: U64_MAX)) { \
ret = -EOVERFLOW; \
break; \
} \
@@ -1268,10 +1269,11 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode)
return to_acpi_data_node(fwnode)->parent;
}
if (is_acpi_device_node(fwnode)) {
- struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
+ struct acpi_device *parent;
- if (dev)
- return acpi_fwnode_handle(to_acpi_device(dev));
+ parent = acpi_dev_parent(to_acpi_device_node(fwnode));
+ if (parent)
+ return acpi_fwnode_handle(parent);
}
return NULL;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 510cdec375c4..514d89656dde 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -399,6 +399,31 @@ static const struct dmi_system_id medion_laptop[] = {
{ }
};
+static const struct dmi_system_id asus_laptop[] = {
+ {
+ .ident = "Asus Vivobook K3402ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook K3502ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook S5402ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
+ },
+ },
+ { }
+};
+
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
@@ -409,6 +434,7 @@ struct irq_override_cmp {
static const struct irq_override_cmp skip_override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+ { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@@ -690,6 +716,9 @@ static int is_memory(struct acpi_resource *ares, void *not_used)
memset(&win, 0, sizeof(win));
+ if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM))
+ return 1;
+
return !(acpi_dev_resource_memory(ares, res)
|| acpi_dev_resource_address_space(ares, &win)
|| acpi_dev_resource_ext_address_space(ares, &win));
@@ -719,6 +748,23 @@ int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
+ * acpi_dev_get_memory_resources - Get current memory resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * This is a helper function that locates all memory type resources of @adev
+ * with acpi_dev_get_resources().
+ *
+ * The number of resources in the output list is returned on success, an error
+ * code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list)
+{
+ return acpi_dev_get_resources(adev, list, is_memory, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources);
+
+/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
* types
* @ares: Input ACPI resource object.
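Annotation: a hedged sketch of how a driver might consume the acpi_dev_get_memory_resources() helper added above; my_count_mem_windows() is a made-up name and the resource_size() filter is only an example.

/* Illustrative sketch only -- not part of this series. */
static int my_count_mem_windows(struct acpi_device *adev)
{
	struct resource_entry *rentry;
	LIST_HEAD(list);
	int n = 0, ret;

	ret = acpi_dev_get_memory_resources(adev, &list);
	if (ret < 0)
		return ret;

	list_for_each_entry(rentry, &list, node)
		if (resource_size(rentry->res))
			n++;

	acpi_dev_free_resource_list(&list);
	return n;
}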
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 4938010fcac7..e6a01a8df1b8 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -632,7 +632,7 @@ static int acpi_sbs_add(struct acpi_device *device)
mutex_init(&sbs->lock);
- sbs->hc = acpi_driver_data(device->parent);
+ sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device;
strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 7c62e149a7a1..340e0b61587e 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -266,7 +266,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
mutex_init(&hc->lock);
init_waitqueue_head(&hc->wait);
- hc->ec = acpi_driver_data(device->parent);
+ hc->ec = acpi_driver_data(acpi_dev_parent(device));
hc->offset = (val >> 8) & 0xff;
hc->query_bit = val & 0xff;
device->driver_data = hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 42cec8120f18..558664d169fc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -20,6 +20,7 @@
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
#include <linux/crc32.h>
+#include <linux/dma-direct.h>
#include "internal.h"
@@ -29,8 +30,6 @@ extern struct acpi_device *acpi_root;
#define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus"
-#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
-
#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
static const char *dummy_hid = "device";
@@ -429,7 +428,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
out:
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
mutex_unlock(&acpi_scan_lock);
unlock_device_hotplug();
}
@@ -599,11 +598,22 @@ static void get_acpi_device(void *dev)
acpi_dev_get(dev);
}
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
+/**
+ * acpi_get_acpi_dev - Retrieve ACPI device object and reference count it.
+ * @handle: ACPI handle associated with the requested ACPI device object.
+ *
+ * Return a pointer to the ACPI device object associated with @handle and bump
+ * up that object's reference counter (under the ACPI Namespace lock), if
+ * present, or return NULL otherwise.
+ *
+ * The ACPI device object reference acquired by this function needs to be
+ * dropped via acpi_dev_put().
+ */
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle)
{
return handle_to_device(handle, get_acpi_device);
}
-EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device);
+EXPORT_SYMBOL_GPL(acpi_get_acpi_dev);
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
@@ -632,7 +642,7 @@ static int acpi_device_set_name(struct acpi_device *device,
return 0;
}
-static int acpi_tie_acpi_dev(struct acpi_device *adev)
+int acpi_tie_acpi_dev(struct acpi_device *adev)
{
acpi_handle handle = adev->handle;
acpi_status status;
@@ -662,8 +672,7 @@ static void acpi_store_pld_crc(struct acpi_device *adev)
ACPI_FREE(pld);
}
-static int __acpi_device_add(struct acpi_device *device,
- void (*release)(struct device *))
+int acpi_device_add(struct acpi_device *device)
{
struct acpi_device_bus_id *acpi_device_bus_id;
int result;
@@ -719,11 +728,6 @@ static int __acpi_device_add(struct acpi_device *device,
mutex_unlock(&acpi_device_lock);
- if (device->parent)
- device->dev.parent = &device->parent->dev;
-
- device->dev.bus = &acpi_bus_type;
- device->dev.release = release;
result = device_add(&device->dev);
if (result) {
dev_err(&device->dev, "Error registering device\n");
@@ -750,17 +754,6 @@ err_unlock:
return result;
}
-int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
-{
- int ret;
-
- ret = acpi_tie_acpi_dev(adev);
- if (ret)
- return ret;
-
- return __acpi_device_add(adev, release);
-}
-
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
@@ -805,10 +798,9 @@ static const char * const acpi_honor_dep_ids[] = {
NULL
};
-static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
+static struct acpi_device *acpi_find_parent_acpi_dev(acpi_handle handle)
{
- struct acpi_device *device;
- acpi_status status;
+ struct acpi_device *adev;
/*
* Fixed hardware devices do not appear in the namespace and do not
@@ -819,13 +811,18 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
return acpi_root;
do {
+ acpi_status status;
+
status = acpi_get_parent(handle, &handle);
- if (ACPI_FAILURE(status))
- return status == AE_NULL_ENTRY ? NULL : acpi_root;
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NULL_ENTRY)
+ return acpi_root;
- device = acpi_fetch_acpi_dev(handle);
- } while (!device);
- return device;
+ return NULL;
+ }
+ adev = acpi_fetch_acpi_dev(handle);
+ } while (!adev);
+ return adev;
}
acpi_status
@@ -1112,7 +1109,7 @@ static void acpi_device_get_busid(struct acpi_device *device)
* The device's Bus ID is simply the object name.
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
- if (ACPI_IS_ROOT_DEVICE(device)) {
+ if (!acpi_dev_parent(device)) {
strcpy(device->pnp.bus_id, "ACPI");
return;
}
@@ -1467,25 +1464,21 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
* acpi_dma_get_range() - Get device DMA parameters.
*
* @dev: device to configure
- * @dma_addr: pointer device DMA address result
- * @offset: pointer to the DMA offset result
- * @size: pointer to DMA range size result
+ * @map: pointer to DMA ranges result
*
- * Evaluate DMA regions and return respectively DMA region start, offset
- * and size in dma_addr, offset and size on parsing success; it does not
- * update the passed in values on failure.
+ * Evaluate DMA regions and return a pointer to the DMA regions map on
+ * parsing success; it does not update the passed-in value on failure.
*
* Return 0 on success, < 0 on failure.
*/
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
- u64 *size)
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
struct acpi_device *adev;
LIST_HEAD(list);
struct resource_entry *rentry;
int ret;
struct device *dma_dev = dev;
- u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+ struct bus_dma_region *r;
/*
* Walk the device tree chasing an ACPI companion with a _DMA
@@ -1510,31 +1503,28 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
ret = acpi_dev_get_dma_resources(adev, &list);
if (ret > 0) {
+ r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
list_for_each_entry(rentry, &list, node) {
- if (dma_offset && rentry->offset != dma_offset) {
+ if (rentry->res->start >= rentry->res->end) {
+ kfree(r);
ret = -EINVAL;
- dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+ dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
goto out;
}
- dma_offset = rentry->offset;
- /* Take lower and upper limits */
- if (rentry->res->start < dma_start)
- dma_start = rentry->res->start;
- if (rentry->res->end > dma_end)
- dma_end = rentry->res->end;
- }
-
- if (dma_start >= dma_end) {
- ret = -EINVAL;
- dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
- goto out;
+ r->cpu_start = rentry->res->start;
+ r->dma_start = rentry->res->start - rentry->offset;
+ r->size = resource_size(rentry->res);
+ r->offset = rentry->offset;
+ r++;
}
- *dma_addr = dma_start - dma_offset;
- len = dma_end - dma_start;
- *size = max(len, len + 1);
- *offset = dma_offset;
+ *map = r;
}
out:
acpi_dev_free_resource_list(&list);
@@ -1624,20 +1614,19 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id)
{
const struct iommu_ops *iommu;
- u64 dma_addr = 0, size = 0;
if (attr == DEV_DMA_NOT_SUPPORTED) {
set_dma_ops(dev, &dma_dummy_ops);
return 0;
}
- acpi_arch_dma_setup(dev, &dma_addr, &size);
+ acpi_arch_dma_setup(dev);
iommu = acpi_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- arch_setup_dma_ops(dev, dma_addr, size,
+ arch_setup_dma_ops(dev, 0, U64_MAX,
iommu, attr == DEV_DMA_COHERENT);
return 0;
@@ -1648,7 +1637,7 @@ static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;
acpi_status status;
- struct acpi_device *parent = adev->parent;
+ struct acpi_device *parent = acpi_dev_parent(adev);
if (parent && parent->flags.cca_seen) {
/*
@@ -1692,7 +1681,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
- struct acpi_device *parent = device->parent;
+ struct acpi_device *parent = acpi_dev_parent(device);
static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0},
{}
@@ -1762,12 +1751,16 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
}
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
- int type)
+ int type, void (*release)(struct device *))
{
+ struct acpi_device *parent = acpi_find_parent_acpi_dev(handle);
+
INIT_LIST_HEAD(&device->pnp.ids);
device->device_type = type;
device->handle = handle;
- device->parent = acpi_bus_get_parent(handle);
+ device->dev.parent = parent ? &parent->dev : NULL;
+ device->dev.release = release;
+ device->dev.bus = &acpi_bus_type;
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, ACPI_STA_DEFAULT);
acpi_device_get_busid(device);
@@ -1821,7 +1814,7 @@ static int acpi_add_single_object(struct acpi_device **child,
if (!device)
return -ENOMEM;
- acpi_init_device_object(device, handle, type);
+ acpi_init_device_object(device, handle, type, acpi_device_release);
/*
* Getting the status is delayed till here so that we can call
* acpi_bus_get_status() and use its quirk handling. Note that
@@ -1851,7 +1844,7 @@ static int acpi_add_single_object(struct acpi_device **child,
mutex_unlock(&acpi_dep_list_lock);
if (!result)
- result = __acpi_device_add(device, acpi_device_release);
+ result = acpi_device_add(device);
if (result) {
acpi_device_release(&device->dev);
@@ -1862,8 +1855,8 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_device_add_finalize(device);
acpi_handle_debug(handle, "Added as %s, parent %s\n",
- dev_name(&device->dev), device->parent ?
- dev_name(&device->parent->dev) : "(null)");
+ dev_name(&device->dev), device->dev.parent ?
+ dev_name(device->dev.parent) : "(null)");
*child = device;
return 0;
@@ -2235,11 +2228,24 @@ ok:
return 0;
}
-static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
+static int acpi_dev_get_next_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
{
- struct acpi_device *adev;
+ struct acpi_device **adev_p = data;
+ struct acpi_device *adev = *adev_p;
- adev = acpi_bus_get_acpi_device(dep->consumer);
+ /*
+ * If we're passed a 'previous' consumer device then we need to skip
+ * any consumers until we meet the previous one, and then NULL @data
+ * so the next one can be returned.
+ */
+ if (adev) {
+ if (dep->consumer == adev->handle)
+ *adev_p = NULL;
+
+ return 0;
+ }
+
+ adev = acpi_get_acpi_dev(dep->consumer);
if (adev) {
*(struct acpi_device **)data = adev;
return 1;
@@ -2292,7 +2298,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
{
- struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer);
+ struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
if (adev) {
adev->dep_unmet--;
@@ -2368,25 +2374,32 @@ bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
/**
- * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
+ * acpi_dev_get_next_consumer_dev - Return the next adev dependent on @supplier
* @supplier: Pointer to the dependee device
+ * @start: Pointer to the current dependent device
*
- * Returns the first &struct acpi_device which declares itself dependent on
+ * Returns the next &struct acpi_device which declares itself dependent on
* @supplier via the _DEP buffer, parsed from the acpi_dep_list.
*
- * The caller is responsible for putting the reference to adev when it is no
- * longer needed.
+ * If the returned adev is not passed as @start to this function, the caller is
+ * responsible for putting the reference to adev when it is no longer needed.
*/
-struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier)
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+ struct acpi_device *start)
{
- struct acpi_device *adev = NULL;
+ struct acpi_device *adev = start;
acpi_walk_dep_device_list(supplier->handle,
- acpi_dev_get_first_consumer_dev_cb, &adev);
+ acpi_dev_get_next_consumer_dev_cb, &adev);
+
+ acpi_dev_put(start);
+
+ if (adev == start)
+ return NULL;
return adev;
}
-EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev);
+EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);
/**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
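Annotation: the acpi_dev_get_next_consumer_dev() kerneldoc above implies an iteration pattern in which the helper itself drops the reference on the device passed back in as @start. A hedged sketch of such a walk follows; my_walk_consumers() is a placeholder, and a caller breaking out of the loop early would still need acpi_dev_put() on the device it keeps.

/* Illustrative sketch only -- my_walk_consumers() is not part of this series. */
static void my_walk_consumers(struct acpi_device *supplier)
{
	struct acpi_device *consumer = NULL;

	while ((consumer = acpi_dev_get_next_consumer_dev(supplier, consumer)))
		dev_info(&consumer->dev, "depends on %s\n",
			 dev_name(&supplier->dev));
}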
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 7fe41ee489d6..d960a238be4e 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -18,6 +18,7 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
extern int acpi_s2idle_begin(void);
extern int acpi_s2idle_prepare(void);
extern int acpi_s2idle_prepare_late(void);
+extern void acpi_s2idle_check(void);
extern bool acpi_s2idle_wake(void);
extern void acpi_s2idle_restore_early(void);
extern void acpi_s2idle_restore(void);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 5a7b8065e77f..2ea14648a661 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -794,6 +794,30 @@ bool acpi_dev_hid_uid_match(struct acpi_device *adev,
EXPORT_SYMBOL(acpi_dev_hid_uid_match);
/**
+ * acpi_dev_uid_to_integer - treat ACPI device _UID as integer
+ * @adev: ACPI device to get _UID from
+ * @integer: output buffer for integer
+ *
+ * Considers the _UID of @adev as an integer and converts it to @integer.
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
+{
+ const char *uid;
+
+ if (!adev)
+ return -ENODEV;
+
+ uid = acpi_device_uid(adev);
+ if (!uid)
+ return -ENODATA;
+
+ return kstrtou64(uid, 0, integer);
+}
+EXPORT_SYMBOL(acpi_dev_uid_to_integer);
+
+/**
* acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
@@ -878,7 +902,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_dev_match_info match = {};
struct device *dev;
- strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
@@ -911,7 +935,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
struct acpi_dev_match_info match = {};
struct device *dev;
- strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
@@ -961,7 +985,7 @@ EXPORT_SYMBOL(acpi_video_backlight_string);
static int __init acpi_backlight(char *str)
{
- strlcpy(acpi_video_backlight_string, str,
+ strscpy(acpi_video_backlight_string, str,
sizeof(acpi_video_backlight_string));
return 1;
}
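Annotation: a short sketch of what acpi_dev_uid_to_integer() replaces — instead of string-comparing adev->pnp.unique_id, a caller converts the _UID once and works with integers. my_get_port_index() is a hypothetical example; the x86/utils.c hunk further down shows the real conversion in the serdev quirk.

/* Illustrative sketch only -- my_get_port_index() is a placeholder. */
static int my_get_port_index(struct acpi_device *adev)
{
	u64 uid;
	int ret;

	ret = acpi_dev_uid_to_integer(adev, &uid);
	if (ret)
		return ret;	/* No _UID, or it is not a number. */

	return (int)uid;
}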
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 5d7f38016a24..0d9064a9804c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -17,8 +17,9 @@
* Otherwise vendor specific drivers like thinkpad_acpi, asus-laptop,
* sony_acpi,... can take care about backlight brightness.
*
- * Backlight drivers can use acpi_video_get_backlight_type() to determine
- * which driver should handle the backlight.
+ * Backlight drivers can use acpi_video_get_backlight_type() to determine which
+ * driver should handle the backlight. RAW/GPU-driver backlight drivers must
+ * use the acpi_video_backlight_use_native() helper for this.
*
* If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
* this file will not be compiled and acpi_video_get_backlight_type() will
@@ -27,20 +28,16 @@
#include <linux/export.h>
#include <linux/acpi.h>
+#include <linux/apple-gmux.h>
#include <linux/backlight.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
-void acpi_video_unregister_backlight(void);
-
-static bool backlight_notifier_registered;
-static struct notifier_block backlight_nb;
-static struct work_struct backlight_notify_work;
-
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -78,6 +75,36 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
+/* This depends on ACPI_WMI which is X86 only */
+#ifdef CONFIG_X86
+static bool nvidia_wmi_ec_supported(void)
+{
+ struct wmi_brightness_args args = {
+ .mode = WMI_BRIGHTNESS_MODE_GET,
+ .val = 0,
+ .ret = 0,
+ };
+ struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0,
+ WMI_BRIGHTNESS_METHOD_SOURCE, &buf, &buf);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ /*
+ * If brightness is handled by the EC then nvidia-wmi-ec-backlight
+ * should be used, else the GPU driver(s) should be used.
+ */
+ return args.ret == WMI_BRIGHTNESS_SOURCE_EC;
+}
+#else
+static bool nvidia_wmi_ec_supported(void)
+{
+ return false;
+}
+#endif
+
/* Force to use vendor driver when the ACPI device is known to be
* buggy */
static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -105,63 +132,143 @@ static int video_detect_force_none(const struct dmi_system_id *d)
}
static const struct dmi_system_id video_detect_dmi_table[] = {
- /* On Samsung X360, the BIOS will set a flag (VDRV) if generic
- * ACPI backlight device is used. This flag will definitively break
- * the backlight interface (even the vendor interface) until next
- * reboot. It's why we should prevent video.ko from being used here
- * and we can't rely on a later call to acpi_video_unregister().
- */
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */
.callback = video_detect_force_vendor,
- /* X360 */
+ /* Acer KAV80 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
- DMI_MATCH(DMI_BOARD_NAME, "X360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Asus UL30VT */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Asus UL30VT */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Asus UL30A */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Asus UL30A */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
{
- .callback = video_detect_force_vendor,
- /* GIGABYTE GB-BXBT-2807 */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Asus X55U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus X101CH */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus X401U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus X501U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus 1015CX */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* GIGABYTE GB-BXBT-2807 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Sony VPCEH3U1E */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Samsung N150/N210/N220 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Samsung NF110/NF210/NF310 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
+ DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Samsung NC210 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Sony VPCEH3U1E */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Xiaomi Mi Pad 2 */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Xiaomi Mi Pad 2 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
/*
+ * Toshiba models with Transflective display, these need to use
+ * the toshiba_acpi vendor driver for proper Transflective handling.
+ */
+ {
+ .callback = video_detect_force_vendor,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R500"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R600"),
+ },
+ },
+
+ /*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
* when userspace is not handling brightness key events. Disable
@@ -390,6 +497,41 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1012674 */
+ .callback = video_detect_force_native,
+ /* Acer Aspire 5741 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
+ },
+ },
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42993 */
+ .callback = video_detect_force_native,
+ /* Acer Aspire 5750 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+ },
+ },
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42833 */
+ .callback = video_detect_force_native,
+ /* Acer Extensa 5235 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Acer TravelMate 4750 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
+ },
+ },
+ {
/* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
.callback = video_detect_force_native,
/* Acer TravelMate 5735Z */
@@ -400,120 +542,109 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
- .callback = video_detect_force_native,
- /* ASUSTeK COMPUTER INC. GA401 */
- .matches = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=36322 */
+ .callback = video_detect_force_native,
+ /* Acer TravelMate 5760 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* ASUSTeK COMPUTER INC. GA401 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA401"),
},
},
{
- .callback = video_detect_force_native,
- /* ASUSTeK COMPUTER INC. GA502 */
- .matches = {
+ .callback = video_detect_force_native,
+ /* ASUSTeK COMPUTER INC. GA502 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA502"),
},
},
{
- .callback = video_detect_force_native,
- /* ASUSTeK COMPUTER INC. GA503 */
- .matches = {
+ .callback = video_detect_force_native,
+ /* ASUSTeK COMPUTER INC. GA503 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
},
},
- /*
- * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a
- * working native and video interface. However the default detection
- * mechanism first registers the video interface before unregistering
- * it again and switching to the native interface during boot. This
- * results in a dangling SBIOS request for backlight change for some
- * reason, causing the backlight to switch to ~2% once per boot on the
- * first power cord connect or disconnect event. Setting the native
- * interface explicitly circumvents this buggy behaviour, by avoiding
- * the unregistering process.
- */
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ .callback = video_detect_force_native,
+ /* Asus UX303UB */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ .callback = video_detect_force_native,
+ /* Samsung N150P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150P"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ .callback = video_detect_force_native,
+ /* Samsung N145P/N250P/N260P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ .callback = video_detect_force_native,
+ /* Samsung N250P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
},
},
+
/*
- * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
- * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
- * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
- * above.
+ * These Toshibas have a broken acpi-video interface for brightness
+ * control. They also have an issue where the panel is off after
+ * suspend until a special firmware call is made to turn it back
+ * on. This is handled by the toshiba_acpi kernel module, so that
+ * module must be enabled for these models to work correctly.
*/
{
- .callback = video_detect_force_native,
- .ident = "TongFang PF5PU1G",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "TongFang PF4NU1F",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "TongFang PF4NU1F",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "TongFang PF5NU1G",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .callback = video_detect_force_native,
+ /* Toshiba Portégé R700 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "TongFang PF5NU1G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
+ /* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ /* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .callback = video_detect_force_native,
+ /* Toshiba Satellite/Portégé R830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "TongFang PF5LUXG",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ .callback = video_detect_force_native,
+ /* Toshiba Satellite/Portégé Z830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
},
},
+
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
@@ -537,43 +668,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
-/* This uses a workqueue to avoid various locking ordering issues */
-static void acpi_video_backlight_notify_work(struct work_struct *work)
-{
- if (acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
-}
-
-static int acpi_video_backlight_notify(struct notifier_block *nb,
- unsigned long val, void *bd)
-{
- struct backlight_device *backlight = bd;
-
- /* A raw bl registering may change video -> native */
- if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED)
- schedule_work(&backlight_notify_work);
-
- return NOTIFY_OK;
-}
-
/*
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
- *
- * The autodetect order is:
- * 1) Is the acpi-video backlight interface supported ->
- * no, use a vendor interface
- * 2) Is this a win8 "ready" BIOS and do we have a native interface ->
- * yes, use a native interface
- * 3) Else use the acpi-video interface
- *
- * Arguably the native on win8 check should be done first, but that would
- * be a behavior change, which may causes issues.
*/
-enum acpi_backlight_type acpi_video_get_backlight_type(void)
+static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
{
static DEFINE_MUTEX(init_mutex);
+ static bool nvidia_wmi_ec_present;
+ static bool native_available;
static bool init_done;
static long video_caps;
@@ -585,48 +688,60 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
- INIT_WORK(&backlight_notify_work,
- acpi_video_backlight_notify_work);
- backlight_nb.notifier_call = acpi_video_backlight_notify;
- backlight_nb.priority = 0;
- if (backlight_register_notifier(&backlight_nb) == 0)
- backlight_notifier_registered = true;
+ nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
init_done = true;
}
+ if (native)
+ native_available = true;
mutex_unlock(&init_mutex);
+ /*
+ * The below heuristics / detection steps are in order of descending
+	 * precedence. The command line takes precedence over anything else.
+ */
if (acpi_backlight_cmdline != acpi_backlight_undef)
return acpi_backlight_cmdline;
+ /* DMI quirks override any autodetection. */
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
- if (!(video_caps & ACPI_VIDEO_BACKLIGHT))
- return acpi_backlight_vendor;
+ /* Special cases such as nvidia_wmi_ec and apple gmux. */
+ if (nvidia_wmi_ec_present)
+ return acpi_backlight_nvidia_wmi_ec;
- if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW))
- return acpi_backlight_native;
+ if (apple_gmux_present())
+ return acpi_backlight_apple_gmux;
+
+ /* On systems with ACPI video use either native or ACPI video. */
+ if (video_caps & ACPI_VIDEO_BACKLIGHT) {
+ /*
+ * Windows 8 and newer no longer use the ACPI video interface,
+ * so it often does not work. If the ACPI tables are written
+ * for win8 and native brightness ctl is available, use that.
+ *
+		 * The native check is deliberately inside the acpi-video
+		 * block: on older devices without acpi-video support,
+		 * native is usually not the best choice.
+ */
+ if (acpi_osi_is_win8() && native_available)
+ return acpi_backlight_native;
+ else
+ return acpi_backlight_video;
+ }
- return acpi_backlight_video;
+ /* No ACPI video (old hw), use vendor specific fw methods. */
+ return acpi_backlight_vendor;
}
-EXPORT_SYMBOL(acpi_video_get_backlight_type);
-/*
- * Set the preferred backlight interface type based on DMI info.
- * This function allows DMI blacklists to be implemented by external
- * platform drivers instead of putting a big blacklist in video_detect.c
- */
-void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
- acpi_backlight_dmi = type;
- /* Remove acpi-video backlight interface if it is no longer desired */
- if (acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ return __acpi_video_get_backlight_type(false);
}
-EXPORT_SYMBOL(acpi_video_set_dmi_backlight_type);
+EXPORT_SYMBOL(acpi_video_get_backlight_type);
-void __exit acpi_video_detect_exit(void)
+bool acpi_video_backlight_use_native(void)
{
- if (backlight_notifier_registered)
- backlight_unregister_notifier(&backlight_nb);
+ return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
}
+EXPORT_SYMBOL(acpi_video_backlight_use_native);
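Annotation: per the updated header comment, native/GPU backlight drivers are expected to gate their backlight registration on the new helper. A minimal, assumed usage sketch follows; my_gpu_backlight_init() is a placeholder and the actual registration call is elided.

/* Illustrative sketch only -- my_gpu_backlight_init() is a placeholder. */
static int my_gpu_backlight_init(struct device *dev)
{
	if (!acpi_video_backlight_use_native()) {
		dev_dbg(dev, "not registering native backlight, another interface is preferred\n");
		return 0;
	}

	/* ... register the driver's own backlight_device here ... */
	return 0;
}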
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index c285c91a5e9c..8812ecd03d55 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -8,6 +8,7 @@
#include <linux/bitmap.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/uuid.h>
+#include "../internal.h"
/* Apple _DSM device properties GUID */
static const guid_t apple_prp_guid =
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index f9ac12b778e6..42f249070c09 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/suspend.h>
#include "../sleep.h"
@@ -27,6 +28,10 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+static bool prefer_microsoft_dsm_guid __read_mostly;
+module_param(prefer_microsoft_dsm_guid, bool, 0644);
+MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
+
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -363,40 +368,132 @@ out:
return ret;
}
+struct amd_lps0_hid_device_data {
+ const unsigned int rev_id;
+ const bool check_off_by_one;
+ const bool prefer_amd_guid;
+};
+
+static const struct amd_lps0_hid_device_data amd_picasso = {
+ .rev_id = 0,
+ .check_off_by_one = true,
+ .prefer_amd_guid = false,
+};
+
+static const struct amd_lps0_hid_device_data amd_cezanne = {
+ .rev_id = 0,
+ .check_off_by_one = false,
+ .prefer_amd_guid = false,
+};
+
+static const struct amd_lps0_hid_device_data amd_rembrandt = {
+ .rev_id = 2,
+ .check_off_by_one = false,
+ .prefer_amd_guid = true,
+};
+
+static const struct acpi_device_id amd_hid_ids[] = {
+ {"AMD0004", (kernel_ulong_t)&amd_picasso, },
+ {"AMD0005", (kernel_ulong_t)&amd_picasso, },
+ {"AMDI0005", (kernel_ulong_t)&amd_picasso, },
+ {"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
+ {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
+ {}
+};
+
+static int lps0_prefer_microsoft(const struct dmi_system_id *id)
+{
+ pr_debug("Preferring Microsoft GUID.\n");
+ prefer_microsoft_dsm_guid = true;
+ return 0;
+}
+
+static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
+ {
+ /*
+ * ASUS TUF Gaming A17 FA707RE
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216101
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
+ },
+ },
+ {
+ /* ASUS ROG Zephyrus G14 (2022) */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
+ },
+ },
+ {
+ /*
+ * Lenovo Yoga Slim 7 Pro X 14ARH7
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82"),
+ },
+ },
+ {
+ /*
+ * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
+ },
+ },
+ {
+ /*
+ * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
+ },
+ },
+ {}
+};
+
static int lps0_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
if (lps0_device_handle)
return 0;
+ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
+ &lps0_dsm_guid_microsoft);
if (acpi_s2idle_vendor_amd()) {
- /* AMD0004, AMD0005, AMDI0005:
- * - Should use rev_id 0x0
- * - function mask > 0x3: Should use AMD method, but has off by one bug
- * - function mask = 0x3: Should use Microsoft method
- * AMDI0006:
- * - should use rev_id 0x0
- * - function mask = 0x3: Should use Microsoft method
- * AMDI0007:
- * - Should use rev_id 0x2
- * - Should only use AMD method
- */
- const char *hid = acpi_device_hid(adev);
- rev_id = strcmp(hid, "AMDI0007") ? 0 : 2;
+ static const struct acpi_device_id *dev_id;
+ const struct amd_lps0_hid_device_data *data;
+
+ for (dev_id = &amd_hid_ids[0]; dev_id->id[0]; dev_id++)
+ if (acpi_dev_hid_uid_match(adev, dev_id->id, NULL))
+ break;
+ if (dev_id->id[0])
+ data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
+ else
+ data = &amd_rembrandt;
+ rev_id = data->rev_id;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
- &lps0_dsm_guid_microsoft);
- if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
- !strcmp(hid, "AMD0005") ||
- !strcmp(hid, "AMDI0005"))) {
+ if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
- } else if (lps0_dsm_func_mask_microsoft > 0 &&
- (!strcmp(hid, "AMDI0007") ||
- !strcmp(hid, "AMDI0008"))) {
+ } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
+ !prefer_microsoft_dsm_guid) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
@@ -404,7 +501,8 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ if (!prefer_microsoft_dsm_guid)
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
@@ -486,6 +584,19 @@ int acpi_s2idle_prepare_late(void)
return 0;
}
+void acpi_s2idle_check(void)
+{
+ struct acpi_s2idle_dev_ops *handler;
+
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+ list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
+ if (handler->check)
+ handler->check();
+ }
+}
+
void acpi_s2idle_restore_early(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -527,14 +638,16 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
.prepare_late = acpi_s2idle_prepare_late,
+ .check = acpi_s2idle_check,
.wake = acpi_s2idle_wake,
.restore_early = acpi_s2idle_restore_early,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
-void acpi_s2idle_setup(void)
+void __init acpi_s2idle_setup(void)
{
+ dmi_check_system(s2idle_dmi_table);
acpi_scan_add_handler(&lps0_handler);
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}
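Annotation: the new acpi_s2idle_check() stage above runs each registered handler's ->check() callback. A hedged sketch of a platform driver hooking that stage, assuming struct acpi_s2idle_dev_ops carries a check member alongside prepare/restore (which is what the handler loop relies on) and that acpi_register_lps0_dev() is the registration path, as elsewhere in this file. All my_* names are placeholders.

/* Illustrative sketch only -- my_* names are placeholders. */
static void my_s2idle_check(void)
{
	/* e.g. warn if firmware never reached its deepest low-power state */
}

static struct acpi_s2idle_dev_ops my_s2idle_dev_ops = {
	.check = my_s2idle_check,
};

static int __init my_driver_init(void)
{
	return acpi_register_lps0_dev(&my_s2idle_dev_ops);
}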
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 664070fc8349..f8a2cbdc0ce2 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -207,9 +207,26 @@ static const struct x86_cpu_id storage_d3_cpu_ids[] = {
{}
};
+static const struct dmi_system_id force_storage_d3_dmi[] = {
+ {
+ /*
+ * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
+ * but .NVME is needed to get StorageD3Enable node
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
+ }
+ },
+ {}
+};
+
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
+
+ return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
}
/*
@@ -351,11 +368,17 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
+ u64 uid;
+ int ret;
*skip = false;
- /* !dev_is_platform() to not match on PNP enumerated debug UARTs */
- if (!adev || !adev->pnp.unique_id || !dev_is_platform(controller_parent))
+ ret = acpi_dev_uid_to_integer(adev, &uid);
+ if (ret)
+ return 0;
+
+ /* to not match on PNP enumerated debug UARTs */
+ if (!dev_is_platform(controller_parent))
return 0;
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
@@ -363,10 +386,10 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
quirks = (unsigned long)dmi_id->driver_data;
if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
- if (!strcmp(adev->pnp.unique_id, "1"))
+ if (uid == 1)
return -ENODEV; /* Create tty cdev instead of serdev */
- if (!strcmp(adev->pnp.unique_id, "2"))
+ if (uid == 2)
*skip = true;
}
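Editorial aside: the utils.c hunk above stops comparing the _UID as a string and converts it to an integer once via acpi_dev_uid_to_integer(). A minimal userspace C sketch of that convert-then-compare pattern; the parsing details here are assumed for illustration, not taken from the kernel.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int uid_to_integer(const char *uid, uint64_t *val)
{
	char *end;

	if (!uid || !*uid)
		return -ENODEV;
	errno = 0;
	*val = strtoull(uid, &end, 10);
	if (errno || *end)		/* not a plain decimal number */
		return -EINVAL;
	return 0;
}

int main(void)
{
	uint64_t uid;

	if (uid_to_integer("2", &uid) == 0 && uid == 2)
		printf("UART2: skip serdev enumeration\n");
	return 0;
}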
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 32b0e0b930c1..110a535648d2 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -209,6 +209,7 @@ static int amba_match(struct device *dev, struct device_driver *drv)
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
+ mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
int ret = amba_read_periphid(pcdev);
@@ -218,11 +219,14 @@ static int amba_match(struct device *dev, struct device_driver *drv)
* permanent failure in reading pid and cid, simply map it to
* -EPROBE_DEFER.
*/
- if (ret)
+ if (ret) {
+ mutex_unlock(&pcdev->periphid_lock);
return -EPROBE_DEFER;
+ }
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
}
+ mutex_unlock(&pcdev->periphid_lock);
/* When driver_override is set, only bind to the matching driver */
if (pcdev->driver_override)
@@ -532,6 +536,7 @@ static void amba_device_release(struct device *dev)
if (d->res.parent)
release_resource(&d->res);
+ mutex_destroy(&d->periphid_lock);
kfree(d);
}
@@ -584,6 +589,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->res.name = dev_name(&dev->dev);
+ mutex_init(&dev->periphid_lock);
}
/**
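Editorial aside: the amba_match() hunk above serialises the lazy periphid read with a new mutex and takes care to drop the lock on the deferral path. A minimal userspace C sketch of the same lock / lazy-init / early-unlock shape, using pthreads and made-up values.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t periphid_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int periphid;			/* 0 means "not read yet" */

static int read_periphid(unsigned int *id)
{
	*id = 0x41011;				/* pretend the hardware read worked */
	return 0;
}

static int match(void)
{
	pthread_mutex_lock(&periphid_lock);
	if (!periphid) {
		int ret = read_periphid(&periphid);

		if (ret) {
			/* drop the lock before the early return, as the hunk does */
			pthread_mutex_unlock(&periphid_lock);
			return -1;		/* the kernel would return -EPROBE_DEFER */
		}
	}
	pthread_mutex_unlock(&periphid_lock);

	return periphid == 0x41011;
}

int main(void)
{
	printf("match: %d\n", match());
	return 0;
}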
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c964d7c8c384..6428f6be69e3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1385,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+ * Cleanup the failed reference here as the target
+ * could now be dead and have already released its
+ * references by now. Calling on the new reference
+ * with strong=0 and a tmp_refs will not decrement
+ * the node. The new_ref gets kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1014beb12802..9b1778c00610 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -322,7 +322,6 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
*/
if (vma) {
vm_start = vma->vm_start;
- alloc->vma_vm_mm = vma->vm_mm;
mmap_assert_write_locked(alloc->vma_vm_mm);
} else {
mmap_assert_locked(alloc->vma_vm_mm);
@@ -402,12 +401,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
+ mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
+ mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
+ mmap_read_unlock(alloc->vma_vm_mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -792,7 +794,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
binder_alloc_set_vma(alloc, vma);
- mmgrab(alloc->vma_vm_mm);
return 0;
@@ -929,17 +930,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
+
+ mmap_read_lock(alloc->vma_vm_mm);
+ if (binder_alloc_get_vma(alloc) == NULL) {
+ mmap_read_unlock(alloc->vma_vm_mm);
+ goto uninitialized;
}
+
+ mmap_read_unlock(alloc->vma_vm_mm);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+
+uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -1080,6 +1089,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
+ alloc->vma_vm_mm = current->mm;
+ mmgrab(alloc->vma_vm_mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 79aa9f285312..b734e069034d 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -327,7 +327,7 @@ static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem * mmio)
}
/* SATA AHCI temperature monitor */
-static int sata_ahci_read_temperature(void *dev, int *temp)
+static int __sata_ahci_read_temperature(void *dev, int *temp)
{
u16 mpll_test_reg, rtune_ctl_reg, dac_ctl_reg, read_sum;
u32 str1, str2, str3, str4;
@@ -416,6 +416,11 @@ static int sata_ahci_read_temperature(void *dev, int *temp)
return 0;
}
+static int sata_ahci_read_temperature(struct thermal_zone_device *tz, int *temp)
+{
+ return __sata_ahci_read_temperature(tz->devdata, temp);
+}
+
static ssize_t sata_ahci_show_temp(struct device *dev,
struct device_attribute *da,
char *buf)
@@ -423,14 +428,14 @@ static ssize_t sata_ahci_show_temp(struct device *dev,
unsigned int temp = 0;
int err;
- err = sata_ahci_read_temperature(dev, &temp);
+ err = __sata_ahci_read_temperature(dev, &temp);
if (err < 0)
return err;
return sprintf(buf, "%u\n", temp);
}
-static const struct thermal_zone_of_device_ops fsl_sata_ahci_of_thermal_ops = {
+static const struct thermal_zone_device_ops fsl_sata_ahci_of_thermal_ops = {
.get_temp = sata_ahci_read_temperature,
};
@@ -1131,8 +1136,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
ret = PTR_ERR(hwmon_dev);
goto disable_clk;
}
- devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- &fsl_sata_ahci_of_thermal_ops);
+ devm_thermal_of_zone_register(hwmon_dev, 0, hwmon_dev,
+ &fsl_sata_ahci_of_thermal_ops);
dev_info(dev, "%s: sensor 'sata_ahci'\n", dev_name(hwmon_dev));
}
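Editorial aside: the ahci_imx hunk above adapts the old void*-based get_temp callback to the new thermal_zone_device_ops signature by adding a thin wrapper that unwraps tz->devdata. A minimal userspace C sketch of that adapter pattern; the struct and values are illustrative.

#include <stdio.h>

struct zone {
	void *devdata;			/* opaque pointer handed back to the driver */
};

/* Existing helper keeps the old void * signature. */
static int __read_temperature(void *dev, int *temp)
{
	(void)dev;
	*temp = 45000;			/* millidegrees Celsius, made up */
	return 0;
}

/* New-style callback: takes the zone and forwards its private data. */
static int read_temperature(struct zone *tz, int *temp)
{
	return __read_temperature(tz->devdata, temp);
}

int main(void)
{
	struct zone z = { .devdata = NULL };
	int t;

	if (!read_temperature(&z, &t))
		printf("temp = %d\n", t);
	return 0;
}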
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b86913db1a..d3ce5c383f3a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3989,6 +3989,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* These specific Pioneer models have LPM issues */
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1b82283f4b49..08e11bc312c2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2119,6 +2119,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index eef57d101ed1..b6806d41a8c5 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1018,26 +1018,25 @@ DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
/**
- * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- * @ap: ATA port to which the device change the queue depth
+ * ata_change_queue_depth - Set a device maximum queue depth
+ * @ap: ATA port of the target device
+ * @dev: target ATA device
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
*
- * libsas and libata have different approaches for associating a sdev to
- * its ata_port.
+ * Helper to set a device maximum queue depth, usable with both libsas
+ * and libata.
*
*/
-int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth)
+int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth)
{
- struct ata_device *dev;
unsigned long flags;
- if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+ if (!dev || !ata_dev_enabled(dev))
return sdev->queue_depth;
- dev = ata_scsi_find_dev(ap, sdev);
- if (!dev || !ata_dev_enabled(dev))
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
/* NCQ enabled? */
@@ -1059,7 +1058,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
return scsi_change_queue_depth(sdev, queue_depth);
}
-EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
+EXPORT_SYMBOL_GPL(ata_change_queue_depth);
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
@@ -1080,7 +1079,8 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- return __ata_change_queue_depth(ap, sdev, queue_depth);
+ return ata_change_queue_depth(ap, ata_scsi_find_dev(ap, sdev),
+ sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f3c64e796423..e2ebb0b065e2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1055,6 +1055,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
+ int depth = 1;
if (!ata_id_has_unload(dev->id))
dev->flags |= ATA_DFLAG_NO_UNLOAD;
@@ -1100,13 +1101,10 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
- if (dev->flags & ATA_DFLAG_NCQ) {
- int depth;
-
+ if (dev->flags & ATA_DFLAG_NCQ)
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
- depth = min(ATA_MAX_QUEUE, depth);
- scsi_change_queue_depth(sdev, depth);
- }
+ depth = min(ATA_MAX_QUEUE, depth);
+ scsi_change_queue_depth(sdev, depth);
if (dev->flags & ATA_DFLAG_TRUSTED)
sdev->security_supported = 1;
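Editorial aside: after the libata-scsi hunk above, the queue depth starts at 1, is raised only when NCQ is active, and is always clamped to the driver maximum. A small userspace C sketch of that selection, with illustrative numbers.

#include <stdio.h>

#define ATA_MAX_QUEUE	32

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

static int pick_depth(int ncq, int host_can_queue, int id_queue_depth)
{
	int depth = 1;				/* non-NCQ devices stay at 1 */

	if (ncq)
		depth = min_int(host_can_queue, id_queue_depth);

	return min_int(ATA_MAX_QUEUE, depth);	/* always clamp to the driver max */
}

int main(void)
{
	printf("no NCQ: %d\n", pick_depth(0, 64, 32));	/* 1 */
	printf("NCQ:    %d\n", pick_depth(1, 64, 32));	/* 32 */
	return 0;
}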
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 4fab3b2c7023..02425991c159 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -775,7 +775,7 @@ static int ht16k33_probe(struct i2c_client *client)
return err;
}
-static int ht16k33_remove(struct i2c_client *client)
+static void ht16k33_remove(struct i2c_client *client)
{
struct ht16k33_priv *priv = i2c_get_clientdata(client);
struct ht16k33_fbdev *fbdev = &priv->fbdev;
@@ -796,8 +796,6 @@ static int ht16k33_remove(struct i2c_client *client)
device_remove_file(&client->dev, &dev_attr_map_seg14);
break;
}
-
- return 0;
}
static const struct i2c_device_id ht16k33_i2c_match[] = {
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index e465108d9998..135831a16514 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -340,13 +340,12 @@ fail1:
return err;
}
-static int lcd2s_i2c_remove(struct i2c_client *i2c)
+static void lcd2s_i2c_remove(struct i2c_client *i2c)
{
struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c);
charlcd_unregister(lcd2s->charlcd);
charlcd_free(lcd2s->charlcd);
- return 0;
}
static const struct i2c_device_id lcd2s_i2c_id[] = {
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 0424b59b695e..dd90591e51ba 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -353,7 +353,7 @@ void topology_init_cpu_capacity_cppc(void)
struct cppc_perf_caps perf_caps;
int cpu;
- if (likely(acpi_disabled || !acpi_cpc_valid()))
+ if (likely(!acpi_cpc_valid()))
return;
raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
@@ -724,7 +724,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
*/
if (cpumask_subset(cpu_coregroup_mask(cpu),
&cpu_topology[cpu].cluster_sibling))
- return get_cpu_mask(cpu);
+ return topology_sibling_cpumask(cpu);
return &cpu_topology[cpu].cluster_sibling;
}
@@ -735,7 +735,7 @@ void update_siblings_masks(unsigned int cpuid)
int cpu, ret;
ret = detect_cache_attributes(cpuid);
- if (ret)
+ if (ret && ret != -ENOENT)
pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 753e7cca0f40..5fb4bc51dd8b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1625,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
}
early_param("fw_devlink", fw_devlink_setup);
-static bool fw_devlink_strict = true;
+static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
return strtobool(arg, &fw_devlink_strict);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 70f79fc71539..ec69b43f926a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -274,12 +274,42 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Return:
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
+ *
+ * Drivers or subsystems can opt-in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
+ return -ENODEV;
+ }
+
+ if (!driver_deferred_probe_timeout && initcalls_done) {
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
+ return -ETIMEDOUT;
+ }
+
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
+
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
+ driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
@@ -881,6 +911,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -1120,6 +1155,11 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 15a75afe6b84..676b6275d5b5 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override,
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
+ /*
+ * Compute the real length of the string in case userspace sends us a
+ * bunch of \0 characters like python likes to do.
+ */
+ len = strlen(s);
+
if (!len) {
/* Empty string passed - clear override */
device_lock(dev);
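Editorial aside: the driver_set_override() hunk above recomputes the length with strlen() because the byte count handed to the store operation may include trailing NUL padding. A tiny userspace C sketch showing how the two lengths differ.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* What userspace wrote: the name plus three padding NUL bytes. */
	const char buf[] = "my_driver\0\0\0";
	size_t count = sizeof(buf) - 1;		/* 12 bytes reach the store op */
	size_t len = strlen(buf);		/* 9, the real override length */

	printf("count=%zu len=%zu\n", count, len);
	return 0;
}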
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index 77bad32c481a..5b66b3d1fa16 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -93,10 +93,9 @@ static void fw_dev_release(struct device *dev)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- if (fw_sysfs->fw_upload_priv) {
- free_fw_priv(fw_sysfs->fw_priv);
- kfree(fw_sysfs->fw_upload_priv);
- }
+ if (fw_sysfs->fw_upload_priv)
+ fw_upload_free(fw_sysfs);
+
kfree(fw_sysfs);
}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 5d8ff1675c79..df1d5add698f 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -106,12 +106,17 @@ extern struct device_attribute dev_attr_cancel;
extern struct device_attribute dev_attr_remaining_size;
int fw_upload_start(struct fw_sysfs *fw_sysfs);
+void fw_upload_free(struct fw_sysfs *fw_sysfs);
umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
#else
static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
{
return 0;
}
+
+static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+}
#endif
#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 87044d52322a..a0af8f5f13d8 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -264,6 +264,15 @@ int fw_upload_start(struct fw_sysfs *fw_sysfs)
return 0;
}
+void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_upload_priv->fw_upload);
+ kfree(fw_upload_priv);
+}
+
/**
* firmware_upload_register() - register for the firmware upload sysfs API
* @module: kernel module of this device
@@ -377,6 +386,7 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
{
struct fw_sysfs *fw_sysfs = fw_upload->priv;
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+ struct module *module = fw_upload_priv->module;
mutex_lock(&fw_upload_priv->lock);
if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
@@ -392,6 +402,6 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
unregister:
device_unregister(&fw_sysfs->dev);
- module_put(fw_upload_priv->module);
+ module_put(module);
}
EXPORT_SYMBOL_GPL(firmware_upload_unregister);
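Editorial aside: the firmware_upload_unregister() hunk above copies the module pointer to a local before device_unregister() can free the structure that holds it. A minimal userspace C sketch of that save-before-release pattern, with illustrative names.

#include <stdio.h>
#include <stdlib.h>

struct upload_priv {
	const char *module_name;	/* stands in for the struct module pointer */
};

static void unregister_and_free(struct upload_priv *priv)
{
	free(priv);			/* after this, priv must not be touched */
}

int main(void)
{
	struct upload_priv *priv = malloc(sizeof(*priv));
	const char *module_name;

	if (!priv)
		return 1;
	priv->module_name = "fw_upload_demo";

	module_name = priv->module_name;	/* save before the object dies */
	unregister_and_free(priv);
	printf("released module %s\n", module_name);	/* safe: local copy */
	return 0;
}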
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5a2e0232862e..55a10e6d4e2a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2733,7 +2733,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return -ENODEV;
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 997be3ac20a7..b52049098d4e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -792,10 +792,13 @@ static int rpm_resume(struct device *dev, int rpmflags)
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
- if (dev->power.runtime_status == RPM_SUSPENDING)
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true;
- else
+ if (rpmflags & RPM_NOWAIT)
+ retval = -EINPROGRESS;
+ } else {
retval = -EINPROGRESS;
+ }
goto out;
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e3befa2c1b66..7cc0c0cf8eaa 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -944,6 +944,8 @@ void pm_system_irq_wakeup(unsigned int irq_number)
else
irq_number = 0;
+ pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
+
raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
if (irq_number)
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 71f16be7e717..3ccdd86a97e7 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -10,13 +10,14 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/swab.h>
#include "internal.h"
struct regmap_mmio_context {
void __iomem *regs;
unsigned int val_bytes;
- bool relaxed_mmio;
+ bool big_endian;
bool attached_clk;
struct clk *clk;
@@ -33,9 +34,6 @@ static int regmap_mmio_regbits_check(size_t reg_bits)
case 8:
case 16:
case 32:
-#ifdef CONFIG_64BIT
- case 64:
-#endif
return 0;
default:
return -EINVAL;
@@ -50,18 +48,13 @@ static int regmap_mmio_get_min_stride(size_t val_bits)
case 8:
/* The core treats 0 as 1 */
min_stride = 0;
- return 0;
+ break;
case 16:
min_stride = 2;
break;
case 32:
min_stride = 4;
break;
-#ifdef CONFIG_64BIT
- case 64:
- min_stride = 8;
- break;
-#endif
default:
return -EINVAL;
}
@@ -83,6 +76,12 @@ static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
writeb_relaxed(val, ctx->regs + reg);
}
+static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite8(val, ctx->regs + reg);
+}
+
static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
@@ -97,10 +96,22 @@ static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
writew_relaxed(val, ctx->regs + reg);
}
+static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16(val, ctx->regs + reg);
+}
+
static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
+ writew(swab16(val), ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
iowrite16be(val, ctx->regs + reg);
}
@@ -118,28 +129,24 @@ static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
writel_relaxed(val, ctx->regs + reg);
}
-static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
- unsigned int reg,
- unsigned int val)
+static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
{
- iowrite32be(val, ctx->regs + reg);
+ iowrite32(val, ctx->regs + reg);
}
-#ifdef CONFIG_64BIT
-static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
- writeq(val, ctx->regs + reg);
+ writel(swab32(val), ctx->regs + reg);
}
-static void regmap_mmio_write64le_relaxed(struct regmap_mmio_context *ctx,
- unsigned int reg,
- unsigned int val)
+static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
{
- writeq_relaxed(val, ctx->regs + reg);
+ iowrite32be(val, ctx->regs + reg);
}
-#endif
static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
@@ -160,6 +167,83 @@ static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
return 0;
}
+static int regmap_mmio_noinc_write(void *context, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+ int i;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * There are no native, assembly-optimized write single register
+ * operations for big endian, so fall back to emulation if this
+ * is needed. (Single bytes are fine, they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ {
+ const u16 *valp = (const u16 *)val;
+ for (i = 0; i < val_count; i++)
+ writew(swab16(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+ case 4:
+ {
+ const u32 *valp = (const u32 *)val;
+ for (i = 0; i < val_count; i++)
+ writel(swab32(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#ifdef CONFIG_64BIT
+ case 8:
+ {
+ const u64 *valp = (const u64 *)val;
+ for (i = 0; i < val_count; i++)
+ writeq(swab64(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ writesb(ctx->regs + reg, (const u8 *)val, val_count);
+ break;
+ case 2:
+ writesw(ctx->regs + reg, (const u16 *)val, val_count);
+ break;
+ case 4:
+ writesl(ctx->regs + reg, (const u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ writesq(ctx->regs + reg, (const u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
unsigned int reg)
{
@@ -172,6 +256,12 @@ static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
return readb_relaxed(ctx->regs + reg);
}
+static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread8(ctx->regs + reg);
+}
+
static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
@@ -184,9 +274,21 @@ static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx
return readw_relaxed(ctx->regs + reg);
}
+static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16(ctx->regs + reg);
+}
+
static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
+ return swab16(readw(ctx->regs + reg));
+}
+
+static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
return ioread16be(ctx->regs + reg);
}
@@ -202,25 +304,23 @@ static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx
return readl_relaxed(ctx->regs + reg);
}
-static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
- unsigned int reg)
+static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- return ioread32be(ctx->regs + reg);
+ return ioread32(ctx->regs + reg);
}
-#ifdef CONFIG_64BIT
-static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
- return readq(ctx->regs + reg);
+ return swab32(readl(ctx->regs + reg));
}
-static unsigned int regmap_mmio_read64le_relaxed(struct regmap_mmio_context *ctx,
- unsigned int reg)
+static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- return readq_relaxed(ctx->regs + reg);
+ return ioread32be(ctx->regs + reg);
}
-#endif
static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
@@ -241,6 +341,71 @@ static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
return 0;
}
+static int regmap_mmio_noinc_read(void *context, unsigned int reg,
+ void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ readsb(ctx->regs + reg, (u8 *)val, val_count);
+ break;
+ case 2:
+ readsw(ctx->regs + reg, (u16 *)val, val_count);
+ break;
+ case 4:
+ readsl(ctx->regs + reg, (u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ readsq(ctx->regs + reg, (u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+	/*
+	 * There are no native, assembly-optimized big-endian single-register
+	 * read operations, so byte-swap the values after reading them when
+	 * this is needed. (Single bytes are fine, they are not affected by
+	 * endianness.)
+	 */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ swab16_array(val, val_count);
+ break;
+ case 4:
+ swab32_array(val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ swab64_array(val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
@@ -257,6 +422,8 @@ static const struct regmap_bus regmap_mmio = {
.fast_io = true,
.reg_write = regmap_mmio_write,
.reg_read = regmap_mmio_read,
+ .reg_noinc_write = regmap_mmio_noinc_write,
+ .reg_noinc_read = regmap_mmio_noinc_read,
.free_context = regmap_mmio_free_context,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
@@ -284,13 +451,15 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
+ if (config->use_relaxed_mmio && config->io_port)
+ return ERR_PTR(-EINVAL);
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
- ctx->relaxed_mmio = config->use_relaxed_mmio;
ctx->clk = ERR_PTR(-ENODEV);
switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
@@ -301,7 +470,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
#endif
switch (config->val_bits) {
case 8:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read8_relaxed;
ctx->reg_write = regmap_mmio_write8_relaxed;
} else {
@@ -310,7 +482,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
}
break;
case 16:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16le;
+ ctx->reg_write = regmap_mmio_iowrite16le;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read16le_relaxed;
ctx->reg_write = regmap_mmio_write16le_relaxed;
} else {
@@ -319,7 +494,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
}
break;
case 32:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32le;
+ ctx->reg_write = regmap_mmio_iowrite32le;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read32le_relaxed;
ctx->reg_write = regmap_mmio_write32le_relaxed;
} else {
@@ -327,17 +505,6 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->reg_write = regmap_mmio_write32le;
}
break;
-#ifdef CONFIG_64BIT
- case 64:
- if (ctx->relaxed_mmio) {
- ctx->reg_read = regmap_mmio_read64le_relaxed;
- ctx->reg_write = regmap_mmio_write64le_relaxed;
- } else {
- ctx->reg_read = regmap_mmio_read64le;
- ctx->reg_write = regmap_mmio_write64le;
- }
- break;
-#endif
default:
ret = -EINVAL;
goto err_free;
@@ -347,18 +514,34 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
#ifdef __BIG_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
+ ctx->big_endian = true;
switch (config->val_bits) {
case 8:
- ctx->reg_read = regmap_mmio_read8;
- ctx->reg_write = regmap_mmio_write8;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
break;
case 16:
- ctx->reg_read = regmap_mmio_read16be;
- ctx->reg_write = regmap_mmio_write16be;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16be;
+ ctx->reg_write = regmap_mmio_iowrite16be;
+ } else {
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ }
break;
case 32:
- ctx->reg_read = regmap_mmio_read32be;
- ctx->reg_write = regmap_mmio_write32be;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32be;
+ ctx->reg_write = regmap_mmio_iowrite32be;
+ } else {
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ }
break;
default:
ret = -EINVAL;
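Editorial aside: the regmap-mmio noinc paths above fall back to explicit byte-swapping for big-endian values, since there are no swapping string accessors. A minimal userspace C sketch of swapping a 32-bit buffer, using the compiler builtin as a stand-in for the kernel's swab32().

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void swab32_buf(uint32_t *buf, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		buf[i] = __builtin_bswap32(buf[i]);	/* GCC/Clang builtin */
}

int main(void)
{
	uint32_t vals[] = { 0x11223344, 0xa1b2c3d4 };

	swab32_buf(vals, 2);
	printf("%08x %08x\n", vals[0], vals[1]);	/* 44332211 d4c3b2a1 */
	return 0;
}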
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index ad1da83e849f..4c2b94b3e30b 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/swab.h>
/*
* This driver implements the regmap operations for a generic SPI
@@ -162,19 +163,12 @@ struct spi_avmm_bridge {
/* bridge buffer used in translation between protocol layers */
char trans_buf[TRANS_BUF_SIZE];
char phy_buf[PHY_BUF_SIZE];
- void (*swap_words)(char *buf, unsigned int len);
+ void (*swap_words)(void *buf, unsigned int len);
};
-static void br_swap_words_32(char *buf, unsigned int len)
+static void br_swap_words_32(void *buf, unsigned int len)
{
- u32 *p = (u32 *)buf;
- unsigned int count;
-
- count = len / 4;
- while (count--) {
- *p = swab32p(p);
- p++;
- }
+ swab32_array(buf, len / 4);
}
/*
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 719323bc6c7f..37ab23a9d034 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
const struct regmap_config *config)
{
size_t max_size = spi_max_transfer_size(spi);
+ size_t max_msg_size, reg_reserve_size;
struct regmap_bus *bus;
if (max_size != SIZE_MAX) {
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
if (!bus)
return ERR_PTR(-ENOMEM);
+ max_msg_size = spi_max_message_size(spi);
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ + config->pad_bits / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
bus->free_on_exit = true;
bus->max_raw_read = max_size;
bus->max_raw_write = max_size;
+
return bus;
}
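Editorial aside: the regmap-spi hunk above shrinks the raw transfer limit so the register address and padding still fit within the controller's message limit. A small userspace C sketch of that arithmetic, with illustrative sizes.

#include <stdio.h>

#define BITS_PER_BYTE	8

int main(void)
{
	size_t max_size = 256;		/* spi_max_transfer_size(), illustrative */
	size_t max_msg_size = 256;	/* spi_max_message_size(), illustrative */
	size_t reg_bits = 16, pad_bits = 8;
	size_t reserve = reg_bits / BITS_PER_BYTE + pad_bits / BITS_PER_BYTE;

	if (max_size + reserve > max_msg_size)
		max_size -= reserve;	/* leave room for the address bytes */

	printf("usable raw transfer size: %zu\n", max_size);	/* 253 */
	return 0;
}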
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index fee221c5008c..c6d6d53e8cd3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -288,15 +288,9 @@ static void regmap_format_16_native(void *buf, unsigned int val,
memcpy(buf, &v, sizeof(v));
}
-static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
- u8 *b = buf;
-
- val <<= shift;
-
- b[0] = val >> 16;
- b[1] = val >> 8;
- b[2] = val;
+ put_unaligned_be24(val << shift, buf);
}
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
@@ -380,14 +374,9 @@ static unsigned int regmap_parse_16_native(const void *buf)
return v;
}
-static unsigned int regmap_parse_24(const void *buf)
+static unsigned int regmap_parse_24_be(const void *buf)
{
- const u8 *b = buf;
- unsigned int ret = b[2];
- ret |= ((unsigned int)b[1]) << 8;
- ret |= ((unsigned int)b[0]) << 16;
-
- return ret;
+ return get_unaligned_be24(buf);
}
static unsigned int regmap_parse_32_be(const void *buf)
@@ -991,9 +980,13 @@ struct regmap *__regmap_init(struct device *dev,
break;
case 24:
- if (reg_endian != REGMAP_ENDIAN_BIG)
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_24_be;
+ break;
+ default:
goto err_hwlock;
- map->format.format_reg = regmap_format_24;
+ }
break;
case 32:
@@ -1064,10 +1057,14 @@ struct regmap *__regmap_init(struct device *dev,
}
break;
case 24:
- if (val_endian != REGMAP_ENDIAN_BIG)
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_24_be;
+ map->format.parse_val = regmap_parse_24_be;
+ break;
+ default:
goto err_hwlock;
- map->format.format_val = regmap_format_24;
- map->format.parse_val = regmap_parse_24;
+ }
break;
case 32:
switch (val_endian) {
@@ -2132,6 +2129,99 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
+static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
+ void *val, unsigned int val_len, bool write)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ unsigned int lastval;
+ u8 *u8p;
+ u16 *u16p;
+ u32 *u32p;
+#ifdef CONFIG_64BIT
+ u64 *u64p;
+#endif
+ int ret;
+ int i;
+
+ switch (val_bytes) {
+ case 1:
+ u8p = val;
+ if (write)
+ lastval = (unsigned int)u8p[val_count - 1];
+ break;
+ case 2:
+ u16p = val;
+ if (write)
+ lastval = (unsigned int)u16p[val_count - 1];
+ break;
+ case 4:
+ u32p = val;
+ if (write)
+ lastval = (unsigned int)u32p[val_count - 1];
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ u64p = val;
+ if (write)
+ lastval = (unsigned int)u64p[val_count - 1];
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ /*
+	 * Update the cache with the last value we write; the rest has already
+	 * gone down into the hardware FIFO, and FIFOs cannot be cached. This
+	 * makes sure a single read from the cache will work.
+ */
+ if (write) {
+ if (!map->cache_bypass && !map->defer_caching) {
+ ret = regcache_write(map, reg, lastval);
+ if (ret != 0)
+ return ret;
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+ ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
+ } else {
+ ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
+ }
+
+ if (!ret && regmap_should_log(map)) {
+ dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
+ for (i = 0; i < val_count; i++) {
+ switch (val_bytes) {
+ case 1:
+ pr_cont("%x", u8p[i]);
+ break;
+ case 2:
+ pr_cont("%x", u16p[i]);
+ break;
+ case 4:
+ pr_cont("%x", u32p[i]);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ pr_cont("%llx", u64p[i]);
+ break;
+#endif
+ default:
+ break;
+ }
+ if (i == (val_count - 1))
+ pr_cont("]\n");
+ else
+ pr_cont(",");
+ }
+ }
+
+	return ret;
+}
+
/**
* regmap_noinc_write(): Write data from a register without incrementing the
* register number
@@ -2159,9 +2249,8 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
size_t write_len;
int ret;
- if (!map->write)
- return -ENOTSUPP;
-
+ if (!map->write && !(map->bus && map->bus->reg_noinc_write))
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2176,6 +2265,15 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
goto out_unlock;
}
+ /*
+ * Use the accelerated operation if we can. The val drops the const
+ * typing in order to facilitate code reuse in regmap_noinc_readwrite().
+ */
+ if (map->bus->reg_noinc_write) {
+ ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
+ goto out_unlock;
+ }
+
while (val_len) {
if (map->max_raw_write && map->max_raw_write < val_len)
write_len = map->max_raw_write;
@@ -2350,6 +2448,10 @@ out:
kfree(wval);
}
+
+ if (!ret)
+ trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -2946,6 +3048,22 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
goto out_unlock;
}
+ /* Use the accelerated operation if we can */
+ if (map->bus->reg_noinc_read) {
+ /*
+ * We have not defined the FIFO semantics for cache, as the
+ * cache is just one value deep. Should we return the last
+		 * written value? Just avoid this by always reading the FIFO
+		 * even when using the cache; cache-only mode will not work.
+ */
+ if (map->cache_only) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
+ goto out_unlock;
+ }
+
while (val_len) {
if (map->max_raw_read && map->max_raw_read < val_len)
read_len = map->max_raw_read;
@@ -3095,6 +3213,9 @@ out:
map->unlock(map->lock_arg);
}
+ if (!ret)
+ trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
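Editorial aside: for the noinc paths added above, a FIFO register cannot be cached as a range, so only the last value written is kept in the cache (and cache-only reads are refused). A minimal userspace C sketch of the one-value-deep cache idea.

#include <stdio.h>
#include <stddef.h>

static unsigned int cached_val;		/* one-value-deep "register cache" */

static void noinc_write(const unsigned int *vals, size_t count)
{
	if (count)
		cached_val = vals[count - 1];	/* remember only the last value */
	/* the earlier values have disappeared into the hardware FIFO */
}

int main(void)
{
	unsigned int burst[] = { 1, 2, 3, 4 };

	noinc_write(burst, 4);
	printf("cache now holds %u\n", cached_val);	/* 4 */
	return 0;
}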
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 9abee14df9ee..704e106e5dbd 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -32,9 +32,7 @@ DECLARE_EVENT_CLASS(regmap_reg,
__entry->val = val;
),
- TP_printk("%s reg=%x val=%x", __get_str(name),
- (unsigned int)__entry->reg,
- (unsigned int)__entry->val)
+ TP_printk("%s reg=%x val=%x", __get_str(name), __entry->reg, __entry->val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_write,
@@ -43,7 +41,6 @@ DEFINE_EVENT(regmap_reg, regmap_reg_write,
unsigned int val),
TP_ARGS(map, reg, val)
-
);
DEFINE_EVENT(regmap_reg, regmap_reg_read,
@@ -52,7 +49,6 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read,
unsigned int val),
TP_ARGS(map, reg, val)
-
);
DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
@@ -61,7 +57,47 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
unsigned int val),
TP_ARGS(map, reg, val)
+);
+
+DECLARE_EVENT_CLASS(regmap_bulk,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len),
+
+ TP_STRUCT__entry(
+ __string(name, regmap_name(map))
+ __field(unsigned int, reg)
+ __dynamic_array(char, buf, val_len)
+ __field(int, val_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->reg = reg;
+ __entry->val_len = val_len;
+ memcpy(__get_dynamic_array(buf), val, val_len);
+ ),
+ TP_printk("%s reg=%x val=%s", __get_str(name), __entry->reg,
+ __print_hex(__get_dynamic_array(buf), __entry->val_len))
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_write,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_read,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
);
DECLARE_EVENT_CLASS(regmap_block,
@@ -82,9 +118,7 @@ DECLARE_EVENT_CLASS(regmap_block,
__entry->count = count;
),
- TP_printk("%s reg=%x count=%d", __get_str(name),
- (unsigned int)__entry->reg,
- (int)__entry->count)
+ TP_printk("%s reg=%x count=%d", __get_str(name), __entry->reg, __entry->count)
);
DEFINE_EVENT(regmap_block, regmap_hw_read_start,
@@ -154,8 +188,7 @@ DECLARE_EVENT_CLASS(regmap_bool,
__entry->flag = flag;
),
- TP_printk("%s flag=%d", __get_str(name),
- (int)__entry->flag)
+ TP_printk("%s flag=%d", __get_str(name), __entry->flag)
);
DEFINE_EVENT(regmap_bool, regmap_cache_only,
@@ -163,7 +196,6 @@ DEFINE_EVENT(regmap_bool, regmap_cache_only,
TP_PROTO(struct regmap *map, bool flag),
TP_ARGS(map, flag)
-
);
DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
@@ -171,7 +203,6 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
TP_PROTO(struct regmap *map, bool flag),
TP_ARGS(map, flag)
-
);
DECLARE_EVENT_CLASS(regmap_async,
@@ -203,7 +234,6 @@ DEFINE_EVENT(regmap_async, regmap_async_io_complete,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
DEFINE_EVENT(regmap_async, regmap_async_complete_start,
@@ -211,7 +241,6 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_start,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
DEFINE_EVENT(regmap_async, regmap_async_complete_done,
@@ -219,7 +248,6 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_done,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
TRACE_EVENT(regcache_drop_region,
@@ -241,8 +269,7 @@ TRACE_EVENT(regcache_drop_region,
__entry->to = to;
),
- TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
- (unsigned int)__entry->to)
+ TP_printk("%s %u-%u", __get_str(name), __entry->from, __entry->to)
);
#endif /* _TRACE_REGMAP_H */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 12aca34e8db0..4f01e6b17bb9 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -30,7 +30,7 @@ enum bcma_boot_dev {
BCMA_BOOT_DEV_NAND,
};
-/* The 47162a0 hangs when reading MIPS DMP registers registers */
+/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 12b3ca8f6f4a..128722cf6c3c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -108,7 +108,7 @@ static ssize_t aoedisk_show_payload(struct device *dev,
return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
-static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+static int aoe_debugfs_show(struct seq_file *s, void *ignored)
{
struct aoedev *d;
struct aoetgt **t, **te;
@@ -151,11 +151,7 @@ static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
return 0;
}
-
-static int aoe_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, aoedisk_debugfs_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(aoe_debugfs);
static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
@@ -184,13 +180,6 @@ static const struct attribute_group *aoe_attr_groups[] = {
NULL,
};
-static const struct file_operations aoe_debugfs_fops = {
- .open = aoe_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void
aoedisk_add_debugfs(struct aoedev *d)
{
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 859499cd1ff8..20acc4a1fd6d 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -397,7 +397,7 @@ static int brd_alloc(int i)
disk->minors = max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
- strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
+ strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
/*
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f15f2f041596..4d661282ff41 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1529,7 +1529,6 @@ extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
-extern int w_start_resync(struct drbd_work *, int);
extern void resync_timer_fn(struct timer_list *t);
extern void start_resync_timer_fn(struct timer_list *t);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 013d355a2033..864c98e74875 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -4752,7 +4752,7 @@ void notify_helper(enum drbd_notification_type type,
struct drbd_genlmsghdr *dh;
int err;
- strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
+ strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
helper_info.helper_status = status;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index af4c7d65490b..c897c4572036 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2113,9 +2113,6 @@ static int receive_DataReply(struct drbd_connection *connection, struct packet_i
if (unlikely(!req))
return -EIO;
- /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
- * special casing it there for the various failure cases.
- * still no race with drbd_fail_pending_reads */
err = recv_dless_read(peer_device, req, sector, pi->size);
if (!err)
req_mod(req, DATA_RECEIVED);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 511f39a08de4..6237fa1dcb0e 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -266,8 +266,6 @@ struct bio_and_error {
extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
-extern void _req_may_be_done(struct drbd_request *req,
- struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e3c0ba93c1a3..ad92192c7d61 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -979,6 +979,11 @@ loop_set_status_from_info(struct loop_device *lo,
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
+
+ /* loff_t vars have been assigned __u64 */
+ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+ return -EOVERFLOW;
+
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
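Editorial aside: the loop hunk above rejects offsets that wrap negative when a userspace __u64 is assigned to a signed loff_t. A tiny userspace C sketch of the check; the conversion behaviour is implementation-defined, but wraps on the usual two's-complement ABIs.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Value taken from struct loop_info64, illustrative. */
	uint64_t user_offset = 0xffffffffffffff00ULL;
	/* loff_t is a signed 64-bit type; the conversion wraps negative here. */
	int64_t lo_offset = (int64_t)user_offset;

	if (lo_offset < 0)
		printf("reject: offset overflows loff_t (-EOVERFLOW)\n");
	return 0;
}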
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 562725d222a7..815d77ba6381 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1397,15 +1397,15 @@ static void mtip_dump_identify(struct mtip_port *port)
if (!port->identify_valid)
return;
- strlcpy(cbuf, (char *)(port->identify+10), 21);
+ strscpy(cbuf, (char *)(port->identify + 10), 21);
dev_info(&port->dd->pdev->dev,
"Serial No.: %s\n", cbuf);
- strlcpy(cbuf, (char *)(port->identify+23), 9);
+ strscpy(cbuf, (char *)(port->identify + 23), 9);
dev_info(&port->dd->pdev->dev,
"Firmware Ver.: %s\n", cbuf);
- strlcpy(cbuf, (char *)(port->identify+27), 41);
+ strscpy(cbuf, (char *)(port->identify + 27), 41);
dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
@@ -1421,13 +1421,13 @@ static void mtip_dump_identify(struct mtip_port *port)
pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
switch (revid & 0xFF) {
case 0x1:
- strlcpy(cbuf, "A0", 3);
+ strscpy(cbuf, "A0", 3);
break;
case 0x3:
- strlcpy(cbuf, "A2", 3);
+ strscpy(cbuf, "A2", 3);
break;
default:
- strlcpy(cbuf, "?", 2);
+ strscpy(cbuf, "?", 2);
break;
}
dev_info(&port->dd->pdev->dev,
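Editorial aside: the strlcpy() to strscpy() conversions above (mtip32xx, and brd, drbd and ps3vram elsewhere in this diff) prefer a bounded copy that always NUL-terminates and reports truncation. A simplified userspace C stand-in for that behaviour; this is not the kernel implementation.

#include <stdio.h>
#include <string.h>

/* Always NUL-terminate; return -7 (E2BIG in the kernel) on truncation. */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (!size)
		return -7;
	len = strlen(src);
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -7;			/* truncated, unlike strlcpy */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[9];

	printf("%ld '%s'\n", bounded_copy(buf, "A2", sizeof(buf)), buf);
	printf("%ld '%s'\n", bounded_copy(buf, "very long serial", sizeof(buf)), buf);
	return 0;
}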
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2a709daefbc4..4762a03e1ffe 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1413,10 +1413,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret)
+ if (ret) {
sock_shutdown(nbd);
- flush_workqueue(nbd->recv_workq);
+ nbd_clear_que(nbd);
+ }
+ flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(nbd);
/* user requested, ignore socket errors */
@@ -2322,6 +2324,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = nbd_connect_genl_ops,
.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
+ .resv_start_op = NBD_CMD_STATUS + 1,
.maxattr = NBD_ATTR_MAX,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index c451c477978f..1f154f92f4c2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1528,7 +1528,7 @@ static bool should_requeue_request(struct request *rq)
return false;
}
-static int null_map_queues(struct blk_mq_tag_set *set)
+static void null_map_queues(struct blk_mq_tag_set *set)
{
struct nullb *nullb = set->driver_data;
int i, qoff;
@@ -1555,7 +1555,9 @@ static int null_map_queues(struct blk_mq_tag_set *set)
} else {
pr_warn("tag set has unexpected nr_hw_queues: %d\n",
set->nr_hw_queues);
- return -EINVAL;
+ WARN_ON_ONCE(true);
+ submit_queues = 1;
+ poll_queues = 0;
}
}
@@ -1577,8 +1579,6 @@ static int null_map_queues(struct blk_mq_tag_set *set)
qoff += map->nr_queues;
blk_mq_map_queues(map);
}
-
- return 0;
}
static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index e1d080f680ed..c76e0148eada 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -745,7 +745,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
gendisk->flags |= GENHD_FL_NO_PART;
gendisk->fops = &ps3vram_fops;
gendisk->private_data = dev;
- strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
+ strscpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
blk_queue_max_segments(gendisk->queue, BLK_MAX_SEGMENTS);
blk_queue_max_segment_size(gendisk->queue, BLK_MAX_SEGMENT_SIZE);
diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile
index 5bb1a7ad1ada..40b31630822c 100644
--- a/drivers/block/rnbd/Makefile
+++ b/drivers/block/rnbd/Makefile
@@ -6,10 +6,12 @@ rnbd-client-y := rnbd-clt.o \
rnbd-clt-sysfs.o \
rnbd-common.o
+CFLAGS_rnbd-srv-trace.o = -I$(src)
+
rnbd-server-y := rnbd-common.o \
rnbd-srv.o \
- rnbd-srv-dev.o \
- rnbd-srv-sysfs.o
+ rnbd-srv-sysfs.o \
+ rnbd-srv-trace.o
obj-$(CONFIG_BLK_DEV_RNBD_CLIENT) += rnbd-client.o
obj-$(CONFIG_BLK_DEV_RNBD_SERVER) += rnbd-server.o
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 04da33a22ef4..78334da74d8b 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1159,13 +1159,11 @@ static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct rnbd_queue *q = hctx->driver_data;
struct rnbd_clt_dev *dev = q->dev;
- int cnt;
- cnt = rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
- return cnt;
+ return rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
}
-static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
+static void rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct rnbd_clt_session *sess = set->driver_data;
@@ -1194,8 +1192,6 @@ static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
set->map[HCTX_TYPE_DEFAULT].nr_queues,
set->map[HCTX_TYPE_READ].nr_queues);
}
-
- return 0;
}
static struct blk_mq_ops rnbd_mq_ops = {
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
deleted file mode 100644
index c63017f6e421..000000000000
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RDMA Network Block Driver
- *
- * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
- * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
- * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
- */
-#undef pr_fmt
-#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
-
-#include "rnbd-srv-dev.h"
-#include "rnbd-log.h"
-
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
-{
- struct rnbd_dev *dev;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- dev->blk_open_flags = flags;
- dev->bdev = blkdev_get_by_path(path, flags, THIS_MODULE);
- ret = PTR_ERR_OR_ZERO(dev->bdev);
- if (ret)
- goto err;
-
- dev->blk_open_flags = flags;
-
- return dev;
-
-err:
- kfree(dev);
- return ERR_PTR(ret);
-}
-
-void rnbd_dev_close(struct rnbd_dev *dev)
-{
- blkdev_put(dev->bdev, dev->blk_open_flags);
- kfree(dev);
-}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
deleted file mode 100644
index 8407d12f70af..000000000000
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * RDMA Network Block Driver
- *
- * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
- * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
- * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
- */
-#ifndef RNBD_SRV_DEV_H
-#define RNBD_SRV_DEV_H
-
-#include <linux/fs.h>
-#include "rnbd-proto.h"
-
-struct rnbd_dev {
- struct block_device *bdev;
- fmode_t blk_open_flags;
-};
-
-/**
- * rnbd_dev_open() - Open a device
- * @path: path to open
- * @flags: open flags
- */
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags);
-
-/**
- * rnbd_dev_close() - Close a device
- */
-void rnbd_dev_close(struct rnbd_dev *dev);
-
-void rnbd_endio(void *priv, int error);
-
-static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
-{
- return queue_max_segments(bdev_get_queue(dev->bdev));
-}
-
-static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
-{
- return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
-}
-
-static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
-{
- return bdev_max_secure_erase_sectors(dev->bdev);
-}
-
-static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
-{
- return bdev_max_discard_sectors(dev->bdev);
-}
-
-static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
-{
- return bdev_get_queue(dev->bdev)->limits.discard_granularity;
-}
-
-static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
-{
- return bdev_discard_alignment(dev->bdev);
-}
-
-#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv-trace.c b/drivers/block/rnbd/rnbd-srv-trace.c
new file mode 100644
index 000000000000..30f0895c18f5
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-trace.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-srv.h"
+#include "rnbd-srv.h"
+#include "rnbd-proto.h"
+
+/*
+ * We include this last so the definitions above are visible when the
+ * trace event bodies are generated by CREATE_TRACE_POINTS.
+ */
+#define CREATE_TRACE_POINTS
+#include "rnbd-srv-trace.h"
diff --git a/drivers/block/rnbd/rnbd-srv-trace.h b/drivers/block/rnbd/rnbd-srv-trace.h
new file mode 100644
index 000000000000..8dedf73bdd28
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-trace.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rnbd_srv
+
+#if !defined(_TRACE_RNBD_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RNBD_SRV_H
+
+#include <linux/tracepoint.h>
+
+struct rnbd_srv_session;
+struct rtrs_srv_op;
+
+DECLARE_EVENT_CLASS(rnbd_srv_link_class,
+ TP_PROTO(struct rnbd_srv_session *srv),
+
+ TP_ARGS(srv),
+
+ TP_STRUCT__entry(
+ __field(int, qdepth)
+ __string(sessname, srv->sessname)
+ ),
+
+ TP_fast_assign(
+ __entry->qdepth = srv->queue_depth;
+ __assign_str(sessname, srv->sessname);
+ ),
+
+ TP_printk("sessname: %s qdepth: %d",
+ __get_str(sessname),
+ __entry->qdepth
+ )
+);
+
+#define DEFINE_LINK_EVENT(name) \
+DEFINE_EVENT(rnbd_srv_link_class, name, \
+ TP_PROTO(struct rnbd_srv_session *srv), \
+ TP_ARGS(srv))
+
+DEFINE_LINK_EVENT(create_sess);
+DEFINE_LINK_EVENT(destroy_sess);
+
+TRACE_DEFINE_ENUM(RNBD_OP_READ);
+TRACE_DEFINE_ENUM(RNBD_OP_WRITE);
+TRACE_DEFINE_ENUM(RNBD_OP_FLUSH);
+TRACE_DEFINE_ENUM(RNBD_OP_DISCARD);
+TRACE_DEFINE_ENUM(RNBD_OP_SECURE_ERASE);
+TRACE_DEFINE_ENUM(RNBD_F_SYNC);
+TRACE_DEFINE_ENUM(RNBD_F_FUA);
+
+#define show_rnbd_rw_flags(x) \
+ __print_flags(x, "|", \
+ { RNBD_OP_READ, "READ" }, \
+ { RNBD_OP_WRITE, "WRITE" }, \
+ { RNBD_OP_FLUSH, "FLUSH" }, \
+ { RNBD_OP_DISCARD, "DISCARD" }, \
+ { RNBD_OP_SECURE_ERASE, "SECURE_ERASE" }, \
+ { RNBD_F_SYNC, "SYNC" }, \
+ { RNBD_F_FUA, "FUA" })
+
+TRACE_EVENT(process_rdma,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_io *msg,
+ struct rtrs_srv_op *id,
+ u32 datalen,
+ size_t usrlen),
+
+ TP_ARGS(srv, msg, id, datalen, usrlen),
+
+ TP_STRUCT__entry(
+ __string(sessname, srv->sessname)
+ __field(u8, dir)
+ __field(u8, ver)
+ __field(u32, device_id)
+ __field(u64, sector)
+ __field(u32, flags)
+ __field(u32, bi_size)
+ __field(u16, ioprio)
+ __field(u32, datalen)
+ __field(size_t, usrlen)
+ ),
+
+ TP_fast_assign(
+ __assign_str(sessname, srv->sessname);
+ __entry->dir = id->dir;
+ __entry->ver = srv->ver;
+ __entry->device_id = le32_to_cpu(msg->device_id);
+ __entry->sector = le64_to_cpu(msg->sector);
+ __entry->bi_size = le32_to_cpu(msg->bi_size);
+ __entry->flags = le32_to_cpu(msg->rw);
+ __entry->ioprio = le16_to_cpu(msg->prio);
+ __entry->datalen = datalen;
+ __entry->usrlen = usrlen;
+ ),
+
+ TP_printk("I/O req: sess: %s, type: %s, ver: %d, devid: %u, sector: %llu, bsize: %u, flags: %s, ioprio: %d, datalen: %u, usrlen: %zu",
+ __get_str(sessname),
+ __print_symbolic(__entry->dir,
+ { READ, "READ" },
+ { WRITE, "WRITE" }),
+ __entry->ver,
+ __entry->device_id,
+ __entry->sector,
+ __entry->bi_size,
+ show_rnbd_rw_flags(__entry->flags),
+ __entry->ioprio,
+ __entry->datalen,
+ __entry->usrlen
+ )
+);
+
+TRACE_EVENT(process_msg_sess_info,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_sess_info *msg),
+
+ TP_ARGS(srv, msg),
+
+ TP_STRUCT__entry(
+ __field(u8, proto_ver)
+ __field(u8, clt_ver)
+ __field(u8, srv_ver)
+ __string(sessname, srv->sessname)
+ ),
+
+ TP_fast_assign(
+ __entry->proto_ver = srv->ver;
+ __entry->clt_ver = msg->ver;
+ __entry->srv_ver = RNBD_PROTO_VER_MAJOR;
+ __assign_str(sessname, srv->sessname);
+ ),
+
+ TP_printk("Session %s using proto-ver %d (clt-ver: %d, srv-ver: %d)",
+ __get_str(sessname),
+ __entry->proto_ver,
+ __entry->clt_ver,
+ __entry->srv_ver
+ )
+);
+
+TRACE_DEFINE_ENUM(RNBD_ACCESS_RO);
+TRACE_DEFINE_ENUM(RNBD_ACCESS_RW);
+TRACE_DEFINE_ENUM(RNBD_ACCESS_MIGRATION);
+
+#define show_rnbd_access_mode(x) \
+ __print_symbolic(x, \
+ { RNBD_ACCESS_RO, "RO" }, \
+ { RNBD_ACCESS_RW, "RW" }, \
+ { RNBD_ACCESS_MIGRATION, "MIGRATION" })
+
+TRACE_EVENT(process_msg_open,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_open *msg),
+
+ TP_ARGS(srv, msg),
+
+ TP_STRUCT__entry(
+ __field(u8, access_mode)
+ __string(sessname, srv->sessname)
+ __string(dev_name, msg->dev_name)
+ ),
+
+ TP_fast_assign(
+ __entry->access_mode = msg->access_mode;
+ __assign_str(sessname, srv->sessname);
+ __assign_str(dev_name, msg->dev_name);
+ ),
+
+ TP_printk("Open message received: session='%s' path='%s' access_mode=%s",
+ __get_str(sessname),
+ __get_str(dev_name),
+ show_rnbd_access_mode(__entry->access_mode)
+ )
+);
+
+TRACE_EVENT(process_msg_close,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_close *msg),
+
+ TP_ARGS(srv, msg),
+
+ TP_STRUCT__entry(
+ __field(u32, device_id)
+ __string(sessname, srv->sessname)
+ ),
+
+ TP_fast_assign(
+ __entry->device_id = le32_to_cpu(msg->device_id);
+ __assign_str(sessname, srv->sessname);
+ ),
+
+ TP_printk("Close message received: session='%s' device id='%d'",
+ __get_str(sessname),
+ __entry->device_id
+ )
+);
+
+#endif /* _TRACE_RNBD_SRV_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rnbd-srv-trace
+#include <trace/define_trace.h>
+
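A brief, hedged note on how this header is consumed (function names below are only an illustration): exactly one translation unit, rnbd-srv-trace.c above, defines CREATE_TRACE_POINTS before including the header so the event bodies are emitted once; any other rnbd-srv source file just includes the header and calls the generated trace_*() helpers:

#include "rnbd-srv-trace.h"	/* no CREATE_TRACE_POINTS here */

/* Illustrative call site; rnbd-srv.c below does the equivalent in
 * create_sess()/destroy_sess().
 */
static void example_on_session_created(struct rnbd_srv_session *srv)
{
	trace_create_sess(srv);
}

With CONFIG_TRACING enabled, the events then appear under /sys/kernel/tracing/events/rnbd_srv/ and can be toggled per event.
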
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 5e08da277ddf..08b041159cd3 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -13,7 +13,7 @@
#include <linux/blkdev.h>
#include "rnbd-srv.h"
-#include "rnbd-srv-dev.h"
+#include "rnbd-srv-trace.h"
MODULE_DESCRIPTION("RDMA Network Block Device Server");
MODULE_LICENSE("GPL");
@@ -84,18 +84,6 @@ static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
kref_put(&sess_dev->kref, rnbd_sess_dev_release);
}
-void rnbd_endio(void *priv, int error)
-{
- struct rnbd_io_private *rnbd_priv = priv;
- struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
-
- rnbd_put_sess_dev(sess_dev);
-
- rtrs_srv_resp_rdma(rnbd_priv->id, error);
-
- kfree(priv);
-}
-
static struct rnbd_srv_sess_dev *
rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
{
@@ -116,7 +104,13 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
static void rnbd_dev_bi_end_io(struct bio *bio)
{
- rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status));
+ struct rnbd_io_private *rnbd_priv = bio->bi_private;
+ struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
+
+ rnbd_put_sess_dev(sess_dev);
+ rtrs_srv_resp_rdma(rnbd_priv->id, blk_status_to_errno(bio->bi_status));
+
+ kfree(rnbd_priv);
bio_put(bio);
}
@@ -132,6 +126,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
struct bio *bio;
short prio;
+ trace_process_rdma(srv_sess, msg, id, datalen, usrlen);
+
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -149,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
+ bio = bio_alloc(sess_dev->bdev, 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@@ -223,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- rnbd_dev_close(sess_dev->rnbd_dev);
+ blkdev_put(sess_dev->bdev, sess_dev->open_flags);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (sess_dev->open_flags & FMODE_WRITE)
@@ -244,6 +240,8 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess)
if (xa_empty(&srv_sess->index_idr))
goto out;
+ trace_destroy_sess(srv_sess);
+
mutex_lock(&srv_sess->lock);
xa_for_each(&srv_sess->index_idr, index, sess_dev)
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
@@ -290,6 +288,8 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
rtrs_srv_set_sess_priv(rtrs, srv_sess);
+ trace_create_sess(srv_sess);
+
return 0;
}
@@ -332,23 +332,24 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
mutex_unlock(&sess->lock);
}
-static int process_msg_close(struct rnbd_srv_session *srv_sess,
+static void process_msg_close(struct rnbd_srv_session *srv_sess,
void *data, size_t datalen, const void *usr,
size_t usrlen)
{
const struct rnbd_msg_close *close_msg = usr;
struct rnbd_srv_sess_dev *sess_dev;
+ trace_process_msg_close(srv_sess, close_msg);
+
sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
srv_sess);
if (IS_ERR(sess_dev))
- return 0;
+ return;
rnbd_put_sess_dev(sess_dev);
mutex_lock(&srv_sess->lock);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
- return 0;
}
static int process_msg_open(struct rnbd_srv_session *srv_sess,
@@ -378,7 +379,7 @@ static int rnbd_srv_rdma_ev(void *priv,
case RNBD_MSG_IO:
return process_rdma(srv_sess, id, data, datalen, usr, usrlen);
case RNBD_MSG_CLOSE:
- ret = process_msg_close(srv_sess, data, datalen, usr, usrlen);
+ process_msg_close(srv_sess, data, datalen, usr, usrlen);
break;
case RNBD_MSG_OPEN:
ret = process_msg_open(srv_sess, usr, usrlen, data, datalen);
@@ -393,6 +394,11 @@ static int rnbd_srv_rdma_ev(void *priv,
return -EINVAL;
}
+ /*
+ * Since ret is passed to rtrs to handle the failure case, we
+ * just return 0 at the end; otherwise callers in rtrs would call
+ * send_io_resp_imm() again and print a redundant error message.
+ */
rtrs_srv_resp_rdma(id, ret);
return 0;
}
@@ -504,14 +510,14 @@ static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
}
static struct rnbd_srv_dev *
-rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
+rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
struct rnbd_srv_session *srv_sess,
enum rnbd_access_mode access_mode)
{
int ret;
struct rnbd_srv_dev *new_dev, *dev;
- new_dev = rnbd_srv_init_srv_dev(rnbd_dev->bdev);
+ new_dev = rnbd_srv_init_srv_dev(bdev);
if (IS_ERR(new_dev))
return new_dev;
@@ -531,41 +537,32 @@ rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
- struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+ struct block_device *bdev = sess_dev->bdev;
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
- rsp->device_id =
- cpu_to_le32(sess_dev->device_id);
- rsp->nsectors =
- cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk));
- rsp->logical_block_size =
- cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev));
- rsp->physical_block_size =
- cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev));
- rsp->max_segments =
- cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
+ rsp->device_id = cpu_to_le32(sess_dev->device_id);
+ rsp->nsectors = cpu_to_le64(bdev_nr_sectors(bdev));
+ rsp->logical_block_size = cpu_to_le16(bdev_logical_block_size(bdev));
+ rsp->physical_block_size = cpu_to_le16(bdev_physical_block_size(bdev));
+ rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev));
rsp->max_hw_sectors =
- cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
+ cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));
rsp->max_write_same_sectors = 0;
- rsp->max_discard_sectors =
- cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
- rsp->discard_granularity =
- cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
- rsp->discard_alignment =
- cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
- rsp->secure_discard =
- cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
+ rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev));
+ rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev));
+ rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev));
+ rsp->secure_discard = cpu_to_le16(bdev_max_secure_erase_sectors(bdev));
rsp->cache_policy = 0;
- if (bdev_write_cache(rnbd_dev->bdev))
+ if (bdev_write_cache(bdev))
rsp->cache_policy |= RNBD_WRITEBACK;
- if (bdev_fua(rnbd_dev->bdev))
+ if (bdev_fua(bdev))
rsp->cache_policy |= RNBD_FUA;
}
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct rnbd_dev *rnbd_dev, fmode_t open_flags,
+ struct block_device *bdev, fmode_t open_flags,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -577,7 +574,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
- sdev->rnbd_dev = rnbd_dev;
+ sdev->bdev = bdev;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->open_flags = open_flags;
@@ -643,9 +640,8 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
struct rnbd_msg_sess_info_rsp *rsp = data;
srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
- pr_debug("Session %s using protocol version %d (client version: %d, server version: %d)\n",
- srv_sess->sessname, srv_sess->ver,
- sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+
+ trace_process_msg_sess_info(srv_sess, sess_info_msg);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
rsp->ver = srv_sess->ver;
@@ -685,14 +681,13 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
+ struct block_device *bdev;
fmode_t open_flags;
char *full_path;
- struct rnbd_dev *rnbd_dev;
struct rnbd_msg_open_rsp *rsp = data;
- pr_debug("Open message received: session='%s' path='%s' access_mode=%d\n",
- srv_sess->sessname, open_msg->dev_name,
- open_msg->access_mode);
+ trace_process_msg_open(srv_sess, open_msg);
+
open_flags = FMODE_READ;
if (open_msg->access_mode != RNBD_ACCESS_RO)
open_flags |= FMODE_WRITE;
@@ -725,25 +720,25 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- rnbd_dev = rnbd_dev_open(full_path, open_flags);
- if (IS_ERR(rnbd_dev)) {
- pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
- full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
- ret = PTR_ERR(rnbd_dev);
+ bdev = blkdev_get_by_path(full_path, open_flags, THIS_MODULE);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
+ full_path, srv_sess->sessname, ret);
goto free_path;
}
- srv_dev = rnbd_srv_get_or_create_srv_dev(rnbd_dev, srv_sess,
+ srv_dev = rnbd_srv_get_or_create_srv_dev(bdev, srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(srv_dev));
ret = PTR_ERR(srv_dev);
- goto rnbd_dev_close;
+ goto blkdev_put;
}
srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
- rnbd_dev, open_flags,
+ bdev, open_flags,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
@@ -758,7 +753,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -800,8 +795,8 @@ srv_dev_put:
mutex_unlock(&srv_dev->lock);
}
rnbd_put_srv_dev(srv_dev);
-rnbd_dev_close:
- rnbd_dev_close(rnbd_dev);
+blkdev_put:
+ blkdev_put(bdev, open_flags);
free_path:
kfree(full_path);
reject:
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 081bceaf4ae9..f5962fd31d62 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- struct rnbd_dev *rnbd_dev;
+ struct block_device *bdev;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;
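With the rnbd_dev wrapper gone, the server holds the struct block_device directly, so the open/close pairing reduces to the block layer primitives. A hedged, simplified sketch of that pairing (illustrative names; no error handling beyond the ERR_PTR convention):

#include <linux/blkdev.h>
#include <linux/module.h>

static struct block_device *example_open_bdev(const char *path, fmode_t mode)
{
	/* Returns ERR_PTR() on failure, as used in process_msg_open() above. */
	return blkdev_get_by_path(path, mode, THIS_MODULE);
}

static void example_close_bdev(struct block_device *bdev, fmode_t mode)
{
	/* Must be called with the same mode the device was opened with. */
	blkdev_put(bdev, mode);
}
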
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2b7d1db5c4a7..2651bf41dde3 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -49,7 +49,9 @@
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
| UBLK_F_URING_CMD_COMP_IN_TASK \
- | UBLK_F_NEED_GET_DATA)
+ | UBLK_F_NEED_GET_DATA \
+ | UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
@@ -119,7 +121,7 @@ struct ublk_queue {
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
- bool abort_work_pending;
+ bool force_abort;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
struct ublk_io ios[0];
@@ -161,6 +163,7 @@ struct ublk_device {
* monitor each queue's daemon periodically
*/
struct delayed_work monitor_work;
+ struct work_struct quiesce_work;
struct work_struct stop_work;
};
@@ -323,6 +326,30 @@ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
PAGE_SIZE);
}
+static inline bool ublk_queue_can_use_recovery_reissue(
+ struct ublk_queue *ubq)
+{
+ if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
+ (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
+ return true;
+ return false;
+}
+
+static inline bool ublk_queue_can_use_recovery(
+ struct ublk_queue *ubq)
+{
+ if (ubq->flags & UBLK_F_USER_RECOVERY)
+ return true;
+ return false;
+}
+
+static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+{
+ if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
+ return true;
+ return false;
+}
+
static void ublk_free_disk(struct gendisk *disk)
{
struct ublk_device *ub = disk->private_data;
@@ -555,7 +582,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
}
@@ -605,19 +632,24 @@ static void ublk_complete_rq(struct request *req)
}
/*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So no lock is needed.
*
* Also aborting may not be started yet, keep in mind that one failed
* request may be issued by block layer again.
*/
-static void __ublk_fail_req(struct ublk_io *io, struct request *req)
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
io->flags |= UBLK_IO_FLAG_ABORTED;
- blk_mq_end_request(req, BLK_STS_IOERR);
+ if (ublk_queue_can_use_recovery_reissue(ubq))
+ blk_mq_requeue_request(req, false);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
}
}
@@ -638,23 +670,40 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
#define UBLK_REQUEUE_DELAY_MS 3
+static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ struct request *rq)
+{
+ /* We cannot process this rq so just requeue it. */
+ if (ublk_queue_can_use_recovery(ubq))
+ blk_mq_requeue_request(rq, false);
+ else
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+
+ mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+}
+
static inline void __ublk_rq_task_work(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_device *ub = ubq->dev;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- bool task_exiting = current != ubq->ubq_daemon ||
- (current->flags & PF_EXITING);
unsigned int mapped_bytes;
pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
- if (unlikely(task_exiting)) {
- blk_mq_end_request(req, BLK_STS_IOERR);
- mod_delayed_work(system_wq, &ub->monitor_work, 0);
+ /*
+ * Task is exiting if either:
+ *
+ * (1) current != ubq_daemon.
+ * io_uring_cmd_complete_in_task() tries to run task_work
+ * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
+ *
+ * (2) current->flags & PF_EXITING.
+ */
+ if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
+ __ublk_abort_rq(ubq, req);
return;
}
@@ -680,6 +729,11 @@ static inline void __ublk_rq_task_work(struct request *req)
* do the copy work.
*/
io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
}
mapped_bytes = ublk_map_io(ubq, req, io);
@@ -734,13 +788,24 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
res = ublk_setup_iod(ubq, rq);
if (unlikely(res != BLK_STS_OK))
return BLK_STS_IOERR;
+ /* With recovery feature enabled, force_abort is set in
+ * ublk_stop_dev() before calling del_gendisk(). We have to
+ * abort all requeued and new rqs here to let del_gendisk()
+ * move on. Besides, we must not call io_uring_cmd_complete_in_task()
+ * here, to avoid a UAF on the io_uring ctx.
+ *
+ * Note: force_abort is guaranteed to be seen because it is set
+ * before the request queue is unquiesced.
+ */
+ if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ return BLK_STS_IOERR;
blk_mq_start_request(bd->rq);
if (unlikely(ubq_daemon_is_dying(ubq))) {
fail:
- mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
- return BLK_STS_IOERR;
+ __ublk_abort_rq(ubq, rq);
+ return BLK_STS_OK;
}
if (ublk_can_use_task_work(ubq)) {
@@ -751,9 +816,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
goto fail;
} else {
- struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ /*
+ * If the check passes, we know that this is a re-issued request aborted
+ * previously in monitor_work because the ubq_daemon (cmd's task) is
+ * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+ * because this ioucmd's io_uring context may be freed now if no inflight
+ * ioucmd exists; otherwise we may cause a null-deref in ctx->fallback_work.
+ *
+ * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request (releasing
+ * the tag). Then the request is re-started (allocating the tag) and we are here.
+ * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+ * guarantees that this is a re-issued request aborted previously.
+ */
+ if ((io->flags & UBLK_IO_FLAG_ABORTED))
+ goto fail;
+
pdu->req = rq;
io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
}
@@ -895,7 +976,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
if (rq)
- __ublk_fail_req(io, rq);
+ __ublk_fail_req(ubq, io, rq);
}
}
ublk_put_device(ub);
@@ -911,7 +992,10 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
struct ublk_queue *ubq = ublk_get_queue(ub, i);
if (ubq_daemon_is_dying(ubq)) {
- schedule_work(&ub->stop_work);
+ if (ublk_queue_can_use_recovery(ubq))
+ schedule_work(&ub->quiesce_work);
+ else
+ schedule_work(&ub->stop_work);
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
@@ -919,12 +1003,13 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
}
/*
- * We can't schedule monitor work after ublk_remove() is started.
+ * We can't schedule monitor work once ub's state is no longer UBLK_S_DEV_LIVE,
+ * i.e. after ublk_remove() or __ublk_quiesce_dev() has started.
*
* No need ub->mutex, monitor work are canceled after state is marked
- * as DEAD, so DEAD state is observed reliably.
+ * as not LIVE, so new state is observed reliably.
*/
- if (ub->dev_info.state != UBLK_S_DEV_DEAD)
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
schedule_delayed_work(&ub->monitor_work,
UBLK_DAEMON_MONITOR_PERIOD);
}
@@ -961,12 +1046,97 @@ static void ublk_cancel_dev(struct ublk_device *ub)
ublk_cancel_queue(ublk_get_queue(ub, i));
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static bool ublk_check_inflight_rq(struct request *rq, void *data)
+{
+ bool *idle = data;
+
+ if (blk_mq_request_started(rq)) {
+ *idle = false;
+ return false;
+ }
+ return true;
+}
+
+static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
+{
+ bool idle;
+
+ WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
+ while (true) {
+ idle = true;
+ blk_mq_tagset_busy_iter(&ub->tag_set,
+ ublk_check_inflight_rq, &idle);
+ if (idle)
+ break;
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ }
+}
+
+static void __ublk_quiesce_dev(struct ublk_device *ub)
+{
+ pr_devel("%s: quiesce ub: dev_id %d state %s\n",
+ __func__, ub->dev_info.dev_id,
+ ub->dev_info.state == UBLK_S_DEV_LIVE ?
+ "LIVE" : "QUIESCED");
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ublk_wait_tagset_rqs_idle(ub);
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ ublk_cancel_dev(ub);
+ /* We are going to release the task_struct of ubq_daemon and reset
+ * ->ubq_daemon to NULL. After that, checking ubq_daemon in monitor_work
+ * would be a UAF. Besides, monitor_work is not necessary in the QUIESCED
+ * state since we have already scheduled quiesce_work and quiesced all ubqs.
+ *
+ * Do not let monitor_work schedule itself if the state is QUIESCED: cancel
+ * it here and re-schedule it in END_USER_RECOVERY to avoid the UAF.
+ */
+ cancel_delayed_work_sync(&ub->monitor_work);
+}
+
+static void ublk_quiesce_work_fn(struct work_struct *work)
{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, quiesce_work);
+
mutex_lock(&ub->mutex);
if (ub->dev_info.state != UBLK_S_DEV_LIVE)
goto unlock;
+ __ublk_quiesce_dev(ub);
+ unlock:
+ mutex_unlock(&ub->mutex);
+}
+
+static void ublk_unquiesce_dev(struct ublk_device *ub)
+{
+ int i;
+ pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ __func__, ub->dev_info.dev_id,
+ ub->dev_info.state == UBLK_S_DEV_LIVE ?
+ "LIVE" : "QUIESCED");
+ /* quiesce_work has run. We let requeued rqs be aborted
+ * before running fallback_wq. "force_abort" must be seen
+ * after the request queue is unquiesced. Then del_gendisk()
+ * can move on.
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->force_abort = true;
+
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ /* We may have requeued some rqs in ublk_quiesce_queue() */
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto unlock;
+ if (ublk_can_use_recovery(ub)) {
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ __ublk_quiesce_dev(ub);
+ ublk_unquiesce_dev(ub);
+ }
del_gendisk(ub->ub_disk);
ub->dev_info.state = UBLK_S_DEV_DEAD;
ub->dev_info.ublksrv_pid = -1;
@@ -1290,6 +1460,7 @@ static void ublk_remove(struct ublk_device *ub)
{
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
+ cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
put_device(&ub->cdev_dev);
}
@@ -1466,6 +1637,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->mm_lock);
+ INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
@@ -1586,6 +1758,7 @@ static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
+ cancel_work_sync(&ub->quiesce_work);
ublk_put_device(ub);
return 0;
@@ -1688,6 +1861,116 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
return ret;
}
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
+ /* All old ioucmds have to be completed */
+ WARN_ON_ONCE(ubq->nr_io_ready);
+ /* old daemon is PF_EXITING, put it now */
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+ ubq->ubq_daemon = NULL;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /* forget everything now and be ready for new FETCH_REQ */
+ io->flags = 0;
+ io->cmd = NULL;
+ io->addr = 0;
+ }
+}
+
+static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ struct ublk_device *ub;
+ int ret = -EINVAL;
+ int i;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return ret;
+
+ mutex_lock(&ub->mutex);
+ if (!ublk_can_use_recovery(ub))
+ goto out_unlock;
+ /*
+ * START_RECOVERY is only allowed after:
+ *
+ * (1) UB_STATE_OPEN is not set, which means the dying process has exited
+ * and the related io_uring ctx is freed, so the file struct of /dev/ublkcX
+ * is released.
+ *
+ * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
+ * (a) has quiesced the request queue
+ * (b) has requeued every inflight rq whose io_flags is ACTIVE
+ * (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
+ * (d) has completed/canceled all ioucmds owned by the dying process
+ */
+ if (test_bit(UB_STATE_OPEN, &ub->state) ||
+ ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+ /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_queues_ready = 0;
+ init_completion(&ub->completion);
+ ret = 0;
+ out_unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ int ublksrv_pid = (int)header->data[0];
+ struct ublk_device *ub;
+ int ret = -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return ret;
+
+ pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
+ __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ /* wait until the new ubq_daemons have sent all FETCH_REQs */
+ wait_for_completion_interruptible(&ub->completion);
+ pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
+ __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+
+ mutex_lock(&ub->mutex);
+ if (!ublk_can_use_recovery(ub))
+ goto out_unlock;
+
+ if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ ub->dev_info.ublksrv_pid = ublksrv_pid;
+ pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
+ __func__, ublksrv_pid, header->dev_id);
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ pr_devel("%s: queue unquiesced, dev id %d.\n",
+ __func__, header->dev_id);
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
+ ret = 0;
+ out_unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+ return ret;
+}
+
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -1729,6 +2012,12 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_CMD_SET_PARAMS:
ret = ublk_ctrl_set_params(cmd);
break;
+ case UBLK_CMD_START_USER_RECOVERY:
+ ret = ublk_ctrl_start_recovery(cmd);
+ break;
+ case UBLK_CMD_END_USER_RECOVERY:
+ ret = ublk_ctrl_end_recovery(cmd);
+ break;
default:
break;
}
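To keep the new pieces straight, here is a hedged summary of the recovery sequence the hunks above implement, written as a C comment (the userspace/ublksrv side is only indicated; this is a reading aid, not code from the driver):

/*
 * ubq_daemon dies
 *   -> monitor_work notices PF_EXITING
 *   -> quiesce_work: blk_mq_quiesce_queue(), wait for started rqs to drain,
 *      requeue/abort the rest, state = UBLK_S_DEV_QUIESCED,
 *      cancel monitor_work (its ubq_daemon checks would otherwise UAF)
 *
 * new ublksrv process
 *   -> UBLK_CMD_START_USER_RECOVERY: ublk_queue_reinit() on every queue,
 *      old ubq_daemon released, wait for fresh FETCH_REQs
 *   -> UBLK_CMD_END_USER_RECOVERY: record the new ublksrv_pid,
 *      blk_mq_unquiesce_queue(), kick the requeue list,
 *      state = UBLK_S_DEV_LIVE, re-arm monitor_work
 */
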
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30255fcaf181..3f4739d52268 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -322,14 +322,14 @@ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(status))
return status;
- blk_mq_start_request(req);
-
vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
if (unlikely(vbr->sg_table.nents < 0)) {
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
+ blk_mq_start_request(req);
+
return BLK_STS_OK;
}
@@ -391,8 +391,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
}
static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist,
- struct request **requeue_list)
+ struct request **rqlist)
{
unsigned long flags;
int err;
@@ -408,7 +407,7 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
if (err) {
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
- rq_list_add(requeue_list, req);
+ blk_mq_requeue_request(req, true);
}
}
@@ -436,7 +435,7 @@ static void virtio_queue_rqs(struct request **rqlist)
if (!next || req->mq_hctx != next->mq_hctx) {
req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ kick = virtblk_add_req_batch(vq, rqlist);
if (kick)
virtqueue_notify(vq->vq);
@@ -802,7 +801,7 @@ static const struct attribute_group *virtblk_attr_groups[] = {
NULL,
};
-static int virtblk_map_queues(struct blk_mq_tag_set *set)
+static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
int i, qoff;
@@ -827,8 +826,6 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
else
blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
}
-
- return 0;
}
static void virtblk_complete_batch(struct io_comp_batch *iob)
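The virtio-blk hunk above moves blk_mq_start_request() after the data mapping. A hedged sketch of the general rule it follows (helper names are hypothetical): do every step that can fail with BLK_STS_RESOURCE before the request is marked started, so the block layer can safely re-issue it.

#include <linux/blk-mq.h>

static int example_map_data(struct request *req)
{
	return 0;	/* hypothetical mapping step; may fail with -ENOMEM */
}

static blk_status_t example_prep_rq(struct blk_mq_hw_ctx *hctx,
				    struct request *req)
{
	if (example_map_data(req) < 0)
		return BLK_STS_RESOURCE;	/* never started: safe to retry */

	blk_mq_start_request(req);		/* only once nothing can fail */
	return BLK_STS_OK;
}
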
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index bda5c815e441..a28473470e66 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -226,6 +226,9 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ /* Connect-time cached feature_persistent parameter value */
+ unsigned int feature_gnt_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index ee7ad2fb432d..c0227dfa4688 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -907,7 +907,7 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- be->blkif->vbd.feature_gnt_persistent);
+ be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1085,7 +1085,9 @@ static int connect_ring(struct backend_info *be)
return -ENOSYS;
}
- blkif->vbd.feature_gnt_persistent = feature_persistent &&
+ blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
+ blkif->vbd.feature_gnt_persistent =
+ blkif->vbd.feature_gnt_persistent_parm &&
xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8e56e69fb4c4..35b9bcad9db9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -213,6 +213,9 @@ struct blkfront_info
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ /* Connect-time cached feature_persistent parameter */
+ unsigned int feature_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
@@ -1756,6 +1759,12 @@ abort_transaction:
return err;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1847,8 +1856,9 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- info->feature_persistent);
+ info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1916,12 +1926,6 @@ static int negotiate_mq(struct blkfront_info *info)
return 0;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2281,7 +2285,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (feature_persistent)
+ if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 92cb929a45b7..e551433cd107 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -499,7 +499,7 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- strlcpy(file_name, buf, PATH_MAX);
+ strscpy(file_name, buf, PATH_MAX);
/* ignore trailing newline */
sz = strlen(file_name);
if (sz > 0 && file_name[sz - 1] == '\n')
@@ -1031,7 +1031,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
- strlcpy(compressor, buf, sizeof(compressor));
+ strscpy(compressor, buf, sizeof(compressor));
/* ignore trailing newline */
sz = strlen(compressor);
if (sz > 0 && compressor[sz - 1] == '\n')
@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int version = 2;
+ int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu\n",
+ "version: %d\n%8llu %8llu\n",
version,
+ (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret = 0;
unsigned long alloced_pages;
- unsigned long handle = 0;
+ unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);
+compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
+ zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
-
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
-
+ /*
+ * Handle allocation has 2 paths:
+ * a) fast path is executed with preemption disabled (for
+ * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+ * since we can't sleep;
+ * b) slow path enables preemption and attempts to allocate
+ * the page with __GFP_DIRECT_RECLAIM bit set. We have to
+ * put the per-cpu compression stream and, thus, re-do
+ * the compression once the handle is allocated.
+ *
+ * If we have a 'non-null' handle here then we are coming
+ * from the slow path and the handle has already been allocated.
+ */
+ if (IS_ERR((void *)handle))
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
+ atomic64_inc(&zram->stats.writestall);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (!IS_ERR((void *)handle))
+ goto compress_again;
return PTR_ERR((void *)handle);
}
@@ -1948,11 +1969,12 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
- strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+ strscpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 158c91e54850..80c3b43b4828 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -81,6 +81,7 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 818681c89db8..a657e9a3e96a 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -449,6 +449,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x17: /* TyP */
case 0x18: /* Slr */
case 0x19: /* Slr-F */
+ case 0x1b: /* Mgr */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -2330,6 +2331,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x17:
case 0x18:
case 0x19:
+ case 0x1b:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -2439,15 +2441,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
+ if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
+ set_bit(HCI_QUIRK_VALID_LE_STATES,
+ &hdev->quirks);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
case 0x0b: /* SfP */
- case 0x0c: /* WsP */
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ fallthrough;
+ case 0x0c: /* WsP */
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
@@ -2455,11 +2462,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
- /* Valid LE States quirk for JfP/ThP familiy */
- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
- set_bit(HCI_QUIRK_VALID_LE_STATES,
- &hdev->quirks);
-
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2530,9 +2532,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
- /* Valid LE States quirk for JfP/ThP familiy */
- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ /* Set Valid LE States quirk */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2542,6 +2543,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x17:
case 0x18:
case 0x19:
+ case 0x1b:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 15caa6469538..271963805a38 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -426,6 +426,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK |
@@ -438,6 +440,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -466,6 +470,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -478,9 +485,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -516,19 +532,17 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
- /* Additional Realtek 8761B Bluetooth devices */
+ /* Additional Realtek 8761BUV Bluetooth devices */
{ USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
-
- /* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
-
- /* Additional Realtek 8761BUV Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -2477,15 +2491,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ /* WMT cmd/event doesn't follow the generic HCI cmd/event handling;
+ * it needs to constantly poll the control pipe until the host has
+ * received the WMT event. Thus, we must explicitly take a PM reference
+ * on the USB interface to prevent it from being autosuspended
+ * while a WMT cmd/event is in progress.
+ */
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto err_free_wc;
+
err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ usb_autopm_put_interface(data->intf);
goto err_free_wc;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
+
+ usb_autopm_put_interface(data->intf);
+
if (err < 0)
goto err_free_wc;
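The btusb change above brackets the whole WMT exchange with a runtime-PM reference. A hedged sketch of the bracketing pattern (example_* names and the vendor operation are hypothetical):

#include <linux/usb.h>

static int example_vendor_op(struct usb_interface *intf)
{
	return 0;	/* hypothetical: send command, poll for the event */
}

static int example_do_wmt(struct usb_interface *intf)
{
	int err;

	err = usb_autopm_get_interface(intf);	/* blocks autosuspend, may resume */
	if (err < 0)
		return err;

	err = example_vendor_op(intf);

	usb_autopm_put_interface(intf);		/* drop on every exit path */
	return err;
}
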
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f537673ede17..865112e96ff9 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -493,6 +493,11 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
+ if (percpu_init_rwsem(&hu->proto_lock)) {
+ BT_ERR("Can't allocate semaphore structure");
+ kfree(hu);
+ return -ENOMEM;
+ }
tty->disc_data = hu;
hu->tty = tty;
@@ -505,8 +510,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- percpu_init_rwsem(&hu->proto_lock);
-
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index c0e5f42ec6b7..f16fd79bc02b 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -310,11 +310,12 @@ int hci_uart_register_device(struct hci_uart *hu,
serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops);
+ if (percpu_init_rwsem(&hu->proto_lock))
+ return -ENOMEM;
+
err = serdev_device_open(hu->serdev);
if (err)
- return err;
-
- percpu_init_rwsem(&hu->proto_lock);
+ goto err_rwsem;
err = p->open(hu);
if (err)
@@ -389,6 +390,8 @@ err_alloc:
p->close(hu);
err_open:
serdev_device_close(hu->serdev);
+err_rwsem:
+ percpu_free_rwsem(&hu->proto_lock);
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
@@ -410,5 +413,6 @@ void hci_uart_unregister_device(struct hci_uart *hu)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
+ percpu_free_rwsem(&hu->proto_lock);
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
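Both Bluetooth UART hunks above fix the same oversight: percpu_init_rwsem() allocates per-CPU state and can fail, so its return value must be checked, and percpu_free_rwsem() must run on teardown and on later error paths. A hedged sketch of the pairing (struct and function names are illustrative):

#include <linux/percpu-rwsem.h>
#include <linux/errno.h>

struct example_uart {
	struct percpu_rw_semaphore proto_lock;
};

static int example_setup(struct example_uart *eu)
{
	if (percpu_init_rwsem(&eu->proto_lock))
		return -ENOMEM;
	return 0;
}

static void example_teardown(struct example_uart *eu)
{
	percpu_free_rwsem(&eu->proto_lock);
}
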
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 2e564803e786..5b65a48f17e7 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -85,7 +85,7 @@ static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
ndelay(LPC_NSEC_PERWAIT);
} while (--waitcnt);
- return -ETIME;
+ return -ETIMEDOUT;
}
/*
@@ -347,7 +347,7 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
unsigned long sys_port;
resource_size_t len = resource_size(res);
- sys_port = logic_pio_trans_hwaddr(&host->fwnode, res->start, len);
+ sys_port = logic_pio_trans_hwaddr(acpi_fwnode_handle(host), res->start, len);
if (sys_port == ~0UL)
return -EFAULT;
@@ -472,9 +472,7 @@ static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_us
struct hisi_lpc_acpi_cell {
const char *hid;
- const char *name;
- void *pdata;
- size_t pdata_size;
+ const struct platform_device_info *pdevinfo;
};
static void hisi_lpc_acpi_remove(struct device *hostdev)
@@ -505,28 +503,45 @@ static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
/* ipmi */
{
.hid = "IPI0001",
- .name = "hisi-lpc-ipmi",
+ .pdevinfo = (struct platform_device_info []) {
+ {
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "hisi-lpc-ipmi",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ },
+ },
},
/* 8250-compatible uart */
{
.hid = "HISI1031",
- .name = "serial8250",
- .pdata = (struct plat_serial8250_port []) {
+ .pdevinfo = (struct platform_device_info []) {
{
- .iobase = res->start,
- .uartclk = 1843200,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF,
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "serial8250",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ .data = (struct plat_serial8250_port []) {
+ {
+ .iobase = res->start,
+ .uartclk = 1843200,
+ .iotype = UPIO_PORT,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {}
+ },
+ .size_data = 2 * sizeof(struct plat_serial8250_port),
},
- {}
},
- .pdata_size = 2 *
- sizeof(struct plat_serial8250_port),
},
{}
};
- for (; cell && cell->name; cell++) {
+ for (; cell && cell->hid; cell++) {
if (!strcmp(cell->hid, hid)) {
found = true;
break;
@@ -540,31 +555,12 @@ static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
return 0;
}
- pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
-
- pdev->dev.parent = hostdev;
- ACPI_COMPANION_SET(&pdev->dev, child);
-
- ret = platform_device_add_resources(pdev, res, num_res);
- if (ret)
- goto fail;
-
- ret = platform_device_add_data(pdev, cell->pdata, cell->pdata_size);
- if (ret)
- goto fail;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
+ pdev = platform_device_register_full(cell->pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
acpi_device_set_enumerated(child);
return 0;
-
-fail:
- platform_device_put(pdev);
- return ret;
}
/*
@@ -589,11 +585,6 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
return ret;
}
-
-static const struct acpi_device_id hisi_lpc_acpi_match[] = {
- {"HISI0191"},
- {}
-};
#else
static int hisi_lpc_acpi_probe(struct device *dev)
{
@@ -615,11 +606,9 @@ static void hisi_lpc_acpi_remove(struct device *hostdev)
static int hisi_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_device = ACPI_COMPANION(dev);
struct logic_pio_hwaddr *range;
struct hisi_lpc_dev *lpcdev;
resource_size_t io_end;
- struct resource *res;
int ret;
lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
@@ -628,8 +617,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
spin_lock_init(&lpcdev->cycle_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpcdev->membase = devm_ioremap_resource(dev, res);
+ lpcdev->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpcdev->membase))
return PTR_ERR(lpcdev->membase);
@@ -637,7 +625,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
if (!range)
return -ENOMEM;
- range->fwnode = dev->fwnode;
+ range->fwnode = dev_fwnode(dev);
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
range->hostdata = lpcdev;
@@ -651,7 +639,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
}
/* register the LPC host PIO resources */
- if (acpi_device)
+ if (is_acpi_device_node(range->fwnode))
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -672,11 +660,10 @@ static int hisi_lpc_probe(struct platform_device *pdev)
static int hisi_lpc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_device = ACPI_COMPANION(dev);
struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
struct logic_pio_hwaddr *range = lpcdev->io_host;
- if (acpi_device)
+ if (is_acpi_device_node(range->fwnode))
hisi_lpc_acpi_remove(dev);
else
of_platform_depopulate(dev);
@@ -692,11 +679,16 @@ static const struct of_device_id hisi_lpc_of_match[] = {
{}
};
+static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+ {"HISI0191"},
+ {}
+};
+
static struct platform_driver hisi_lpc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = hisi_lpc_of_match,
- .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
+ .acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
.remove = hisi_lpc_remove,
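The hisi_lpc rework above replaces the open-coded platform_device_alloc()/add_resources()/add_data()/add() sequence with a single platform_device_register_full() call driven by a struct platform_device_info. A hedged sketch of that one-call form (illustrative child name and parameters):

#include <linux/platform_device.h>

static struct platform_device *example_register_child(struct device *parent,
						       struct fwnode_handle *fwnode,
						       const struct resource *res,
						       unsigned int num_res)
{
	const struct platform_device_info info = {
		.parent  = parent,
		.fwnode  = fwnode,	/* replaces ACPI_COMPANION_SET() */
		.name    = "example-child",
		.id      = PLATFORM_DEVID_AUTO,
		.res     = res,
		.num_res = num_res,
	};

	/* Returns ERR_PTR() on failure and cleans up after itself. */
	return platform_device_register_full(&info);
}
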
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index f3aef77a6a4a..df0fbfee7b78 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -430,12 +430,25 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct mhi_event_ctxt *er_ctxt =
- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_event_ctxt *er_ctxt;
struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+ dma_addr_t ptr;
void *dev_rp;
+ /*
+ * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
+ * and by that time mhi_ctxt would already have been freed. So check that mhi_ctxt
+ * still exists before handling the IRQ.
+ */
+ if (!mhi_cntrl->mhi_ctxt) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev,
+ "mhi_ctxt has been freed\n");
+ return IRQ_HANDLED;
+ }
+
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ ptr = le64_to_cpu(er_ctxt->rp);
+
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9e545f2a5a26..fa2246da63c1 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -507,6 +507,8 @@ static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index db40037eb347..a0e9e80d92ee 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -341,14 +341,12 @@ static int ipmb_probe(struct i2c_client *client)
return 0;
}
-static int ipmb_remove(struct i2c_client *client)
+static void ipmb_remove(struct i2c_client *client)
{
struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
i2c_slave_unregister(client);
misc_deregister(&ipmb_dev->miscdev);
-
- return 0;
}
static const struct i2c_device_id ipmb_id[] = {
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index ab19b4b3317e..25c010c9ec25 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -424,7 +424,7 @@ static void ipmi_ipmb_request_events(void *send_info)
/* We don't fetch events here. */
}
-static int ipmi_ipmb_remove(struct i2c_client *client)
+static void ipmi_ipmb_remove(struct i2c_client *client)
{
struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
@@ -438,8 +438,6 @@ static int ipmi_ipmb_remove(struct i2c_client *client)
ipmi_ipmb_stop_thread(iidev);
ipmi_unregister_smi(iidev->intf);
-
- return 0;
}
static int ipmi_ipmb_probe(struct i2c_client *client)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index fc742ee9c046..13da021e7c6b 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1281,13 +1281,13 @@ static void shutdown_ssif(void *send_info)
}
}
-static int ssif_remove(struct i2c_client *client)
+static void ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
struct ssif_addr_info *addr_info;
if (!ssif_info)
- return 0;
+ return;
/*
* After this point, we won't deliver anything asynchronously
@@ -1303,8 +1303,6 @@ static int ssif_remove(struct i2c_client *client)
}
kfree(ssif_info);
-
- return 0;
}
static int read_response(struct i2c_client *client, unsigned char *resp)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 84ca98ed1dad..32a932a065a6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 3170d59d660c..a3aa411389e7 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -264,13 +264,11 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
* @param: client, the i2c_client description (TPM I2C description).
* @return: 0 in case of success.
*/
-static int st33zp24_i2c_remove(struct i2c_client *client)
+static void st33zp24_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
st33zp24_remove(chip);
-
- return 0;
}
static const struct i2c_device_id st33zp24_i2c_id[] = {
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index d5ac85558214..4be3677c1463 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -179,12 +179,11 @@ static int i2c_atmel_probe(struct i2c_client *client,
return tpm_chip_register(chip);
}
-static int i2c_atmel_remove(struct i2c_client *client)
+static void i2c_atmel_remove(struct i2c_client *client)
{
struct device *dev = &(client->dev);
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_chip_unregister(chip);
- return 0;
}
static const struct i2c_device_id i2c_atmel_id[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index a19d32cb4e94..fd3c3661e646 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -706,15 +706,13 @@ static int tpm_tis_i2c_probe(struct i2c_client *client,
return rc;
}
-static int tpm_tis_i2c_remove(struct i2c_client *client)
+static void tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = tpm_dev.chip;
tpm_chip_unregister(chip);
release_locality(chip, tpm_dev.locality, 1);
tpm_dev.client = NULL;
-
- return 0;
}
static struct i2c_driver tpm_tis_i2c_driver = {
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index b77c18e38662..95c37350cc8e 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -622,12 +622,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
return tpm_chip_register(chip);
}
-static int i2c_nuvoton_remove(struct i2c_client *client)
+static void i2c_nuvoton_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
tpm_chip_unregister(chip);
- return 0;
}
static const struct i2c_device_id i2c_nuvoton_id[] = {
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index ba0911b1d1ff..0692510dfcab 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -351,13 +351,12 @@ static int tpm_tis_i2c_probe(struct i2c_client *dev,
NULL);
}
-static int tpm_tis_i2c_remove(struct i2c_client *client)
+static void tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
- return 0;
}
static const struct i2c_device_id tpm_tis_i2c_id[] = {
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index 974479a1ec5a..77cea5b31c6e 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -763,20 +763,18 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_remove(struct i2c_client *client)
+static void tpm_cr50_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
struct device *dev = &client->dev;
if (!chip) {
dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n");
- return 0;
+ return;
}
tpm_chip_unregister(chip);
tpm_cr50_release_locality(chip, true);
-
- return 0;
}
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 1a098db12062..680f9d8d357c 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -726,6 +726,7 @@ void iproc_pll_clk_setup(struct device_node *node,
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@@ -773,7 +774,12 @@ void iproc_pll_clk_setup(struct device_node *node,
iclk = &iclk_array[0];
iclk->pll = pll;
- init.name = node->name;
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
init.ops = &iproc_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
@@ -793,13 +799,11 @@ void iproc_pll_clk_setup(struct device_node *node,
goto err_pll_register;
clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
- const char *clk_name;
-
memset(&init, 0, sizeof(init));
- parent_name = node->name;
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 73518009a0f2..876b37b8683c 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -203,7 +203,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
if (ret)
- return ret;
+ return 0;
return val;
}
@@ -220,7 +220,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
if (ret)
- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
clk_hw_get_name(hw), ret);
return ret;
@@ -288,7 +288,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
&min_rate);
if (ret) {
- dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
id, ret);
return ERR_PTR(ret);
}
@@ -344,8 +344,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct rpi_firmware_get_clocks_response *clks;
int ret;
+ /*
+ * The firmware doesn't guarantee that the last element of
+ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
+ * zero element as sentinel.
+ */
clks = devm_kcalloc(rpi->dev,
- RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
@@ -360,7 +365,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct raspberrypi_clk_variant *variant;
if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
- dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
+ dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+ clks->id, RPI_FIRMWARE_NUM_CLK_ID);
return -EINVAL;
}
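Editorial note: the "+ 1" in the allocation above reserves a zeroed sentinel entry so the consumer can stop at the first element whose id is zero even when the firmware fills every real slot. A small, self-contained sketch of sentinel-terminated iteration over such a table (illustrative names and values, not the driver's exact loop):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_clock_entry {
	uint32_t id;	/* an id of 0 terminates the list */
	uint32_t rate;
};

/* Walk entries until the zeroed sentinel slot is reached. */
static void walk_clocks(const struct fw_clock_entry *clks, size_t nr_slots)
{
	for (size_t i = 0; i < nr_slots && clks[i].id != 0; i++)
		printf("clock %u: %u Hz\n", clks[i].id, clks[i].rate);
}

int main(void)
{
	/* Three real entries plus one zeroed sentinel, mirroring kcalloc(n + 1, ...). */
	struct fw_clock_entry clks[4] = {
		{ .id = 1, .rate = 19200000 },
		{ .id = 5, .rate = 500000000 },
		{ .id = 9, .rate = 3000000 },
	};

	walk_clocks(clks, 4);
	return 0;
}
```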
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index 5467d941ddfd..1449d0537674 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -665,10 +665,9 @@ static int cdce706_probe(struct i2c_client *client)
cdce);
}
-static int cdce706_remove(struct i2c_client *client)
+static void cdce706_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
- return 0;
}
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index aa5c72bab83e..320d39922206 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -557,7 +557,7 @@ static int cs2000_version_print(struct cs2000_priv *priv)
return 0;
}
-static int cs2000_remove(struct i2c_client *client)
+static void cs2000_remove(struct i2c_client *client)
{
struct cs2000_priv *priv = i2c_get_clientdata(client);
struct device *dev = priv_to_dev(priv);
@@ -566,8 +566,6 @@ static int cs2000_remove(struct i2c_client *client)
of_clk_del_provider(np);
clk_hw_unregister(&priv->hw);
-
- return 0;
}
static int cs2000_probe(struct i2c_client *client)
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 4481c4303534..c028fa103bed 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -370,10 +370,9 @@ static int si514_probe(struct i2c_client *client)
return 0;
}
-static int si514_remove(struct i2c_client *client)
+static void si514_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
- return 0;
}
static const struct i2c_device_id si514_id[] = {
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 4bca73212662..0e528d7ba656 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1796,7 +1796,7 @@ cleanup:
return err;
}
-static int si5341_remove(struct i2c_client *client)
+static void si5341_remove(struct i2c_client *client)
{
struct clk_si5341 *data = i2c_get_clientdata(client);
int i;
@@ -1807,8 +1807,6 @@ static int si5341_remove(struct i2c_client *client)
if (data->clk[i].vddo_reg)
regulator_disable(data->clk[i].vddo_reg);
}
-
- return 0;
}
static const struct i2c_device_id si5341_id[] = {
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index b9f088c4ba2f..9e939c98a455 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1651,11 +1651,9 @@ static int si5351_i2c_probe(struct i2c_client *client)
return 0;
}
-static int si5351_i2c_remove(struct i2c_client *client)
+static void si5351_i2c_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
-
- return 0;
}
static struct i2c_driver si5351_driver = {
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 1ff8f32f734d..0a6d70c49726 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -498,10 +498,9 @@ static int si570_probe(struct i2c_client *client)
return 0;
}
-static int si570_remove(struct i2c_client *client)
+static void si570_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
- return 0;
}
static const struct of_device_id clk_si570_of_match[] = {
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index e5fbefd6ac2d..38f44b5b9b1b 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -200,7 +200,9 @@ static int tps68470_clk_probe(struct platform_device *pdev)
.flags = CLK_SET_RATE_GATE,
};
struct tps68470_clkdata *tps68470_clkdata;
+ struct tps68470_clk_consumer *consumer;
int ret;
+ int i;
tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata),
GFP_KERNEL);
@@ -223,10 +225,13 @@ static int tps68470_clk_probe(struct platform_device *pdev)
return ret;
if (pdata) {
- ret = devm_clk_hw_register_clkdev(&pdev->dev,
- &tps68470_clkdata->clkout_hw,
- pdata->consumer_con_id,
- pdata->consumer_dev_name);
+ for (i = 0; i < pdata->n_consumers; i++) {
+ consumer = &pdata->consumers[i];
+ ret = devm_clk_hw_register_clkdev(&pdev->dev,
+ &tps68470_clkdata->clkout_hw,
+ consumer->consumer_con_id,
+ consumer->consumer_dev_name);
+ }
}
return ret;
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index e7be3e54b9be..657493ecce4c 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -1138,7 +1138,7 @@ err_clk:
return ret;
}
-static int vc5_remove(struct i2c_client *client)
+static void vc5_remove(struct i2c_client *client)
{
struct vc5_driver_data *vc5 = i2c_get_clientdata(client);
@@ -1146,8 +1146,6 @@ static int vc5_remove(struct i2c_client *client)
if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
clk_unregister_fixed_rate(vc5->pin_xin);
-
- return 0;
}
static int __maybe_unused vc5_suspend(struct device *dev)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7fc191c15507..bd0b35cac83e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -840,10 +840,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index fc1bd23d4583..598f3cf4eba4 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index f5c9fa40491c..dcc41d178238 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -332,7 +332,7 @@ static struct platform_driver imx93_clk_driver = {
.driver = {
.name = "imx93-ccm",
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx93_clk_of_match),
+ .of_match_table = imx93_clk_of_match,
},
};
module_platform_driver(imx93_clk_driver);
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 201bf6e6b6e0..d5544cbc5c48 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -101,15 +101,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw)
bool enabled = false;
/*
- * If the SoC has no global TCU clock, we must ungate the channel's
- * clock to be able to access its registers.
- * If we have a TCU clock, it will be enabled automatically as it has
- * been attached to the regmap.
+ * According to the programming manual, a timer channel's registers can
+ * only be accessed when the channel's stop bit is clear.
*/
- if (!tcu->clk) {
- enabled = !!ingenic_tcu_is_enabled(hw);
- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
- }
+ enabled = !!ingenic_tcu_is_enabled(hw);
+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
return enabled;
}
@@ -120,8 +116,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw)
const struct ingenic_tcu_clk_info *info = tcu_clk->info;
struct ingenic_tcu *tcu = tcu_clk->tcu;
- if (!tcu->clk)
- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
}
static u8 ingenic_tcu_get_parent(struct clk_hw *hw)
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 070c3b896559..b6b89413e090 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -239,6 +239,11 @@ static const struct clk_ops mpfs_clk_cfg_ops = {
.hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
+#define CLK_CPU_OFFSET 0u
+#define CLK_AXI_OFFSET 1u
+#define CLK_AHB_OFFSET 2u
+#define CLK_RTCREF_OFFSET 3u
+
static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0,
REG_CLOCK_CONFIG_CR),
@@ -362,7 +367,7 @@ static const struct clk_ops mpfs_periph_clk_ops = {
_flags), \
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT].hw)
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
/*
* Critical clocks:
@@ -370,6 +375,8 @@ static const struct clk_ops mpfs_periph_clk_ops = {
* trap handler
* - CLK_MMUART0: reserved by the hss
* - CLK_DDRC: provides clock to the ddr subsystem
+ * - CLK_RTC: the onboard RTC's AHB bus clock must be kept running as the rtc will stop
+ * if the AHB interface clock is disabled
* - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect)
* clock domain crossers which provide the interface to the FPGA fabric. Disabling them
* causes the FPGA fabric to go into reset.
@@ -394,7 +401,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0),
CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0),
CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0),
- CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, 0),
+ CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0),
CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0),
CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 30056da3e0af..42568c616181 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1191,9 +1191,13 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- /* Force PLL_GPU output divider bits to 0 */
+ /*
+ * Force PLL_GPU output divider bits to 0 and adjust
+ * multiplier to sensible default value of 432 MHz.
+ */
val = readl(reg + SUN50I_H6_PLL_GPU_REG);
- val &= ~BIT(0);
+ val &= ~(GENMASK(15, 8) | BIT(0));
+ val |= 17 << 8;
writel(val, reg + SUN50I_H6_PLL_GPU_REG);
/* Force GPU_CLK divider bits to 0 */
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ef2a445c63a3..373e9438b57a 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -135,6 +135,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
continue;
if (!strncmp(n, tmp, strlen(tmp))) {
+ of_node_get(np);
found = true;
break;
}
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 62c2b7ac4339..4407203e0c9b 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -449,6 +449,9 @@ static int quad8_events_configure(struct counter_device *counter)
return -EINVAL;
}
+ /* Enable IRQ line */
+ irq_enabled |= BIT(event_node->channel);
+
/* Skip configuration if it is the same as previously set */
if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
continue;
@@ -462,9 +465,6 @@ static int quad8_events_configure(struct counter_device *counter)
priv->irq_trigger[event_node->channel] << 3;
iowrite8(QUAD8_CTR_IOR | ior_cfg,
&priv->reg->channel[event_node->channel].control);
-
- /* Enable IRQ line */
- irq_enabled |= BIT(event_node->channel);
}
iowrite8(irq_enabled, &priv->reg->index_interrupt);
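Editorial note: the reorder in this hunk matters because irq_enabled must accumulate a bit for every channel with an event request, including channels whose trigger configuration is unchanged and therefore hits the "continue". A standalone sketch of that accumulate-before-skip shape, with hypothetical values:

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_CHANNELS 4

int main(void)
{
	uint8_t irq_enabled = 0;
	int requested[NUM_CHANNELS]    = { 1, 0, 1, 1 };	/* channels with an event request */
	int next_trigger[NUM_CHANNELS] = { 2, 0, 2, 3 };	/* trigger wanted per channel */
	int cur_trigger[NUM_CHANNELS]  = { 2, 0, 0, 0 };	/* trigger already programmed */

	for (int ch = 0; ch < NUM_CHANNELS; ch++) {
		if (!requested[ch])
			continue;

		/* Always record that this channel's IRQ line must be enabled... */
		irq_enabled |= 1U << ch;

		/* ...even when reprogramming its trigger can be skipped. */
		if (cur_trigger[ch] == next_trigger[ch])
			continue;

		cur_trigger[ch] = next_trigger[ch];	/* stand-in for the iowrite8() above */
	}

	printf("irq_enabled mask: 0x%02x\n", irq_enabled);	/* prints 0x0d here */
	return 0;
}
```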
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 55516043b656..310779b07daf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -51,6 +51,21 @@ config X86_AMD_PSTATE
If in doubt, say N.
+config X86_AMD_PSTATE_UT
+ tristate "selftest for AMD Processor P-State driver"
+ depends on X86 && ACPI_PROCESSOR
+ default n
+ help
+ This kernel module is used for testing. It's safe to say M here.
+
+ It can also be built in without X86_AMD_PSTATE enabled.
+ Currently, only tests for amd-pstate are supported. If X86_AMD_PSTATE
+ is disabled, the tests report that they can only run with the amd-pstate
+ driver and ask the user to enable X86_AMD_PSTATE.
+ In the future, comparison tests will be added: amd-pstate can be disabled
+ and acpi-cpufreq enabled to run the test cases, and the results can then
+ be compared.
+
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
depends on ACPI_PROCESSOR
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 285de70af877..49b98c62c5af 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -30,6 +30,7 @@ amd_pstate-y := amd-pstate.o amd-pstate-trace.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
obj-$(CONFIG_X86_AMD_PSTATE) += amd_pstate.o
+obj-$(CONFIG_X86_AMD_PSTATE_UT) += amd-pstate-ut.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
new file mode 100644
index 000000000000..e4a5b4d90f83
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-1.0-or-later
+/*
+ * AMD Processor P-state Frequency Driver Unit Test
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ *
+ * The AMD P-State Unit Test is a test module for testing the amd-pstate
+ * driver. 1) It can help all users verify that their processor supports
+ * the driver (SBIOS/firmware or hardware). 2) It gives the kernel a basic
+ * functional test to catch regressions during updates. 3) More functional
+ * and performance tests can be added later and their results compared,
+ * which will benefit power and performance scaling optimization.
+ *
+ * This driver implements a basic framework, with plans to add further test
+ * cases to improve the depth and coverage of the testing.
+ *
+ * See the "Unit Tests for amd-pstate" section of
+ * Documentation/admin-guide/pm/amd-pstate.rst for more detail.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fs.h>
+#include <linux/amd-pstate.h>
+
+#include <acpi/cppc_acpi.h>
+
+/*
+ * Abbreviations:
+ * amd_pstate_ut: used as a short form for the AMD P-State unit test.
+ * It helps to keep variable names smaller and simpler.
+ */
+enum amd_pstate_ut_result {
+ AMD_PSTATE_UT_RESULT_PASS,
+ AMD_PSTATE_UT_RESULT_FAIL,
+};
+
+struct amd_pstate_ut_struct {
+ const char *name;
+ void (*func)(u32 index);
+ enum amd_pstate_ut_result result;
+};
+
+/*
+ * Test cases of the AMD P-State unit test kernel module
+ */
+static void amd_pstate_ut_acpi_cpc_valid(u32 index);
+static void amd_pstate_ut_check_enabled(u32 index);
+static void amd_pstate_ut_check_perf(u32 index);
+static void amd_pstate_ut_check_freq(u32 index);
+
+static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
+ {"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
+ {"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
+ {"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
+ {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
+};
+
+static bool get_shared_mem(void)
+{
+ bool result = false;
+ char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
+ char buf[5] = {0};
+ struct file *filp = NULL;
+ loff_t pos = 0;
+ ssize_t ret;
+
+ if (!boot_cpu_has(X86_FEATURE_CPPC)) {
+ filp = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(filp))
+ pr_err("%s unable to open %s file!\n", __func__, path);
+ else {
+ ret = kernel_read(filp, &buf, sizeof(buf), &pos);
+ if (ret < 0)
+ pr_err("%s read %s file fail ret=%ld!\n",
+ __func__, path, (long)ret);
+ filp_close(filp, NULL);
+ }
+
+ if ('Y' == *buf)
+ result = true;
+ }
+
+ return result;
+}
+
+/*
+ * Check that the _CPC object is present in the SBIOS.
+ */
+static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+{
+ if (acpi_cpc_valid())
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ }
+}
+
+static void amd_pstate_ut_pstate_enable(u32 index)
+{
+ int ret = 0;
+ u64 cppc_enable = 0;
+
+ ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ return;
+ }
+ if (cppc_enable)
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s amd pstate must be enabled!\n", __func__);
+ }
+}
+
+/*
+ * check if amd pstate is enabled
+ */
+static void amd_pstate_ut_check_enabled(u32 index)
+{
+ if (get_shared_mem())
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else
+ amd_pstate_ut_pstate_enable(index);
+}
+
+/*
+ * check if performance values are reasonable.
+ * highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
+ */
+static void amd_pstate_ut_check_perf(u32 index)
+{
+ int cpu = 0, ret = 0;
+ u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
+ u64 cap1 = 0;
+ struct cppc_perf_caps cppc_perf;
+ struct cpufreq_policy *policy = NULL;
+ struct amd_cpudata *cpudata = NULL;
+
+ highest_perf = amd_get_highest_perf();
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ break;
+ cpudata = policy->driver_data;
+
+ if (get_shared_mem()) {
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
+ return;
+ }
+
+ nominal_perf = cppc_perf.nominal_perf;
+ lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ lowest_perf = cppc_perf.lowest_perf;
+ } else {
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
+ return;
+ }
+
+ nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
+ lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
+ lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ }
+
+ if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
+ (nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
+ (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
+ (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
+ __func__, cpu, highest_perf, cpudata->highest_perf,
+ nominal_perf, cpudata->nominal_perf,
+ lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
+ lowest_perf, cpudata->lowest_perf);
+ return;
+ }
+
+ if (!((highest_perf >= nominal_perf) &&
+ (nominal_perf > lowest_nonlinear_perf) &&
+ (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_perf > 0))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
+ __func__, cpu, highest_perf, nominal_perf,
+ lowest_nonlinear_perf, lowest_perf);
+ return;
+ }
+ }
+
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+}
+
+/*
+ * Check if frequency values are reasonable.
+ * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
+ * Check the max freq when boost mode is supported.
+ */
+static void amd_pstate_ut_check_freq(u32 index)
+{
+ int cpu = 0;
+ struct cpufreq_policy *policy = NULL;
+ struct amd_cpudata *cpudata = NULL;
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ break;
+ cpudata = policy->driver_data;
+
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
+ (cpudata->min_freq > 0))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, cpudata->min_freq);
+ return;
+ }
+
+ if (cpudata->min_freq != policy->min) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->min_freq, policy->min);
+ return;
+ }
+
+ if (cpudata->boost_supported) {
+ if ((policy->max == cpudata->max_freq) ||
+ (policy->max == cpudata->nominal_freq))
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
+ __func__, cpu, policy->max, cpudata->max_freq,
+ cpudata->nominal_freq);
+ return;
+ }
+ } else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d must support boost!\n", __func__, cpu);
+ return;
+ }
+ }
+
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+}
+
+static int __init amd_pstate_ut_init(void)
+{
+ u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
+
+ for (i = 0; i < arr_size; i++) {
+ amd_pstate_ut_cases[i].func(i);
+ switch (amd_pstate_ut_cases[i].result) {
+ case AMD_PSTATE_UT_RESULT_PASS:
+ pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
+ break;
+ case AMD_PSTATE_UT_RESULT_FAIL:
+ default:
+ pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void __exit amd_pstate_ut_exit(void)
+{
+}
+
+module_init(amd_pstate_ut_init);
+module_exit(amd_pstate_ut_exit);
+
+MODULE_AUTHOR("Meng Li <li.meng@amd.com>");
+MODULE_DESCRIPTION("AMD P-state driver Test module");
+MODULE_LICENSE("GPL");
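Editorial note: because the tests are table-driven, the additional test cases promised in the file header come down to one new function plus one new row in amd_pstate_ut_cases[]. A hedged sketch of such an extension (the extra case is hypothetical and not part of this patch; it assumes the file's existing includes plus <linux/cpufreq.h> and <linux/string.h>):

```c
/* Hypothetical extra case: verify that the amd-pstate cpufreq driver is active. */
static void amd_pstate_ut_check_driver_name(u32 index)
{
	const char *drv = cpufreq_get_current_driver();

	if (drv && !strcmp(drv, "amd-pstate"))
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s amd-pstate driver is not loaded!\n", __func__);
	}
}

/*
 * Registering the case is one extra row appended to amd_pstate_ut_cases[]:
 *	{"amd_pstate_ut_check_driver_name", amd_pstate_ut_check_driver_name }
 * amd_pstate_ut_init() then runs it in order with the others and logs
 * success or fail per case via pr_info().
 */
```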
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9ac75c1cde9c..ace7d50cf2ac 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
+#include <linux/amd-pstate.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -46,8 +47,8 @@
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"
-#define AMD_PSTATE_TRANSITION_LATENCY 0x20000
-#define AMD_PSTATE_TRANSITION_DELAY 500
+#define AMD_PSTATE_TRANSITION_LATENCY 20000
+#define AMD_PSTATE_TRANSITION_DELAY 1000
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -65,65 +66,6 @@ MODULE_PARM_DESC(shared_mem,
static struct cpufreq_driver amd_pstate_driver;
-/**
- * struct amd_aperf_mperf
- * @aperf: actual performance frequency clock count
- * @mperf: maximum performance frequency clock count
- * @tsc: time stamp counter
- */
-struct amd_aperf_mperf {
- u64 aperf;
- u64 mperf;
- u64 tsc;
-};
-
-/**
- * struct amd_cpudata - private CPU data for AMD P-State
- * @cpu: CPU number
- * @req: constraint request to apply
- * @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
- * @max_freq: the frequency that mapped to highest_perf
- * @min_freq: the frequency that mapped to lowest_perf
- * @nominal_freq: the frequency that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
- * @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value
- * @boost_supported: check whether the Processor or SBIOS supports boost mode
- *
- * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
- * represents all the attributes and goals that AMD P-State requests at runtime.
- */
-struct amd_cpudata {
- int cpu;
-
- struct freq_qos_request req[2];
- u64 cppc_req_cached;
-
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
-
- u32 max_freq;
- u32 min_freq;
- u32 nominal_freq;
- u32 lowest_nonlinear_freq;
-
- struct amd_aperf_mperf cur;
- struct amd_aperf_mperf prev;
-
- u64 freq;
- bool boost_supported;
-};
-
static inline int pstate_enable(bool enable)
{
return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
@@ -152,6 +94,7 @@ static inline int amd_pstate_enable(bool enable)
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
+ u32 highest_perf;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
@@ -163,7 +106,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
*
* CPPC entry doesn't indicate the highest performance in some ASICs.
*/
- WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+ highest_perf = amd_get_highest_perf();
+ if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
@@ -175,12 +122,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+ highest_perf = amd_get_highest_perf();
+ if (highest_perf > cppc_perf.highest_perf)
+ highest_perf = cppc_perf.highest_perf;
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
@@ -269,6 +221,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf);
@@ -312,7 +265,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
return -ENODEV;
cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
freqs.old = policy->cur;
@@ -357,8 +310,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
}
@@ -555,9 +506,7 @@ free_cpudata1:
static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata;
-
- cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
@@ -599,9 +548,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
int max_freq;
- struct amd_cpudata *cpudata;
-
- cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
max_freq = amd_get_max_freq(cpudata);
if (max_freq < 0)
@@ -614,9 +561,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
char *buf)
{
int freq;
- struct amd_cpudata *cpudata;
-
- cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
freq = amd_get_lowest_nonlinear_freq(cpudata);
if (freq < 0)
@@ -662,7 +607,7 @@ static struct cpufreq_driver amd_pstate_driver = {
.resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.name = "amd-pstate",
- .attr = amd_pstate_attr,
+ .attr = amd_pstate_attr,
};
static int __init amd_pstate_init(void)
@@ -673,7 +618,7 @@ static int __init amd_pstate_init(void)
return -ENODEV;
if (!acpi_cpc_valid()) {
- pr_debug("the _CPC object is not present in SBIOS\n");
+ pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index f7c23fa468f0..39221a9a187a 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -156,7 +156,7 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.name = BMIPS_CPUFREQ_PREFIX,
};
-static int __init bmips_cpufreq_probe(void)
+static int __init bmips_cpufreq_driver_init(void)
{
struct cpufreq_compat *cc;
struct device_node *np;
@@ -176,7 +176,13 @@ static int __init bmips_cpufreq_probe(void)
return cpufreq_register_driver(&bmips_cpufreq_driver);
}
-device_initcall(bmips_cpufreq_probe);
+module_init(bmips_cpufreq_driver_init);
+
+static void __exit bmips_cpufreq_driver_exit(void)
+{
+ cpufreq_unregister_driver(&bmips_cpufreq_driver);
+}
+module_exit(bmips_cpufreq_driver_exit);
MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 24eaf0ec344d..432dfb4e8027 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -63,7 +63,15 @@ static struct cppc_workaround_oem_info wa_info[] = {
static struct cpufreq_driver cppc_cpufreq_driver;
+static enum {
+ FIE_UNSET = -1,
+ FIE_ENABLED,
+ FIE_DISABLED
+} fie_disabled = FIE_UNSET;
+
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
+module_param(fie_disabled, int, 0444);
+MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
/* Frequency invariance support */
struct cppc_freq_invariance {
@@ -158,7 +166,7 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
struct cppc_freq_invariance *cppc_fi;
int cpu, ret;
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled)
return;
for_each_cpu(cpu, policy->cpus) {
@@ -199,7 +207,7 @@ static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
struct cppc_freq_invariance *cppc_fi;
int cpu;
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled)
return;
/* policy->cpus will be empty here, use related_cpus instead */
@@ -229,7 +237,15 @@ static void __init cppc_freq_invariance_init(void)
};
int ret;
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
+ fie_disabled = FIE_ENABLED;
+ if (cppc_perf_ctrs_in_pcc()) {
+ pr_info("FIE not enabled on systems with registers in PCC\n");
+ fie_disabled = FIE_DISABLED;
+ }
+ }
+
+ if (fie_disabled)
return;
kworker_fie = kthread_create_worker(0, "cppc_fie");
@@ -247,7 +263,7 @@ static void __init cppc_freq_invariance_init(void)
static void cppc_freq_invariance_exit(void)
{
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled)
return;
kthread_destroy_worker(kworker_fie);
@@ -936,6 +952,7 @@ static void cppc_check_hisi_workaround(void)
wa_info[i].oem_revision == tbl->oem_revision) {
/* Overwrite the get() callback */
cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
+ fie_disabled = FIE_DISABLED;
break;
}
}
@@ -947,7 +964,7 @@ static int __init cppc_cpufreq_init(void)
{
int ret;
- if ((acpi_disabled) || !acpi_cpc_valid())
+ if (!acpi_cpc_valid())
return -ENODEV;
cppc_check_hisi_workaround();
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2c96de3f2d83..6ac3800db450 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -146,6 +146,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7820c4e74289..69b3d61852ac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
target_freq = clamp_val(target_freq, policy->min, policy->max);
- if (!cpufreq_driver->target_index)
+ if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index ac57cddc5f2f..a45864701143 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -55,7 +55,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
.notifier_call = hb_cpufreq_clk_notify,
};
-static int hb_cpufreq_driver_init(void)
+static int __init hb_cpufreq_driver_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct device *cpu_dev;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 57cdb3679885..fc3ebeb0bbe5 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2416,6 +2416,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(COMETLAKE, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
+ X86_MATCH(TIGERLAKE, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d5ef3c66c762..833589bc95e4 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -13,6 +13,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>
@@ -56,6 +57,8 @@ struct qcom_cpufreq_data {
struct cpufreq_policy *policy;
bool per_core_dcvs;
+
+ struct freq_qos_request throttle_freq_req;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -316,14 +319,16 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else {
- throttled_freq = freq_hz / HZ_PER_KHZ;
-
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
-
dev_pm_opp_put(opp);
}
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
+
+ /* Update thermal pressure (the boost frequencies are accepted) */
+ arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+
/*
* In the unlikely case policy is unregistered do not enable
* polling or h/w interrupt
@@ -413,6 +418,14 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
if (data->throttle_irq < 0)
return data->throttle_irq;
+ ret = freq_qos_add_request(&policy->constraints,
+ &data->throttle_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
+ return ret;
+ }
+
data->cancel_throttle = false;
data->policy = policy;
@@ -479,6 +492,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
if (data->throttle_irq <= 0)
return;
+ freq_qos_remove_request(&data->throttle_freq_req);
free_irq(data->throttle_irq, data);
}
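Editorial note: the hunks above wire the LMH throttle ceiling into the freq QoS framework, so the reported limit actually constrains policy->max instead of only feeding thermal pressure, and the request is dropped again on exit. A minimal sketch of that request lifecycle against an existing cpufreq policy (hypothetical helper names and limits):

```c
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct freq_qos_request throttle_req;

/* Attach a MAX-frequency request that initially imposes no limit. */
static int throttle_cap_init(struct cpufreq_policy *policy)
{
	return freq_qos_add_request(&policy->constraints, &throttle_req,
				    FREQ_QOS_MAX, FREQ_QOS_MAX_DEFAULT_VALUE);
}

/* Lower (or raise) the ceiling; it is aggregated with other MAX requests. */
static void throttle_cap_update(unsigned int max_khz)
{
	freq_qos_update_request(&throttle_req, max_khz);
}

/* Drop the request so the policy is no longer constrained by it. */
static void throttle_cap_exit(void)
{
	freq_qos_remove_request(&throttle_req);
}
```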
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a67df90848c2..1a63aeea8711 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -252,7 +252,7 @@ static int sti_cpufreq_fetch_syscon_registers(void)
return 0;
}
-static int sti_cpufreq_init(void)
+static int __init sti_cpufreq_init(void)
{
int ret;
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 1216046cf4c2..c2004cae3f02 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -38,14 +38,6 @@
/* cpufreq transisition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
-enum cluster {
- CLUSTER0,
- CLUSTER1,
- CLUSTER2,
- CLUSTER3,
- MAX_CLUSTERS,
-};
-
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
@@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
struct tegra_cpufreq_soc {
struct tegra_cpufreq_ops *ops;
int maxcpus_per_cluster;
+ unsigned int num_clusters;
phys_addr_t actmon_cntr_base;
};
struct tegra194_cpufreq_data {
void __iomem *regs;
- size_t num_clusters;
struct cpufreq_frequency_table **tables;
const struct tegra_cpufreq_soc *soc;
};
@@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
+ .num_clusters = 3,
+};
+
+static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
+ .ops = &tegra234_cpufreq_ops,
+ .actmon_cntr_base = 0x4000,
+ .maxcpus_per_cluster = 8,
+ .num_clusters = 1,
};
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
@@ -314,11 +314,7 @@ static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
- int ret;
-
- ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
-
- return ret;
+ return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
}
static void tegra194_set_cpu_ndiv_sysreg(void *data)
@@ -382,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
- if (clusterid >= data->num_clusters || !data->tables[clusterid])
+ if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
return -EINVAL;
start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
@@ -433,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
+ .num_clusters = 4,
};
static void tegra194_cpufreq_free_resources(void)
@@ -525,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
soc = of_device_get_match_data(&pdev->dev);
- if (soc->ops && soc->maxcpus_per_cluster) {
+ if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
data->soc = soc;
} else {
dev_err(&pdev->dev, "soc data missing\n");
return -EINVAL;
}
- data->num_clusters = MAX_CLUSTERS;
- data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
+ data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
sizeof(*data->tables), GFP_KERNEL);
if (!data->tables)
return -ENOMEM;
@@ -558,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
goto put_bpmp;
}
- for (i = 0; i < data->num_clusters; i++) {
+ for (i = 0; i < data->soc->num_clusters; i++) {
data->tables[i] = init_freq_table(pdev, bpmp, i);
if (IS_ERR(data->tables[i])) {
err = PTR_ERR(data->tables[i]);
@@ -590,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
static const struct of_device_id tegra194_cpufreq_of_match[] = {
{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
+ { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
{ /* sentinel */ }
};
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index df85a77d476b..f64180dd2005 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -398,7 +398,7 @@ fail_put_node:
return ret;
}
-static int ti_cpufreq_init(void)
+static int __init ti_cpufreq_init(void)
{
const struct of_device_id *match;
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 74068742cef3..9acde71558d5 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -54,7 +54,7 @@
* variable is not locked. It is only written from the cpu that
* it stores (or by the on/offlining cpu if that cpu is offline),
* and only read after all the cpus are ready for the coupled idle
- * state are are no longer updating it.
+ * state are no longer updating it.
*
* Three atomic counters are used. alive_count tracks the number
* of cpus in the coupled set that are currently or soon will be
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index c32c600b3cf8..0b5461b3d7dd 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -233,8 +233,8 @@ static inline void add_powernv_state(int index, const char *name,
unsigned int exit_latency,
u64 psscr_val, u64 psscr_mask)
{
- strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
- strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
+ strscpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
+ strscpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
powernv_states[index].flags = flags;
powernv_states[index].target_residency = target_residency;
powernv_states[index].exit_latency = exit_latency;
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 3db4fca1172b..821984947ed9 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -124,10 +124,8 @@ static bool psci_pd_try_set_osi_mode(void)
return false;
ret = psci_set_osi_mode(true);
- if (ret) {
- pr_warn("failed to enable OSI mode: %d\n", ret);
+ if (ret)
return false;
- }
return true;
}
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 29acaf48e575..0d0f9751ff8f 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -63,12 +63,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
cpuidle_curr_governor = gov;
- if (gov) {
- list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
- cpuidle_enable_device(dev);
- cpuidle_install_idle_handler();
- printk(KERN_INFO "cpuidle: using governor %s\n", gov->name);
- }
+ list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
+ cpuidle_enable_device(dev);
+
+ cpuidle_install_idle_handler();
+ pr_info("cpuidle: using governor %s\n", gov->name);
return 0;
}
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index a4b13d326cfc..82bf15d49561 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -343,7 +343,7 @@ static int atmel_ecc_probe(struct i2c_client *client,
return ret;
}
-static int atmel_ecc_remove(struct i2c_client *client)
+static void atmel_ecc_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
@@ -358,7 +358,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
* accessing the freed memory.
*/
dev_emerg(&client->dev, "Device is busy, expect memory corruption.\n");
- return 0;
+ return;
}
crypto_unregister_kpp(&atmel_ecdh_nist_p256);
@@ -366,8 +366,6 @@ static int atmel_ecc_remove(struct i2c_client *client)
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index e4087bdd2475..a84b657598c6 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -116,18 +116,16 @@ static int atmel_sha204a_probe(struct i2c_client *client,
return ret;
}
-static int atmel_sha204a_remove(struct i2c_client *client)
+static void atmel_sha204a_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
if (atomic_read(&i2c_priv->tfm_count)) {
dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
- return 0;
+ return;
}
kfree((void *)i2c_priv->hwrng.priv);
-
- return 0;
}
static const struct of_device_id atmel_sha204a_dt_ids[] = {
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2a60d0525cde..168195672e2e 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
+ kfree(vc_akcipher_req->src_buf);
+ kfree(vc_akcipher_req->dst_buf);
+ vc_akcipher_req->src_buf = NULL;
+ vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cb6401c9e9a4..acf31cc1dbcc 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
.start = r->start,
.end = r->end,
.flags = IORESOURCE_MEM,
+ .desc = IORES_DESC_SOFT_RESERVED,
};
struct platform_device *pdev;
struct memregion_info info;
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 9a88faaf8b27..39ac069cabc7 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -189,10 +189,9 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
data->clk = devm_clk_get(dev, "pclk_ddr_mon");
- if (IS_ERR(data->clk)) {
- dev_err(dev, "Cannot get the clk dmc_clk\n");
- return PTR_ERR(data->clk);
- }
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
/* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
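The rockchip-dfi hunk folds the error print and the return into dev_err_probe(), which logs at error level for real failures, quietly records the reason for -EPROBE_DEFER, and returns the error in one statement; it also corrects the message to name the clock that is actually requested. A hedged sketch of the idiom, with the probe body trimmed to the relevant call:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	clk = devm_clk_get(dev, "pclk_ddr_mon");
	if (IS_ERR(clk))
		/* prints (or records the deferral reason) and returns the error */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Cannot get the clk pclk_ddr_mon\n");

	return 0;
}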
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 71abb3fbd042..e5458ada5197 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -291,9 +291,13 @@ static int mtk_ccifreq_probe(struct platform_device *pdev)
}
drv->sram_reg = devm_regulator_get_optional(dev, "sram");
- if (IS_ERR(drv->sram_reg))
+ if (IS_ERR(drv->sram_reg)) {
+ ret = PTR_ERR(drv->sram_reg);
+ if (ret == -EPROBE_DEFER)
+ goto out_free_resources;
+
drv->sram_reg = NULL;
- else {
+ } else {
ret = regulator_enable(drv->sram_reg);
if (ret) {
dev_err(dev, "failed to enable sram regulator\n");
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index efb4990b29e1..dd0f83ee505b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -53,7 +53,7 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
spin_unlock(&dmabuf->name_lock);
- return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
+ return dynamic_dname(buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
}
@@ -531,11 +531,11 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
* value.
*/
inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = dmabuf;
file->f_path.dentry->d_fsdata = dmabuf;
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 066400ed8841..406b4e26f538 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -136,6 +136,10 @@ struct dma_fence *dma_fence_get_stub(void)
&dma_fence_stub_ops,
&dma_fence_stub_lock,
0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
dma_fence_signal_locked(&dma_fence_stub);
}
spin_unlock(&dma_fence_stub_lock);
@@ -161,6 +165,10 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
&dma_fence_stub_ops,
&dma_fence_stub_lock,
0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
dma_fence_signal(fence);
return fence;
@@ -500,6 +508,8 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
__dma_fence_might_wait();
+ dma_fence_enable_sw_signaling(fence);
+
trace_dma_fence_wait_start(fence);
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
@@ -601,9 +611,6 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return;
-
spin_lock_irqsave(fence->lock, flags);
__dma_fence_enable_signaling(fence);
spin_unlock_irqrestore(fence->lock, flags);
@@ -756,19 +763,16 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return ret;
-
spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ goto out;
+
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
}
- if (!__dma_fence_enable_signaling(fence))
- goto out;
-
if (!timeout) {
ret = 0;
goto out;
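The dma-fence rework changes the ordering around signaling: dma_fence_wait_timeout() now enables software signaling before waiting, the early "already signaled" shortcuts are dropped from dma_fence_enable_sw_signaling() and the default wait, and the signaled-bit test moves under the fence lock so it cannot race with callback installation. One practical consequence, visible in the selftest hunks further down, is that code which merely polls a fence should enable software signaling explicitly first. A hedged fragment (the fence-producing helper is hypothetical):

	struct dma_fence *f = demo_get_fence();	/* hypothetical producer */

	/* Arm ops->enable_signaling() so the backend will actually signal. */
	dma_fence_enable_sw_signaling(f);

	if (dma_fence_is_signaled(f))
		pr_debug("fence already completed\n");

	dma_fence_put(f);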
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 205acb2c744d..e3885c90a3ac 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
enum dma_resv_usage old_usage;
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
- if ((old->context == fence->context && old_usage >= usage) ||
+ if ((old->context == fence->context && old_usage >= usage &&
+ dma_fence_is_later(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 8ce1ea59d31b..0a9b099d0518 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -87,6 +87,8 @@ static int sanitycheck(void *arg)
if (!chain)
err = -ENOMEM;
+ dma_fence_enable_sw_signaling(chain);
+
dma_fence_signal(f);
dma_fence_put(f);
@@ -143,6 +145,8 @@ static int fence_chains_init(struct fence_chains *fc, unsigned int count,
}
fc->tail = fc->chains[i];
+
+ dma_fence_enable_sw_signaling(fc->chains[i]);
}
fc->chain_length = i;
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index 4105d5ea8dde..f0cee984b6c7 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -102,6 +102,8 @@ static int sanitycheck(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
array = mock_array(1, f);
if (!array)
return -ENOMEM;
@@ -124,12 +126,16 @@ static int unwrap_array(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
dma_fence_put(f1);
return -ENOMEM;
}
+ dma_fence_enable_sw_signaling(f2);
+
array = mock_array(2, f1, f2);
if (!array)
return -ENOMEM;
@@ -164,12 +170,16 @@ static int unwrap_chain(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
dma_fence_put(f1);
return -ENOMEM;
}
+ dma_fence_enable_sw_signaling(f2);
+
chain = mock_chain(f1, f2);
if (!chain)
return -ENOMEM;
@@ -204,12 +214,16 @@ static int unwrap_chain_array(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
dma_fence_put(f1);
return -ENOMEM;
}
+ dma_fence_enable_sw_signaling(f2);
+
array = mock_array(2, f1, f2);
if (!array)
return -ENOMEM;
@@ -248,12 +262,16 @@ static int unwrap_merge(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
err = -ENOMEM;
goto error_put_f1;
}
+ dma_fence_enable_sw_signaling(f2);
+
f3 = dma_fence_unwrap_merge(f1, f2);
if (!f3) {
err = -ENOMEM;
@@ -296,10 +314,14 @@ static int unwrap_merge_complex(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2)
goto error_put_f1;
+ dma_fence_enable_sw_signaling(f2);
+
f3 = dma_fence_unwrap_merge(f1, f2);
if (!f3)
goto error_put_f2;
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index c8a12d7ad71a..fb6e0a6ae2c9 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -102,6 +102,8 @@ static int sanitycheck(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_signal(f);
dma_fence_put(f);
@@ -117,6 +119,8 @@ static int test_signaling(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
if (dma_fence_is_signaled(f)) {
pr_err("Fence unexpectedly signaled on creation\n");
goto err_free;
@@ -190,6 +194,8 @@ static int test_late_add_callback(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_signal(f);
if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
@@ -282,6 +288,8 @@ static int test_status(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
if (dma_fence_get_status(f)) {
pr_err("Fence unexpectedly has signaled status on creation\n");
goto err_free;
@@ -308,6 +316,8 @@ static int test_error(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_set_error(f, -EIO);
if (dma_fence_get_status(f)) {
@@ -337,6 +347,8 @@ static int test_wait(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
pr_err("Wait reported complete before being signaled\n");
goto err_free;
@@ -379,6 +391,8 @@ static int test_wait_timeout(void *arg)
if (!wt.f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(wt.f);
+
if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
pr_err("Wait reported complete before being signaled\n");
goto err_free;
@@ -458,6 +472,8 @@ static int thread_signal_callback(void *arg)
break;
}
+ dma_fence_enable_sw_signaling(f1);
+
rcu_assign_pointer(t->fences[t->id], f1);
smp_wmb();
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index 813779e3c9be..15dbea1462ed 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -45,6 +45,8 @@ static int sanitycheck(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_signal(f);
dma_fence_put(f);
@@ -69,6 +71,8 @@ static int test_signaling(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
@@ -114,6 +118,8 @@ static int test_for_each(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
@@ -173,6 +179,8 @@ static int test_for_each_unlocked(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
@@ -244,6 +252,8 @@ static int test_get_fences(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 3ebec19a8e02..af57799c86ce 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -132,7 +132,7 @@ EXPORT_SYMBOL(sync_file_get_fence);
char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
{
if (sync_file->user_name[0]) {
- strlcpy(buf, sync_file->user_name, len);
+ strscpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
@@ -172,7 +172,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
return NULL;
}
sync_file->fence = fence;
- strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
+ strscpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
}
@@ -262,9 +262,9 @@ err_put_fd:
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+ strscpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 38e8767ec371..2bcdb935a3ac 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -124,17 +124,20 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
{
struct udmabuf *ubuf = buf->priv;
struct device *dev = ubuf->device->this_device;
+ int ret = 0;
if (!ubuf->sg) {
ubuf->sg = get_sg_table(dev, buf, direction);
- if (IS_ERR(ubuf->sg))
- return PTR_ERR(ubuf->sg);
+ if (IS_ERR(ubuf->sg)) {
+ ret = PTR_ERR(ubuf->sg);
+ ubuf->sg = NULL;
+ }
} else {
dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
direction);
}
- return 0;
+ return ret;
}
static int end_cpu_udmabuf(struct dma_buf *buf,
@@ -210,7 +213,7 @@ static long udmabuf_create(struct miscdevice *device,
memfd = fget(list[i].memfd);
if (!memfd)
goto err;
- mapping = file_inode(memfd)->i_mapping;
+ mapping = memfd->f_mapping;
if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
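Two separate fixes sit in the udmabuf hunk: begin_cpu_udmabuf() clears ubuf->sg again when get_sg_table() fails, so a later call does not mistake a cached ERR_PTR for a valid table, and udmabuf_create() takes the mapping from memfd->f_mapping so the check also covers hugetlb-backed memfds. A compressed sketch of the first pattern, which applies to any long-lived field that might otherwise keep an ERR_PTR:

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;	/* never cache an ERR_PTR */
		}
	}

	return ret;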
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index d4f1e4e9603a..85e00701473c 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
pdev = of_find_device_by_node(udma_node);
+ if (np != udma_node)
+ of_node_put(udma_node);
+
if (!pdev) {
pr_debug("UDMA device not found\n");
return ERR_PTR(-EPROBE_DEFER);
}
- if (np != udma_node)
- of_node_put(udma_node);
-
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 6276934d4d2b..8cd4e69dc7b4 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Request and map I/O memory */
xdev->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
- goto disable_clks;
+ goto error;
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
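The xilinx_dma hunk reorders the probe error labels so that failures before any channel exists (register mapping, num-fstores, DMA mask) unwind only the clocks, while later failures first remove the channels and then fall through to disable the clocks, keeping teardown in reverse order of acquisition. A minimal sketch of that label layout; the step helpers are placeholders, not driver functions:

static int demo_probe(struct platform_device *pdev)
{
	int err;

	err = enable_clocks();		/* placeholder step 1 */
	if (err)
		return err;

	err = map_registers();		/* placeholder step 2 */
	if (err)
		goto disable_clks;

	err = create_channels();	/* placeholder step 3 */
	if (err)
		goto remove_chans;

	return 0;

remove_chans:
	remove_channels();		/* undo step 3 */
disable_clks:
	disable_clocks();		/* undo step 1; reached by fall-through too */
	return err;
}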
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index dc299ab36818..3f4ee3954384 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -849,7 +849,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
zynqmp_dma_desc_config_eod(chan, desc);
async_tx_ack(&first->async_tx);
- first->async_tx.flags = flags;
+ first->async_tx.flags = (enum dma_ctrl_flags)flags;
return &first->async_tx;
}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index eb58644bb019..6faeb2ab3960 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -103,7 +103,6 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm)
edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
- edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 96f6de0c8ff6..50ed9f2425bb 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -28,13 +28,9 @@ void edac_mc_sysfs_exit(void);
extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups);
extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
-extern int edac_get_log_ue(void);
-extern int edac_get_log_ce(void);
-extern int edac_get_panic_on_ue(void);
extern int edac_mc_get_log_ue(void);
extern int edac_mc_get_log_ce(void);
extern int edac_mc_get_panic_on_ue(void);
-extern int edac_get_poll_msec(void);
extern unsigned int edac_mc_get_poll_msec(void);
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 6cf50ee0b77c..a22ea053f8e1 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -74,31 +74,47 @@ static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
static int retry_rd_err_log;
+static int decoding_via_mca;
+static bool mem_cfg_2lm;
static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
+static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
+static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
+static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
+static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable)
+static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
+ u32 *offsets_scrub, u32 *offsets_demand,
+ u32 *offsets_demand2)
{
- u32 s, d;
+ u32 s, d, d2;
- if (!imc->mbase)
- return;
-
- s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]);
- d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]);
+ s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
+ d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
+ if (offsets_demand2)
+ d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
if (enable) {
/* Save default configurations */
imc->chan[chan].retry_rd_err_log_s = s;
imc->chan[chan].retry_rd_err_log_d = d;
+ if (offsets_demand2)
+ imc->chan[chan].retry_rd_err_log_d2 = d2;
s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
s |= RETRY_RD_ERR_LOG_EN;
d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
d |= RETRY_RD_ERR_LOG_EN;
+
+ if (offsets_demand2) {
+ d2 &= ~RETRY_RD_ERR_LOG_UC;
+ d2 |= RETRY_RD_ERR_LOG_NOOVER;
+ d2 |= RETRY_RD_ERR_LOG_EN;
+ }
} else {
/* Restore default configurations */
if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
@@ -113,23 +129,55 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
d |= RETRY_RD_ERR_LOG_NOOVER;
if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
d &= ~RETRY_RD_ERR_LOG_EN;
+
+ if (offsets_demand2) {
+ if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
+ d2 |= RETRY_RD_ERR_LOG_UC;
+ if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
+ d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
+ if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
+ d2 &= ~RETRY_RD_ERR_LOG_EN;
+ }
}
- I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s);
- I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d);
+ I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
+ I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
+ if (offsets_demand2)
+ I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}
static void enable_retry_rd_err_log(bool enable)
{
+ struct skx_imc *imc;
struct skx_dev *d;
int i, j;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list)
- for (i = 0; i < I10NM_NUM_IMC; i++)
- for (j = 0; j < I10NM_NUM_CHANNELS; j++)
- __enable_retry_rd_err_log(&d->imc[i], j, enable);
+ for (i = 0; i < I10NM_NUM_IMC; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase)
+ continue;
+
+ for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
+ if (imc->hbm_mc) {
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm0,
+ res_cfg->offsets_demand_hbm0,
+ NULL);
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm1,
+ res_cfg->offsets_demand_hbm1,
+ NULL);
+ } else {
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub,
+ res_cfg->offsets_demand,
+ res_cfg->offsets_demand2);
+ }
+ }
+ }
}
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
@@ -138,14 +186,33 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
struct skx_imc *imc = &res->dev->imc[res->imc];
u32 log0, log1, log2, log3, log4;
u32 corr0, corr1, corr2, corr3;
+ u32 lxg0, lxg1, lxg3, lxg4;
+ u32 *xffsets = NULL;
u64 log2a, log5;
+ u64 lxg2a, lxg5;
u32 *offsets;
- int n;
+ int n, pch;
if (!imc->mbase)
return;
- offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand;
+ if (imc->hbm_mc) {
+ pch = res->cs & 1;
+
+ if (pch)
+ offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
+ res_cfg->offsets_demand_hbm1;
+ else
+ offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
+ res_cfg->offsets_demand_hbm0;
+ } else {
+ if (scrub_err) {
+ offsets = res_cfg->offsets_scrub;
+ } else {
+ offsets = res_cfg->offsets_demand;
+ xffsets = res_cfg->offsets_demand2;
+ }
+ }
log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
@@ -153,20 +220,52 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
+ if (xffsets) {
+ lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
+ lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
+ lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
+ lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
+ lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
+ }
+
if (res_cfg->type == SPR) {
log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]",
+ n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
log0, log1, log2a, log3, log4, log5);
+
+ if (len - n > 0) {
+ if (xffsets) {
+ lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
+ n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
+ lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
+ } else {
+ n += snprintf(msg + n, len - n, "]");
+ }
+ }
} else {
log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
log0, log1, log2, log3, log4, log5);
}
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+ if (imc->hbm_mc) {
+ if (pch) {
+ corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
+ corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
+ corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
+ corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
+ } else {
+ corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
+ corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
+ corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
+ corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+ }
+ } else {
+ corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
+ corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
+ corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
+ corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+ }
if (len - n > 0)
snprintf(msg + n, len - n,
@@ -177,9 +276,16 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
corr3 & 0xffff, corr3 >> 16);
/* Clear status bits */
- if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
- log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ if (retry_rd_err_log == 2) {
+ if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
+ log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+ I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ }
+
+ if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
+ lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+ I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
+ }
}
}
@@ -231,6 +337,103 @@ static bool i10nm_check_2lm(struct res_config *cfg)
return false;
}
+/*
+ * Check whether the error comes from DDRT by ICX/Tremont model specific error code.
+ * Refer to SDM vol3B 16.11.3 Intel IMC MC error codes for IA32_MCi_STATUS.
+ */
+static bool i10nm_mscod_is_ddrt(u32 mscod)
+{
+ switch (mscod) {
+ case 0x0106: case 0x0107:
+ case 0x0800: case 0x0804:
+ case 0x0806 ... 0x0808:
+ case 0x080a ... 0x080e:
+ case 0x0810: case 0x0811:
+ case 0x0816: case 0x081e:
+ case 0x081f:
+ return true;
+ }
+
+ return false;
+}
+
+static bool i10nm_mc_decode_available(struct mce *mce)
+{
+ u8 bank;
+
+ if (!decoding_via_mca || mem_cfg_2lm)
+ return false;
+
+ if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
+ != (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
+ return false;
+
+ bank = mce->bank;
+
+ switch (res_cfg->type) {
+ case I10NM:
+ if (bank < 13 || bank > 26)
+ return false;
+
+ /* DDRT errors can't be decoded from MCA bank registers */
+ if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
+ return false;
+
+ if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
+ return false;
+
+ /* Check whether one of {13,14,17,18,21,22,25,26} */
+ return ((bank - 13) & BIT(1)) == 0;
+ default:
+ return false;
+ }
+}
+
+static bool i10nm_mc_decode(struct decoded_addr *res)
+{
+ struct mce *m = res->mce;
+ struct skx_dev *d;
+ u8 bank;
+
+ if (!i10nm_mc_decode_available(m))
+ return false;
+
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ if (d->imc[0].src_id == m->socketid) {
+ res->socket = m->socketid;
+ res->dev = d;
+ break;
+ }
+ }
+
+ switch (res_cfg->type) {
+ case I10NM:
+ bank = m->bank - 13;
+ res->imc = bank / 4;
+ res->channel = bank % 2;
+ break;
+ default:
+ return false;
+ }
+
+ if (!res->dev) {
+ skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
+ m->socketid, res->imc);
+ return false;
+ }
+
+ res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
+ res->row = GET_BITFIELD(m->misc, 19, 39);
+ res->bank_group = GET_BITFIELD(m->misc, 40, 41);
+ res->bank_address = GET_BITFIELD(m->misc, 42, 43);
+ res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
+ res->rank = GET_BITFIELD(m->misc, 56, 58);
+ res->dimm = res->rank >> 2;
+ res->rank = res->rank % 4;
+
+ return true;
+}
+
static int i10nm_get_ddr_munits(void)
{
struct pci_dev *mdev;
@@ -420,7 +623,12 @@ static struct res_config spr_cfg = {
.sad_all_devfn = PCI_DEVFN(10, 0),
.sad_all_offset = 0x300,
.offsets_scrub = offsets_scrub_spr,
+ .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
+ .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
.offsets_demand = offsets_demand_spr,
+ .offsets_demand2 = offsets_demand2_spr,
+ .offsets_demand_hbm0 = offsets_demand_spr_hbm0,
+ .offsets_demand_hbm1 = offsets_demand_spr_hbm1,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -574,7 +782,8 @@ static int __init i10nm_init(void)
return -ENODEV;
}
- skx_set_mem_cfg(i10nm_check_2lm(cfg));
+ mem_cfg_2lm = i10nm_check_2lm(cfg);
+ skx_set_mem_cfg(mem_cfg_2lm);
rc = i10nm_get_ddr_munits();
@@ -626,9 +835,11 @@ static int __init i10nm_init(void)
setup_i10nm_debug();
if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
- skx_set_decode(NULL, show_retry_rd_err_log);
+ skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
+ } else {
+ skx_set_decode(i10nm_mc_decode, NULL);
}
i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
@@ -658,6 +869,34 @@ static void __exit i10nm_exit(void)
module_init(i10nm_init);
module_exit(i10nm_exit);
+static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+
+ if (ret || val > 1)
+ return -EINVAL;
+
+ if (val && mem_cfg_2lm) {
+ i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
+ return -EIO;
+ }
+
+ ret = param_set_int(buf, kp);
+
+ return ret;
+}
+
+static const struct kernel_param_ops decoding_via_mca_param_ops = {
+ .set = set_decoding_via_mca,
+ .get = param_get_int,
+};
+
+module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
+MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");
+
module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clears valid/UC bits after reading.)");
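The new decoding_via_mca parameter above uses module_param_cb() with a custom kernel_param_ops.set so the value can be validated (0/1 only) and rejected at runtime when the memory is configured as 2LM, while reads go through the stock param_get_int(). A reduced sketch of a boolean-style parameter with a validating setter; the parameter name is illustrative:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int demo_flag;

static int demo_flag_set(const char *buf, const struct kernel_param *kp)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val > 1)
		return -EINVAL;		/* accept only 0 or 1 */

	return param_set_int(buf, kp);	/* store via the stock int setter */
}

static const struct kernel_param_ops demo_flag_ops = {
	.set = demo_flag_set,
	.get = param_get_int,
};

module_param_cb(demo_flag, &demo_flag_ops, &demo_flag, 0644);
MODULE_PARM_DESC(demo_flag, "demo_flag: 0=off(default), 1=enable");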
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 4f28b8c8d378..61adaa872ba7 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1193,7 +1193,7 @@ static int __init i7300_init(void)
}
/**
- * i7300_init() - Unregisters the driver
+ * i7300_exit() - Unregisters the driver
*/
static void __exit i7300_exit(void)
{
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 9a9ff5ad611a..9ef13570f2e5 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -20,11 +20,15 @@
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
 * 5918: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 190f: 6th Gen Core Dual-Core Processor Host Bridge/DRAM Registers
+ * 191f: 6th Gen Core Quad-Core Processor Host Bridge/DRAM Registers
* 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
*
* Based on Intel specification:
* https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/desktop-6th-gen-core-family-datasheet-vol-2.pdf
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v6-vol-2-datasheet.pdf
* https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
* https://www.intel.com/content/www/us/en/products/docs/processors/core/8th-gen-core-family-datasheet-vol-2.html
*
@@ -53,15 +57,17 @@
#define ie31200_printk(level, fmt, arg...) \
edac_printk(level, "ie31200", fmt, ##arg)
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x190F
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x1918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_10 0x191F
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x5918
/* Coffee Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00
@@ -80,6 +86,8 @@
#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
(((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
+ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
+ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
(((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
@@ -577,6 +585,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 0bc670778c99..046969b4e82e 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -178,11 +178,6 @@ struct ppc4xx_ecc_status {
u32 wmirq;
};
-/* Function Prototypes */
-
-static int ppc4xx_edac_probe(struct platform_device *device);
-static int ppc4xx_edac_remove(struct platform_device *device);
-
/* Global Variables */
/*
@@ -197,15 +192,6 @@ static const struct of_device_id ppc4xx_edac_match[] = {
};
MODULE_DEVICE_TABLE(of, ppc4xx_edac_match);
-static struct platform_driver ppc4xx_edac_driver = {
- .probe = ppc4xx_edac_probe,
- .remove = ppc4xx_edac_remove,
- .driver = {
- .name = PPC4XX_EDAC_MODULE_NAME,
- .of_match_table = ppc4xx_edac_match,
- },
-};
-
/*
* TODO: The row and channel parameters likely need to be dynamically
* set based on the aforementioned variant controller realizations.
@@ -1391,6 +1377,15 @@ ppc4xx_edac_opstate_init(void)
EDAC_OPSTATE_UNKNOWN_STR)));
}
+static struct platform_driver ppc4xx_edac_driver = {
+ .probe = ppc4xx_edac_probe,
+ .remove = ppc4xx_edac_remove,
+ .driver = {
+ .name = PPC4XX_EDAC_MODULE_NAME,
+ .of_match_table = ppc4xx_edac_match,
+ },
+};
+
/**
* ppc4xx_edac_init - driver/module insertion entry point
*
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 9678ab97c7ac..8e39370fdb5c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -335,6 +335,12 @@ struct sbridge_info {
struct sbridge_channel {
u32 ranks;
u32 dimms;
+ struct dimm {
+ u32 rowbits;
+ u32 colbits;
+ u32 bank_xor_enable;
+ u32 amap_fine;
+ } dimm[MAX_DIMMS];
};
struct pci_id_descr {
@@ -1603,7 +1609,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
banks = 8;
for (i = 0; i < channels; i++) {
- u32 mtr;
+ u32 mtr, amap = 0;
int max_dimms_per_channel;
@@ -1615,6 +1621,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
if (!pvt->pci_tad[i])
continue;
+ pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
}
for (j = 0; j < max_dimms_per_channel; j++) {
@@ -1627,6 +1634,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
mtr_regs[j], &mtr);
}
edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
+
if (IS_DIMM_PRESENT(mtr)) {
if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
@@ -1661,6 +1669,11 @@ static int __populate_dimms(struct mem_ctl_info *mci,
dimm->dtype = pvt->info.get_width(pvt, mtr);
dimm->mtype = mtype;
dimm->edac_mode = mode;
+ pvt->channel[i].dimm[j].rowbits = order_base_2(rows);
+ pvt->channel[i].dimm[j].colbits = order_base_2(cols);
+ pvt->channel[i].dimm[j].bank_xor_enable =
+ GET_BITFIELD(pvt->info.mcmtr, 9, 9);
+ pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0);
snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
@@ -1922,6 +1935,99 @@ static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
return NULL;
}
+static u8 sb_close_row[] = {
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+
+static u8 sb_close_column[] = {
+ 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+
+static u8 sb_open_row[] = {
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+
+static u8 sb_open_column[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+
+static u8 sb_open_fine_column[] = {
+ 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int sb_bits(u64 addr, int nbits, u8 *bits)
+{
+ int i, res = 0;
+
+ for (i = 0; i < nbits; i++)
+ res |= ((addr >> bits[i]) & 1) << i;
+ return res;
+}
+
+static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+ int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+ if (do_xor)
+ ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+ return ret;
+}
+
+static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
+ u64 rank_addr, char *msg)
+{
+ int dimmno = 0;
+ int row, col, bank_address, bank_group;
+ struct sbridge_pvt *pvt;
+ u32 bg0 = 0, rowbits = 0, colbits = 0;
+ u32 amap_fine = 0, bank_xor_enable = 0;
+
+ dimmno = (rank < 12) ? rank / 4 : 2;
+ pvt = mci->pvt_info;
+ amap_fine = pvt->channel[ch].dimm[dimmno].amap_fine;
+ bg0 = amap_fine ? 6 : 13;
+ rowbits = pvt->channel[ch].dimm[dimmno].rowbits;
+ colbits = pvt->channel[ch].dimm[dimmno].colbits;
+ bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable;
+
+ if (pvt->is_lockstep) {
+ pr_warn_once("LockStep row/column decode is not supported yet!\n");
+ msg[0] = '\0';
+ return false;
+ }
+
+ if (pvt->is_close_pg) {
+ row = sb_bits(rank_addr, rowbits, sb_close_row);
+ col = sb_bits(rank_addr, colbits, sb_close_column);
+ col |= 0x400; /* C10 is autoprecharge, always set */
+ bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28);
+ bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21);
+ } else {
+ row = sb_bits(rank_addr, rowbits, sb_open_row);
+ if (amap_fine)
+ col = sb_bits(rank_addr, colbits, sb_open_fine_column);
+ else
+ col = sb_bits(rank_addr, colbits, sb_open_column);
+ bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23);
+ bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21);
+ }
+
+ row &= (1u << rowbits) - 1;
+
+ sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d",
+ row, col, bank_address, bank_group);
+ return true;
+}
+
+static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
+ u64 rank_addr, char *msg)
+{
+ pr_warn_once("DDR3 row/column decode not support yet!\n");
+ msg[0] = '\0';
+ return false;
+}
+
static int get_memory_error_data(struct mem_ctl_info *mci,
u64 addr,
u8 *socket, u8 *ha,
@@ -1937,12 +2043,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
int interleave_mode, shiftup = 0;
unsigned int sad_interleave[MAX_INTERLEAVE];
u32 reg, dram_rule;
- u8 ch_way, sck_way, pkg, sad_ha = 0;
+ u8 ch_way, sck_way, pkg, sad_ha = 0, rankid = 0;
u32 tad_offset;
u32 rir_way;
u32 mb, gb;
u64 ch_addr, offset, limit = 0, prv = 0;
-
+ u64 rank_addr;
+ enum mem_type mtype;
/*
* Step 0) Check if the address is at special memory ranges
@@ -2226,6 +2333,28 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
*rank = RIR_RNK_TGT(pvt->info.type, reg);
+ if (pvt->info.type == BROADWELL) {
+ if (pvt->is_close_pg)
+ shiftup = 6;
+ else
+ shiftup = 13;
+
+ rank_addr = ch_addr >> shiftup;
+ rank_addr /= (1 << rir_way);
+ rank_addr <<= shiftup;
+ rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0);
+ rank_addr -= RIR_OFFSET(pvt->info.type, reg);
+
+ mtype = pvt->info.get_memory_type(pvt);
+ rankid = *rank;
+ if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
+ sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg);
+ else
+ sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg);
+ } else {
+ msg[0] = '\0';
+ }
+
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
ch_addr,
@@ -2950,7 +3079,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
enum hw_event_mc_err_type tp_event;
- char *optype, msg[256];
+ char *optype, msg[256], msg_full[512];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -3089,18 +3218,17 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
*/
if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
channel = first_channel;
-
- snprintf(msg, sizeof(msg),
- "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
+ snprintf(msg_full, sizeof(msg_full),
+ "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
area_type,
mscod, errcode,
socket, ha,
channel_mask,
- rank);
+ rank, msg);
- edac_dbg(0, "%s\n", msg);
+ edac_dbg(0, "%s\n", msg_full);
/* FIXME: need support for channel mask */
@@ -3111,7 +3239,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, dimm, -1,
- optype, msg);
+ optype, msg_full);
return;
err_parsing:
edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
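The sb_edac decode helpers added above reconstruct DRAM row/column/bank numbers by gathering scattered rank-address bits: sb_bits() walks a per-layout table of source bit positions and packs bit i of the result from bits[i] of the address, and sb_bank_bits() optionally XORs in two extra address bits when bank XOR is enabled. A tiny standalone C illustration of the gather step; the bit table below is made up for the example and is not one of the driver's layouts:

#include <stdint.h>
#include <stdio.h>

/* Pack result bit i from address bit positions[i]. */
static unsigned int gather_bits(uint64_t addr, int nbits, const uint8_t *positions)
{
	unsigned int res = 0;

	for (int i = 0; i < nbits; i++)
		res |= ((addr >> positions[i]) & 1u) << i;
	return res;
}

int main(void)
{
	/* Illustrative layout only: row bit 0 comes from address bit 15, etc. */
	const uint8_t row_bits[] = { 15, 16, 17, 18, 20 };
	uint64_t rank_addr = 0x158000;	/* arbitrary example address */

	printf("row = 0x%x\n", gather_bits(rank_addr, 5, row_bits));
	return 0;
}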
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 1abc020d49ab..7e2762f62eec 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -714,8 +714,13 @@ static int __init skx_init(void)
skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
- if (nvdimm_count && skx_adxl_get() == -ENODEV)
- skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
+ if (nvdimm_count && skx_adxl_get() != -ENODEV) {
+ skx_set_decode(NULL, skx_show_retry_rd_err_log);
+ } else {
+ if (nvdimm_count)
+ skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
+ skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
+ }
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 19c17c5198c5..f0f8e98f6efb 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -27,9 +27,11 @@ static const char * const component_names[] = {
[INDEX_MEMCTRL] = "MemoryControllerId",
[INDEX_CHANNEL] = "ChannelId",
[INDEX_DIMM] = "DimmSlotId",
+ [INDEX_CS] = "ChipSelect",
[INDEX_NM_MEMCTRL] = "NmMemoryControllerId",
[INDEX_NM_CHANNEL] = "NmChannelId",
[INDEX_NM_DIMM] = "NmDimmSlotId",
+ [INDEX_NM_CS] = "NmChipSelect",
};
static int component_indices[ARRAY_SIZE(component_names)];
@@ -40,7 +42,7 @@ static char *adxl_msg;
static unsigned long adxl_nm_bitmap;
static char skx_msg[MSG_SIZE];
-static skx_decode_f skx_decode;
+static skx_decode_f driver_decode;
static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
@@ -139,10 +141,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
(int)adxl_values[component_indices[INDEX_NM_CHANNEL]] : -1;
res->dimm = (adxl_nm_bitmap & BIT_NM_DIMM) ?
(int)adxl_values[component_indices[INDEX_NM_DIMM]] : -1;
+ res->cs = (adxl_nm_bitmap & BIT_NM_CS) ?
+ (int)adxl_values[component_indices[INDEX_NM_CS]] : -1;
} else {
res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+ res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
if (res->imc > NUM_IMC - 1 || res->imc < 0) {
@@ -173,6 +178,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
break;
}
+ res->decoded_by_adxl = true;
+
return true;
}
@@ -183,7 +190,7 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
- skx_decode = decode;
+ driver_decode = decode;
skx_show_retry_rd_err_log = show_retry_log;
}
@@ -591,19 +598,19 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
break;
}
}
- if (adxl_component_count) {
+ if (res->decoded_by_adxl) {
len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
len = snprintf(skx_msg, MSG_SIZE,
- "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x",
+ "%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode,
res->socket, res->imc, res->rank,
- res->bank_group, res->bank_address, res->row, res->column);
+ res->row, res->column, res->bank_address, res->bank_group);
}
if (skx_show_retry_rd_err_log)
@@ -649,13 +656,14 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
+ res.mce = mce;
res.addr = mce->addr;
- if (adxl_component_count) {
- if (!skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce)))
+ /* Try driver decoder first */
+ if (!(driver_decode && driver_decode(&res))) {
+ /* Then try firmware decoder (ACPI DSM methods) */
+ if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))))
return NOTIFY_DONE;
- } else if (!skx_decode || !skx_decode(&res)) {
- return NOTIFY_DONE;
}
mci = res.dev->imc[res.imc].mci;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 03ac067a80b9..0cbadd3d2cd3 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -10,6 +10,7 @@
#define _SKX_COMM_EDAC_H
#include <linux/bits.h>
+#include <asm/mce.h>
#define MSG_SIZE 1024
@@ -52,6 +53,9 @@
#define IS_DIMM_PRESENT(r) GET_BITFIELD(r, 15, 15)
#define IS_NVDIMM_PRESENT(r, i) GET_BITFIELD(r, i, i)
+#define MCI_MISC_ECC_MODE(m) (((m) >> 59) & 15)
+#define MCI_MISC_ECC_DDRT 8 /* read from DDRT */
+
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -82,6 +86,7 @@ struct skx_dev {
struct pci_dev *edev;
u32 retry_rd_err_log_s;
u32 retry_rd_err_log_d;
+ u32 retry_rd_err_log_d2;
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -108,18 +113,22 @@ enum {
INDEX_MEMCTRL,
INDEX_CHANNEL,
INDEX_DIMM,
+ INDEX_CS,
INDEX_NM_FIRST,
INDEX_NM_MEMCTRL = INDEX_NM_FIRST,
INDEX_NM_CHANNEL,
INDEX_NM_DIMM,
+ INDEX_NM_CS,
INDEX_MAX
};
#define BIT_NM_MEMCTRL BIT_ULL(INDEX_NM_MEMCTRL)
#define BIT_NM_CHANNEL BIT_ULL(INDEX_NM_CHANNEL)
#define BIT_NM_DIMM BIT_ULL(INDEX_NM_DIMM)
+#define BIT_NM_CS BIT_ULL(INDEX_NM_CS)
struct decoded_addr {
+ struct mce *mce;
struct skx_dev *dev;
u64 addr;
int socket;
@@ -129,6 +138,7 @@ struct decoded_addr {
int sktways;
int chanways;
int dimm;
+ int cs;
int rank;
int channel_rank;
u64 rank_address;
@@ -136,6 +146,7 @@ struct decoded_addr {
int column;
int bank_address;
int bank_group;
+ bool decoded_by_adxl;
};
struct res_config {
@@ -154,7 +165,12 @@ struct res_config {
int sad_all_offset;
/* Offsets of retry_rd_err_log registers */
u32 *offsets_scrub;
+ u32 *offsets_scrub_hbm0;
+ u32 *offsets_scrub_hbm1;
u32 *offsets_demand;
+ u32 *offsets_demand2;
+ u32 *offsets_demand_hbm0;
+ u32 *offsets_demand_hbm1;
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
diff --git a/drivers/edac/wq.c b/drivers/edac/wq.c
index d021d287eaec..ad3f516627c5 100644
--- a/drivers/edac/wq.c
+++ b/drivers/edac/wq.c
@@ -37,7 +37,6 @@ int edac_workqueue_setup(void)
void edac_workqueue_teardown(void)
{
- flush_workqueue(wq);
destroy_workqueue(wq);
wq = NULL;
}
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 02ba770acb27..e6e448f6ea2f 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -646,13 +646,11 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int rt8973a_muic_i2c_remove(struct i2c_client *i2c)
+static void rt8973a_muic_i2c_remove(struct i2c_client *i2c)
{
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
regmap_del_irq_chip(info->irq, info->irq_data);
-
- return 0;
}
static const struct of_device_id rt8973a_dt_match[] = {
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 641a91819088..99d439480612 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -167,7 +167,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
return valid;
}
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
{
int ret;
struct device *dev;
@@ -183,6 +184,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
ffa_dev->vm_id = vm_id;
+ ffa_dev->ops = ops;
uuid_copy(&ffa_dev->uuid, uuid);
ret = device_register(&ffa_dev->dev);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index ec731e9e942b..d5e86ef40b89 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -163,6 +163,7 @@ struct ffa_drv_info {
struct mutex tx_lock; /* lock to protect Tx buffer */
void *rx_buffer;
void *tx_buffer;
+ bool mem_ops_native;
};
static struct ffa_drv_info *drv_info;
@@ -263,18 +264,24 @@ static int ffa_rxtx_unmap(u16 vm_id)
return 0;
}
+#define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0)
+
/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_partitions)
{
- int count;
+ int idx, count, flags = 0, sz, buf_sz;
ffa_value_t partition_info;
+ if (!buffer || !num_partitions) /* Just get the count for now */
+ flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
+
mutex_lock(&drv_info->rx_lock);
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_PARTITION_INFO_GET,
.a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
+ .a5 = flags,
}, &partition_info);
if (partition_info.a0 == FFA_ERROR) {
@@ -284,8 +291,19 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
count = partition_info.a2;
+ if (drv_info->version > FFA_VERSION_1_0) {
+ buf_sz = sz = partition_info.a3;
+ if (sz > sizeof(*buffer))
+ buf_sz = sizeof(*buffer);
+ } else {
+ /* FFA_VERSION_1_0 lacks size in the response */
+ buf_sz = sz = 8;
+ }
+
if (buffer && count <= num_partitions)
- memcpy(buffer, drv_info->rx_buffer, sizeof(*buffer) * count);
+ for (idx = 0; idx < count; idx++)
+ memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
+ buf_sz);
ffa_rx_release();
@@ -571,6 +589,39 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags)
return 0;
}
+static int ffa_features(u32 func_feat_id, u32 input_props,
+ u32 *if_props_1, u32 *if_props_2)
+{
+ ffa_value_t id;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
+ pr_err("%s: Invalid Parameters: %x, %x", __func__,
+ func_feat_id, input_props);
+ return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
+ }
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
+ }, &id);
+
+ if (id.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)id.a2);
+
+ if (if_props_1)
+ *if_props_1 = id.a2;
+ if (if_props_2)
+ *if_props_2 = id.a3;
+
+ return 0;
+}
+
+static void ffa_set_up_mem_ops_native_flag(void)
+{
+ if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
+ !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
+ drv_info->mem_ops_native = true;
+}
+
static u32 ffa_api_version_get(void)
{
return drv_info->version;
@@ -597,11 +648,19 @@ static int ffa_partition_info_get(const char *uuid_str,
return 0;
}
-static void ffa_mode_32bit_set(struct ffa_device *dev)
+static void _ffa_mode_32bit_set(struct ffa_device *dev)
{
dev->mode_32bit = true;
}
+static void ffa_mode_32bit_set(struct ffa_device *dev)
+{
+ if (drv_info->version > FFA_VERSION_1_0)
+ return;
+
+ _ffa_mode_32bit_set(dev);
+}
+
static int ffa_sync_send_receive(struct ffa_device *dev,
struct ffa_send_direct_data *data)
{
@@ -609,17 +668,15 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
dev->mode_32bit, data);
}
-static int
-ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
- if (dev->mode_32bit)
- return ffa_memory_ops(FFA_MEM_SHARE, args);
+ if (drv_info->mem_ops_native)
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
- return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+ return ffa_memory_ops(FFA_MEM_SHARE, args);
}
-static int
-ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
/* Note that upon a successful MEM_LEND request the caller
* must ensure that the memory region specified is not accessed
@@ -628,36 +685,47 @@ ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
* however on systems without a hypervisor the responsibility
* falls to the calling kernel driver to prevent access.
*/
- if (dev->mode_32bit)
- return ffa_memory_ops(FFA_MEM_LEND, args);
+ if (drv_info->mem_ops_native)
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
- return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+ return ffa_memory_ops(FFA_MEM_LEND, args);
}
-static const struct ffa_dev_ops ffa_ops = {
+static const struct ffa_info_ops ffa_drv_info_ops = {
.api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get,
+};
+
+static const struct ffa_msg_ops ffa_drv_msg_ops = {
.mode_32bit_set = ffa_mode_32bit_set,
.sync_send_receive = ffa_sync_send_receive,
+};
+
+static const struct ffa_mem_ops ffa_drv_mem_ops = {
.memory_reclaim = ffa_memory_reclaim,
.memory_share = ffa_memory_share,
.memory_lend = ffa_memory_lend,
};
-const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
-{
- if (ffa_device_is_valid(dev))
- return &ffa_ops;
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ffa_dev_ops_get);
+static const struct ffa_ops ffa_drv_ops = {
+ .info_ops = &ffa_drv_info_ops,
+ .msg_ops = &ffa_drv_msg_ops,
+ .mem_ops = &ffa_drv_mem_ops,
+};
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
int count, idx;
struct ffa_partition_info *pbuf, *tpbuf;
+ /*
+ * FF-A v1.1 provides UUID for each partition as part of the discovery
+ * API, the discovered UUID must be populated in the device's UUID and
+ * there is no need to copy the same from the driver table.
+ */
+ if (drv_info->version > FFA_VERSION_1_0)
+ return;
+
count = ffa_partition_probe(uuid, &pbuf);
if (count <= 0)
return;
@@ -671,6 +739,7 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
static void ffa_setup_partitions(void)
{
int count, idx;
+ uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_partition_info *pbuf, *tpbuf;
@@ -681,19 +750,24 @@ static void ffa_setup_partitions(void)
}
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
- /* Note that the &uuid_null parameter will require
+ import_uuid(&uuid, (u8 *)tpbuf->uuid);
+
+	/* Note that if the UUID is uuid_null, that will require
* ffa_device_match() to find the UUID of this partition id
- * with help of ffa_device_match_uuid(). Once the FF-A spec
- * is updated to provide correct UUID here for each partition
- * as part of the discovery API, we need to pass the
- * discovered UUID here instead.
+ * with help of ffa_device_match_uuid(). FF-A v1.1 and above
+ * provides UUID here for each partition as part of the
+ * discovery API and the same is passed.
*/
- ffa_dev = ffa_device_register(&uuid_null, tpbuf->id);
+ ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+ _ffa_mode_32bit_set(ffa_dev);
}
kfree(pbuf);
}
@@ -751,6 +825,8 @@ static int __init ffa_init(void)
ffa_setup_partitions();
+ ffa_set_up_mem_ops_native_flag();
+
return 0;
free_pages:
if (drv_info->tx_buffer)
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 3ed7ae0d6781..96060bf90a24 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
+ struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
- struct scmi_clock_info *clk = ci->clk + clk_id;
+ if (clk_id >= ci->num_clocks)
+ return NULL;
+
+ clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
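This clock hunk, like the reset hunk that follows, validates the caller-supplied identifier against the count discovered from the platform before using it to index the per-clock (or per-domain) array, instead of dereferencing whatever lies past the end. A compressed sketch of the guard as it ends up in the clock path:

static const struct scmi_clock_info *
demo_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct clock_info *ci = ph->get_priv(ph);

	if (clk_id >= ci->num_clocks)	/* id comes from the caller: validate it */
		return NULL;

	return ci->clk + clk_id;
}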
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 8abace56b958..f42dad997ac9 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
* @channel_id: OP-TEE channel ID used for this transport
* @tee_session: TEE session identifier
* @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 673f3eb498f4..e9afa8cab730 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = ph->get_priv(ph);
- struct reset_dom_info *rdom = pi->dom_info + domain;
+ struct reset_dom_info *rdom;
- if (rdom->async_reset)
+ if (domain >= pi->num_domains)
+ return -EINVAL;
+
+ rdom = pi->dom_info + domain;
+ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
flags |= ASYNCHRONOUS_RESET;
ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
- if (rdom->async_reset)
+ if (flags & ASYNCHRONOUS_RESET)
ret = ph->xops->do_xfer_with_response(ph, t);
else
ret = ph->xops->do_xfer(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 581d34c95769..0e05a79de82d 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
@@ -53,27 +52,6 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
return scmi_pd_power(domain, false);
}
-static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- int ret;
-
- ret = pm_clk_create(dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clks(dev);
- if (ret >= 0)
- return 0;
-
- pm_clk_destroy(dev);
- return ret;
-}
-
-static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- pm_clk_destroy(dev);
-}
-
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
@@ -124,10 +102,6 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
- scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
- scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK |
- GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
@@ -138,9 +112,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ dev_set_drvdata(dev, scmi_pd_data);
+
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER, "genpd" },
{ },
@@ -150,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 7288c6117838..0b5853fa9d87 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
{
int ret;
struct scmi_xfer *t;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
sizeof(__le32), sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
+ s = si->sensors + sensor_id;
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
struct sensors_info *si = ph->get_priv(ph);
+ if (sensor_id >= si->num_sensors)
+ return NULL;
+
return si->sensors + sensor_id;
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f191a1f901ac..0eb6b617f709 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
- dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+ dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
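For reference, both forms read the same three big-endian bytes of the SMBIOS 3.x entry point (major, minor, docrev); a minimal sketch with made-up byte values, not part of the patch itself:

	/* Suppose buf[6..9] = { 0x18, 0x03, 0x02, 0x00 }: length 0x18, version 3.2, docrev 0. */
	u32 old_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;	/* 0x18030200 & 0xFFFFFF */
	u32 new_ver = get_unaligned_be24(buf + 7);		/* buf[7..9], big-endian */
	/* old_ver == new_ver == 0x030200; the be24 form states the intent directly instead of masking off buf[6]. */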
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 4dde8edd53b6..3e8d4b51a814 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -243,29 +243,6 @@ failed:
}
/**
- * efi_capsule_flush - called by file close or file flush
- * @file: file pointer
- * @id: not used
- *
- * If a capsule is being partially uploaded then calling this function
- * will be treated as upload termination and will free those completed
- * buffer pages and -ECANCELED will be returned.
- **/
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
-{
- int ret = 0;
- struct capsule_info *cap_info = file->private_data;
-
- if (cap_info->index > 0) {
- pr_err("capsule upload not complete\n");
- efi_free_all_buff_pages(cap_info);
- ret = -ECANCELED;
- }
-
- return ret;
-}
-
-/**
* efi_capsule_release - called by file close
* @inode: not used
* @file: file pointer
@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
{
struct capsule_info *cap_info = file->private_data;
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
kfree(cap_info->pages);
kfree(cap_info->phys);
kfree(file->private_data);
@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
.owner = THIS_MODULE,
.open = efi_capsule_open,
.write = efi_capsule_write,
- .flush = efi_capsule_flush,
.release = efi_capsule_release,
.llseek = no_llseek,
};
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index eb9c65f97841..f80d87c199c3 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -15,9 +15,11 @@
static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
- char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */
struct acpi_device *adev;
struct device *phys_dev;
+ char hid[ACPI_ID_LEN];
+ u64 uid;
+ int ret;
if (node->header.length != 12)
return -EINVAL;
@@ -27,12 +29,12 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
node->acpi.hid >> 16);
- sprintf(uid, "%u", node->acpi.uid);
for_each_acpi_dev_match(adev, hid, NULL, -1) {
- if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid))
+ ret = acpi_dev_uid_to_integer(adev, &uid);
+ if (ret == 0 && node->acpi.uid == uid)
break;
- if (!adev->pnp.unique_id && node->acpi.uid == 0)
+ if (ret == -ENODATA && node->acpi.uid == 0)
break;
}
if (!adev)
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8ced7af8e56d..4f9fb086eab7 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
return NOTIFY_DONE;
wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ if (!wdata)
+ return NOTIFY_DONE;
+
for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
wdata[l] = str[l];
wdata[l] = L'\0';
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d0537573501e..b43fdb319fd4 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -37,8 +37,17 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# disable CFI
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# disable LTO
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 577173ee1f83..60973e84d7ab 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void)
if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return EFI_SUCCESS;
- tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
- if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
+ if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8a18930f3eb6..516f4f0069bd 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -14,7 +14,7 @@
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/*
* See if a user has put the shim into insecure mode. If so, and if the
- * variable doesn't have the runtime attribute set, we might as well
- * honor that.
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 05ae8bcc9d67..7a7abc8959d2 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -220,7 +220,6 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unsigned long end, next;
unsigned long rounded_start, rounded_end;
unsigned long unprotect_start, unprotect_size;
- int has_system_memory = 0;
if (efi_dxe_table == NULL)
return;
@@ -517,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index cfb448eabdaa..e7bcfca4159f 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
@@ -163,6 +164,8 @@ int psci_set_osi_mode(bool enable)
PSCI_1_0_SUSPEND_MODE_PC;
err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
+ if (err < 0)
+ pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err);
return psci_to_linux_errno(err);
}
@@ -274,7 +277,7 @@ static void set_conduit(enum arm_smccc_conduit conduit)
psci_conduit = conduit;
}
-static int get_set_conduit_method(struct device_node *np)
+static int get_set_conduit_method(const struct device_node *np)
{
const char *method;
@@ -324,17 +327,130 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
-static int __init psci_features(u32 psci_func_id)
+static int psci_features(u32 psci_func_id)
{
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
psci_func_id, 0, 0);
}
+#ifdef CONFIG_DEBUG_FS
+
+#define PSCI_ID(ver, _name) \
+ { .fn = PSCI_##ver##_FN_##_name, .name = #_name, }
+#define PSCI_ID_NATIVE(ver, _name) \
+ { .fn = PSCI_FN_NATIVE(ver, _name), .name = #_name, }
+
+/* A table of all optional functions */
+static const struct {
+ u32 fn;
+ const char *name;
+} psci_fn_ids[] = {
+ PSCI_ID_NATIVE(0_2, MIGRATE),
+ PSCI_ID(0_2, MIGRATE_INFO_TYPE),
+ PSCI_ID_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
+ PSCI_ID(1_0, CPU_FREEZE),
+ PSCI_ID_NATIVE(1_0, CPU_DEFAULT_SUSPEND),
+ PSCI_ID_NATIVE(1_0, NODE_HW_STATE),
+ PSCI_ID_NATIVE(1_0, SYSTEM_SUSPEND),
+ PSCI_ID(1_0, SET_SUSPEND_MODE),
+ PSCI_ID_NATIVE(1_0, STAT_RESIDENCY),
+ PSCI_ID_NATIVE(1_0, STAT_COUNT),
+ PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
+ PSCI_ID(1_1, MEM_PROTECT),
+ PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
+};
+
+static int psci_debugfs_read(struct seq_file *s, void *data)
+{
+ int feature, type, i;
+ u32 ver;
+
+ ver = psci_ops.get_version();
+ seq_printf(s, "PSCIv%d.%d\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ /* PSCI_FEATURES is available only starting from 1.0 */
+ if (PSCI_VERSION_MAJOR(ver) < 1)
+ return 0;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ ver = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ seq_printf(s, "SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+ } else {
+ seq_puts(s, "SMC Calling Convention v1.0 is assumed\n");
+ }
+
+ feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
+ if (feature < 0) {
+ seq_printf(s, "PSCI_FEATURES(CPU_SUSPEND) error (%d)\n", feature);
+ } else {
+ seq_printf(s, "OSI is %ssupported\n",
+ (feature & BIT(0)) ? "" : "not ");
+ seq_printf(s, "%s StateID format is used\n",
+ (feature & BIT(1)) ? "Extended" : "Original");
+ }
+
+ type = psci_ops.migrate_info_type();
+ if (type == PSCI_0_2_TOS_UP_MIGRATE ||
+ type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ unsigned long cpuid;
+
+ seq_printf(s, "Trusted OS %smigrate capable\n",
+ type == PSCI_0_2_TOS_UP_NO_MIGRATE ? "not " : "");
+ cpuid = psci_migrate_info_up_cpu();
+ seq_printf(s, "Trusted OS resident on physical CPU 0x%lx (#%d)\n",
+ cpuid, resident_cpu);
+ } else if (type == PSCI_0_2_TOS_MP) {
+ seq_puts(s, "Trusted OS migration not required\n");
+ } else {
+ if (type != PSCI_RET_NOT_SUPPORTED)
+ seq_printf(s, "MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(psci_fn_ids); i++) {
+ feature = psci_features(psci_fn_ids[i].fn);
+ if (feature == PSCI_RET_NOT_SUPPORTED)
+ continue;
+ if (feature < 0)
+ seq_printf(s, "PSCI_FEATURES(%s) error (%d)\n",
+ psci_fn_ids[i].name, feature);
+ else
+ seq_printf(s, "%s is supported\n", psci_fn_ids[i].name);
+ }
+
+ return 0;
+}
+
+static int psci_debugfs_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, psci_debugfs_read, NULL);
+}
+
+static const struct file_operations psci_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psci_debugfs_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static int __init psci_debugfs_init(void)
+{
+ return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
+ &psci_debugfs_ops));
+}
+late_initcall(psci_debugfs_init)
+#endif
+
#ifdef CONFIG_CPU_IDLE
static int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
- phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume));
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
@@ -359,7 +475,7 @@ int psci_cpu_suspend_enter(u32 state)
static int psci_system_suspend(unsigned long unused)
{
- phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume));
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
@@ -528,7 +644,7 @@ typedef int (*psci_initcall_t)(const struct device_node *);
*
* Probe based on PSCI PSCI_VERSION function
*/
-static int __init psci_0_2_init(struct device_node *np)
+static int __init psci_0_2_init(const struct device_node *np)
{
int err;
@@ -549,7 +665,7 @@ static int __init psci_0_2_init(struct device_node *np)
/*
* PSCI < v0.2 get PSCI Function IDs via DT.
*/
-static int __init psci_0_1_init(struct device_node *np)
+static int __init psci_0_1_init(const struct device_node *np)
{
u32 id;
int err;
@@ -585,7 +701,7 @@ static int __init psci_0_1_init(struct device_node *np)
return 0;
}
-static int __init psci_1_0_init(struct device_node *np)
+static int __init psci_1_0_init(const struct device_node *np)
{
int err;
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 0d51eef2472f..db3d08a01209 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -129,8 +129,6 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
-extern void __qcom_scm_init(void);
-
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 1f276f108cc9..3fd3563d962b 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -94,6 +94,10 @@ static __init int sysfb_init(void)
name = "efi-framebuffer";
else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
name = "vesa-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
+ name = "vga-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
+ name = "ega-framebuffer";
else
name = "platform-framebuffer";
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 0c440afd5224..9d3874cdaaee 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -377,18 +377,11 @@ static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
if (!filename)
return -ENOENT;
- databuf = kmalloc(count, GFP_KERNEL);
- if (!databuf)
- return -ENOMEM;
-
- if (copy_from_user(databuf, buf, count)) {
- err = -EFAULT;
- goto free_ret;
- }
+ databuf = memdup_user(buf, count);
+ if (IS_ERR(databuf))
+ return PTR_ERR(databuf);
err = mrq_debug_write(bpmp, filename, databuf, count);
-
-free_ret:
kfree(databuf);
return err ?: count;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index d1f652802181..ff5cabe70a2b 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1312,6 +1312,37 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_set_sd_config - PM call to set value of SD config registers
+ * @node: SD node ID
+ * @config: The config type of SD registers
+ * @value: Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
+
+/**
+ * zynqmp_pm_set_gem_config - PM call to set value of GEM config registers
+ * @node: GEM node ID
+ * @config: The config type of GEM registers
+ * @value: Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
+
+/**
* struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
* @subtype: Shutdown subtype
* @name: Matching string for scope argument
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 72c677c910de..133e511355c9 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -148,10 +148,6 @@ static ssize_t flash_count_show(struct device *dev,
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
- flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
- if (!flash_buf)
- return -ENOMEM;
-
if (FLASH_COUNT_SIZE % stride) {
dev_err(sec->dev,
"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
@@ -160,6 +156,10 @@ static ssize_t flash_count_show(struct device *dev,
return -EINVAL;
}
+ flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+ if (!flash_buf)
+ return -ENOMEM;
+
ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
flash_buf, FLASH_COUNT_SIZE / stride);
if (ret) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0642f579196f..3f64345fe40b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -874,10 +874,11 @@ config GPIO_104_IDI_48
module parameter.
config GPIO_F7188X
- tristate "F71869, F71869A, F71882FG, F71889F and F81866 GPIO support"
+ tristate "Fintek and Nuvoton Super-I/O GPIO support"
help
This option enables support for GPIOs found on Fintek Super-I/O
chips F71869, F71869A, F71882FG, F71889F and F81866.
+ It also supports the Nuvoton Super-I/O chip NCT6116D.
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index a41551870759..74cc71bb3984 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -164,6 +164,7 @@ static void dio48e_irq_mask(struct irq_data *data)
dio48egpio->irq_mask &= ~BIT(0);
else
dio48egpio->irq_mask &= ~BIT(1);
+ gpiochip_disable_irq(chip, offset);
if (!dio48egpio->irq_mask)
/* disable interrupts */
@@ -191,6 +192,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
+ gpiochip_enable_irq(chip, offset);
if (offset == 19)
dio48egpio->irq_mask |= BIT(0);
else
@@ -213,12 +215,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip dio48e_irqchip = {
+static const struct irq_chip dio48e_irqchip = {
.name = "104-dio-48e",
.irq_ack = dio48e_irq_ack,
.irq_mask = dio48e_irq_mask,
.irq_unmask = dio48e_irq_unmask,
- .irq_set_type = dio48e_irq_set_type
+ .irq_set_type = dio48e_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
@@ -322,7 +326,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
girq = &dio48egpio->chip.irq;
- girq->chip = &dio48e_irqchip;
+ gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 40be76efeed7..3286b914a2cf 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -113,6 +113,7 @@ static void idi_48_irq_mask(struct irq_data *data)
spin_lock_irqsave(&idi48gpio->lock, flags);
idi48gpio->irq_mask[boundary] &= ~mask;
+ gpiochip_disable_irq(chip, offset);
/* Exit early if there are still input lines with IRQ unmasked */
if (idi48gpio->irq_mask[boundary])
@@ -140,6 +141,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
prev_irq_mask = idi48gpio->irq_mask[boundary];
+ gpiochip_enable_irq(chip, offset);
idi48gpio->irq_mask[boundary] |= mask;
/* Exit early if IRQ was already unmasked for this boundary */
@@ -164,12 +166,14 @@ static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idi_48_irqchip = {
+static const struct irq_chip idi_48_irqchip = {
.name = "104-idi-48",
.irq_ack = idi_48_irq_ack,
.irq_mask = idi_48_irq_mask,
.irq_unmask = idi_48_irq_unmask,
- .irq_set_type = idi_48_irq_set_type
+ .irq_set_type = idi_48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
@@ -267,7 +271,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
girq = &idi48gpio->chip.irq;
- girq->chip = &idi_48_irqchip;
+ gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 65a5f581d981..4756e583f223 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -174,10 +174,11 @@ static void idio_16_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
unsigned long flags;
- idio16gpio->irq_mask &= ~mask;
+ idio16gpio->irq_mask &= ~BIT(offset);
+ gpiochip_disable_irq(chip, offset);
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -192,11 +193,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
const unsigned long prev_irq_mask = idio16gpio->irq_mask;
unsigned long flags;
- idio16gpio->irq_mask |= mask;
+ gpiochip_enable_irq(chip, offset);
+ idio16gpio->irq_mask |= BIT(offset);
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -217,12 +219,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idio_16_irqchip = {
+static const struct irq_chip idio_16_irqchip = {
.name = "104-idio-16",
.irq_ack = idio_16_irq_ack,
.irq_mask = idio_16_irq_mask,
.irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type
+ .irq_set_type = idio_16_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
@@ -299,7 +303,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
idio16gpio->out_state = 0xFFFF;
girq = &idio16gpio->chip.irq;
- girq->chip = &idio_16_irqchip;
+ gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d49f12560cde..9b562dbbd733 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -409,14 +409,12 @@ static int adp5588_gpio_probe(struct i2c_client *client)
return 0;
}
-static int adp5588_gpio_remove(struct i2c_client *client)
+static void adp5588_gpio_remove(struct i2c_client *client)
{
struct adp5588_gpio *dev = i2c_get_clientdata(client);
if (dev->client->irq)
free_irq(dev->client->irq, dev);
-
- return 0;
}
static const struct i2c_device_id adp5588_gpio_id[] = {
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 18a3147f5a42..9effa7769bef 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882, F71889 and F81866
+ * GPIO driver for Fintek and Nuvoton Super-I/O chips
*
* Copyright (C) 2010-2013 LaCie
*
* Author: Simon Guinot <simon.guinot@sequanux.org>
*/
+#define DRVNAME "gpio-f7188x"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -14,30 +17,41 @@
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
-#define DRVNAME "gpio-f7188x"
-
/*
* Super-I/O registers
*/
#define SIO_LDSEL 0x07 /* Logical device select */
#define SIO_DEVID 0x20 /* Device ID (2 bytes) */
-#define SIO_DEVREV 0x22 /* Device revision */
-#define SIO_MANID 0x23 /* Fintek ID (2 bytes) */
-#define SIO_LD_GPIO 0x06 /* GPIO logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
-#define SIO_FINTEK_ID 0x1934 /* Manufacturer ID */
+/*
+ * Fintek devices.
+ */
+#define SIO_FINTEK_DEVREV 0x22 /* Fintek Device revision */
+#define SIO_FINTEK_MANID 0x23 /* Fintek ID (2 bytes) */
+
+#define SIO_FINTEK_ID 0x1934 /* Manufacturer ID */
+
#define SIO_F71869_ID 0x0814 /* F71869 chipset ID */
#define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */
#define SIO_F71882_ID 0x0541 /* F71882 chipset ID */
#define SIO_F71889_ID 0x0909 /* F71889 chipset ID */
#define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */
#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
-#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for f81966 */
+#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for F81966 */
#define SIO_F81865_ID 0x0704 /* F81865 chipset ID */
+#define SIO_LD_GPIO_FINTEK 0x06 /* GPIO logical device */
+
+/*
+ * Nuvoton devices.
+ */
+#define SIO_NCT6116D_ID 0xD283 /* NCT6116D chipset ID */
+
+#define SIO_LD_GPIO_NUVOTON 0x07 /* GPIO logical device */
+
enum chips {
f71869,
@@ -48,6 +62,7 @@ enum chips {
f81866,
f81804,
f81865,
+ nct6116d,
};
static const char * const f7188x_names[] = {
@@ -59,10 +74,12 @@ static const char * const f7188x_names[] = {
"f81866",
"f81804",
"f81865",
+ "nct6116d",
};
struct f7188x_sio {
int addr;
+ int device;
enum chips type;
};
@@ -110,7 +127,7 @@ static inline int superio_enter(int base)
{
/* Don't step on other drivers' I/O space by accident. */
if (!request_muxed_region(base, 2, DRVNAME)) {
- pr_err(DRVNAME "I/O address 0x%04x already in use\n", base);
+ pr_err("I/O address 0x%04x already in use\n", base);
return -EBUSY;
}
@@ -146,10 +163,10 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config);
-#define F7188X_GPIO_BANK(_base, _ngpio, _regbase) \
+#define F7188X_GPIO_BANK(_base, _ngpio, _regbase, _label) \
{ \
.chip = { \
- .label = DRVNAME, \
+ .label = _label, \
.owner = THIS_MODULE, \
.get_direction = f7188x_gpio_get_direction, \
.direction_input = f7188x_gpio_direction_in, \
@@ -164,94 +181,108 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.regbase = _regbase, \
}
-#define gpio_dir(base) (base + 0)
-#define gpio_data_out(base) (base + 1)
-#define gpio_data_in(base) (base + 2)
+#define f7188x_gpio_dir(base) ((base) + 0)
+#define f7188x_gpio_data_out(base) ((base) + 1)
+#define f7188x_gpio_data_in(base) ((base) + 2)
/* Output mode register (0:open drain 1:push-pull). */
-#define gpio_out_mode(base) (base + 3)
+#define f7188x_gpio_out_mode(base) ((base) + 3)
+
+#define f7188x_gpio_dir_invert(type) ((type) == nct6116d)
+#define f7188x_gpio_data_single(type) ((type) == nct6116d)
static struct f7188x_gpio_bank f71869_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 6, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 6, 0x90),
+ F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 6, 0x90, DRVNAME "-6"),
};
static struct f7188x_gpio_bank f71869a_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 6, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
};
static struct f7188x_gpio_bank f71882_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 4, 0xC0),
- F7188X_GPIO_BANK(40, 4, 0xB0),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 4, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 4, 0xB0, DRVNAME "-4"),
};
static struct f7188x_gpio_bank f71889a_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 7, 0xF0),
- F7188X_GPIO_BANK(10, 7, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(0, 7, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 7, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
};
static struct f7188x_gpio_bank f71889_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 7, 0xF0),
- F7188X_GPIO_BANK(10, 7, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(0, 7, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 7, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
};
static struct f7188x_gpio_bank f81866_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 8, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
- F7188X_GPIO_BANK(80, 8, 0x88),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
+ F7188X_GPIO_BANK(80, 8, 0x88, DRVNAME "-8"),
};
static struct f7188x_gpio_bank f81804_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(50, 8, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
- F7188X_GPIO_BANK(90, 8, 0x98),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-4"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-5"),
+ F7188X_GPIO_BANK(90, 8, 0x98, DRVNAME "-6"),
};
static struct f7188x_gpio_bank f81865_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 8, 0xA0),
- F7188X_GPIO_BANK(60, 5, 0x90),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"),
+};
+
+static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xEC, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 1, 0xFC, DRVNAME "-7"),
};
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -264,13 +295,16 @@ static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
superio_exit(sio->addr);
- if (dir & 1 << offset)
+ if (f7188x_gpio_dir_invert(sio->type))
+ dir = ~dir;
+
+ if (dir & BIT(offset))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
@@ -286,11 +320,15 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir &= ~BIT(offset);
- superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
+
+ if (f7188x_gpio_dir_invert(sio->type))
+ dir |= BIT(offset);
+ else
+ dir &= ~BIT(offset);
+ superio_outb(sio->addr, f7188x_gpio_dir(bank->regbase), dir);
superio_exit(sio->addr);
@@ -307,14 +345,14 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset)
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
dir = !!(dir & BIT(offset));
- if (dir)
- data = superio_inb(sio->addr, gpio_data_out(bank->regbase));
+ if (f7188x_gpio_data_single(sio->type) || dir)
+ data = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
else
- data = superio_inb(sio->addr, gpio_data_in(bank->regbase));
+ data = superio_inb(sio->addr, f7188x_gpio_data_in(bank->regbase));
superio_exit(sio->addr);
@@ -332,18 +370,21 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
+ data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
if (value)
data_out |= BIT(offset);
else
data_out &= ~BIT(offset);
- superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
+ superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir |= BIT(offset);
- superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
+ if (f7188x_gpio_dir_invert(sio->type))
+ dir &= ~BIT(offset);
+ else
+ dir |= BIT(offset);
+ superio_outb(sio->addr, f7188x_gpio_dir(bank->regbase), dir);
superio_exit(sio->addr);
@@ -360,14 +401,14 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
err = superio_enter(sio->addr);
if (err)
return;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
+ data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
if (value)
data_out |= BIT(offset);
else
data_out &= ~BIT(offset);
- superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
+ superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
}
@@ -388,14 +429,14 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- data = superio_inb(sio->addr, gpio_out_mode(bank->regbase));
+ data = superio_inb(sio->addr, f7188x_gpio_out_mode(bank->regbase));
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
data &= ~BIT(offset);
else
data |= BIT(offset);
- superio_outb(sio->addr, gpio_out_mode(bank->regbase), data);
+ superio_outb(sio->addr, f7188x_gpio_out_mode(bank->regbase), data);
superio_exit(sio->addr);
return 0;
@@ -449,6 +490,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
data->bank = f81865_gpio_bank;
break;
+ case nct6116d:
+ data->nr_bank = ARRAY_SIZE(nct6116d_gpio_bank);
+ data->bank = nct6116d_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -479,18 +524,15 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
{
int err;
u16 devid;
+ u16 manid;
err = superio_enter(addr);
if (err)
return err;
err = -ENODEV;
- devid = superio_inw(addr, SIO_MANID);
- if (devid != SIO_FINTEK_ID) {
- pr_debug(DRVNAME ": Not a Fintek device at 0x%08x\n", addr);
- goto err;
- }
+ sio->device = SIO_LD_GPIO_FINTEK;
devid = superio_inw(addr, SIO_DEVID);
switch (devid) {
case SIO_F71869_ID:
@@ -517,17 +559,30 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F81865_ID:
sio->type = f81865;
break;
+ case SIO_NCT6116D_ID:
+ sio->device = SIO_LD_GPIO_NUVOTON;
+ sio->type = nct6116d;
+ break;
default:
- pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
+ pr_info("Unsupported Fintek device 0x%04x\n", devid);
goto err;
}
+
+ /* double check manufacturer where possible */
+ if (sio->type != nct6116d) {
+ manid = superio_inw(addr, SIO_FINTEK_MANID);
+ if (manid != SIO_FINTEK_ID) {
+ pr_debug("Not a Fintek device at 0x%08x\n", addr);
+ goto err;
+ }
+ }
+
sio->addr = addr;
err = 0;
- pr_info(DRVNAME ": Found %s at %#x, revision %d\n",
- f7188x_names[sio->type],
- (unsigned int) addr,
- (int) superio_inb(addr, SIO_DEVREV));
+ pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr);
+ if (sio->type != nct6116d)
+ pr_info(" revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV));
err:
superio_exit(addr);
@@ -548,13 +603,13 @@ f7188x_gpio_device_add(const struct f7188x_sio *sio)
err = platform_device_add_data(f7188x_gpio_pdev,
sio, sizeof(*sio));
if (err) {
- pr_err(DRVNAME "Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto err;
}
err = platform_device_add(f7188x_gpio_pdev);
if (err) {
- pr_err(DRVNAME "Device addition failed\n");
+ pr_err("Device addition failed\n");
goto err;
}
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index f422c3e129a0..f77a965f5780 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -41,14 +41,12 @@
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
* @gc: gpiochip for this instance
- * @irq: irqchip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
struct gpio_chip gc;
- struct irq_chip irq;
void __iomem *base;
struct clk *clk;
};
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
val = readl(g->base + GPIO_INT_EN);
val &= ~BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
struct ftgpio_gpio *g = gpiochip_get_data(gc);
u32 val;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
val = readl(g->base + GPIO_INT_EN);
val |= BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return 0;
}
+static const struct irq_chip ftgpio_irq_chip = {
+ .name = "FTGPIO010",
+ .irq_ack = ftgpio_gpio_ack_irq,
+ .irq_mask = ftgpio_gpio_mask_irq,
+ .irq_unmask = ftgpio_gpio_unmask_irq,
+ .irq_set_type = ftgpio_gpio_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (!IS_ERR(g->clk))
g->gc.set_config = ftgpio_gpio_set_config;
- g->irq.name = "FTGPIO010";
- g->irq.irq_ack = ftgpio_gpio_ack_irq;
- g->irq.irq_mask = ftgpio_gpio_mask_irq;
- g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
- g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
girq = &g->gc.irq;
- girq->chip = &g->irq;
+ gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 312309be0287..56656fb519f8 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
__raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
}
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
if (!(g->irq_edge & BIT(d->hwirq)))
ixp4xx_gpio_irq_ack(d);
+ gpiochip_enable_irq(gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
.name = "IXP4GPIO",
.irq_ack = ixp4xx_gpio_irq_ack,
- .irq_mask = irq_chip_mask_parent,
+ .irq_mask = ixp4xx_gpio_mask_irq,
.irq_unmask = ixp4xx_gpio_irq_unmask,
.irq_set_type = ixp4xx_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
g->gc.owner = THIS_MODULE;
girq = &g->gc.irq;
- girq->chip = &ixp4xx_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = g->fwnode;
girq->parent_domain = parent;
girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c
index b2b547dd6e84..43da381a4d7e 100644
--- a/drivers/gpio/gpio-max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -48,11 +48,9 @@ static int max7300_probe(struct i2c_client *client,
return __max730x_probe(ts);
}
-static int max7300_remove(struct i2c_client *client)
+static void max7300_remove(struct i2c_client *client)
{
__max730x_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id max7300_id[] = {
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8943cea92764..523dfd17dd92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
}
}
+static void gpio_mockup_debugfs_cleanup(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+
+ debugfs_remove_recursive(chip->dbg_dir);
+}
+
static void gpio_mockup_dispose_mappings(void *data)
{
struct gpio_mockup_chip *chip = data;
@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gpio_mockup_debugfs_setup(dev, chip);
- return 0;
+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
}
static const struct of_device_id gpio_mockup_of_match[] = {
@@ -526,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
}
fwnode = fwnode_create_software_node(properties, NULL);
- if (IS_ERR(fwnode))
+ if (IS_ERR(fwnode)) {
+ kfree_strarray(line_names, ngpio);
return PTR_ERR(fwnode);
+ }
pdevinfo.name = "gpio-mockup";
pdevinfo.id = idx;
@@ -590,9 +599,9 @@ static int __init gpio_mockup_init(void)
static void __exit gpio_mockup_exit(void)
{
+ gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- gpio_mockup_unregister_pdevs();
}
module_init(gpio_mockup_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 15049822937a..3eb08cd1fdc0 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d8a26e503ca5..f163f5ca857b 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
unsigned long flags;
u32 rise, fall, high, low;
+ gpiochip_enable_irq(gc, d->hwirq);
+
spin_lock_irqsave(&rg->lock, flags);
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
spin_unlock_irqrestore(&rg->lock, flags);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
static int
@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
return gpio % MTK_BANK_WIDTH;
}
+static const struct irq_chip mt7621_irq_chip = {
+ .name = "mt7621-gpio",
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_set_type = mediatek_gpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return -ENOMEM;
rg->chip.offset = bank * MTK_BANK_WIDTH;
- rg->irq_chip.name = dev_name(dev);
- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
}
girq = &rg->chip.irq;
- girq->chip = &rg->irq_chip;
+ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index aa126ab80f0c..1bb317b8dcce 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -790,8 +790,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 offset;
u32 set;
- if (of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-gpio")) {
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ int ret = of_property_read_u32(dev->of_node,
+ "marvell,pwm-offset", &offset);
+ if (ret < 0)
+ return 0;
+ } else {
/*
* There are only two sets of PWM configuration registers for
* all the GPIO lines on those SoCs which this driver reserves
@@ -801,13 +805,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
return 0;
offset = 0;
- } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
- int ret = of_property_read_u32(dev->of_node,
- "marvell,pwm-offset", &offset);
- if (ret < 0)
- return 0;
- } else {
- return 0;
}
if (IS_ERR(mvchip->clk))
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index ecd7d169470b..cf9bf3fcaee2 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1101,24 +1101,17 @@ err_exit:
return ret;
}
-static int pca953x_remove(struct i2c_client *client)
+static void pca953x_remove(struct i2c_client *client)
{
struct pca953x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pca953x_chip *chip = i2c_get_clientdata(client);
- int ret;
if (pdata && pdata->teardown) {
- ret = pdata->teardown(client, chip->gpio_chip.base,
- chip->gpio_chip.ngpio, pdata->context);
- if (ret < 0)
- dev_err(&client->dev, "teardown failed, %d\n", ret);
- } else {
- ret = 0;
+ pdata->teardown(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
}
regulator_disable(chip->regulator);
-
- return ret;
}
#ifdef CONFIG_PM_SLEEP
@@ -1175,7 +1168,9 @@ static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@@ -1198,13 +1193,17 @@ static int pca953x_resume(struct device *dev)
}
}
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&chip->i2c_lock);
return ret;
+ }
ret = regcache_sync(chip->regmap);
+ mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 59cc27e4de51..e98ea47d7237 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -399,7 +399,7 @@ fail:
return status;
}
-static int pcf857x_remove(struct i2c_client *client)
+static void pcf857x_remove(struct i2c_client *client)
{
struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pcf857x *gpio = i2c_get_clientdata(client);
@@ -407,8 +407,6 @@ static int pcf857x_remove(struct i2c_client *client)
if (pdata && pdata->teardown)
pdata->teardown(client, gpio->chip.base, gpio->chip.ngpio,
pdata->context);
-
- return 0;
}
static void pcf857x_shutdown(struct i2c_client *client)
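
pca953x and pcf857x above (and tpic2810 further down) are adjusted for the I2C core change that makes the remove() callback return void: the callback can no longer propagate an error, so teardown runs unconditionally and the trailing "return 0" disappears. A minimal sketch of the resulting shape, with the bar_* names being hypothetical:

/* Sketch only: bar_* names are illustrative, not part of this patch. */
static void bar_remove(struct i2c_client *client)
{
	struct bar_chip *chip = i2c_get_clientdata(client);

	/* Clean up unconditionally; there is nobody left to return an error to. */
	bar_chip_teardown(chip);
}

static struct i2c_driver bar_driver = {
	.driver	= { .name = "bar" },
	.remove	= bar_remove,	/* now returns void */
};
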
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index c7fbfa3ae43b..1198ab0305d0 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -661,24 +661,17 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
/* Initialize GPIO chips */
ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base);
- if (ret) {
- clk_put(clk);
+ if (ret)
return ret;
- }
/* clear all GPIO edge detects */
for_each_gpio_bank(gpio, c, pchip) {
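
The pxa probe above swaps the open-coded clk_get()/clk_prepare_enable()/clk_put() handling for devm_clk_get_enabled(), which acquires and enables the clock and lets devres undo both on driver detach, so the error paths no longer need clk_put(). A reduced before/after sketch, with dev, clk and ret standing for the usual probe-local variables:

	/* Before: every failure path must undo clk_get()/clk_prepare_enable(). */
	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* After: devres disables and releases the clock automatically. */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
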
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index 63dcf42f7c20..d6418f89d3f6 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -46,10 +46,20 @@
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
* @intr_type: Interrupt type selection
+ * @bank_read: Read a bank setting as a single 32-bit value
+ * @bank_write: Write a bank setting as a single 32-bit value
+ * @line_imr_pos: Bit shift of an IRQ line's IMR value.
+ *
+ * The DIR, DATA, and ISR registers consist of four 8-bit port values, packed
+ * into a single 32-bit register. Use @bank_read (@bank_write) to get (assign)
+ * a value from (to) these registers. The IMR register consists of four 16-bit
+ * port values, packed into two 32-bit registers. Use @line_imr_pos to get the
+ * bit shift of the 2-bit field for a line's IMR settings. Shifts larger than
+ * 32 overflow into the second register.
*
* Because the interrupt mask register (IMR) combines the function of IRQ type
* selection and masking, two extra values are stored. @intr_mask is used to
- * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * mask/unmask the interrupts for a GPIO line, and @intr_type is used to store
* the selected interrupt types. The logical AND of these values is written to
* IMR on changes.
*/
@@ -59,10 +69,11 @@ struct realtek_gpio_ctrl {
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
raw_spinlock_t lock;
- u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
- u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
- unsigned int (*port_offset_u8)(unsigned int port);
- unsigned int (*port_offset_u16)(unsigned int port);
+ u8 intr_mask[REALTEK_GPIO_MAX];
+ u8 intr_type[REALTEK_GPIO_MAX];
+ u32 (*bank_read)(void __iomem *reg);
+ void (*bank_write)(void __iomem *reg, u32 value);
+ unsigned int (*line_imr_pos)(unsigned int line);
};
/* Expand with more flags as devices with other quirks are added */
@@ -101,14 +112,22 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
* port. The two interrupt mask registers store two bits per GPIO, so use u16
* values.
*/
-static unsigned int realtek_gpio_port_offset_u8(unsigned int port)
+static u32 realtek_gpio_bank_read_swapped(void __iomem *reg)
{
- return port;
+ return ioread32be(reg);
}
-static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
+static void realtek_gpio_bank_write_swapped(void __iomem *reg, u32 value)
{
- return 2 * port;
+ iowrite32be(value, reg);
+}
+
+static unsigned int realtek_gpio_line_imr_pos_swapped(unsigned int line)
+{
+ unsigned int port_pin = line % 8;
+ unsigned int port = line / 8;
+
+ return 2 * (8 * (port ^ 1) + port_pin);
}
/*
@@ -119,66 +138,67 @@ static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
* per GPIO, so use u16 values. The first register contains ports 1 and 0, the
* second ports 3 and 2.
*/
-static unsigned int realtek_gpio_port_offset_u8_rev(unsigned int port)
+static u32 realtek_gpio_bank_read(void __iomem *reg)
{
- return 3 - port;
+ return ioread32(reg);
}
-static unsigned int realtek_gpio_port_offset_u16_rev(unsigned int port)
+static void realtek_gpio_bank_write(void __iomem *reg, u32 value)
{
- return 2 * (port ^ 1);
+ iowrite32(value, reg);
}
-static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u16 irq_type, u16 irq_mask)
+static unsigned int realtek_gpio_line_imr_pos(unsigned int line)
{
- iowrite16(irq_type & irq_mask,
- ctrl->base + REALTEK_GPIO_REG_IMR + ctrl->port_offset_u16(port));
+ return 2 * line;
}
-static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u8 mask)
+static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, u32 mask)
{
- iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ ctrl->bank_write(ctrl->base + REALTEK_GPIO_REG_ISR, mask);
}
-static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
+static u32 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl)
{
- return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ return ctrl->bank_read(ctrl->base + REALTEK_GPIO_REG_ISR);
}
-/* Set the rising and falling edge mask bits for a GPIO port pin */
-static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
+/* Set the rising and falling edge mask bits for a GPIO pin */
+static void realtek_gpio_update_line_imr(struct realtek_gpio_ctrl *ctrl, unsigned int line)
{
- return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
+ void __iomem *reg = ctrl->base + REALTEK_GPIO_REG_IMR;
+ unsigned int line_shift = ctrl->line_imr_pos(line);
+ unsigned int shift = line_shift % 32;
+ u32 irq_type = ctrl->intr_type[line];
+ u32 irq_mask = ctrl->intr_mask[line];
+ u32 reg_val;
+
+ reg += 4 * (line_shift / 32);
+ reg_val = ioread32(reg);
+ reg_val &= ~(REALTEK_GPIO_IMR_LINE_MASK << shift);
+ reg_val |= (irq_type & irq_mask & REALTEK_GPIO_IMR_LINE_MASK) << shift;
+ iowrite32(reg_val, reg);
}
static void realtek_gpio_irq_ack(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
irq_hw_number_t line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
- realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
+ realtek_gpio_clear_isr(ctrl, BIT(line));
}
static void realtek_gpio_irq_unmask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
gpiochip_enable_irq(&ctrl->gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
@@ -186,16 +206,11 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = 0;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
gpiochip_disable_irq(&ctrl->gc, line);
@@ -205,10 +220,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 type, t;
+ u8 type;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -227,11 +240,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
irq_set_handler_locked(data, handle_edge_irq);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- t = ctrl->intr_type[port];
- t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- t |= realtek_gpio_imr_bits(port_pin, type);
- ctrl->intr_type[port] = t;
- realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
+ ctrl->intr_type[line] = type;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
@@ -242,28 +252,21 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned int lines_done;
- unsigned int port_pin_count;
unsigned long status;
int offset;
chained_irq_enter(irq_chip, desc);
- for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
- status = realtek_gpio_read_isr(ctrl, lines_done / 8);
- port_pin_count = min(gc->ngpio - lines_done, 8U);
- for_each_set_bit(offset, &status, port_pin_count)
- generic_handle_domain_irq(gc->irq.domain, offset + lines_done);
- }
+ status = realtek_gpio_read_isr(ctrl);
+ for_each_set_bit(offset, &status, gc->ngpio)
+ generic_handle_domain_irq(gc->irq.domain, offset);
chained_irq_exit(irq_chip, desc);
}
-static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, int cpu)
+static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
{
- return ctrl->cpumask_base + ctrl->port_offset_u8(port) +
- REALTEK_GPIO_PORTS_PER_BANK * cpu;
+ return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
}
static int realtek_gpio_irq_set_affinity(struct irq_data *data,
@@ -271,12 +274,10 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
void __iomem *irq_cpu_mask;
unsigned long flags;
int cpu;
- u8 v;
+ u32 v;
if (!ctrl->cpumask_base)
return -ENXIO;
@@ -284,15 +285,15 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
raw_spin_lock_irqsave(&ctrl->lock, flags);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
- irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, port, cpu);
- v = ioread8(irq_cpu_mask);
+ irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
+ v = ctrl->bank_read(irq_cpu_mask);
if (cpumask_test_cpu(cpu, dest))
- v |= BIT(port_pin);
+ v |= BIT(line);
else
- v &= ~BIT(port_pin);
+ v &= ~BIT(line);
- iowrite8(v, irq_cpu_mask);
+ ctrl->bank_write(irq_cpu_mask, v);
}
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -305,16 +306,17 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
static int realtek_gpio_irq_init(struct gpio_chip *gc)
{
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned int port;
+ u32 mask_all = GENMASK(gc->ngpio - 1, 0);
+ unsigned int line;
int cpu;
- for (port = 0; (port * 8) < gc->ngpio; port++) {
- realtek_gpio_write_imr(ctrl, port, 0, 0);
- realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
+ for (line = 0; line < gc->ngpio; line++)
+ realtek_gpio_update_line_imr(ctrl, line);
- for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
- iowrite8(GENMASK(7, 0), realtek_gpio_irq_cpu_mask(ctrl, port, cpu));
- }
+ realtek_gpio_clear_isr(ctrl, mask_all);
+
+ for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
+ ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
return 0;
}
@@ -387,12 +389,14 @@ static int realtek_gpio_probe(struct platform_device *pdev)
if (dev_flags & GPIO_PORTS_REVERSED) {
bgpio_flags = 0;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8_rev;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16_rev;
+ ctrl->bank_read = realtek_gpio_bank_read;
+ ctrl->bank_write = realtek_gpio_bank_write;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16;
+ ctrl->bank_read = realtek_gpio_bank_read_swapped;
+ ctrl->bank_write = realtek_gpio_bank_write_swapped;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
err = bgpio_init(&ctrl->gc, dev, 4,
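
The kernel-doc added above describes the IMR layout this rework relies on: each line owns a 2-bit type/mask field, sixteen lines fit in one 32-bit IMR register, positions of 32 and above spill into the next register, and the byte-swapped variant additionally exchanges the two ports within each 16-bit half (port ^ 1). A standalone sketch of that position arithmetic, compilable as ordinary user-space C; the 0x3 field mask is an assumption matching the 2-bit field, and the helper names only mirror the driver's:

#include <stdio.h>
#include <stdint.h>

#define IMR_LINE_MASK	0x3u	/* assumption: 2-bit field per line */

/* Natural ordering: line n occupies bits [2n+1:2n], lines >= 16 land in IMR+4. */
static unsigned int line_imr_pos(unsigned int line)
{
	return 2 * line;
}

/* Byte-swapped ordering: swap the two ports within each 16-bit half. */
static unsigned int line_imr_pos_swapped(unsigned int line)
{
	unsigned int port_pin = line % 8;
	unsigned int port = line / 8;

	return 2 * (8 * (port ^ 1) + port_pin);
}

int main(void)
{
	for (unsigned int line = 0; line < 32; line++) {
		unsigned int nat = line_imr_pos(line);
		unsigned int swp = line_imr_pos_swapped(line);
		uint32_t field = IMR_LINE_MASK << (nat % 32);

		printf("line %2u: natural reg %u shift %2u (mask 0x%08x), swapped reg %u shift %2u\n",
		       line, nat / 32, nat % 32, field, swp / 32, swp % 32);
	}
	return 0;
}
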
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index f91e876fd969..bb50335239ac 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -419,11 +419,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
goto out;
} else {
bank->toggle_edge_mode |= mask;
- level |= mask;
+ level &= ~mask;
/*
* Determine gpio state. If 1 next interrupt should be
- * falling otherwise rising.
+ * low otherwise high.
*/
data = readl(bank->reg_base + bank->gpio_regs->ext_port);
if (data & mask)
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index a09b1e69b072..d642c35cb97c 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -126,13 +126,11 @@ static int tpic2810_probe(struct i2c_client *client,
return 0;
}
-static int tpic2810_remove(struct i2c_client *client)
+static void tpic2810_remove(struct i2c_client *client)
{
struct tpic2810 *gpio = i2c_get_clientdata(client);
gpiochip_remove(&gpio->chip);
-
- return 0;
}
static const struct i2c_device_id tpic2810_id_table[] = {
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index fa4bc7481f9a..e739dcea61b2 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+ irq_domain_set_pm_device(girq->domain, dev);
}
ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
goto out_pm_dis;
}
- irq_domain_set_pm_device(girq->domain, dev);
-
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index b098f2dc196b..59fb10641598 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -265,6 +265,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ gpiochip_disable_irq(chip, offset);
port_state = ws16c48gpio->irq_mask >> (8 * port);
/* Select Register Page 2; Unlock all I/O ports */
@@ -295,6 +296,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ gpiochip_enable_irq(chip, offset);
ws16c48gpio->irq_mask |= mask;
port_state = ws16c48gpio->irq_mask >> (8 * port);
@@ -356,12 +358,14 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return 0;
}
-static struct irq_chip ws16c48_irqchip = {
+static const struct irq_chip ws16c48_irqchip = {
.name = "ws16c48",
.irq_ack = ws16c48_irq_ack,
.irq_mask = ws16c48_irq_mask,
.irq_unmask = ws16c48_irq_unmask,
- .irq_set_type = ws16c48_irq_set_type
+ .irq_set_type = ws16c48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
@@ -463,7 +467,7 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
girq = &ws16c48gpio->chip.irq;
- girq->chip = &ws16c48_irqchip;
+ gpio_irq_chip_set_chip(girq, &ws16c48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f8041d4898d1..92f185575e94 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1986,7 +1986,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
ret = -ENODEV;
goto out_free_le;
}
- le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +1999,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
init_waitqueue_head(&le->wait);
/* Request a thread to read the events */
- ret = request_threaded_irq(le->irq,
+ ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
@@ -2009,6 +2008,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ le->irq = irq;
+
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
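
The cdev fix above delays the le->irq assignment until request_threaded_irq() has succeeded, so the shared error path (which, judging from the fix, releases the IRQ only when le->irq is non-zero) cannot free an IRQ that was never requested. The ordering in isolation, as a sketch with handler_fn, thread_fn and label standing in for the driver's actual arguments:

	ret = request_threaded_irq(irq, handler_fn, thread_fn, irqflags,
				   label, le);
	if (ret)
		goto out_free_le;	/* le->irq is still 0: cleanup skips the free */

	le->irq = irq;			/* only now may cleanup free_irq(le->irq, le) */
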
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6c2256e8474b..198ba846d34b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -31,6 +31,7 @@ menuconfig DRM
config DRM_MIPI_DBI
tristate
depends on DRM
+ select DRM_KMS_HELPER
config DRM_MIPI_DSI
bool
@@ -50,10 +51,9 @@ config DRM_DEBUG_MM
If in doubt, say "N".
-config DRM_DEBUG_SELFTEST
- tristate "kselftests for DRM"
- depends on DRM
- depends on DEBUG_KERNEL
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT
select PRIME_NUMBERS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
@@ -61,19 +61,6 @@ config DRM_DEBUG_SELFTEST
select DRM_KMS_HELPER
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
- default n
- help
- This option provides kernel modules that can be used to run
- various selftests on parts of the DRM api. This option is not
- useful for distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- If in doubt, say "N".
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT=y
- select DRM_KMS_HELPER
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -214,11 +201,11 @@ config DRM_TTM_HELPER
help
Helpers for ttm-based gem objects
-config DRM_GEM_CMA_HELPER
+config DRM_GEM_DMA_HELPER
tristate
depends on DRM
help
- Choose this if you need the GEM CMA helper functions
+ Choose this if you need the GEM DMA helper functions
config DRM_GEM_SHMEM_HELPER
tristate
@@ -248,6 +235,13 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ # radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -273,6 +267,13 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
+ # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
help
Choose this option if you have a recent AMD Radeon graphics card.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e7af358e6dda..25d0ba310509 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -40,9 +40,9 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
-drm_cma_helper-y := drm_gem_cma_helper.o
-drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o
-obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
+drm_dma_helper-y := drm_gem_dma_helper.o
+drm_dma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
+obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
@@ -75,7 +75,6 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
# Drivers and the rest
#
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
obj-$(CONFIG_DRM_KUNIT_TEST) += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 5a283d12f8e1..6ad39cf71bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -75,7 +75,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
- nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
+ sienna_cichlid.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
# add DF block
amdgpu-y += \
@@ -89,7 +89,7 @@ amdgpu-y += \
gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
- mmhub_v3_0_1.o
+ mmhub_v3_0_1.o gfxhub_v3_0_3.o
# add UMC block
amdgpu-y += \
@@ -134,7 +134,8 @@ amdgpu-y += \
gfx_v9_4_2.o \
gfx_v10_0.o \
imu_v11_0.o \
- gfx_v11_0.o
+ gfx_v11_0.o \
+ imu_v11_0_3.o
# add async DMA block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index c6cc493a5486..2b97b8a96fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r;
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e146810c700b..ae9371b172e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,6 +274,9 @@ extern int amdgpu_vcnfw_log;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
+#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
+#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
+
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -317,7 +320,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -882,6 +885,7 @@ struct amdgpu_device {
u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+ struct dma_fence __rcu *gang_submit;
bool ib_pool_ready;
struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
@@ -1060,6 +1064,9 @@ struct amdgpu_device {
uint32_t scpm_status;
struct work_struct reset_work;
+
+ uint32_t amdgpu_reset_level_mask;
+ bool job_hang;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1288,6 +1295,8 @@ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 130060834b4e..b14800ac179e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -849,6 +850,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
+
if (dm->backlight_dev[0])
atif->bd = dm->backlight_dev[0];
#endif
@@ -863,6 +865,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
enc->enc_priv) {
struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
if (dig->bl_dev) {
atif->bd = dig->bl_dev;
break;
@@ -919,9 +922,9 @@ static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
return false;
- }
+
amdgpu_acpi_priv.atif.handle = atif_handle;
acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
@@ -954,9 +957,9 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
return false;
- }
+
amdgpu_acpi_priv.atcs.handle = atcs_handle;
acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
@@ -1050,6 +1053,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
@@ -1066,6 +1073,12 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
+ /*
+ * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
+ * risky to do any special firmware-related preparations for entering
+ * S0ix even though the system is suspending to idle, so return false
+ * in that case.
+ */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
dev_warn_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5e53a5293935..9e98f3866edc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@@ -130,11 +131,13 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
kfd.reset_work);
struct amdgpu_reset_context reset_context;
+
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
@@ -683,6 +686,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
ib->length_dw = ib_len;
/* This works for NO_HWS. TODO: need to handle without knowing VMID */
job->vmid = vmid;
+ job->num_ibs = 1;
ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
@@ -752,11 +756,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo
{
struct ras_err_data err_data = {0, 0, 0, NULL};
- /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
- if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_poison_handler(adev, &err_data, reset);
- else if (reset)
- amdgpu_amdkfd_gpu_reset(adev);
+ amdgpu_umc_poison_handler(adev, &err_data, reset);
}
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3c09dcc0986e..647220a8762d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a699134a1e8c..978d3970b5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014-2018 Advanced Micro Devices, Inc.
*
@@ -40,10 +41,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct {
@@ -149,7 +150,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
- vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -182,8 +183,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
- if (adev)
+ if (adev) {
adev->kfd.vram_used += vram_needed;
+ adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -203,8 +206,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
- if (adev)
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ if (adev) {
+ adev->kfd.vram_used -= size;
+ adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -293,7 +298,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
- replacement, DMA_RESV_USAGE_READ);
+ replacement, DMA_RESV_USAGE_BOOKKEEP);
dma_fence_put(replacement);
return 0;
}
@@ -1386,8 +1391,9 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(vm->root.bo,
- &vm->process_info->eviction_fence->base, true);
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv,
+ &vm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(vm->root.bo);
/* Update process info */
@@ -1611,12 +1617,12 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- - adev->kfd.vram_used
+ - adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
- return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+ return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
@@ -1725,7 +1731,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+ pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
@@ -1984,9 +1990,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
}
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
- amdgpu_bo_fence(bo,
- &avm->process_info->eviction_fence->base,
- true);
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &avm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
ret = unreserve_bo_and_vms(&ctx, false, false);
goto out;
@@ -2213,7 +2219,7 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
{
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb();
+ mb(); /* make sure read happened */
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
@@ -2755,15 +2761,18 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (mem->bo->tbo.pin_count)
continue;
- amdgpu_bo_fence(mem->bo,
- &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(mem->bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
}
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *bo = peer_vm->root.bo;
- amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
}
validate_map_fail:
@@ -2817,7 +2826,9 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(gws_bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(gws_bo);
mutex_unlock(&(*mem)->process_info->lock);
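
The amdkfd changes above split the VRAM bookkeeping in two: vram_used keeps the exact reserved byte counts, while vram_used_aligned keeps the same reservations rounded up to the 2 MiB VRAM_AVAILABLITY_ALIGN granularity, and only the aligned figure is subtracted when reporting available memory. A standalone plain-C sketch of that accounting; the struct and helper names are illustrative, only the 2 MiB constant mirrors the patch:

#include <stdint.h>
#include <stdio.h>

#define VRAM_ALIGN		(1u << 21)	/* 2 MiB, as in the patch */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

struct kfd_accounting {
	uint64_t vram_used;		/* exact bytes reserved */
	uint64_t vram_used_aligned;	/* same reservations, 2 MiB-rounded */
};

static void reserve(struct kfd_accounting *k, uint64_t size)
{
	k->vram_used += size;
	k->vram_used_aligned += ALIGN_UP(size, VRAM_ALIGN);
}

static void unreserve(struct kfd_accounting *k, uint64_t size)
{
	k->vram_used -= size;
	k->vram_used_aligned -= ALIGN_UP(size, VRAM_ALIGN);
}

int main(void)
{
	struct kfd_accounting k = { 0, 0 };
	uint64_t real_vram = 8ull << 30;	/* 8 GiB, illustrative */

	reserve(&k, 4096);	/* a single 4 KiB allocation still consumes a 2 MiB chunk */
	printf("used=%llu aligned=%llu available=%llu\n",
	       (unsigned long long)k.vram_used,
	       (unsigned long long)k.vram_used_aligned,
	       (unsigned long long)ALIGN_DOWN(real_vram - k.vram_used_aligned,
					      VRAM_ALIGN));
	unreserve(&k, 4096);
	return 0;
}
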
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fd8f3731758e..b81b77a9efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index b7933c2ce765..491d4846fc02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1674,10 +1674,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
- if (amdgpu_audio != 0)
+ if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1799,6 +1801,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1852,6 +1855,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1902,6 +1906,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d8f1335bc68f..1bbd39b3b0fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,9 +39,82 @@
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
-static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_cs_chunk_fence *data,
- uint32_t *offset)
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
+ struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_cs *cs)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+ if (cs->in.num_chunks == 0)
+ return -EINVAL;
+
+ memset(p, 0, sizeof(*p));
+ p->adev = adev;
+ p->filp = filp;
+
+ p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
+ if (!p->ctx)
+ return -EINVAL;
+
+ if (atomic_read(&p->ctx->guilty)) {
+ amdgpu_ctx_put(p->ctx);
+ return -ECANCELED;
+ }
+ return 0;
+}
+
+static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib)
+{
+ struct drm_sched_entity *entity;
+ unsigned int i;
+ int r;
+
+ r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
+ chunk_ib->ip_instance,
+ chunk_ib->ring, &entity);
+ if (r)
+ return r;
+
+ /*
+ * Abort if there is no run queue associated with this entity.
+ * Possibly because of disabled HW IP.
+ */
+ if (entity->rq == NULL)
+ return -EINVAL;
+
+ /* Check if we can add this IB to some existing job */
+ for (i = 0; i < p->gang_size; ++i)
+ if (p->entities[i] == entity)
+ return i;
+
+ /* If not increase the gang size if possible */
+ if (i == AMDGPU_CS_GANG_SIZE)
+ return -EINVAL;
+
+ p->entities[i] = entity;
+ p->gang_size = i + 1;
+ return i;
+}
+
+static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib,
+ unsigned int *num_ibs)
+{
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ ++(num_ibs[r]);
+ return 0;
+}
+
+static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *data,
+ uint32_t *offset)
{
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
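
amdgpu_cs_job_idx() in the hunk above is a small lookup-or-append table: each IB chunk is resolved to its scheduler entity, an existing gang slot is reused when that entity was already seen, and otherwise the gang grows until AMDGPU_CS_GANG_SIZE is reached. The same logic in isolation, as a compilable sketch with illustrative names (GANG_SIZE stands in for AMDGPU_CS_GANG_SIZE):

#include <stdio.h>

#define GANG_SIZE 4	/* stands in for AMDGPU_CS_GANG_SIZE */

struct parser {
	const void *entities[GANG_SIZE];
	unsigned int gang_size;
};

/* Return the slot index for @entity, appending it if it is new;
 * -1 once the gang is full (the kernel code returns -EINVAL there). */
static int job_idx(struct parser *p, const void *entity)
{
	unsigned int i;

	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	if (i == GANG_SIZE)
		return -1;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

int main(void)
{
	static const char gfx[] = "gfx", compute[] = "compute";
	struct parser p = { .gang_size = 0 };

	int a = job_idx(&p, gfx);	/* new entity  -> slot 0 */
	int b = job_idx(&p, compute);	/* new entity  -> slot 1 */
	int c = job_idx(&p, gfx);	/* seen before -> slot 0 again */

	printf("%d %d %d gang_size=%u\n", a, b, c, p.gang_size);
	return 0;
}
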
@@ -80,11 +153,11 @@ error_unref:
return r;
}
-static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_bo_list_in *data)
+static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
{
+ struct drm_amdgpu_bo_list_entry *info;
int r;
- struct drm_amdgpu_bo_list_entry *info = NULL;
r = amdgpu_bo_create_list_entry_array(data, &info);
if (r)
@@ -104,38 +177,25 @@ error_free:
return r;
}
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
+/* Copy the data from userspace and go over it the first time */
+static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
- unsigned size, num_ibs = 0;
uint32_t uf_offset = 0;
- int i;
+ unsigned int size;
int ret;
+ int i;
- if (cs->in.num_chunks == 0)
- return -EINVAL;
-
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
+ GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
- p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
- if (!p->ctx) {
- ret = -EINVAL;
- goto free_chunk;
- }
-
- mutex_lock(&p->ctx->lock);
-
- /* skip guilty context job */
- if (atomic_read(&p->ctx->guilty) == 1) {
- ret = -ECANCELED;
- goto free_chunk;
- }
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -170,7 +230,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
size = p->chunks[i].length_dw;
cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
+ GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
i--;
@@ -182,36 +243,35 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_partial_kdata;
}
+ /* Assume the worst on the following checks */
+ ret = -EINVAL;
switch (p->chunks[i].chunk_id) {
case AMDGPU_CHUNK_ID_IB:
- ++num_ibs;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
+ goto free_partial_kdata;
+
+ ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
+ if (ret)
+ goto free_partial_kdata;
break;
case AMDGPU_CHUNK_ID_FENCE:
- size = sizeof(struct drm_amdgpu_cs_chunk_fence);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
- &uf_offset);
+ ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
+ &uf_offset);
if (ret)
goto free_partial_kdata;
-
break;
case AMDGPU_CHUNK_ID_BO_HANDLES:
- size = sizeof(struct drm_amdgpu_bo_list_in);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_bo_list_in))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+ ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
-
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -223,22 +283,32 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
break;
default:
- ret = -EINVAL;
goto free_partial_kdata;
}
}
- ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
- if (ret)
- goto free_all_kdata;
+ if (!p->gang_size)
+ return -EINVAL;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
+ if (ret)
+ goto free_all_kdata;
+
+ ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
+ &fpriv->vm);
+ if (ret)
+ goto free_all_kdata;
+ }
+ p->gang_leader = p->jobs[p->gang_size - 1];
- if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+ if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
ret = -ECANCELED;
goto free_all_kdata;
}
if (p->uf_entry.tv.bo)
- p->job->uf_addr = uf_offset;
+ p->gang_leader->uf_addr = uf_offset;
kvfree(chunk_array);
/* Use this opportunity to fill in task info for the vm */
@@ -260,6 +330,297 @@ free_chunk:
return ret;
}
+static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk,
+ unsigned int *ce_preempt,
+ unsigned int *de_preempt)
+{
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_ring *ring;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ job = p->jobs[r];
+ ring = amdgpu_job_ring(job);
+ ib = &job->ibs[job->num_ibs++];
+
+ /* MM engine doesn't support user fences */
+ if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
+ return -EINVAL;
+
+ if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
+ (*ce_preempt)++;
+ else
+ (*de_preempt)++;
+
+ /* Each GFX command submit allows only 1 IB max
+ * preemptible for CE & DE */
+ if (*ce_preempt > 1 || *de_preempt > 1)
+ return -EINVAL;
+ }
+
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+
+ r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+
+ ib->gpu_addr = chunk_ib->va_start;
+ ib->length_dw = chunk_ib->ib_bytes / 4;
+ ib->flags = chunk_ib->flags;
+ return 0;
+}
+
+static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_ctx *ctx;
+ struct drm_sched_entity *entity;
+ struct dma_fence *fence;
+
+ ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
+ r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
+ deps[i].ip_instance,
+ deps[i].ring, &entity);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ struct drm_sched_fence *s_fence;
+ struct dma_fence *old = fence;
+
+ s_fence = to_drm_sched_fence(fence);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
+ uint32_t handle, u64 point,
+ u64 flags)
+{
+ struct dma_fence *fence;
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
+ return r;
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ dma_fence_put(fence);
+
+ return r;
+}
+
+static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+
+ for (i = 0; i < num_deps; ++i) {
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
+ return -EINVAL;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = dma_fence_chain_alloc();
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ dma_fence_chain_free(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
+{
+ unsigned int ce_preempt = 0, de_preempt = 0;
+ int i, r;
+
+ for (i = 0; i < p->nchunks; ++i) {
+ struct amdgpu_cs_chunk *chunk;
+
+ chunk = &p->chunks[i];
+
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_IB:
+ r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ r = amdgpu_cs_p2_dependencies(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ r = amdgpu_cs_p2_syncobj_in(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+ r = amdgpu_cs_p2_syncobj_out(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
+ if (r)
+ return r;
+ break;
+ }
+ }
+
+ return 0;
+}
+
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
@@ -495,9 +856,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- struct amdgpu_bo *gds;
- struct amdgpu_bo *gws;
- struct amdgpu_bo *oa;
+ unsigned int i;
int r;
INIT_LIST_HEAD(&p->validated);
@@ -581,16 +940,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->bo_va = amdgpu_vm_bo_find(vm, bo);
}
- /* Move fence waiting after getting reservation lock of
- * PD root. Then there is no need on a ctx mutex lock.
- */
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- goto error_validate;
- }
-
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
@@ -611,197 +960,139 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (r)
goto error_validate;
- amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
- p->bytes_moved_vis);
+ if (p->uf_entry.tv.bo) {
+ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
+ r = amdgpu_ttm_alloc_gart(&uf->tbo);
+ if (r)
+ goto error_validate;
- if (gds) {
- p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
- p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
- }
- if (gws) {
- p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
- p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
- }
- if (oa) {
- p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
- p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
}
- if (!r && p->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
- r = amdgpu_ttm_alloc_gart(&uf->tbo);
- p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
- }
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
+ p->bo_list->gws_obj,
+ p->bo_list->oa_obj);
+ return 0;
error_validate:
- if (r)
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out_free_user_pages:
- if (r) {
- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- if (!e->user_pages)
- continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
- kvfree(e->user_pages);
- e->user_pages = NULL;
- }
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (!e->user_pages)
+ continue;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
}
return r;
}
-static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_bo_list_entry *e;
- int r;
+ int i, j;
- list_for_each_entry(e, &p->validated, tv.head) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- struct dma_resv *resv = bo->tbo.base.resv;
- enum amdgpu_sync_mode sync_mode;
+ if (!trace_amdgpu_cs_enabled())
+ return;
- sync_mode = amdgpu_bo_explicit_sync(bo) ?
- AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
- &fpriv->vm);
- if (r)
- return r;
+ for (i = 0; i < p->gang_size; ++i) {
+ struct amdgpu_job *job = p->jobs[i];
+
+ for (j = 0; j < job->num_ibs; ++j)
+ trace_amdgpu_cs(p, job, &job->ibs[j]);
}
- return 0;
}
-/**
- * amdgpu_cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- * @backoff: indicator to backoff the reservation
- *
- * If error is set then unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
- bool backoff)
+static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
- unsigned i;
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ unsigned int i;
+ int r;
- if (error && backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
- mutex_unlock(&parser->bo_list->bo_list_mutex);
- }
+ /* Only for UVD/VCE VM emulation */
+ if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
+ return 0;
- for (i = 0; i < parser->num_post_deps; i++) {
- drm_syncobj_put(parser->post_deps[i].syncobj);
- kfree(parser->post_deps[i].chain);
- }
- kfree(parser->post_deps);
+ for (i = 0; i < job->num_ibs; ++i) {
+ struct amdgpu_ib *ib = &job->ibs[i];
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj;
+ uint64_t va_start;
+ uint8_t *kptr;
- dma_fence_put(parser->fence);
+ va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+ return r;
+ }
- if (parser->ctx) {
- mutex_unlock(&parser->ctx->lock);
- amdgpu_ctx_put(parser->ctx);
+ if ((va_start + ib->length_dw * 4) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
+
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r) {
+ return r;
+ }
+
+ kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
+
+ if (ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, ib->length_dw * 4);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, job, ib);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
}
- if (parser->bo_list)
- amdgpu_bo_list_put(parser->bo_list);
- for (i = 0; i < parser->nchunks; i++)
- kvfree(parser->chunks[i].kdata);
- kvfree(parser->chunks);
- if (parser->job)
- amdgpu_job_free(parser->job);
- if (parser->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
+ return 0;
+}
- amdgpu_bo_unref(&uf);
+static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
+{
+ unsigned int i;
+ int r;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
+ if (r)
+ return r;
}
+ return 0;
}
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_job *job = p->gang_leader;
struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
+ unsigned int i;
int r;
- /* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
- unsigned i, j;
-
- for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- struct amdgpu_cs_chunk *chunk;
- uint64_t offset, va_start;
- struct amdgpu_ib *ib;
- uint8_t *kptr;
-
- chunk = &p->chunks[i];
- ib = &p->job->ibs[j];
- chunk_ib = chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
- if (r) {
- DRM_ERROR("IB va_start is invalid\n");
- return r;
- }
-
- if ((va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
-
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
-
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += va_start - offset;
-
- if (ring->funcs->parse_cs) {
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
- if (r)
- return r;
- } else {
- ib->ptr = (uint32_t *)kptr;
- r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
- amdgpu_bo_kunmap(aobj);
- if (r)
- return r;
- }
-
- j++;
- }
- }
-
- if (!p->job->vm)
- return amdgpu_cs_sync_rings(p);
-
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -810,18 +1101,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+ if (fpriv->csa_va) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -837,16 +1128,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
- r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+ if (r)
return r;
- }
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -857,11 +1144,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
+ r = amdgpu_sync_fence(&job->sync, vm->last_update);
if (r)
return r;
- p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ for (i = 0; i < p->gang_size; ++i) {
+ job = p->jobs[i];
+
+ if (!job->vm)
+ continue;
+
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ }
if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
@@ -876,331 +1170,40 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
- return amdgpu_cs_sync_rings(p);
-}
-
-static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
-{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- int r, ce_preempt = 0, de_preempt = 0;
- struct amdgpu_ring *ring;
- int i, j;
-
- for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
- struct amdgpu_cs_chunk *chunk;
- struct amdgpu_ib *ib;
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct drm_sched_entity *entity;
-
- chunk = &parser->chunks[i];
- ib = &parser->job->ibs[j];
- chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
- (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
- ce_preempt++;
- else
- de_preempt++;
- }
-
- /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
- if (ce_preempt > 1 || de_preempt > 1)
- return -EINVAL;
- }
-
- r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &entity);
- if (r)
- return r;
-
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
- parser->job->preamble_status |=
- AMDGPU_PREAMBLE_IB_PRESENT;
-
- if (parser->entity && parser->entity != entity)
- return -EINVAL;
-
- /* Return if there is no run queue associated with this entity.
- * Possibly because of disabled HW IP*/
- if (entity->rq == NULL)
- return -EINVAL;
-
- parser->entity = entity;
-
- ring = to_amdgpu_ring(entity->rq->sched);
- r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0,
- AMDGPU_IB_POOL_DELAYED, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
- ib->gpu_addr = chunk_ib->va_start;
- ib->length_dw = chunk_ib->ib_bytes / 4;
- ib->flags = chunk_ib->flags;
-
- j++;
- }
-
- /* MM engine doesn't support user fences */
- ring = to_amdgpu_ring(parser->entity->rq->sched);
- if (parser->job->uf_addr && ring->funcs->no_user_fence)
- return -EINVAL;
-
return 0;
}
-static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_dep *deps;
-
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_dep);
-
- for (i = 0; i < num_deps; ++i) {
- struct amdgpu_ctx *ctx;
- struct drm_sched_entity *entity;
- struct dma_fence *fence;
-
- ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
- if (ctx == NULL)
- return -EINVAL;
-
- r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
- deps[i].ip_instance,
- deps[i].ring, &entity);
- if (r) {
- amdgpu_ctx_put(ctx);
- return r;
- }
-
- fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
- amdgpu_ctx_put(ctx);
-
- if (IS_ERR(fence))
- return PTR_ERR(fence);
- else if (!fence)
- continue;
-
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
- struct drm_sched_fence *s_fence;
- struct dma_fence *old = fence;
-
- s_fence = to_drm_sched_fence(fence);
- fence = dma_fence_get(&s_fence->scheduled);
- dma_fence_put(old);
- }
-
- r = amdgpu_sync_fence(&p->job->sync, fence);
- dma_fence_put(fence);
- if (r)
- return r;
- }
- return 0;
-}
-
-static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
- uint32_t handle, u64 point,
- u64 flags)
-{
- struct dma_fence *fence;
+ struct amdgpu_job *leader = p->gang_leader;
+ struct amdgpu_bo_list_entry *e;
+ unsigned int i;
int r;
- r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
- if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
- handle, point, r);
- return r;
- }
-
- r = amdgpu_sync_fence(&p->job->sync, fence);
- dma_fence_put(fence);
-
- return r;
-}
-
-static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_sem *deps;
- unsigned num_deps;
- int i, r;
+ list_for_each_entry(e, &p->validated, tv.head) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
- 0, 0);
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
- return 0;
-}
-
-
-static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
- unsigned num_deps;
- int i, r;
-
- syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_syncobj);
- for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p,
- syncobj_deps[i].handle,
- syncobj_deps[i].point,
- syncobj_deps[i].flags);
+ for (i = 0; i < p->gang_size - 1; ++i) {
+ r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
if (r)
return r;
}
- return 0;
-}
-
-static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_sem *deps;
- unsigned num_deps;
- int i;
-
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_sem);
-
- if (p->post_deps)
- return -EINVAL;
-
- p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
- GFP_KERNEL);
- p->num_post_deps = 0;
-
- if (!p->post_deps)
- return -ENOMEM;
-
-
- for (i = 0; i < num_deps; ++i) {
- p->post_deps[i].syncobj =
- drm_syncobj_find(p->filp, deps[i].handle);
- if (!p->post_deps[i].syncobj)
- return -EINVAL;
- p->post_deps[i].chain = NULL;
- p->post_deps[i].point = 0;
- p->num_post_deps++;
- }
-
- return 0;
-}
-
-
-static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
- unsigned num_deps;
- int i;
-
- syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_syncobj);
-
- if (p->post_deps)
- return -EINVAL;
-
- p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
- GFP_KERNEL);
- p->num_post_deps = 0;
-
- if (!p->post_deps)
- return -ENOMEM;
-
- for (i = 0; i < num_deps; ++i) {
- struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
-
- dep->chain = NULL;
- if (syncobj_deps[i].point) {
- dep->chain = dma_fence_chain_alloc();
- if (!dep->chain)
- return -ENOMEM;
- }
-
- dep->syncobj = drm_syncobj_find(p->filp,
- syncobj_deps[i].handle);
- if (!dep->syncobj) {
- dma_fence_chain_free(dep->chain);
- return -EINVAL;
- }
- dep->point = syncobj_deps[i].point;
- p->num_post_deps++;
- }
-
- return 0;
-}
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+ if (r && r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *p)
-{
- int i, r;
-
- /* TODO: Investigate why we still need the context lock */
- mutex_unlock(&p->ctx->lock);
-
- for (i = 0; i < p->nchunks; ++i) {
- struct amdgpu_cs_chunk *chunk;
-
- chunk = &p->chunks[i];
-
- switch (chunk->chunk_id) {
- case AMDGPU_CHUNK_ID_DEPENDENCIES:
- case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
- r = amdgpu_cs_process_fence_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
- r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
- r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
- r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
- r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
- if (r)
- goto out;
- break;
- }
- }
-
-out:
- mutex_lock(&p->ctx->lock);
return r;
}
@@ -1225,20 +1228,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct drm_sched_entity *entity = p->entity;
+ struct amdgpu_job *leader = p->gang_leader;
struct amdgpu_bo_list_entry *e;
- struct amdgpu_job *job;
+ unsigned int i;
uint64_t seq;
int r;
- job = p->job;
- p->job = NULL;
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_arm(&p->jobs[i]->base);
- r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
- if (r)
- goto error_unlock;
+ for (i = 0; i < (p->gang_size - 1); ++i) {
+ struct dma_fence *fence;
- drm_sched_job_arm(&job->base);
+ fence = &p->jobs[i]->base.s_fence->scheduled;
+ r = amdgpu_sync_fence(&leader->sync, fence);
+ if (r)
+ goto error_cleanup;
+ }
+
+ if (p->gang_size > 1) {
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_gang_leader(p->jobs[i], leader);
+ }
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
@@ -1249,6 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
* -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
*/
+ r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
@@ -1256,67 +1268,96 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
if (r) {
r = -EAGAIN;
- goto error_abort;
+ goto error_unlock;
}
- p->fence = dma_fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&leader->base.s_fence->finished);
+ list_for_each_entry(e, &p->validated, tv.head) {
+
+ /* Everybody except for the gang leader uses READ */
+ for (i = 0; i < (p->gang_size - 1); ++i) {
+ dma_resv_add_fence(e->tv.bo->base.resv,
+ &p->jobs[i]->base.s_fence->finished,
+ DMA_RESV_USAGE_READ);
+ }
- seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
+ /* The gang leader is remembered as writer */
+ e->tv.num_shared = 0;
+ }
+
+ seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+ p->fence);
amdgpu_cs_post_dependencies(p);
- if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
!p->ctx->preamble_presented) {
- job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
p->ctx->preamble_presented = true;
}
cs->out.handle = seq;
- job->uf_sequence = seq;
-
- amdgpu_job_free_resources(job);
+ leader->uf_sequence = seq;
- trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- drm_sched_entity_push_job(&job->base);
+ for (i = 0; i < p->gang_size; ++i) {
+ amdgpu_job_free_resources(p->jobs[i]);
+ trace_amdgpu_cs_ioctl(p->jobs[i]);
+ drm_sched_entity_push_job(&p->jobs[i]->base);
+ p->jobs[i] = NULL;
+ }
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
-
- /* Make sure all BOs are remembered as writers */
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->tv.num_shared = 0;
-
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+
mutex_unlock(&p->adev->notifier_lock);
mutex_unlock(&p->bo_list->bo_list_mutex);
-
return 0;
-error_abort:
- drm_sched_job_cleanup(&job->base);
+error_unlock:
mutex_unlock(&p->adev->notifier_lock);
-error_unlock:
- amdgpu_job_free(job);
+error_cleanup:
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_cleanup(&p->jobs[i]->base);
return r;
}
-static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+/* Clean up the parser structure */
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
- int i;
+ unsigned int i;
- if (!trace_amdgpu_cs_enabled())
- return;
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
+ }
+ kfree(parser->post_deps);
+
+ dma_fence_put(parser->fence);
+
+ if (parser->ctx)
+ amdgpu_ctx_put(parser->ctx);
+ if (parser->bo_list)
+ amdgpu_bo_list_put(parser->bo_list);
+
+ for (i = 0; i < parser->nchunks; i++)
+ kvfree(parser->chunks[i].kdata);
+ kvfree(parser->chunks);
+ for (i = 0; i < parser->gang_size; ++i) {
+ if (parser->jobs[i])
+ amdgpu_job_free(parser->jobs[i]);
+ }
+ if (parser->uf_entry.tv.bo) {
+ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
- for (i = 0; i < parser->job->num_ibs; i++)
- trace_amdgpu_cs(parser, i);
+ amdgpu_bo_unref(&uf);
+ }
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- union drm_amdgpu_cs *cs = data;
- struct amdgpu_cs_parser parser = {};
- bool reserved_buffers = false;
+ struct amdgpu_cs_parser parser;
int r;
if (amdgpu_ras_intr_triggered())
@@ -1325,25 +1366,20 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
- parser.adev = adev;
- parser.filp = filp;
-
- r = amdgpu_cs_parser_init(&parser, data);
+ r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
if (printk_ratelimit())
DRM_ERROR("Failed to initialize parser %d!\n", r);
- goto out;
+ return r;
}
- r = amdgpu_cs_ib_fill(adev, &parser);
+ r = amdgpu_cs_pass1(&parser, data);
if (r)
- goto out;
+ goto error_fini;
- r = amdgpu_cs_dependencies(adev, &parser);
- if (r) {
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
- goto out;
- }
+ r = amdgpu_cs_pass2(&parser);
+ if (r)
+ goto error_fini;
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
@@ -1351,22 +1387,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
- goto out;
+ goto error_fini;
}
- reserved_buffers = true;
+ r = amdgpu_cs_patch_jobs(&parser);
+ if (r)
+ goto error_backoff;
+
+ r = amdgpu_cs_vm_handling(&parser);
+ if (r)
+ goto error_backoff;
+
+ r = amdgpu_cs_sync_rings(&parser);
+ if (r)
+ goto error_backoff;
trace_amdgpu_cs_ibs(&parser);
- r = amdgpu_cs_vm_handling(&parser);
+ r = amdgpu_cs_submit(&parser, data);
if (r)
- goto out;
+ goto error_backoff;
- r = amdgpu_cs_submit(&parser, cs);
+ amdgpu_cs_parser_fini(&parser);
+ return 0;
-out:
- amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+error_backoff:
+ ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
+ mutex_unlock(&parser.bo_list->bo_list_mutex);
+error_fini:
+ amdgpu_cs_parser_fini(&parser);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index 30ecc4917f81..cbaa19b2b8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -27,6 +27,8 @@
#include "amdgpu_bo_list.h"
#include "amdgpu_ring.h"
+#define AMDGPU_CS_GANG_SIZE 4
+
struct amdgpu_bo_va_mapping;
struct amdgpu_cs_chunk {
@@ -50,9 +52,11 @@ struct amdgpu_cs_parser {
unsigned nchunks;
struct amdgpu_cs_chunk *chunks;
- /* scheduler job object */
- struct amdgpu_job *job;
- struct drm_sched_entity *entity;
+ /* scheduler job objects */
+ unsigned int gang_size;
+ struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *gang_leader;
/* buffer objects */
struct ww_acquire_ctx ticket;
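The fields added above carry a convention used throughout the reworked CS code: the entity/job in slot gang_size - 1 is the gang leader, and its finished fence is what gets handed back to userspace. A minimal illustrative helper, assuming that convention; the helper itself is hypothetical and not part of the patch:

static inline struct drm_sched_entity *
amdgpu_cs_leader_entity(struct amdgpu_cs_parser *p)
{
	/* the last slot holds the gang leader; amdgpu_cs_submit() passes
	 * p->entities[p->gang_size - 1] to amdgpu_ctx_add_fence()
	 */
	return p->entities[p->gang_size - 1];
}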
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8ee4e8491f39..f6d9d5da53cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -315,7 +315,6 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
kref_init(&ctx->refcount);
ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@@ -402,12 +401,11 @@ static void amdgpu_ctx_fini(struct kref *ref)
}
}
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
drm_dev_exit(idx);
}
- mutex_destroy(&ctx->lock);
kfree(ctx);
}
@@ -848,7 +846,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
mgr->adev = adev;
mutex_init(&mgr->lock);
- idr_init(&mgr->ctx_handles);
+ idr_init_base(&mgr->ctx_handles, 1);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
atomic64_set(&mgr->time_spend[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index cc7c8afff414..0fa0e56daf67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -53,7 +53,6 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
- struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e2eec985adb3..6066aebf491c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1043,6 +1043,157 @@ err:
}
/**
+ * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * Read the last residency value logged. It doesn't auto-update; one needs to
+ * stop logging before reading the current value.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = amdgpu_get_gfx_off_residency(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u32 value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ amdgpu_set_gfx_off_residency(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+
+/**
+ * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u64 value = 0;
+
+ r = amdgpu_get_gfx_off_entrycount(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (u64 *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
* amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
*
* @f: open file handle
@@ -1249,6 +1400,19 @@ static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_count_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_residency_read,
+ .write = amdgpu_debugfs_gfxoff_residency_write,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs2_fops,
@@ -1261,6 +1425,8 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gpr_fops,
&amdgpu_debugfs_gfxoff_fops,
&amdgpu_debugfs_gfxoff_status_fops,
+ &amdgpu_debugfs_gfxoff_count_fops,
+ &amdgpu_debugfs_gfxoff_residency_fops,
};
static const char *debugfs_regs_names[] = {
@@ -1275,6 +1441,8 @@ static const char *debugfs_regs_names[] = {
"amdgpu_gpr",
"amdgpu_gfxoff",
"amdgpu_gfxoff_status",
+ "amdgpu_gfxoff_count",
+ "amdgpu_gfxoff_residency",
};
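The two new files exchange raw 32-bit words rather than ASCII text. A hedged userspace sketch exercising the residency file; the debugfs path and DRI minor number are assumptions that depend on the system:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_gfxoff_residency";
	uint32_t start = 1, stop = 0, residency = 0;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, &start, sizeof(start));	/* non-zero word: start logging */
	sleep(1);				/* let residency accumulate */
	write(fd, &stop, sizeof(stop));		/* zero word: stop logging */
	lseek(fd, 0, SEEK_SET);
	if (read(fd, &residency, sizeof(residency)) == sizeof(residency))
		printf("last GFXOFF residency: %u\n", residency);
	close(fd);
	return 0;
}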
/**
@@ -1705,7 +1873,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
- uint32_t *new, *tmp = NULL;
+ uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
do {
@@ -1747,7 +1915,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;
error_free:
- kfree(tmp);
+ if (tmp != new)
+ kfree(tmp);
kfree(new);
return ret;
}
@@ -1785,6 +1954,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return PTR_ERR(ent);
}
+ debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
+
/* Register debugfs entries for amdgpu_ttm */
amdgpu_ttm_debugfs_init(adev);
amdgpu_debugfs_pm_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a6fe3070b6..ab8f970b2849 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ /* need to do common hw init early so everything is set up for gmc */
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ if (r) {
+ DRM_ERROR("hw_init %d failed %d\n", i, r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* need to do gmc hw init early so we can allocate gpu mem */
/* Try to reserve bad pages early */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
@@ -2451,17 +2459,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
*/
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (amdgpu_xgmi_add_device(adev) == 0) {
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ if (!amdgpu_sriov_vf(adev)) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
- if (!hive->reset_domain ||
- !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
- r = -ENOENT;
- goto init_failed;
- }
+ if (!hive->reset_domain ||
+ !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
+ goto init_failed;
+ }
- /* Drop the early temporary reset domain we created for device */
- amdgpu_reset_put_reset_domain(adev->reset_domain);
- adev->reset_domain = hive->reset_domain;
+ /* Drop the early temporary reset domain we created for device */
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
+ }
}
}
@@ -3050,8 +3062,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
int i, r;
static enum amd_ip_block_type ip_order[] = {
- AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -3142,7 +3154,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -3499,6 +3512,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
+ RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
@@ -3577,6 +3591,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
adev->gfx.gfx_off_req_count = 1;
+ adev->gfx.gfx_off_residency = 0;
+ adev->gfx.gfx_off_entrycount = 0;
adev->pm.ac_power = power_supply_is_system_supplied() > 0;
atomic_set(&adev->throttling_logging_enabled, 1);
@@ -3965,8 +3981,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_gart_dummy_page_fini(adev);
- if (drm_dev_is_unplugged(adev_to_drm(adev)))
- amdgpu_device_unmap_mmio(adev);
+ amdgpu_device_unmap_mmio(adev);
}
@@ -3979,6 +3994,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
+ dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
amdgpu_reset_fini(adev);
@@ -4054,12 +4070,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+ if (r)
+ return r;
+ }
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
@@ -4083,6 +4107,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -4101,6 +4128,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4115,6 +4148,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
r = amdgpu_device_ip_resume(adev);
+
+ /* no matter what r is, always need to properly release full GPU */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
+
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
@@ -4413,8 +4453,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
- amdgpu_amdkfd_pre_reset(adev);
-
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4509,14 +4547,15 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
*/
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
- if (!amdgpu_device_ip_check_soft_reset(adev)) {
- dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
- return false;
- }
if (amdgpu_gpu_recovery == 0)
goto disabled;
+ if (!amdgpu_device_ip_check_soft_reset(adev)) {
+ dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
+ return false;
+ }
+
if (amdgpu_sriov_vf(adev))
return true;
@@ -4641,7 +4680,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!need_full_reset)
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- if (!need_full_reset) {
+ if (!need_full_reset && amdgpu_gpu_recovery) {
amdgpu_device_ip_pre_soft_reset(adev);
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
@@ -4737,11 +4776,14 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ bool gpu_reset_for_dev_remove = 0;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
+
+ reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
@@ -4754,6 +4796,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+ gpu_reset_for_dev_remove =
+ test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
/*
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
@@ -4798,6 +4844,18 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_ras_intr_cleared();
}
+ /* Since the mode1 reset affects base ip blocks, the
+ * phase1 ip blocks need to be resumed. Otherwise there
+ * will be a BIOS signature error and the psp bootloader
+ * can't load kdb on the next amdgpu install.
+ */
+ if (gpu_reset_for_dev_remove) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_device_ip_resume_phase1(tmp_adev);
+
+ goto end;
+ }
+
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
@@ -5037,6 +5095,7 @@ static void amdgpu_device_recheck_guilty_jobs(
/* set guilty */
drm_sched_increase_karma(s_job);
+ amdgpu_reset_prepare_hwcontext(adev, reset_context);
retry:
/* do hw reset */
if (amdgpu_sriov_vf(adev)) {
@@ -5119,6 +5178,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false;
bool audio_suspended = false;
int tmp_vram_lost_counter;
+ bool gpu_reset_for_dev_remove = false;
+
+ gpu_reset_for_dev_remove =
+ test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
/*
* Special case: RAS triggered and full reset isn't supported
@@ -5146,6 +5210,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->job = job;
reset_context->hive = hive;
+
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
@@ -5153,8 +5218,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (gpu_reset_for_dev_remove && adev->shutdown)
+ tmp_adev->shutdown = true;
+ }
if (!list_is_first(&adev->reset_list, &device_list))
list_rotate_to_front(&adev->reset_list, &device_list);
device_list_handle = &device_list;
@@ -5237,6 +5305,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ if (gpu_reset_for_dev_remove) {
+ /* Workaround for ASICs that need to disable SMC first */
+ amdgpu_device_smu_fini_early(tmp_adev);
+ }
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -5265,8 +5337,14 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
- if (r && r == -EAGAIN)
+ if (r && r == -EAGAIN) {
+ set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
+ adev->asic_reset_res = 0;
goto retry;
+ }
+
+ if (!r && gpu_reset_for_dev_remove)
+ goto recover_end;
}
skip_hw_reset:
@@ -5340,6 +5418,7 @@ skip_sched_resume:
amdgpu_device_unset_mp1_state(tmp_adev);
}
+recover_end:
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
@@ -5522,8 +5601,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
- &peer_adev->dev, 1, true) < 0);
+ bool p2p_access =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
@@ -5697,6 +5777,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
@@ -5906,3 +5987,36 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
(void)RREG32(data);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if we switched to the new gang or a reference to the current
+ * gang leader.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang)
+{
+ struct dma_fence *old = NULL;
+
+ do {
+ dma_fence_put(old);
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(&adev->gang_submit);
+ rcu_read_unlock();
+
+ if (old == gang)
+ break;
+
+ if (!dma_fence_is_signaled(old))
+ return old;
+
+ } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+ old, gang) != old);
+
+ dma_fence_put(old);
+ return NULL;
+}
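A hedged caller sketch for the helper above; the wrapper name is hypothetical and not part of the patch. It shows the intended retry pattern: whenever a non-NULL fence comes back, wait for the previous gang leader and try the switch again.

static int example_submit_new_gang(struct amdgpu_device *adev,
				   struct dma_fence *gang)
{
	struct dma_fence *old;

	/* retry until the previous gang has signaled and the switch sticks */
	while ((old = amdgpu_device_switch_gang(adev, gang))) {
		long r = dma_fence_wait(old, true);

		dma_fence_put(old);
		if (r < 0)
			return r;	/* interrupted by a signal */
	}
	return 0;
}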
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 95d34590cad1..3993e6134914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -229,7 +229,7 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, ui
return r;
}
- memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
release_firmware(fw);
return 0;
@@ -1506,6 +1506,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
default:
@@ -1549,6 +1550,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
default:
@@ -1633,6 +1635,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
@@ -1682,6 +1685,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
@@ -1780,6 +1784,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
default:
@@ -1823,6 +1828,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -1903,7 +1909,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
break;
default:
dev_err(adev->dev,
@@ -1940,6 +1947,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2165,6 +2173,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->family = AMDGPU_FAMILY_GC_11_0_0;
break;
case IP_VERSION(11, 0, 1):
@@ -2234,7 +2243,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 3, 0):
case IP_VERSION(4, 3, 1):
- adev->nbio.funcs = &nbio_v4_3_funcs;
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
break;
case IP_VERSION(7, 7, 0):
@@ -2332,6 +2344,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c20922a5af9f..23998f727c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -498,6 +500,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = drm_atomic_helper_dirtyfb,
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1100,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 782cbca37538..7bd8e33b14be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -58,7 +58,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 429fcdf28836..16f6a313335e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -102,9 +102,10 @@
* - 3.46.0 - To enable hot plug amdgpu tests in libdrm
* - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
* - 3.48.0 - Add IP discovery version info to HW INFO
+ * - 3.49.0 - Add gang submit into CS IOCTL
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 48
+#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -2181,15 +2182,46 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- drm_dev_unplug(dev);
-
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+ bool need_to_reset_gpu = false;
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive->device_remove_count == 0)
+ need_to_reset_gpu = true;
+ hive->device_remove_count++;
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ need_to_reset_gpu = true;
+ }
+
+ /* Workaround for ASICs that need to reset SMU.
+ * Called only when the first device is removed.
+ */
+ if (need_to_reset_gpu) {
+ struct amdgpu_reset_context reset_context;
+
+ adev->shutdown = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
+ }
+
amdgpu_driver_unload_kms(dev);
+ drm_dev_unplug(dev);
+
/*
* Flush any in flight DMA operations from device.
* Clear the Bus Master Enable bit and then wait on the PCIe Device
@@ -2563,8 +2595,11 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- if (ret)
+ if (ret) {
+ if (amdgpu_device_supports_px(drm_dev))
+ pci_disable_device(pdev);
return ret;
+ }
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8adeb7469f1e..d0d99ed607dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ecada5eadfe3..e325150879df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
+ sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
- return true;
- else
+ return false;
+ else
+ return true;
+ } else {
return false;
+ }
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 222d3d7ea076..9546adc8a76f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
@@ -477,7 +478,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- if (adev->gfx.kiq.ring.sched.ready)
+ if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
@@ -610,6 +611,45 @@ unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_set_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
@@ -826,3 +866,142 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
}
return amdgpu_num_kcq;
}
+
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ uint32_t ucode_id)
+{
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct firmware *ucode_fw;
+ unsigned int fw_size;
+
+ switch (ucode_id) {
+ case AMDGPU_UCODE_ID_CP_PFP:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.ce_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ default:
+ break;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[ucode_id];
+ info->ucode_id = ucode_id;
+ info->fw = ucode_fw;
+ adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
+ }
+}
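Editor's note: the new amdgpu_gfx_cp_init_microcode() helper only records an already-fetched CP firmware blob and its page-aligned size with the PSP front-door loader; it does not request the file itself. A minimal sketch, assuming the caller has already populated adev->gfx.me_fw via request_firmware(), of how a GFX IP init path might register the RS64 ME image and its per-pipe stacks (illustrative only, not code from this patch):

/* Hedged sketch: register RS64 ME microcode with the PSP loader.
 * Assumes adev->gfx.me_fw was requested and validated elsewhere.
 */
static void gfx_register_rs64_me(struct amdgpu_device *adev)
{
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
}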
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 23a696d38390..832b3807f1d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -304,6 +304,10 @@ struct amdgpu_gfx {
uint32_t rlc_srlg_feature_version;
uint32_t rlc_srls_fw_version;
uint32_t rlc_srls_feature_version;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t mec_feature_version;
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
@@ -332,10 +336,12 @@ struct amdgpu_gfx {
uint32_t srbm_soft_reset;
/* gfx off */
- bool gfx_off_state; /* true: enabled, false: disabled */
- struct mutex gfx_off_mutex;
- uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
- struct delayed_work gfx_off_delay_work;
+ bool gfx_off_state; /* true: enabled, false: disabled */
+ struct mutex gfx_off_mutex; /* mutex to change gfxoff state */
+ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
+ struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */
+ uint32_t gfx_off_residency; /* last logged residency */
+ uint64_t gfx_off_entrycount; /* count of times GPU has entered GFXOFF state */
/* pipe reservation */
struct mutex pipe_reserve_mutex;
@@ -407,6 +413,10 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value);
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency);
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
@@ -416,4 +426,6 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index beabab515836..c7b44aeb671b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -35,6 +35,9 @@ struct amdgpu_gfxhub_funcs {
void (*init)(struct amdgpu_device *adev);
int (*get_xgmi_info)(struct amdgpu_device *adev);
void (*utcl2_harvest)(struct amdgpu_device *adev);
+ void (*mode2_save_regs)(struct amdgpu_device *adev);
+ void (*mode2_restore_regs)(struct amdgpu_device *adev);
+ void (*halt)(struct amdgpu_device *adev);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index aebc384531ac..34233a74248c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -572,45 +572,15 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
struct amdgpu_gmc *gmc = &adev->gmc;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- case IP_VERSION(9, 3, 0):
- case IP_VERSION(9, 4, 0):
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- case IP_VERSION(10, 3, 3):
- case IP_VERSION(10, 3, 4):
- case IP_VERSION(10, 3, 5):
- case IP_VERSION(10, 3, 6):
- case IP_VERSION(10, 3, 7):
- /*
- * noretry = 0 will cause kfd page fault tests fail
- * for some ASICs, so set default to 1 for these ASICs.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 1;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- default:
- /* Raven currently has issues with noretry
- * regardless of what we decide for other
- * asics, we should leave raven with
- * noretry = 0 until we root cause the
- * issues.
- *
- * default this to 0 for now, but we may want
- * to change this in the future for certain
- * GPUs as it can increase performance in
- * certain cases.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 0;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- }
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+ gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(9, 4, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver >= IP_VERSION(10, 3, 0));
+
+ gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
}
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 008eaca27151..0305b660cd17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -264,6 +264,32 @@ struct amdgpu_gmc {
u64 mall_size;
/* number of UMC instances */
int num_umc;
+ /* mode2 save restore */
+ u64 VM_L2_CNTL;
+ u64 VM_L2_CNTL2;
+ u64 VM_DUMMY_PAGE_FAULT_CNTL;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_LO32;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_HI32;
+ u64 VM_L2_PROTECTION_FAULT_CNTL;
+ u64 VM_L2_PROTECTION_FAULT_CNTL2;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL3;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL4;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_LO32;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_HI32;
+ u64 VM_DEBUG;
+ u64 VM_L2_MM_GROUP_RT_CLASSES;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID2;
+ u64 VM_L2_CACHE_PARITY_CNTL;
+ u64 VM_L2_IH_LOG_CNTL;
+ u64 VM_CONTEXT_CNTL[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16];
+ u64 MC_VM_MX_L1_TLB_CNTL;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 8c6b2284cf56..1f3302aebeff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -205,6 +205,42 @@ void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
}
/**
+ * amdgpu_gtt_mgr_intersects - test for intersection
+ *
+ * @man: Our manager object
+ * @res: The resource to test
+ * @place: The place for the new allocation
+ * @size: The size of the new allocation
+ *
+ * Simplified intersection test; all that matters is whether the resource needs GART or not.
+ */
+static bool amdgpu_gtt_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+}
+
+/**
+ * amdgpu_gtt_mgr_compatible - test for compatibility
+ *
+ * @man: Our manager object
+ * @res: The resource to test
+ * @place: The place for the new allocation
+ * @size: The size of the new allocation
+ *
+ * Simplified compatibility test.
+ */
+static bool amdgpu_gtt_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+}
+
+/**
* amdgpu_gtt_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
@@ -225,6 +261,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
.alloc = amdgpu_gtt_mgr_new,
.free = amdgpu_gtt_mgr_del,
+ .intersects = amdgpu_gtt_mgr_intersects,
+ .compatible = amdgpu_gtt_mgr_compatible,
.debug = amdgpu_gtt_mgr_debug
};
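Editor's note: both callbacks registered above encode the same rule: an allocation constrained by place->lpfn can only be satisfied by a GTT resource that already has a GART address. A minimal sketch of the decision they implement (the helper name below is hypothetical):

/* Hedged sketch: open-coded equivalent of the two TTM callbacks. */
static bool gtt_res_usable_for_place(struct ttm_resource *res,
				     const struct ttm_place *place)
{
	if (!place->lpfn)	/* no GART constraint, anything works */
		return true;
	return amdgpu_gtt_mgr_has_gart_addr(res);
}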
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5071b96be982..46c99331d7f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -49,6 +49,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
memset(&ti, 0, sizeof(struct amdgpu_task_info));
+ adev->job_hang = true;
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
@@ -71,6 +72,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
@@ -82,6 +84,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ adev->job_hang = false;
drm_dev_exit(idx);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -102,7 +105,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
*/
(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
- (*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->sched_sync);
@@ -122,6 +124,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
+ (*job)->num_ibs = 1;
r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -129,6 +132,23 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
return r;
}
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa)
+{
+ if (gds) {
+ job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+ job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+ }
+ if (gws) {
+ job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
+ job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+ }
+ if (oa) {
+ job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
+ job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+}
+
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
@@ -153,13 +173,34 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
dma_fence_put(&job->hw_fence);
}
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader)
+{
+ struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+ WARN_ON(job->gang_submit);
+
+ /*
+ * Don't add a reference when we are the gang leader to avoid circle
+ * dependency.
+ */
+ if (job != leader)
+ dma_fence_get(fence);
+ job->gang_submit = fence;
+}
+
void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
+ if (job->gang_submit != &job->base.s_fence->scheduled)
+ dma_fence_put(job->gang_submit);
- dma_fence_put(&job->hw_fence);
+ if (!job->hw_fence.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -224,12 +265,16 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
fence = amdgpu_sync_get_fence(&job->sync);
}
+ if (!fence && job->gang_submit)
+ fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
return fence;
}
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
int r = 0;
@@ -241,8 +286,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
- if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
- dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+ /* Skip job if VRAM is lost and never resubmit gangs */
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+ (job->job_run_counter && job->gang_submit))
+ dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
@@ -272,10 +319,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
-
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
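Editor's note: a gang submission groups jobs that must run back to back. amdgpu_job_set_gang_leader() stores the leader's scheduled fence in every member, and the leader deliberately skips taking a reference on its own fence to avoid a refcount cycle; amdgpu_job_dependency() then returns amdgpu_device_switch_gang() so members only start once the whole gang is pushed. A rough sketch of the wiring, assuming both jobs have already been through drm_sched_job_init() so s_fence exists (hypothetical helper, not the CS code from this series):

/* Hedged sketch: wire two already-initialized jobs into one gang. */
static void make_two_job_gang(struct amdgpu_job *leader,
			      struct amdgpu_job *member)
{
	amdgpu_job_set_gang_leader(leader, leader);	/* leader: no extra ref */
	amdgpu_job_set_gang_leader(member, leader);	/* member grabs a ref */
}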
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index babc0af751c2..ab7b150e5d50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -50,6 +50,7 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
struct dma_fence hw_fence;
+ struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
bool vm_needs_flush;
@@ -72,11 +73,20 @@ struct amdgpu_job {
struct amdgpu_ib ibs[];
};
+static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
+{
+ return to_amdgpu_ring(job->base.entity->rq->sched);
+}
+
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1369c25448dc..fe23e09eec98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -247,6 +247,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->gfx.rlc_srls_fw_version;
fw_info->feature = adev->gfx.rlc_srls_feature_version;
break;
+ case AMDGPU_INFO_FW_GFX_RLCP:
+ fw_info->ver = adev->gfx.rlcp_ucode_version;
+ fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLCV:
+ fw_info->ver = adev->gfx.rlcv_ucode_version;
+ fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
+ break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
@@ -328,6 +336,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->psp.cap_fw_version;
fw_info->feature = adev->psp.cap_feature_version;
break;
+ case AMDGPU_INFO_FW_MES_KIQ:
+ fw_info->ver = adev->mes.ucode_fw_version[0];
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_MES:
+ fw_info->ver = adev->mes.ucode_fw_version[1];
+ fw_info->feature = 0;
+ break;
default:
return -EINVAL;
}
@@ -1160,7 +1176,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
mutex_init(&fpriv->bo_list_lock);
- idr_init(&fpriv->bo_list_handles);
+ idr_init_base(&fpriv->bo_list_handles, 1);
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
@@ -1469,6 +1485,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* RLCP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLCV */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
@@ -1581,6 +1613,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
fw_info.feature, fw_info.ver);
}
+ /* MES_KIQ */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MES */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
return 0;
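Editor's note: the new RLCP/RLCV and MES versions are visible both in the debugfs dump above and through the existing firmware-version query ioctl. A hedged userspace sketch of querying the RLCP version via DRM_IOCTL_AMDGPU_INFO (UAPI field names as in amdgpu_drm.h; header path and error handling are simplified assumptions):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* may be <libdrm/amdgpu_drm.h> depending on install */

/* Hedged sketch: fd is an open amdgpu render node, e.g. /dev/dri/renderD128. */
static int query_rlcp_fw(int fd)
{
	struct drm_amdgpu_info_firmware fw = {0};
	struct drm_amdgpu_info req = {0};

	req.return_pointer = (uintptr_t)&fw;
	req.return_size = sizeof(fw);
	req.query = AMDGPU_INFO_FW_VERSION;
	req.query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req))
		return -1;
	printf("RLCP fw 0x%08x feature %u\n", fw.ver, fw.feature);
	return 0;
}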
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fe82b8b19a4e..0c546245793b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+ /* zero sdma_hqd_mask for non-existent engine */
+ else if (adev->sdma.num_instances == 1)
+ adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
else
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7b46f6bf4187..ad980f4b66e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
uint64_t tba_addr;
uint64_t tma_addr;
uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
};
struct mes_remove_queue_input {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d788a00043a5..37322550d750 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4570ad449390..e6a9b9fc9e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -591,7 +591,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!bp->destroy)
bp->destroy = &amdgpu_bo_destroy;
- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
&bo->placement, page_align, &ctx, NULL,
bp->resv, bp->destroy);
if (unlikely(r != 0))
@@ -1309,7 +1309,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->resource->mem_type != TTM_PL_VRAM ||
+ if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || adev->shutdown)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index b067ce45d226..effa7df3ddbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -138,6 +138,7 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
@@ -327,23 +328,32 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "vega10");
break;
case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "navi12");
break;
case IP_VERSION(11, 0, 7):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "sienna_cichlid");
break;
case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "aldebaran");
ret &= psp_init_ta_microcode(psp, "aldebaran");
break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ break;
default:
BUG();
break;
}
-
return ret;
}
@@ -486,11 +496,14 @@ static int psp_sw_fini(void *handle)
release_firmware(psp->ta_fw);
psp->ta_fw = NULL;
}
- if (adev->psp.cap_fw) {
+ if (psp->cap_fw) {
release_firmware(psp->cap_fw);
psp->cap_fw = NULL;
}
-
+ if (psp->toc_fw) {
+ release_firmware(psp->toc_fw);
+ psp->toc_fw = NULL;
+ }
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
@@ -498,6 +511,11 @@ static int psp_sw_fini(void *handle)
kfree(cmd);
cmd = NULL;
+ if (psp->km_ring.ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &psp->km_ring.ring_mem_mc_addr,
+ (void **)&psp->km_ring.ring_mem);
+
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -753,7 +771,7 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
@@ -766,6 +784,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 10):
return true;
default:
return false;
@@ -812,7 +831,7 @@ static int psp_tmr_unload(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_unload_cmd_buf(psp, cmd);
- DRM_INFO("free PSP TMR buffer\n");
+ dev_info(psp->adev->dev, "free PSP TMR buffer\n");
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -2041,6 +2060,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->ras_drv)) &&
+ (psp->funcs->bootloader_load_ras_drv != NULL)) {
+ ret = psp_bootloader_load_ras_drv(psp);
+ if (ret) {
+ DRM_ERROR("PSP load ras_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -2401,7 +2429,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
- if (!ucode->fw)
+ if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
@@ -2411,20 +2439,7 @@ static bool fw_load_skip_check(struct psp_context *psp,
return true;
if (amdgpu_sriov_vf(psp->adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
- /*skip ucode loading in SRIOV VF */
+ amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
return true;
if (psp->autoload_supported &&
@@ -2498,7 +2513,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
- AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
+ adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -2641,6 +2656,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
@@ -3036,6 +3054,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dbg_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
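Editor's note: PSP_FW_TYPE_PSP_RAS_DRV is parsed out of the SOS binary descriptor list like the other bootloader components, and psp_hw_start() now pushes it through psp_bootloader_load_ras_drv() before SOS. A per-ASIC backend only needs to provide the callback; a hedged sketch of what such a hook could look like (load_component() below is a stand-in for whatever shared "copy blob to fw_pri_buf and kick the bootloader" helper the real backend uses, not an API from this patch):

/* Hedged sketch: psp_v13_0-style RAS driver bootloader hook. */
static int psp_vXX_bootloader_load_ras_drv(struct psp_context *psp)
{
	return load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}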
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c32b74bd970f..58ce3ebb446c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -36,6 +36,7 @@
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
enum psp_shared_mem_size {
@@ -71,6 +72,7 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SOCDRV = 0xB0000,
PSP_BL__LOAD_DBGDRV = 0xC0000,
PSP_BL__LOAD_INTFDRV = 0xD0000,
+ PSP_BL__LOAD_RASDRV = 0xE0000,
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@@ -114,6 +116,7 @@ struct psp_funcs
int (*bootloader_load_soc_drv)(struct psp_context *psp);
int (*bootloader_load_intf_drv)(struct psp_context *psp);
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
+ int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp,
@@ -323,6 +326,7 @@ struct psp_context
struct psp_bin_desc soc_drv;
struct psp_bin_desc intf_drv;
struct psp_bin_desc dbg_drv;
+ struct psp_bin_desc ras_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -403,6 +407,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
#define psp_bootloader_load_dbg_drv(psp) \
((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
+#define psp_bootloader_load_ras_drv(psp) \
+ ((psp)->funcs->bootloader_load_ras_drv ? \
+ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ff5361f5c2d4..ccebd8e2a2d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
amdgpu_ras_query_error_status(adev, &info);
if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
if (amdgpu_ras_reset_error_status(adev, info.head.block))
dev_warn(adev->dev, "Failed to reset error counter and error status");
}
@@ -1949,6 +1950,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
@@ -2718,7 +2720,8 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
/* Need disable ras on all IPs here before ip [hw/sw]fini */
- amdgpu_ras_disable_all_features(adev, 0);
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
return 0;
}
@@ -2831,11 +2834,8 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
struct mce *m = (struct mce *)data;
struct amdgpu_device *adev = NULL;
uint32_t gpu_id = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst, channel_index = 0;
+ uint32_t umc_inst = 0, ch_inst = 0;
struct ras_err_data err_data = {0, 0, 0, NULL};
- struct eeprom_table_record err_rec;
- uint64_t retired_page;
/*
* If the error was generated in UMC_V2, which belongs to GPU UMCs,
@@ -2874,21 +2874,22 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
umc_inst, ch_inst);
+ err_data.err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record in mca notifier!\n");
+ return NOTIFY_DONE;
+ }
+
/*
* Translate UMC channel address to Physical address
*/
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
- + ch_inst];
-
- retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(m->addr);
-
- memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
- err_data.err_addr = &err_rec;
- amdgpu_umc_fill_error_record(&err_data, m->addr,
- retired_page, channel_index, umc_inst);
+ if (adev->umc.ras &&
+ adev->umc.ras->convert_ras_error_address)
+ adev->umc.ras->convert_ras_error_address(adev,
+ &err_data, 0, ch_inst, umc_inst, m->addr);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -2896,6 +2897,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
amdgpu_ras_save_bad_pages(adev);
}
+ kfree(err_data.err_addr);
return NOTIFY_OK;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c4283987bb1e..84c241b9a2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -38,6 +38,7 @@
#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
#define EEPROM_I2C_MADDR_ALDEBARAN 0x0
+#define EEPROM_I2C_MADDR_SMU_13_0_0 (0x54UL << 16)
/*
* The 2 macros below represent the actual size in bytes that
@@ -156,6 +157,15 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return false;
}
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ control->i2c_address = EEPROM_I2C_MADDR_SMU_13_0_0;
+ break;
+
+ default:
+ break;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 32c86a0b145c..9da5ead50c90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -23,6 +23,7 @@
#include "amdgpu_reset.h"
#include "aldebaran.h"
+#include "sienna_cichlid.h"
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_handler *handler)
@@ -36,10 +37,15 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
+ adev->amdgpu_reset_level_mask = 0x1;
+
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_init(adev);
break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_init(adev);
+ break;
default:
break;
}
@@ -55,6 +61,9 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_fini(adev);
break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_fini(adev);
+ break;
default:
break;
}
@@ -67,6 +76,12 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
{
struct amdgpu_reset_handler *reset_handler = NULL;
+ if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
+ return -ENOSYS;
+
+ if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
+ return -ENOSYS;
+
if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
@@ -83,6 +98,12 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
int ret;
struct amdgpu_reset_handler *reset_handler = NULL;
+ if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
+ return -ENOSYS;
+
+ if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
+ return -ENOSYS;
+
if (adev->reset_cntl)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
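Editor's note: both mode2 entry points now bail out with -ENOSYS unless AMDGPU_RESET_LEVEL_MODE2 is set in adev->amdgpu_reset_level_mask and the caller has not set AMDGPU_SKIP_MODE2_RESET, so callers can fall back to a heavier reset. A hedged sketch of that calling pattern (the full-reset fallback below is illustrative, not lifted from the recovery code):

/* Hedged sketch: try mode2 first, fall back when it is masked off. */
static int try_mode2_then_full(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *ctx)
{
	int r = amdgpu_reset_perform_reset(adev, ctx);

	if (r != -ENOSYS)
		return r;
	/* mode2 unavailable or skipped: ask for a full ASIC reset instead */
	set_bit(AMDGPU_NEED_FULL_RESET, &ctx->flags);
	return 0;	/* let the generic recovery path do the full reset */
}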
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 9e55a5d7a825..f5318fedf2f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -30,6 +30,8 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_MODE2_RESET = 2,
+ AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
};
struct amdgpu_reset_context {
@@ -37,6 +39,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
unsigned long flags;
};
@@ -110,7 +113,8 @@ static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *dom
static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
{
- kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+ if (domain)
+ kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
}
static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d3558c34d406..3e316b013fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -405,6 +405,9 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
+ if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
+ return false;
+
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 6373bfb47d55..012b72d00e04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -272,3 +272,275 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ if (version_minor >= 0) {
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
+ }
+ }
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
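Editor's note: the version_minor checks above make each parser cumulative: every v2.x image carries the v2.0 payload, v2.1+ adds the save/restore lists, v2.2+ the IRAM/DRAM images, while v2.3 and v2.4 are alternative tails. A minimal sketch, under the assumption that the GFX IP block has already fetched and validated the RLC image, of how it might drive the new entry point:

/* Hedged sketch: read the RLC header version and hand off. */
static int gfx_init_rlc_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr =
		(const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	uint16_t major = le16_to_cpu(hdr->header.header_version_major);
	uint16_t minor = le16_to_cpu(hdr->header.header_version_minor);

	return amdgpu_gfx_rlc_init_microcode(adev, major, minor);
}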
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 03ac36b2c2cf..23f060db9255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
-
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 42c1f050542f..3949b7e3907f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
@@ -150,3 +151,135 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
+{
+ int err = 0;
+ uint16_t version_major;
+ const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const struct sdma_firmware_header_v2_0 *hdr_v2;
+
+ err = amdgpu_ucode_validate(sdma_inst->fw);
+ if (err)
+ return err;
+
+ header = (const struct common_firmware_header *)
+ sdma_inst->fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ switch (version_major) {
+ case 1:
+ hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ break;
+ case 2:
+ hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sdma_inst->feature_version >= 20)
+ sdma_inst->burst_nop = true;
+
+ return 0;
+}
+
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ if (duplicate)
+ break;
+ }
+
+ memset((void *)adev->sdma.instance, 0,
+ sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
+}
+
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance,
+ bool duplicate)
+{
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ int err = 0, i;
+ const struct sdma_firmware_header_v2_0 *sdma_hdr;
+ uint16_t version_major;
+
+ err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ header = (const struct common_firmware_header *)
+ adev->sdma.instance[instance].fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ if ((duplicate && instance) || (!duplicate && version_major > 1))
+ return -EINVAL;
+
+ err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
+ if (err)
+ goto out;
+
+ if (duplicate) {
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ DRM_DEBUG("psp_load == '%s'\n",
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (version_major) {
+ case 1:
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!duplicate && (instance != i))
+ continue;
+ else {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+ info->fw = adev->sdma.instance[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+ break;
+ case 2:
+ sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
+ adev->sdma.instance[0].fw->data;
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
+ }
+ return err;
+}
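Editor's note: amdgpu_sdma_init_microcode() centralises what each SDMA generation used to open-code: request the image, parse the v1/v2 header into the per-instance context, optionally copy instance 0 into the remaining instances when a single file serves all engines, and register the blob(s) with the PSP loader. A hedged sketch of a caller that shares one image across every instance (the firmware file name is a placeholder):

/* Hedged sketch: one shared SDMA image for all instances. */
static int sdma_vX_init_microcode(struct amdgpu_device *adev)
{
	/* "amdgpu/somechip_sdma.bin" is a placeholder, not a real file */
	return amdgpu_sdma_init_microcode(adev, "amdgpu/somechip_sdma.bin",
					  0, true);
}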
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 53ac3ebae8d6..d2d88279fefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,4 +124,8 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance, bool duplicate);
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 504af1b93bfa..090e66a1b284 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
@@ -315,6 +316,7 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
struct hlist_node *tmp;
struct dma_fence *f;
int i;
+
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
@@ -392,7 +394,7 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- unsigned i;
+ unsigned int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 06dfcf297a8d..5e6ddc7e101c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -140,8 +140,10 @@ TRACE_EVENT(amdgpu_bo_create,
);
TRACE_EVENT(amdgpu_cs,
- TP_PROTO(struct amdgpu_cs_parser *p, int i),
- TP_ARGS(p, i),
+ TP_PROTO(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib),
+ TP_ARGS(p, job, ib),
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, bo_list)
__field(u32, ring)
@@ -151,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
- __entry->dw = p->job->ibs[i].length_dw;
+ __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+ __entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- to_amdgpu_ring(p->entity->rq->sched));
+ to_amdgpu_ring(job->base.sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b4c19412625..b1c455329023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -471,7 +471,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
adev = amdgpu_ttm_adev(bo->bdev);
- if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
@@ -637,6 +638,8 @@ struct amdgpu_ttm_tt {
#endif
};
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +651,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -702,7 +705,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
@@ -751,7 +754,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +791,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +825,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +863,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;
@@ -927,7 +930,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -998,7 +1001,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1025,7 +1028,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1079,7 +1082,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;
@@ -1113,7 +1116,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;
@@ -1182,7 +1185,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
- gtt = (void *)bo->ttm;
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1199,7 +1202,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
@@ -1218,7 +1221,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1241,7 +1244,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL || !gtt->userptr)
return false;
@@ -1254,7 +1257,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
@@ -1327,11 +1330,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->resource->num_pages;
struct dma_resv_iter resv_cursor;
- struct amdgpu_res_cursor cursor;
struct dma_fence *f;
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return ttm_bo_eviction_valuable(bo, place);
+
/* Swapout? */
if (bo->resource->mem_type == TTM_PL_SYSTEM)
return true;
@@ -1350,40 +1354,20 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return false;
}
- switch (bo->resource->mem_type) {
- case AMDGPU_PL_PREEMPT:
- /* Preemptible BOs don't own system resources managed by the
- * driver (pages, VRAM, GART space). They point to resources
- * owned by someone else (e.g. pageable memory in user mode
- * or a DMABuf). They are used in a preemptible context so we
- * can guarantee no deadlocks and good QoS in case of MMU
- * notifiers or DMABuf move notifiers from the resource owner.
- */
+ /* Preemptible BOs don't own system resources managed by the
+ * driver (pages, VRAM, GART space). They point to resources
+ * owned by someone else (e.g. pageable memory in user mode
+ * or a DMABuf). They are used in a preemptible context so we
+ * can guarantee no deadlocks and good QoS in case of MMU
+ * notifiers or DMABuf move notifiers from the resource owner.
+ */
+ if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
return false;
- case TTM_PL_TT:
- if (amdgpu_bo_is_amdgpu_bo(bo) &&
- amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
- return false;
- return true;
- case TTM_PL_VRAM:
- /* Check each drm MM node individually */
- amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
- &cursor);
- while (cursor.remaining) {
- if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
- && !(place->lpfn &&
- place->lpfn <= PFN_DOWN(cursor.start)))
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
+ if (bo->resource->mem_type == TTM_PL_TT &&
+ amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
return false;
- default:
- break;
- }
-
return ttm_bo_eviction_valuable(bo, place);
}
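
The repeated (void *) casts in amdgpu_ttm.c are replaced by ttm_to_amdgpu_ttm_tt(), which is the usual container_of() downcast from an embedded ttm_tt to its enclosing driver structure. A minimal sketch of the pattern, using hypothetical structure names rather than the real TTM types:

    #include <stddef.h>

    /* hypothetical base/derived pair mirroring struct ttm_tt / struct amdgpu_ttm_tt */
    struct base_tt {
            unsigned int page_flags;
    };

    struct derived_tt {
            struct base_tt ttm;        /* embedded base object */
            unsigned long userptr;     /* driver-private state */
    };

    /* recover the enclosing object from a pointer to its embedded member */
    #define to_derived_tt(ptr) \
            ((struct derived_tt *)((char *)(ptr) - offsetof(struct derived_tt, ttm)))

Compared with an unchecked (void *) cast, the helper documents the relationship between the two structures and keeps working even if the embedded member is no longer the first field.
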
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 939c8614f0e3..dd0bc649a57d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -164,70 +164,138 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
} else if (version_major == 2) {
const struct rlc_firmware_header_v2_0 *rlc_hdr =
container_of(hdr, struct rlc_firmware_header_v2_0, header);
+ const struct rlc_firmware_header_v2_1 *rlc_hdr_v2_1 =
+ container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ const struct rlc_firmware_header_v2_2 *rlc_hdr_v2_2 =
+ container_of(rlc_hdr_v2_1, struct rlc_firmware_header_v2_2, v2_1);
+ const struct rlc_firmware_header_v2_3 *rlc_hdr_v2_3 =
+ container_of(rlc_hdr_v2_2, struct rlc_firmware_header_v2_3, v2_2);
+ const struct rlc_firmware_header_v2_4 *rlc_hdr_v2_4 =
+ container_of(rlc_hdr_v2_3, struct rlc_firmware_header_v2_4, v2_3);
- DRM_DEBUG("ucode_feature_version: %u\n",
- le32_to_cpu(rlc_hdr->ucode_feature_version));
- DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
- DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
- DRM_DEBUG("save_and_restore_offset: %u\n",
- le32_to_cpu(rlc_hdr->save_and_restore_offset));
- DRM_DEBUG("clear_state_descriptor_offset: %u\n",
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
- DRM_DEBUG("avail_scratch_ram_locations: %u\n",
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
- DRM_DEBUG("reg_restore_list_size: %u\n",
- le32_to_cpu(rlc_hdr->reg_restore_list_size));
- DRM_DEBUG("reg_list_format_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_start));
- DRM_DEBUG("reg_list_format_separate_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
- DRM_DEBUG("starting_offsets_start: %u\n",
- le32_to_cpu(rlc_hdr->starting_offsets_start));
- DRM_DEBUG("reg_list_format_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
- DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- DRM_DEBUG("reg_list_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_size_bytes));
- DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
- DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
- DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
- DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
- if (version_minor == 1) {
- const struct rlc_firmware_header_v2_1 *v2_1 =
- container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ switch (version_minor) {
+ case 0:
+ /* rlc_hdr v2_0 */
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("reg_restore_list_size: %u\n",
+ le32_to_cpu(rlc_hdr->reg_restore_list_size));
+ DRM_DEBUG("reg_list_format_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_start));
+ DRM_DEBUG("reg_list_format_separate_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
+ DRM_DEBUG("starting_offsets_start: %u\n",
+ le32_to_cpu(rlc_hdr->starting_offsets_start));
+ DRM_DEBUG("reg_list_format_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
+ DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ DRM_DEBUG("reg_list_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes));
+ DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
+ DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
+ DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+ DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+ break;
+ case 1:
+ /* rlc_hdr v2_1 */
DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
- le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
+ le32_to_cpu(rlc_hdr_v2_1->reg_list_format_direct_reg_list_length));
DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_ucode_ver));
DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_feature_ver));
DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_size_bytes));
DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_offset_bytes));
DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_ucode_ver));
DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_feature_ver));
DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_size_bytes));
DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_offset_bytes));
DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_ucode_ver));
DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_feature_ver));
DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_size_bytes));
DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_offset_bytes));
+ break;
+ case 2:
+ /* rlc_hdr v2_2 */
+ DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_size_bytes));
+ DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_offset_bytes));
+ DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_size_bytes));
+ DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_offset_bytes));
+ break;
+ case 3:
+ /* rlc_hdr v2_3 */
+ DRM_DEBUG("rlcp_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_version));
+ DRM_DEBUG("rlcp_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_feature_version));
+ DRM_DEBUG("rlcp_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_size_bytes));
+ DRM_DEBUG("rlcp_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_offset_bytes));
+ DRM_DEBUG("rlcv_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_version));
+ DRM_DEBUG("rlcv_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_feature_version));
+ DRM_DEBUG("rlcv_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_size_bytes));
+ DRM_DEBUG("rlcv_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_offset_bytes));
+ break;
+ case 4:
+ /* rlc_hdr v2_4 */
+ DRM_DEBUG("global_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("global_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_offset_bytes));
+ break;
+ default:
+ DRM_ERROR("Unknown RLC v2 ucode: v2.%u\n", version_minor);
+ break;
}
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index ebed3f5226db..1c36235b4539 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -124,6 +124,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_SOC_DRV,
PSP_FW_TYPE_PSP_INTF_DRV,
PSP_FW_TYPE_PSP_DBG_DRV,
+ PSP_FW_TYPE_PSP_RAS_DRV,
};
/* version_major=2, version_minor=0 */
@@ -260,8 +261,12 @@ struct rlc_firmware_header_v2_2 {
/* version_major=2, version_minor=3 */
struct rlc_firmware_header_v2_3 {
struct rlc_firmware_header_v2_2 v2_2;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
uint32_t rlcp_ucode_size_bytes;
uint32_t rlcp_ucode_offset_bytes;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t rlcv_ucode_size_bytes;
uint32_t rlcv_ucode_offset_bytes;
};
@@ -390,6 +395,7 @@ union amdgpu_firmware_header {
struct rlc_firmware_header_v2_1 rlc_v2_1;
struct rlc_firmware_header_v2_2 rlc_v2_2;
struct rlc_firmware_header_v2_3 rlc_v2_3;
+ struct rlc_firmware_header_v2_4 rlc_v2_4;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct sdma_firmware_header_v2_0 sdma_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3629d8f292ef..2fb4951a6433 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,6 +22,8 @@
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
+#define UMC_INVALID_ADDR 0x1ULL
+
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -51,6 +53,10 @@ struct amdgpu_umc_ras {
struct amdgpu_ras_block_object ras_block;
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*convert_ras_error_address)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset, uint32_t ch_inst,
+ uint32_t umc_inst, uint64_t mca_addr);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f36e4f08db6d..0b52af415b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = false;
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 60c608144480..253ea6b159df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,6 +161,8 @@
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -170,6 +172,9 @@
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -317,12 +322,26 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
+struct amdgpu_fw_shared_rb_setup {
+ uint32_t is_rb_enabled_flags;
+ uint32_t rb_addr_lo;
+ uint32_t rb_addr_hi;
+ uint32_t rb_size;
+ uint32_t rb4_addr_lo;
+ uint32_t rb4_addr_hi;
+ uint32_t rb4_size;
+ uint32_t reserved[6];
+};
+
struct amdgpu_vcn4_fw_shared {
uint32_t present_flag_0;
uint8_t pad[12];
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
+ struct amdgpu_fw_shared_rb_setup rb_setup;
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
};
struct amdgpu_vcn_fwlog {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9be57389301b..e4af40b9a8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -690,7 +690,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -707,6 +706,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
+ case CHIP_IP_DISCOVERY:
reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
break;
default: /* other chip doesn't support SRIOV */
@@ -750,6 +750,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_IP_DISCOVERY:
nv_set_virt_ops(adev);
/* try send GPU_INIT_DATA request to host */
amdgpu_virt_request_init_data(adev);
@@ -807,6 +808,60 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
+{
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ /* no vf autoload, white list */
+ if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
+ ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ case IP_VERSION(13, 0, 10):
+ /* white list */
+ if (ucode_id == AMDGPU_UCODE_ID_CAP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
+ || ucode_id == AMDGPU_UCODE_ID_VCN1
+ || ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ default:
+ /* legacy black list */
+ if (ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
+ }
+}
+
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
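 
The long || chains in amdgpu_virt_fw_load_skip_check() are effectively per-IP-version whitelists plus a legacy blacklist. As an illustration only (not the driver's code), the same check could be expressed as a scan over a static ID table:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* illustrative whitelist scan; the real ucode IDs are driver enum values */
    static bool id_in_list(uint32_t id, const uint32_t *list, size_t count)
    {
            size_t i;

            for (i = 0; i < count; i++)
                    if (list[i] == id)
                            return true;
            return false;
    }

A per-IP-version table keyed on the MP0 version would then replace the individual switch arms; whether that reads better than the explicit comparisons above is a matter of taste.
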
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 239f232f9c02..d94c31e68a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -253,6 +253,9 @@ struct amdgpu_virt {
uint32_t decode_max_frame_pixels;
uint32_t encode_max_dimension_pixels;
uint32_t encode_max_frame_pixels;
+
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
};
struct amdgpu_video_codec_info;
@@ -343,4 +346,6 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
u32 acc_flags, u32 hwip);
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
u32 offset, u32 acc_flags, u32 hwip);
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
+ uint32_t ucode_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 108e8e8a1a36..f4b5301ea2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -282,8 +282,8 @@ static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (ret != 0)
return ret;
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 59cac347baa3..83b0c5d86e48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -183,10 +183,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -198,7 +200,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -211,7 +215,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -225,9 +231,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -240,10 +246,13 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
- if (vm_bo->bo->parent)
+ if (vm_bo->bo->parent) {
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- else
+ spin_unlock(&vm_bo->vm->status_lock);
+ } else {
amdgpu_vm_bo_idle(vm_bo);
+ }
}
/**
@@ -256,9 +265,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -363,12 +372,20 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
- struct amdgpu_vm_bo_base *bo_base, *tmp;
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *shadow;
+ struct amdgpu_bo *bo;
int r;
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
- struct amdgpu_bo *bo = bo_base->bo;
- struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+ shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
@@ -385,7 +402,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -406,13 +425,18 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
+ bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
- return ret && list_empty(&vm->evicted);
+ spin_lock(&vm->status_lock);
+ empty = list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
+
+ return ret && empty;
}
/**
@@ -680,9 +704,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm_update_params params;
struct amdgpu_vm_bo_base *entry;
bool flush_tlb_needed = false;
+ LIST_HEAD(relocated);
int r, idx;
- if (list_empty(&vm->relocated))
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->relocated, &relocated);
+ spin_unlock(&vm->status_lock);
+
+ if (list_empty(&relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -697,7 +726,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &vm->relocated, vm_status) {
+ list_for_each_entry(entry, &relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -713,9 +742,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- while (!list_empty(&vm->relocated)) {
- entry = list_first_entry(&vm->relocated,
- struct amdgpu_vm_bo_base,
+ while (!list_empty(&relocated)) {
+ entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
vm_status);
amdgpu_vm_bo_idle(entry);
}
@@ -912,6 +940,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
{
struct amdgpu_bo_va *bo_va, *tmp;
+ spin_lock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -936,7 +965,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -949,7 +977,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
@@ -1278,24 +1306,29 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_bo_va *bo_va, *tmp;
+ struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
bool clear;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
/* Per VM BOs never need to bo cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
+ spin_lock(&vm->status_lock);
}
- spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!amdgpu_vm_debug && dma_resv_trylock(resv))
@@ -1310,9 +1343,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (!clear)
dma_resv_unlock(resv);
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -1387,7 +1420,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!bo_va->base.moved) {
- list_move(&bo_va->base.vm_status, &vm->moved);
+ amdgpu_vm_bo_moved(&bo_va->base);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@@ -1763,9 +1796,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2019,9 +2052,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->invalidated);
- spin_lock_init(&vm->invalidated_lock);
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
@@ -2223,6 +2258,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ flush_work(&vm->pt_free_work);
+
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_set_pasid(adev, vm, 0);
@@ -2484,8 +2521,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
- flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
- AMDGPU_PTE_TF;
+ flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
@@ -2548,6 +2584,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -2585,7 +2622,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -2600,7 +2636,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 9ecb7f663e19..83acb7bd80fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -254,6 +254,9 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
+ /* Lock protecting vm_bo add/del/move on all of the VM's lists */
+ spinlock_t status_lock;
+
/* BOs who needs a validation */
struct list_head evicted;
@@ -268,7 +271,6 @@ struct amdgpu_vm {
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- spinlock_t invalidated_lock;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
@@ -276,6 +278,10 @@ struct amdgpu_vm {
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
+ /* PT BOs scheduled to be freed and filled with zero if vm_resv is not held */
+ struct list_head pt_freed;
+ struct work_struct pt_free_work;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -471,6 +477,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
+void amdgpu_vm_pt_free_work(struct work_struct *work);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 88de9f0d4728..358b91243e37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -637,10 +637,34 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
entry->bo->vm_bo = NULL;
+
+ spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
+ spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
+void amdgpu_vm_pt_free_work(struct work_struct *work)
+{
+ struct amdgpu_vm_bo_base *entry, *next;
+ struct amdgpu_vm *vm;
+ LIST_HEAD(pt_freed);
+
+ vm = container_of(work, struct amdgpu_vm, pt_free_work);
+
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->pt_freed, &pt_freed);
+ spin_unlock(&vm->status_lock);
+
+ /* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
+ amdgpu_bo_reserve(vm->root.bo, true);
+
+ list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
+ amdgpu_vm_pt_free(entry);
+
+ amdgpu_bo_unreserve(vm->root.bo);
+}
+
/**
* amdgpu_vm_pt_free_dfs - free PD/PT levels
*
@@ -652,11 +676,24 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
*/
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start)
+ struct amdgpu_vm_pt_cursor *start,
+ bool unlocked)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;
+ if (unlocked) {
+ spin_lock(&vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ list_move(&entry->vm_status, &vm->pt_freed);
+
+ if (start)
+ list_move(&start->entry->vm_status, &vm->pt_freed);
+ spin_unlock(&vm->status_lock);
+ schedule_work(&vm->pt_free_work);
+ return;
+ }
+
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
amdgpu_vm_pt_free(entry);
@@ -673,7 +710,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL);
+ amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
}
/**
@@ -966,7 +1003,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
if (cursor.entry->bo) {
params->table_freed = true;
amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor);
+ &cursor,
+ params->unlocked);
}
amdgpu_vm_pt_next(adev, &cursor);
}
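 
amdgpu_vm_pt_free_work() introduces a deferred-free path: entries are queued on vm->pt_freed under the status lock, and a work item later splices them out and frees them with the root reservation held. A generic sketch of the splice-and-free pattern, assuming kfree() stands in for the real teardown:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct pending {
            struct list_head node;
    };

    static LIST_HEAD(pending_list);
    static DEFINE_SPINLOCK(pending_lock);

    static void free_work_fn(struct work_struct *work)
    {
            struct pending *p, *next;
            LIST_HEAD(local);

            spin_lock(&pending_lock);
            list_splice_init(&pending_list, &local);        /* grab everything at once */
            spin_unlock(&pending_lock);

            /* sleepable context: reservations and frees are safe here */
            list_for_each_entry_safe(p, next, &local, node)
                    kfree(p);
    }
    static DECLARE_WORK(free_work, free_work_fn);

Producers only ever touch pending_list under the spinlock and then call schedule_work(&free_work), mirroring how amdgpu_vm_pt_free_dfs() handles the unlocked case.
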
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 1fd3cbca20a2..2b0669c464f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -112,7 +112,8 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
swap(p->vm->last_unlocked, tmp);
dma_fence_put(tmp);
} else {
- amdgpu_bo_fence(p->vm->root.bo, f, true);
+ dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
+ DMA_RESV_USAGE_BOOKKEEP);
}
if (fence && !p->immediate)
@@ -211,12 +212,15 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
int r;
/* Wait for PD/PT moves to be completed */
- dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
- DMA_RESV_USAGE_KERNEL, fence) {
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
r = amdgpu_sync_fence(&p->job->sync, fence);
- if (r)
+ if (r) {
+ dma_resv_iter_end(&cursor);
return r;
+ }
}
+ dma_resv_iter_end(&cursor);
do {
ndw = p->num_dw_left;
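 
The SDMA update path now uses the unlocked reservation iterator, which, unlike the locked dma_resv_for_each_fence() form, needs an explicit dma_resv_iter_end() on every exit path. A hedged sketch of the resulting shape, with dma_fence_wait() standing in for amdgpu_sync_fence():

    #include <linux/dma-resv.h>

    static long wait_on_kernel_fences(struct dma_resv *resv)
    {
            struct dma_resv_iter cursor;
            struct dma_fence *fence;
            long r;

            dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_KERNEL);
            dma_resv_for_each_fence_unlocked(&cursor, fence) {
                    r = dma_fence_wait(fence, true);        /* stand-in for the real sync */
                    if (r) {
                            dma_resv_iter_end(&cursor);     /* required on early return */
                            return r;
                    }
            }
            dma_resv_iter_end(&cursor);
            return 0;
    }
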
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 28ec5f8ac1c1..73a517bcf5c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -721,6 +721,72 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
+ *
+ * @man: TTM memory type manager
+ * @res: The resource to test
+ * @place: The place to test against
+ * @size: Size of the new allocation
+ *
+ * Test each drm buddy block for intersection, for the eviction decision.
+ */
+static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &mgr->blocks, link) {
+ unsigned long fpfn =
+ amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
+
+ if (place->fpfn < lpfn &&
+ (!place->lpfn || place->lpfn > fpfn))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
+ *
+ * @man: TTM memory type manager
+ * @res: The resource to test
+ * @place: The place to test against
+ * @size: Size of the new allocation
+ *
+ * Test each drm buddy block for placement compatibility.
+ */
+static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &mgr->blocks, link) {
+ unsigned long fpfn =
+ amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
+
+ if (fpfn < place->fpfn ||
+ (place->lpfn && lpfn > place->lpfn))
+ return false;
+ }
+
+ return true;
+}
+
+/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
@@ -753,6 +819,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
+ .intersects = amdgpu_vram_mgr_intersects,
+ .compatible = amdgpu_vram_mgr_compatible,
.debug = amdgpu_vram_mgr_debug
};
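 
The new intersects/compatible callbacks are plain interval checks on page-frame numbers derived from each drm buddy block: intersects() succeeds if any block overlaps the requested place, compatible() only if every block lies inside it. The two predicates, extracted as a standalone sketch:

    #include <stdbool.h>

    /* half-open pfn ranges: [fpfn, lpfn); place_lpfn == 0 means "no upper bound" */
    static bool block_overlaps_place(unsigned long fpfn, unsigned long lpfn,
                                     unsigned long place_fpfn, unsigned long place_lpfn)
    {
            return place_fpfn < lpfn && (!place_lpfn || place_lpfn > fpfn);
    }

    static bool block_inside_place(unsigned long fpfn, unsigned long lpfn,
                                   unsigned long place_fpfn, unsigned long place_lpfn)
    {
            return fpfn >= place_fpfn && (!place_lpfn || lpfn <= place_lpfn);
    }
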
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1b108d03e785..47159e9a0884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -392,12 +392,20 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
}
/**
+ * Only init hive->reset_domain for non-SRIOV configurations. For SRIOV,
+ * the host driver decides how to reset the GPU, either through FLR or
+ * chain reset. The guest side will get individual notifications from the
+ * host for the FLR if necessary.
+ */
+ if (!amdgpu_sriov_vf(adev)) {
+ /**
* Avoid recreating reset domain when hive is reconstructed for the case
- * of reset the devices in the XGMI hive during probe for SRIOV
+ * of resetting the devices in the XGMI hive during probe for a passthrough GPU.
* See https://www.spinics.net/lists/amd-gfx/msg58836.html
*/
- if (adev->reset_domain->type != XGMI_HIVE) {
- hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+ if (adev->reset_domain->type != XGMI_HIVE) {
+ hive->reset_domain =
+ amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
if (!hive->reset_domain) {
dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
ret = -ENOMEM;
@@ -406,9 +414,10 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
hive = NULL;
goto pro_end;
}
- } else {
- amdgpu_reset_get_reset_domain(adev->reset_domain);
- hive->reset_domain = adev->reset_domain;
+ } else {
+ amdgpu_reset_get_reset_domain(adev->reset_domain);
+ hive->reset_domain = adev->reset_domain;
+ }
}
hive->hive_id = adev->gmc.xgmi.hive_id;
@@ -504,6 +513,9 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
{
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
/* Each psp need to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
atomic_read(&hive->number_devices),
@@ -742,7 +754,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 552e6fb55aa8..30dcc1681b4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -43,6 +43,7 @@ struct amdgpu_hive_info {
} pstate;
struct amdgpu_reset_domain *reset_domain;
+ uint32_t device_remove_count;
};
struct amdgpu_pcs_ras_field {
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index 33a8a7365aef..f0e235f98afb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
- data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
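 
Both clock-gating helpers in athub_v3_0.c follow the familiar read-modify-write pattern, now routed through version-aware accessors, and skip the write when the value is unchanged. A generic sketch of that pattern (the accessor callbacks are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdint.h>

    /* rd/wr stand in for the version-dependent register accessors */
    static void update_cg_bit(uint32_t (*rd)(void), void (*wr)(uint32_t),
                              uint32_t mask, bool enable)
    {
            uint32_t def, data;

            def = data = rd();
            if (enable)
                    data |= mask;
            else
                    data &= ~mask;

            if (def != data)        /* skip the register write when nothing changed */
                    wr(data);
    }
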
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index fa7421afb9a6..6be9ac2b9c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -26,6 +26,8 @@
#include <linux/pci.h>
+#include <acpi/video.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -182,7 +184,12 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
return;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
- return;
+ goto register_acpi_backlight;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n");
+ goto register_acpi_backlight;
+ }
pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
if (!pdata) {
@@ -218,6 +225,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
error:
kfree(pdata);
return;
+
+register_acpi_backlight:
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
}
void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9c964cd3b5d4..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e0ad9f27dc3f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 77f5e998a120..b1c44fab074f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 802e5c753271..a22b45c92792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fafbad3cf08d..af94ac580d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3943,56 +3943,6 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
-
-static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v10_0_init_tap_delays_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_4 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
-}
-
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
bool ret = false;
@@ -4028,12 +3978,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
char fw_name[40];
char *wks = "";
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -4091,9 +4036,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -4102,9 +4045,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -4113,69 +4054,27 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
+ /* Don't fail on a validation error here; there are apparently
+ * firmwares in the wild with an incorrect size in the header.
+ */
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ dev_dbg(adev->dev,
+ "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v10_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 4) {
- gfx_v10_0_init_tap_delays_microcode(adev);
- }
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
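Context note, not part of the patch: the hunk above drops the open-coded version checks (v2.1 extended lists, v2.2 IRAM/DRAM, v2.4 tap delays) in favour of the shared amdgpu_gfx_rlc_init_microcode() helper, which is passed the header's major/minor version. A simplified, hypothetical sketch of that version-gated dispatch shape (stub parsers, not the real helper's body):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-section parsers standing in for the removed init helpers. */
static void parse_ext(void)        { puts("parse v2.1 save/restore lists"); }
static void parse_iram_dram(void)  { puts("parse v2.2 IRAM/DRAM images"); }
static void parse_tap_delays(void) { puts("parse v2.4 tap-delay tables"); }

/* Later minors are supersets of earlier ones, so gate with >=, as the removed code did. */
static int rlc_init_by_version(uint16_t major, uint16_t minor)
{
	if (major != 2)
		return -1;	/* unknown header layout */

	if (minor >= 1)
		parse_ext();
	if (minor >= 2)
		parse_iram_dram();
	if (minor == 4)
		parse_tap_delays();
	return 0;
}

int main(void)
{
	return rlc_init_by_version(2, 4) ? 1 : 0;
}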
@@ -4185,9 +4084,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -4195,154 +4093,18 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
-
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
dev_err(adev->dev,
- "gfx10: Failed to load firmware \"%s\"\n",
+ "gfx10: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -4846,7 +4608,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -5971,6 +5733,9 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
}
+ if (adev->job_hang && !enable)
+ return 0;
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
break;
@@ -7569,8 +7334,10 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
-
- return amdgpu_ring_test_helper(kiq_ring);
+ if (!adev->job_hang)
+ return amdgpu_ring_test_helper(kiq_ring);
+ else
+ return 0;
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 6fd71cb10e54..251109723ab6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388
#define regCGTT_WD_CLK_CTRL 0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
@@ -72,21 +73,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
-
-static const struct soc15_reg_golden golden_settings_gc_11_0[] =
-{
- /* Pending on emulation bring up */
-};
-
-static const struct soc15_reg_golden golden_settings_gc_11_0_0[] =
-{
- /* Pending on emulation bring up */
-};
-
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] =
-{
- /* Pending on emulation bring up */
-};
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
@@ -130,6 +120,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+ bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -266,42 +258,17 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
}
-static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0));
- break;
- default:
- break;
- }
-}
-
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_0_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0_0));
- break;
case IP_VERSION(11, 0, 1):
soc15_program_register_sequence(adev,
- golden_settings_gc_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
- soc15_program_register_sequence(adev,
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
default:
break;
}
- gfx_v11_0_init_spm_golden_registers(adev);
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@@ -471,61 +438,12 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_3 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
- adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
- adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
- adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
-}
-
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
char ucode_prefix[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
- const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -546,14 +464,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw->data, 2, 0);
if (adev->gfx.rs64_enable) {
dev_info(adev->dev, "CP RS64 enable\n");
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
@@ -564,14 +479,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
}
if (!amdgpu_sriov_vf(adev)) {
@@ -580,58 +492,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v11_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v11_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 3)
- gfx_v11_0_init_rlcp_rlcv_microcode(adev);
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
@@ -642,190 +510,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
- } else {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
- }
- }
-
out:
if (err) {
dev_err(adev->dev,
- "gfx11: Failed to load firmware \"%s\"\n",
+ "gfx11: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -1137,7 +838,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
- .init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+ .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1147,6 +848,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1582,6 +1284,7 @@ static int gfx_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -2756,6 +2459,21 @@ static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
mec_hdr->ucode_start_addr_hi >> 2);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
+
+ /* reset mec pipe */
+ tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
+
+ /* clear mec pipe reset */
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}
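Context note, not part of the patch: the pipe-reset sequence added above is a read-modify-write pulse, read the control register, set the four reset fields, write, then clear them in the cached value and write again. A self-contained sketch of the same pattern with hand-rolled masks (bit positions are assumed; in the driver REG_SET_FIELD/WREG32_SOC15 come from the SOC15 register headers):

#include <stdint.h>
#include <stdio.h>

/* Fake backing store standing in for the MMIO register. */
static uint32_t fake_cp_mec_cntl;

static uint32_t reg_read(void)        { return fake_cp_mec_cntl; }
static void     reg_write(uint32_t v) { fake_cp_mec_cntl = v; }

/* Hypothetical bit positions for the four pipe-reset fields. */
#define PIPE0_RESET (1u << 16)
#define PIPE1_RESET (1u << 17)
#define PIPE2_RESET (1u << 18)
#define PIPE3_RESET (1u << 19)
#define ALL_PIPE_RESET (PIPE0_RESET | PIPE1_RESET | PIPE2_RESET | PIPE3_RESET)

int main(void)
{
	uint32_t tmp;

	/* assert reset on all pipes */
	tmp = reg_read();
	tmp |= ALL_PIPE_RESET;
	reg_write(tmp);

	/* deassert reset, reusing the cached value, as the patch does */
	tmp &= ~ALL_PIPE_RESET;
	reg_write(tmp);

	printf("final CNTL = 0x%08x\n", (unsigned)reg_read());
	return 0;
}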
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
@@ -5181,9 +4899,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5212,9 +4933,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
}
}
@@ -5250,6 +4974,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5263,6 +4989,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
@@ -5279,6 +5007,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+ /* Program RLC_PG_DELAY_3 for CGPG hysteresis */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 1):
+ WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v11_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static int gfx_v11_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -5293,6 +5053,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 2):
amdgpu_gfx_off_ctrl(adev, enable);
break;
+ case IP_VERSION(11, 0, 1):
+ gfx_v11_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
default:
break;
}
@@ -5310,6 +5074,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c6e0f9313a7f..0320be4a5fc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -126,6 +126,8 @@ MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
@@ -1089,27 +1091,6 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.me_fw_write_wait = false;
@@ -1271,9 +1252,6 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
@@ -1282,9 +1260,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -1293,9 +1269,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -1304,37 +1278,12 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -1351,11 +1300,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
uint32_t smu_version;
@@ -1384,92 +1329,17 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
- goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_0_init_rlc_ext_microcode(adev);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->gfx.rlc.is_rlc_v2_1 &&
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
- }
-
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
@@ -1492,35 +1362,34 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1530,49 +1399,12 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- /* TODO: Determine if MEC2 JT FW loading can be removed
- for all GFX V9 asic and above */
- if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
- }
-
out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
@@ -2587,7 +2419,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
@@ -5596,7 +5429,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
- cur = (ring->wptr & ring->buf_mask) - 1;
+ cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
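Context note, not part of the patch: the one-line change in the hunk above matters when the write pointer has just wrapped, with a power-of-two buf_mask, "(wptr & mask) - 1" underflows when the masked pointer is 0, while "(wptr - 1) & mask" wraps cleanly to the last slot. A tiny demonstration, assuming a 64-entry ring:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t buf_mask = 64 - 1;	/* power-of-two ring */
	uint64_t wptr = 64;			/* just wrapped: wptr & mask == 0 */

	uint64_t old_cur = (wptr & buf_mask) - 1;	/* 0 - 1 underflows to a huge value */
	uint64_t new_cur = (wptr - 1) & buf_mask;	/* 63, the last valid slot */

	printf("old: %llu\nnew: %llu\n",
	       (unsigned long long)old_cur, (unsigned long long)new_cur);
	return 0;
}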
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index d8c531581116..8cf53e039c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -576,6 +576,111 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
}
}
+static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
+{
+ int i;
+ adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
+ adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_LO32 = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_LO32);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_HI32 = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_HI32);
+ adev->gmc.VM_L2_PROTECTION_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
+ adev->gmc.VM_L2_PROTECTION_FAULT_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL2);
+ adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL3 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL3);
+ adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL4 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL4);
+ adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_LO32 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_LO32);
+ adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_HI32 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_HI32);
+ adev->gmc.VM_DEBUG = RREG32_SOC15(GC, 0, mmGCVM_DEBUG);
+ adev->gmc.VM_L2_MM_GROUP_RT_CLASSES = RREG32_SOC15(GC, 0, mmGCVM_L2_MM_GROUP_RT_CLASSES);
+ adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID = RREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID);
+ adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID2 = RREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID2);
+ adev->gmc.VM_L2_CACHE_PARITY_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CACHE_PARITY_CNTL);
+ adev->gmc.VM_L2_IH_LOG_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_IH_LOG_CNTL);
+
+ for (i = 0; i <= 15; i++) {
+ adev->gmc.VM_CONTEXT_CNTL[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, i * 2);
+ }
+
+ adev->gmc.MC_VM_MX_L1_TLB_CNTL = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
+}
+
+static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
+{
+ int i;
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_LO32, adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_LO32);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_HI32, adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_HI32);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, adev->gmc.VM_L2_PROTECTION_FAULT_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL2, adev->gmc.VM_L2_PROTECTION_FAULT_CNTL2);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL3, adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL3);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL4, adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL4);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_LO32, adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_LO32);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_HI32, adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_HI32);
+ WREG32_SOC15(GC, 0, mmGCVM_DEBUG, adev->gmc.VM_DEBUG);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_MM_GROUP_RT_CLASSES, adev->gmc.VM_L2_MM_GROUP_RT_CLASSES);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID, adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID2, adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID2);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CACHE_PARITY_CNTL, adev->gmc.VM_L2_CACHE_PARITY_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_IH_LOG_CNTL, adev->gmc.VM_L2_IH_LOG_CNTL);
+
+ for (i = 0; i <= 15; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i, adev->gmc.VM_CONTEXT_CNTL[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[i]);
+ }
+
+ WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, adev->gmc.vram_start >> 24);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, adev->gmc.vram_end >> 24);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, adev->gmc.MC_VM_MX_L1_TLB_CNTL);
+}
+
+static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
+ uint32_t tmp;
+ int time = 1000;
+
+ gfxhub_v2_1_set_fault_enable_default(adev, false);
+
+ for (i = 0; i <= 14; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, ~0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, ~0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ 0);
+ }
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+ while ((tmp & (GRBM_STATUS2__EA_BUSY_MASK |
+ GRBM_STATUS2__EA_LINK_BUSY_MASK)) != 0 &&
+ time) {
+ udelay(100);
+ time--;
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+ }
+
+ if (!time) {
+ DRM_WARN("failed to wait for GRBM(EA) idle\n");
+ }
+}
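Context note, not part of the patch: gfxhub_v2_1_halt() above ends with a bounded busy-wait, poll GRBM_STATUS2 every 100 us for up to 1000 iterations and warn if the EA blocks never go idle. A user-space sketch of that bounded-poll idiom (the busy source is simulated; in the driver it is an MMIO read and udelay()):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Simulated "hardware": reports busy for the first few polls. */
static int polls_remaining = 3;

static bool hw_busy(void)
{
	return polls_remaining-- > 0;
}

int main(void)
{
	int time = 1000;	/* same iteration budget as the patch */

	while (hw_busy() && time) {
		usleep(100);	/* stand-in for udelay(100) */
		time--;
	}

	if (!time)
		fprintf(stderr, "failed to wait for idle\n");
	else
		printf("idle after %d polls\n", 1000 - time);
	return 0;
}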
+
const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.get_fb_location = gfxhub_v2_1_get_fb_location,
.get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
@@ -586,4 +691,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.init = gfxhub_v2_1_init,
.get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
+ .mode2_save_regs = gfxhub_v2_1_save_regs,
+ .mode2_restore_regs = gfxhub_v2_1_restore_regs,
+ .halt = gfxhub_v2_1_halt,
};
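
The three new hooks are intended to be driven together during a mode2 (engine-only) reset: save the hub state, quiesce the hub, reset, then restore. A minimal sketch of that ordering, assuming a hypothetical caller; the wrapper name and placement below are illustrative and not taken from this patch:

/* Illustrative only: hypothetical caller showing the intended ordering of
 * the new mode2 hooks. Only the funcs pointers are introduced by this patch.
 */
static void example_gfx_mode2_reset(struct amdgpu_device *adev)
{
	/* capture GC VM and aperture state before the reset */
	if (adev->gfxhub.funcs->mode2_save_regs)
		adev->gfxhub.funcs->mode2_save_regs(adev);

	/* stop new traffic and wait for the EA to go idle */
	if (adev->gfxhub.funcs->halt)
		adev->gfxhub.funcs->halt(adev);

	/* ... issue the actual mode2 reset here ... */

	/* re-program the saved state once the reset completes */
	if (adev->gfxhub.funcs->mode2_restore_regs)
		adev->gfxhub.funcs->mode2_restore_regs(adev);
}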
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
new file mode 100644
index 000000000000..5d3fffd4929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "gfxhub_v3_0_3.h"
+
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+#include "navi10_enum.h"
+#include "soc15_common.h"
+
+#define regGCVM_L2_CNTL3_DEFAULT 0x80100007
+#define regGCVM_L2_CNTL4_DEFAULT 0x000000c1
+#define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0
+
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v3_0_3_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v3_0_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v3_0_3_get_fb_location(struct amdgpu_device *adev)
+{
+ u64 base = RREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE);
+
+ base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ return base;
+}
+
+static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev)
+{
+ return (u64)RREG32_SOC15(GC, 0, regGCMC_VM_FB_OFFSET) << 24;
+}
+
+static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+}
+
+static void gfxhub_v3_0_3_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v3_0_3_setup_vm_pt_regs(adev, 0, pt_base);
+
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+}
+
+static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+}
+
+static void gfxhub_v3_0_3_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC); /* UC, uncached */
+
+ WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void gfxhub_v3_0_3_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
+ ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
+ /* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
+ L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL2, tmp);
+
+ tmp = regGCVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, tmp);
+
+ tmp = regGCVM_L2_CNTL4_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL4, tmp);
+
+ tmp = regGCVM_L2_CNTL5_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL5, tmp);
+}
+
+static void gfxhub_v3_0_3_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL, tmp);
+}
+
+static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0xFFFFFFFF);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+}
+
+static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ adev->vm_manager.num_level);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+
+ hub->vm_cntx_cntl = tmp;
+}
+
+static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ unsigned i;
+
+ for (i = 0 ; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ /*
+ * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+ * VF copy registers, so the vbios post doesn't program them.
+ * For SRIOV the driver needs to program them itself.
+ */
+ WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ gfxhub_v3_0_3_init_gart_aperture_regs(adev);
+ gfxhub_v3_0_3_init_system_aperture_regs(adev);
+ gfxhub_v3_0_3_init_tlb_regs(adev);
+ gfxhub_v3_0_3_init_cache_regs(adev);
+
+ gfxhub_v3_0_3_enable_system_domain(adev);
+ gfxhub_v3_0_3_disable_identity_aperture(adev);
+ gfxhub_v3_0_3_setup_vmid_config(adev);
+ gfxhub_v3_0_3_program_invalidation(adev);
+
+ return 0;
+}
+
+static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ /* Setup L2 cache */
+ WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, 0);
+}
+
+/**
+ * gfxhub_v3_0_3_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gfxhub_v3_0_3_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value)
+{
+ u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v3_0_3_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v3_0_3_get_invalidate_req,
+};
+
+static void gfxhub_v3_0_3_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_SEM);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regGCVM_CONTEXT1_CNTL - regGCVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regGCVM_INVALIDATE_ENG1_REQ -
+ regGCVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v3_0_3_vmhub_funcs;
+}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v3_0_3_funcs = {
+ .get_fb_location = gfxhub_v3_0_3_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v3_0_3_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v3_0_3_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v3_0_3_gart_enable,
+ .gart_disable = gfxhub_v3_0_3_gart_disable,
+ .set_fault_enable_default = gfxhub_v3_0_3_set_fault_enable_default,
+ .init = gfxhub_v3_0_3_init,
+};
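
To see how the pieces of this file connect, here is a hedged sketch of a TLB flush that consumes the request word from gfxhub_v3_0_3_get_invalidate_req() via the engine-0 registers recorded in gfxhub_v3_0_3_init(). The helper name is hypothetical, and the real flush path in the GMC code additionally handles the invalidation semaphore, engine selection and SR-IOV/KIQ cases:

/* Illustrative only: minimal TLB flush using the hub fields set up above.
 * The real driver path (the gmc_v11_0 flush code) also takes the
 * invalidation semaphore and handles SR-IOV/KIQ; this sketch omits that.
 */
static void example_flush_gfxhub_tlb(struct amdgpu_device *adev,
				     uint32_t vmid, uint32_t flush_type)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	u32 req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 ack, i;

	WREG32_NO_KIQ(hub->vm_inv_eng0_req, req);

	/* the real code bounds this wait with adev->usec_timeout as well */
	for (i = 0; i < adev->usec_timeout; i++) {
		ack = RREG32_NO_KIQ(hub->vm_inv_eng0_ack);
		if (ack & (1 << vmid))
			break;
		udelay(1);
	}
}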
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h
new file mode 100644
index 000000000000..6153bd5e3083
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFXHUB_V3_0_3_H__
+#define __GFXHUB_V3_0_3_H__
+
+extern const struct amdgpu_gfxhub_funcs gfxhub_v3_0_3_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9ae8cdaa033e..f513e2c2e964 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 1471bfb9ae38..846ccb6cf07d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -39,6 +39,7 @@
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
+#include "gfxhub_v3_0_3.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
@@ -233,7 +234,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Issue additional private vm invalidation to MMHUB */
if ((vmhub != AMDGPU_GFXHUB_0) &&
- (hub->vm_l2_bank_select_reserved_cid2)) {
+ (hub->vm_l2_bank_select_reserved_cid2) &&
+ !amdgpu_sriov_vf(adev)) {
inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
inv_req |= (1 << 25);
@@ -590,7 +592,14 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 3):
+ adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
+ break;
+ default:
+ adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
+ break;
+ }
}
static int gmc_v11_0_early_init(void *handle)
@@ -640,7 +649,10 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
/* base offset of vram pages */
- adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
+ if (amdgpu_sriov_vf(adev))
+ adev->vm_manager.vram_base_offset = 0;
+ else
+ adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}
/**
@@ -732,6 +744,7 @@ static int gmc_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 22761a3bb818..67ca16a8027c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_domain->sem);
@@ -1102,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
*flags |= AMDGPU_PDE_BFS(0x9);
} else if (level == AMDGPU_VM_PDB0) {
- if (*flags & AMDGPU_PDE_PTE)
+ if (*flags & AMDGPU_PDE_PTE) {
*flags &= ~AMDGPU_PDE_PTE;
- else
+ if (!(*flags & AMDGPU_PTE_VALID))
+ *addr |= 1 << PAGE_SHIFT;
+ } else {
*flags |= AMDGPU_PTE_TF;
+ }
}
}
@@ -1624,12 +1628,15 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 39a696cd45b5..29c3484ae1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
0);
}
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Force the MEM clock on before switching the clock/power mode */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* Disable clock and power gating before making any changes */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* Disable the MEM clock override after the clock/power mode change */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_2_update_mem_power_gating(adev, enable);
+ hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
.flush_hdp = hdp_v5_2_flush_hdp,
+ .update_clock_gating = hdp_v5_2_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
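
These two callbacks are normally reached through the SOC-level clockgating handlers rather than called directly. A hedged sketch of that wiring; the function names here are placeholders, and the real entry points are the soc21 set_clockgating_state and get_clockgating_state callbacks:

/* Illustrative only: how SOC code typically drives the HDP callbacks. */
static int example_set_hdp_clockgating(struct amdgpu_device *adev,
					enum amd_clockgating_state state)
{
	adev->hdp.funcs->update_clock_gating(adev,
					     state == AMD_CG_STATE_GATE);
	return 0;
}

static void example_get_hdp_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	adev->hdp.funcs->get_clock_gating_state(adev, flags);
}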
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 92dc60a9d209..7cd79a3844b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -105,7 +105,13 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_USED_INT_THRESHOLD, threshold);
- WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
+ return;
+ } else {
+ WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+
WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}
@@ -132,7 +138,13 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
- WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
if (enable) {
ih->enabled = true;
@@ -242,7 +254,15 @@ static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
- WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
if (ih == &adev->irq.ih) {
/* set the ih ring 0 writeback address whether it's enabled or not */
@@ -727,6 +747,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
.get_wptr = ih_v6_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_0_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 76383baa3929..95548c512f4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -26,12 +26,15 @@
#include "amdgpu_imu.h"
#include "amdgpu_dpm.h"
+#include "imu_v11_0_3.h"
+
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
@@ -360,6 +363,9 @@ static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)
program_imu_rlc_ram(adev, imu_rlc_ram_golden_11_0_2,
(const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_2));
break;
+ case IP_VERSION(11, 0, 3):
+ imu_v11_0_3_program_rlc_ram(adev);
+ break;
default:
BUG();
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
new file mode 100644
index 000000000000..fc69c1a29e23
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_imu.h"
+#include "imu_v11_0_3.h"
+
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+
+static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_3[] = {
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_RD_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_WR_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_DRAM_COMBINE_FLUSH, 0x00555555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC2, 0x00001ffe, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_CREDITS, 0x003f3fff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_TAG_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE0, 0x00041000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE0, 0x00040000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC, 0x00000017, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_ENABLE, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_CREDITS, 0x003f3fbf, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE0, 0x10200800, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE1, 0x00000088, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE0, 0x1d041040, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE1, 0x80000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_IO_PRIORITY, 0x88888888, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MAM_CTRL, 0x0000d800, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ARB_FINAL, 0x000007ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_DRAM_PAGE_BURST, 0x20080200, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ENABLE, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0x000fffff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MISC, 0x0c48bff0, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SA_UNIT_DISABLE, 0x00fffc01, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_PRIM_CONFIG, 0x000fffe1, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_RB_BACKEND_DISABLE, 0xffffff01, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0x40000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0x42000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x44000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x46000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x48000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x4A000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCGTS_TCC_DISABLE, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_RATE_CONFIG, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_EDC_CONFIG, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000500, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_START, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_END, 0x000005ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_BASE, 0x00006000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_TOP, 0x000065ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_TOP_OF_DRAM_SLOT1, 0xff800000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_LOWER_TOP_OF_DRAM2, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_UPPER_TOP_OF_DRAM2, 0x00000fff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, 0x00001ffc, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000551, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL, 0x00080603, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL2, 0x00000003, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL3, 0x00100003, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL5, 0x00003fe0, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000444, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_0, 0x54105410, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_2, 0x76323276, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000244, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCUTCL2_HARVEST_BYPASS_GROUPS, 0x00000006, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BASE, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BOT, 0x00000002, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_TOP, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA0_UCODE_SELFLOAD_CONTROL, 0x00000210, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA1_UCODE_SELFLOAD_CONTROL, 0x00000210, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPC_PSP_DEBUG, CPC_PSP_DEBUG__GPA_OVERRIDE_MASK, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0xe0000000),
+};
+
+static void program_rlc_ram_register_setting(struct amdgpu_device *adev,
+ const struct imu_rlc_ram_golden *regs,
+ const u32 array_size)
+{
+ const struct imu_rlc_ram_golden *entry;
+ u32 reg, data;
+ int i;
+
+ for (i = 0; i < array_size; ++i) {
+ entry = &regs[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ reg |= entry->addr_mask;
+
+ data = entry->data;
+ if (entry->reg == regGCMC_VM_AGP_BASE)
+ data = 0x00ffffff;
+ else if (entry->reg == regGCMC_VM_AGP_TOP)
+ data = 0x0;
+ else if (entry->reg == regGCMC_VM_FB_LOCATION_BASE)
+ data = adev->gmc.vram_start >> 24;
+ else if (entry->reg == regGCMC_VM_FB_LOCATION_TOP)
+ data = adev->gmc.vram_end >> 24;
+
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, reg);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, data);
+ }
+ /* Indicate the latest entry */
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, 0);
+}
+
+void imu_v11_0_3_program_rlc_ram(struct amdgpu_device *adev)
+{
+ program_rlc_ram_register_setting(adev,
+ imu_rlc_ram_golden_11_0_3,
+ (const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_3));
+}
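
The loop above dereferences the golden entries through hwip, instance, segment, reg, data and addr_mask. The authoritative definition is struct imu_rlc_ram_golden in amdgpu_imu.h; the shape below is only inferred from those accesses and shown as an assumption for readability:

/* Assumed layout, inferred from the field accesses in
 * program_rlc_ram_register_setting(); the authoritative definition is
 * struct imu_rlc_ram_golden in amdgpu_imu.h.
 */
struct imu_rlc_ram_golden_example {
	u32 hwip;	/* hardware IP block, e.g. GC_HWIP */
	u32 instance;	/* IP instance index */
	u32 segment;	/* register segment within the instance */
	u32 reg;	/* register offset within the segment */
	u32 data;	/* value to program into the RLC RAM */
	u32 addr_mask;	/* address modifier OR'ed into the offset */
};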
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h
new file mode 100644
index 000000000000..702be568f26b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __IMU_V11_0_3_H__
+#define __IMU_V11_0_3_H__
+
+void imu_v11_0_3_program_rlc_ram(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 120ea294abef..5cec6b259b7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -38,6 +38,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -183,6 +185,20 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+ mes_add_queue_pkt.trap_en = 1;
+
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+ mes_add_queue_pkt.gds_size = input->queue_size;
+
+ if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) &&
+ (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3))))
+ mes_add_queue_pkt.trap_en = 1;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3f44a099c52a..3e51e773f92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index bc11b2de37ae..a1d26c4d80b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -169,17 +169,17 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- /* Disable AGP. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
-
if (!amdgpu_sriov_vf(adev)) {
/*
* the new L1 policy will block SRIOV guest from writing
* these regs, and they will be programmed at host,
* so skip programming these regs.
*/
+ /* Disable AGP. */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index cac72ced94c8..e8058edc1d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- //TODO
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
}
const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 6e0145b2b408..445cb06b9d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
new file mode 100644
index 000000000000..f772bb499f3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V4_0_H__
+#define __MMSCH_V4_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 4
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+#define MMSCH_DOORBELL_OFFSET 0x8
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+
+enum mmsch_v4_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v4_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v4_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v4_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct mmsch_v4_0_table_info jpegdec;
+};
+
+struct mmsch_v4_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v4_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v4_0_cmd_direct_write {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v4_0_cmd_direct_read_modify_write {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v4_0_cmd_direct_polling {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v4_0_cmd_end {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v4_0_cmd_indirect_write {
+ struct mmsch_v4_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+#define MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v4_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
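
One thing the header leaves implicit is that the INSERT macros expand in place and expect a specific set of locals (size, size_dw, table_loc, table_size and the command structs) to exist at the call site. A minimal, assumption-labeled sketch of a table builder that satisfies them; the register offset and value are placeholders:

/* Illustrative only: the locals below are exactly what the macros expect to
 * find in scope. The register offset and value used are placeholders.
 */
static void example_build_mmsch_table(uint32_t *table_loc)
{
	struct mmsch_v4_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	uint32_t table_size = 0;
	uint32_t size, size_dw;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	/* append one direct register write, then terminate the table */
	MMSCH_V4_0_INSERT_DIRECT_WT(0x0 /* placeholder offset */,
				    0x0 /* placeholder value */);
	MMSCH_V4_0_INSERT_END();

	/* table_size (in dwords) would then be stored in the init header */
}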
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 12906ba74462..a2f04b249132 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -290,6 +290,7 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e07757eea7ad..a977f0027928 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -317,6 +317,7 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 288c414babdf..fd14fa9b9cd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -529,6 +529,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4b5396d3e60f..eec13cb5bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
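
In other words, the ih_soft ring is serviced entirely in software; a tiny hypothetical helper (not part of the patch) captures the distinction the two hunks above rely on:

	/* Hypothetical helper: true when an IH ring is backed by real registers
	 * and therefore needs rptr write-back and register-based wptr handling.
	 * ih_soft lives purely in host memory, so it is excluded.
	 */
	static bool navi10_ih_ring_has_hw_regs(struct amdgpu_device *adev,
					       struct amdgpu_ih_ring *ih)
	{
		return ih != &adev->irq.ih_soft;
	}
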
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b465baa26762..aa761ff3a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v2_3_program_ltr(adev);
+ /* Skip LTR programming if LTR is not enabled
+ * anywhere in the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v2_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
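
nbio v6.1 and v7.4 below get the same treatment; the shape of the guard, as a sketch (the nbio_vX_* names are placeholders):

	#ifdef CONFIG_PCIEASPM
	static void nbio_vX_program_ltr(struct amdgpu_device *adev)
	{
		/* LTR register writes, compiled out when PCIe ASPM support is off */
	}
	#endif

	static void nbio_vX_program_aspm(struct amdgpu_device *adev)
	{
	#ifdef CONFIG_PCIEASPM
		/* ... ASPM link-control programming ... */

		/* only touch LTR when the whole upstream path has it enabled */
		if (adev->pdev->ltr_path)
			nbio_vX_program_ltr(adev);
	#endif
	}
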
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 982a89f841d5..15eb3658d70e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -488,3 +488,47 @@ const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
.get_rom_offset = nbio_v4_3_get_rom_offset,
.program_aspm = nbio_v4_3_program_aspm,
};
+
+
+static void nbio_v4_3_sriov_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
+{
+}
+
+static void nbio_v4_3_sriov_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index,
+ int doorbell_size)
+{
+}
+
+static void nbio_v4_3_sriov_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+}
+
+static void nbio_v4_3_sriov_gc_doorbell_init(struct amdgpu_device *adev)
+{
+}
+
+const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = {
+ .get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset,
+ .get_rev_id = nbio_v4_3_get_rev_id,
+ .mc_access_enable = nbio_v4_3_mc_access_enable,
+ .get_memsize = nbio_v4_3_get_memsize,
+ .sdma_doorbell_range = nbio_v4_3_sriov_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v4_3_sriov_vcn_doorbell_range,
+ .gc_doorbell_init = nbio_v4_3_sriov_gc_doorbell_init,
+ .enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v4_3_sriov_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v4_3_get_clockgating_state,
+ .ih_control = nbio_v4_3_ih_control,
+ .init_registers = nbio_v4_3_init_registers,
+ .remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
+ .get_rom_offset = nbio_v4_3_get_rom_offset,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
index ade43661d7a9..711999ceedf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
@@ -28,5 +28,6 @@
extern const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v4_3_funcs;
+extern const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index f7f6ddebd3e4..37615a77287b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v6_1_program_ltr(adev);
+ /* Skip LTR programming if LTR is not enabled
+ * anywhere in the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v6_1_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 11848d1e238b..19455a725939 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
};
+#ifdef CONFIG_PCIEASPM
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v7_4_program_ltr(adev);
+ /* Skip LTR programming if LTR is not enabled
+ * anywhere in the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v7_4_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 01e8288d09a8..def89379b51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -28,6 +28,14 @@
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
@@ -68,12 +76,6 @@ static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instan
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
SIZE, doorbell_size);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- SIZE, doorbell_size);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_SDMA0_DOORBELL_RANGE,
@@ -247,6 +249,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
}
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (enable) {
+ data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ } else {
+ data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (enable)
+ data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+ else
+ data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+ if (enable) {
+ data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +339,10 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
+ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index a2588200ea58..0b2ac418e4ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 726a5bba40b2..21d822b1d589 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"
@@ -45,6 +44,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -110,6 +110,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -223,6 +224,12 @@ static int psp_v13_0_bootloader_load_dbg_drv(struct psp_context *psp)
return psp_v13_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
}
+static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
+{
+ return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
+}
+
+
static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
{
int ret;
@@ -719,6 +726,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_soc_drv = psp_v13_0_bootloader_load_soc_drv,
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
+ .bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_init = psp_v13_0_ring_init,
.ring_create = psp_v13_0_ring_create,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 65181efba50e..7241a9fb0121 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -561,44 +561,6 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
}
}
-static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- /* arcturus shares the same FW memory across
- all SDMA isntances */
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
- adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
- break;
- }
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v4_0_init_microcode - load ucode images from disk
*
@@ -615,9 +577,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
+ int ret, i;
DRM_DEBUG("\n");
@@ -656,58 +616,25 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
-
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
- else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
-
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
}
-out:
- if (err) {
- DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v4_0_destroy_inst_ctx(adev);
- }
- return err;
+ return ret;
}
/**
@@ -1504,6 +1431,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1995,14 +1927,17 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- sdma_v4_0_destroy_inst_ctx(adev);
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
static int sdma_v4_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
@@ -2011,9 +1946,7 @@ static int sdma_v4_0_hw_init(void *handle)
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
- r = sdma_v4_0_start(adev);
-
- return r;
+ return sdma_v4_0_start(adev);
}
static int sdma_v4_0_hw_fini(void *handle)
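
The bookkeeping removed above is assumed to be folded into the shared amdgpu_sdma_init_microcode()/amdgpu_sdma_destroy_inst_ctx() helpers, whose bodies are outside this excerpt; the calling convention visible in the hunks is roughly:

	/* Sketch inferred from the call sites above; "duplicate" means instance 0's
	 * firmware image is shared by every SDMA instance (Arcturus/Aldebaran style).
	 */
	ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);   /* one shared image */
	ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);  /* per-instance image */

	amdgpu_sdma_destroy_inst_ctx(adev, true);   /* release the single shared image */
	amdgpu_sdma_destroy_inst_ctx(adev, false);  /* release each instance's image */
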
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index a019ac92edb7..c05c3eebde4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -240,10 +240,7 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct sdma_firmware_header_v1_0 *hdr;
+ int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
@@ -272,38 +269,12 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
- if (err)
- goto out;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
- if (adev->sdma.instance[i].feature_version >= 20)
- adev->sdma.instance[i].burst_nop = true;
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- }
-out:
- if (err) {
- DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
- return err;
+
+ return ret;
}
static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -1465,12 +1436,10 @@ static int sdma_v5_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
+ for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- }
+
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 83c6ccaaa9e4..f136fec7b4f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,33 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v5_2_init_microcode - load ucode images from disk
*
@@ -132,9 +105,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
DRM_DEBUG("\n");
@@ -169,42 +139,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++)
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
-
- if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0)))
- return 0;
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v5_2: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v5_2_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -1406,19 +1341,16 @@ static int sdma_v5_2_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v5_2_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
static int sdma_v5_2_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = sdma_v5_2_start(adev);
-
- return r;
+ return sdma_v5_2_start(adev);
}
static int sdma_v5_2_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 0200cb3a31a4..db51230163c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -47,6 +47,7 @@
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -77,33 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v6_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v2_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void*)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v6_0_init_microcode - load ucode images from disk
*
@@ -113,16 +87,10 @@ static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
-
-// emulation only, won't work on real chip
-// sdma 6.0.0 real chip need to use PSP to load firmware
static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30];
char ucode_prefix[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct sdma_firmware_header_v2_0 *sdma_hdr;
DRM_DEBUG("\n");
@@ -130,43 +98,7 @@ static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v6_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
- memcpy((void*)&adev->sdma.instance[i],
- (void*)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- sdma_hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v6_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v6_0_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -559,7 +491,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -593,7 +526,10 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev))
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
+ else
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
@@ -1365,19 +1301,16 @@ static int sdma_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v6_0_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
static int sdma_v6_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = sdma_v6_0_start(adev);
-
- return r;
+ return sdma_v6_0_start(adev);
}
static int sdma_v6_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
new file mode 100644
index 000000000000..7aa570c1ce4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "sienna_cichlid.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ } else {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
+ adev->pm.fw_version >= 0x3a5500 &&
+ !amdgpu_sriov_vf(adev)) {
+ reset_context->method = AMD_RESET_METHOD_MODE2;
+ return handler;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+sienna_cichlid_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->gfxhub.funcs->mode2_save_regs)
+ adev->gfxhub.funcs->mode2_save_regs(adev);
+ if (adev->gfxhub.funcs->halt)
+ adev->gfxhub.funcs->halt(adev);
+ r = sienna_cichlid_mode2_suspend_ip(adev);
+ }
+
+ return r;
+}
+
+static void sienna_cichlid_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+
+static int sienna_cichlid_mode2_reset(struct amdgpu_device *adev)
+{
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ return amdgpu_dpm_mode2_reset(adev);
+}
+
+static int
+sienna_cichlid_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r;
+
+ r = sienna_cichlid_mode2_reset(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "ASIC reset failed with error, %d ", r);
+ }
+ return r;
+}
+
+static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ int i, r;
+ struct psp_context *psp = &adev->psp;
+
+ r = psp_rlc_autoload_start(psp);
+ if (r) {
+ dev_err(adev->dev, "Failed to start rlc autoload\n");
+ return r;
+ }
+
+ /* Reinit GFXHUB */
+ if (adev->gfxhub.funcs->mode2_restore_regs)
+ adev->gfxhub.funcs->mode2_restore_regs(adev);
+ adev->gfxhub.funcs->init(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r) {
+ dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = sienna_cichlid_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ /*
+ * Add this ASIC back as tracked now that the reset
+ * has completed successfully.
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ goto end;
+ }
+
+end:
+ if (r)
+ return -EAGAIN;
+ else
+ return r;
+}
+
+static struct amdgpu_reset_handler sienna_cichlid_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = sienna_cichlid_mode2_prepare_hwcontext,
+ .perform_reset = sienna_cichlid_mode2_perform_reset,
+ .restore_hwcontext = sienna_cichlid_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = sienna_cichlid_mode2_reset,
+};
+
+int sienna_cichlid_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = sienna_cichlid_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = sienna_cichlid_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &sienna_cichlid_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int sienna_cichlid_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
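
A rough sketch of how a handler registered above is expected to be driven by the reset control core; the dispatch function name is hypothetical, while the callbacks and fields are the ones this file populates:

	/* Hypothetical dispatch flow for a mode2 reset request. */
	static int example_dispatch_mode2(struct amdgpu_device *adev,
					  struct amdgpu_reset_context *reset_context)
	{
		struct amdgpu_reset_control *reset_ctl = adev->reset_cntl;
		struct amdgpu_reset_handler *handler;
		int r;

		handler = reset_ctl->get_reset_handler(reset_ctl, reset_context);
		if (!handler)
			return -EOPNOTSUPP;	/* fall back to a full ASIC reset */

		r = handler->prepare_hwcontext(reset_ctl, reset_context);
		if (r)
			return r;

		r = handler->perform_reset(reset_ctl, reset_context);
		if (r)
			return r;

		return handler->restore_hwcontext(reset_ctl, reset_context);
	}
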
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h
index 5dcfbd8e2697..5213b162dacd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef DML_WRAPPER_H_
-#define DML_WRAPPER_H_
+#ifndef __SIENNA_CICHLID_H__
+#define __SIENNA_CICHLID_H__
-#include "dc.h"
-#include "dml/display_mode_vba.h"
+#include "amdgpu.h"
-bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate);
+int sienna_cichlid_reset_init(struct amdgpu_device *adev);
+int sienna_cichlid_reset_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index fde6154f2009..183024d7c184 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_ring *ring;
-
- /* sdma/ih doorbell range are programed by hypervisor */
- if (!amdgpu_sriov_vf(adev)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
- }
-
- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
- adev->irq.ih.doorbell_index);
- }
-}
-
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
- /* HW doorbell routing policy: doorbell writing not
- * in SDMA/IH/MM/ACV range will be routed to CP. So
- * we need to init SDMA/IH/MM/ACV doorbell range prior
- * to CP ip block init and ring test.
- */
- soc15_doorbell_range_init(adev);
return 0;
}
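
The SDMA half of the removed helper does not disappear: it is reprogrammed next to the engine it belongs to in sdma_v4_0_start() (see the earlier hunk), and the IH doorbell range is presumably handled in the IH code outside this excerpt. In spirit:

	/* per-engine doorbell range setup, now done just before unhalting SDMA */
	if (!amdgpu_sriov_vf(adev)) {
		ring = &adev->sdma.instance[i].ring;
		adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
	}
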
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 52816de5e17b..16b757664a35 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -179,7 +179,7 @@ void soc21_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL), grbm_gfx_cntl);
+ WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
return false;
default:
@@ -494,6 +495,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+ bool enter)
+{
+ if (enter)
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ else
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (adev->gfx.funcs->update_perfmon_mgcg)
+ adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+ return 0;
+}
+
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +528,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
+ .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
static int soc21_common_early_init(void *handle)
@@ -546,8 +562,10 @@ static int soc21_common_early_init(void *handle)
case IP_VERSION(11, 0, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_FGCG |
@@ -565,6 +583,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB |
AMD_PG_SUPPORT_MMHUB;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ }
adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
break;
case IP_VERSION(11, 0, 2):
@@ -575,7 +597,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_ATHUB_MGCG |
- AMD_CG_SUPPORT_ATHUB_LS;
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -586,12 +610,42 @@ static int soc21_common_early_init(void *handle)
break;
case IP_VERSION(11, 0, 1):
adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
+ case IP_VERSION(11, 0, 3):
+ adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ if (amdgpu_sriov_vf(adev)) {
+ /* hypervisor control CG and PG enablement */
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ }
+ adev->external_rev_id = adev->rev_id + 0x20;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -683,6 +737,8 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index bf7524f16b66..a0d19b768346 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -452,41 +452,47 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset,
- uint32_t ch_inst,
- uint32_t umc_inst)
+ uint32_t umc_reg_offset, uint32_t ch_inst,
+ uint32_t umc_inst, uint64_t mca_addr)
{
uint32_t mc_umc_status_addr;
uint32_t channel_index;
- uint64_t mc_umc_status, mc_umc_addrt0;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0;
uint64_t err_addr, soc_pa, retired_page, column;
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- mc_umc_addrt0 =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+ if (mca_addr == UMC_INVALID_ADDR) {
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
- if (mc_umc_status == 0)
- return;
+ if (mc_umc_status == 0)
+ return;
- if (!err_data->err_addr) {
- /* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
}
channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
/* calculate error address if ue/ce error is detected */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-
- err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
+ mca_addr != UMC_INVALID_ADDR) {
+ if (mca_addr == UMC_INVALID_ADDR) {
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ } else {
+ err_addr = mca_addr;
+ }
/* translate umc channel address to soc pa, 3 parts are included */
soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
@@ -501,7 +507,8 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
/* we only save ue error information currently, ce is skipped */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
+ == 1 ||
+ mca_addr != UMC_INVALID_ADDR) {
/* loop for all possibilities of [C4 C3 C2] */
for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
@@ -519,7 +526,8 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
}
/* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ if (mca_addr == UMC_INVALID_ADDR)
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -540,9 +548,8 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
ch_inst);
umc_v6_7_query_error_address(adev,
err_data,
- umc_reg_offset,
- ch_inst,
- umc_inst);
+ umc_reg_offset, ch_inst,
+ umc_inst, UMC_INVALID_ADDR);
}
}
@@ -583,4 +590,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
+ .convert_ras_error_address = umc_v6_7_query_error_address,
};
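
With the extra mca_addr argument the routine now serves two callers; a condensed sketch of the two modes, using the UMC_INVALID_ADDR sentinel from the hunk above:

	/* Mode 1: scan hardware -- read STATUST0/ADDRT0, translate, clear status. */
	umc_v6_7_query_error_address(adev, err_data, umc_reg_offset,
				     ch_inst, umc_inst, UMC_INVALID_ADDR);

	/* Mode 2: translate a caller-supplied MCA address -- no register reads and
	 * no status clear; this is the path behind .convert_ras_error_address.
	 */
	umc_v6_7_query_error_address(adev, err_data, umc_reg_offset,
				     ch_inst, umc_inst, mca_addr);
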
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index 36a2053f2e8b..a8cbda81828d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -101,22 +101,16 @@ static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
{
- uint32_t ecc_err_cnt, ecc_err_cnt_addr;
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
/* UMC 8_10 registers */
- ecc_err_cnt_addr =
- SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
- *error_count +=
- (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
- UMC_V8_10_CE_CNT_INIT);
-
- /* Check for SRAM correctable error, MCUMC_STATUS is a 64 bit register */
+ /* Rely on MCUMC_STATUS for correctable error counter
+ * MCUMC_STATUS is a 64 bit register
+ */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 39405f0db824..9c8b5fd99037 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1761,21 +1761,23 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1846,7 +1848,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v3_0_limit_sched(p);
+ r = vcn_v3_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1860,7 +1862,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
uint32_t msg_lo = 0, msg_hi = 0;
unsigned i;
int r;
@@ -1879,7 +1881,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
val == 0) {
- r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ r = vcn_v3_0_dec_msg(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ca14c3ef742e..897a5ce9c9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -30,6 +30,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "mmsch_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@@ -45,6 +46,8 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN_HARVEST_MMSCH 0
+
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
@@ -53,12 +56,14 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
};
+static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
+static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v4_0_early_init - set function pointers
@@ -71,6 +76,9 @@ static int vcn_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
+
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -92,6 +100,7 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ int vcn_doorbell_index = 0;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -103,6 +112,12 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_sriov_vf(adev)) {
+ vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
+ /* get DWORD offset */
+ vcn_doorbell_index = vcn_doorbell_index << 1;
+ }
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -119,7 +134,10 @@ static int vcn_v4_0_sw_init(void *handle)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
+ if (amdgpu_sriov_vf(adev))
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
sprintf(ring->name, "vcn_unified_%d", i);
@@ -132,10 +150,23 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
+ if (amdgpu_sriov_vf(adev))
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
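
For reference, the doorbell selection above differs between bare metal and SR-IOV: a VF rebases vcn_ring0_1 by MMSCH_DOORBELL_OFFSET, converts it to a DWORD offset, and then gives each instance a (num_enc_rings + 1)-slot block, while bare metal keeps the original (index << 1) + 2 + 8 * i layout. A standalone sketch of that arithmetic (illustration only, not driver code):

/*
 * Sketch: doorbell index selection for the VCN 4.0 unified ring.
 * vcn_ring0_1 is the qword doorbell index from adev->doorbell_index,
 * inst is the VCN instance, num_enc_rings mirrors adev->vcn.num_enc_rings.
 */
static unsigned int vcn4_doorbell_index(bool sriov,
                                        unsigned int vcn_ring0_1,
                                        unsigned int mmsch_doorbell_offset,
                                        unsigned int num_enc_rings,
                                        unsigned int inst)
{
        if (sriov) {
                /* rebase below the MMSCH range, then convert to a DWORD offset */
                unsigned int base = (vcn_ring0_1 - mmsch_doorbell_offset) << 1;

                return base + inst * (num_enc_rings + 1) + 1;
        }

        /* bare metal: DWORD offset + 2, eight doorbells reserved per instance */
        return (vcn_ring0_1 << 1) + 2 + 8 * inst;
}
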
@@ -169,6 +200,9 @@ static int vcn_v4_0_sw_fini(void *handle)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -191,18 +225,42 @@ static int vcn_v4_0_hw_init(void *handle)
struct amdgpu_ring *ring;
int i, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v4_0_start_sriov(adev);
+ if (r)
+ goto done;
- ring = &adev->vcn.inst[i].ring_enc[0];
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
+ ring->sched.ready = false;
+ ring->no_scheduler = true;
+ dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
+ } else {
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ } else {
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+
+ }
}
done:
@@ -230,12 +288,14 @@ static int vcn_v4_0_hw_fini(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
}
+
}
return 0;
@@ -1107,6 +1167,214 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
+{
+ int i;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v4_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v4_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v4_0_cmd_end end = { {0} };
+ struct mmsch_v4_0_init_header header;
+
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
+ for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
+ header.inst[i].init_status = 0;
+ header.inst[i].table_offset = 0;
+ header.inst[i].table_size = 0;
+ }
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ table_size = 0;
+
+ MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+ offset = 0;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ } else {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[i].gpu_addr + offset;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[i].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+
+ /* add end packet */
+ MMSCH_V4_0_INSERT_END();
+
+ /* refine header */
+ header.inst[i].init_status = 0;
+ header.inst[i].table_offset = header.total_size;
+ header.inst[i].table_size = table_size;
+ header.total_size += table_size;
+ }
+
+ /* Update init table header in memory */
+ size = sizeof(struct mmsch_v4_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ /* message MMSCH (in VCN[0]) to initialize this client
+ * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ /* 2, update vmid of descriptor */
+ tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ size = header.total_size;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ /* 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ param = 0x00000001;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+
+ return 0;
+}
+
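
The handshake in vcn_v4_0_start_sriov() reduces to: program the descriptor address and size, clear the response register, write the host mailbox, then poll regMMSCH_VF_MAILBOX_RESP in 10 us steps for roughly 1000 us. A minimal sketch of that polling pattern, with read_resp() and delay_us() as assumed helpers standing in for the register read and udelay():

/*
 * Sketch: poll a mailbox response with a microsecond budget.
 * read_resp() stands in for RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP),
 * delay_us() for udelay(); both are placeholders, not the MMSCH interface.
 */
static int poll_mailbox_resp(unsigned int (*read_resp)(void),
                             void (*delay_us)(unsigned int),
                             unsigned int expected, unsigned int timeout_us)
{
        unsigned int waited = 0;
        unsigned int resp;

        while (waited < timeout_us) {
                resp = read_resp();
                if (resp)                       /* any non-zero reply ends the wait */
                        return resp == expected ? 0 : -1;
                delay_us(10);
                waited += 10;
        }

        return -1;                              /* timed out, -EBUSY in the driver */
}
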
/**
* vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
*
@@ -1115,7 +1383,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
@@ -1133,7 +1401,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
- return 0;
}
/**
@@ -1154,7 +1421,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_stop_dpg_mode(adev, i);
+ vcn_v4_0_stop_dpg_mode(adev, i);
continue;
}
@@ -1328,21 +1595,23 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1413,7 +1682,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v4_0_limit_sched(p);
+ r = vcn_v4_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1426,32 +1695,34 @@ out:
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
- struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ struct amdgpu_vcn_decode_buffer *decode_buffer;
+ uint64_t addr;
uint32_t val;
- int r = 0;
/* The first instance can decode anything */
if (!ring->me)
- return r;
+ return 0;
/* unified queue ib header has 8 double words. */
if (ib->length_dw < 8)
- return r;
+ return 0;
val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE
+ if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
+ return 0;
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
- if (decode_buffer->valid_buf_flag & 0x1)
- r = vcn_v4_0_dec_msg(p, ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo);
- }
- return r;
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
}
static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
@@ -1597,6 +1868,15 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ /* for SRIOV, guest should not control VCN Power-gating
+ * MMSCH FW should control Power-gating and clock-gating
+ * guest should avoid touching CGC and PG
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if(state == adev->vcn.cur_state)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cdd599a08125..1e83db0c5438 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -334,9 +338,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -409,6 +415,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
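
Both Vega IH blocks now treat adev->irq.ih_soft like ring 0 when reading the write pointer (only the CPU writeback slot backs it) and skip the read-pointer update entirely, because the soft ring has no doorbell or register behind it. A condensed sketch of that pattern with simplified stand-in types:

/* Sketch: the software IH ring has no hardware registers behind it. */
struct ih_ring_sketch {
        bool is_soft;                   /* &adev->irq.ih_soft in the driver */
        unsigned int *wptr_cpu;         /* writeback slot filled by the producer */
        unsigned int rptr;
};

static unsigned int ih_get_wptr(const struct ih_ring_sketch *ih)
{
        /* ring 0 and the soft ring both read the CPU writeback copy */
        return *ih->wptr_cpu;
}

static void ih_set_rptr(struct ih_ring_sketch *ih, unsigned int rptr)
{
        ih->rptr = rptr;
        if (ih->is_soft)
                return;                 /* nothing to write back for the soft ring */
        /* a hardware ring would update its doorbell or RPTR register here */
}
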
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 3b4eb8285943..59dfca093155 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -385,9 +389,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -461,6 +467,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 60a81649cf12..c7118843db05 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -742,7 +742,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf88fffe, 0x877aff7f,
0x04000000, 0x8f7a857a,
0x886d7a6d, 0xb97b02dc,
- 0x8f7b997b, 0xb97a2a05,
+ 0x8f7b997b, 0xb97a3a05,
0x807a817a, 0xbf0d997b,
0xbf850002, 0x8f7a897a,
0xbf820001, 0x8f7a8a7a,
@@ -819,7 +819,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbefe037c, 0xbefc0370,
0xf4611c7a, 0xf8000000,
0x80708470, 0xbefc037e,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -1069,7 +1069,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9f9f816, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2114,7 +2114,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x007a0000, 0x7e000280,
0xbefe037a, 0xbeff037b,
0xb97b02dc, 0x8f7b997b,
- 0xb97a2a05, 0x807a817a,
+ 0xb97a3a05, 0x807a817a,
0xbf0d997b, 0xbf850002,
0x8f7a897a, 0xbf820001,
0x8f7a8a7a, 0xb97b1e06,
@@ -2157,7 +2157,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x01000000, 0xe0704100,
0x705d0100, 0xe0704200,
0x705d0200, 0xe0704300,
- 0x705d0300, 0xb9702a05,
+ 0x705d0300, 0xb9703a05,
0x80708170, 0xbf0d9973,
0xbf850002, 0x8f708970,
0xbf820001, 0x8f708a70,
@@ -2189,7 +2189,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbefe03ff, 0x0000ffff,
0xbeff0380, 0xe0704000,
0x705d0200, 0xbefe03c1,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -2475,7 +2475,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb9ef4803, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2494,438 +2494,441 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
-
static const uint32_t cwsr_trap_gfx11_hex[] = {
- 0xbfa00001, 0xbfa0021b,
+ 0xbfa00001, 0xbfa0021e,
0xb0804006, 0xb8f8f802,
- 0x91788678, 0xb8fbf803,
- 0x8b6eff78, 0x00002000,
- 0xbfa10009, 0x8b6eff6d,
- 0x00ff0000, 0xbfa2001e,
- 0x8b6eff7b, 0x00000400,
- 0xbfa20041, 0xbf830010,
- 0xb8fbf803, 0xbfa0fffa,
- 0x8b6eff7b, 0x00000900,
- 0xbfa20015, 0x8b6eff7b,
- 0x000071ff, 0xbfa10008,
- 0x8b6fff7b, 0x00007080,
- 0xbfa10001, 0xbeee1287,
- 0xb8eff801, 0x846e8c6e,
- 0x8b6e6f6e, 0xbfa2000a,
+ 0x9178ff78, 0x00020006,
+ 0xb8fbf803, 0xbf0d9f6d,
+ 0xbfa20006, 0x8b6eff78,
+ 0x00002000, 0xbfa10009,
0x8b6eff6d, 0x00ff0000,
- 0xbfa20007, 0xb8eef801,
- 0x8b6eff6e, 0x00000800,
- 0xbfa20003, 0x8b6eff7b,
- 0x00000400, 0xbfa20026,
- 0xbefa4d82, 0xbf89fc07,
- 0x84fa887a, 0xf4005bbd,
- 0xf8000010, 0xbf89fc07,
- 0x846e976e, 0x9177ff77,
- 0x00800000, 0x8c776e77,
- 0xf4045bbd, 0xf8000000,
- 0xbf89fc07, 0xf4045ebd,
- 0xf8000008, 0xbf89fc07,
- 0x8bee6e6e, 0xbfa10001,
- 0xbe80486e, 0x8b6eff6d,
- 0x01ff0000, 0xbfa20005,
- 0x8c78ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbfa00005, 0x8b6eff6d,
- 0x01000000, 0xbfa20002,
- 0x806c846c, 0x826d806d,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
- 0xb978f802, 0xbe804a6c,
- 0x8b6dff6d, 0x0000ffff,
- 0xbefa0080, 0xb97a0283,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbefe4d84,
- 0xbf89fc07, 0x8b7aff7f,
- 0x04000000, 0x847a857a,
- 0x8c6d7a6d, 0xbefa007e,
- 0x8b7bff7f, 0x0000ffff,
- 0xbefe00c1, 0xbeff00c1,
- 0xdca6c000, 0x007a0000,
- 0x7e000280, 0xbefe007a,
- 0xbeff007b, 0xb8fb02dc,
- 0x847b997b, 0xb8fa3b05,
- 0x807a817a, 0xbf0d997b,
- 0xbfa20002, 0x847a897a,
- 0xbfa00001, 0x847a8a7a,
- 0xb8fb1e06, 0x847b8a7b,
- 0x807a7b7a, 0x8b7bff7f,
- 0x0000ffff, 0x807aff7a,
- 0x00000200, 0x807a7e7a,
- 0x827b807b, 0xd7610000,
- 0x00010870, 0xd7610000,
- 0x00010a71, 0xd7610000,
- 0x00010c72, 0xd7610000,
- 0x00010e73, 0xd7610000,
- 0x00011074, 0xd7610000,
- 0x00011275, 0xd7610000,
- 0x00011476, 0xd7610000,
- 0x00011677, 0xd7610000,
- 0x00011a79, 0xd7610000,
- 0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
- 0x00003fff, 0xbeff0080,
- 0xdca6c040, 0x007a0000,
- 0xd760007a, 0x00011d00,
- 0xd760007b, 0x00011f00,
+ 0xbfa2001e, 0x8b6eff7b,
+ 0x00000400, 0xbfa20041,
+ 0xbf830010, 0xb8fbf803,
+ 0xbfa0fffa, 0x8b6eff7b,
+ 0x00000900, 0xbfa20015,
+ 0x8b6eff7b, 0x000071ff,
+ 0xbfa10008, 0x8b6fff7b,
+ 0x00007080, 0xbfa10001,
+ 0xbeee1287, 0xb8eff801,
+ 0x846e8c6e, 0x8b6e6f6e,
+ 0xbfa2000a, 0x8b6eff6d,
+ 0x00ff0000, 0xbfa20007,
+ 0xb8eef801, 0x8b6eff6e,
+ 0x00000800, 0xbfa20003,
+ 0x8b6eff7b, 0x00000400,
+ 0xbfa20026, 0xbefa4d82,
+ 0xbf89fc07, 0x84fa887a,
+ 0xf4005bbd, 0xf8000010,
+ 0xbf89fc07, 0x846e976e,
+ 0x9177ff77, 0x00800000,
+ 0x8c776e77, 0xf4045bbd,
+ 0xf8000000, 0xbf89fc07,
+ 0xf4045ebd, 0xf8000008,
+ 0xbf89fc07, 0x8bee6e6e,
+ 0xbfa10001, 0xbe80486e,
+ 0x8b6eff6d, 0x01ff0000,
+ 0xbfa20005, 0x8c78ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbfa00005,
+ 0x8b6eff6d, 0x01000000,
+ 0xbfa20002, 0x806c846c,
+ 0x826d806d, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb978f802,
+ 0xbe804a6c, 0x8b6dff6d,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbefe4d84, 0xbf89fc07,
+ 0x8b7aff7f, 0x04000000,
+ 0x847a857a, 0x8c6d7a6d,
+ 0xbefa007e, 0x8b7bff7f,
+ 0x0000ffff, 0xbefe00c1,
+ 0xbeff00c1, 0xdca6c000,
+ 0x007a0000, 0x7e000280,
0xbefe007a, 0xbeff007b,
- 0xbef4007e, 0x8b75ff7f,
- 0x0000ffff, 0x8c75ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x10807fac,
- 0xbef1007d, 0xbef00080,
- 0xb8f302dc, 0x84739973,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00002, 0xbeff00c1,
- 0xbfa00009, 0xbef600ff,
- 0x01000000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
- 0x701d0300, 0xbfa00008,
+ 0xb8fb02dc, 0x847b997b,
+ 0xb8fa3b05, 0x807a817a,
+ 0xbf0d997b, 0xbfa20002,
+ 0x847a897a, 0xbfa00001,
+ 0x847a8a7a, 0xb8fb1e06,
+ 0x847b8a7b, 0x807a7b7a,
+ 0x8b7bff7f, 0x0000ffff,
+ 0x807aff7a, 0x00000200,
+ 0x807a7e7a, 0x827b807b,
+ 0xd7610000, 0x00010870,
+ 0xd7610000, 0x00010a71,
+ 0xd7610000, 0x00010c72,
+ 0xd7610000, 0x00010e73,
+ 0xd7610000, 0x00011074,
+ 0xd7610000, 0x00011275,
+ 0xd7610000, 0x00011476,
+ 0xd7610000, 0x00011677,
+ 0xd7610000, 0x00011a79,
+ 0xd7610000, 0x00011c7e,
+ 0xd7610000, 0x00011e7f,
+ 0xbefe00ff, 0x00003fff,
+ 0xbeff0080, 0xdca6c040,
+ 0x007a0000, 0xd760007a,
+ 0x00011d00, 0xd760007b,
+ 0x00011f00, 0xbefe007a,
+ 0xbeff007b, 0xbef4007e,
+ 0x8b75ff7f, 0x0000ffff,
+ 0x8c75ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x10807fac, 0xbef1007d,
+ 0xbef00080, 0xb8f302dc,
+ 0x84739973, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00002,
+ 0xbeff00c1, 0xbfa00009,
0xbef600ff, 0x01000000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0xbfa00008, 0xbef600ff,
+ 0x01000000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
+ 0x701d0300, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0xbef600ff,
+ 0x01000000, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0xbefd0080, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xd7610002, 0x0000fa6c,
+ 0x807d817d, 0x917aff6d,
+ 0x80000000, 0xd7610002,
+ 0x0000fa7a, 0x807d817d,
+ 0xd7610002, 0x0000fa6e,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6f, 0x807d817d,
+ 0xd7610002, 0x0000fa78,
+ 0x807d817d, 0xb8faf803,
+ 0xd7610002, 0x0000fa7a,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa7b, 0x807d817d,
+ 0xb8f1f801, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f814, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f815, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xbefe00ff, 0x0000ffff,
+ 0xbeff0080, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
0xb8f03b05, 0x80708170,
0xbf0d9973, 0xbfa20002,
0x84708970, 0xbfa00001,
0x84708a70, 0xb8fa1e06,
0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
0xbef600ff, 0x01000000,
- 0x7e000280, 0x7e020280,
- 0x7e040280, 0xbefd0080,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xd7610002,
- 0x0000fa6c, 0x807d817d,
- 0x917aff6d, 0x80000000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa6e, 0x807d817d,
- 0xd7610002, 0x0000fa6f,
- 0x807d817d, 0xd7610002,
- 0x0000fa78, 0x807d817d,
- 0xb8faf803, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa7b,
- 0x807d817d, 0xb8f1f801,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f814,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f815,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xbefe00ff,
- 0x0000ffff, 0xbeff0080,
- 0xe0685000, 0x701d0200,
- 0xbefe00c1, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0xbef600ff,
- 0x01000000, 0xbef90080,
- 0xbefd0080, 0xbf800000,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xbe8c410c, 0xbe8e410e,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0xbef90080, 0xbefd0080,
+ 0xbf800000, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xbe8c410c,
+ 0xbe8e410e, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xd7610002, 0x0000f20c,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20d, 0x80798179,
- 0xd7610002, 0x0000f20e,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
- 0x0000f20f, 0x80798179,
- 0xbf06a079, 0xbfa10006,
- 0xe0685000, 0x701d0200,
- 0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbc,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
+ 0x0000f20c, 0x80798179,
+ 0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
+ 0x0000f20e, 0x80798179,
+ 0xd7610002, 0x0000f20f,
+ 0x80798179, 0xbf06a079,
+ 0xbfa10006, 0xe0685000,
+ 0x701d0200, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ffbc, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xe0685000, 0x701d0200,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
+ 0x80798179, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x8b7bc17b, 0xbfa10044,
+ 0xbfbd0000, 0x8b7aff6d,
+ 0x80000000, 0xbfa10040,
+ 0x847b867b, 0x847b827b,
+ 0xbef6007b, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xd71f0000,
+ 0x000100c1, 0xd7200000,
+ 0x000200c1, 0x16000084,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbefd0080,
+ 0xbfa20012, 0xbe8300ff,
+ 0x00000080, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7d, 0xbfa2fff4,
+ 0xbfa00011, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000100,
+ 0xbf0a7b7d, 0xbfa2fff4,
0xbefe00c1, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00001, 0xbeff00c1,
- 0xb8fb4306, 0x8b7bc17b,
- 0xbfa10044, 0xbfbd0000,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10040, 0x847b867b,
- 0x847b827b, 0xbef6007b,
- 0xb8f03b05, 0x80708170,
- 0xbf0d9973, 0xbfa20002,
- 0x84708970, 0xbfa00001,
- 0x84708a70, 0xb8fa1e06,
- 0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xd71f0000, 0x000100c1,
- 0xd7200000, 0x000200c1,
- 0x16000084, 0x857d9973,
+ 0xbfa20004, 0xbef000ff,
+ 0x00000200, 0xbeff0080,
+ 0xbfa00003, 0xbef000ff,
+ 0x00000400, 0xbeff00c1,
+ 0xb8fb3b05, 0x807b817b,
+ 0x847b827b, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbefd0080, 0xbfa20012,
- 0xbe8300ff, 0x00000080,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbfa00011,
- 0xbe8300ff, 0x00000100,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000100, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20004,
- 0xbef000ff, 0x00000200,
- 0xbeff0080, 0xbfa00003,
- 0xbef000ff, 0x00000400,
- 0xbeff00c1, 0xb8fb3b05,
- 0x807b817b, 0x847b827b,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20017,
+ 0xbfa20017, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10037,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0685000, 0x701d0000,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0x807d847d, 0x8070ff70,
+ 0x00000200, 0xbf0a7b7d,
+ 0xbfa2ffef, 0xbfa00025,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10037, 0x7e008700,
+ 0xbfa10011, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0685000,
- 0x701d0000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
+ 0x701d0000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
0x701d0300, 0x807d847d,
- 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000400,
0xbf0a7b7d, 0xbfa2ffef,
- 0xbfa00025, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10011,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
+ 0xb8fb1e06, 0x8b7bc17b,
+ 0xbfa1000c, 0x847b837b,
+ 0x807b7d7b, 0xbefe00c1,
+ 0xbeff0080, 0x7e008700,
0xe0685000, 0x701d0000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
- 0x807d847d, 0x8070ff70,
- 0x00000400, 0xbf0a7b7d,
- 0xbfa2ffef, 0xb8fb1e06,
- 0x8b7bc17b, 0xbfa1000c,
- 0x847b837b, 0x807b7d7b,
- 0xbefe00c1, 0xbeff0080,
- 0x7e008700, 0xe0685000,
- 0x701d0000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff8,
- 0xbfa00141, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xb8f202dc,
- 0x84729972, 0x8b6eff7f,
- 0x04000000, 0xbfa1003a,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff8, 0xbfa00146,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xb8f202dc, 0x84729972,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1003a, 0xbefe00c1,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x8b6fc16f, 0xbfa1002f,
+ 0x846f866f, 0x846f826f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000c,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbfa0000b,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbef80080,
0xbefe00c1, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
- 0xb8ef4306, 0x8b6fc16f,
- 0xbfa1002f, 0x846f866f,
- 0x846f826f, 0xbef6006f,
- 0xb8f83b05, 0x80788178,
- 0xbf0d9972, 0xbfa20002,
- 0x84788978, 0xbfa00001,
- 0x84788a78, 0xb8ee1e06,
- 0x846e8a6e, 0x80786e78,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20024, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000c, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbfa0000b, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20024,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10050,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10050, 0xe0505000,
+ 0x781d0000, 0xe0505080,
+ 0x781d0100, 0xe0505100,
+ 0x781d0200, 0xe0505180,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xe0505000,
+ 0x6e1d0000, 0xe0505080,
+ 0x6e1d0100, 0xe0505100,
+ 0x6e1d0200, 0xe0505180,
+ 0x6e1d0300, 0xbf8903f7,
+ 0xbfa00034, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10012, 0xe0505000,
+ 0x781d0000, 0xe0505100,
+ 0x781d0100, 0xe0505200,
+ 0x781d0200, 0xe0505300,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000e,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
0xe0505000, 0x781d0000,
- 0xe0505080, 0x781d0100,
- 0xe0505100, 0x781d0200,
- 0xe0505180, 0x781d0300,
0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffee,
+ 0x807d817d, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff7, 0xbeff00c1,
0xe0505000, 0x6e1d0000,
- 0xe0505080, 0x6e1d0100,
- 0xe0505100, 0x6e1d0200,
- 0xe0505180, 0x6e1d0300,
- 0xbf8903f7, 0xbfa00034,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10012,
- 0xe0505000, 0x781d0000,
- 0xe0505100, 0x781d0100,
- 0xe0505200, 0x781d0200,
- 0xe0505300, 0x781d0300,
- 0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffee,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000e, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xe0505000,
- 0x781d0000, 0xbf8903f7,
- 0x7e008500, 0x807d817d,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff7,
- 0xbeff00c1, 0xe0505000,
- 0x6e1d0000, 0xe0505100,
- 0x6e1d0100, 0xe0505200,
- 0x6e1d0200, 0xe0505300,
- 0x6e1d0300, 0xbf8903f7,
+ 0xe0505100, 0x6e1d0100,
+ 0xe0505200, 0x6e1d0200,
+ 0xe0505300, 0x6e1d0300,
+ 0xbf8903f7, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef600ff,
+ 0x01000000, 0xbefd00ff,
+ 0x0000006c, 0x80f89078,
+ 0xf428403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd847d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0x80f8a078,
+ 0xf42c403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd887d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0x80f8c078,
+ 0xf430403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd907d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0xbe884308,
+ 0xbe8a430a, 0xbe8c430c,
+ 0xbe8e430e, 0xbf06807d,
+ 0xbfa1fff0, 0xb980f801,
+ 0x00000000, 0xbfbd0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbefd00ff, 0x0000006c,
- 0x80f89078, 0xf428403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd847d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0x80f8a078, 0xf42c403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd887d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0x80f8c078, 0xf430403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd907d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0xbe884308, 0xbe8a430a,
- 0xbe8c430c, 0xbe8e430e,
- 0xbf06807d, 0xbfa1fff0,
- 0xb980f801, 0x00000000,
- 0xbfbd0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xf4205bfa,
+ 0xf4205bfa, 0xf0000000,
+ 0x80788478, 0xf4205b3a,
0xf0000000, 0x80788478,
- 0xf4205b3a, 0xf0000000,
- 0x80788478, 0xf4205b7a,
+ 0xf4205b7a, 0xf0000000,
+ 0x80788478, 0xf4205c3a,
0xf0000000, 0x80788478,
- 0xf4205c3a, 0xf0000000,
- 0x80788478, 0xf4205c7a,
+ 0xf4205c7a, 0xf0000000,
+ 0x80788478, 0xf4205eba,
0xf0000000, 0x80788478,
- 0xf4205eba, 0xf0000000,
- 0x80788478, 0xf4205efa,
+ 0xf4205efa, 0xf0000000,
+ 0x80788478, 0xf4205e7a,
0xf0000000, 0x80788478,
- 0xf4205e7a, 0xf0000000,
- 0x80788478, 0xf4205cfa,
+ 0xf4205cfa, 0xf0000000,
+ 0x80788478, 0xf4205bba,
0xf0000000, 0x80788478,
+ 0xbf89fc07, 0xb96ef814,
0xf4205bba, 0xf0000000,
0x80788478, 0xbf89fc07,
- 0xb96ef814, 0xf4205bba,
- 0xf0000000, 0x80788478,
- 0xbf89fc07, 0xb96ef815,
- 0xbefd006f, 0xbefe0070,
- 0xbeff0071, 0x8b6f7bff,
- 0x000003ff, 0xb96f4803,
- 0x8b6f7bff, 0xfffff800,
- 0x856f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee3b05,
- 0x806e816e, 0xbf0d9972,
- 0xbfa20002, 0x846e896e,
- 0xbfa00001, 0x846e8a6e,
- 0xb8ef1e06, 0x846f8a6f,
- 0x806e6f6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x8b6fff6f,
- 0x0000ffff, 0xf4085c37,
- 0xf8000050, 0xf4085d37,
- 0xf8000060, 0xf4005e77,
- 0xf8000074, 0xbf89fc07,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb96ef815, 0xbefd006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x8b6f7bff, 0x000003ff,
+ 0xb96f4803, 0x8b6f7bff,
+ 0xfffff800, 0x856f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee3b05, 0x806e816e,
+ 0xbf0d9972, 0xbfa20002,
+ 0x846e896e, 0xbfa00001,
+ 0x846e8a6e, 0xb8ef1e06,
+ 0x846f8a6f, 0x806e6f6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x8b6fff6f, 0x0000ffff,
+ 0xf4085c37, 0xf8000050,
+ 0xf4085d37, 0xf8000060,
+ 0xf4005e77, 0xf8000074,
+ 0xbf89fc07, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb8eef802,
+ 0xbf0d866e, 0xbfa20002,
+ 0xb97af802, 0xbe80486c,
0xb97af802, 0xbe804a6c,
0xbfb00000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 250ab007399b..0f81670f6f9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -43,12 +43,14 @@
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
+#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
+var SQ_WAVE_STATUS_TRAP_EN_SHIFT = 6
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -183,6 +185,13 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+#if SW_SA_TRAP
+ // If ttmp1[31] is set then trap may occur early.
+ // Spin wait until SAVECTX exception is raised.
+ s_bitcmp1_b32 s_save_pc_hi, 31
+ s_cbranch_scc1 L_CHECK_SAVE
+#endif
+
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
s_cbranch_scc0 L_NOT_HALTED
@@ -1061,8 +1070,20 @@ L_RESTORE_HWREG:
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+#if SW_SA_TRAP
+ // If traps are enabled then return to the shader with PRIV=0.
+ // Otherwise retain PRIV=1 for subsequent context save requests.
+ s_getreg_b32 s_restore_tmp, hwreg(HW_REG_STATUS)
+ s_bitcmp1_b32 s_restore_tmp, SQ_WAVE_STATUS_TRAP_EN_SHIFT
+ s_cbranch_scc1 L_RETURN_WITHOUT_PRIV
+
s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ s_setpc_b64 [s_restore_pc_lo, s_restore_pc_hi]
+L_RETURN_WITHOUT_PRIV:
+#endif
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
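
The restore-path change keys off STATUS.TRAP_EN (bit 6): if traps are enabled the handler returns to the shader via s_setpc_b64 with PRIV dropped, otherwise it keeps PRIV and uses s_rfe_b64 as before. Expressed as a C bit test purely for illustration (the real logic is the assembly above):

#define SQ_WAVE_STATUS_TRAP_EN_SHIFT 6

/* Sketch: decide whether the handler drops PRIV on return to the shader. */
static bool return_without_priv(unsigned int status)
{
        return status & (1u << SQ_WAVE_STATUS_TRAP_EN_SHIFT);
}
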
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2b3d8bc8f0aa..5feaba6a77de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -327,6 +327,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+ err = -ENOMEM;
+ goto err_alloc_doorbells;
+ }
+
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
@@ -404,6 +410,7 @@ err_create_queue:
if (wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
+err_alloc_doorbells:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
@@ -869,14 +876,11 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_wait_events_args *args = data;
- int err;
- err = kfd_wait_on_events(p, args->num_events,
+ return kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &args->wait_result);
-
- return err;
+ &args->timeout, &args->wait_result);
}
static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
struct kfd_process *p, void *data)
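
Passing &args->timeout down means that when the wait is interrupted by a signal and the ioctl is restarted, the caller sees only the time that was still left rather than the full original timeout. A small sketch of that bookkeeping, assuming a jiffies-to-milliseconds conversion and the two sentinel values KFD uses for immediate and infinite waits (the names here are placeholders):

#define KFD_TIMEOUT_IMMEDIATE 0u                /* placeholder sentinel values */
#define KFD_TIMEOUT_INFINITE  0xffffffffu

/* Sketch: shrink the user-visible timeout before a syscall restart. */
static void update_timeout_for_restart(unsigned int *user_timeout_ms,
                                       long remaining_jiffies,
                                       unsigned int msecs_per_jiffy)
{
        long remaining;

        if (*user_timeout_ms == KFD_TIMEOUT_IMMEDIATE ||
            *user_timeout_ms == KFD_TIMEOUT_INFINITE)
                return;                         /* sentinels pass through untouched */

        remaining = remaining_jiffies - 1;      /* mirrors max(0l, timeout - 1) */
        if (remaining < 0)
                remaining = 0;

        *user_timeout_ms = (unsigned int)(remaining * msecs_per_jiffy);
}
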
@@ -1092,6 +1096,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
offset = kfd_get_process_doorbells(pdd);
+ if (!offset) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
if (args->size != PAGE_SIZE) {
err = -EINVAL;
@@ -1576,6 +1584,8 @@ static int kfd_ioctl_smi_events(struct file *filep,
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
static int kfd_ioctl_set_xnack_mode(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1586,22 +1596,29 @@ static int kfd_ioctl_set_xnack_mode(struct file *filep,
if (args->xnack_enabled >= 0) {
if (!list_empty(&p->pqm.queues)) {
pr_debug("Process has user queues running\n");
- mutex_unlock(&p->mutex);
- return -EBUSY;
+ r = -EBUSY;
+ goto out_unlock;
}
- if (args->xnack_enabled && !kfd_process_xnack_mode(p, true))
+
+ if (p->xnack_enabled == args->xnack_enabled)
+ goto out_unlock;
+
+ if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
r = -EPERM;
- else
- p->xnack_enabled = args->xnack_enabled;
+ goto out_unlock;
+ }
+
+ r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
} else {
args->xnack_enabled = p->xnack_enabled;
}
+
+out_unlock:
mutex_unlock(&p->mutex);
return r;
}
-#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_svm_args *args = data;
@@ -1621,6 +1638,11 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
return r;
}
#else
+static int kfd_ioctl_set_xnack_mode(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ return -EPERM;
+}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
return -EPERM;
@@ -2145,6 +2167,12 @@ static int criu_restore_devices(struct kfd_process *p,
ret = PTR_ERR(pdd);
goto exit;
}
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
}
/*
@@ -2173,6 +2201,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
+ if (!offset)
+ return -ENOMEM;
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
/* MMIO BOs need remapped bus address */
if (bo_bucket->size != PAGE_SIZE) {
@@ -2847,7 +2877,6 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
- int ret;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2867,12 +2896,11 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
process->pasid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
- ret = io_remap_pfn_range(vma,
+ return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
PAGE_SIZE,
vma->vm_page_prot);
- return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index a5409531a2fd..cd5f8b219bf9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1522,6 +1522,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
pcache_info = cache_info;
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev, pcache_info);
@@ -2283,7 +2284,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
/* Fill in Subtype: IO_LINKS
* Only direct links are added here which is Link from GPU to
- * to its NUMA node. Indirect links are added by userspace.
+ * its NUMA node. Indirect links are added by userspace.
*/
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
cache_mem_filled);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f5853835f03a..65a1d4f9004b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -91,6 +91,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
kfd->device_info.num_sdma_queues_per_engine = 8;
break;
default:
@@ -102,13 +103,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
- case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
break;
+ case IP_VERSION(6, 0, 1):
+ /* Reserve 1 for paging and 1 for gfx */
+ kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+ /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+ kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ break;
default:
break;
}
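
The reserved bitmap is engine-major: with two SDMA engines, BIT(0) is engine-0 queue-0, BIT(1) is engine-1 queue-0, BIT(2) is engine-0 queue-1, and so on, which is why reserving one paging and one gfx queue per engine needs 0xF there but only 0x3 for the single-engine 6.0.1 layout described above. A small decoding sketch of that layout (illustrative helper, not part of the driver):

/*
 * Sketch: decode a reserved-SDMA bit into (engine, queue) for the
 * engine-major layout described in the comments above.
 */
struct sdma_slot {
        unsigned int engine;
        unsigned int queue;
};

static struct sdma_slot sdma_bit_to_slot(unsigned int bit,
                                         unsigned int num_engines)
{
        struct sdma_slot slot = {
                .engine = bit % num_engines,
                .queue  = bit / num_engines,
        };

        return slot;    /* 2 engines: bits 0-3 cover queue-0/1 on both -> 0xF */
}
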
@@ -145,6 +152,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
default:
@@ -377,12 +385,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
- gfx_target_version = 100306;
- if (!vf)
- f2g = &gfx_v10_3_kfd2kgd;
- break;
case IP_VERSION(10, 3, 7):
- gfx_target_version = 100307;
+ gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
@@ -398,6 +402,11 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110002;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 0, 3):
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e83725a28106..ecb4c3abc629 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.is_kfd_process = 1;
+ queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+ queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
@@ -1240,6 +1242,24 @@ static void init_interrupts(struct device_queue_manager *dqm)
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
}
+static void init_sdma_bitmaps(struct device_queue_manager *dqm)
+{
+ unsigned int num_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
+ get_num_sdma_queues(dqm));
+ unsigned int num_xgmi_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
+ get_num_xgmi_sdma_queues(dqm));
+
+ if (num_sdma_queues)
+ dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
+ if (num_xgmi_sdma_queues)
+ dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
+
+ dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
+ pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
+}
+
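
init_sdma_bitmaps() consolidates the setup both paths used to duplicate: clamp the queue count to the width of the bitmap, build the mask with GENMASK_ULL() only when the count is non-zero (the old ~0ULL >> (64 - n) form shifts by 64 when n is 0, which is undefined), and always mask out the reserved queues. A hedged userspace-style sketch of the same pattern:

/*
 * Sketch: build an n-queue availability mask safely, then hide reserved
 * queues. genmask_ull() mimics the kernel's GENMASK_ULL(h, l).
 */
static unsigned long long genmask_ull(unsigned int h, unsigned int l)
{
        return (~0ULL >> (63 - h)) & (~0ULL << l);
}

static unsigned long long sdma_bitmap(unsigned int num_queues,
                                      unsigned long long reserved)
{
        unsigned long long map = 0;

        if (num_queues > 64)
                num_queues = 64;                /* clamp to the bitmap width */
        if (num_queues)
                map = genmask_ull(num_queues - 1, 0);

        return map & ~reserved;                 /* paging/gfx queues stay hidden */
}
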
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
int pipe, queue;
@@ -1268,11 +1288,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
- dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
+ init_sdma_bitmaps(dqm);
return 0;
}
@@ -1450,9 +1466,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
- uint64_t num_sdma_queues;
- uint64_t num_xgmi_sdma_queues;
-
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
@@ -1461,24 +1474,10 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
-
- num_sdma_queues = get_num_sdma_queues(dqm);
- if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
- dqm->sdma_bitmap = ULLONG_MAX;
- else
- dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
-
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
- if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
- dqm->xgmi_sdma_bitmap = ULLONG_MAX;
- else
- dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
-
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ init_sdma_bitmaps(dqm);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cb3d2ccc5100..cd4e61bf0493 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -157,6 +157,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(pdd);
+ if (!address)
+ return -ENOMEM;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
@@ -275,6 +277,13 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
+ if (!pdd->doorbell_index) {
+ int r = kfd_alloc_process_doorbells(pdd->dev,
+ &pdd->doorbell_index);
+ if (r)
+ return 0;
+ }
+
return pdd->dev->doorbell_base +
pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
}
@@ -294,6 +303,9 @@ int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_inde
if (r > 0)
*doorbell_index = r;
+ if (r < 0)
+ pr_err("Failed to allocate process doorbells\n");
+
return r;
}
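
kfd_get_process_doorbells() now allocates the per-process doorbell page lazily on first use and signals failure by returning physical address 0, which the callers added in this series translate into -ENOMEM. The shape of that pattern, sketched with placeholder types and a stub allocator (not the KFD implementation):

/* Sketch: lazy allocation with 0 as the "no doorbells" sentinel. */
struct pdd_sketch {
        unsigned int doorbell_index;            /* 0 means "not allocated yet" */
        unsigned long long doorbell_base;
        unsigned long long slice_size;
};

static int alloc_doorbells(struct pdd_sketch *pdd)
{
        /* stand-in for kfd_alloc_process_doorbells(); always succeeds here */
        pdd->doorbell_index = 1;
        return 0;
}

static unsigned long long get_process_doorbells(struct pdd_sketch *pdd)
{
        if (!pdd->doorbell_index && alloc_doorbells(pdd))
                return 0;                       /* callers map this to -ENOMEM */

        return pdd->doorbell_base +
               pdd->doorbell_index * pdd->slice_size;
}
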
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3942a56c28bb..83e3ce9f6049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
return msecs_to_jiffies(user_timeout_ms) + 1;
}
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+ bool undo_auto_reset)
{
uint32_t i;
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
+ if (undo_auto_reset && waiters[i].activated &&
+ waiters[i].event && waiters[i].event->auto_reset)
+ set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
- long timeout = user_timeout_to_jiffies(user_timeout_ms);
+ long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
}
if (signal_pending(current)) {
- /*
- * This is wrong when a nonzero, non-infinite timeout
- * is specified. We need to use
- * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
- * contains a union with data for each user and it's
- * in generic kernel code that I don't want to
- * touch yet.
- */
ret = -ERESTARTSYS;
+ if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+ *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+ *user_timeout_ms = jiffies_to_msecs(
+ max(0l, timeout-1));
break;
}
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
event_waiters, events);
out_unlock:
- free_waiters(num_events, event_waiters);
+ free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
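
free_waiters() gains an undo_auto_reset flag: when the wait is torn down for -ERESTARTSYS, any auto-reset event that a waiter had already consumed is signalled again so the restarted wait does not lose it. A minimal sketch of that undo pass, using simplified waiter and event structures in place of the KFD ones:

/* Sketch: re-arm auto-reset events consumed by a wait that will restart. */
struct event_sketch {
        bool signaled;
        bool auto_reset;
};

struct waiter_sketch {
        struct event_sketch *event;
        bool activated;                 /* this waiter already consumed the event */
};

static void undo_auto_reset_events(struct waiter_sketch *waiters,
                                   unsigned int num_events, bool restarting)
{
        unsigned int i;

        for (i = 0; i < num_events; i++) {
                if (restarting && waiters[i].activated &&
                    waiters[i].event && waiters[i].event->auto_reset)
                        waiters[i].event->signaled = true;      /* set_event() */
        }
}
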
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index a6fcbeeb7428..0d53f6067422 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
- NULL, 0))*/)
- return;
+ NULL, 0)))
+ return;*/
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b059a77b6081..c70c026c9a93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -322,12 +322,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
for (i = j = 0; i < npages; i++) {
struct page *spage;
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
@@ -522,9 +523,6 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
prange->start, prange->last, best_loc);
- /* FIXME: workaround for page locking bug with invalid pages */
- svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
-
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
@@ -886,7 +884,7 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
unsigned long addr = vmf->address;
- struct vm_area_struct *vma;
+ struct svm_range_bo *svm_bo;
enum svm_work_list_ops op;
struct svm_range *parent;
struct svm_range *prange;
@@ -894,29 +892,42 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
struct mm_struct *mm;
int r = 0;
- vma = vmf->vma;
- mm = vma->vm_mm;
+ svm_bo = vmf->page->zone_device_data;
+ if (!svm_bo) {
+ pr_debug("failed get device page at addr 0x%lx\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+ if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+
+ mm = svm_bo->eviction_fence->mm;
+ if (mm != vmf->vma->vm_mm)
+ pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
- p = kfd_lookup_process_by_mm(vma->vm_mm);
+ p = kfd_lookup_process_by_mm(mm);
if (!p) {
pr_debug("failed find process at fault address 0x%lx\n", addr);
- return VM_FAULT_SIGBUS;
+ r = VM_FAULT_SIGBUS;
+ goto out_mmput;
}
if (READ_ONCE(p->svms.faulting_task) == current) {
pr_debug("skipping ram migration\n");
- kfd_unref_process(p);
- return 0;
+ r = 0;
+ goto out_unref_process;
}
- addr >>= PAGE_SHIFT;
+
pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+ addr >>= PAGE_SHIFT;
mutex_lock(&p->svms.lock);
prange = svm_range_from_addr(&p->svms, addr, &parent);
if (!prange) {
- pr_debug("cannot find svm range at 0x%lx\n", addr);
+ pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
- goto out;
+ goto out_unlock_svms;
}
mutex_lock(&parent->migrate_mutex);
@@ -938,10 +949,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
goto out_unlock_prange;
}
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
if (r)
- pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
- prange, prange->start, prange->last);
+ pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
+ r, prange->svms, prange, prange->start, prange->last);
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && parent == prange)
@@ -955,9 +967,12 @@ out_unlock_prange:
if (prange != parent)
mutex_unlock(&prange->migrate_mutex);
mutex_unlock(&parent->migrate_mutex);
-out:
+out_unlock_svms:
mutex_unlock(&p->svms.lock);
+out_unref_process:
kfd_unref_process(p);
+out_mmput:
+ mmput(mm);
pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index b8e14c2cc295..26b53b6d673e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -177,14 +181,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
-static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- struct queue_properties *p, struct mm_struct *mms)
-{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
- queue_id, p->doorbell_off);
-}
-
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
@@ -256,31 +252,6 @@ static uint32_t read_doorbell_id(void *mqd)
return m->queue_doorbell_id0;
}
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->adev, mqd, type, timeout,
- pipe_id, queue_id);
-}
-
-static void free_mqd(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->adev, queue_address,
- pipe_id, queue_id);
-}
-
static int get_wave_state(struct mqd_manager *mm, void *mqd,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
@@ -349,15 +320,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm->update_mqd(mm, m, q, NULL);
}
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- struct queue_properties *p, struct mm_struct *mms)
-{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
- (uint32_t __user *)p->write_ptr,
- mms);
-}
-
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -389,25 +351,6 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-/*
- * * preempt type here is ignored because there is only one way
- * * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
-}
-
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
-}
-
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
@@ -445,11 +388,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
- mqd->free_mqd = free_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
@@ -462,10 +405,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -476,11 +419,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
- mqd->free_mqd = free_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -491,10 +434,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd_sdma;
+ mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
- mqd->destroy_mqd = destroy_mqd_sdma;
- mqd->is_occupied = is_occupied_sdma;
+ mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+ mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
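
Note: the per-ASIC load/free/destroy/is_occupied callbacks deleted above are replaced by shared kfd_* helpers from the common MQD manager code. Those helpers are expected to be thin wrappers over the same kfd2kgd interface as the removed code; a sketch (not the verbatim kernel source):

	static int kfd_destroy_mqd_cp_sketch(struct mqd_manager *mm, void *mqd,
					     enum kfd_preempt_type type,
					     unsigned int timeout, uint32_t pipe_id,
					     uint32_t queue_id)
	{
		return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type,
						     timeout, pipe_id, queue_id);
	}

	static void kfd_free_mqd_cp_sketch(struct mqd_manager *mm, void *mqd,
					   struct kfd_mem_obj *mqd_mem_obj)
	{
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}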
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d03a3b9c9c5d..bf610e3b683b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 6c83a519b3a1..951b63677248 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1499,11 +1499,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
- if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
- pr_err("Failed to alloc doorbell for pdd\n");
- goto err_free_pdd;
- }
-
if (init_doorbell_bitmap(&pdd->qpd, dev)) {
pr_err("Failed to init doorbell for process\n");
goto err_free_pdd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6e3e7f54381b..5137476ec18e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -857,6 +857,13 @@ int kfd_criu_restore_queue(struct kfd_process *p,
ret = -EINVAL;
goto exit;
}
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
/* data stored in this order: mqd, ctl_stack */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a67ba8879a56..f5913ba22174 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -278,7 +278,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
svm_range_free_dma_mappings(prange);
if (update_mem_usage && !p->xnack_enabled) {
- pr_debug("unreserve mem limit: %lld\n", size);
+ pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
}
@@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
- svm_bo->svms = prange->svms;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -2957,6 +2956,64 @@ out:
return r;
}
+int
+svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
+{
+ struct svm_range *prange, *pchild;
+ uint64_t reserved_size = 0;
+ uint64_t size;
+ int r = 0;
+
+ pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
+
+ mutex_lock(&p->svms.lock);
+
+ list_for_each_entry(prange, &p->svms.list, list) {
+ svm_range_lock(prange);
+ list_for_each_entry(pchild, &prange->child_list, child_list) {
+ size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+ }
+
+ size = (prange->last - prange->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+out_unlock:
+ svm_range_unlock(prange);
+ if (r)
+ break;
+ }
+
+ if (r)
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ else
+ /* Change xnack mode must be inside svms lock, to avoid race with
+ * svm_range_deferred_list_work unreserve memory in parallel.
+ */
+ p->xnack_enabled = xnack_enabled;
+
+ mutex_unlock(&p->svms.lock);
+ return r;
+}
+
void svm_range_list_fini(struct kfd_process *p)
{
struct svm_range *prange;
@@ -3182,28 +3239,6 @@ out:
return best_loc;
}
-/* FIXME: This is a workaround for page locking bug when some pages are
- * invalid during migration to VRAM
- */
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
- void *owner)
-{
- struct hmm_range *hmm_range;
- int r;
-
- if (prange->validated_once)
- return;
-
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- prange->start << PAGE_SHIFT,
- prange->npages, &hmm_range,
- false, true, owner);
- if (!r) {
- amdgpu_hmm_range_get_pages_done(hmm_range);
- prange->validated_once = true;
- }
-}
-
/* svm_range_trigger_migration - start page migration if prefetch loc changed
* @mm: current process mm_struct
* @prange: svm range structure
@@ -3273,7 +3308,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
- struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
@@ -3281,13 +3315,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
- /* svm_range_bo_release destroys this worker thread. So during
- * the lifetime of this thread, kfd_process and mm will be valid.
- */
- p = container_of(svm_bo->svms, struct kfd_process, svms);
- mm = p->mm;
- if (!mm)
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+ } else {
+ svm_range_bo_unref(svm_bo);
return;
+ }
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
@@ -3305,8 +3338,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
- r = svm_migrate_vram_to_ram(prange,
- svm_bo->eviction_fence->mm,
+ r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION);
} while (!r && prange->actual_loc && --retries);
@@ -3324,6 +3356,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
+ mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
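
Note: svm_range_switch_xnack_reserve_mem() either converts the USERPTR accounting for every range and child range (rolling back partial reservations on failure) and flips p->xnack_enabled under svms.lock, or fails and leaves both untouched. A sketch of a call site; the ioctl plumbing is assumed, not part of this hunk:

	static int kfd_set_xnack_mode_sketch(struct kfd_process *p, bool enable)
	{
		int r = 0;

		if (p->xnack_enabled != enable)
			r = svm_range_switch_xnack_reserve_mem(p, enable);

		return r;	/* 0 on success, negative if the reservation failed */
	}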
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9156b041ef17..7a33b93f9df6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -46,7 +46,6 @@ struct svm_range_bo {
spinlock_t list_lock;
struct amdgpu_amdkfd_fence *eviction_fence;
struct work_struct eviction_work;
- struct svm_range_list *svms;
uint32_t evicting;
struct work_struct release_work;
};
@@ -182,8 +181,6 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
- void *owner);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -206,6 +203,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
void svm_range_set_max_pages(struct amdgpu_device *adev);
+int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);
#else
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 25990bec600d..3f0a4a415907 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
+ struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
- struct kfd_iolink_properties *gpu_link, *cpu_link;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
continue;
/* find CPU <--> CPU links */
+ cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
- list_for_each_entry(cpu_link,
+ list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
- if (cpu_link->node_to == gpu_link->node_to)
+ if (tmp_link->node_to == gpu_link->node_to) {
+ cpu_link = tmp_link;
break;
+ }
}
}
- if (cpu_link->node_to != gpu_link->node_to)
+ if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node*/
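
Note: the fix above addresses a classic list_for_each_entry() pitfall: the cursor never becomes NULL, so it must not be tested after the loop to decide whether a match was found. Generic form of the pattern, with target_node as an illustrative variable:

	struct kfd_iolink_properties *found = NULL, *it;

	list_for_each_entry(it, &cpu_dev->io_link_props, list) {
		if (it->node_to == target_node) {	/* illustrative condition */
			found = it;
			break;
		}
	}
	if (!found)
		return -ENOMEM;	/* no matching CPU link; mirrors the hunk's error code */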
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 413d8c6d592f..6925e0280dbe 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -28,7 +28,6 @@ config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
depends on DRM_AMDGPU_SI
depends on DRM_AMD_DC
- default n
help
Choose this option to enable new AMD DC support for SI asics
by default. This includes Tahiti, Pitcairn, Cape Verde, Oland.
@@ -43,7 +42,6 @@ config DEBUG_KERNEL_DC
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
- default n
depends on DEBUG_FS
depends on DRM_AMD_DC_DCN
help
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8660d93cc405..4c73727e0b7d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -88,6 +88,9 @@
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -97,8 +100,6 @@
#include "soc15_common.h"
#include "vega10_ip_offset.h"
-#include "soc15_common.h"
-
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -1295,13 +1296,21 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
if (hpd_rx_offload_wq[i].wq == NULL) {
DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
- return NULL;
+ goto out_err;
}
spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
}
return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
}
struct amdgpu_stutter_quirk {
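
Note: the error path added to hpd_rx_irq_create_workqueue() unwinds whatever was already created instead of leaking it on a mid-loop failure. The same allocate-all-or-unwind pattern in isolation; nr and the wq array are placeholders:

	for (i = 0; i < nr; i++) {
		wq[i].wq = create_singlethread_workqueue("hpd_rx_offload_wq");
		if (!wq[i].wq)
			goto out_err;
		spin_lock_init(&wq[i].offload_lock);
	}
	return wq;

	out_err:
		/* Tear down only the entries that were actually created. */
		for (i = 0; i < nr; i++)
			if (wq[i].wq)
				destroy_workqueue(wq[i].wq);
		kfree(wq);
		return NULL;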
@@ -1529,7 +1538,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
- adev->dm.dc->debug.disable_dsc_edp = true;
}
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
@@ -2807,20 +2815,18 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
- .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
- u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct dc_link *link = NULL;
- static const u8 pre_computed_values[] = {
- 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
- 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+ struct drm_luminance_range_info *luminance_range;
int i;
if (!aconnector || !aconnector->dc_link)
@@ -2842,8 +2848,6 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps[i];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
- min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
@@ -2855,31 +2859,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
else if (amdgpu_backlight == 1)
caps->aux_support = true;
- /* From the specification (CTA-861-G), for calculating the maximum
- * luminance we need to use:
- * Luminance = 50*2**(CV/32)
- * Where CV is a one-byte value.
- * For calculating this expression we may need float point precision;
- * to avoid this complexity level, we take advantage that CV is divided
- * by a constant. From the Euclids division algorithm, we know that CV
- * can be written as: CV = 32*q + r. Next, we replace CV in the
- * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
- * need to pre-compute the value of r/32. For pre-computing the values
- * We just used the following Ruby line:
- * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
- * The results of the above expressions can be verified at
- * pre_computed_values.
- */
- q = max_avg >> 5;
- r = max_avg % 32;
- max = (1 << q) * pre_computed_values[r];
-
- // min luminance: maxLum * (CV/255)^2 / 100
- q = DIV_ROUND_CLOSEST(min_cll, 255);
- min = max * DIV_ROUND_CLOSEST((q * q), 100);
-
- caps->aux_max_input_signal = max;
- caps->aux_min_input_signal = min;
+ luminance_range = &conn_base->display_info.luminance_range;
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
}
void amdgpu_dm_update_connector_after_detect(
@@ -3825,8 +3807,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4055,6 +4040,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
+ }
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
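
Note: the early return added above implements a simple ownership policy: register the native amdgpu backlight only when the ACPI heuristics say native control should be used, otherwise hand the job to the ACPI video driver. The policy in isolation:

	#include <acpi/video.h>

	if (!acpi_video_backlight_use_native()) {
		/* Firmware prefers ACPI control; register that device instead. */
		acpi_video_register_backlight();
		return;
	}
	/* Otherwise fall through and register the native backlight device. */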
@@ -4135,6 +4127,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
/*
* In this architecture, the association
@@ -4326,6 +4319,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
+ amdgpu_set_panel_orientation(&aconnector->base);
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -4754,7 +4748,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
- plane_info->layer_index = 0;
+ plane_info->layer_index = plane_state->normalized_zpos;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
@@ -4822,7 +4816,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->layer_index = plane_info.layer_index;
dc_plane_state->flip_int_enabled = true;
/*
@@ -5624,7 +5618,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
@@ -6316,10 +6311,17 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
drm_atomic_get_old_connector_state(state, conn);
struct drm_crtc *crtc = new_con_state->crtc;
struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
trace_amdgpu_dm_connector_atomic_check(new_con_state);
+ if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
if (!crtc)
return 0;
@@ -6403,6 +6405,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
enum dc_color_depth color_depth;
int clock, bpp = 0;
bool is_y420 = false;
@@ -6416,6 +6419,13 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
return 0;
+ mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ if (!mst_state->pbn_div)
+ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
@@ -6427,11 +6437,10 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
clock = adjusted_mode->clock;
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
}
- dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
- mst_mgr,
- mst_port,
- dm_new_connector_state->pbn,
- dm_mst_get_pbn_divider(aconnector->dc_link));
+
+ dm_new_connector_state->vcpi_slots =
+ drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+ dm_new_connector_state->pbn);
if (dm_new_connector_state->vcpi_slots < 0) {
DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
return dm_new_connector_state->vcpi_slots;
@@ -6501,18 +6510,12 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
- drm_dp_mst_atomic_enable_dsc(state,
- aconnector->port,
- dm_conn_state->pbn,
- 0,
+ drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
false);
continue;
}
- vcpi = drm_dp_mst_atomic_enable_dsc(state,
- aconnector->port,
- pbn, pbn_div,
- true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
if (vcpi < 0)
return vcpi;
@@ -6684,6 +6687,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
@@ -6728,8 +6735,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
-
- amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -7380,11 +7385,6 @@ static void update_freesync_state_on_stream(
&vrr_infopacket,
pack_sdp_v1_3);
- new_crtc_state->freesync_timing_changed |=
- (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
- &vrr_params.adjust,
- sizeof(vrr_params.adjust)) != 0);
-
new_crtc_state->freesync_vrr_info_changed |=
(memcmp(&new_crtc_state->vrr_infopacket,
&vrr_infopacket,
@@ -7393,7 +7393,6 @@ static void update_freesync_state_on_stream(
acrtc->dm_irq_params.vrr_params = vrr_params;
new_crtc_state->vrr_infopacket = vrr_infopacket;
- new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
new_stream->vrr_infopacket = vrr_infopacket;
if (new_crtc_state->freesync_vrr_info_changed)
@@ -7456,10 +7455,6 @@ static void update_stream_irq_parameters(
new_stream,
&config, &vrr_params);
- new_crtc_state->freesync_timing_changed |=
- (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
- &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
-
new_crtc_state->freesync_config = config;
/* Copy state for access from DM IRQ handler */
acrtc->dm_irq_params.freesync_config = config;
@@ -7985,6 +7980,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_ERROR("Waiting for fences timed out!");
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
dm_state = dm_atomic_get_new_state(state);
if (dm_state && dm_state->context) {
@@ -8383,7 +8379,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_release_state(dc_state_temp);
}
-
static int dm_force_atomic_commit(struct drm_connector *connector)
{
int ret = 0;
@@ -9314,6 +9309,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ *
* @dev: The DRM device
* @state: The atomic state to commit
*
@@ -9354,8 +9350,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
- struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_topology_mgr *mgr;
#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -9372,9 +9366,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
/* Skip connectors that are disabled or part of modeset already. */
- if (!old_con_state->crtc && !new_con_state->crtc)
- continue;
-
if (!new_con_state->crtc)
continue;
@@ -9478,6 +9469,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ drm_atomic_normalize_zpos(dev, state);
+
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -9594,33 +9593,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* set the slot info for each mst_state based on the link encoding format */
- for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- struct amdgpu_dm_connector *aconnector;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u8 link_coding_cap;
-
- if (!mgr->mst_state )
- continue;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- int id = connector->index;
-
- if (id == mst_state->mgr->conn_base_id) {
- aconnector = to_amdgpu_dm_connector(connector);
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
- drm_dp_mst_update_slots(mst_state, link_coding_cap);
-
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- }
-#endif
/**
* Streams and planes are reset when there are changes that affect
* bandwidth. Anything that affects bandwidth needs to go through
@@ -9928,8 +9900,19 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
return valid_vsdb_found ? i : -ENODEV;
}
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep
+ * track of some of the display information in the internal data struct used by
+ * amdgpu_dm. This function checks which type of connector we need to set the
+ * FreeSync parameters.
+ */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid)
{
int i = 0;
struct detailed_timing *timing;
@@ -9942,8 +9925,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
- bool freesync_capable = false;
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
@@ -9972,7 +9955,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
-
if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
|| sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 90b306a1dd68..b5ce15c43bcc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -598,6 +598,10 @@ struct amdgpu_dm_connector {
* The 'current' sink is in dc_link->sink. */
struct dc_sink *dc_sink;
struct dc_link *dc_link;
+
+ /**
+ * @dc_em_sink: Reference to the emulated (virtual) sink.
+ */
struct dc_sink *dc_em_sink;
/* DM only */
@@ -610,7 +614,16 @@ struct amdgpu_dm_connector {
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
- int min_vfreq ;
+ /**
+ * @min_vfreq: Minimal frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
+ int min_vfreq;
+
+ /**
+ * @max_vfreq: Maximum frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
int max_vfreq ;
int pixel_clock_mhz;
@@ -668,7 +681,6 @@ struct dm_crtc_state {
int crc_skip_count;
- bool freesync_timing_changed;
bool freesync_vrr_info_changed;
bool dsc_force_changed;
@@ -705,11 +717,34 @@ struct dm_connector_state {
uint64_t pbn;
};
+/**
+ * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
+ *
+ * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
+ * struct is useful to keep track of the display-specific information about
+ * FreeSync.
+ */
struct amdgpu_hdmi_vsdb_info {
- unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */
- bool freesync_supported; /* FreeSync Supported */
- unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */
- unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */
+ /**
+ * @amd_vsdb_version: Vendor Specific Data Block Version, should be
+ * used to determine which Vendor Specific InfoFrame (VSIF) to send.
+ */
+ unsigned int amd_vsdb_version;
+
+ /**
+ * @freesync_supported: FreeSync Supported.
+ */
+ bool freesync_supported;
+
+ /**
+ * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
+ */
+ unsigned int min_refresh_rate_hz;
+
+ /**
+ * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
+ */
+ unsigned int max_refresh_rate_hz;
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a71177305bcd..a4cb23d059bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -29,7 +29,9 @@
#include "modules/color/color_gamma.h"
#include "basics/conversion.h"
-/*
+/**
+ * DOC: overview
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -71,8 +73,8 @@
#define MAX_DRM_LUT_VALUE 0xFFFF
-/*
- * Initialize the color module.
+/**
+ * amdgpu_dm_init_color_mod - Initialize the color module.
*
* We're not using the full color module, only certain components.
* Only call setup functions for components that we need.
@@ -82,7 +84,14 @@ void amdgpu_dm_init_color_mod(void)
setup_x_points_distribution();
}
-/* Extracts the DRM lut and lut size from a blob. */
+/**
+ * __extract_blob_lut - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
static const struct drm_color_lut *
__extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
{
@@ -90,13 +99,18 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
return blob ? (struct drm_color_lut *)blob->data : NULL;
}
-/*
- * Return true if the given lut is a linear mapping of values, i.e. it acts
- * like a bypass LUT.
+/**
+ * __is_lut_linear - check if the given lut is a linear mapping of values
+ * @lut: given lut to check values
+ * @size: lut size
*
* It is considered linear if the lut represents:
- * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in
- * [0, MAX_COLOR_LUT_ENTRIES)
+ * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in [0,
+ * MAX_COLOR_LUT_ENTRIES)
+ *
+ * Returns:
+ * True if the given lut is a linear mapping of values, i.e. it acts like a
+ * bypass LUT. Otherwise, false.
*/
static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
{
@@ -119,9 +133,13 @@ static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
return true;
}
-/*
- * Convert the drm_color_lut to dc_gamma. The conversion depends on the size
- * of the lut - whether or not it's legacy.
+/**
+ * __drm_lut_to_dc_gamma - convert the drm_color_lut to dc_gamma.
+ * @lut: DRM lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ * @is_legacy: legacy or atomic gamma
+ *
+ * The conversion depends on the size of the lut - whether or not it's legacy.
*/
static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
struct dc_gamma *gamma, bool is_legacy)
@@ -154,8 +172,11 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
}
-/*
- * Converts a DRM CTM to a DC CSC float matrix.
+/**
+ * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
+ * @ctm: DRM color transformation matrix
+ * @matrix: DC CSC float matrix
+ *
* The matrix needs to be a 3x4 (12 entry) matrix.
*/
static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
@@ -189,7 +210,18 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
}
}
-/* Calculates the legacy transfer function - only for sRGB input space. */
+/**
+ * __set_legacy_tf - Calculates the legacy transfer function
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Only for sRGB input space
+ *
+ * Returns:
+ * 0 in case of success, -ENOMEM if fails
+ */
static int __set_legacy_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size,
bool has_rom)
@@ -218,7 +250,16 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-/* Calculates the output transfer function based on expected input space. */
+/**
+ * __set_output_tf - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
static int __set_output_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size,
bool has_rom)
@@ -262,7 +303,16 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-/* Caculates the input transfer function based on expected input space. */
+/**
+ * __set_input_tf - calculates the input transfer function based on expected
+ * input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
static int __set_input_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size)
{
@@ -285,13 +335,14 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
- * amdgpu_dm_verify_lut_sizes
+ * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes
* @crtc_state: the DRM CRTC state
*
- * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
- * the expected size.
+ * Verifies that the Degamma and Gamma LUTs attached to the &crtc_state
+ * are of the expected size.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. -EINVAL if any lut sizes are invalid.
*/
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
{
@@ -327,9 +378,9 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
* of the HW blocks as long as the CRTC CTM always comes before the
* CRTC RGM and after the CRTC DGM.
*
- * The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
@@ -338,7 +389,8 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
* management at once we have to either restrict the usage of CRTC properties
* or blend adjustments together.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. Error code if setup fails.
*/
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
{
@@ -393,7 +445,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
if (r)
return r;
} else if (has_regamma) {
- /* CRTC RGM goes into RGM LUT. */
+ /* If atomic regamma, CRTC RGM goes into RGM LUT. */
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
@@ -450,9 +502,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* Update the underlying dc_stream_state's input transfer function (ITF) in
* preparation for hardware commit. The transfer function used depends on
- * the prepartion done on the stream for color management.
+ * the preparation done on the stream for color management.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. -ENOMEM if mem allocation fails.
*/
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0e48824f55e3..ee242d9d8b06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3288,6 +3288,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
+ dput(dir);
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
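
Note: the added dput(dir) pairs with the reference that debugfs_lookup() takes on the dentry it returns, which is what the one-line fix implies dir came from (an assumption; the lookup itself is outside this hunk). The rule in isolation:

	#include <linux/debugfs.h>

	struct dentry *dir = debugfs_lookup("crc_win", crtc->debugfs_entry);

	if (dir) {
		debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
					   &crc_win_update_fops);
		dput(dir);	/* drop the reference taken by debugfs_lookup() */
	}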
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index a0154a5f7183..f0b01c8dc4a6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
@@ -153,41 +154,28 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
return result;
}
-static void get_payload_table(
- struct amdgpu_dm_connector *aconnector,
- struct dp_mst_stream_allocation_table *proposed_table)
+static void
+fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
+ struct amdgpu_dm_connector *aconnector,
+ struct dc_dp_mst_stream_allocation_table *table)
{
- int i;
- struct drm_dp_mst_topology_mgr *mst_mgr =
- &aconnector->mst_port->mst_mgr;
-
- mutex_lock(&mst_mgr->payload_lock);
-
- proposed_table->stream_count = 0;
-
- /* number of active streams */
- for (i = 0; i < mst_mgr->max_payloads; i++) {
- if (mst_mgr->payloads[i].num_slots == 0)
- break; /* end of vcp_id table */
-
- ASSERT(mst_mgr->payloads[i].payload_state !=
- DP_PAYLOAD_DELETE_LOCAL);
-
- if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
- mst_mgr->payloads[i].payload_state ==
- DP_PAYLOAD_REMOTE) {
-
- struct dp_mst_stream_allocation *sa =
- &proposed_table->stream_allocations[
- proposed_table->stream_count];
-
- sa->slot_count = mst_mgr->payloads[i].num_slots;
- sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
- proposed_table->stream_count++;
- }
+ struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+ struct dc_dp_mst_stream_allocation *sa;
+ struct drm_dp_mst_atomic_payload *payload;
+
+ /* Fill payload info*/
+ list_for_each_entry(payload, &mst_state->payloads, next) {
+ if (payload->delete)
+ continue;
+
+ sa = &new_table.stream_allocations[new_table.stream_count];
+ sa->slot_count = payload->time_slots;
+ sa->vcp_id = payload->vcpi;
+ new_table.stream_count++;
}
- mutex_unlock(&mst_mgr->payload_lock);
+ /* Overwrite the old table */
+ *table = new_table;
}
void dm_helpers_dp_update_branch_info(
@@ -201,15 +189,13 @@ void dm_helpers_dp_update_branch_info(
bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dc_context *ctx,
const struct dc_stream_state *stream,
- struct dp_mst_stream_allocation_table *proposed_table,
+ struct dc_dp_mst_stream_allocation_table *proposed_table,
bool enable)
{
struct amdgpu_dm_connector *aconnector;
- struct dm_connector_state *dm_conn_state;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
- bool ret;
- u8 link_coding_cap = DP_8b_10b_ENCODING;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
/* Accessing the connector state is required for vcpi_slots allocation
@@ -220,40 +206,21 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
if (!aconnector || !aconnector->mst_port)
return false;
- dm_conn_state = to_dm_connector_state(aconnector->base.state);
-
mst_mgr = &aconnector->mst_port->mst_mgr;
-
- if (!mst_mgr->mst_state)
- return false;
-
- mst_port = aconnector->port;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
-#endif
-
- if (enable) {
-
- ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
- dm_conn_state->pbn,
- dm_conn_state->vcpi_slots);
- if (!ret)
- return false;
-
- } else {
- drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
- }
+ mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1);
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+ if (enable)
+ drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+ else
+ drm_dp_remove_payload(mst_mgr, mst_state, payload);
/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
* sequence. copy DRM MST allocation to dc */
-
- get_payload_table(aconnector, proposed_table);
+ fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
return true;
}
@@ -310,8 +277,9 @@ bool dm_helpers_dp_mst_send_payload_allocation(
bool enable)
{
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_atomic_payload *payload;
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
@@ -320,19 +288,16 @@ bool dm_helpers_dp_mst_send_payload_allocation(
if (!aconnector || !aconnector->mst_port)
return false;
- mst_port = aconnector->port;
-
mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
- if (!mst_mgr->mst_state)
- return false;
-
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
if (!enable) {
set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
}
- if (drm_dp_update_payload_part2(mst_mgr)) {
+ if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
set_flag, false);
} else {
@@ -342,9 +307,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
clr_flag, false);
}
- if (!enable)
- drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
-
return true;
}
@@ -729,8 +691,14 @@ bool dm_helpers_dp_write_dsc_enable(
const struct dc_stream_state *stream,
bool enable)
{
- uint8_t enable_dsc = enable ? 1 : 0;
+ static const uint8_t DSC_DISABLE;
+ static const uint8_t DSC_DECODING = 0x01;
+ static const uint8_t DSC_PASSTHROUGH = 0x02;
+
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_port *port;
+ uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
+ uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
uint8_t ret = 0;
if (!stream)
@@ -750,8 +718,39 @@ bool dm_helpers_dp_write_dsc_enable(
aconnector->dsc_aux, stream, enable_dsc);
#endif
- ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
- DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable");
+ port = aconnector->port;
+
+ if (enable) {
+ if (port->passthrough_aux) {
+ ret = drm_dp_dpcd_write(port->passthrough_aux,
+ DP_DSC_ENABLE,
+ &enable_passthrough, 1);
+ DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
+ ret);
+ }
+
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux,
+ DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n",
+ (port->passthrough_aux) ? "remote RX" :
+ "virtual dpcd",
+ ret);
+ } else {
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux,
+ DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n",
+ (port->passthrough_aux) ? "remote RX" :
+ "virtual dpcd",
+ ret);
+
+ if (port->passthrough_aux) {
+ ret = drm_dp_dpcd_write(port->passthrough_aux,
+ DP_DSC_ENABLE,
+ &enable_passthrough, 1);
+ DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
+ ret);
+ }
+ }
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
@@ -768,7 +767,7 @@ bool dm_helpers_dp_write_dsc_enable(
#endif
}
- return (ret > 0);
+ return ret;
}
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
@@ -879,6 +878,34 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
+void dm_helpers_init_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *panel_config,
+ struct dc_sink *sink)
+{
+ // Extra Panel Power Sequence
+ panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+ panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
+ panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
+ panel_config->pps.extra_post_t7_ms = 0;
+ panel_config->pps.extra_pre_t11_ms = 0;
+ panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
+ panel_config->pps.extra_post_OUI_ms = 0;
+ // Feature DSC
+ panel_config->dsc.disable_dsc_edp = false;
+ panel_config->dsc.force_dsc_edp_policy = 0;
+}
+
+void dm_helpers_override_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *panel_config)
+{
+ // Feature DSC
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ panel_config->dsc.disable_dsc_edp = true;
+ }
+}
+
void *dm_helpers_allocate_gpu_mem(
struct dc_context *ctx,
enum dc_gpu_mem_alloc_type type,
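
Note: payload bookkeeping above now comes from the DP MST atomic state rather than the manager's legacy VCPI arrays: the topology state is read from mst_mgr->base.state, the per-port payload via drm_atomic_get_mst_payload_state(), and active allocations are walked from mst_state->payloads. The consuming pattern in isolation, using only names that appear in the hunk:

	struct drm_dp_mst_topology_state *mst_state =
		to_drm_dp_mst_topology_state(mst_mgr->base.state);
	struct drm_dp_mst_atomic_payload *payload;

	list_for_each_entry(payload, &mst_state->payloads, next) {
		if (payload->delete)		/* allocation queued for removal */
			continue;
		/* payload->vcpi and payload->time_slots describe one active stream */
	}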
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2e74ccf7df5b..6ff96b4bdda5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,6 +36,7 @@
#include "dm_helpers.h"
#include "dc_link_ddc.h"
+#include "dc_link_dp.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"
@@ -447,34 +448,13 @@ dm_dp_mst_detect(struct drm_connector *connector,
}
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct drm_crtc_state *new_crtc_state;
- struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr;
+ struct drm_dp_mst_port *mst_port = aconnector->port;
- mst_port = aconnector->port;
- mst_mgr = &aconnector->mst_port->mst_mgr;
-
- if (!old_conn_state->crtc)
- return 0;
-
- if (new_conn_state->crtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
- if (!new_crtc_state ||
- !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
- new_crtc_state->enable)
- return 0;
- }
-
- return drm_dp_atomic_release_vcpi_slots(state,
- mst_mgr,
- mst_port);
+ return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
@@ -618,15 +598,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
aconnector->mst_mgr.cbs = &dm_mst_cbs;
- drm_dp_mst_topology_mgr_init(
- &aconnector->mst_mgr,
- adev_to_drm(dm->adev),
- &aconnector->dm_dp_aux.aux,
- 16,
- 4,
- max_link_enc_cap.lane_count,
- drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
- aconnector->connector_id);
+ drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
+ &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
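
Note: the rest of this file migrates from the VCPI helpers to the time-slot API: drm_dp_atomic_find_time_slots() no longer takes a PBN divider (it now lives in the topology state as pbn_div), and drm_dp_atomic_release_time_slots() is its teardown counterpart. The pairing in isolation:

	int slots;

	slots = drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, pbn);
	if (slots < 0)
		return slots;		/* not enough link bandwidth */

	/* ... and on connector teardown ... */
	return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);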
@@ -731,6 +704,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
}
static bool increase_dsc_bpp(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
@@ -743,12 +717,9 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
int min_initial_slack;
int next_index;
int remaining_to_increase = 0;
- int pbn_per_timeslot;
int link_timeslots_used;
int fair_pbn_alloc;
- pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
-
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
@@ -779,46 +750,43 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
link_timeslots_used = 0;
for (i = 0; i < count; i++)
- link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);
+ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
- fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+ fair_pbn_alloc =
+ (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
} else {
vars[next_index].pbn -= fair_pbn_alloc;
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
}
} else {
vars[next_index].pbn += initial_slack[next_index];
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
} else {
vars[next_index].pbn -= initial_slack[next_index];
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
}
}
@@ -872,11 +840,10 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
@@ -884,11 +851,10 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ vars[next_index].pbn) < 0)
return false;
}
@@ -902,17 +868,27 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_vars *vars,
+ struct drm_dp_mst_topology_mgr *mgr,
int *link_vars_start_index)
{
- int i, k;
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
int count = 0;
+ int i, k;
bool debugfs_overwrite = false;
memset(params, 0, sizeof(params));
+ if (IS_ERR(mst_state))
+ return false;
+
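+ /*
+ * Store this link's PBN-per-timeslot divider (and, on DCN, the slot
+ * count for its link encoding format) in the topology state so the DRM
+ * MST atomic helpers validate allocations against this link's parameters.
+ */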
+ mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
+#endif
+
/* Set up params */
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -971,11 +947,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_vcpi_slots(state,
- params[i].port->mgr,
- params[i].port,
- vars[i + k].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+ vars[i + k].pbn) < 0)
return false;
}
if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
@@ -989,21 +962,15 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
- if (drm_dp_atomic_find_vcpi_slots(state,
- params[i].port->mgr,
- params[i].port,
- vars[i + k].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn) < 0)
return false;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_vcpi_slots(state,
- params[i].port->mgr,
- params[i].port,
- vars[i + k].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn) < 0)
return false;
}
}
@@ -1011,7 +978,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return false;
/* Optimize degree of compression */
- if (!increase_dsc_bpp(state, dc_link, params, vars, count, k))
+ if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
return false;
if (!try_disable_dsc(state, dc_link, params, vars, count, k))
@@ -1157,8 +1124,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
continue;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
- vars, &link_vars_start_index)) {
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
+ &aconnector->mst_mgr,
+ &link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
@@ -1216,10 +1184,8 @@ static bool
continue;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state,
- dc_state,
- stream->link,
- vars,
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
+ &aconnector->mst_mgr,
&link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
@@ -1386,19 +1352,90 @@ clean_exit:
return (ret == 0);
}
-#endif
+static unsigned int kbps_from_pbn(unsigned int pbn)
+{
+ unsigned int kbps = pbn;
+
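+ /*
+ * Inverse of kbps_to_peak_pbn(): a PBN unit is 54/64 MB/s of payload,
+ * so undo the PEAK_FACTOR_X1000 margin and scale by 8 * 54 / 64 to
+ * convert back to kbps.
+ */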
+ kbps *= (1000000 / PEAK_FACTOR_X1000);
+ kbps *= 8;
+ kbps *= 54;
+ kbps /= 64;
+
+ return kbps;
+}
+
+static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+ struct dc_dsc_bw_range *bw_range)
+{
+ struct dc_dsc_policy dsc_policy = {0};
+
+ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
+ dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, bw_range);
+
+ return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
int bpp, pbn, branch_max_throughput_mps = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings cur_link_settings;
+ unsigned int end_to_end_bw_in_kbps = 0;
+ unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ unsigned int max_compressed_bw_in_kbps = 0;
+ struct dc_dsc_bw_range bw_range = {0};
- /* check if mode could be supported within fUll_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
- if (pbn > aconnector->port->full_pbn)
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ /*
+ * check if the mode could be supported if DSC pass-through is supported
+ * AND check if there is enough bandwidth available to support the mode
+ * with DSC enabled.
+ */
+ if (is_dsc_common_config_possible(stream, &bw_range) &&
+ aconnector->port->passthrough_aux) {
+ mutex_lock(&aconnector->mst_mgr.lock);
+
+ cur_link_settings = stream->link->verified_link_cap;
+
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ &cur_link_settings
+ );
+ down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn);
+
+ /* pick the bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ down_link_bw_in_kbps);
+
+ mutex_unlock(&aconnector->mst_mgr.lock);
+
+ /*
+ * use the maximum dsc compression bandwidth as the required
+ * bandwidth for the mode
+ */
+ max_compressed_bw_in_kbps = bw_range.min_kbps;
+
+ if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ } else {
+#endif
+ /* check if mode could be supported within full_pbn */
+ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+
+ if (pbn > aconnector->port->full_pbn)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+#endif
/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b841b8b0a9d8..dfd3be49eac8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
@@ -1562,7 +1563,7 @@ int dm_drm_plane_get_property(struct drm_plane *plane,
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 95168c2cfa6f..286981a2dd40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
- struct drm_framebuffer *fb,
- int *min_downscale, int *max_upscale);
-
int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..8ca10ab3dfc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- power_opt |= psr_power_opt_z10_static_screen;
+ /*
+ * Only enable static-screen optimizations for PSR1. For PSR SU, this
+ * causes issues with the vstartup interrupt, which amdgpu_dm uses to
+ * send vblank events.
+ */
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ power_opt |= psr_power_opt_z10_static_screen;
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 6767fab55c26..352e9afb85c6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -100,3 +100,24 @@ void convert_float_matrix(
matrix[i] = (uint16_t)reg_value;
}
}
+
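+/* Greatest common divisor via Euclid's algorithm: repeat (a, b) -> (b, a % b) until b reaches 0. */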
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t remainder = 0;
+ while (b != 0) {
+ remainder = a % b;
+ a = b;
+ b = remainder;
+ }
+ return a;
+}
+
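+/* Reduce num/den to lowest terms by dividing both by their GCD. */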
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den)
+{
+ uint32_t gcd = 0;
+
+ gcd = find_gcd(num, den);
+ *out_num = num / gcd;
+ *out_den = den / gcd;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index ade785c4fdc7..81da4e6f7a1a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -38,6 +38,9 @@ void convert_float_matrix(
struct fixed31_32 *flt,
uint32_t buffer_size);
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 09fbb7ad5362..53b077b40d72 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "core_types.h"
#include "ObjectID.h"
#include "atomfirmware.h"
@@ -44,25 +45,6 @@
#include "bios_parser_common.h"
-/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
-#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
-#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
-#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
-
-#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
-#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
-#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
-
-#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
-#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
-#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
-
#define DC_LOGGER \
bp->base.ctx->logger
@@ -868,6 +850,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -876,6 +860,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO LVDS not support anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@@ -885,6 +871,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only get data from dce_info table.
@@ -898,13 +886,15 @@ static enum bp_result get_ss_info_v4_1(
DATA_TABLES(smu_info));
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
smu_info->waflclk_ss_percentage;
ss_info->spread_spectrum_range =
smu_info->gpuclk_ss_rate_10hz * 10;
if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_XGMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
default:
result = BP_RESULT_UNSUPPORTED;
@@ -941,6 +931,7 @@ static enum bp_result get_ss_info_v4_2(
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
@@ -954,6 +945,8 @@ static enum bp_result get_ss_info_v4_2(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -962,6 +955,8 @@ static enum bp_result get_ss_info_v4_2(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO LVDS not support anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@@ -971,6 +966,8 @@ static enum bp_result get_ss_info_v4_2(
smu_info->gpuclk_ss_rate_10hz * 10;
if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only get data from dce_info table.
@@ -1019,6 +1016,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -1027,6 +1026,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
@@ -1035,6 +1036,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_smu_info_v4_0 does not have fields for SS for SMU Display PLL anymore.
@@ -1372,7 +1375,7 @@ static enum bp_result bios_parser_get_lttpr_interop(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
return result;
}
@@ -1388,6 +1391,7 @@ static enum bp_result bios_parser_get_lttpr_caps(
if (!DATA_TABLES(dce_info))
return BP_RESULT_UNSUPPORTED;
+ *dce_caps = 0;
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(dce_info));
get_atom_data_table_revision(header, &tbl_revision);
@@ -1421,7 +1425,11 @@ static enum bp_result bios_parser_get_lttpr_caps(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
+ if (dcb->ctx->dc->config.force_bios_enable_lttpr && *dce_caps == 0) {
+ *dce_caps = 1;
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: forced enabled");
+ }
return result;
}
@@ -1859,7 +1867,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
@@ -1868,7 +1876,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
@@ -2010,7 +2018,7 @@ static enum bp_result get_firmware_info_v3_4(
if (!smu_info_v3_5)
return BP_RESULT_BADBIOSTABLE;
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_5->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_5->bootup_dcefclk_10khz * 10;
break;
@@ -2416,6 +2424,7 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2630,6 +2639,7 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2791,6 +2801,8 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2942,6 +2954,27 @@ static enum bp_result construct_integrated_info(
default:
return result;
}
+ if (result == BP_RESULT_OK) {
+
+ DC_LOG_BIOS("edp1:\n"
+ "\tedp_pwr_on_off_delay = %d\n"
+ "\tedp_pwr_on_vary_bl_to_blon = %d\n"
+ "\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
+ "\tedp_bootup_bl_level = %d\n",
+ info->edp1_info.edp_pwr_on_off_delay,
+ info->edp1_info.edp_pwr_on_vary_bl_to_blon,
+ info->edp1_info.edp_pwr_down_bloff_to_vary_bloff,
+ info->edp1_info.edp_bootup_bl_level);
+ DC_LOG_BIOS("edp2:\n"
+ "\tedp_pwr_on_off_delayv = %d\n"
+ "\tedp_pwr_on_vary_bl_to_blon = %d\n"
+ "\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
+ "\tedp_bootup_bl_level = %d\n",
+ info->edp2_info.edp_pwr_on_off_delay,
+ info->edp2_info.edp_pwr_on_vary_bl_to_blon,
+ info->edp2_info.edp_pwr_down_bloff_to_vary_bloff,
+ info->edp2_info.edp_bootup_bl_level);
+ }
}
if (result != BP_RESULT_OK)
@@ -2967,13 +3000,22 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
+ if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
+ info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ }
}
-
// Log the Checksum and Voltage Swing
DC_LOG_BIOS("Integrated info table CHECKSUM: %d\n"
"Integrated info table FIX_DP_VOLTAGE_SWING: %d\n",
info->ext_disp_conn_info.checksum,
info->ext_disp_conn_info.fixdpvoltageswing);
+ if (bp->base.ctx->dc->config.force_bios_fixed_vs && info->ext_disp_conn_info.fixdpvoltageswing == 0) {
+ info->ext_disp_conn_info.fixdpvoltageswing = bp->base.ctx->dc->config.force_bios_fixed_vs & 0xF;
+ DC_LOG_BIOS("driver forced fixdpvoltageswing = %d\n", info->ext_disp_conn_info.fixdpvoltageswing);
+ }
}
/* Sort voltage table from low to high*/
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
@@ -3319,6 +3361,7 @@ static enum bp_result bios_get_board_layout_info(
struct bios_parser *bp;
static enum bp_result record_result;
+ unsigned int max_slots;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
@@ -3335,8 +3378,14 @@ static enum bp_result bios_get_board_layout_info(
}
board_layout_info->num_of_slots = 0;
+ max_slots = MAX_BOARD_SLOTS;
+
+ // Assume single slot on v1_5
+ if (bp->object_info_tbl.revision.minor == 5) {
+ max_slots = 1;
+ }
- for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ for (i = 0; i < max_slots; ++i) {
record_result = get_bracket_layout_record(dcb,
slot_index_to_vbios_id[i],
&board_layout_info->slots[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c76091fd1f2..f276abb63bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- case AMDGPU_FAMILY_GC_11_0_2: {
+ case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn32_clk_mgr_destroy(clk_mgr);
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0202dc682682..ca6dfd2d7561 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -24,10 +24,9 @@
*/
#include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index 2e088c5171b2..f1319957e400 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -28,6 +28,7 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index c09be3f15fe6..c1eaf571407a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -48,6 +48,11 @@
#include "dc_dmub_srv.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
#include "yellow_carp_offset.h"
#define regCLK1_CLK_PLL_REQ 0x0237
@@ -99,7 +104,7 @@ static int dcn31_get_active_display_cnt_wa(
return display_count;
}
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -110,9 +115,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -211,11 +217,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn31_disable_otg_wa(clk_mgr_base, true);
+ dcn31_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn31_disable_otg_wa(clk_mgr_base, false);
+ dcn31_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -737,8 +743,49 @@ void dcn31_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn31_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn31_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index ee99974b3b62..1131c6d73f6c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -51,6 +51,13 @@
#include "dc_link_dp.h"
#include "dcn314_smu.h"
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
+
#define MAX_INSTANCE 7
#define MAX_SEGMENT 8
@@ -119,7 +126,7 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -129,12 +136,21 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
+ if (disable) {
+ if (stream_enc && stream_enc->funcs->disable_fifo)
+ pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+ if (stream_enc && stream_enc->funcs->enable_fifo)
+ pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
}
}
}
@@ -233,11 +249,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -307,16 +323,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn314_smu_enable_pme_wa(clk_mgr);
}
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -425,7 +431,7 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
@@ -510,7 +516,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
- DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+ DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
@@ -527,6 +533,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
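+/* Sanity filter for PMFW-reported clock values: reject 0/1 and anything at or above 100000 (MHz). */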
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
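+/* Map the PMFW WckRatio enum to the numeric ratio stored in the clock table: 1:2 -> 2, 1:4 -> 4, anything else -> 1. */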
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+
+ default:
+ break;
+ }
+ return 1;
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -540,89 +566,129 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
- int max_voltage = 0;
- int clock = 0;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage) {
- return clocks[i];
- } else if (clock_table->SocVoltage[i] >= max_voltage &&
- clock_table->SocVoltage[i] < voltage) {
- max_voltage = clock_table->SocVoltage[i];
- clock = clocks[i];
- }
- }
-
- ASSERT(clock);
- return clock;
-}
-
static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+ const DpmClocks314_t *clock_table)
{
- int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ int i;
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ /* Find highest valid fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+ clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
+ /* We expect the table to contain at least one valid fclk entry. */
+ ASSERT(is_valid_clock_value(max_fclk));
- bw_params->clk_table.num_entries = j + 1;
-
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
- switch (clock_table->DfPstateTable[j].WckRatio) {
- case WCK_RATIO_1_2:
- bw_params->clk_table.entries[i].wck_ratio = 2;
- break;
- case WCK_RATIO_1_4:
- bw_params->clk_table.entries[i].wck_ratio = 4;
- break;
- default:
- bw_params->clk_table.entries[i].wck_ratio = 1;
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+ int j;
+
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+ clock_table->DfPstateTable[j].FClk < min_fclk &&
+ clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
}
- bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[min_pstate].WckRatio);
+ }
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[max_pstate].WckRatio);
+ i++;
}
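+ /* i now equals the number of populated entries; post-decrement so it indexes the last (highest) entry below. */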
+ bw_params->clk_table.num_entries = i--;
+ /* Make sure all highest clocks are included */
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such a case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -641,7 +707,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn314_init_clocks,
+ .init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +747,10 @@ void dcn314_clk_mgr_construct(
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
- smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+ smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- sizeof(DpmClocks_t),
+ sizeof(DpmClocks314_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
@@ -727,9 +793,50 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn314_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index c695a4498c50..171f84340eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index a7958dc96581..047d19ea919c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -36,6 +36,37 @@ typedef enum {
WCK_RATIO_MAX
} WCK_RATIO_e;
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
struct dcn314_watermarks {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
};
struct dcn314_smu_dpm_clks {
- DpmClocks_t *dpm_clks;
+ DpmClocks314_t *dpm_clks;
union large_integer mc_address;
};
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index cc076621f5e6..893991a0eb97 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -41,11 +41,19 @@
#include "dc_dmub_srv.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
#include "dc_link_dp.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
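+/*
+ * UNSUPPORTED_DCFCLK (kHz) is an intentionally unreachable dcfclk request
+ * used below to keep p-state switching locked out; MIN_DPP_DISP_CLK (kHz)
+ * is the 100 MHz dppclk/dispclk floor applied as an underflow workaround.
+ */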
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK 100000
+
static int dcn315_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
@@ -79,7 +87,7 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -91,9 +99,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -146,6 +155,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
+ /* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+ if (!new_clocks->p_state_change_support)
+ new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
@@ -159,10 +171,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
+ if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+ if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -175,12 +187,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, true);
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -275,7 +287,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -283,7 +295,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -291,7 +303,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -299,7 +311,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -507,7 +519,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i];
bw_params->clk_table.entries[i].dppclk_mhz = clock_table->DppClocks[i];
bw_params->clk_table.entries[i].wck_ratio = 1;
- };
+ }
/* Make sure to include at least one entry and highest pstate */
if (max_pstate != min_pstate || i == 0) {
@@ -556,8 +568,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
- if (!bw_params->num_channels)
- bw_params->num_channels = 2;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -666,7 +677,48 @@ void dcn315_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn315_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn315_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn315_clk_mgr_helper_populate_bw_params(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 0cd3d2eb7ac7..187f5b27fdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index c6785969eb1a..f0f3f66629cc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
+ unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
+ clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
@@ -179,6 +181,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_levels);
+ num_dcfclk_levels = num_levels;
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
@@ -189,11 +192,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
+ num_dtbclk_levels = num_levels;
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_levels);
+ num_dispclk_levels = num_levels;
+
+ if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
unsigned int i;
@@ -658,6 +666,12 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ if (clk_mgr->dpm_present && !num_levels)
+ clk_mgr->dpm_present = false;
+
+ if (!clk_mgr->dpm_present)
+ dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
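A minimal standalone sketch of the dpm_present bookkeeping introduced in the two dcn32 hunks above: DPM support is only claimed when the DCFCLK, DTBCLK and DISPCLK queries all report levels, is revoked if the later MEMCLK query returns none, and the DPM table gets a fallback patch when DPM is absent. The struct, its field names and the patch_dpm_table() helper are illustrative stand-ins, not the real dcn32 types.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in state; the real driver tracks this on clk_mgr_internal. */
struct dpm_state {
        unsigned int num_dcfclk, num_dtbclk, num_dispclk, num_memclk;
        bool dpm_present;
};

/* Hypothetical stand-in for dcn32_patch_dpm_table(). */
static void patch_dpm_table(struct dpm_state *s)
{
        (void)s;
        printf("patching DPM table with safe defaults\n");
}

static void init_clocks(struct dpm_state *s)
{
        /* DPM is usable only if every per-clock query returned levels. */
        s->dpm_present = s->num_dcfclk && s->num_dtbclk && s->num_dispclk;
}

static void get_memclk_states(struct dpm_state *s)
{
        /* A later query can still invalidate DPM support. */
        if (s->dpm_present && !s->num_memclk)
                s->dpm_present = false;

        if (!s->dpm_present)
                patch_dpm_table(s);
}

int main(void)
{
        struct dpm_state s = { .num_dcfclk = 4, .num_dtbclk = 2,
                               .num_dispclk = 4, .num_memclk = 0 };

        init_clocks(&s);
        get_memclk_states(&s);
        printf("dpm_present = %d\n", s.dpm_present); /* 0: memclk had no levels */
        return 0;
}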
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e42f44fc1c08..258ba5a872b1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -401,6 +401,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
+ return true;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
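The memcmp early-return added to dc_stream_adjust_vmin_vmax() skips a full VRR reprogram when the caller passes timing adjust values that are already in effect. A small sketch of that no-op-detection pattern, using a placeholder struct rather than the real dc_crtc_timing_adjust layout:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Placeholder for dc_crtc_timing_adjust; field names are illustrative only. */
struct timing_adjust {
        unsigned int v_total_min;
        unsigned int v_total_max;
};

static int program_count; /* counts how often "hardware" would be touched */

static bool adjust_vmin_vmax(struct timing_adjust *cur,
                             const struct timing_adjust *req)
{
        /* Byte-wise comparison is safe here only when both structs are
         * fully initialized (no stale padding), as the zero-initialized
         * DC state structs are. */
        if (memcmp(req, cur, sizeof(*cur)) == 0)
                return true; /* nothing changed: skip reprogramming */

        *cur = *req;
        program_count++; /* stand-in for the per-pipe DRR programming */
        return true;
}

int main(void)
{
        struct timing_adjust cur = { 0 }, req = { 1000, 1100 };

        adjust_vmin_vmax(&cur, &req); /* programs once */
        adjust_vmin_vmax(&cur, &req); /* identical request: no-op */
        printf("programmed %d time(s)\n", program_count); /* 1 */
        return 0;
}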
@@ -638,14 +641,17 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
/**
* dc_stream_get_crc() - Get CRC values for the given stream.
- * @dc: DC object
+ *
+ * @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr: CRC value for the first of the 3 channels stored here.
- * @g_y: CRC value for the second of the 3 channels stored here.
- * @b_cb: CRC value for the third of the 3 channels stored here.
+ * @r_cr: CRC value for the red component.
+ * @g_y: CRC value for the green component.
+ * @b_cb: CRC value for the blue component.
*
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
- * Return false if stream is not found, or if CRCs are not enabled.
+ *
+ * Return:
+ * false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
@@ -1074,8 +1080,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
- bool pipe_split_change =
- context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -1087,7 +1100,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->current_state->stream_count != context->stream_count)
should_disable = true;
- if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *old_pipe, *new_pipe;
old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1187,7 +1201,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state)
+ if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1736,6 +1750,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1826,6 +1843,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context);
}
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
else
@@ -1989,6 +2009,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
dc->optimized_required = false;
dc->wm_optimized_required = false;
}
@@ -2308,9 +2331,13 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_scaling_info_update_type(u);
elevate_update_type(&overall_type, type);
- if (u->flip_addr)
+ if (u->flip_addr) {
update_flags->bits.addr_update = 1;
-
+ if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
+ update_flags->bits.tmz_changed = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+ }
if (u->in_transfer_func)
update_flags->bits.in_transfer_func_change = 1;
@@ -2745,11 +2772,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->abm_level)
stream->abm_level = *update->abm_level;
- if (update->periodic_interrupt0)
- stream->periodic_interrupt0 = *update->periodic_interrupt0;
-
- if (update->periodic_interrupt1)
- stream->periodic_interrupt1 = *update->periodic_interrupt1;
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
if (update->gamut_remap)
stream->gamut_remap_matrix = *update->gamut_remap;
@@ -2834,16 +2858,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
-void dc_reset_state(struct dc *dc, struct dc_state *context)
-{
- dc_resource_state_destruct(context);
-
- /* clear the structure, but don't reset the reference count */
- memset(context, 0, offsetof(struct dc_state, refcount));
-
- init_state(dc, context);
-}
-
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -2979,13 +2993,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
- if (stream_update->periodic_interrupt0 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
-
- if (stream_update->periodic_interrupt1 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
@@ -3063,7 +3072,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
-
core_link_enable_stream(dc->current_state, pipe_ctx);
}
}
@@ -3091,11 +3099,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
-
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
- dc->debug.enable_sw_cntl_psr)
+ if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
+ || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
return false;
@@ -3197,6 +3203,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
context_clock_trace(dc, context);
}
@@ -3229,7 +3238,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3256,6 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
}
- }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -3312,10 +3320,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
} else {
@@ -3323,16 +3327,15 @@ static void commit_planes_for_stream(struct dc *dc,
}
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
return;
}
@@ -3456,17 +3459,13 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
} else {
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3492,23 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
- }
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- /* Since phantom pipe programming is moved to post_unlock_program_front_end,
- * move the SubVP lock to after the phantom pipes have been setup
- */
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- }
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
}
// Fire manual trigger only when bottom plane is flipped
@@ -3528,19 +3529,72 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+/* Determines whether the incoming context requires applying a transition state with unnecessary
+ * pipe splitting and ODM disabled, due to hardware limitations. In a case where
+ * the OPP associated with an MPCC might change due to plane additions, this function
+ * returns true.
+ */
+static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ struct dc_stream_state *stream,
+ int surface_count,
+ bool *is_plane_addition)
+{
+
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+
+ *is_plane_addition = false;
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count > 0 &&
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+ /* determine if minimal transition is required due to MPC */
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 1 &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ /* determine if minimal transition is required due to dynamic ODM */
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ return force_minimal_pipe_splitting;
+}
+
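A condensed sketch of the decision the new helper above makes: a plane-count change on an active stream (the MPC case), or crossing the two-plane boundary while dynamic 2:1 ODM is enabled, forces a minimal transition state first. The stream-status and stream-count checks are folded into plain boolean and integer parameters here, so this is illustrative only.

#include <stdbool.h>
#include <stdio.h>

static bool needs_minimal_transition(int cur_planes, int new_planes,
                                     bool split_avoid, bool dynamic_odm,
                                     bool *is_addition)
{
        bool force = false;

        *is_addition = false;

        /* MPC case: any plane count change on an active stream. */
        if (!split_avoid && new_planes > 0 && cur_planes != new_planes) {
                force = true;
                *is_addition = new_planes > cur_planes;
        }

        /* Dynamic ODM case: only when more than two planes are involved. */
        if (dynamic_odm && new_planes > 0) {
                if (cur_planes > 2 && cur_planes > new_planes)
                        force = true;
                else if (new_planes > 2 && cur_planes < new_planes)
                        force = *is_addition = true;
        }

        return force;
}

int main(void)
{
        bool add;

        printf("2 -> 3 planes: force=%d add=%d\n",
               needs_minimal_transition(2, 3, false, false, &add), add);
        printf("3 -> 1 planes: force=%d add=%d\n",
               needs_minimal_transition(3, 1, false, true, &add), add);
        return 0;
}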
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_policy;
+ enum pipe_split_policy tmp_mpc_policy;
+ bool temp_dynamic_odm_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
if (!transition_context)
return false;
- tmp_policy = dc->debug.pipe_split_policy;
- dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ if (!dc->config.is_vmin_only_asic) {
+ tmp_mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
dc_resource_state_copy_construct(transition_base_context, transition_context);
@@ -3562,19 +3616,22 @@ static bool commit_minimal_transition_state(struct dc *dc,
ret = dc_commit_state_no_check(dc, transition_context);
}
- //always release as dc_commit_state_no_check retains in good case
+ /*always release as dc_commit_state_no_check retains in good case*/
dc_release_state(transition_context);
- //restore previous pipe split policy
- dc->debug.pipe_split_policy = tmp_policy;
+ /*restore previous pipe split and odm policy*/
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = tmp_mpc_policy;
+
+ dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
if (ret != DC_OK) {
- //this should never happen
+ /*this should never happen*/
BREAK_TO_DEBUGGER();
return false;
}
- //force full surface update
+ /*force full surface update*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
@@ -3597,22 +3654,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting = false;
- bool is_plane_addition = false;
-
- struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting;
+ bool is_plane_addition;
- if (cur_stream_status &&
- dc->current_state->stream_count > 0 &&
- dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
- /* determine if minimal transition is required */
- if (cur_stream_status->plane_count > surface_count) {
- force_minimal_pipe_splitting = true;
- } else if (cur_stream_status->plane_count < surface_count) {
- force_minimal_pipe_splitting = true;
- is_plane_addition = true;
- }
- }
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ surface_count,
+ &is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
@@ -3629,7 +3678,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
&context))
return false;
- /* on plane addition, minimal state is the new one */
+ /* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state(dc, context)) {
dc_release_state(context);
@@ -4016,7 +4065,7 @@ struct dc_sink *dc_link_add_remote_sink(
* Treat device as no EDID device if EDID
* parsing fails
*/
- if (edid_status != EDID_OK) {
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
dc_sink->dc_edid.length = 0;
dm_error("Bad EDID, status%d!\n", edid_status);
}
@@ -4271,8 +4320,8 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
/*
*****************************************************************************
* Function: dc_is_dmub_outbox_supported -
- *
- * @brief
+ *
+ * @brief
* Checks whether DMUB FW supports outbox notifications, if supported
* DM should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
@@ -4292,7 +4341,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
@@ -4340,6 +4389,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
struct dc_context *dc_ctx = dc->ctx;
dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 2a8007928210..7c2e3b8dc26a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -402,6 +402,44 @@ void get_hdr_visual_confirm_color(
}
}
+void get_subvp_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ bool enable_subvp = false;
+ int i;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ /* SubVP enable - red */
+ color->color_r_cr = color_value;
+ enable_subvp = true;
+
+ if (pipe_ctx->stream == pipe->stream)
+ return;
+ break;
+ }
+ }
+
+ if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
+ color->color_r_cr = 0;
+ if (pipe_ctx->stream->ignore_msa_timing_param == 1)
+ /* SubVP enable and DRR on - green */
+ color->color_g_y = color_value;
+ else
+ /* SubVP enable and No DRR - blue */
+ color->color_b_cb = color_value;
+ }
+}
+
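The color mapping used by get_subvp_visual_confirm_color() above boils down to: red for a SubVP main pipe, green for a non-SubVP pipe with DRR while SubVP is active anywhere, blue for a non-SubVP pipe without DRR. A toy version with the pipe state collapsed into booleans, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TG_COLOR_VALUE 0x3FF

struct tg_color { unsigned int r, g, b; };

static struct tg_color subvp_confirm_color(bool subvp_anywhere,
                                           bool pipe_is_subvp_main,
                                           bool pipe_uses_drr)
{
        struct tg_color c = { 0 };

        if (pipe_is_subvp_main) {
                c.r = MAX_TG_COLOR_VALUE;           /* SubVP enabled: red */
        } else if (subvp_anywhere) {
                if (pipe_uses_drr)
                        c.g = MAX_TG_COLOR_VALUE;   /* SubVP + DRR: green */
                else
                        c.b = MAX_TG_COLOR_VALUE;   /* SubVP, no DRR: blue */
        }
        return c;
}

int main(void)
{
        struct tg_color c = subvp_confirm_color(true, false, true);

        printf("r=%u g=%u b=%u\n", c.r, c.g, c.b); /* green */
        return 0;
}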
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e51338441d0..3d19fb92333b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -832,8 +832,9 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
- apply_dpia_mst_dsc_always_on_wa(link);
link->type = dc_connection_mst_branch;
+ apply_dpia_mst_dsc_always_on_wa(link);
+
dm_helpers_dp_update_branch_info(link->ctx, link);
if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
@@ -847,20 +848,13 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
bool reset_cur_dp_mst_topology(struct dc_link *link)
{
- bool result = false;
DC_LOGGER_INIT(link->ctx->logger);
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
revert_dpia_mst_dsc_always_on_wa(link);
- result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-
- link->mst_stream_alloc_table.stream_count = 0;
- memset(link->mst_stream_alloc_table.stream_allocations,
- 0,
- sizeof(link->mst_stream_alloc_table.stream_allocations));
- return result;
+ return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
}
static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
@@ -1311,6 +1305,14 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink->edid_caps.audio_modes[i].sample_rate,
sink->edid_caps.audio_modes[i].sample_size);
}
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ // Init dc_panel_config
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
+ }
+
} else {
/* From Connected-to-Disconnected. */
link->type = dc_connection_none;
@@ -1975,7 +1977,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int i;
bool apply_seamless_boot_optimization = false;
uint32_t bl_oled_enable_delay = 50; // in ms
- const uint32_t post_oui_delay = 30; // 30ms
+ uint32_t post_oui_delay = 30; // 30ms
/* Reduce link bandwidth between failed link training attempts. */
bool do_fallback = false;
@@ -2022,8 +2024,10 @@ static enum dc_status enable_link_dp(struct dc_state *state,
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
- if (link->dpcd_sink_ext_caps.raw != 0)
+ if (link->dpcd_sink_ext_caps.raw != 0) {
+ post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
msleep(post_oui_delay);
+ }
// similarly, mode switch can cause loss of cable ID
dpcd_write_cable_id_to_dprx(link);
@@ -2069,11 +2073,7 @@ static enum dc_status enable_link_edp(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- enum dc_status status;
-
- status = enable_link_dp(state, pipe_ctx);
-
- return status;
+ return enable_link_dp(state, pipe_ctx);
}
static enum dc_status enable_link_dp_mst(
@@ -2639,9 +2639,8 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
dp_set_fec_ready(link, link_res, false);
}
}
- } else {
- if (signal != SIGNAL_TYPE_VIRTUAL)
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else if (signal != SIGNAL_TYPE_VIRTUAL) {
+ link->dc->hwss.disable_link_output(link, link_res, signal);
}
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
@@ -2663,6 +2662,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_over_340mhz = false;
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2702,11 +2702,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;
- link->link_enc->funcs->enable_tmds_output(
- link->link_enc,
+ dc->hwss.enable_tmds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->stream->signal,
pipe_ctx->clock_source->id,
display_color_depth,
- pipe_ctx->stream->signal,
stream->phy_pix_clk);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
@@ -2717,15 +2718,16 @@ static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dc *dc = stream->ctx->dc;
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
memset(&stream->link->cur_link_settings, 0,
sizeof(struct dc_link_settings));
-
- link->link_enc->funcs->enable_lvds_output(
- link->link_enc,
+ dc->hwss.enable_lvds_link_output(
+ link,
+ &pipe_ctx->link_res,
pipe_ctx->clock_source->id,
stream->phy_pix_clk);
@@ -3372,7 +3374,7 @@ bool dc_link_setup_psr(struct dc_link *link,
switch(link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
if(!dc->debug.disable_z10)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
break;
@@ -3516,7 +3518,7 @@ static void update_mst_stream_alloc_table(
struct dc_link *link,
struct stream_encoder *stream_enc,
struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
- const struct dp_mst_stream_allocation_table *proposed_table)
+ const struct dc_dp_mst_stream_allocation_table *proposed_table)
{
struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
struct link_mst_stream_allocation *dc_alloc;
@@ -3563,6 +3565,35 @@ static void update_mst_stream_alloc_table(
work_table[i];
}
+static void remove_stream_from_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *dio_stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
+{
+ int i = 0;
+ struct link_mst_stream_allocation_table *table =
+ &link->mst_stream_alloc_table;
+
+ if (hpo_dp_stream_enc) {
+ for (; i < table->stream_count; i++)
+ if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
+ break;
+ } else {
+ for (; i < table->stream_count; i++)
+ if (dio_stream_enc == table->stream_allocations[i].stream_enc)
+ break;
+ }
+
+ if (i < table->stream_count) {
+ i++;
+ for (; i < table->stream_count; i++)
+ table->stream_allocations[i-1] = table->stream_allocations[i];
+ memset(&table->stream_allocations[table->stream_count-1], 0,
+ sizeof(struct link_mst_stream_allocation));
+ table->stream_count--;
+ }
+}
+
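remove_stream_from_alloc_table() above is a straightforward array compaction: find the matching allocation, shift every later entry down one slot, zero the freed tail slot and shrink the count. A generic sketch of the same pattern with plain integers standing in for link_mst_stream_allocation entries:

#include <stdio.h>

static void remove_entry(int *table, int *count, int match)
{
        int i;

        for (i = 0; i < *count; i++)
                if (table[i] == match)
                        break;

        if (i < *count) {
                /* shift the remaining allocations down by one */
                for (i = i + 1; i < *count; i++)
                        table[i - 1] = table[i];
                table[*count - 1] = 0;
                (*count)--;
        }
}

int main(void)
{
        int table[4] = { 11, 22, 33, 44 };
        int count = 4, i;

        remove_entry(table, &count, 22);
        for (i = 0; i < count; i++)
                printf("%d ", table[i]); /* 11 33 44 */
        printf("\n");
        return 0;
}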
static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
const uint32_t VCP_Y_PRECISION = 1000;
@@ -3679,7 +3710,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
@@ -3784,7 +3815,7 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
uint8_t i;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
DC_LOGGER_INIT(link->ctx->logger);
@@ -3873,7 +3904,7 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
uint8_t i;
enum act_return_status ret;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -3957,7 +3988,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
int i;
bool mst_mode = (link->type == dc_connection_mst_branch);
@@ -3980,26 +4011,32 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&empty_link_settings,
avg_time_slots_per_mtp);
- /* TODO: which component is responsible for remove payload table? */
if (mst_mode) {
+ /* when link is in mst mode, rely on mst manager to remove
+ * payload
+ */
if (dm_helpers_dp_mst_write_payload_allocation_table(
stream->ctx,
stream,
&proposed_table,
- false)) {
+ false))
update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- }
- else {
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- }
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ } else {
+ /* when link is no longer in mst mode (mst hub unplugged),
+ * remove payload with default dc logic
+ */
+ remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
}
DC_LOG_MST("%s"
@@ -4303,8 +4340,9 @@ void core_link_enable_stream(
*/
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+
}
status = enable_link(state, pipe_ctx);
@@ -4736,7 +4774,7 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
else if (link->connector_signal == SIGNAL_TYPE_EDP
&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
dsc_support.DSC_SUPPORT == false
- || link->dc->debug.disable_dsc_edp
+ || link->panel_config.dsc.disable_dsc_edp
|| !link->dc->caps.edp_dsc_support))
force_disable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index d01d2eeed813..651231387043 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -35,6 +35,8 @@
#include "dc_link_ddc.h"
#include "dce/dce_aux.h"
#include "dmub/inc/dmub_cmd.h"
+#include "link_dpcd.h"
+#include "include/dal_asic_id.h"
#define DC_LOGGER_INIT(logger)
@@ -683,6 +685,21 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
+ if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
+ ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ /* Fixed VS workaround for AUX timeout */
+ const uint32_t fixed_vs_address = 0xF004F;
+ const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+
+ core_link_write_dpcd(ddc->link,
+ fixed_vs_address,
+ fixed_vs_data,
+ sizeof(fixed_vs_data));
+
+ timeout = 3072;
+ }
+
/* Do not try to access nonexistent DDC pin. */
if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY)
return true;
@@ -691,6 +708,7 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
result = true;
}
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 48dad093ae8b..c57df45e83ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -526,9 +526,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
return disable_scrabled_data_symbols;
}
-static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
{
- return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
+ return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -545,7 +545,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -561,7 +561,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
__func__,
offset,
@@ -584,7 +584,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
lt_settings->dpcd_lane_settings,
size_in_bytes);
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_128b_132b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -873,7 +873,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
uint32_t lane;
enum dc_status status;
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -906,7 +906,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
ln_align->raw = dpcd_buf[2];
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
__func__,
@@ -954,7 +954,7 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(link_training_setting, offset))
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -963,7 +963,7 @@ enum dc_status dpcd_set_lane_settings(
(uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
DP_128b_132b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -1172,7 +1172,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* Note: also check that TPS4 is a supported feature*/
tr_pattern = lt_settings->pattern_for_eq;
- if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
@@ -1198,7 +1198,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 3. wait for receiver to lock-on*/
wait_time_microsec = lt_settings->eq_pattern_time;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
wait_time_microsec =
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
@@ -1469,7 +1469,6 @@ static inline void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->lttpr_mode = link->lttpr_mode;
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
@@ -1478,6 +1477,7 @@ static inline void decide_8b_10b_training_settings(
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1501,9 +1501,8 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link,
lt_settings->cds_pattern_time = 2500;
lt_settings->cds_wait_time_limit = (dp_convert_to_count(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
- lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
- LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
lt_settings->disallow_per_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1543,7 +1542,7 @@ static void override_training_settings(
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
lt_settings->always_match_dpcd_with_hw_lane_settings = false;
@@ -1584,6 +1583,15 @@ static void override_training_settings(
if (link->preferred_training_settings.fec_enable != NULL)
lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+
+#endif
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
}
uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
@@ -1649,7 +1657,7 @@ static enum dc_status configure_lttpr_mode_non_transparent(
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -2099,7 +2107,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2216,7 +2224,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
link->vendor_specific_lttpr_link_rate_wa = target_rate;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2288,7 +2296,7 @@ static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
return status;
}
@@ -2635,6 +2643,7 @@ enum link_training_result dc_link_dp_perform_link_training(
link,
link_settings,
&lt_settings);
+
override_training_settings(
link,
&link->preferred_training_settings,
@@ -2652,7 +2661,7 @@ enum link_training_result dc_link_dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
@@ -2758,8 +2767,14 @@ bool perform_link_training_with_retries(
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ /* Update verified link settings to current one
+ * Because DPIA LT might fallback to lower link setting.
+ */
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ }
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
@@ -3080,7 +3095,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
+ if (dp_is_lttpr_present(link)) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
@@ -3234,7 +3249,7 @@ static bool dp_verify_link_cap(
cur_link_settings = max_link_settings;
/* Grant extended timeout request */
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
+ if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -3743,7 +3758,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
unsigned int policy = 0;
- policy = link->ctx->dc->debug.force_dsc_edp_policy;
+ policy = link->panel_config.dsc.force_dsc_edp_policy;
if (max_link_rate == LINK_RATE_UNKNOWN)
max_link_rate = link->verified_link_cap.link_rate;
/*
@@ -3909,7 +3924,7 @@ bool decide_link_settings(struct dc_stream_state *stream,
if (stream->timing.flags.DSC) {
enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
- if (link->ctx->dc->debug.force_dsc_edp_policy) {
+ if (link->panel_config.dsc.force_dsc_edp_policy) {
/* calculate link max link rate cap*/
struct dc_link_settings tmp_link_setting;
struct dc_crtc_timing tmp_timing = stream->timing;
@@ -4095,8 +4110,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
+ /* prepare link training settings */
+ link_training_settings.link_settings = link->cur_link_settings;
+
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
+
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
link_training_settings.dpcd_lane_settings);
@@ -4203,9 +4223,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- /* prepare link training settings */
- link_training_settings.link_settings = link->cur_link_settings;
-
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
@@ -4518,17 +4535,15 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
- }
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
}
}
@@ -5017,121 +5032,136 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-/* Logic to determine LTTPR mode */
-static void determine_lttpr_mode(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
- bool allow_lttpr_non_transparent_mode = 0;
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
+
+ /* Logic to determine LTTPR support*/
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
+ if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
+ return false;
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- allow_lttpr_non_transparent_mode = 1;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- allow_lttpr_non_transparent_mode = 1;
+ /* By reading LTTPR capability, RX assumes that we will enable
+ * LTTPR extended aux timeout if LTTPR is present.
+ */
+ status = core_link_read_dpcd(link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* If this chip cap is set, at least one retimer must exist in the chain
+ * Override count to 1 if we receive a known bad count (0 or an invalid value)
+ */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- if (vbios_lttpr_enable && vbios_lttpr_interop)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
- if (allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
- if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- else
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- }
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = dp_is_lttpr_present(link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Check DP tunnel LTTPR mode debug option. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+
+ return is_lttpr_present;
}
-bool dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_is_lttpr_present(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
+ return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+}
- memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting);
- /* Logic to determine LTTPR mode*/
- determine_lttpr_mode(link);
+ if (encoding == DP_8b_10b_ENCODING)
+ return dp_decide_8b_10b_lttpr_mode(link);
+ else if (encoding == DP_128b_132b_ENCODING)
+ return dp_decide_128b_132b_lttpr_mode(link);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- !link->dc->debug.disable_fixed_vs_aux_timeout_wa) {
- /* Fixed VS workaround for AUX timeout */
- const uint32_t fixed_vs_address = 0xF004F;
- const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+ ASSERT(0);
+ return LTTPR_MODE_NON_LTTPR;
+}
- core_link_write_dpcd(
- link,
- fixed_vs_address,
- fixed_vs_data,
- sizeof(fixed_vs_data));
- }
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
+{
+ if (!dp_is_lttpr_present(link))
+ return;
- /* By reading LTTPR capability, RX assumes that we will enable
- * LTTPR extended aux timeout if LTTPR is present.
- */
- status = core_link_read_dpcd(
- link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- lttpr_dpcd_data,
- sizeof(lttpr_dpcd_data));
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
+ *override = LTTPR_MODE_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
+ *override = LTTPR_MODE_NON_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
+ *override = LTTPR_MODE_NON_LTTPR;
}
- return is_lttpr_present;
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent)
+ return LTTPR_MODE_NON_TRANSPARENT;
+ else
+ return LTTPR_MODE_TRANSPARENT;
+ }
+
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support)
+ return LTTPR_MODE_NON_TRANSPARENT;
+
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
+{
+ return dp_is_lttpr_present(link) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
}
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
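A condensed sketch of the 8b/10b decision tree that dp_decide_8b_10b_lttpr_mode() introduces above: no repeater means non-LTTPR; with an LTTPR-aware VBIOS the mode follows the VBIOS force-non-transparent bit; otherwise non-transparent is used only when the DP1.4a config allows it and extended AUX timeouts are supported. The policy inputs are passed as flags here instead of being read from dc->caps and dc->config, so treat it as an illustration only.

#include <stdbool.h>
#include <stdio.h>

enum lttpr_mode { NON_LTTPR, TRANSPARENT, NON_TRANSPARENT };

static enum lttpr_mode decide_8b_10b(bool lttpr_present, bool vbios_aware,
                                     bool vbios_force_non_transparent,
                                     bool allow_non_transparent_dp1_4a,
                                     bool extended_aux_timeout)
{
        if (!lttpr_present)
                return NON_LTTPR;

        if (vbios_aware)
                return vbios_force_non_transparent ? NON_TRANSPARENT
                                                   : TRANSPARENT;

        if (allow_non_transparent_dp1_4a && extended_aux_timeout)
                return NON_TRANSPARENT;

        return NON_LTTPR;
}

int main(void)
{
        /* Repeater present, VBIOS LTTPR-aware, no forced non-transparent. */
        printf("mode = %d\n",
               decide_8b_10b(true, true, false, false, true)); /* TRANSPARENT */
        return 0;
}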
@@ -5193,13 +5223,16 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
uint64_t current_ts = 0;
uint64_t time_taken_ms = 0;
enum dc_connection_type type = dc_connection_none;
+ bool lttpr_present;
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- determine_lttpr_mode(link);
+ lttpr_present = dp_is_lttpr_present(link) ||
+ (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
*/
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ if (lttpr_present)
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
@@ -5267,6 +5300,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t aux_channel_retry_cnt = 0;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -5294,20 +5328,46 @@ static bool retrieve_link_cap(struct dc_link *link)
status = wa_try_to_wake_dprx(link, timeout_ms);
}
+ while (status != DC_OK && aux_channel_retry_cnt < 10) {
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ /* Delay 1 ms if the AUX CH is in the power-down state. Per DP spec
+ * section 2.3.1.2, the AUX CH may be powered down by a write of 2 to
+ * DPCD 600h. The sink AUX CH monitors the differential signal and may
+ * need up to 1 ms before it can reply.
+ */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
+ udelay(1000);
+ aux_channel_retry_cnt++;
+ }
+ }
+
+ /* If the AUX channel is not active, return false and trigger another detect */
+ if (status != DC_OK) {
+ dpcd_power_state = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+
+ dpcd_power_state = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+ return false;
+ }
+
is_lttpr_present = dp_retrieve_lttpr_cap(link);
- /* Read DP tunneling information. */
- status = dpcd_get_tunneling_device_data(link);
- status = core_link_read_dpcd(link, DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
+ if (is_lttpr_present)
+ configure_lttpr_mode_transparent(link);
- /* Delay 1 ms if AUX CH is in power down state. Based on spec
- * section 2.3.1.2, if AUX CH may be powered down due to
- * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
- * signal and may need up to 1 ms before being able to reply.
- */
- if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
- udelay(1000);
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
@@ -6057,7 +6117,7 @@ bool dc_link_dp_set_test_pattern(
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
p_link_settings->dpcd_lane_settings,
@@ -7034,68 +7094,16 @@ void dp_enable_link_phy(
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct pipe_ctx *pipes =
- link->dc->current_state->res_ctx.pipe_ctx;
- struct clock_source *dp_cs =
- link->dc->res_pool->dp_clock_source;
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- unsigned int i;
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- /* If the current pixel clock source is not DTO(happens after
- * switching from HDMI passive dongle to DP on the same connector),
- * switch the pixel clock source to DTO.
- */
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream != NULL &&
- pipes[i].stream->link == link) {
- if (pipes[i].clock_source != NULL &&
- pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
- pipes[i].clock_source = dp_cs;
- pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
- pipes[i].stream->timing.pix_clk_100hz;
- pipes[i].clock_source->funcs->program_pix_clk(
- pipes[i].clock_source,
- &pipes[i].stream_res.pix_clk_params,
- dp_get_link_encoding_format(link_settings),
- &pipes[i].pll_settings);
- }
- }
- }
-
link->cur_link_settings = *link_settings;
-
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
- if (dc->clk_mgr->funcs->notify_link_rate_change)
- dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
- }
-
- if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
-
- if (link_hwss->ext.enable_dp_link_output)
- link_hwss->ext.enable_dp_link_output(link, link_res, signal,
- clock_source, link_settings);
-
- if (dmcu != NULL && dmcu->funcs->unlock_phy)
- dmcu->funcs->unlock_phy(dmcu);
-
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link->dc->hwss.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
dp_receiver_power_ctrl(link, true);
}
void edp_add_delay_for_T9(struct dc_link *link)
{
- if (link->local_sink &&
- link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
- udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
+ if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
+ udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
}
bool edp_receiver_ready_T9(struct dc_link *link)
@@ -7151,9 +7159,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
}
- if (link->local_sink &&
- link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
- udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
+ if (link && link->panel_config.pps.extra_t7_ms > 0)
+ udelay(link->panel_config.pps.extra_t7_ms * 1000);
return result;
}
@@ -7162,29 +7169,11 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
- if (signal == SIGNAL_TYPE_EDP) {
- if (link->dc->hwss.edp_backlight_control)
- link->dc->hwss.edp_backlight_control(link, false);
- if (link_hwss->ext.disable_dp_link_output)
- link_hwss->ext.disable_dp_link_output(link, link_res, signal);
- link->dc->hwss.edp_power_control(link, false);
- } else {
- if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
- if (link_hwss->ext.disable_dp_link_output)
- link_hwss->ext.disable_dp_link_output(link, link_res, signal);
- if (dmcu != NULL && dmcu->funcs->unlock_phy)
- dmcu->funcs->unlock_phy(dmcu);
- }
-
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
-
+ dc->hwss.disable_link_output(link, link_res, signal);
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
@@ -7250,7 +7239,7 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
+ if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
return;
if (link_hwss->ext.set_dp_lane_settings)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index 468e39589ed8..74e36b34d3f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -115,12 +115,14 @@ static enum link_training_result dpia_configure_link(
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
dp_decide_training_settings(link,
link_setting,
lt_settings);
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
status = dpcd_configure_channel_coding(link, lt_settings);
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
@@ -178,7 +180,7 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
switch (type) {
case DPIA_SET_CFG_SET_LINK:
- data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
+ data.set_link.mode = lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
break;
case DPIA_SET_CFG_SET_PHY_TEST_MODE:
break;
@@ -553,7 +555,7 @@ static enum link_training_result dpia_training_cr_phase(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_cr_transparent(link, link_res, lt_settings);
@@ -830,7 +832,7 @@ static enum link_training_result dpia_training_eq_phase(
{
enum link_training_result result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_eq_transparent(link, link_res, lt_settings);
@@ -870,13 +872,14 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
* @param hop The Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_end(struct dc_link *link,
+ struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
enum dc_status status;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
if (hop == repeater_cnt) { /* DPTX-to-DPIA */
@@ -916,7 +919,7 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
link->link_id.enum_id - ENUM_ID_1,
hop,
result,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
return result;
}
@@ -928,7 +931,9 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
* @param link DPIA link being trained.
* @param hop The Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(struct dc_link *link, uint32_t hop)
+static void dpia_training_abort(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
{
uint8_t data = 0;
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
@@ -936,7 +941,7 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode,
+ lt_settings->lttpr_mode,
link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
@@ -964,12 +969,16 @@ enum link_training_result dc_link_dpia_perform_link_training(
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
int8_t repeater_id; /* Current hop. */
+ struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in
+
+ lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
+
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
if (result != LINK_TRAINING_SUCCESS)
return result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Train each hop in turn starting with the one closest to DPTX.
@@ -987,7 +996,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
break;
/* Stop training hop. */
- result = dpia_training_end(link, repeater_id);
+ result = dpia_training_end(link, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
}
@@ -1001,9 +1010,9 @@ enum link_training_result dc_link_dpia_perform_link_training(
msleep(5);
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT) {
- dpia_training_abort(link, repeater_id);
+ dpia_training_abort(link, &lt_settings, repeater_id);
} else {
- dpia_training_end(link, repeater_id);
+ dpia_training_end(link, &lt_settings, repeater_id);
}
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ffc0f1c0ea93..8ee0d946bb2f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_21;
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dc_version = DCN_VERSION_3_14;
break;
default:
@@ -1904,9 +1904,6 @@ bool dc_is_stream_unchanged(
if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
- if (old_stream->odm_2to1_policy_applied != stream->odm_2to1_policy_applied)
- return false;
-
return true;
}
@@ -3584,6 +3581,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
}
}
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx_reset;
+
+ /* reset the otg sync context for the pipe and its slave pipes if any */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+ if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+ }
+}
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
@@ -3648,3 +3662,25 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
else
return get_virtual_link_hwss();
}
+
+bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
+{
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (stream) {
+ h_blank_start = stream->timing.h_total - stream->timing.h_front_porch;
+ h_blank_end = h_blank_start - stream->timing.h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (stream->timing.h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (stream->timing.h_sync_width % 2 == 0);
+ }
+ return divisible;
+}
\ No newline at end of file
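The new is_h_timing_divisible_by_2() helper derives the horizontal blanking boundaries from the timing struct and then tests parity on each of them. A minimal, self-contained sketch of that arithmetic, not part of the patch and using assumed 1920x1080@60 CEA timing values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_timing {
	uint16_t h_total;        /* 2200 for 1920x1080@60 */
	uint16_t h_front_porch;  /* 88 */
	uint16_t h_addressable;  /* 1920 */
	uint16_t h_sync_width;   /* 44 */
};

static bool example_h_timing_divisible_by_2(const struct example_timing *t)
{
	uint16_t h_blank_start = t->h_total - t->h_front_porch;    /* 2112 */
	uint16_t h_blank_end = h_blank_start - t->h_addressable;   /* 192 */

	/* Same parity test as the new helper: HTOTAL, Hblank start/end and
	 * Hsync width must all be even. */
	return (t->h_total % 2 == 0) && (h_blank_start % 2 == 0) &&
	       (h_blank_end % 2 == 0) && (t->h_sync_width % 2 == 0);
}

int main(void)
{
	struct example_timing t = { 2200, 88, 1920, 44 };

	printf("divisible by 2: %d\n", example_h_timing_divisible_by_2(&t)); /* 1 */
	return 0;
}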
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f62d50901d92..ae13887756bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,7 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER dc->ctx->logger
@@ -329,7 +330,7 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- if (attributes->height * attributes->width * 4 > 16384)
+ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
if (stream->mall_stream_config.type == SUBVP_MAIN)
return false;
@@ -519,7 +520,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
/* remove writeback info for disabled writeback pipes from stream */
- for (i = 0, j = 0; i < stream->num_wb_info; i++) {
+ for (i = 0, j = 0; i < stream->num_wb_info && j < MAX_DWB_PIPES; i++) {
if (stream->writeback_info[i].wb_enabled) {
if (i != j)
/* trim the array */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8e1e40083ec8..2ecf36e6329b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.205"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -118,7 +118,26 @@ struct dc_plane_cap {
uint32_t min_height;
};
-// Color management caps (DPP and MPC)
+/**
+ * DOC: color-management-caps
+ *
+ * **Color management caps (DPP and MPC)**
+ *
+ * Modules/color calculates various color operations which are translated to
+ * abstracted HW. DCE 5-12 had almost no important changes, but starting with
+ * DCN1, every new generation comes with fairly major differences in the color
+ * pipeline. Therefore, we abstract color pipe capabilities so modules/DM can
+ * decide the mapping to HW blocks based on logical capabilities.
+ */
+
+/**
+ * struct rom_curve_caps - predefined transfer function caps for degamma and regamma
+ * @srgb: RGB color space transfer func
+ * @bt2020: BT.2020 transfer func
+ * @gamma2_2: standard gamma
+ * @pq: perceptual quantizer transfer function
+ * @hlg: hybrid log-gamma transfer function
+ */
struct rom_curve_caps {
uint16_t srgb : 1;
uint16_t bt2020 : 1;
@@ -127,36 +146,68 @@ struct rom_curve_caps {
uint16_t hlg : 1;
};
+/**
+ * struct dpp_color_caps - color pipeline capabilities for display pipe and
+ * plane blocks
+ *
+ * @dcn_arch: all DCE generations treated the same
+ * @input_lut_shared: shared with DGAM. Input LUT is different than most LUTs,
+ * just plain 256-entry lookup
+ * @icsc: input color space conversion
+ * @dgam_ram: programmable degamma LUT
+ * @post_csc: post color space conversion, before gamut remap
+ * @gamma_corr: degamma correction
+ * @hw_3d_lut: 3D LUT support. It implies a shaper LUT before. It may be shared
+ * with MPC by setting mpc:shared_3d_lut flag
+ * @ogam_ram: programmable out/blend gamma LUT
+ * @ocsc: output color space conversion
+ * @dgam_rom_for_yuv: pre-defined degamma LUT for YUV planes
+ * @dgam_rom_caps: pre-defined curve caps for degamma 1D LUT
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ *
+ * Note: hdr_mult and gamut remap (CTM) are always available in DPP (in that order)
+ */
struct dpp_color_caps {
- uint16_t dcn_arch : 1; // all DCE generations treated the same
- // input lut is different than most LUTs, just plain 256-entry lookup
- uint16_t input_lut_shared : 1; // shared with DGAM
+ uint16_t dcn_arch : 1;
+ uint16_t input_lut_shared : 1;
uint16_t icsc : 1;
uint16_t dgam_ram : 1;
- uint16_t post_csc : 1; // before gamut remap
+ uint16_t post_csc : 1;
uint16_t gamma_corr : 1;
-
- // hdr_mult and gamut remap always available in DPP (in that order)
- // 3d lut implies shaper LUT,
- // it may be shared with MPC - check MPC:shared_3d_lut flag
uint16_t hw_3d_lut : 1;
- uint16_t ogam_ram : 1; // blnd gam
+ uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t dgam_rom_for_yuv : 1;
struct rom_curve_caps dgam_rom_caps;
struct rom_curve_caps ogam_rom_caps;
};
+/**
+ * struct mpc_color_caps - color pipeline capabilities for multiple pipe and
+ * plane combined blocks
+ *
+ * @gamut_remap: color transformation matrix
+ * @ogam_ram: programmable out gamma LUT
+ * @ocsc: output color space conversion matrix
+ * @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
+ * instance
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ */
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
- uint16_t num_3dluts : 3; //3d lut always assumes a preceding shaper LUT
- uint16_t shared_3d_lut:1; //can be in either DPP or MPC, but single instance
-
+ uint16_t num_3dluts : 3;
+ uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
};
+/**
+ * struct dc_color_caps - color pipes capabilities for DPP and MPC hw blocks
+ * @dpp: color pipes caps for DPP
+ * @mpc: color pipes caps for MPC
+ */
struct dc_color_caps {
struct dpp_color_caps dpp;
struct mpc_color_caps mpc;
@@ -213,6 +264,7 @@ struct dc_caps {
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
+ uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
@@ -349,9 +401,14 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool is_vmin_only_asic;
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
+ bool use_default_clock_table;
+ bool force_bios_enable_lttpr;
+ uint8_t force_bios_fixed_vs;
+
};
enum visual_confirm {
@@ -363,6 +420,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SUBVP = 14,
};
enum dc_psr_power_opts {
@@ -384,9 +442,31 @@ enum dcc_option {
DCC_HALF_REQ_DISALBE = 2,
};
+/**
+ * enum pipe_split_policy - Pipe split strategy supported by DCN
+ *
+ * This enum is used to define the pipe split policy supported by DCN. By
+ * default, DC favors MPC_SPLIT_DYNAMIC.
+ */
enum pipe_split_policy {
+ /**
+ * @MPC_SPLIT_DYNAMIC: DC will automatically decide how to split the
+ * pipe in order to bring the best trade-off between performance and
+ * power consumption. This is the recommended option.
+ */
MPC_SPLIT_DYNAMIC = 0,
+
+ /**
+ * @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not
+ * try any sort of split optimization.
+ */
MPC_SPLIT_AVOID = 1,
+
+ /**
+ * @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to optimize
+ * the pipe utilization when using a single display; if the user
+ * connects to a second display, DC will avoid pipe split.
+ */
MPC_SPLIT_AVOID_MULT_DISP = 2,
};
@@ -609,6 +689,7 @@ struct dc_bounding_box_overrides {
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
+ int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -620,6 +701,14 @@ struct dc_state;
struct resource_pool;
struct dce_hwseq;
+/**
+ * struct dc_debug_options - DC debug struct
+ *
+ * This struct provides a simple mechanism for developers to change some
+ * configurations, enable/disable features, and activate extra debug options.
+ * This can be very handy to narrow down whether some specific feature is
+ * causing an issue or not.
+ */
struct dc_debug_options {
bool native422_support;
bool disable_dsc;
@@ -639,6 +728,11 @@ struct dc_debug_options {
bool disable_stutter;
bool use_max_lb;
enum dcc_option disable_dcc;
+
+ /**
+ * @pipe_split_policy: Define which pipe split policy is used by the
+ * display core.
+ */
enum pipe_split_policy pipe_split_policy;
bool force_single_disp_pipe_split;
bool voltage_align_fclk;
@@ -712,8 +806,6 @@ struct dc_debug_options {
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
- bool disable_dsc_edp;
- unsigned int force_dsc_edp_policy;
bool enable_dram_clock_change_one_display_vactive;
/* TODO - remove once tested */
bool legacy_dp2_lt;
@@ -737,11 +829,14 @@ struct dc_debug_options {
int crb_alloc_policy_min_disp_count;
bool disable_z10;
bool enable_z9_disable_interface;
- bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
+ bool allow_sw_cursor_fallback;
+ unsigned int force_subvp_num_ways;
+ unsigned int force_mall_ss_num_ways;
+ bool alloc_extra_way_for_cursor;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -751,10 +846,13 @@ struct dc_debug_options {
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
+ bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
+ bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
+ enum lttpr_mode lttpr_mode_override;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -810,6 +908,17 @@ struct dc {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
+
+ /* Scratch memory */
+ struct {
+ struct {
+ /*
+ * For matching clock_limits table in driver with table
+ * from PMFW.
+ */
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ } update_bw_bounding_box;
+ } scratch;
};
enum frame_buffer_mode {
@@ -1013,6 +1122,7 @@ union surface_update_flags {
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
uint32_t lut_3d:1;
+ uint32_t tmz_changed:1;
uint32_t full_update:1;
} bits;
@@ -1081,6 +1191,7 @@ struct dc_plane_state {
/* private to dc_surface.c */
enum dc_irq_source irq_source;
struct kref refcount;
+ struct tg_color visual_confirm_color;
};
struct dc_plane_info {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2d61c2a91cee..89d7d3fd3321 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
+#include "../basics/conversion.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
union dmub_rb_cmd cmd = { 0 };
cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- // TODO: Uncomment once FW headers are promoted
- //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+ cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -323,11 +323,13 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
int i = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
- uint8_t visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
+ uint8_t visual_confirm_enabled;
if (dc == NULL)
return false;
+ visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
+
// Format command.
cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
@@ -387,6 +389,37 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
}
}
+void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ enum dmub_status status;
+ unsigned int panel_inst = 0;
+
+ dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ // Prepare fw command
+ cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
+ cmd.visual_confirm_color.header.sub_type = 0;
+ cmd.visual_confirm_color.header.ret_status = 1;
+ cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
+ cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
+
+ // Send command to fw
+ status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd);
+
+ ASSERT(status == DMUB_STATUS_OK);
+
+ // If command was processed, copy the visual confirm color to dmub srv
+ if (status == DMUB_STATUS_OK &&
+ cmd.visual_confirm_color.header.ret_status == 0) {
+ memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
+ &cmd.visual_confirm_color.visual_confirm_color_data,
+ sizeof(struct dmub_visual_confirm_color));
+ }
+}
+
#ifdef CONFIG_DRM_AMD_DC_DCN
/**
* ***********************************************************************************************
@@ -417,44 +450,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
- int16_t drr_frame_us = 0;
- int16_t min_drr_supported_us = 0;
- int16_t max_drr_supported_us = 0;
- int16_t max_drr_vblank_us = 0;
- int16_t max_drr_mallregion_us = 0;
- int16_t mall_region_us = 0;
- int16_t prefetch_us = 0;
- int16_t subvp_active_us = 0;
- int16_t drr_active_us = 0;
- int16_t min_vtotal_supported = 0;
- int16_t max_vtotal_supported = 0;
+ uint16_t drr_frame_us = 0;
+ uint16_t min_drr_supported_us = 0;
+ uint16_t max_drr_supported_us = 0;
+ uint16_t max_drr_vblank_us = 0;
+ uint16_t max_drr_mallregion_us = 0;
+ uint16_t mall_region_us = 0;
+ uint16_t prefetch_us = 0;
+ uint16_t subvp_active_us = 0;
+ uint16_t drr_active_us = 0;
+ uint16_t min_vtotal_supported = 0;
+ uint16_t max_vtotal_supported = 0;
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
- drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+ drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
// P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+ mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
- (div64_s64((int64_t)min_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
-
- prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
- (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
- drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
- max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+ min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
+
+ prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+ (((uint64_t)main_timing->pix_clk_100hz * 100)));
+ drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+ max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
- max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
+ max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
@@ -548,10 +579,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
- subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
- (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
- (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+ subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+ (uint64_t)phantom_timing0->h_total * 1000000),
+ (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+ (uint64_t)phantom_timing1->h_total * 1000000),
+ (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
// should increase its prefetch time to match the other
@@ -559,16 +592,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
- (int64_t)phantom_timing1->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing1->h_total * 1000000));
+
} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
- (int64_t)phantom_timing0->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing0->h_total * 1000000));
}
}
@@ -601,6 +635,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +647,21 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+ /* Calculate the scaling factor from the src and dst height.
+ * e.g. If 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
+ * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+ *
+ * Make sure to combine stream and plane scaling together.
+ */
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
+ &out_num_stream, &out_den_stream);
+ reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
+ &out_num_plane, &out_den_plane);
+ reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
+ pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
@@ -619,19 +669,33 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
// Round up
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
pipe_data->pipe_config.subvp_data.processing_delay_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
+
+ if (subvp_pipe->bottom_pipe) {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
+ } else if (subvp_pipe->next_odm_pipe) {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+ }
+
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
+ if (phantom_pipe->bottom_pipe) {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
+ } else if (phantom_pipe->next_odm_pipe) {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+ }
break;
}
}
@@ -676,7 +740,9 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && !pipe->top_pipe &&
+ /* For SubVP pipe count, only count the topmost (ODM / MPC) pipe
+ */
+ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -689,7 +755,12 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;
+ /* When populating subvp cmd info, only pass in the topmost (ODM / MPC) pipe.
+ * Any ODM or MPC splits being used in SubVP will be handled internally in
+ * populate_subvp_cmd_pipe_info
+ */
if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
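The div64_s64 to div64_u64 conversions in this file all follow the same pattern of computing a period in microseconds as (lines * pixels-per-line * 1e6) / pixel-clock-in-Hz, where the numerator no longer fits in 32 bits. A standalone sketch of the drr_frame_us computation, not part of the patch and using assumed 1920x1080@60 timing values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed 1920x1080@60 timing: 148.5 MHz pixel clock. */
	uint64_t v_total = 1125, h_total = 2200;
	uint64_t pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */

	/* Numerator is ~2.475e12 and overflows 32 bits, hence the unsigned
	 * 64-bit math the patch switches to. */
	uint64_t drr_frame_us = (v_total * h_total * 1000000ULL) /
				(pix_clk_100hz * 100ULL);

	printf("%llu us per frame\n", (unsigned long long)drr_frame_us); /* 16666 */
	return 0;
}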
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 159782cd6659..7e438345b1a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -78,12 +78,14 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst);
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);
void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);
+void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 584aaf6967fd..848db8676adf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -417,19 +417,43 @@ enum dc_scan_direction {
SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
};
+/**
+ * struct dc_cursor_position: Hardware cursor data.
+ *
+ * This struct keeps the action information related to the cursor that will be
+ * sent and received from our DC core.
+ */
struct dc_cursor_position {
+ /**
+ * @x: It represents the top left abscissa coordinate of the cursor.
+ */
uint32_t x;
+
+ /**
+ * @y: It represents the top left ordinate coordinate of the cursor.
+ */
uint32_t y;
+ /**
+ * @x_hotspot: Define the abscissa of the point where the mouse click happens.
+ */
uint32_t x_hotspot;
+
+ /**
+ * @y_hotspot: Define the ordinate of the point where the mouse click happens.
+ */
uint32_t y_hotspot;
- /*
- * This parameter indicates whether HW cursor should be enabled
+ /**
+ * @enable: This parameter indicates whether hardware cursor should be
+ * enabled.
*/
bool enable;
- /* Translate cursor x/y by the source rectangle for each plane. */
+ /**
+ * @translate_by_source: Translate cursor x/y by the source rectangle
+ * for each plane.
+ */
bool translate_by_source;
};
@@ -494,7 +518,9 @@ struct dc_gamma {
/* Used by both ipp amd opp functions*/
/* TODO: to be consolidated with enum color_space */
-/*
+/**
+ * enum dc_cursor_color_format - DC cursor programming mode
+ *
* This enum is for programming CURSOR_MODE register field. What this register
* should be programmed to depends on OS requested cursor shape flags and what
* we stored in the cursor surface.
@@ -530,17 +556,39 @@ union dc_cursor_attribute_flags {
};
struct dc_cursor_attributes {
+ /**
+ * @address: This field represents the framebuffer address associated
+ * with the cursor. It is important to highlight that this address is
+ * divided into high and low parts.
+ */
PHYSICAL_ADDRESS_LOC address;
+
+ /**
+ * @pitch: Cursor line stride.
+ */
uint32_t pitch;
- /* Width and height should correspond to cursor surface width x heigh */
+ /**
+ * @width: Width should correspond to cursor surface width.
+ */
uint32_t width;
+ /**
+ * @height: Height should correspond to cursor surface height.
+ */
uint32_t height;
+ /**
+ * @color_format: DC cursor programming mode.
+ */
enum dc_cursor_color_format color_format;
- uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
+ /**
+ * @sdr_white_level: Boosting (SDR) cursor in HDR mode.
+ */
+ uint32_t sdr_white_level;
- /* In case we support HW Cursor rotation in the future */
+ /**
+ * @rotation_angle: In case we support HW Cursor rotation in the future
+ */
enum dc_rotation_angle rotation_angle;
union dc_cursor_attribute_flags attribute_flags;
@@ -764,22 +812,108 @@ struct dc_dsc_config {
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
};
+
+/**
+ * struct dc_crtc_timing - Timing parameters used to configure DCN blocks
+ *
+ * DCN provides multiple signals and parameters that can be used to adjust
+ * timing parameters; this struct aggregates multiple of these values for easy
+ * access. In this struct, fields prefixed with h_* are related to horizontal
+ * timing, and v_* to vertical timing. Keep in mind that when we talk about
+ * vertical timings, the values, in general, are described in the number of
+ * lines; on the other hand, the horizontal values are in pixels.
+ */
struct dc_crtc_timing {
+ /**
+ * @h_total: The total number of pixels from the rising edge of the
+ * previous HSync until the rising edge of the current HSync.
+ */
uint32_t h_total;
+
+ /**
+ * @h_border_left: The black pixels related to the left border
+ */
uint32_t h_border_left;
+
+ /**
+ * @h_addressable: It is the range of pixels displayed horizontally.
+ * For example, if the display resolution is 3840x2160, the horizontal
+ * addressable area is 3840.
+ */
uint32_t h_addressable;
+
+ /**
+ * @h_border_right: The black pixels related to the right border
+ */
uint32_t h_border_right;
+
+ /**
+ * @h_front_porch: Period (in pixels) between HBlank start and the
+ * rising edge of HSync.
+ */
uint32_t h_front_porch;
+
+ /**
+ * @h_sync_width: HSync duration in pixels.
+ */
uint32_t h_sync_width;
+ /**
+ * @v_total: It is the total number of lines from the rising edge of
+ * the previous VSync until the rising edge of the current VSync.
+ *
+ * |--------------------------|
+ * +-+ V_TOTAL +-+
+ * | | | |
+ * VSync ---+ +--------- // -----------+ +---
+ */
uint32_t v_total;
+
+ /**
+ * @v_border_top: The black border on the top.
+ */
uint32_t v_border_top;
+
+ /**
+ * @v_addressable: It is the range of the scanout at which the
+ * framebuffer is displayed. For example, if the display resolution is
+ * 3840x2160, the addressable area is 2160 lines, or if the resolution
+ * is 1920x1080, the addressable area is 1080 lines.
+ */
uint32_t v_addressable;
+
+ /**
+ * @v_border_bottom: The black border on the bottom.
+ */
uint32_t v_border_bottom;
+
+ /**
+ * @v_front_porch: Period (in lines) between VBlank start and rising
+ * edge of VSync.
+ * +-+
+ * VSync | |
+ * ----------+ +--------...
+ * +------------------...
+ * VBlank |
+ * --+
+ * |-------|
+ * v_front_porch
+ */
uint32_t v_front_porch;
+
+ /**
+ * @v_sync_width: VSync signal width in lines.
+ */
uint32_t v_sync_width;
+ /**
+ * @pix_clk_100hz: Pipe pixel precision
+ *
+ * This field is used to communicate pixel clocks with 100 Hz accuracy
+ * from dc_crtc_timing to BIOS command table.
+ */
uint32_t pix_clk_100hz;
+
uint32_t min_refresh_in_uhz;
uint32_t vic;
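The kernel-doc added for struct dc_crtc_timing describes the horizontal fields individually. The standalone sketch below, not part of the patch and using assumed CEA-861 1920x1080@60 values, shows how the blanking and back porch fall out of those fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed CEA-861 1920x1080@60 horizontal timing. */
	uint32_t h_total = 2200, h_addressable = 1920;
	uint32_t h_border_left = 0, h_border_right = 0;
	uint32_t h_front_porch = 88, h_sync_width = 44;

	uint32_t h_blank = h_total - h_addressable -
			   h_border_left - h_border_right;		/* 280 */
	uint32_t h_back_porch = h_blank - h_front_porch - h_sync_width;	/* 148 */

	printf("h_blank=%u h_back_porch=%u\n", h_blank, h_back_porch);
	return 0;
}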
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index a0af0f6afeef..bf5f9e2773bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -113,6 +113,32 @@ struct psr_settings {
unsigned int psr_power_opt;
};
+/* To split out "global" and "per-panel" config settings.
+ * Add a struct dc_panel_config under dc_link
+ */
+struct dc_panel_config {
+ // extra panel power sequence parameters
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+ unsigned int extra_delay_backlight_off;
+ unsigned int extra_post_t7_ms;
+ unsigned int extra_pre_t11_ms;
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+ // ABM
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
+ // edp DSC
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+ } dsc;
+};
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -131,7 +157,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- enum lttpr_mode lttpr_mode;
bool is_internal_display;
/* TODO: Rename. Flag an endpoint as having a programmable mapping to a
@@ -224,6 +249,7 @@ struct dc_link {
bool dpia_mst_dsc_always_on;
/* Forced DPIA into TBT3 compatibility mode. */
bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -232,6 +258,8 @@ struct dc_link {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
@@ -344,6 +372,7 @@ enum dc_detect_reason {
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index f87f852d4829..9e6025c98db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -212,8 +212,7 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- struct periodic_interrupt_config periodic_interrupt0;
- struct periodic_interrupt_config periodic_interrupt1;
+ struct periodic_interrupt_config periodic_interrupt;
/* from core_stream struct */
struct dc_context *ctx;
@@ -268,8 +267,6 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
struct mall_stream_config mall_stream_config;
-
- bool odm_2to1_policy_applied;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -283,8 +280,7 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- struct periodic_interrupt_config *periodic_interrupt0;
- struct periodic_interrupt_config *periodic_interrupt1;
+ struct periodic_interrupt_config *periodic_interrupt;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index bdb6bac8dd97..c94a966c6612 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -300,7 +300,7 @@ static void set_high_bit_rate_capable(
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
}
-/* set video latency in in ms/2+1 */
+/* set video latency in ms/2+1 */
static void set_video_latency(
struct audio *audio,
int latency_in_ms)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 919c2c2ba84b..32782ef9ef77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -814,12 +814,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER");
retry_on_defer = true;
- fallthrough;
- case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
- if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK)
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
&& defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
@@ -848,7 +842,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
}
}
break;
-
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: FAILURE: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
+ goto fail;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 213de8cabfad..165392380842 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
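The two added lines truncate the deep-color pixel clock to a multiple of 10, i.e. to whole kHz since the value is kept in 100 Hz units, after the 5/4 or 6/4 scaling. A standalone sketch of the 10-bit case, not part of the patch and with an assumed 25.175 MHz input:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_100hz = 251750;		/* assumed 25.175 MHz input */

	/* COLOR_DEPTH_101010: scale by 5/4 ... */
	uint32_t actual = (pix_clk_100hz * 5) >> 2;	/* 314687 -> 31.4687 MHz */

	/* ... then truncate to a multiple of 10 (whole kHz). */
	actual -= actual % 10;				/* 314680 -> 31.468 MHz */

	printf("actual_pixel_clock_100hz = %u\n", actual);
	return 0;
}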
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 0df06740ec39..bec5e9f787fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -393,17 +393,18 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
if (copy_settings_data->dsc_enable_status &&
link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
- sizeof(link->dpcd_caps.sink_dev_id_str)))
+ sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
else
link->psr_settings.force_ffu_mode = 0;
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
if (link->fec_state == dc_link_fec_enabled &&
+ link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
- sizeof(link->dpcd_caps.sink_dev_id_str)) ||
+ sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
- sizeof(link->dpcd_caps.sink_dev_id_str))))
+ sizeof(DP_SINK_DEVICE_STR_ID_2))))
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
else
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
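The hunk above changes the memcmp() length from sizeof(link->dpcd_caps.sink_dev_id_str) to the sizeof of the matching string literal, so the compare covers exactly the bytes the literal defines rather than the full field width. A standalone sketch of the difference, with assumed field and literal sizes not taken from the driver:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char sink_dev_id_str[8] = "ABC";	/* assumed 8-byte DPCD field */
	static const char expected[] = "ABC";	/* string literal, sizeof == 4 */

	/* Length taken from the literal compares the id bytes plus its
	 * terminating NUL and never reads past 'expected'; a length taken
	 * from the 8-byte field would read 4 bytes beyond the literal. */
	int match = memcmp(sink_dev_id_str, expected, sizeof(expected)) == 0;

	printf("sizeof(field)=%zu sizeof(literal)=%zu match=%d\n",
	       sizeof(sink_dev_id_str), sizeof(expected), match);
	return 0;
}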
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 38a67051d470..d260eaa1509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -722,7 +722,6 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
struct gpio *hpd;
- struct dc_sink *sink = link->local_sink;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
@@ -755,9 +754,9 @@ void dce110_edp_wait_for_hpd_ready(
return;
}
- if (sink != NULL) {
- if (sink->edid_caps.panel_patch.extra_t3_ms > 0) {
- int extra_t3_in_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+ if (link != NULL) {
+ if (link->panel_config.pps.extra_t3_ms > 0) {
+ int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;
msleep(extra_t3_in_ms);
}
@@ -842,7 +841,7 @@ void dce110_edp_power_control(
/* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */
if (link->local_sink != NULL)
remaining_min_edp_poweroff_time_ms +=
- link->local_sink->edid_caps.panel_patch.extra_t12_ms;
+ link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
@@ -946,7 +945,7 @@ void dce110_edp_wait_for_T12(
current_ts,
dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
- t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+ t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
if (time_since_edp_poweroff_ms < t12_duration)
msleep(t12_duration - time_since_edp_poweroff_ms);
@@ -965,6 +964,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t panel_instance;
+ unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
+ unsigned int post_T7_delay = OLED_POST_T7_DELAY;
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -1043,8 +1044,10 @@ void dce110_edp_backlight_control(
link_transmitter_control(ctx->dc_bios, &cntl);
- if (enable && link->dpcd_sink_ext_caps.bits.oled)
- msleep(OLED_POST_T7_DELAY);
+ if (enable && link->dpcd_sink_ext_caps.bits.oled) {
+ post_T7_delay += link->panel_config.pps.extra_post_t7_ms;
+ msleep(post_T7_delay);
+ }
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
@@ -1066,8 +1069,10 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled)
- msleep(OLED_PRE_T11_DELAY);
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
+ msleep(pre_T11_delay);
+ }
}
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -1441,6 +1446,14 @@ static enum dc_status dce110_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -2114,6 +2127,7 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
@@ -2992,6 +3006,124 @@ void dce110_set_pipe(struct pipe_ctx *pipe_ctx)
abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst);
}
+void dce110_enable_lvds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock)
+{
+ link->link_enc->funcs->enable_lvds_output(
+ link->link_enc,
+ clock_source,
+ pixel_clock);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+}
+
+void dce110_enable_tmds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock)
+{
+ link->link_enc->funcs->enable_tmds_output(
+ link->link_enc,
+ clock_source,
+ color_depth,
+ signal,
+ pixel_clock);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+}
+
+void dce110_enable_dp_link_output(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct pipe_ctx *pipes =
+ link->dc->current_state->res_ctx.pipe_ctx;
+ struct clock_source *dp_cs =
+ link->dc->res_pool->dp_clock_source;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ unsigned int i;
+
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+ /* If the current pixel clock source is not DTO (this happens after
+ * switching from HDMI passive dongle to DP on the same connector),
+ * switch the pixel clock source to DTO.
+ */
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->link == link) {
+ if (pipes[i].clock_source != NULL &&
+ pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ pipes[i].clock_source = dp_cs;
+ pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
+ pipes[i].stream->timing.pix_clk_100hz;
+ pipes[i].clock_source->funcs->program_pix_clk(
+ pipes[i].clock_source,
+ &pipes[i].stream_res.pix_clk_params,
+ dp_get_link_encoding_format(link_settings),
+ &pipes[i].pll_settings);
+ }
+ }
+ }
+
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+ }
+
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ if (link_hwss->ext.enable_dp_link_output)
+ link_hwss->ext.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
+
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+void dce110_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -3031,6 +3163,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
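
The link-output hooks and stream-timing hunks above all maintain the same symclk bookkeeping: enabling HDMI TMDS timing takes an OTG reference and bumps symclk_state, the PHY enable paths force SYMCLK_ON_TX_ON, disable_link_output drops back to SYMCLK_OFF_TX_OFF, and the back-end reset (see the dcn20 hunk further down) only releases the clock once the OTG reference is gone. A minimal standalone sketch of those transitions, in plain C with illustrative names (not the driver's real helpers):

enum symclk_state_sketch {
	SYMCLK_OFF_TX_OFF,	/* neither the OTG nor the link PHY needs the clock */
	SYMCLK_ON_TX_OFF,	/* the OTG still consumes the clock, link TX is off */
	SYMCLK_ON_TX_ON,	/* the link PHY is driving the TX */
};

struct phy_state_sketch {
	enum symclk_state_sketch symclk_state;
	int otg_ref;	/* stands in for symclk_ref_cnts.otg */
};

/* enable_stream_timing(): the OTG starts consuming the symclk for HDMI TMDS. */
static void otg_take_symclk(struct phy_state_sketch *p)
{
	p->otg_ref = 1;
	p->symclk_state = (p->symclk_state == SYMCLK_OFF_TX_OFF) ?
			SYMCLK_ON_TX_OFF : SYMCLK_ON_TX_ON;
}

/* enable_{lvds,tmds,dp}_link_output(): the PHY is up, so TX is on as well. */
static void phy_enabled(struct phy_state_sketch *p)
{
	p->symclk_state = SYMCLK_ON_TX_ON;
}

/* disable_link_output(): clock and TX are both turned off. */
static void phy_disabled(struct phy_state_sketch *p)
{
	p->symclk_state = SYMCLK_OFF_TX_OFF;
}

/* reset_back_end_for_pipe(): drop the OTG reference; if the PHY was left in
 * SYMCLK_ON_TX_OFF, the symclk can now be released as well. */
static void otg_drop_symclk(struct phy_state_sketch *p, void (*disable_output)(void))
{
	p->otg_ref = 0;
	if (p->symclk_state == SYMCLK_ON_TX_OFF) {
		disable_output();
		p->symclk_state = SYMCLK_OFF_TX_OFF;
	}
}
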
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index b6f3843d3d05..758f4b3b0087 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -90,6 +90,24 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t frame_ramp);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
-
+void dce110_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+void dce110_enable_lvds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock);
+void dce110_enable_tmds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock);
+void dce110_enable_dp_link_output(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d4a6504dfe00..897f412f539e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
@@ -450,11 +448,12 @@ void dpp1_set_cursor_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
+
src_y_offset = pos->y - param->viewport.y;
}
-
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b54c12400323..52e201e9b091 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
@@ -1211,13 +1208,10 @@ void hubp1_cursor_set_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ src_y_offset = pos->y - param->viewport.y;
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bed783747f16..72521749c01d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
@@ -898,6 +899,14 @@ enum dc_status dcn10_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -1016,6 +1025,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -2150,8 +2160,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
grouped_pipes[i]->stream_res.tg->inst, &pclk);
- grouped_pipes[i]->stream->timing.pix_clk_100hz =
- pclk*get_clock_divider(grouped_pipes[i], false);
+ grouped_pipes[i]->stream->timing.pix_clk_100hz =
+ pclk*get_clock_divider(grouped_pipes[i], false);
if (master == -1)
master = i;
}
@@ -2198,14 +2208,14 @@ void dcn10_enable_vblanks_synchronization(
if (master >= 0) {
for (i = 0; i < group_size; i++) {
if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
- grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
- grouped_pipes[master]->stream_res.tg,
- grouped_pipes[i]->stream_res.tg,
- grouped_pipes[master]->stream->timing.pix_clk_100hz,
- grouped_pipes[i]->stream->timing.pix_clk_100hz,
- get_clock_divider(grouped_pipes[master], false),
- get_clock_divider(grouped_pipes[i], false));
- grouped_pipes[i]->stream->vblank_synchronized = true;
+ grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
+ grouped_pipes[master]->stream_res.tg,
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[master]->stream->timing.pix_clk_100hz,
+ grouped_pipes[i]->stream->timing.pix_clk_100hz,
+ get_clock_divider(grouped_pipes[master], false),
+ get_clock_divider(grouped_pipes[i], false));
+ grouped_pipes[i]->stream->vblank_synchronized = true;
}
grouped_pipes[master]->stream->vblank_synchronized = true;
DC_SYNC_INFO("Sync complete\n");
@@ -2538,8 +2548,10 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
color_space_to_black_color(
dc, pipe_ctx->stream->output_color_space, color);
- if (mpc->funcs->set_bg_color)
+ if (mpc->funcs->set_bg_color) {
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ }
}
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -3339,11 +3351,11 @@ static bool dcn10_dmub_should_update_cursor_data(
if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
return false;
- if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
+ if (dcn10_can_pipe_disable_cursor(pipe_ctx))
+ return false;
- if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
- debug->enable_sw_cntl_psr)
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
return false;
@@ -3467,8 +3479,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
- (pipe_ctx->bottom_pipe != NULL);
+ bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
@@ -3477,6 +3488,13 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
+ if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
+ (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
+ pipe_split_on = true;
+ }
+ }
+
/**
* DC cursor is stream space, HW cursor is plane space and drawn
* as part of the framebuffer.
@@ -3548,8 +3566,36 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
+
+ if (param.rotation == ROTATION_ANGLE_0) {
+ int viewport_width =
+ pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_x =
+ pipe_ctx->plane_res.scl_data.viewport.x;
+
+ if (param.mirror) {
+ if (pipe_split_on || odm_combine_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
+ }
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
+ }
+ }
+ }
// Swap axis and mirror horizontally
- if (param.rotation == ROTATION_ANGLE_90) {
+ else if (param.rotation == ROTATION_ANGLE_90) {
uint32_t temp_x = pos_cpy.x;
pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
@@ -3620,23 +3666,25 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int viewport_x =
pipe_ctx->plane_res.scl_data.viewport.x;
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ if (!param.mirror) {
+ if (pipe_split_on || odm_combine_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
}
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
/**
@@ -3737,7 +3785,6 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
int vesa_sync_start;
int asic_blank_end;
int interlace_factor;
- int vertical_line_start;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
@@ -3753,10 +3800,8 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
patched_crtc_timing.v_border_top)
* interlace_factor;
- vertical_line_start = asic_blank_end -
+ return asic_blank_end -
pipe_ctx->pipe_dlg_param.vstartup_start + 1;
-
- return vertical_line_start;
}
void dcn10_calc_vupdate_position(
@@ -3767,7 +3812,7 @@ void dcn10_calc_vupdate_position(
{
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt0.lines_offset;
+ pipe_ctx->stream->periodic_interrupt.lines_offset;
int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
int start_position;
@@ -3792,18 +3837,10 @@ void dcn10_calc_vupdate_position(
static void dcn10_cal_vline_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
- enum vline_select vline,
uint32_t *start_line,
uint32_t *end_line)
{
- enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
-
- if (vline == VLINE0)
- ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
- else if (vline == VLINE1)
- ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
-
- switch (ref_point) {
+ switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
case START_V_UPDATE:
dcn10_calc_vupdate_position(
dc,
@@ -3812,7 +3849,9 @@ static void dcn10_cal_vline_position(
end_line);
break;
case START_V_SYNC:
- // Suppose to do nothing because vsync is 0;
+ // vsync is line 0 so start_line is just the requested line offset
+ *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
+ *end_line = *start_line + 2;
break;
default:
ASSERT(0);
@@ -3822,24 +3861,15 @@ static void dcn10_cal_vline_position(
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline)
+ struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
- if (vline == VLINE0) {
- uint32_t start_line = 0;
- uint32_t end_line = 0;
-
- dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
+ dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
- tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
-
- } else if (vline == VLINE1) {
- pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
- tg,
- pipe_ctx->stream->periodic_interrupt1.lines_offset);
- }
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
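
In the cursor hunks above, the horizontal-mirror correction now lives inside the per-rotation branches instead of a separate trailing block. For the plain ROTATION_ANGLE_0 path without pipe split or ODM combine, the cursor X is simply reflected around the viewport; a small sketch of that arithmetic (the struct here is an assumption, not a DC type):

struct viewport_sketch {
	int x;		/* viewport origin in stream space */
	int width;	/* viewport width */
};

/* Mirrors "pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x" above:
 * a cursor at the left edge (x == vp->x) lands at the right edge
 * (vp->x + vp->width), and vice versa. */
static int mirror_cursor_x(int x, const struct viewport_sketch *vp)
{
	return vp->width - x + 2 * vp->x;
}
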
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 9ae07c77fdc0..0ef7bf7ddb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -175,8 +175,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 10e613ec7d24..f2371c948822 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -82,6 +82,10 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 769974375b4b..8e9384094f6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
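
The guard added above stops the MPCC tree walk as soon as a node's mpcc_bot points back at itself, which would otherwise spin the while loop forever. The same defensive pattern on a generic singly linked list, for illustration only:

struct node_sketch {
	int id;
	struct node_sketch *next;
};

static struct node_sketch *find_node(struct node_sketch *head, int id)
{
	struct node_sketch *n = head;

	while (n != NULL) {
		if (n->id == id)
			return n;

		/* A node linked to itself would never reach NULL; bail out. */
		if (n == n->next)
			break;

		n = n->next;
	}

	return NULL;
}
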
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e1a9a45b03b6..ea7739255119 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -312,6 +312,20 @@ void optc1_program_timing(
}
}
+/**
+ * optc1_set_vtg_params - Set Vertical Timing Generator (VTG) parameters
+ *
+ * @optc: timing_generator struct used to extract the optc parameters
+ * @dc_crtc_timing: Timing parameters configured
+ * @program_fp2: Boolean value indicating if FP2 will be programmed or not
+ *
+ * OTG is responsible for generating the global sync signals, including
+ * vertical timing information for each HUBP in the dcfclk domain. Each VTG is
+ * associated with one OTG that provides HUBP with vertical timing information
+ * (i.e., there is 1:1 correspondence between OTG and VTG). This function is
+ * responsible for setting the OTG parameters to the VTG during the pipe
+ * programming.
+ */
void optc1_set_vtg_params(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
@@ -465,6 +479,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+ // last chance to clear underflow; otherwise it will stay set because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
@@ -1067,7 +1086,7 @@ static void optc1_set_test_pattern(
src_color[index] >> (src_bpc - dst_bpc);
/* CRTC_TEST_PATTERN_DATA has 16 bits,
* lowest 6 are hardwired to ZERO
- * color bits should be left aligned aligned to MSB
+ * color bits should be left aligned to MSB
* XXXXXXXXXX000000 for 10 bit,
* XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
*/
@@ -1374,6 +1393,12 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+ REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
+ OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
+
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
@@ -1493,8 +1518,23 @@ bool optc1_configure_crc(struct timing_generator *optc,
return true;
}
+/**
+ * optc1_get_crc - Capture CRC result per component
+ *
+ * @optc: timing_generator instance.
+ * @r_cr: 16-bit primary CRC signature for red data.
+ * @g_y: 16-bit primary CRC signature for green data.
+ * @b_cb: 16-bit primary CRC signature for blue data.
+ *
+ * This function reads the CRC signature from the OPTC registers. Notice that
+ * we have three registers to keep the CRC result per color component (RGB).
+ *
+ * Returns:
+ * If CRC capture is disabled, return false; otherwise, return true and
+ * write the CRC results through the output parameters.
+ */
bool optc1_get_crc(struct timing_generator *optc,
- uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1505,12 +1545,14 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ CRC0_B_CB, b_cb);
return true;
}
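
Per the new kernel-doc, the call only reports signatures while CRC capture is enabled, so callers check the return value first. A hedged caller-side sketch (the surrounding variables are assumptions, not code from this patch):

uint32_t r_cr = 0, g_y = 0, b_cb = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;	/* assumed caller context */

/* CRC capture must have been enabled via optc1_configure_crc() first;
 * otherwise this returns false and the outputs stay untouched. */
if (optc1_get_crc(tg, &r_cr, &g_y, &b_cb)) {
	/* r_cr / g_y / b_cb now hold the 16-bit per-component signatures. */
}
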
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 3fe5882ed018..6323ca6dc3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -583,6 +583,8 @@ struct dcn_otg_state {
uint32_t underflow_occurred_status;
uint32_t otg_enabled;
uint32_t blank_enabled;
+ uint32_t vertical_interrupt1_en;
+ uint32_t vertical_interrupt1_line;
uint32_t vertical_interrupt2_en;
uint32_t vertical_interrupt2_line;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 174eebbe8b4f..831080b9eb87 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1495,6 +1495,24 @@ static bool dcn10_resource_construct(
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
+ if (!dc->config.is_vmin_only_asic)
+ if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev))
+ switch (dc->ctx->asic_id.pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ dc->config.is_vmin_only_asic = true;
+ break;
+ default:
+ break;
+ }
+
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 2b9d3e63191b..915a20461c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -274,6 +274,7 @@ struct dccg_registers {
uint32_t DSCCLK2_DTO_PARAM;
uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
uint32_t DCCG_GATE_DISABLE_CNTL2;
uint32_t DCCG_GATE_DISABLE_CNTL3;
uint32_t HDMISTREAMCLK0_DTO_PARAM;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index ea1f14af0db7..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index cd2671161ef1..7ce64a3c1b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -445,226 +445,6 @@
type DSCRM_DSC_FORWARD_EN; \
type DSCRM_DSC_OPP_PIPE_SOURCE
-#define DSC_REG_LIST_DCN314(id) \
- SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
- SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
- SRI(DSCC_CONFIG0, DSCC, id),\
- SRI(DSCC_CONFIG1, DSCC, id),\
- SRI(DSCC_STATUS, DSCC, id),\
- SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
- SRI(DSCC_PPS_CONFIG0, DSCC, id),\
- SRI(DSCC_PPS_CONFIG1, DSCC, id),\
- SRI(DSCC_PPS_CONFIG2, DSCC, id),\
- SRI(DSCC_PPS_CONFIG3, DSCC, id),\
- SRI(DSCC_PPS_CONFIG4, DSCC, id),\
- SRI(DSCC_PPS_CONFIG5, DSCC, id),\
- SRI(DSCC_PPS_CONFIG6, DSCC, id),\
- SRI(DSCC_PPS_CONFIG7, DSCC, id),\
- SRI(DSCC_PPS_CONFIG8, DSCC, id),\
- SRI(DSCC_PPS_CONFIG9, DSCC, id),\
- SRI(DSCC_PPS_CONFIG10, DSCC, id),\
- SRI(DSCC_PPS_CONFIG11, DSCC, id),\
- SRI(DSCC_PPS_CONFIG12, DSCC, id),\
- SRI(DSCC_PPS_CONFIG13, DSCC, id),\
- SRI(DSCC_PPS_CONFIG14, DSCC, id),\
- SRI(DSCC_PPS_CONFIG15, DSCC, id),\
- SRI(DSCC_PPS_CONFIG16, DSCC, id),\
- SRI(DSCC_PPS_CONFIG17, DSCC, id),\
- SRI(DSCC_PPS_CONFIG18, DSCC, id),\
- SRI(DSCC_PPS_CONFIG19, DSCC, id),\
- SRI(DSCC_PPS_CONFIG20, DSCC, id),\
- SRI(DSCC_PPS_CONFIG21, DSCC, id),\
- SRI(DSCC_PPS_CONFIG22, DSCC, id),\
- SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
- SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCCIF_CONFIG0, DSCCIF, id),\
- SRI(DSCCIF_CONFIG1, DSCCIF, id),\
- SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
-
-#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
- DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
- DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
- DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
-
-
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
uint32_t DSC_DEBUG_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 936af65381ef..b1ec0e6f7f58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
@@ -990,13 +987,10 @@ void hubp2_cursor_set_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ src_y_offset = pos->y - param->viewport.y;
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 884fa060f375..e1d271fe9e64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -706,6 +706,14 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
@@ -1565,6 +1573,7 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in dc interface, just need
* to apply existing for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
@@ -1898,8 +1907,14 @@ void dcn20_post_unlock_program_front_end(
* can underflow due to HUBP_VTG_SEL programming if done in the regular front end
* programming sequence).
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ while (pipe) {
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
}
}
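
The post-unlock hunk above replaces the single-pipe phantom check with a walk down the bottom_pipe chain, so split pipes that belong to a phantom SubVP stream get the phantom viewport update and the front-end reprogramming as well. Its shape, reduced to a generic helper (illustrative only, not part of the patch):

/* Visit a pipe and all of its bottom (split) pipes, applying fn to the
 * ones driven by a phantom SubVP stream. */
static void for_each_phantom_split_pipe(struct pipe_ctx *pipe,
					void (*fn)(struct pipe_ctx *pipe))
{
	while (pipe) {
		if (pipe->stream &&
		    pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
			fn(pipe);

		pipe = pipe->bottom_pipe;
	}
}
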
@@ -2346,7 +2361,9 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
- struct dc_link *link;
+ struct dc_link *link = pipe_ctx->stream->link;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -2354,7 +2371,6 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- link = pipe_ctx->stream->link;
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
* feature. When system resume from S4 with second
@@ -2403,6 +2419,16 @@ static void dcn20_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
+ /* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -2462,9 +2488,13 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
get_mpctree_visual_confirm_color(pipe_ctx, color);
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
get_surface_tile_visual_confirm_color(pipe_ctx, color);
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
+ get_subvp_visual_confirm_color(dc, pipe_ctx, color);
- if (mpc->funcs->set_bg_color)
+ if (mpc->funcs->set_bg_color) {
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ }
}
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 91e4885b743e..7c5817c426fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -96,6 +96,10 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
index 694260c10a01..ccd91792991b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -215,7 +215,8 @@ void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en);
REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en);
- REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
+ if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN)
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
}
void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 3d307dd58e9a..116f67a0b989 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 05b3fba9ccce..61bcfa03c4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -82,7 +82,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
+static bool gpu_addr_to_uma(struct dce_hwseq *hwseq,
PHYSICAL_ADDRESS_LOC *addr)
{
bool is_in_uma;
@@ -98,6 +98,7 @@ static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
} else {
is_in_uma = false;
}
+ return is_in_uma;
}
static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
index 1826dd7f3da1..9c16633e473a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
@@ -86,6 +86,10 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..5752271f22df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active;
+ uint32_t riommu_active, prefetch_done;
int i;
+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+ if (prefetch_done) {
+ hubbub->riommu_active = true;
+ return;
+ }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index b270f0b194dc..fe1a8e2e08ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 77b00f86c216..4a668d6563df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 6a4dcafb9bba..dc3e8df706b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index fb59fed8f425..8c5045711264 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -939,13 +939,32 @@ bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, s
void dcn30_hardware_release(struct dc *dc)
{
+ bool subvp_in_use = false;
+ uint32_t i;
+
dc_dmub_srv_p_state_delegate(dc, false, NULL);
+ dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false);
+
+ /* SubVP is treated the same way as FPO. If the driver is being disabled
+ * while a SubVP config is in use, force P-State support on the DCN side
+ * to prevent a P-State hang on the next driver enable.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ subvp_in_use = true;
+ break;
+ }
+ }
/* If pstate unsupported, or still supported
* by firmware, force it supported by dcn
*/
if (dc->current_state)
- if ((!dc->clk_mgr->clks.p_state_change_support ||
+ if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 4c06e6e1ba4a..3216d10c58ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
index f2580e65196c..7446e54bf5aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
@@ -227,11 +227,7 @@
SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
@@ -363,11 +359,7 @@
SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 64320e0ca446..3a3b2ac791c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -724,7 +724,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.disable_psr = false,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1916,7 +1917,7 @@ static int get_refresh_rate(struct dc_state *context)
*/
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
-int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
+static int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
{
struct dc_crtc_timing *timing = NULL;
uint32_t sec_per_100_lines;
@@ -1946,7 +1947,7 @@ int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
return scaled_refresh_rate;
}
-bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
+static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
{
int refresh_rate_max_stretch_100hz;
int min_refresh_100hz;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 3d42a1a337ec..6192851c59ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.optimize_pwr_state = dcn21_optimize_pwr_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index db172677d613..559e563d5bc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -634,7 +634,7 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
-static const struct resource_caps res_cap_dcn301 = {
+static struct resource_caps res_cap_dcn301 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
@@ -700,6 +700,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.use_max_lb = false,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1429,6 +1430,8 @@ static bool dcn301_resource_construct(
ctx->dc_bios->regs = &bios_regs;
+ if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435)
+ res_cap_dcn301.num_pll = 2;
pool->base.res_cap = &res_cap_dcn301;
pool->base.funcs = &dcn301_res_pool_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 4fab537e822f..b925b6ddde5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -93,7 +93,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 0a67f8a5656d..527d5c902878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -77,6 +77,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
+ .exit_idle_opt_for_cursor_updates = true,
.disable_idle_power_optimizations = false,
};
@@ -372,7 +373,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGE) {
+ if (eng_id <= ENGINE_ID_DIGB) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index a788d160953b..ab70ebd8f223 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+ return true;
+
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 23621ff08c90..52fb2bf3d578 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -150,9 +150,9 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
* 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
- // VID_STREAM_STATUS, 0,
- // 10, 5000);
+ REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_STATUS, 0,
+ 10, 5000);
/* Disable SDP transmission */
REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
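
The hunk above re-enables the previously commented-out REG_WAIT on VID_STREAM_STATUS, i.e. polling every 10 us for up to 5000 iterations (about 50 ms, enough to cover a 24 Hz frame plus margin) before SDP transmission is disabled. A small standalone sketch of that poll-with-timeout pattern; read_status() and delay_us() are hypothetical stand-ins for the register read and delay behind REG_WAIT:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the register read and delay behind REG_WAIT. */
static unsigned int vid_stream_status = 3;

static unsigned int read_status(void)
{
	/* Pretend the video stream drains after a few polls. */
	return vid_stream_status ? vid_stream_status-- : 0;
}

static void delay_us(unsigned int us)
{
	(void)us; /* no-op here; a real delay in the driver */
}

/* Poll-with-timeout pattern behind REG_WAIT(..., VID_STREAM_STATUS, 0, 10, 5000):
 * re-check every 10 us, give up after 5000 tries (~50 ms). */
static bool wait_stream_disabled(void)
{
	for (int i = 0; i < 5000; i++) {
		if (read_status() == 0)
			return true;
		delay_us(10);
	}
	return false;
}

int main(void)
{
	printf("stream disabled: %d\n", wait_stream_disabled());
	return 0;
}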
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
index 7c77c71591a0..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -162,7 +162,8 @@
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
- SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 51c5f3685470..6360dc9502e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -876,7 +876,7 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
return true;
}
-static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
index e3a654bf04e8..70c60de448ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
@@ -122,6 +122,8 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub31_construct(struct dcn20_hubbub *hubbub3,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 1ed1404e969d..bdf101547484 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -535,11 +535,11 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
-
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index e708f07fe75a..3a32810bbe38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 468a893ff785..8c1a6fb36306 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -889,9 +889,8 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.disable_z10 = true,
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+ .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -2153,7 +2152,7 @@ static bool dcn31_resource_construct(
pool->base.usb4_dpia_count = 4;
}
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 41f8ec99da6b..901436591ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn31_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
struct dcn31_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index e3b5a95e03b1..702c28c2560e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -13,31 +13,6 @@
DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 232cc15979dd..1bd7e0f327d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+ return;
switch (otg_inst) {
case 0:
@@ -137,7 +184,7 @@ static void dccg314_set_dtbclk_p_src(
}
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
-void dccg314_set_dtbclk_dto(
+static void dccg314_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
@@ -181,7 +228,7 @@ void dccg314_set_dtbclk_dto(
}
}
-void dccg314_set_dpstreamclk(
+static void dccg314_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -220,7 +267,7 @@ void dccg314_set_dpstreamclk(
}
}
-void dccg314_set_valid_pixel_rate(
+static void dccg314_set_valid_pixel_rate(
struct dccg *dccg,
int ref_dtbclk_khz,
int otg_inst,
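
Both this dcn314 hunk and the matching dcn32 change further below add a read-back guard to the pixel-rate divider setter: the current K1/K2 values are fetched first, requests carrying the 0xF "not applicable" sentinel are dropped, and the register is only rewritten when the requested values actually differ. A minimal standalone sketch of that guard; reg_get_div()/reg_set_div() are hypothetical stand-ins for the driver's REG_GET_2/REG_UPDATE_2 macros:

#include <stdint.h>
#include <stdio.h>

#define PIXEL_RATE_DIV_NA 0xF /* "not applicable" sentinel, as in the diff */

/* Fake hardware register; in the driver this is OTG_PIXEL_RATE_DIV. */
static uint32_t otg_pixel_rate_div_reg;

/* Hypothetical stand-ins for REG_GET_2 / REG_UPDATE_2. */
static void reg_get_div(uint32_t *k1, uint32_t *k2)
{
	*k1 = otg_pixel_rate_div_reg & 0x1;        /* K1: 1-bit field */
	*k2 = (otg_pixel_rate_div_reg >> 1) & 0x3; /* K2: 2-bit field */
}

static void reg_set_div(uint32_t k1, uint32_t k2)
{
	otg_pixel_rate_div_reg = (k1 & 0x1) | ((k2 & 0x3) << 1);
	printf("programmed K1=%u K2=%u\n", k1, k2);
}

/* Guarded setter: drop invalid requests, skip redundant writes. */
static void set_pixel_rate_div(uint32_t k1, uint32_t k2)
{
	uint32_t cur_k1, cur_k2;

	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
		return; /* 0xF does not fit the 1- and 2-bit fields */

	reg_get_div(&cur_k1, &cur_k2);
	if (k1 == cur_k1 && k2 == cur_k2)
		return; /* already programmed, avoid the register write */

	reg_set_div(k1, k2);
}

int main(void)
{
	set_pixel_rate_div(1, 2);                 /* programs the register */
	set_pixel_rate_div(1, 2);                 /* redundant, skipped */
	set_pixel_rate_div(PIXEL_RATE_DIV_NA, 2); /* invalid, skipped */
	return 0;
}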
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
index 9a4a9efc0203..6a35986307af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
@@ -63,34 +63,28 @@
DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL2),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
SR(DTBCLK_P_CNTL),\
SR(DCCG_AUDIO_DTO_SOURCE)
-
-#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, mask_sh),\
@@ -100,7 +94,6 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK3_SRC_SEL, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN, mask_sh),\
- DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
@@ -148,7 +141,48 @@
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh)
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
+
+#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
+ DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh)
struct dccg *dccg314_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index b384f30395d3..0d2ffb692957 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -67,8 +68,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
- DIG_FIFO_READ_START_LEVEL, 0);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
static void enc314_dp_set_odm_combine(
@@ -81,7 +81,7 @@ static void enc314_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc314_stream_encoder_dvi_set_stream_attribute(
+static void enc314_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -262,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+void enc314_stream_encoder_dp_blank(
+ struct dc_link *link,
+ struct stream_encoder *enc)
+{
+ /* New to DCN314 - disable the FIFO before VID stream disable. */
+ enc314_disable_fifo(enc);
+
+ enc1_stream_encoder_dp_blank(link, enc);
+}
+
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -322,9 +332,6 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
- /* DIG Resync FIFO now needs to be explicitly enabled. */
- enc314_enable_fifo(enc);
-
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
@@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+ /*
+ * DIG Resync FIFO now needs to be explicitly enabled.
+ * This should come after DP_VID_STREAM_ENABLE per HW docs.
+ */
+ enc314_enable_fifo(enc);
+
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
@@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
- enc1_stream_encoder_dp_blank,
+ enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 755c715ad8dc..588c1c71241f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,9 +343,14 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
+ bool two_pix_per_container = false;
+ two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return odm_combine_factor;
+
if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -355,16 +360,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
- } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- *k1_div = PIXEL_RATE_DIV_BY_2;
- *k2_div = PIXEL_RATE_DIV_BY_2;
} else {
- if (odm_combine_factor == 1)
- *k2_div = PIXEL_RATE_DIV_BY_4;
- else if (odm_combine_factor == 2)
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+ if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -374,3 +376,20 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
return odm_combine_factor;
}
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t pix_per_cycle = 1;
+ uint32_t odm_combine_factor = 1;
+
+ if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
+ pix_per_cycle = 2;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+ pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ pix_per_cycle);
+}
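
The reworked dcn314_calculate_dccg_k1_k2_values() above keys the DP branch on a single two-pixels-per-container flag (which already covers YCbCr 4:2:0) instead of enumerating pixel encodings, and returns early for virtual signals. A compact standalone sketch of the resulting DP-side K1/K2 selection; the enum and helper names are simplified stand-ins for the driver's types:

#include <stdbool.h>
#include <stdio.h>

enum pixel_rate_div { DIV_BY_1, DIV_BY_2, DIV_BY_4 };

/* DP branch of the K1/K2 selection, as reworked in the hunk above:
 * two pixels per container (e.g. YCbCr 4:2:0) -> K2 = /2, otherwise
 * K2 = /4, switched to /2 when ODM 2:1 combine is active. */
static void dp_pick_k1_k2(bool two_pix_per_container, int odm_combine_factor,
			  enum pixel_rate_div *k1, enum pixel_rate_div *k2)
{
	*k1 = DIV_BY_1;

	if (two_pix_per_container) {
		*k2 = DIV_BY_2;
	} else {
		*k2 = DIV_BY_4;
		if (odm_combine_factor == 2)
			*k2 = DIV_BY_2;
	}
}

int main(void)
{
	enum pixel_rate_div k1, k2;

	dp_pick_k1_k2(true, 1, &k1, &k2);
	printf("4:2:0       -> k1=%d k2=%d\n", k1, k2);

	dp_pick_k1_k2(false, 2, &k1, &k2);
	printf("ODM 2:1     -> k1=%d k2=%d\n", k1, k2);

	dp_pick_k1_k2(false, 1, &k1, &k2);
	printf("single pipe -> k1=%d k2=%d\n", k1, k2);
	return 0;
}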
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index be0f5e4d48e1..244280298212 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -39,4 +39,6 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index b9debeb081fd..5b6c2d94ec71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -102,6 +102,10 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
@@ -145,6 +149,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 0c7980266b85..47eb162f1a75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
- REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}
@@ -149,7 +150,7 @@ static bool optc314_disable_crtc(struct timing_generator *optc)
return true;
}
-void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
+static void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 63861cdfb09f..24ec71cbd3e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -70,6 +70,7 @@
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -86,6 +87,9 @@
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
@@ -132,155 +136,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
#define DC_LOGGER_INIT(logger)
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 668,
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -602,6 +457,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
@@ -726,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
#define dsc_regsDCN314(id)\
[id] = {\
- DSC_REG_LIST_DCN314(id)\
+ DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
@@ -737,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
};
static const struct dcn20_dsc_shift dsc_shift = {
- DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
- DSC_REG_LIST_SH_MASK_DCN314(_MASK)
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
@@ -991,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
- .num_dsc = 4,
+ .num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
@@ -1059,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.seamless_boot_odm_combine = true
};
@@ -1402,7 +1257,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGF) {
+ if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
@@ -1447,7 +1302,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
- vpg_inst = hpo_dp_inst + 6;
+ // Uses offset index 5-8, but actually maps to vpg_inst 6-9
+ vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
@@ -1790,112 +1646,20 @@ static struct clock_source *dcn31_clock_source_create(
}
BREAK_TO_DEBUGGER();
+ kfree(clk_src);
return NULL;
}
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
- bool upscaled = false;
-
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
- if (pipe->plane_state &&
- (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
- pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
- upscaled = true;
-
- /*
- * Immediate flip can be set dynamically after enabling the plane.
- * We need to require support for immediate flip or underflow can be
- * intermittently experienced depending on peak b/w requirements.
- */
- pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
- pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.src.dcc_rate = 3;
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
+ int pipe_cnt;
- pipe_cnt++;
- }
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
- /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
- && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
- } else if (context->stream_count >= 3 && upscaled) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->stream)
- continue;
-
- if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
- pipe->stream->apply_seamless_boot_optimization) {
-
- if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
- context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
- break;
- }
- }
- }
+ DC_FP_START();
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ DC_FP_END();
return pipe_cnt;
}
@@ -1906,88 +1670,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
- if (bw_params->num_channels > 0)
- dcn3_14_soc.num_chans = bw_params->num_channels;
-
- ASSERT(dcn3_14_soc.num_chans);
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_14_soc.num_states - 1;
- }
-
- clock_tmp[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
- if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
- clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_14_soc.clock_limits[i] = clock_tmp[i];
- if (clk_table->num_entries)
- dcn3_14_soc.num_states = clk_table->num_entries;
- }
-
- if (max_dispclk_mhz) {
- dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ DC_FP_START();
+ dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2034,6 +1719,7 @@ static struct clock_source *dcn30_clock_source_create(
}
BREAK_TO_DEBUGGER();
+ kfree(clk_src);
return NULL;
}
@@ -2069,6 +1755,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
@@ -2132,8 +1819,6 @@ static bool dcn314_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
- dc->debug = debug_defaults_diags;
else
dc->debug = debug_defaults_diags;
// Init the vm_helper
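
Note how both dcn314_populate_dml_pipes_from_context() and dcn314_update_bw_bounding_box() above now reduce to thin wrappers that bracket a *_fpu helper with DC_FP_START()/DC_FP_END(), the floating-point DML math having moved under dml/dcn314/, presumably so the hard-float compile flags stripped from dcn314/Makefile earlier in this diff apply only there. A toy userspace sketch of the same bracketing pattern; fp_begin()/fp_end() are hypothetical guards standing in for the kernel macros:

#include <stdio.h>

/*
 * Hypothetical fp_begin()/fp_end() guards standing in for DC_FP_START() /
 * DC_FP_END(); in the kernel those save and restore the FPU context so the
 * floating-point DML code may run, here they only log entry and exit.
 */
static void fp_begin(void) { printf("FP context saved\n"); }
static void fp_end(void)   { printf("FP context restored\n"); }

/* Floating-point helper, analogous to the *_fpu functions under dml/dcn314/. */
static int populate_pipes_fpu(int pipe_count)
{
	double det_kb = 384.0;                  /* default per-pipe DET size, KB */

	return (int)(det_kb * pipe_count) / 64; /* e.g. convert to 64 KB segments */
}

/* The non-FP caller keeps only the bracketing, as in the hunk above. */
static int populate_pipes(int pipe_count)
{
	int segments;

	fp_begin();
	segments = populate_pipes_fpu(pipe_count);
	fp_end();

	return segments;
}

int main(void)
{
	printf("segments = %d\n", populate_pipes(2));
	return 0;
}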
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index c41108847ce0..0dd3153aa5c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -29,6 +29,9 @@
#include "core_types.h"
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
#define TO_DCN314_RES_POOL(pool)\
container_of(pool, struct dcn314_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 7463b12ae4a3..eebb42c9ddd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -886,7 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.psr_power_use_phy_fsm = 0,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index 39929fa67a51..22849eaa6f24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn315_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
struct dcn315_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index d56a212e065c..f4b52a35ad84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -886,7 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 0dc5a6c13ae7..aba6d634131b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn316_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
struct dcn316_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index a31c64b50410..e4daed44ef5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg32_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ // Don't program 0xF into the register field; it is not valid since
+ // the K1 / K2 fields are only 1 / 2 bits wide
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ return;
+
+ dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == cur_k1 && k2 == cur_k2)
+ return;
+
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
@@ -133,7 +186,7 @@ static void dccg32_set_dtbclk_p_src(
}
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
-void dccg32_set_dtbclk_dto(
+static void dccg32_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
@@ -208,7 +261,7 @@ static void dccg32_get_dccg_ref_freq(struct dccg *dccg,
return;
}
-void dccg32_set_dpstreamclk(
+static void dccg32_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -225,19 +278,19 @@ void dccg32_set_dpstreamclk(
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();
@@ -245,7 +298,7 @@ void dccg32_set_dpstreamclk(
}
}
-void dccg32_otg_add_pixel(struct dccg *dccg,
+static void dccg32_otg_add_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -254,7 +307,7 @@ void dccg32_otg_add_pixel(struct dccg *dccg,
OTG_ADD_PIXEL[otg_inst], 1);
}
-void dccg32_otg_drop_pixel(struct dccg *dccg,
+static void dccg32_otg_drop_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d6855d4f749b..fdae6aa89908 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -118,7 +118,7 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t dp_alt_mode_disable = 0;
@@ -133,7 +133,7 @@ bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
return is_usb_c_alt_mode;
}
-void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 26648ce772da..0e9dce414641 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -60,7 +60,7 @@ static void enc32_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc32_stream_encoder_dvi_set_stream_attribute(
+static void enc32_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
+ /* A read start level of 0 causes underflow/overflow and DIG_FIFO_ERROR = 1,
+ * so set it to 1/2 full (7) before reset, as suggested by the hardware team.
+ */
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
index f349cbe2a0f0..dcf12a0b031c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_cm_common.h"
/* Compute the maximum number of lines that we can fit in the line buffer */
-void dscl32_calc_lb_num_partitions(
+static void dscl32_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 99eb239bbc7b..f6d3da475835 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -68,7 +68,7 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
-static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
+void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
- /* Should never be hit, if it is we have an erroneous hw config*/
- ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
- + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+ if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+ /* This may happen during a seamless transition from ODM 2:1 to ODM 4:1 */
+ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+ hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+ hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+ }
}
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
@@ -140,7 +144,7 @@ static uint32_t convert_and_clamp(
return ret_val;
}
-static bool hubbub32_program_urgent_watermarks(
+bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -330,7 +334,7 @@ static bool hubbub32_program_urgent_watermarks(
return wm_pending;
}
-static bool hubbub32_program_stutter_watermarks(
+bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -476,7 +480,7 @@ static bool hubbub32_program_stutter_watermarks(
}
-static bool hubbub32_program_pstate_watermarks(
+bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -629,7 +633,7 @@ static bool hubbub32_program_pstate_watermarks(
}
-static bool hubbub32_program_usr_watermarks(
+bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -769,7 +773,7 @@ static bool hubbub32_program_watermarks(
}
/* Copy values from WM set A to all other sets */
-void hubbub32_init_watermarks(struct hubbub *hubbub)
+static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t reg;
@@ -820,7 +824,7 @@ void hubbub32_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}
-void hubbub32_wm_read_state(struct hubbub *hubbub,
+static void hubbub32_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
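For reference, the warning added in dcn32_program_det_size() above is a plain segment-budget check: the four DET allocations plus the compressed buffer must fit within the CRB. A standalone sketch with made-up segment counts (the real det*_size, compbuf_size_segments and crb_size_segs values come from the ASIC configuration, not from these numbers):

	/* Returns true when the configuration would trip the CRB warning above. */
	static bool crb_config_overflows(unsigned int det0, unsigned int det1,
					 unsigned int det2, unsigned int det3,
					 unsigned int compbuf, unsigned int crb_segs)
	{
		return det0 + det1 + det2 + det3 + compbuf > crb_segs;
	}

	/* Example: crb_config_overflows(5, 5, 5, 5, 2, 20) == true, which is the
	 * kind of transient state the ODM 2:1 -> 4:1 comment refers to.
	 */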
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index 3bae6e558971..cda94e0e31bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -161,6 +161,35 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+bool hubbub32_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_usr_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);
+
+void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+
+void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 6ec1c52535b9..2038cbda33f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes(
enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
attr->width, attr->color_format);
+ // Round cursor width up to the next multiple of 64
+ uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+ uint32_t cursor_height = attr->height;
+ uint32_t cursor_size = cursor_width * cursor_height;
+
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes(
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
- if (attr->width * attr->height * 4 > 16384)
+ switch (attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
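The same cursor-byte calculation now appears in hubp32_cursor_set_attributes() here, and again in dcn32_calculate_cab_allocation() and dcn32_update_mall_sel() further down. A minimal standalone sketch of the arithmetic (the 64-pixel rounding and the 16384-byte MALL threshold come from the hunks above; the example cursor dimensions are hypothetical):

	#include <stdbool.h>

	static unsigned int cursor_bytes(unsigned int width, unsigned int height,
					 unsigned int bytes_per_pixel)
	{
		/* Round the width up to the next multiple of 64, as the hardware fetches it. */
		unsigned int width_aligned = ((width + 63) / 64) * 64;

		return width_aligned * height * bytes_per_pixel;
	}

	static bool cursor_needs_mall(unsigned int width, unsigned int height,
				      unsigned int bytes_per_pixel)
	{
		return cursor_bytes(width, height, bytes_per_pixel) > 16384;
	}

	/* e.g. a 65x64 ARGB (4 bytes/pixel) cursor rounds up to 128x64 and needs
	 * 128 * 64 * 4 = 32768 bytes, so MALL is used; a 64x64 ARGB cursor is exactly
	 * 16384 bytes and is not.
	 */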
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d38341f68b17..a750343ca521 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -49,6 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "dcn32_resource.h"
#include "dc_link_dp.h"
#include "dmub/inc/dmub_subvp_state.h"
@@ -198,42 +199,6 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
return false;
}
-/* This function takes in the start address and surface size to be cached in CAB
- * and calculates the total number of cache lines required to store the surface.
- * The number of cache lines used for each surface is calculated independently of
- * one another. For example, if there is a primary surface(1), meta surface(2), and
- * cursor(3), this function should be called 3 times to calculate the number of cache
- * lines used for each of those surfaces.
- */
-static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_size, uint64_t start_address)
-{
- uint32_t lines_used = 1;
- uint32_t num_cached_bytes = 0;
- uint32_t remaining_size = 0;
- uint32_t cache_line_size = dc->caps.cache_line_size;
- uint32_t remainder = 0;
-
- /* 1. Calculate surface size minus the number of bytes stored
- * in the first cache line (all bytes in first cache line might
- * not be fully used).
- */
- div_u64_rem(start_address, cache_line_size, &remainder);
- num_cached_bytes = cache_line_size - remainder;
- remaining_size = surface_size - num_cached_bytes;
-
- /* 2. Calculate number of cache lines that will be fully used with
- * the remaining number of bytes to be stored.
- */
- lines_used += (remaining_size / cache_line_size);
-
- /* 3. Check if we need an extra line due to the remaining size not being
- * a multiple of CACHE_LINE_SIZE.
- */
- if (remaining_size % cache_line_size > 0)
- lines_used++;
-
- return lines_used;
-}
/* This function loops through every surface that needs to be cached in CAB for SS,
* and calculates the total number of ways required to store all surfaces (primary,
@@ -241,69 +206,116 @@ static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_size, uint64_t start_address)
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i, j;
+ uint8_t i;
+ int j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
- uint32_t surface_size = 0;
uint32_t cursor_size = 0;
- uint32_t cache_lines_used = 0;
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
- uint32_t num_ways = 0;
+ uint8_t num_ways = 0;
+ uint8_t bytes_per_pixel = 0;
+ uint8_t cursor_bpp = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint16_t mall_alloc_width_blk_aligned = 0;
+ uint16_t mall_alloc_height_blk_aligned = 0;
+ uint16_t num_mblks = 0;
+ uint32_t bytes_in_mall = 0;
+ uint32_t cache_lines_used = 0;
+ uint32_t cache_lines_per_plane = 0;
- for (i = 0; i < ctx->stream_count; i++) {
- stream = ctx->streams[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- // Don't include PSR surface in the total surface size for CAB allocation
- if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ if (!pipe->stream || !pipe->plane_state ||
+ pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED ||
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
- if (ctx->stream_status[i].plane_count == 0)
- continue;
+ bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
- // For each stream, loop through each plane to calculate the number of cache
- // lines required to store the surface in CAB
- for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
- plane = ctx->stream_status[i].plane_states[j];
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ *
+ * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
+ */
+ mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ *
+ * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
+ */
+ mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
- // Calculate total surface size
- surface_size = plane->plane_size.surface_pitch *
- plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
- // Convert surface size + starting address to number of cache lines required
- // (alignment accounted for)
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.addr.quad_part);
+ /* For DCC:
+ * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1)
+ */
+ if (pipe->plane_state->dcc.enable)
+ num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_width_blk_aligned * bytes_per_pixel +
+ (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
- if (plane->address.grph.meta_addr.quad_part) {
- // Meta surface
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.meta_addr.quad_part);
- }
- }
+ bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- // Include cursor size for CAB allocation
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
- cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ /* Cache lines used is total bytes / cache_line size. Add 2 for worst-case alignment
+ * (MALL is 64-byte aligned).
+ */
+ cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
+ cache_lines_used += cache_lines_per_plane;
+ }
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
+ // Include cursor size for CAB allocation
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp)
+ /* Find the cursor plane and use the exact size instead of
+ * using the max for calculation */
+
+ if (hubp->curs_attr.width > 0) {
+ // Round cursor width to next multiple of 64
+ cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+
+ switch (pipe->stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ cursor_bpp = 4;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ cursor_bpp = 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ cursor_bpp = 8;
+ break;
+ }
+
+ if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
+ /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
+ */
+ cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
+ DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
+ DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ }
break;
}
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.cursor_cache_addr.quad_part);
- }
}
// Convert number of cache lines required to number of ways
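The viewport alignment comments above describe the MALL block ("mblk") rounding; the key point is that alignment is applied to the viewport start and end separately, so an offset viewport can cover one more block than width / blk_width alone suggests. A standalone illustration (a block width of 128 mirrors DCN3_2_MBLK_WIDTH in the driver headers; the viewport numbers are hypothetical):

	/* FLOOR(start + length + blk - 1, blk) - FLOOR(start, blk), as in the comments above. */
	static unsigned int blk_aligned_span(unsigned int start, unsigned int length,
					     unsigned int blk)
	{
		return ((start + length + blk - 1) / blk) * blk - (start / blk) * blk;
	}

	/* e.g. blk_aligned_span(100, 1920, 128) == 2048, i.e. 16 blocks of 128 pixels,
	 * even though 1920 / 128 == 15: the x offset of 100 makes the viewport straddle
	 * one extra block boundary.
	 */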
@@ -314,13 +326,38 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ for (i = 0; i < ctx->stream_count; i++) {
+ stream = ctx->streams[i];
+ for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+ plane = ctx->stream_status[i].plane_states[j];
+
+ if (stream->cursor_position.enable && plane &&
+ dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
+ /* Cursor caching is not supported since it won't be on the same line.
+ * So we need an extra line to accommodate it. With large cursors and a single 4K monitor,
+ * this case triggers corruption. If we're at the edge, then don't trigger display refresh
+ * from MALL. We only need to cache the cursor if it is greater than 64x64 at 4 bpp.
+ */
+ num_ways++;
+ /* We only expect one cursor plane */
+ break;
+ }
+ }
+ }
+ if (dc->debug.force_mall_ss_num_ways > 0) {
+ num_ways = dc->debug.force_mall_ss_num_ways;
+ }
return num_ways;
}
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- uint8_t ways;
+ uint8_t ways, i;
+ int j;
+ bool mall_ss_unsupported = false;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -349,7 +386,24 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
* and configure HUBP's to fetch from MALL
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- if (ways <= dc->caps.cache_num_ways) {
+
+ /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
+ * or TMZ surface, don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
+
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
+ plane->address.tmz_surface) {
+ mall_ss_unsupported = true;
+ break;
+ }
+ }
+ if (mall_ss_unsupported)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -387,7 +441,6 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
{
-/*
int i;
bool enable_subvp = false;
@@ -405,7 +458,6 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
}
}
dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp);
-*/
}
/* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:
@@ -611,9 +663,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
- /* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
- BREAK_TO_DEBUGGER();
+ /* there are no ROM LUTs in OUTGAM */
+ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ BREAK_TO_DEBUGGER();
}
}
@@ -677,15 +729,40 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+ // Round cursor width up to the next multiple of 64
+ int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+ int cursor_height = hubp->curs_attr.height;
+ int cursor_size = cursor_width * cursor_height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
cache_cursor = true;
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
+ // MALL is not supported with Stereo3D or TMZ surfaces
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
- pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface ? 2 : 0,
cache_cursor);
}
}
@@ -795,6 +872,7 @@ void dcn32_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
@@ -1093,6 +1171,9 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return odm_combine_factor;
+
if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -1186,3 +1267,155 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return true;
return false;
}
+
+static void apply_symclk_on_tx_off_wa(struct dc_link *link)
+{
+ /* There are use cases where SYMCLK is referenced by OTG. For instance
+ * for a TMDS signal, OTG relies on SYMCLK even if the TX video output is off.
+ * However, the current link interface will power off the PHY when disabling link
+ * output. This will turn off SYMCLK generated by PHY. The workaround is
+ * to identify such case where SYMCLK is still in use by OTG when we
+ * power off PHY. When this is detected, we will temporarily power PHY
+ * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
+ * program_pix_clk interface. When OTG is disabled, we will then power
+ * off PHY by calling disable link output again.
+ *
+ * In future dcn generations, we plan to rework transmitter control
+ * interface so that we could have an option to set SYMCLK ON TX OFF
+ * state in one step without this workaround
+ */
+
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ if (link->phy_state.symclk_ref_cnts.otg > 0) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ break;
+ }
+ }
+ }
+}
+
+void dcn32_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+ apply_symclk_on_tx_off_wa(link);
+}
+
+/* For SubVP the main pipe can have a viewport position change
+ * without a full update. In this case we must also update the
+ * viewport positions for the phantom pipe accordingly.
+ */
+void dcn32_update_phantom_vp_position(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe)
+{
+ uint32_t i;
+ struct dc_plane_state *phantom_plane = phantom_pipe->plane_state;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+ pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+ if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
+
+ phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
+ phantom_plane->src_rect.y = pipe->plane_state->src_rect.y;
+ phantom_plane->clip_rect.x = pipe->plane_state->clip_rect.x;
+ phantom_plane->dst_rect.x = pipe->plane_state->dst_rect.x;
+ phantom_plane->dst_rect.y = pipe->plane_state->dst_rect.y;
+
+ phantom_pipe->plane_state->update_flags.bits.position_change = 1;
+ resource_build_scaling_params(phantom_pipe);
+ return;
+ }
+ }
+ }
+}
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst)
+{
+ uint32_t pwr_status = 0;
+
+ switch (dsc_inst) {
+ case 0: /* DSC0 */
+ REG_GET(DOMAIN16_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 1: /* DSC1 */
+
+ REG_GET(DOMAIN17_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 2: /* DSC2 */
+ REG_GET(DOMAIN18_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 3: /* DSC3 */
+ REG_GET(DOMAIN19_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ return pwr_status == 0 ? true : false;
+}
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst);
+
+ if (context->res_ctx.is_dsc_acquired[i]) {
+ if (!is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, true);
+ }
+ } else if (safe_to_disable) {
+ if (is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, false);
+ }
+ }
+ }
+}
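The apply_symclk_on_tx_off_wa() comment above boils down to a small PHY clock state machine: disabling the link output normally lands in SYMCLK_OFF_TX_OFF, but while OTG still holds a SYMCLK reference the PHY clock source is re-enabled and parked in SYMCLK_ON_TX_OFF until OTG is disabled. A minimal standalone sketch of that decision (the enum values mirror the driver's symclk_state names; the helper itself is hypothetical):

	enum symclk_state_sketch {
		SYMCLK_OFF_TX_OFF,
		SYMCLK_ON_TX_OFF,
		SYMCLK_ON_TX_ON,
	};

	static enum symclk_state_sketch
	symclk_state_after_disable(unsigned int otg_symclk_ref_cnt)
	{
		/* If OTG still consumes SYMCLK (e.g. TMDS timing still running),
		 * keep the PHY clock on with TX off; otherwise power everything off.
		 */
		return otg_symclk_ref_cnt > 0 ? SYMCLK_ON_TX_OFF : SYMCLK_OFF_TX_OFF;
	}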
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 083f3aeb54f0..ac3657a5b9ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -84,4 +84,20 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn32_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
+void dcn32_update_phantom_vp_position(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst);
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable);
+
#endif /* __DC_HWSS_DCN32_H__ */
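dcn32_update_dsc_pg(), declared above, walks every DSC instance and reconciles its power-gating state with whether the new dc_state has acquired it. The decision reduces to a small truth table; a standalone sketch of that rule (names are hypothetical, not the hwseq callbacks):

	#include <stdbool.h>

	/* Returns true if the DSC power state must change: ungate when acquired,
	 * gate when unused, but only gate when the caller says it is safe.
	 */
	static bool dsc_pg_needs_update(bool acquired, bool currently_ungated,
					bool safe_to_disable)
	{
		if (acquired)
			return !currently_ungated;             /* power it up */
		return safe_to_disable && currently_ungated;   /* power it down */
	}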
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index c279a25ea293..45a949ba6f3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -99,11 +99,17 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dcn32_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
@@ -133,6 +139,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn32_update_odm,
.dsc_pg_control = dcn32_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
index adf93cc8359c..41b0baf8e183 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
@@ -100,7 +100,7 @@ static void mmhubbub32_warmup_mcif(struct mcif_wb *mcif_wb,
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
-void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
+static void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
index 22355051f5f7..e460cf8d9041 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
@@ -90,7 +90,6 @@
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\
@@ -101,7 +100,6 @@
SF(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\
@@ -116,7 +114,6 @@
SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\
@@ -131,7 +128,6 @@
SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\
@@ -146,7 +142,6 @@
SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\
@@ -172,11 +167,6 @@
SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 357bd2461bc9..4edd0655965b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -701,7 +701,7 @@ static void mpc32_power_on_shaper_3dlut(
}
-bool mpc32_program_shaper(
+static bool mpc32_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -726,7 +726,7 @@ bool mpc32_program_shaper(
else
next_mode = LUT_RAM_A;
- mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A ? true:false, mpcc_id);
+ mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);
if (next_mode == LUT_RAM_A)
mpc32_program_shaper_luta_settings(mpc, params, mpcc_id);
@@ -897,7 +897,7 @@ static void mpc32_set_3dlut_mode(
}
-bool mpc32_program_3dlut(
+static bool mpc32_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index eff1f4e17689..ec3989d37086 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -156,7 +156,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
return true;
}
-void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
+static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -190,7 +190,7 @@ static void optc32_set_odm_bypass(struct timing_generator *optc,
optc1->opp_count = 1;
}
-void optc32_setup_manual_trigger(struct timing_generator *optc)
+static void optc32_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct dc *dc = optc->ctx->dc;
@@ -215,7 +215,7 @@ void optc32_setup_manual_trigger(struct timing_generator *optc)
}
}
-void optc32_set_drr(
+static void optc32_set_drr(
struct timing_generator *optc,
const struct drr_params *params)
{
@@ -281,7 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+ .set_drr = optc32_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9a26d24b579f..05de97ea855f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -90,29 +90,6 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"
-#define DCN_BASE__INST0_SEG1 0x000000C0
-#define DCN_BASE__INST0_SEG2 0x000034C0
-#define DCN_BASE__INST0_SEG3 0x00009000
-#define NBIO_BASE__INST0_SEG1 0x00000014
-
-#define MAX_INSTANCE 6
-#define MAX_SEGMENT 6
-
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-
#define DC_LOGGER_INIT(logger)
enum dcn32_clk_src_array_id {
@@ -131,79 +108,103 @@ enum dcn32_clk_src_array_id {
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
-#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
#define SRI(reg_name, block, id)\
- .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## temp_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
#define DCCG_SRII(reg_name, block, id)\
- .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
- reg ## reg_name ## _ ## block ## id
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
/* NBIO */
-#define NBIO_BASE_INNER(seg) \
- NBIO_BASE__INST0_SEG ## seg
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX0_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
#undef CTX
#define CTX ctx
#define REG(reg_name) \
- (DCN_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-static const struct bios_registers bios_regs = {
- NBIO_SR(BIOS_SCRATCH_3),
- NBIO_SR(BIOS_SCRATCH_6)
-};
+static struct bios_registers bios_regs;
-#define clk_src_regs(index, pllid)\
-[index] = {\
- CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
-}
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
-static const struct dce110_clk_src_regs clk_src_regs[] = {
- clk_src_regs(0, A),
- clk_src_regs(1, B),
- clk_src_regs(2, C),
- clk_src_regs(3, D),
- clk_src_regs(4, E)
-};
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
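The dcn32_resource.c changes above replace the compile-time const register tables (which baked in the DCN_BASE__INST0_SEG* constants) with empty static structs plus *_init() macros that write the offsets at create time from ctx->dcn_reg_offsets. A much-simplified standalone illustration of the pattern (the struct, macro and offset values here are invented for the sketch, not the DC macros):

	#include <stdint.h>

	struct sketch_regs {
		uint32_t CTRL;
		uint32_t STATUS;
	};

	/* Was: static const struct sketch_regs sketch_regs_arr[] = { ... }; */
	static struct sketch_regs sketch_regs_arr[2];

	/* Stand-in for ctx->dcn_reg_offsets[], discovered at runtime per ASIC. */
	static const uint32_t dcn_reg_offsets[] = { 0x000000C0, 0x000034C0 };

	#define REG_STRUCT sketch_regs_arr
	#define SKETCH_SR_ARR(field, id, seg, off) \
		(REG_STRUCT[id].field = dcn_reg_offsets[seg] + (off))

	/* Analogous to hubp_regs_init(0..3), optc_regs_init(0..3), etc. used later
	 * in this patch: fill the table when the objects are created.
	 */
	static void sketch_regs_init(void)
	{
		SKETCH_SR_ARR(CTRL,   0, 0, 0x10);
		SKETCH_SR_ARR(STATUS, 0, 1, 0x14);
		SKETCH_SR_ARR(CTRL,   1, 0, 0x20);
		SKETCH_SR_ARR(STATUS, 1, 1, 0x24);
	}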
@@ -213,17 +214,10 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define abm_regs(id)\
-[id] = {\
- ABM_DCN32_REG_LIST(id)\
-}
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
-static const struct dce_abm_registers abm_regs[] = {
- abm_regs(0),
- abm_regs(1),
- abm_regs(2),
- abm_regs(3),
-};
+static struct dce_abm_registers abm_regs[4];
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN32(__SHIFT)
@@ -233,18 +227,10 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN32(_MASK)
};
-#define audio_regs(id)\
-[id] = {\
- AUD_COMMON_REG_LIST(id)\
-}
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
-static const struct dce_audio_registers audio_regs[] = {
- audio_regs(0),
- audio_regs(1),
- audio_regs(2),
- audio_regs(3),
- audio_regs(4)
-};
+static struct dce_audio_registers audio_regs[5];
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
@@ -259,23 +245,10 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
-#define vpg_regs(id)\
-[id] = {\
- VPG_DCN3_REG_LIST(id)\
-}
+#define vpg_regs_init(id)\
+ VPG_DCN3_REG_LIST_RI(id)
-static const struct dcn30_vpg_registers vpg_regs[] = {
- vpg_regs(0),
- vpg_regs(1),
- vpg_regs(2),
- vpg_regs(3),
- vpg_regs(4),
- vpg_regs(5),
- vpg_regs(6),
- vpg_regs(7),
- vpg_regs(8),
- vpg_regs(9),
-};
+static struct dcn30_vpg_registers vpg_regs[10];
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
@@ -285,19 +258,10 @@ static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
-#define afmt_regs(id)\
-[id] = {\
- AFMT_DCN3_REG_LIST(id)\
-}
+#define afmt_regs_init(id)\
+ AFMT_DCN3_REG_LIST_RI(id)
-static const struct dcn30_afmt_registers afmt_regs[] = {
- afmt_regs(0),
- afmt_regs(1),
- afmt_regs(2),
- afmt_regs(3),
- afmt_regs(4),
- afmt_regs(5)
-};
+static struct dcn30_afmt_registers afmt_regs[6];
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
@@ -307,17 +271,10 @@ static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
-#define apg_regs(id)\
-[id] = {\
- APG_DCN31_REG_LIST(id)\
-}
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
-static const struct dcn31_apg_registers apg_regs[] = {
- apg_regs(0),
- apg_regs(1),
- apg_regs(2),
- apg_regs(3)
-};
+static struct dcn31_apg_registers apg_regs[4];
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
@@ -327,18 +284,10 @@ static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
-#define stream_enc_regs(id)\
-[id] = {\
- SE_DCN32_REG_LIST(id)\
-}
+#define stream_enc_regs_init(id)\
+ SE_DCN32_REG_LIST_RI(id)
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
- stream_enc_regs(0),
- stream_enc_regs(1),
- stream_enc_regs(2),
- stream_enc_regs(3),
- stream_enc_regs(4)
-};
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -349,46 +298,24 @@ static const struct dcn10_stream_encoder_mask se_mask = {
};
-#define aux_regs(id)\
-[id] = {\
- DCN2_AUX_REG_LIST(id)\
-}
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
-static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
- aux_regs(0),
- aux_regs(1),
- aux_regs(2),
- aux_regs(3),
- aux_regs(4)
-};
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
-#define hpd_regs(id)\
-[id] = {\
- HPD_REG_LIST(id)\
-}
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
-static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
- hpd_regs(0),
- hpd_regs(1),
- hpd_regs(2),
- hpd_regs(3),
- hpd_regs(4)
-};
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
-#define link_regs(id, phyid)\
-[id] = {\
- LE_DCN31_REG_LIST(id), \
- UNIPHY_DCN2_REG_LIST(phyid), \
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN31_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
/*DPCS_DCN31_REG_LIST(id),*/ \
-}
-static const struct dcn10_link_enc_registers link_enc_regs[] = {
- link_regs(0, A),
- link_regs(1, B),
- link_regs(2, C),
- link_regs(3, D),
- link_regs(4, E)
-};
+static struct dcn10_link_enc_registers link_enc_regs[5];
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
@@ -401,17 +328,10 @@ static const struct dcn10_link_enc_mask le_mask = {
//DPCS_DCN31_MASK_SH_LIST(_MASK)
};
-#define hpo_dp_stream_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
-}
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
-static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
- hpo_dp_stream_encoder_reg_list(0),
- hpo_dp_stream_encoder_reg_list(1),
- hpo_dp_stream_encoder_reg_list(2),
- hpo_dp_stream_encoder_reg_list(3),
-};
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
@@ -422,20 +342,14 @@ static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
};
-#define hpo_dp_link_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
- /*DCN3_1_RDPCSTX_REG_LIST(0),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(1),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(2),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(3),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(4)*/\
-}
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
-static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
- hpo_dp_link_encoder_reg_list(0),
- hpo_dp_link_encoder_reg_list(1),
-};
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
@@ -445,17 +359,10 @@ static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
-#define dpp_regs(id)\
-[id] = {\
- DPP_REG_LIST_DCN30_COMMON(id),\
-}
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN30_COMMON_RI(id)
-static const struct dcn3_dpp_registers dpp_regs[] = {
- dpp_regs(0),
- dpp_regs(1),
- dpp_regs(2),
- dpp_regs(3)
-};
+static struct dcn3_dpp_registers dpp_regs[4];
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
@@ -466,17 +373,10 @@ static const struct dcn3_dpp_mask tf_mask = {
};
-#define opp_regs(id)\
-[id] = {\
- OPP_REG_LIST_DCN30(id),\
-}
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN30_RI(id)
-static const struct dcn20_opp_registers opp_regs[] = {
- opp_regs(0),
- opp_regs(1),
- opp_regs(2),
- opp_regs(3)
-};
+static struct dcn20_opp_registers opp_regs[4];
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
@@ -486,21 +386,16 @@ static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
-#define aux_engine_regs(id)\
-[id] = {\
- AUX_COMMON_REG_LIST0(id), \
- .AUXN_IMPCAL = 0, \
- .AUXP_IMPCAL = 0, \
- .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
-}
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
+ )
-static const struct dce110_aux_registers aux_engine_regs[] = {
- aux_engine_regs(0),
- aux_engine_regs(1),
- aux_engine_regs(2),
- aux_engine_regs(3),
- aux_engine_regs(4)
-};
+static struct dce110_aux_registers aux_engine_regs[5];
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
@@ -510,15 +405,10 @@ static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
-#define dwbc_regs_dcn3(id)\
-[id] = {\
- DWBC_COMMON_REG_LIST_DCN30(id),\
-}
-
-static const struct dcn30_dwbc_registers dwbc30_regs[] = {
- dwbc_regs_dcn3(0),
-};
+static struct dcn30_dwbc_registers dwbc30_regs[1];
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -528,14 +418,10 @@ static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
-#define mcif_wb_regs_dcn3(id)\
-[id] = {\
- MCIF_WB_COMMON_REG_LIST_DCN32(id),\
-}
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)
-static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
- mcif_wb_regs_dcn3(0)
-};
+static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -545,17 +431,10 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define dsc_regsDCN20(id)\
-[id] = {\
- DSC_REG_LIST_DCN20(id)\
-}
+#define dsc_regsDCN20_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
-static const struct dcn20_dsc_registers dsc_regs[] = {
- dsc_regsDCN20(0),
- dsc_regsDCN20(1),
- dsc_regsDCN20(2),
- dsc_regsDCN20(3)
-};
+static struct dcn20_dsc_registers dsc_regs[4];
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
@@ -565,17 +444,18 @@ static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-static const struct dcn30_mpc_registers mpc_regs = {
- MPC_REG_LIST_DCN3_2(0),
- MPC_REG_LIST_DCN3_2(1),
- MPC_REG_LIST_DCN3_2(2),
- MPC_REG_LIST_DCN3_2(3),
- MPC_OUT_MUX_REG_LIST_DCN3_0(0),
- MPC_OUT_MUX_REG_LIST_DCN3_0(1),
- MPC_OUT_MUX_REG_LIST_DCN3_0(2),
- MPC_OUT_MUX_REG_LIST_DCN3_0(3),
- MPC_DWB_MUX_REG_LIST_DCN3_0(0),
-};
+static struct dcn30_mpc_registers mpc_regs;
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -585,19 +465,10 @@ static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define optc_regs(id)\
-[id] = {OPTC_COMMON_REG_LIST_DCN3_2(id)}
-
-//#ifdef DIAGS_BUILD
-//static struct dcn_optc_registers optc_regs[] = {
-//#else
-static const struct dcn_optc_registers optc_regs[] = {
-//#endif
- optc_regs(0),
- optc_regs(1),
- optc_regs(2),
- optc_regs(3)
-};
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_2_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -607,17 +478,10 @@ static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define hubp_regs(id)\
-[id] = {\
- HUBP_REG_LIST_DCN32(id)\
-}
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN32_RI(id)
-static const struct dcn_hubp2_registers hubp_regs[] = {
- hubp_regs(0),
- hubp_regs(1),
- hubp_regs(2),
- hubp_regs(3)
-};
+static struct dcn_hubp2_registers hubp_regs[4];
static const struct dcn_hubp2_shift hubp_shift = {
@@ -627,9 +491,10 @@ static const struct dcn_hubp2_shift hubp_shift = {
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dcn_hubbub_registers hubbub_reg = {
- HUBBUB_REG_LIST_DCN32(0)
-};
+
+static struct dcn_hubbub_registers hubbub_reg;
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN32_RI(0)
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
@@ -639,9 +504,10 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dccg_registers dccg_regs = {
- DCCG_REG_LIST_DCN32()
-};
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN32_RI()
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN32(__SHIFT)
@@ -714,9 +580,10 @@ static const struct dccg_mask dccg_mask = {
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)
-static const struct dce_hwseq_registers hwseq_reg = {
- HWSEQ_DCN32_REG_LIST()
-};
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN32_REG_LIST()
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -759,29 +626,10 @@ static const struct dce_hwseq_shift hwseq_shift = {
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
-#define vmid_regs(id)\
-[id] = {\
- DCN20_VMID_REG_LIST(id)\
-}
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
-static const struct dcn_vmid_registers vmid_regs[] = {
- vmid_regs(0),
- vmid_regs(1),
- vmid_regs(2),
- vmid_regs(3),
- vmid_regs(4),
- vmid_regs(5),
- vmid_regs(6),
- vmid_regs(7),
- vmid_regs(8),
- vmid_regs(9),
- vmid_regs(10),
- vmid_regs(11),
- vmid_regs(12),
- vmid_regs(13),
- vmid_regs(14),
- vmid_regs(15)
-};
+static struct dcn_vmid_registers vmid_regs[16];
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
@@ -867,10 +715,15 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+ /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
+ .alloc_extra_way_for_cursor = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -903,6 +756,14 @@ static struct dce_aux *dcn32_aux_engine_create(
if (!aux_engine)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
@@ -912,15 +773,10 @@ static struct dce_aux *dcn32_aux_engine_create(
return &aux_engine->base;
}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -940,6 +796,14 @@ static struct dce_i2c_hw *dcn32_i2c_hw_create(
if (!dce_i2c_hw)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -979,6 +843,29 @@ static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx)
if (!hubbub2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
hubbub32_construct(hubbub2, ctx,
&hubbub_reg,
&hubbub_shift,
@@ -1011,6 +898,13 @@ static struct hubp *dcn32_hubp_create(
if (!hubp2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
@@ -1036,6 +930,13 @@ static struct dpp *dcn32_dpp_create(
if (!dpp3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
if (dpp32_construct(dpp3, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp3->base;
@@ -1056,6 +957,10 @@ static struct mpc *dcn32_mpc_create(
if (!mpc30)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
dcn32_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
@@ -1077,6 +982,13 @@ static struct output_pixel_processor *dcn32_opp_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
dcn20_opp_construct(opp2, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp2->base;
@@ -1093,6 +1005,13 @@ static struct timing_generator *dcn32_timing_generator_create(
if (!tgn10)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
@@ -1127,6 +1046,30 @@ static struct link_encoder *dcn32_link_encoder_create(
if (!enc20)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
dcn32_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
@@ -1156,7 +1099,7 @@ static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
- generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
@@ -1164,6 +1107,15 @@ static void read_dce_straps(
static struct audio *dcn32_create_audio(
struct dc_context *ctx, unsigned int inst)
{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
@@ -1177,6 +1129,19 @@ static struct vpg *dcn32_vpg_create(
if (!vpg3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
@@ -1194,6 +1159,15 @@ static struct afmt *dcn32_afmt_create(
if (!afmt3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
@@ -1211,6 +1185,13 @@ static struct apg *dcn31_apg_create(
if (!apg31)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
@@ -1247,6 +1228,14 @@ static struct stream_encoder *dcn32_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1297,6 +1286,13 @@ static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
@@ -1314,6 +1310,11 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
@@ -1326,6 +1327,10 @@ static struct dce_hwseq *dcn32_hwseq_create(
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
@@ -1517,6 +1522,10 @@ static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT dwbc30_regs
+ dwbc_regs_dcn3_init(0);
+
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
@@ -1542,6 +1551,10 @@ static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb30_regs
+ mcif_wb_regs_dcn3_init(0);
+
dcn32_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
@@ -1564,6 +1577,13 @@ static struct display_stream_compressor *dcn32_dsc_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN20_init(0),
+ dsc_regsDCN20_init(1),
+ dsc_regsDCN20_init(2),
+ dsc_regsDCN20_init(3);
+
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
dsc->max_image_width = 6016;
@@ -1701,13 +1721,26 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
{
int i;
bool removed_pipe = false;
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_stream_state *phantom_stream = NULL;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// build scaling params for phantom pipes
if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ phantom_plane = pipe->plane_state;
+ phantom_stream = pipe->stream;
+
dc_rem_all_planes_for_stream(dc, pipe->stream, context);
dc_remove_stream_from_ctx(dc, context, pipe->stream);
+
+ /* Ref count is incremented on allocation and also when added to the context.
+			 * Therefore we must call release for the phantom plane and stream once
+ * they are removed from the ctx to finally decrement the refcount to 0 to free.
+ */
+ dc_plane_state_release(phantom_plane);
+ dc_stream_release(phantom_stream);
+
removed_pipe = true;
}
@@ -1807,12 +1840,6 @@ validate_out:
return out;
}
-
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1821,12 +1848,37 @@ int dcn32_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
- bool subvp_in_use = false, is_pipe_split_expected[MAX_PIPES];
- int plane_count = 0;
+ bool subvp_in_use = false;
+ uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ /* Determine whether we will apply ODM 2to1 policy:
+	 * Applies to a single display where the number of planes is less than 3.
+	 * For the 3-plane case (2 MPO planes), we will not set the policy for the MPO pipes.
+ *
+ * Apply pipe split policy first so we can predict the pipe split correctly
+ * (dcn32_predict_pipe_split).
+ */
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (context->stream_count == 1 &&
+ context->stream_status[0].plane_count <= 1 &&
+ !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
+ pipe_cnt++;
+ }
+
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1879,59 +1931,18 @@ int dcn32_populate_dml_pipes_from_context(
}
}
- /* Calculate the number of planes we have so we can determine
- * whether to apply ODM 2to1 policy or not
- */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
-
DC_FP_START();
- is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, pipes[i].pipe, i);
+ is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
}
- /* Determine whether we will apply ODM 2to1 policy
- * Applies to single display and where the number of planes is less than 3
- * For 3 plane case ( 2 MPO planes ), we will not set the policy for the MPO pipes
- */
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
- res_ctx->pipe_ctx[i].stream->odm_2to1_policy_applied = false;
- if (context->stream_count == 1 && timing->dsc_cfg.num_slices_h != 1) {
- if (dc->debug.enable_single_display_2to1_odm_policy) {
- if (!((plane_count > 2) && pipe->top_pipe))
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
- }
- res_ctx->pipe_ctx[i].stream->odm_2to1_policy_applied = true;
- }
- pipe_cnt++;
- }
-
/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
* the DET available for each pipe). Use the DET override input to maintain our driver
* policy.
*/
- if (pipe_cnt == 1 && !is_pipe_split_expected[0]) {
- pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
- if (pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (!is_dual_plane(pipe->plane_state->format)) {
- pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
- pipes[0].pipe.src.unbounded_req_mode = true;
- if (pipe->plane_state->src_rect.width >= 5120 &&
- pipe->plane_state->src_rect.height >= 2880)
- pipes[0].pipe.src.det_size_override = 320; // 5K or higher
- }
- }
- } else
- dcn32_determine_det_override(context, pipes, is_pipe_split_expected, dc->res_pool->pipe_count);
+ dcn32_set_det_allocations(dc, context, pipes);
// In general cases we want to keep the dram clock change requirement
// (prefer configs that support MCLK switch). Only override to false
@@ -2002,6 +2013,28 @@ static bool dcn32_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
+ #undef REG_STRUCT
+ #define REG_STRUCT bios_regs
+ bios_regs_init();
+
+ #undef REG_STRUCT
+ #define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+ #undef REG_STRUCT
+ #define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+ #undef REG_STRUCT
+ #define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
DC_FP_START();
ctx->dc_bios->regs = &bios_regs;
@@ -2039,7 +2072,8 @@ static bool dcn32_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+	/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
@@ -2051,6 +2085,7 @@ static bool dcn32_resource_construct(
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 1e7e6201c880..55945cca2260 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -28,8 +28,16 @@
#include "core_types.h"
+#define DCN3_2_DEFAULT_DET_SIZE 256
+#define DCN3_2_MAX_DET_SIZE 1152
+#define DCN3_2_MIN_DET_SIZE 128
+#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
#define DCN3_2_DET_SEG_SIZE 64
#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
+#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -106,7 +114,1167 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_stream_state *stream,
struct pipe_ctx *head_pipe);
-void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
- bool *is_pipe_split_expected, int pipe_cnt);
+void dcn32_determine_det_override(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+
+void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+/* definitions for run-time init of reg offsets */
+
+/* CLK SRC */
+#define CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) \
+ ( \
+ SRI_ARR_ALPHABET(PIXCLK_RESYNC_CNTL, PHYPLL, index, pllid), \
+ SRII_ARR_2(PHASE, DP_DTO, 0, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 1, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 2, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 3, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 0, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 1, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 2, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 3, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 0, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 1, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 2, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index) \
+ )
+
+/* ABM */
+#define ABM_DCN32_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI_ARR(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI_ARR(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI_ARR(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI_ARR(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id) \
+ )
+
+/* Audio */
+#define AUD_COMMON_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id), \
+ SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, id), \
+ SR_ARR(DCCG_AUDIO_DTO_SOURCE, id), SR_ARR(DCCG_AUDIO_DTO0_MODULE, id), \
+ SR_ARR(DCCG_AUDIO_DTO0_PHASE, id), SR_ARR(DCCG_AUDIO_DTO1_MODULE, id), \
+ SR_ARR(DCCG_AUDIO_DTO1_PHASE, id) \
+ )
+
+/* VPG */
+
+#define VPG_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \
+ SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
+ SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id) \
+ )
+
+/* AFMT */
+#define AFMT_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_60958_0, AFMT, id), SRI_ARR(AFMT_60958_1, AFMT, id), \
+ SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id) \
+ )
+
+/* APG */
+#define APG_DCN31_REG_LIST_RI(id) \
+ (\
+ SRI_ARR(APG_CONTROL, APG, id), SRI_ARR(APG_CONTROL2, APG, id), \
+ SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id) \
+ )
+
+/* Stream encoder */
+#define SE_DCN32_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \
+ SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_GC, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
+ SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_ACR_32_0, DIG, id), SRI_ARR(HDMI_ACR_32_1, DIG, id), \
+ SRI_ARR(HDMI_ACR_44_0, DIG, id), SRI_ARR(HDMI_ACR_44_1, DIG, id), \
+ SRI_ARR(HDMI_ACR_48_0, DIG, id), SRI_ARR(HDMI_ACR_48_1, DIG, id), \
+ SRI_ARR(DP_DB_CNTL, DP, id), SRI_ARR(DP_MSA_MISC, DP, id), \
+ SRI_ARR(DP_MSA_VBID_MISC, DP, id), SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \
+ SRI_ARR(DP_SEC_CNTL1, DP, id), SRI_ARR(DP_SEC_CNTL2, DP, id), \
+ SRI_ARR(DP_SEC_CNTL5, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \
+ SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \
+ SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
+ SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \
+ SRI_ARR(DP_SEC_TIMESTAMP, DP, id), SRI_ARR(DP_DSC_CNTL, DP, id), \
+ SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(DP_SEC_FRAMING4, DP, id), SRI_ARR(DP_GSP11_CNTL, DP, id), \
+ SRI_ARR(DME_CONTROL, DME, id), \
+ SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(DIG_FE_CNTL, DIG, id), SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \
+ SRI_ARR(DIG_FIFO_CTRL0, DIG, id) \
+ )
+
+/* Aux regs */
+
+#define AUX_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+ SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id) \
+ )
+
+#define DCN2_AUX_REG_LIST_RI(id) \
+ ( \
+ AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id) \
+ )
+
+/* HPD */
+#define HPD_REG_LIST_RI(id) SRI_ARR(DC_HPD_CONTROL, HPD, id)
+
+/* Link encoder */
+#define LE_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DIG_BE_CNTL, DIG, id), SRI_ARR(DIG_BE_EN_CNTL, DIG, id), \
+ SRI_ARR(TMDS_CTL_BITS, DIG, id), \
+ SRI_ARR(TMDS_DCBALANCER_CONTROL, DIG, id), SRI_ARR(DP_CONFIG, DP, id), \
+ SRI_ARR(DP_DPHY_CNTL, DP, id), SRI_ARR(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_SCRAM_CNTL, DP, id), SRI_ARR(DP_DPHY_SYM0, DP, id), \
+ SRI_ARR(DP_DPHY_SYM1, DP, id), SRI_ARR(DP_DPHY_SYM2, DP, id), \
+ SRI_ARR(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI_ARR(DP_LINK_CNTL, DP, id), SRI_ARR(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI_ARR(DP_MSE_SAT0, DP, id), SRI_ARR(DP_MSE_SAT1, DP, id), \
+ SRI_ARR(DP_MSE_SAT2, DP, id), SRI_ARR(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI_ARR(DP_SEC_CNTL, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_FAST_TRAINING, DP, id), SRI_ARR(DP_SEC_CNTL1, DP, id), \
+ SRI_ARR(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) \
+ )
+
+#define LE_DCN31_REG_LIST_RI(id) \
+ ( \
+ LE_DCN3_REG_LIST_RI(id), SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR_ARR(DIO_LINKA_CNTL, id), SR_ARR(DIO_LINKB_CNTL, id), \
+ SR_ARR(DIO_LINKC_CNTL, id), SR_ARR(DIO_LINKD_CNTL, id), \
+ SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id) \
+ )
+
+#define UNIPHY_DCN2_REG_LIST_RI(id, phyid) \
+ ( \
+ SRI_ARR_ALPHABET(CLOCK_ENABLE, SYMCLK, id, phyid), \
+ SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid) \
+ )
+
+/* HPO DP stream encoder */
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) \
+ ( \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL0, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL1, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL2, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL3, id), \
+ SRI_ARR(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) \
+ )
+
+/* HPO DP link encoder regs */
+#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
+ SRI_ARR(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) \
+ )
+
+/* DPP */
+#define DPP_REG_LIST_DCN30_COMMON_RI(id) \
+ ( \
+ SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \
+ SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \
+ SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_DATA, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_REGION_0_1, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_REGION_32_33, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_REGION_0_1, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_REGION_32_33, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C11_C12, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C13_C14, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C21_C22, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C23_C24, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C31_C32, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C33_C34, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C11_C12, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C13_C14, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C21_C22, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C23_C24, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C31_C32, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C33_C34, CM, id), \
+ SRI_ARR(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI_ARR(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \
+ SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \
+ SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \
+ SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \
+ SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI_ARR(DSCL_2TAP_CONTROL, DSCL, id), SRI_ARR(MPC_SIZE, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI_ARR(RECOUT_START, DSCL, id), SRI_ARR(RECOUT_SIZE, DSCL, id), \
+ SRI_ARR(PRE_DEALPHA, CNVC_CFG, id), SRI_ARR(PRE_REALPHA, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_MODE, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_C11_C12, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_C33_C34, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_B_C11_C12, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_B_C33_C34, CNVC_CFG, id), \
+ SRI_ARR(CM_POST_CSC_CONTROL, CM, id), \
+ SRI_ARR(CM_POST_CSC_C11_C12, CM, id), \
+ SRI_ARR(CM_POST_CSC_C33_C34, CM, id), \
+ SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \
+ SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \
+ SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \
+ SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
+ SRI_ARR(DPP_CONTROL, DPP_TOP, id), SRI_ARR(CM_HDR_MULT_COEF, CM, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(ALPHA_2BIT_LUT, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_R, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_G, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_B, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_R, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_G, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_B, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_RED, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_GREEN, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_BLUE, CNVC_CFG, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \
+ SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \
+ SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id) \
+ )
+
+/* OPP */
+#define OPP_REG_LIST_DCN_RI(id) \
+ ( \
+ SRI_ARR(FMT_BIT_DEPTH_CONTROL, FMT, id), SRI_ARR(FMT_CONTROL, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI_ARR(FMT_CLAMP_CNTL, FMT, id), \
+ SRI_ARR(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI_ARR(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
+ SRI_ARR(OPPBUF_CONTROL, OPPBUF, id), \
+ SRI_ARR(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
+ SRI_ARR(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \
+ SRI_ARR(OPP_PIPE_CONTROL, OPP_PIPE, id) \
+ )
+
+#define OPP_REG_LIST_DCN10_RI(id) OPP_REG_LIST_DCN_RI(id)
+
+#define OPP_DPG_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DPG_CONTROL, DPG, id), SRI_ARR(DPG_DIMENSIONS, DPG, id), \
+ SRI_ARR(DPG_OFFSET_SEGMENT, DPG, id), SRI_ARR(DPG_COLOUR_B_CB, DPG, id), \
+ SRI_ARR(DPG_COLOUR_G_Y, DPG, id), SRI_ARR(DPG_COLOUR_R_CR, DPG, id), \
+ SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id) \
+ )
+
+#define OPP_REG_LIST_DCN30_RI(id) \
+ ( \
+ OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \
+ SRI_ARR(FMT_422_CONTROL, FMT, id) \
+ )
+
+/* Aux engine regs */
+#define AUX_COMMON_REG_LIST0_RI(id) \
+ ( \
+ SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_SW_DATA, DP_AUX, id), SRI_ARR(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
+ SRI_ARR(AUX_SW_STATUS, DP_AUX, id) \
+ )
+
+/* DWBC */
+#define DWBC_COMMON_REG_LIST_DCN30_RI(id) \
+ ( \
+ SR_ARR(DWB_ENABLE_CLK_CTRL, id), SR_ARR(DWB_MEM_PWR_CTRL, id), \
+ SR_ARR(FC_MODE_CTRL, id), SR_ARR(FC_FLOW_CTRL, id), \
+ SR_ARR(FC_WINDOW_START, id), SR_ARR(FC_WINDOW_SIZE, id), \
+ SR_ARR(FC_SOURCE_SIZE, id), SR_ARR(DWB_UPDATE_CTRL, id), \
+ SR_ARR(DWB_CRC_CTRL, id), SR_ARR(DWB_CRC_MASK_R_G, id), \
+ SR_ARR(DWB_CRC_MASK_B_A, id), SR_ARR(DWB_CRC_VAL_R_G, id), \
+ SR_ARR(DWB_CRC_VAL_B_A, id), SR_ARR(DWB_OUT_CTRL, id), \
+ SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, id), \
+ SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT, id), \
+ SR_ARR(DWB_HOST_READ_CONTROL, id), SR_ARR(DWB_SOFT_RESET, id), \
+ SR_ARR(DWB_HDR_MULT_COEF, id), SR_ARR(DWB_GAMUT_REMAP_MODE, id), \
+ SR_ARR(DWB_GAMUT_REMAP_COEF_FORMAT, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C11_C12, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C13_C14, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C21_C22, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C23_C24, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C31_C32, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C33_C34, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C11_C12, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C13_C14, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C21_C22, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C23_C24, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C31_C32, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C33_C34, id), SR_ARR(DWB_OGAM_CONTROL, id), \
+ SR_ARR(DWB_OGAM_LUT_INDEX, id), SR_ARR(DWB_OGAM_LUT_DATA, id), \
+ SR_ARR(DWB_OGAM_LUT_CONTROL, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMA_OFFSET_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_OFFSET_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_0_1, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_2_3, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_4_5, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_6_7, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_8_9, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_10_11, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_12_13, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_14_15, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_16_17, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_18_19, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_20_21, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_22_23, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_24_25, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_26_27, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_28_29, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_30_31, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_32_33, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMB_OFFSET_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_OFFSET_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_0_1, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_2_3, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_4_5, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_6_7, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_8_9, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_10_11, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_12_13, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_14_15, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_16_17, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_18_19, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_20_21, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_22_23, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_24_25, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_26_27, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_28_29, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_30_31, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id) \
+ )
+
+/* MCIF */
+
+#define MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst) \
+ ( \
+ SRI2_ARR(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_PITCH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst), \
+ SRI2_ARR(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_WATERMARK, MMHUBBUB, inst), \
+ SRI2_ARR(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst) \
+ )
+
+/* DSC */
+
+#define DSC_REG_LIST_DCN20_RI(id) \
+ ( \
+ SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \
+ SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \
+ SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \
+ SRI_ARR(DSCC_STATUS, DSCC, id), \
+ SRI_ARR(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG0, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG1, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG2, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG3, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG4, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG5, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG6, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG7, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG8, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG9, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG10, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG11, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG12, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG13, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG14, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG15, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG16, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG17, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG18, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG19, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG20, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG21, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG22, DSCC, id), \
+ SRI_ARR(DSCC_MEM_POWER_CONTROL, DSCC, id), \
+ SRI_ARR(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_MAX_ABS_ERROR0, DSCC, id), \
+ SRI_ARR(DSCC_MAX_ABS_ERROR1, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
+ SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \
+ SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) \
+ )
+
+/* MPC */
+
+#define MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst) \
+ SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst)
+
+#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \
+ ( \
+ SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) \
+ )
+
+#define MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst) \
+ ( \
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(CSC_MODE, MPC_OUT, inst), \
+ SRII(CSC_C11_C12_A, MPC_OUT, inst), SRII(CSC_C33_C34_A, MPC_OUT, inst), \
+ SRII(CSC_C11_C12_B, MPC_OUT, inst), SRII(CSC_C33_C34_B, MPC_OUT, inst), \
+ SRII(DENORM_CONTROL, MPC_OUT, inst), \
+ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst), \
+ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT) \
+ )
+
+#define MPC_COMMON_REG_LIST_DCN1_0_RI(inst) \
+ ( \
+ SRII(MPCC_TOP_SEL, MPCC, inst), SRII(MPCC_BOT_SEL, MPCC, inst), \
+ SRII(MPCC_CONTROL, MPCC, inst), SRII(MPCC_STATUS, MPCC, inst), \
+ SRII(MPCC_OPP_ID, MPCC, inst), SRII(MPCC_BG_G_Y, MPCC, inst), \
+ SRII(MPCC_BG_R_CR, MPCC, inst), SRII(MPCC_BG_B_CB, MPCC, inst), \
+ SRII(MPCC_SM_CONTROL, MPCC, inst), \
+ SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) \
+ )
+
+#define MPC_REG_LIST_DCN3_0_RI(inst) \
+ ( \
+ MPC_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(MPCC_TOP_GAIN, MPCC, inst), \
+ SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst), \
+ SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst), \
+ SRII(MPCC_MEM_PWR_CTRL, MPCC, inst), \
+ SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) \
+ )
+
+#define MPC_REG_LIST_DCN3_2_RI(inst) \
+ MPC_REG_LIST_DCN3_0_RI(inst),\
+ SRII(MPCC_MOVABLE_CM_LOCATION_CONTROL, MPCC, inst),\
+ SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst),\
+  SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), /* TODO: may need to add other 3DLUT regs */\
+ SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst)
+
+/* OPTC */
+
+#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \
+ ( \
+ SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \
+ SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \
+ SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \
+ SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst), \
+ SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_H_TOTAL, OTG, inst), \
+ SRI_ARR(OTG_H_BLANK_START_END, OTG, inst), \
+ SRI_ARR(OTG_H_SYNC_A, OTG, inst), SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst), \
+ SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst), SRI_ARR(OTG_V_TOTAL, OTG, inst), \
+ SRI_ARR(OTG_V_BLANK_START_END, OTG, inst), \
+ SRI_ARR(OTG_V_SYNC_A, OTG, inst), SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst), \
+ SRI_ARR(OTG_CONTROL, OTG, inst), SRI_ARR(OTG_STEREO_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_STEREO_STATUS, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_TRIGA_CNTL, OTG, inst), \
+ SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst), \
+ SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst), \
+ SRI_ARR(OTG_STATUS, OTG, inst), SRI_ARR(OTG_STATUS_POSITION, OTG, inst), \
+ SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst), \
+ SRI_ARR(OTG_M_CONST_DTO0, OTG, inst), \
+ SRI_ARR(OTG_M_CONST_DTO1, OTG, inst), \
+ SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst), \
+ SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \
+ SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \
+ SRI_ARR(CONTROL, VTG, inst), SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_GSL_CONTROL, OTG, inst), SRI_ARR(OTG_CRC_CNTL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst), \
+ SRI_ARR(OTG_CRC0_DATA_B, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst), \
+ SR_ARR(GSL_SOURCE_SELECT, inst), \
+ SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
+ SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst), \
+ SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst), \
+ SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst), \
+ SRI_ARR(OTG_DSC_START_POSITION, OTG, inst), \
+ SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst), \
+ SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst), \
+ SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
+ SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst) \
+ )
+
+/* HUBP */
+
+#define HUBP_REG_LIST_DCN_VM_RI(id) \
+ ( \
+ SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN_RI(id) \
+ ( \
+ SRI_ARR(DCHUBP_CNTL, HUBP, id), SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \
+ SRI_ARR(HUBPREQ_DEBUG, HUBP, id), SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_SURFACE_PITCH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_PITCH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_FLIP_CONTROL, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_CONTROL, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id), \
+ SRI_ARR(HUBPRET_CONTROL, HUBPRET, id), \
+ SRI_ARR(HUBPRET_READ_LINE_STATUS, HUBPRET, id), \
+ SRI_ARR(DCN_EXPANSION_MODE, HUBPREQ, id), \
+ SRI_ARR(DCHUBP_REQ_SIZE_CONFIG, HUBP, id), \
+ SRI_ARR(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id), \
+ SRI_ARR(BLANK_OFFSET_0, HUBPREQ, id), \
+ SRI_ARR(BLANK_OFFSET_1, HUBPREQ, id), \
+ SRI_ARR(DST_DIMENSIONS, HUBPREQ, id), \
+ SRI_ARR(DST_AFTER_SCALER, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(PER_LINE_DELIVERY_PRE, HUBPREQ, id), \
+ SRI_ARR(PER_LINE_DELIVERY, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_6, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_7, HUBPREQ, id), \
+ SRI_ARR(DCN_TTU_QOS_WM, HUBPREQ, id), \
+ SRI_ARR(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF0_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF0_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF1_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id) \
+ )
+
+#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
+ SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
+ SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id), \
+ SRI_ARR(CURSOR_SETTINGS, HUBPREQ, id), \
+ SRI_ARR(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI_ARR(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI_ARR(CURSOR_SIZE, CURSOR0_, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(CURSOR_POSITION, CURSOR0_, id), \
+ SRI_ARR(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI_ARR(CURSOR_DST_OFFSET, CURSOR0_, id), \
+ SRI_ARR(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI_ARR(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
+ SRI_ARR(DMDATA_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_SW_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_QOS_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_SW_DATA, CURSOR0_, id), \
+ SRI_ARR(DMDATA_STATUS, CURSOR0_, id), \
+ SRI_ARR(FLIP_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
+ SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN21_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN2_COMMON_RI(id), SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN30_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN32_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN30_RI(id), SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
+ SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
+ SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id) \
+ )
+
+/* HUBBUB */
+
+#define HUBBUB_REG_LIST_DCN32_RI(id) \
+ ( \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \
+ SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \
+ SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \
+ SR(DCN_VM_AGP_BASE), HUBBUB_SR_WATERMARK_REG_LIST(), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D), SR(DCHUBBUB_DET0_CTRL), \
+ SR(DCHUBBUB_DET1_CTRL), SR(DCHUBBUB_DET2_CTRL), SR(DCHUBBUB_DET3_CTRL), \
+ SR(DCHUBBUB_COMPBUF_CTRL), SR(COMPBUF_RESERVED_SPACE), \
+ SR(DCHUBBUB_DEBUG_CTRL_0), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D), \
+ SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
+ SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS) \
+ )
+
+/* DCCG */
+
+#define DCCG_REG_LIST_DCN32_RI() \
+ ( \
+ SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \
+ DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \
+ DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \
+ SR(PHYASYMCLK_CLOCK_CNTL), SR(PHYBSYMCLK_CLOCK_CNTL), \
+ SR(PHYCSYMCLK_CLOCK_CNTL), SR(PHYDSYMCLK_CLOCK_CNTL), \
+ SR(PHYESYMCLK_CLOCK_CNTL), SR(DPSTREAMCLK_CNTL), SR(HDMISTREAMCLK_CNTL), \
+ SR(SYMCLK32_SE_CNTL), SR(SYMCLK32_LE_CNTL), \
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1), \
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3), \
+ DCCG_SRII(MODULO, DTBCLK_DTO, 0), DCCG_SRII(MODULO, DTBCLK_DTO, 1), \
+ DCCG_SRII(MODULO, DTBCLK_DTO, 2), DCCG_SRII(MODULO, DTBCLK_DTO, 3), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
+ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
+ SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE) \
+ )
+
+/* VMID */
+#define DCN20_VMID_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(CNTL, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id) \
+ )
+
+/* I2C HW */
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR_I2C(SETUP, DC_I2C_DDC, id), SRI_ARR_I2C(SPEED, DC_I2C_DDC, id), \
+ SRI_ARR_I2C(HW_STATUS, DC_I2C_DDC, id), \
+ SR_ARR_I2C(DC_I2C_ARBITRATION, id), \
+ SR_ARR_I2C(DC_I2C_CONTROL, id), SR_ARR_I2C(DC_I2C_SW_STATUS, id), \
+ SR_ARR_I2C(DC_I2C_TRANSACTION0, id), SR_ARR_I2C(DC_I2C_TRANSACTION1, id),\
+ SR_ARR_I2C(DC_I2C_TRANSACTION2, id), SR_ARR_I2C(DC_I2C_TRANSACTION3, id),\
+ SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id) \
+ )
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \
+ ( \
+ I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR_I2C(DIO_MEM_PWR_CTRL, id), \
+ SR_ARR_I2C(DIO_MEM_PWR_STATUS, id) \
+ )
#endif /* _DCN32_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b3f8503cea9c..a2a70a1572b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -28,6 +28,11 @@
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
@@ -46,7 +51,6 @@
uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
{
uint32_t num_ways = 0;
- uint32_t mall_region_pixels = 0;
uint32_t bytes_per_pixel = 0;
uint32_t cache_lines_used = 0;
uint32_t lines_per_way = 0;
@@ -54,28 +58,77 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
uint32_t bytes_in_mall = 0;
uint32_t num_mblks = 0;
uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0;
+ uint32_t i = 0, j = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint32_t full_vp_width_blk_aligned = 0;
+ uint32_t full_vp_height_blk_aligned = 0;
+ uint32_t mall_alloc_width_blk_aligned = 0;
+ uint32_t mall_alloc_height_blk_aligned = 0;
+ uint16_t full_vp_height = 0;
+ bool subvp_in_use = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // Find the phantom pipes
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+ /* Find the phantom pipes.
+ * - For the pipe-split case we must also walk the bottom and next-ODM
+ * pipes, otherwise only half the viewport size is counted.
+ */
+ if (pipe->stream && pipe->plane_state &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct pipe_ctx *main_pipe = NULL;
+
+ subvp_in_use = true;
+ /* Get full viewport height from main pipe (required for MBLK calculation) */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ main_pipe = &context->res_ctx.pipe_ctx[j];
+ if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+ full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+ break;
+ }
+ }
+
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ */
+ full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) +
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ */
+ full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ full_vp_height + mblk_height - 1) / mblk_height * mblk_height) +
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+ /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+ mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
- // For bytes required in MALL, calculate based on number of MBlks required
- num_mblks = (mall_region_pixels * bytes_per_pixel +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+ /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+ mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+ mblk_height * mblk_height + mblk_height;
+
+ /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+ * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+ * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+ * (Should be divisible, but round up if not)
+ */
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
// (MALL is 64-byte aligned)
cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
- // For DCC we must cache the meat surface, so double cache lines required
+ /* For DCC, add metadata overhead: roughly 1/256 of the surface cache lines */
if (pipe->plane_state->dcc.enable)
- cache_lines_per_plane *= 2;
+ cache_lines_per_plane = cache_lines_per_plane + (cache_lines_per_plane / 256) + 1;
cache_lines_used += cache_lines_per_plane;
}
}
@@ -86,6 +139,9 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
+ num_ways = dc->debug.force_subvp_num_ways;
+
return num_ways;
}
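A minimal, self-contained sketch of the MBLK-aligned sizing the in-code comments above describe. All constants (MBLK_WIDTH, MBLK_HEIGHT, MBLK_SIZE_BYTES, LINES_PER_WAY) are illustrative placeholders, not the DCN3.2 values, and the function is not driver code:

/*
 * Illustrative sketch only: count the MBLKs a phantom viewport spans,
 * convert to bytes, then to cache lines and ways, adding the ~1/256
 * DCC metadata overhead used above.
 */
#include <stdint.h>

#define MBLK_WIDTH       128u    /* placeholder block width in pixels   */
#define MBLK_HEIGHT      128u    /* placeholder block height in lines   */
#define MBLK_SIZE_BYTES  65536u  /* placeholder bytes per MBLK          */
#define CACHE_LINE_BYTES 64u
#define LINES_PER_WAY    1024u   /* placeholder cache lines per way     */

uint32_t ways_for_phantom_plane(uint32_t vp_x, uint32_t vp_width,
				uint32_t subvp_height, int dcc_enabled)
{
	/* blocks spanned horizontally: FLOOR(x + w + bw - 1, bw) - FLOOR(x, bw) */
	uint32_t w_aligned = (vp_x + vp_width + MBLK_WIDTH - 1) / MBLK_WIDTH * MBLK_WIDTH -
			     vp_x / MBLK_WIDTH * MBLK_WIDTH;
	/* allocation height: CEILING(subvp_height - 1, bh) + bh */
	uint32_t h_aligned = (subvp_height - 1 + MBLK_HEIGHT - 1) / MBLK_HEIGHT * MBLK_HEIGHT +
			     MBLK_HEIGHT;
	uint32_t num_mblks = (w_aligned / MBLK_WIDTH) * (h_aligned / MBLK_HEIGHT);
	/* +2 cache lines for worst-case 64-byte alignment slack */
	uint32_t lines = num_mblks * MBLK_SIZE_BYTES / CACHE_LINE_BYTES + 2;

	if (dcc_enabled)
		lines += lines / 256 + 1;	/* DCC metadata overhead */

	return (lines + LINES_PER_WAY - 1) / LINES_PER_WAY;	/* round up to ways */
}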
@@ -144,7 +200,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
- continue;
+ return false;
if (!pipe->plane_state)
return false;
@@ -177,36 +233,133 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
-void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
- bool *is_pipe_split_expected, int pipe_cnt)
+/**
+ * *******************************************************************************************
+ * dcn32_determine_det_override: Determine DET allocation for each pipe
+ *
+ * This function determines how much DET to allocate for each pipe. The total number of
+ * DET segments will be split equally among each of the streams, and after that the DET
+ * segments per stream will be split equally among the planes for the given stream.
+ *
+ * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
+ * number of DET for that given plane will be split among the pipes driving that plane.
+ *
+ *
+ * High level algorithm:
+ * 1. Split total DET among number of streams
+ * 2. For each stream, split DET among the planes
+ * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
+ * among those pipes.
+ * 4. Assign the DET override to the DML pipes.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [in]: pipes: Array of DML pipes
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_determine_det_override(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
{
- int i, j, count, stream_segments, pipe_segments[MAX_PIPES];
+ uint32_t i, j, k;
+ uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
+ uint8_t pipe_counted[MAX_PIPES] = {0};
+ uint8_t pipe_cnt = 0;
+ struct dc_plane_state *current_plane = NULL;
+ uint8_t stream_count = 0;
+
+ for (i = 0; i < context->stream_count; i++) {
+ /* Don't count SubVP streams for DET allocation */
+ if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+ stream_count++;
+ }
+ }
- if (context->stream_count > 0) {
- stream_segments = 18 / context->stream_count;
+ if (stream_count > 0) {
+ stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
- count = 0;
- for (j = 0; j < pipe_cnt; j++) {
- if (context->res_ctx.pipe_ctx[j].stream == context->streams[i]) {
- count++;
- if (is_pipe_split_expected[j])
- count++;
+ if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+ if (context->stream_status[i].plane_count > 0)
+ plane_segments = stream_segments / context->stream_status[i].plane_count;
+ else
+ plane_segments = stream_segments;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_plane_count = 0;
+ if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
+ pipe_counted[j] != 1) {
+ /* Note: pipe_plane_count indicates the number of pipes to be used for a
+ * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
+ * pipe_plane_count = 2 means 2:1 split, etc.
+ */
+ pipe_plane_count++;
+ pipe_counted[j] = 1;
+ current_plane = context->res_ctx.pipe_ctx[j].plane_state;
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
+ context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
+ pipe_plane_count++;
+ pipe_counted[k] = 1;
+ }
+ }
+
+ pipe_segments[j] = plane_segments / pipe_plane_count;
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
+ context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
+ pipe_segments[k] = plane_segments / pipe_plane_count;
+ }
+ }
}
}
- pipe_segments[i] = stream_segments / count;
}
- for (i = 0; i < pipe_cnt; i++) {
- pipes[i].pipe.src.det_size_override = 0;
- for (j = 0; j < context->stream_count; j++) {
- if (context->res_ctx.pipe_ctx[i].stream == context->streams[j]) {
- pipes[i].pipe.src.det_size_override = pipe_segments[j] * DCN3_2_DET_SEG_SIZE;
- break;
- }
- }
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
+ pipe_cnt++;
}
} else {
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
}
}
+
+void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ pipe_cnt++;
+ }
+
+ /* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
+ * the DET available for each pipe). Use the DET override input to maintain our driver
+ * policy.
+ */
+ if (pipe_cnt == 1) {
+ pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
+ if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (!is_dual_plane(pipe->plane_state->format)) {
+ pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ if (pipe->plane_state->src_rect.width >= 5120 &&
+ pipe->plane_state->src_rect.height >= 2880)
+ pipes[0].pipe.src.det_size_override = 320; // 5K or higher
+ }
+ }
+ } else
+ dcn32_determine_det_override(dc, context, pipes);
+}
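As a rough illustration of the DET policy implemented above (split the 18 segments across non-phantom streams, then across each stream's planes, then across the pipes driving a split plane), here is a standalone sketch. The struct, TOTAL_DET_SEGMENTS and the array parameters are assumptions for the example, not the driver's types:

/*
 * Standalone sketch of the DET split policy; types and the 18-segment
 * total are illustrative assumptions, not the driver's structures.
 */
#include <stdint.h>

#define TOTAL_DET_SEGMENTS 18

struct example_pipe {
	int stream_id;	/* -1 if the pipe is unused                        */
	int plane_id;	/* pipes driving the same plane share this id      */
};

void split_det(const struct example_pipe *pipes, int pipe_count,
	       int stream_count, const int planes_per_stream[],
	       int out_segments[])
{
	int stream_segments;

	if (stream_count <= 0)
		return;
	stream_segments = TOTAL_DET_SEGMENTS / stream_count;

	for (int i = 0; i < pipe_count; i++) {
		int s = pipes[i].stream_id;
		int plane_segments, split = 0;

		if (s < 0) {
			out_segments[i] = 0;
			continue;
		}
		plane_segments = planes_per_stream[s] > 0 ?
			stream_segments / planes_per_stream[s] : stream_segments;
		/* count how many pipes drive this plane (pipe split / ODM) */
		for (int j = 0; j < pipe_count; j++)
			if (pipes[j].stream_id == s &&
			    pipes[j].plane_id == pipes[i].plane_id)
				split++;
		out_segments[i] = plane_segments / split;
	}
}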
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 8157e40d2c7e..aed0f689cbbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -93,31 +93,6 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#define DCN_BASE__INST0_SEG1 0x000000C0
-#define DCN_BASE__INST0_SEG2 0x000034C0
-#define DCN_BASE__INST0_SEG3 0x00009000
-#define NBIO_BASE__INST0_SEG1 0x00000014
-
-#define MAX_INSTANCE 8
-#define MAX_SEGMENT 6
-
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-
#define DC_LOGGER_INIT(logger)
#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
@@ -138,78 +113,102 @@ enum dcn321_clk_src_array_id {
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
-#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SR_ARR_INIT(reg_name, id, value)\
+ REG_STRUCT[id].reg_name = value
#define SRI(reg_name, block, id)\
- .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ reg ## reg_name
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## temp_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
#define DCCG_SRII(reg_name, block, id)\
- .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
- reg ## reg_name ## _ ## block ## id
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
/* NBIO */
-#define NBIO_BASE_INNER(seg) \
- NBIO_BASE__INST0_SEG ## seg
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX0_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
#define CTX ctx
#define REG(reg_name) \
- (DCN_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-static const struct bios_registers bios_regs = {
- NBIO_SR(BIOS_SCRATCH_3),
- NBIO_SR(BIOS_SCRATCH_6)
-};
+static struct bios_registers bios_regs;
-#define clk_src_regs(index, pllid)\
-[index] = {\
- CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
-}
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
-static const struct dce110_clk_src_regs clk_src_regs[] = {
- clk_src_regs(0, A),
- clk_src_regs(1, B),
- clk_src_regs(2, C),
- clk_src_regs(3, D),
- clk_src_regs(4, E)
-};
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -219,17 +218,10 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define abm_regs(id)\
-[id] = {\
- ABM_DCN32_REG_LIST(id)\
-}
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
-static const struct dce_abm_registers abm_regs[] = {
- abm_regs(0),
- abm_regs(1),
- abm_regs(2),
- abm_regs(3),
-};
+static struct dce_abm_registers abm_regs[4];
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN32(__SHIFT)
@@ -239,18 +231,10 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN32(_MASK)
};
-#define audio_regs(id)\
-[id] = {\
- AUD_COMMON_REG_LIST(id)\
-}
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
-static const struct dce_audio_registers audio_regs[] = {
- audio_regs(0),
- audio_regs(1),
- audio_regs(2),
- audio_regs(3),
- audio_regs(4)
-};
+static struct dce_audio_registers audio_regs[5];
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
@@ -265,23 +249,10 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
-#define vpg_regs(id)\
-[id] = {\
- VPG_DCN3_REG_LIST(id)\
-}
+#define vpg_regs_init(id)\
+ VPG_DCN3_REG_LIST_RI(id)
-static const struct dcn30_vpg_registers vpg_regs[] = {
- vpg_regs(0),
- vpg_regs(1),
- vpg_regs(2),
- vpg_regs(3),
- vpg_regs(4),
- vpg_regs(5),
- vpg_regs(6),
- vpg_regs(7),
- vpg_regs(8),
- vpg_regs(9),
-};
+static struct dcn30_vpg_registers vpg_regs[10];
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
@@ -291,19 +262,10 @@ static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
-#define afmt_regs(id)\
-[id] = {\
- AFMT_DCN3_REG_LIST(id)\
-}
+#define afmt_regs_init(id)\
+ AFMT_DCN3_REG_LIST_RI(id)
-static const struct dcn30_afmt_registers afmt_regs[] = {
- afmt_regs(0),
- afmt_regs(1),
- afmt_regs(2),
- afmt_regs(3),
- afmt_regs(4),
- afmt_regs(5)
-};
+static struct dcn30_afmt_registers afmt_regs[6];
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
@@ -313,17 +275,10 @@ static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
-#define apg_regs(id)\
-[id] = {\
- APG_DCN31_REG_LIST(id)\
-}
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
-static const struct dcn31_apg_registers apg_regs[] = {
- apg_regs(0),
- apg_regs(1),
- apg_regs(2),
- apg_regs(3)
-};
+static struct dcn31_apg_registers apg_regs[4];
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
@@ -333,18 +288,10 @@ static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
-#define stream_enc_regs(id)\
-[id] = {\
- SE_DCN32_REG_LIST(id)\
-}
+#define stream_enc_regs_init(id)\
+ SE_DCN32_REG_LIST_RI(id)
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
- stream_enc_regs(0),
- stream_enc_regs(1),
- stream_enc_regs(2),
- stream_enc_regs(3),
- stream_enc_regs(4)
-};
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -355,46 +302,24 @@ static const struct dcn10_stream_encoder_mask se_mask = {
};
-#define aux_regs(id)\
-[id] = {\
- DCN2_AUX_REG_LIST(id)\
-}
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
-static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
- aux_regs(0),
- aux_regs(1),
- aux_regs(2),
- aux_regs(3),
- aux_regs(4)
-};
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
-#define hpd_regs(id)\
-[id] = {\
- HPD_REG_LIST(id)\
-}
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
-static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
- hpd_regs(0),
- hpd_regs(1),
- hpd_regs(2),
- hpd_regs(3),
- hpd_regs(4)
-};
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
-#define link_regs(id, phyid)\
-[id] = {\
- LE_DCN31_REG_LIST(id), \
- UNIPHY_DCN2_REG_LIST(phyid), \
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN31_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
/*DPCS_DCN31_REG_LIST(id),*/ \
-}
-static const struct dcn10_link_enc_registers link_enc_regs[] = {
- link_regs(0, A),
- link_regs(1, B),
- link_regs(2, C),
- link_regs(3, D),
- link_regs(4, E)
-};
+static struct dcn10_link_enc_registers link_enc_regs[5];
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
@@ -406,17 +331,10 @@ static const struct dcn10_link_enc_mask le_mask = {
// DPCS_DCN31_MASK_SH_LIST(_MASK)
};
-#define hpo_dp_stream_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
-}
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
-static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
- hpo_dp_stream_encoder_reg_list(0),
- hpo_dp_stream_encoder_reg_list(1),
- hpo_dp_stream_encoder_reg_list(2),
- hpo_dp_stream_encoder_reg_list(3),
-};
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
@@ -427,20 +345,14 @@ static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
};
-#define hpo_dp_link_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
- /*DCN3_1_RDPCSTX_REG_LIST(0),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(1),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(2),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(3),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(4)*/\
-}
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
-static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
- hpo_dp_link_encoder_reg_list(0),
- hpo_dp_link_encoder_reg_list(1),
-};
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
@@ -450,17 +362,10 @@ static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
-#define dpp_regs(id)\
-[id] = {\
- DPP_REG_LIST_DCN30_COMMON(id),\
-}
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN30_COMMON_RI(id)
-static const struct dcn3_dpp_registers dpp_regs[] = {
- dpp_regs(0),
- dpp_regs(1),
- dpp_regs(2),
- dpp_regs(3)
-};
+static struct dcn3_dpp_registers dpp_regs[4];
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
@@ -471,17 +376,10 @@ static const struct dcn3_dpp_mask tf_mask = {
};
-#define opp_regs(id)\
-[id] = {\
- OPP_REG_LIST_DCN30(id),\
-}
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN30_RI(id)
-static const struct dcn20_opp_registers opp_regs[] = {
- opp_regs(0),
- opp_regs(1),
- opp_regs(2),
- opp_regs(3)
-};
+static struct dcn20_opp_registers opp_regs[4];
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
@@ -491,21 +389,15 @@ static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
-#define aux_engine_regs(id)\
-[id] = {\
- AUX_COMMON_REG_LIST0(id), \
- .AUXN_IMPCAL = 0, \
- .AUXP_IMPCAL = 0, \
- .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
-}
+#define aux_engine_regs_init(id) \
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
+ )
-static const struct dce110_aux_registers aux_engine_regs[] = {
- aux_engine_regs(0),
- aux_engine_regs(1),
- aux_engine_regs(2),
- aux_engine_regs(3),
- aux_engine_regs(4)
-};
+static struct dce110_aux_registers aux_engine_regs[5];
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
@@ -515,15 +407,10 @@ static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
-#define dwbc_regs_dcn3(id)\
-[id] = {\
- DWBC_COMMON_REG_LIST_DCN30(id),\
-}
-
-static const struct dcn30_dwbc_registers dwbc30_regs[] = {
- dwbc_regs_dcn3(0),
-};
+static struct dcn30_dwbc_registers dwbc30_regs[1];
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -533,14 +420,10 @@ static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
-#define mcif_wb_regs_dcn3(id)\
-[id] = {\
- MCIF_WB_COMMON_REG_LIST_DCN32(id),\
-}
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)
-static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
- mcif_wb_regs_dcn3(0)
-};
+static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -550,17 +433,10 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define dsc_regsDCN20(id)\
-[id] = {\
- DSC_REG_LIST_DCN20(id)\
-}
+#define dsc_regsDCN20_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
-static const struct dcn20_dsc_registers dsc_regs[] = {
- dsc_regsDCN20(0),
- dsc_regsDCN20(1),
- dsc_regsDCN20(2),
- dsc_regsDCN20(3)
-};
+static struct dcn20_dsc_registers dsc_regs[4];
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
@@ -570,17 +446,17 @@ static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-static const struct dcn30_mpc_registers mpc_regs = {
- MPC_REG_LIST_DCN3_2(0),
- MPC_REG_LIST_DCN3_2(1),
- MPC_REG_LIST_DCN3_2(2),
- MPC_REG_LIST_DCN3_2(3),
- MPC_OUT_MUX_REG_LIST_DCN3_0(0),
- MPC_OUT_MUX_REG_LIST_DCN3_0(1),
- MPC_OUT_MUX_REG_LIST_DCN3_0(2),
- MPC_OUT_MUX_REG_LIST_DCN3_0(3),
- MPC_DWB_MUX_REG_LIST_DCN3_0(0),
-};
+static struct dcn30_mpc_registers mpc_regs;
+#define dcn_mpc_regs_init()\
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -590,15 +466,10 @@ static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define optc_regs(id)\
-[id] = {OPTC_COMMON_REG_LIST_DCN3_2(id)}
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_2_RI(id)
-static const struct dcn_optc_registers optc_regs[] = {
- optc_regs(0),
- optc_regs(1),
- optc_regs(2),
- optc_regs(3)
-};
+static struct dcn_optc_registers optc_regs[4];
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -608,18 +479,10 @@ static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define hubp_regs(id)\
-[id] = {\
- HUBP_REG_LIST_DCN32(id)\
-}
-
-static const struct dcn_hubp2_registers hubp_regs[] = {
- hubp_regs(0),
- hubp_regs(1),
- hubp_regs(2),
- hubp_regs(3)
-};
+#define hubp_regs_init(id) \
+ HUBP_REG_LIST_DCN32_RI(id)
+static struct dcn_hubp2_registers hubp_regs[4];
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN32(__SHIFT)
@@ -628,9 +491,10 @@ static const struct dcn_hubp2_shift hubp_shift = {
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dcn_hubbub_registers hubbub_reg = {
- HUBBUB_REG_LIST_DCN32(0)
-};
+
+static struct dcn_hubbub_registers hubbub_reg;
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN32_RI(0)
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
@@ -640,9 +504,10 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dccg_registers dccg_regs = {
- DCCG_REG_LIST_DCN32()
-};
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN32_RI()
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN32(__SHIFT)
@@ -715,9 +580,10 @@ static const struct dccg_mask dccg_mask = {
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)
-static const struct dce_hwseq_registers hwseq_reg = {
- HWSEQ_DCN32_REG_LIST()
-};
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN32_REG_LIST()
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -760,29 +626,10 @@ static const struct dce_hwseq_shift hwseq_shift = {
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
-#define vmid_regs(id)\
-[id] = {\
- DCN20_VMID_REG_LIST(id)\
-}
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
-static const struct dcn_vmid_registers vmid_regs[] = {
- vmid_regs(0),
- vmid_regs(1),
- vmid_regs(2),
- vmid_regs(3),
- vmid_regs(4),
- vmid_regs(5),
- vmid_regs(6),
- vmid_regs(7),
- vmid_regs(8),
- vmid_regs(9),
- vmid_regs(10),
- vmid_regs(11),
- vmid_regs(12),
- vmid_regs(13),
- vmid_regs(14),
- vmid_regs(15)
-};
+static struct dcn_vmid_registers vmid_regs[16];
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
@@ -868,10 +715,15 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+ /* must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
+ .alloc_extra_way_for_cursor = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -905,6 +757,14 @@ static struct dce_aux *dcn321_aux_engine_create(
if (!aux_engine)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
@@ -914,15 +774,10 @@ static struct dce_aux *dcn321_aux_engine_create(
return &aux_engine->base;
}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -942,6 +797,14 @@ static struct dce_i2c_hw *dcn321_i2c_hw_create(
if (!dce_i2c_hw)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -981,6 +844,29 @@ static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx)
if (!hubbub2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
hubbub32_construct(hubbub2, ctx,
&hubbub_reg,
&hubbub_shift,
@@ -1013,6 +899,13 @@ static struct hubp *dcn321_hubp_create(
if (!hubp2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
@@ -1038,6 +931,13 @@ static struct dpp *dcn321_dpp_create(
if (!dpp3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
if (dpp32_construct(dpp3, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp3->base;
@@ -1058,6 +958,10 @@ static struct mpc *dcn321_mpc_create(
if (!mpc30)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
dcn32_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
@@ -1079,6 +983,13 @@ static struct output_pixel_processor *dcn321_opp_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
dcn20_opp_construct(opp2, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp2->base;
@@ -1095,6 +1006,13 @@ static struct timing_generator *dcn321_timing_generator_create(
if (!tgn10)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
@@ -1129,6 +1047,30 @@ static struct link_encoder *dcn321_link_encoder_create(
if (!enc20)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
dcn321_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
@@ -1145,7 +1087,7 @@ static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
- generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
@@ -1153,6 +1095,15 @@ static void read_dce_straps(
static struct audio *dcn321_create_audio(
struct dc_context *ctx, unsigned int inst)
{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
@@ -1166,6 +1117,19 @@ static struct vpg *dcn321_vpg_create(
if (!vpg3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
@@ -1183,6 +1147,15 @@ static struct afmt *dcn321_afmt_create(
if (!afmt3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
@@ -1200,6 +1173,13 @@ static struct apg *dcn321_apg_create(
if (!apg31)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
@@ -1236,6 +1216,14 @@ static struct stream_encoder *dcn321_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1286,6 +1274,13 @@ static struct hpo_dp_stream_encoder *dcn321_hpo_dp_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
@@ -1303,6 +1298,11 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
@@ -1315,6 +1315,10 @@ static struct dce_hwseq *dcn321_hwseq_create(
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
@@ -1505,6 +1509,10 @@ static bool dcn321_dwbc_create(struct dc_context *ctx, struct resource_pool *poo
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT dwbc30_regs
+ dwbc_regs_dcn3_init(0);
+
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
@@ -1530,6 +1538,10 @@ static bool dcn321_mmhubbub_create(struct dc_context *ctx, struct resource_pool
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb30_regs
+ mcif_wb_regs_dcn3_init(0);
+
dcn32_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
@@ -1552,6 +1564,13 @@ static struct display_stream_compressor *dcn321_dsc_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN20_init(0),
+ dsc_regsDCN20_init(1),
+ dsc_regsDCN20_init(2),
+ dsc_regsDCN20_init(3);
+
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
dsc->max_image_width = 6016;
@@ -1616,6 +1635,30 @@ static bool dcn321_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn321;
@@ -1651,7 +1694,8 @@ static bool dcn321_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
@@ -1662,8 +1706,9 @@ static bool dcn321_resource_construct(
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
-
+ dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
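The register lists in this file now expand into assignments rather than designated initializers: each *_init macro is a comma expression that writes into whatever REG_STRUCT currently names, so base offsets read from ctx->dcn_reg_offsets at runtime can be folded in. A cut-down illustration of the pattern follows; the register names, offsets and struct are made up for the example and are not the driver's definitions:

/*
 * Cut-down illustration of the REG_STRUCT pattern; everything here is
 * a made-up example, not driver code.
 */
#include <stdint.h>

static uint32_t ex_dcn_reg_offsets[4] = { 0x0, 0xC0, 0x34C0, 0x9000 };

struct ex_hubp_regs {
	uint32_t DCHUBP_CNTL;
	uint32_t HUBP_CLK_CNTL;
};

#define regDCHUBP_CNTL            0x01
#define regDCHUBP_CNTL_BASE_IDX   2
#define regHUBP_CLK_CNTL          0x02
#define regHUBP_CLK_CNTL_BASE_IDX 2

#define EX_BASE(seg) ex_dcn_reg_offsets[seg]
/* assigns into whatever REG_STRUCT names at the point of expansion */
#define EX_SRI_ARR(reg_name, id) \
	REG_STRUCT[id].reg_name = EX_BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name

#define ex_hubp_regs_init(id) \
	( EX_SRI_ARR(DCHUBP_CNTL, id), EX_SRI_ARR(HUBP_CLK_CNTL, id) )

static struct ex_hubp_regs ex_hubp_reg_array[4];

void ex_init_hubp_regs(void)
{
#undef  REG_STRUCT
#define REG_STRUCT ex_hubp_reg_array
	ex_hubp_regs_init(0),
	ex_hubp_regs_init(1),
	ex_hubp_regs_init(2),
	ex_hubp_regs_init(3);	/* one comma-joined expression statement */
}

The same trick explains the repeated "#undef REG_STRUCT / #define REG_STRUCT <array>" blocks in the create() functions above, and why the I2C variants index with id-1 (hardware engines are numbered from 1 while the array is 0-based).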
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index fb6a2d7b6470..e3e5c39895a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -33,7 +33,7 @@
#include "dc_types.h"
#include "dc.h"
-struct dp_mst_stream_allocation_table;
+struct dc_dp_mst_stream_allocation_table;
struct aux_payload;
enum aux_return_code_type;
@@ -77,7 +77,7 @@ void dm_helpers_dp_update_branch_info(
bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dc_context *ctx,
const struct dc_stream_state *stream,
- struct dp_mst_stream_allocation_table *proposed_table,
+ struct dc_dp_mst_stream_allocation_table *proposed_table,
bool enable);
/*
@@ -171,7 +171,13 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd, 0x0 = Status_Busy
#define IS_SMU_TIMEOUT(result) \
(result == 0x0)
-
+void dm_helpers_init_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *config,
+ struct dc_sink *sink);
+void dm_helpers_override_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *config);
int dm_helper_dmub_aux_transfer_sync(
struct dc_context *ctx,
const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 359f6e9a1da0..cb81ed2fbd53 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +84,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -124,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
DML += dcn31/dcn31_fpu.o
DML += dcn32/dcn32_fpu.o
@@ -131,6 +133,7 @@ DML += dcn321/dcn321_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
index 6ca288fb5fb9..3aa8dd0acd5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
@@ -25,12 +25,11 @@
#include "dm_services.h"
#include "bw_fixed.h"
+#define MAX_I64 \
+ ((int64_t)((1ULL << 63) - 1))
#define MIN_I64 \
- (int64_t)(-(1LL << 63))
-
-#define MAX_I64 \
- (int64_t)((1ULL << 63) - 1)
+ (-MAX_I64 - 1)
#define FRACTIONAL_PART_MASK \
((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
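The reordering above matters because the old MIN_I64 computed -(1LL << 63): shifting a 1 into the sign bit of a signed 64-bit value is undefined behaviour in C. Deriving the minimum from the maximum avoids the signed shift entirely. A minimal check, assuming only standard <stdint.h>:

/* Sketch of the rationale; EX_* names are local to this example. */
#include <stdint.h>
#include <assert.h>

#define EX_MAX_I64 ((int64_t)((1ULL << 63) - 1))  /* shift done on unsigned */
#define EX_MIN_I64 (-EX_MAX_I64 - 1)              /* no signed overflow     */

void ex_check_limits(void)
{
	assert(EX_MAX_I64 == INT64_MAX);
	assert(EX_MIN_I64 == INT64_MIN);
}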
@@ -49,6 +48,7 @@ static uint64_t abs_i64(int64_t arg)
struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
{
struct bw_fixed res;
+
ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
return res;
@@ -78,14 +78,12 @@ struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
{
uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
- do
- {
+ do {
remainder <<= 1;
res_value <<= 1;
- if (remainder >= arg2_value)
- {
+ if (remainder >= arg2_value) {
res_value |= 1;
remainder -= arg2_value;
}
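The loop being restyled in this hunk is a plain shift-and-subtract long division that generates the fractional bits of the fixed-point quotient one at a time. A self-contained restatement of the same idea, with FRAC_BITS as an assumed placeholder for BW_FIXED_BITS_PER_FRACTIONAL_PART:

/* Restatement of the shift-and-subtract loop; FRAC_BITS is a placeholder. */
#include <stdint.h>

#define FRAC_BITS 24

int64_t ex_frac_divide(uint64_t numerator, uint64_t denominator)
{
	uint64_t res = numerator / denominator;       /* integer part          */
	uint64_t remainder = numerator % denominator; /* carried into fraction */

	for (uint32_t i = 0; i < FRAC_BITS; i++) {
		remainder <<= 1;
		res <<= 1;
		if (remainder >= denominator) {
			res |= 1;              /* this fractional bit is set */
			remainder -= denominator;
		}
	}
	return (int64_t)res;
}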
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
index 41284e263325..288d22a16cf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
@@ -526,10 +526,10 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
}
if (v->max_swath_height_c[k] > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
- }
- v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
- if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
- v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
@@ -552,14 +552,14 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
}
v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
- v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
- v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
}
else {
- v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+ v->effective_lb_latency_hiding_source_lines_chroma = dcn_bw_min2(v->max_line_buffer_lines, dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 / dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_detlb_lines_chroma = dcn_bw_floor2(v->lines_in_det_chroma + dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+ v->urgent_latency_support_us_per_state[i][j][k] = dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] * dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 * dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
}
}
}
@@ -1146,10 +1146,10 @@ void display_pipe_configuration(struct dcn_bw_internal_vars *v)
}
if (v->maximum_swath_height_c > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
- }
- v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
- if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
- v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_y[k] = v->maximum_swath_height_y;
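
The dcn_calc_auto.c hunks above (and several later hunks in display_mode_vba_21.c and display_mode_vba_30.c) share one pattern: math that only makes sense when a chroma plane exists is moved inside the guard that checks the chroma quantity is non-zero, so values such as swath_width_granularity_c are never consumed when they were never computed. A minimal sketch of that guard, with hypothetical demo_* names:

/* Hypothetical demo_* helper: chroma bytes are only derived when the format
 * actually has a chroma plane, so the granularity is never read as a zero or
 * stale value for single-plane formats. */
static double demo_rounded_chroma_bytes(double swath_width, double byte_per_pix_c,
					double max_swath_height_c)
{
	double granularity_c;
	double rounded_bytes_c = 0.0;

	if (max_swath_height_c > 0.0) {
		granularity_c = 256.0 / byte_per_pix_c / max_swath_height_c;
		rounded_bytes_c = (swath_width / 2.0 + granularity_c) *
				  byte_per_pix_c * max_swath_height_c;
	}
	return rounded_bytes_c;
}
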
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
index 07d18e78de49..cac72413a097 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
@@ -23,6 +23,7 @@
*
*/
+#include "os_types.h"
#include "dcn_calc_math.h"
#define isNaN(number) ((number) != (number))
@@ -69,8 +70,8 @@ float dcn_bw_max2(const float arg1, const float arg2)
float dcn_bw_floor2(const float arg, const float significance)
{
- if (significance == 0)
- return 0;
+ ASSERT(significance != 0);
+
return ((int) (arg / significance)) * significance;
}
float dcn_bw_floor(const float arg)
@@ -80,17 +81,14 @@ float dcn_bw_floor(const float arg)
float dcn_bw_ceil(const float arg)
{
- float flr = dcn_bw_floor2(arg, 1);
-
- return flr + 0.00001 >= arg ? arg : flr + 1;
+ return (int) (arg + 0.99999);
}
float dcn_bw_ceil2(const float arg, const float significance)
{
- float flr = dcn_bw_floor2(arg, significance);
- if (significance == 0)
- return 0;
- return flr + 0.00001 >= arg ? arg : flr + significance;
+ ASSERT(significance != 0);
+
+ return ((int) (arg / significance + 0.99999)) * significance;
}
float dcn_bw_max3(float v1, float v2, float v3)
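
The dcn_calc_math.c hunk above replaces the silent "return 0" on a zero significance with an ASSERT and reimplements the ceiling helpers as one truncation of arg / significance + 0.99999. A small self-contained sketch (hypothetical demo_ceil2 name) showing how the 0.99999 epsilon keeps exact multiples from being bumped up a whole step:

#include <assert.h>
#include <stdio.h>

/* Same rounding trick as the new dcn_bw_ceil2(): truncate after adding an
 * epsilon just under 1, so values already on a multiple of the significance
 * stay put instead of rounding up by a full step. */
static float demo_ceil2(float arg, float significance)
{
	assert(significance != 0.0f);
	return ((int)(arg / significance + 0.99999f)) * significance;
}

int main(void)
{
	printf("%g\n", demo_ceil2(5.0f, 2.0f)); /* 6: 2.5  + eps truncates to 3 */
	printf("%g\n", demo_ceil2(6.0f, 2.0f)); /* 6: 3.0  + eps truncates to 3 */
	printf("%g\n", demo_ceil2(6.1f, 2.0f)); /* 8: 3.05 + eps truncates to 4 */
	return 0;
}
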
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index db3b16b77034..d46adc849d2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -736,30 +736,13 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
- uint32_t hw_internal_rev,
- uint32_t pci_revision_id)
+static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
- if ((chip_family == FAMILY_RV) &&
- ASICREV_IS_RAVEN2(hw_internal_rev))
- switch (pci_revision_id) {
- case PRID_DALI_DE:
- case PRID_DALI_DF:
- case PRID_DALI_E3:
- case PRID_DALI_E4:
- case PRID_POLLOCK_94:
- case PRID_POLLOCK_95:
- case PRID_POLLOCK_E9:
- case PRID_POLLOCK_EA:
- case PRID_POLLOCK_EB:
- return 0;
- default:
- break;
- }
-
- /* we are ok with all levels */
- return 4;
+ if (is_vmin_only_asic)
+ return 0;
+ else /* we are ok with all levels */
+ return 4;
}
bool dcn_validate_bandwidth(
@@ -1323,10 +1306,7 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
- dc->ctx->asic_id.chip_family,
- dc->ctx->asic_id.hw_internal_rev,
- dc->ctx->asic_id.pci_revision_id))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->config.is_vmin_only_asic))
return true;
else
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ca44df4fca74..d680f1c5b69f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,6 +30,7 @@
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "dcn20_fpu.h"
@@ -2233,6 +2234,7 @@ static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_li
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl = 0, k = 0;
@@ -2246,8 +2248,7 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
ASSERT(clk_table->num_entries);
/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
- memcpy(&dcn2_1_soc._clock_tmp, &dcn2_1_soc.clock_limits,
- sizeof(dcn2_1_soc.clock_limits));
+ memcpy(s, dcn2_1_soc.clock_limits, sizeof(dcn2_1_soc.clock_limits));
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
@@ -2262,25 +2263,25 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
if (i == 1)
k++;
- dcn2_1_soc._clock_tmp[k].state = k;
- dcn2_1_soc._clock_tmp[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc._clock_tmp[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc._clock_tmp[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc._clock_tmp[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn2_1_soc._clock_tmp[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn2_1_soc._clock_tmp[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn2_1_soc._clock_tmp[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn2_1_soc._clock_tmp[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn2_1_soc._clock_tmp[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn2_1_soc._clock_tmp[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn2_1_soc._clock_tmp[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[k].state = k;
+ s[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[k].dram_bw_per_chan_gbps =
+ dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
k++;
}
- memcpy(&dcn2_1_soc.clock_limits, &dcn2_1_soc._clock_tmp,
- sizeof(dcn2_1_soc.clock_limits));
+ memcpy(dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 63bbdf8b8678..edd098c7eb92 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -4478,17 +4478,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
- locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
- locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
- locals->EffectiveLBLatencyHidingSourceLinesChroma),
- locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
+ locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
+ locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesChroma),
+ locals->SwathHeightCPerState[i][j][k]);
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 8a7485e21d53..1d84ae50311d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -806,10 +806,12 @@ static bool CalculatePrefetchSchedule(
if (myPipe->SourceScan == dm_horz) {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockWidth256BytesY) + myPipe->BlockWidth256BytesY;
- *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
+ if (myPipe->BlockWidth256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
} else {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
- *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
+ if (myPipe->BlockWidth256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
}
prefetch_bw_oto = (PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) / Tsw_oto;
@@ -2634,7 +2636,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
- locals->XFCRemoteSurfaceFlipLatency[k] =
+ locals->XFCRemoteSurfaceFlipLatency[k] =
dml_floor(
mode_lib->vba.XFCRemoteSurfaceFlipDelay
/ (mode_lib->vba.HTotal[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 876b321b30ca..479e2c1a1301 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -396,64 +396,10 @@ static void CalculateUrgentBurstFactor(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
+ struct vba_vars_st *v,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2]);
+ int ReorderingBytes);
+
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
@@ -4692,66 +4638,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (v->UseMinimumRequiredDCFCLK == true) {
- UseMinimumDCFCLK(
- mode_lib,
- v->MaxInterDCNTileRepeaters,
- MaxPrefetchMode,
- v->FinalDRAMClockChangeLatency,
- v->SREnterPlusExitTime,
- v->ReturnBusWidth,
- v->RoundTripPingLatencyCycles,
- ReorderingBytes,
- v->PixelChunkSizeInKByte,
- v->MetaChunkSize,
- v->GPUVMEnable,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->NumberOfActivePlanes,
- v->HostVMMinPageSize,
- v->HostVMMaxNonCachedPageTableLevels,
- v->DynamicMetadataVMEnabled,
- v->ImmediateFlipRequirement[0],
- v->ProgressiveToInterlaceUnitInOPP,
- v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- v->VTotal,
- v->VActive,
- v->DynamicMetadataTransmittedBytes,
- v->DynamicMetadataLinesBeforeActiveRequired,
- v->Interlace,
- v->RequiredDPPCLK,
- v->RequiredDISPCLK,
- v->UrgLatency,
- v->NoOfDPP,
- v->ProjectedDCFCLKDeepSleep,
- v->MaximumVStartup,
- v->TotalVActivePixelBandwidth,
- v->TotalVActiveCursorBandwidth,
- v->TotalMetaRowBandwidth,
- v->TotalDPTERowBandwidth,
- v->TotalNumberOfActiveDPP,
- v->TotalNumberOfDCCActiveDPP,
- v->dpte_group_bytes,
- v->PrefetchLinesY,
- v->PrefetchLinesC,
- v->swath_width_luma_ub_all_states,
- v->swath_width_chroma_ub_all_states,
- v->BytePerPixelY,
- v->BytePerPixelC,
- v->HTotal,
- v->PixelClock,
- v->PDEAndMetaPTEBytesPerFrame,
- v->DPTEBytesPerRow,
- v->MetaRowBytes,
- v->DynamicMetadataEnable,
- v->VActivePixelBandwidth,
- v->VActiveCursorBandwidth,
- v->ReadBandwidthLuma,
- v->ReadBandwidthChroma,
- v->DCFCLKPerState,
- v->DCFCLKState);
+ UseMinimumDCFCLK(mode_lib, v, MaxPrefetchMode, ReorderingBytes);
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
@@ -6435,10 +6322,6 @@ static void CalculateSwathWidth(
for (k = 0; k < NumberOfActivePlanes; ++k) {
enum odm_combine_mode MainPlaneODMCombine = 0;
- surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
- surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (SourceScan[k] != dm_vert) {
SwathWidthSingleDPPY[k] = ViewportWidth[k];
@@ -6478,8 +6361,6 @@ static void CalculateSwathWidth(
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (SourceScan[k] != dm_vert) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
@@ -6487,6 +6368,7 @@ static void CalculateSwathWidth(
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
} else {
@@ -6498,6 +6380,7 @@ static void CalculateSwathWidth(
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
} else {
@@ -6610,77 +6493,21 @@ static double CalculateUrgentLatency(
return ret;
}
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
+ struct vba_vars_st *v,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2])
+ int ReorderingBytes)
{
double NormalEfficiency = 0;
double PTEEfficiency = 0;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2] = { { 0 } };
unsigned int i, j, k;
- NormalEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
- : PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
- PTEEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
- / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
+ NormalEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ : v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
+ PTEEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
+ / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX] = { 0 };
@@ -6698,58 +6525,58 @@ static void UseMinimumDCFCLK(
double MinimumTvmPlus2Tr0 = 0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
- + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]);
+ + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
- NoOfDPPState[k] = NoOfDPP[i][j][k];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
+ NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
- MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime);
- NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j];
- DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ?
- TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j];
- DCFCLKRequiredForAverageBandwidth = dml_max3(ProjectedDCFCLKDeepSleep[i][j],
- (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth / (MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
- (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / ReturnBusWidth);
-
- ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, TotalNumberOfActiveDPP[i][j], PixelChunkSizeInKByte, TotalNumberOfDCCActiveDPP[i][j],
- MetaChunkSize, GPUVMEnable, HostVMEnable, NumberOfActivePlanes, NoOfDPPState, dpte_group_bytes,
- PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- HostVMMinPageSize, HostVMMaxNonCachedPageTableLevels);
- ExtraLatencyCycles = RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
+ NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
+ DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
+ DCFCLKRequiredForAverageBandwidth = dml_max3(v->ProjectedDCFCLKDeepSleep[i][j],
+ (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth / (v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
+ (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / v->ReturnBusWidth);
+
+ ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, v->TotalNumberOfActiveDPP[i][j], v->PixelChunkSizeInKByte, v->TotalNumberOfDCCActiveDPP[i][j],
+ v->MetaChunkSize, v->GPUVMEnable, v->HostVMEnable, v->NumberOfActivePlanes, NoOfDPPState, v->dpte_group_bytes,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->HostVMMinPageSize, v->HostVMMaxNonCachedPageTableLevels);
+ ExtraLatencyCycles = v->RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch = { 0 };
double ExpectedPrefetchBWAcceleration = { 0 };
double PrefetchTime = { 0 };
- PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
- + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth;
- DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
- / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * DPTEBytesPerRow[i][j][k] / PTEEfficiency
- / NormalEfficiency / ReturnBusWidth + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
- PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k];
- ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
- DynamicMetadataVMExtraLatency[k] = (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
- UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
- PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait - UrgLatency[i] * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels
- : GPUVMMaxPageTableLevels - 2) * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
+ PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
+ DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
+ / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * v->DPTEBytesPerRow[i][j][k] / PTEEfficiency
+ / NormalEfficiency / v->ReturnBusWidth + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
+ PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
+ ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
+ DynamicMetadataVMExtraLatency[k] = (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
+ v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
+ PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - v->UrgLatency[i] * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels
+ : v->GPUVMMaxPageTableLevels - 2) * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch = { 0 };
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k] / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
- if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) {
+ if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
- + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / ReturnBusWidth;
+ + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
- if (DynamicMetadataEnable[k] == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
double TsetupPipe = { 0 };
double TdmbfPipe = { 0 };
double TdmsksPipe = { 0 };
@@ -6757,49 +6584,49 @@ static void UseMinimumDCFCLK(
double AllowedTimeForUrgentExtraLatency = { 0 };
CalculateDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
- RequiredDPPCLK[i][j][k],
- RequiredDISPCLK[i][j],
- ProjectedDCFCLKDeepSleep[i][j],
- PixelClock[k],
- HTotal[k],
- VTotal[k] - VActive[k],
- DynamicMetadataTransmittedBytes[k],
- DynamicMetadataLinesBeforeActiveRequired[k],
- Interlace[k],
- ProgressiveToInterlaceUnitInOPP,
+ v->MaxInterDCNTileRepeaters,
+ v->RequiredDPPCLK[i][j][k],
+ v->RequiredDISPCLK[i][j],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->PixelClock[k],
+ v->HTotal[k],
+ v->VTotal[k] - v->VActive[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
&TsetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe);
- AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TsetupPipe
+ AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TsetupPipe
- TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
- MinimumTvmPlus2Tr0 = UrgLatency[i] * (GPUVMEnable == true ? (HostVMEnable == true ?
- (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : 0);
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ MinimumTvmPlus2Tr0 = v->UrgLatency[i] * (v->GPUVMEnable == true ? (v->HostVMEnable == true ?
+ (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) : 0);
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw = { 0 };
- MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
+ MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
- DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(DCFCLKRequiredForPeakBandwidth, 2 * ExtraLatencyCycles
/ (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
- DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
+ v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
* dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
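
The display_mode_vba_30.c hunks above shrink UseMinimumDCFCLK from roughly fifty positional parameters to the struct vba_vars_st *v they were already being unpacked from, and mark the function noinline_for_stack; the usual point of this shape of refactor (inferred here rather than stated in the patch) is to keep the argument copies and spilled locals of these FPU-heavy DML helpers off the kernel stack. A compact sketch of the pattern with hypothetical demo_* names and placeholder arithmetic:

/* Hypothetical demo_* context struct: callees receive one pointer instead of a
 * long positional list, so call sites stay one line and adding a field never
 * touches every prototype in the chain. The loop body is only a placeholder. */
struct demo_vba_vars {
	int num_active_planes;
	double urg_latency[4];
	double dcfclk_state[4];
};

static void demo_use_minimum_dcfclk(struct demo_vba_vars *v, int max_prefetch_mode,
				    int reordering_bytes)
{
	for (int k = 0; k < v->num_active_planes; ++k)
		v->dcfclk_state[k] = v->urg_latency[k] * max_prefetch_mode +
				     reordering_bytes;
}
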
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 7ef66e511ec8..422f17aefd4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -26,6 +26,7 @@
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
@@ -321,6 +322,7 @@ static void calculate_wm_set_for_vlevel(int vlevel,
void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
@@ -328,8 +330,7 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dc_assert_fp_enabled();
- memcpy(&dcn3_01_soc._clock_tmp, &dcn3_01_soc.clock_limits,
- sizeof(dcn3_01_soc.clock_limits));
+ memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits));
/* Default clock levels are used for diags, which may lead to overclocking. */
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -347,35 +348,42 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
}
- dcn3_01_soc._clock_tmp[i].state = i;
- dcn3_01_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn3_01_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_01_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_01_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn3_01_soc._clock_tmp[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_01_soc._clock_tmp[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_01_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_01_soc._clock_tmp[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_01_soc._clock_tmp[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_01_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_01_soc._clock_tmp[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].state = i;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_01_soc.num_states = clk_table->num_entries;
/* duplicate last level */
- dcn3_01_soc._clock_tmp[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- dcn3_01_soc._clock_tmp[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+ s[dcn3_01_soc.num_states] =
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
}
}
- memcpy(&dcn3_01_soc.clock_limits, &dcn3_01_soc._clock_tmp,
- sizeof(dcn3_01_soc.clock_limits));
+ memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits));
dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ if ((int)(dcn3_01_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_01_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index e36cfa5985ea..b6e99eefe869 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -25,6 +25,9 @@
#include "resource.h"
#include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@@ -288,6 +291,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -355,7 +359,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -594,14 +598,14 @@ void dcn31_calculate_wm_and_dlg_fp(
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int j;
dc_assert_fp_enabled();
- memcpy(&dcn3_1_soc._clock_tmp, &dcn3_1_soc.clock_limits,
- sizeof(dcn3_1_soc.clock_limits));
+ memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -630,38 +634,46 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
}
- dcn3_1_soc._clock_tmp[i].state = i;
+ s[i].state = i;
/* Clocks dependent on voltage level. */
- dcn3_1_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn3_1_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_1_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_1_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
- dcn3_1_soc._clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_1_soc._clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_1_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_1_soc._clock_tmp[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_1_soc._clock_tmp[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_1_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_1_soc._clock_tmp[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_1_soc.num_states = clk_table->num_entries;
}
}
- memcpy(&dcn3_1_soc.clock_limits, &dcn3_1_soc._clock_tmp,
- sizeof(dcn3_1_soc.clock_limits));
+ memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits));
dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ if ((int)(dcn3_1_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
else
@@ -677,7 +689,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ if (bw_params->num_channels > 0)
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
ASSERT(clk_table->num_entries);
@@ -716,6 +732,12 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
*/
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ if ((int)(dcn3_15_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
else
@@ -724,6 +746,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
@@ -731,8 +754,7 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dc_assert_fp_enabled();
- memcpy(&dcn3_16_soc._clock_tmp, &dcn3_16_soc.clock_limits,
- sizeof(dcn3_16_soc.clock_limits));
+ memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -754,7 +776,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
@@ -765,44 +788,53 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
closest_clk_lvl = dcn3_16_soc.num_states - 1;
}
- dcn3_16_soc._clock_tmp[i].state = i;
+ s[i].state = i;
/* Clocks dependent on voltage level. */
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
if (clk_table->num_entries == 1 &&
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ s[i].dcfclk_mhz <
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
/*SMU fix not released yet*/
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ s[i].dcfclk_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
- dcn3_16_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_16_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_16_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
- dcn3_16_soc._clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_16_soc._clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_16_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_16_soc._clock_tmp[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_16_soc._clock_tmp[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_16_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_16_soc._clock_tmp[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_16_soc.num_states = clk_table->num_entries;
}
}
- memcpy(&dcn3_16_soc.clock_limits, &dcn3_16_soc._clock_tmp,
- sizeof(dcn3_16_soc.clock_limits));
+ memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits));
if (max_dispclk_mhz) {
dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
+ if ((int)(dcn3_16_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 3fab19134480..8dfe639b6508 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -26,7 +26,7 @@
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
@@ -251,33 +251,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1107,10 +1051,10 @@ static bool CalculatePrefetchSchedule(
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
/*rev 99*/
prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane);
- max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+ max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
+ prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
@@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -6933,8 +6711,6 @@ static void CalculateSwathWidth(
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
@@ -6945,6 +6721,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
@@ -6956,6 +6734,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 66b82e4f05c6..35d10b4d018b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -27,7 +27,7 @@
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644
index 000000000000..cf420ad2b8dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dcn3_14_soc.clock_limits;
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+
+ dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
+ if (bw_params->num_channels > 0)
+ dcn3_14_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(dcn3_14_soc.num_chans);
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_14_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_14_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_14_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if ((int)(dcn3_14_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_14_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
+ else
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ dc_assert_fp_enabled();
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+ && pipe->stream->adjust.v_total_min > timing->v_total)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644
index 000000000000..d32c5bb99f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index fc4d7474c111..0d12fd079cd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -61,7 +61,7 @@
// fudge factor for min dcfclk calculation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
-struct {
+typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
@@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1362,7 +1306,7 @@ static bool CalculatePrefetchSchedule(
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || myPipe->DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) : 0.0); // TODO: Did someone else add this??
#else
- LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#endif
#ifdef __DML_VBA_DEBUG__
@@ -1599,7 +1543,7 @@ static void CalculateDCCConfiguration(
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
- enum {
+ typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
@@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -4071,9 +3941,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
- || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
- || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+ if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
@@ -5414,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5498,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5681,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5758,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5864,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5879,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5891,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -7049,8 +6825,6 @@ static void CalculateSwathWidth(
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
@@ -7061,6 +6835,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
@@ -7072,6 +6848,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
@@ -7157,12 +6935,13 @@ static double CalculateExtraLatencyBytes(
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
- else
+ } else {
HostVMDynamicLevels = 0;
+ }
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
- if (GPUVMEnable == true)
+ if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k)
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
@@ -7406,7 +7185,7 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = HTotal / PixelClock;
unsigned int vblank_actual = VTotal - VActive;
unsigned int vblank_nom_default_in_line = dml_floor(VBlankNomDefaultUS / line_time_us, 1.0);
- unsigned int vblank_nom_input = dml_min(VBlankNom, vblank_nom_default_in_line);
+ unsigned int vblank_nom_input = VBlankNom; //dml_min(VBlankNom, vblank_nom_default_in_line);
unsigned int vblank_avail = vblank_nom_input == 0 ? vblank_nom_default_in_line : vblank_nom_input;
vblank_size = (unsigned int) dml_min(vblank_actual, vblank_avail);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 66453546e24f..0571700f53f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
},
},
.num_states = 1,
- .sr_exit_time_us = 20.16,
- .sr_enter_plus_exit_time_us = 27.13,
+ .sr_exit_time_us = 42.97,
+ .sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
@@ -244,6 +244,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
}
/**
+ * dcn32_find_dummy_latency_index_for_fw_based_mclk_switch - Find the
+ * dummy_latency_index to use when MCLK switching using firmware-based
+ * vblank stretch is enabled. This function iterates through the table of
+ * dummy pstate latencies until it finds the lowest value that allows
+ * dm_allow_self_refresh_and_mclk_switch to happen.
+ */
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ const int max_latency_table_entries = 4;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ int dummy_latency_index = 0;
+
+ dc_assert_fp_enabled();
+
+ while (dummy_latency_index < max_latency_table_entries) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+
+ if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
+ break;
+
+ dummy_latency_index++;
+ }
+
+ if (dummy_latency_index == max_latency_table_entries) {
+ ASSERT(dummy_latency_index != max_latency_table_entries);
+ /* If the execution gets here, it means dummy p_states are
+ * not possible. This should never happen and would mean
+ * something is severely wrong.
+ * Here we reset dummy_latency_index to 3, because it is
+ * better to have underflows than system crashes.
+ */
+ dummy_latency_index = max_latency_table_entries - 1;
+ }
+
+ return dummy_latency_index;
+}
+
+/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
*
@@ -286,41 +330,92 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
}
-bool dcn32_predict_pipe_split(struct dc_state *context, display_pipe_params_st pipe, int index)
+/**
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: New DC state to be programmed
+ * @pipe_e2e: DML pipe end-to-end context
+ *
+ * This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
+ * ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
+ * determined by DPPClk requirements.
+ *
+ * This function follows the same policy as DML:
+ * - Check for ODM combine requirements / policy first
+ * - MPC combine is only chosen if there are no ODM combine requirements / policy in place,
+ *   and MPC is required
+ *
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
+ */
+uint8_t dcn32_predict_pipe_split(struct dc_state *context,
+ display_e2e_pipe_params_st *pipe_e2e)
{
double pscl_throughput;
double pscl_throughput_chroma;
double dpp_clk_single_dpp, clock;
double clk_frequency = 0.0;
double vco_speed = context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz;
+ bool total_available_pipes_support = false;
+ uint32_t number_of_dpp = 0;
+ enum odm_combine_mode odm_mode = dm_odm_combine_mode_disabled;
+ double req_dispclk_per_surface = 0;
+ uint8_t num_splits = 0;
dc_assert_fp_enabled();
- dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe.scale_ratio_depth.hscl_ratio,
- pipe.scale_ratio_depth.hscl_ratio_c,
- pipe.scale_ratio_depth.vscl_ratio,
- pipe.scale_ratio_depth.vscl_ratio_c,
- context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
- context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
- pipe.dest.pixel_rate_mhz,
- pipe.src.source_format,
- pipe.scale_taps.htaps,
- pipe.scale_taps.htaps_c,
- pipe.scale_taps.vtaps,
- pipe.scale_taps.vtaps_c,
- /* Output */
- &pscl_throughput, &pscl_throughput_chroma,
- &dpp_clk_single_dpp);
+ dml32_CalculateODMMode(context->bw_ctx.dml.ip.maximum_pixels_per_line_per_dsc_unit,
+ pipe_e2e->pipe.dest.hactive,
+ pipe_e2e->dout.output_format,
+ pipe_e2e->dout.output_type,
+ pipe_e2e->pipe.dest.odm_combine_policy,
+ context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
+ context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
+ pipe_e2e->dout.dsc_enable != 0,
+ 0, /* TotalNumberOfActiveDPP can be 0 since we're predicting pipe split requirement */
+ context->bw_ctx.dml.ip.max_num_dpp,
+ pipe_e2e->pipe.dest.pixel_rate_mhz,
+ context->bw_ctx.dml.soc.dcn_downspread_percent,
+ context->bw_ctx.dml.ip.dispclk_ramp_margin_percent,
+ context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz,
+ pipe_e2e->dout.dsc_slices,
+ /* Output */
+ &total_available_pipes_support,
+ &number_of_dpp,
+ &odm_mode,
+ &req_dispclk_per_surface);
+
+ dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe_e2e->pipe.scale_ratio_depth.hscl_ratio,
+ pipe_e2e->pipe.scale_ratio_depth.hscl_ratio_c,
+ pipe_e2e->pipe.scale_ratio_depth.vscl_ratio,
+ pipe_e2e->pipe.scale_ratio_depth.vscl_ratio_c,
+ context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
+ context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
+ pipe_e2e->pipe.dest.pixel_rate_mhz,
+ pipe_e2e->pipe.src.source_format,
+ pipe_e2e->pipe.scale_taps.htaps,
+ pipe_e2e->pipe.scale_taps.htaps_c,
+ pipe_e2e->pipe.scale_taps.vtaps,
+ pipe_e2e->pipe.scale_taps.vtaps_c,
+ /* Output */
+ &pscl_throughput, &pscl_throughput_chroma,
+ &dpp_clk_single_dpp);
clock = dpp_clk_single_dpp * (1 + context->bw_ctx.dml.soc.dcn_downspread_percent / 100);
if (clock > 0)
- clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0));
+ clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0) / clock);
- if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[index].dppclk_mhz)
- return true;
- else
- return false;
+ if (odm_mode == dm_odm_combine_mode_2to1)
+ num_splits = 1;
+ else if (odm_mode == dm_odm_combine_mode_4to1)
+ num_splits = 3;
+ else if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dppclk_mhz)
+ num_splits = 1;
+
+ return num_splits;
}
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
@@ -473,8 +568,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
+ /* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
+ * to 2 swaths (i.e. 16 lines)
+ */
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
- pstate_width_fw_delay_lines;
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -490,6 +588,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
+ phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
/**
@@ -556,6 +655,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
bool valid_assignment_found = false;
unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
bool current_assignment_freesync = false;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -569,8 +669,16 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ /* SubVP pipe candidate requirements:
+ * - Refresh rate < 120hz
+ * - Not able to switch in vactive naturally (switching in active means the
+ * DET provides enough buffer to hide the P-State switch latency -- trying
+ * to combine this with SubVP can cause issues with the scheduling).
+ * - Not TMZ surface
+ */
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120) {
+ pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
+ vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
@@ -983,13 +1091,21 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
+ * Override present for testing.
*/
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ if (dc->debug.dml_disallow_alternate_prefetch_modes)
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
+ else
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
- if (*vlevel < context->bw_ctx.dml.soc.num_states)
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
+ }
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
@@ -1004,6 +1120,17 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
+ /* to re-initialize viewport after the pipe merge */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1014,7 +1141,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
* enough to support MCLK switching.
*/
- if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+ if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+ dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_stutter;
/* There are params (such as FabricClock) that need to be recalculated
@@ -1070,17 +1199,31 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->remove_phantom_pipes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
- } else {
- // only call dcn20_validate_apply_pipe_split_flags if we found a supported config
- memset(split, 0, MAX_PIPES * sizeof(int));
- memset(merge, 0, MAX_PIPES * sizeof(bool));
- *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* This may adjust vlevel and maxMpcComb */
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
+ }
+ } else {
// Must populate phantom DLG params before programming hardware / timing for phantom pipe
DC_FP_START();
dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
DC_FP_END();
+ /* Call validate_apply_pipe_split flags after calling DML getters for
+ * phantom dlg params, or some of the VBA params indicating pipe split
+ * can be overwritten by the getters.
+ *
+ * When setting up SubVP config, all pipes are merged before attempting to
+ * add phantom pipes. If pipe split (ODM / MPC) is required, both the main
+ * and phantom pipes will be split in the regular pipe splitting sequence.
+ */
+ memset(split, 0, MAX_PIPES * sizeof(int));
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+ *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
// until driver has acquired the DMCUB lock to do it safely.
}
@@ -1344,7 +1487,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1373,17 +1517,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
DC_FP_END();
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favouring voltage.
*
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
- * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_fclk_and_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1398,6 +1547,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(split, 0, sizeof(split));
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
+ vba->VoltageLevel = vlevel;
}
}
@@ -1440,6 +1591,28 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+ /* 2:1 ODM + MPC Split MPO to Single Pipe + MPC Split MPO */
+ if (pipe->bottom_pipe) {
+ if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
+ /*MPC split rules will handle this case*/
+ pipe->bottom_pipe->top_pipe = NULL;
+ } else {
+ if (pipe->prev_odm_pipe->bottom_pipe) {
+ /* 3 plane MPO*/
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
+ pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
+ } else {
+ /* 2 plane MPO*/
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
+ pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ if (pipe->top_pipe) {
+ pipe->top_pipe->bottom_pipe = NULL;
+ }
+
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
@@ -1572,8 +1745,20 @@ bool dcn32_internal_validate_bw(struct dc *dc,
goto validate_fail;
}
- if (repopulate_pipes)
+ if (repopulate_pipes) {
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+
+ /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
+ * we have to re-calculate the DET allocation and run through DML once more to
+ * ensure all the params are calculated correctly. We do not need to run the
+ * pipe split check again after this call (pipes are already split / merged).
+ */
+ if (!fast_validate) {
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ }
+ }
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -1619,7 +1804,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+ dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
@@ -1758,7 +1943,11 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ /* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state.
+ * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM
+ * value.
+ */
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
@@ -1855,6 +2044,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+ int i;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+ max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+ max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
+
+ /* Scan through the clock values we currently have and, if any are 0,
+ * populate them with the corresponding dcn3_2_soc.clock_limits[] value.
+ *
+ * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being 0
+ * will cause the clock table build to be skipped.
+ */
+ if (max_dcfclk_mhz == 0)
+ bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ if (max_dispclk_mhz == 0)
+ bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+ if (max_dtbclk_mhz == 0)
+ bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+ if (max_uclk_mhz == 0)
+ bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
+
static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
@@ -2089,6 +2317,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
@@ -2098,6 +2327,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
@@ -2111,13 +2347,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_2_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_2_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_2_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_2_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 3ed06ab855be..3a3dc2ce4c73 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -29,11 +29,6 @@
#include "clk_mgr_internal.h"
-#define DCN3_2_DEFAULT_DET_SIZE 256
-#define DCN3_2_MAX_DET_SIZE 1152
-#define DCN3_2_MIN_DET_SIZE 128
-#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
-
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr);
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
@@ -41,9 +36,8 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn32_predict_pipe_split(struct dc_state *context,
- display_pipe_params_st pipe,
- int index);
+uint8_t dcn32_predict_pipe_split(struct dc_state *context,
+ display_e2e_pipe_params_st *pipe_e2e);
void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
@@ -71,4 +65,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 890612db08dc..75be1e1ce543 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
@@ -679,9 +677,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
dml_ceil((double) v->WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1));
- // Clamp to max OTG vstartup register limit
- if (v->MaxVStartupLines[k] > 1023)
- v->MaxVStartupLines[k] = 1023;
+ // Clamp to max OTG vstartup register limit
+ if (v->MaxVStartupLines[k] > 1023)
+ v->MaxVStartupLines[k] = 1023;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
@@ -758,31 +756,17 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
+ v,
+ k,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
- &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
- mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
+ &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+ v->DSCDelay[k],
+ (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
- mode_lib->vba.TCalc,
+ v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
@@ -796,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.SwathHeightC[k],
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
TWait,
/* Output */
&v->DSTXAfterScaler[k],
@@ -1167,59 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLK,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- v->dpte_group_bytes,
- v->meta_row_height,
- v->meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+ v->DCFCLK,
+ v->ReturnBW,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLK,
+ v->SOCCLK,
v->DCFCLKDeepSleep,
- mode_lib->vba.DETBufferSizeY,
- mode_lib->vba.DETBufferSizeC,
- mode_lib->vba.SwathHeightY,
- mode_lib->vba.SwathHeightC,
- mode_lib->vba.LBBitPerPixel,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.DPPPerPlane,
+ v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
/* Output */
- &v->Watermark,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
v->MaxActiveDRAMClockChangeLatencySupported,
v->SubViewportLinesNeededInMALL,
@@ -1811,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.Read256BlockHeightC[k],
&mode_lib->vba.Read256BlockWidthY[k],
&mode_lib->vba.Read256BlockWidthC[k],
- &mode_lib->vba.MicroTileHeightY[k],
- &mode_lib->vba.MicroTileHeightC[k],
- &mode_lib->vba.MicroTileWidthY[k],
- &mode_lib->vba.MicroTileWidthC[k]);
+ &mode_lib->vba.MacroTileHeightY[k],
+ &mode_lib->vba.MacroTileHeightC[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MacroTileWidthC[k]);
}
/*Bandwidth Support Check*/
@@ -1952,7 +1905,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2040,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2051,6 +2004,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
+ mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportNoDSC,
@@ -2061,6 +2015,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2072,6 +2027,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
+ mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportDSC,
@@ -2549,7 +2505,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2666,10 +2621,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
- mode_lib->vba.MicroTileWidthY,
- mode_lib->vba.MicroTileWidthC,
- mode_lib->vba.MicroTileHeightY,
- mode_lib->vba.MicroTileHeightC,
+ mode_lib->vba.MacroTileWidthY,
+ mode_lib->vba.MacroTileWidthC,
+ mode_lib->vba.MacroTileHeightY,
+ mode_lib->vba.MacroTileHeightC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
@@ -2716,10 +2671,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -2749,7 +2704,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,64 +3220,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
+ v,
+ k,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
- mode_lib->vba.DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal +
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- mode_lib->vba.SwathWidthYThisState[k] /
- mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup,
- mode_lib->vba.MaximumVStartup[i][j][k]),
- mode_lib->vba.MaximumVStartup[i][j][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.UrgLatency[i],
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
- mode_lib->vba.MetaRowBytes[i][j][k],
- mode_lib->vba.DPTEBytesPerRow[i][j][k],
- mode_lib->vba.PrefetchLinesY[i][j][k],
- mode_lib->vba.SwathWidthYThisState[k],
- mode_lib->vba.PrefillY[k],
- mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[i][j][k],
- mode_lib->vba.SwathWidthCThisState[k],
- mode_lib->vba.PrefillC[k],
- mode_lib->vba.MaxNumSwC[k],
- mode_lib->vba.swath_width_luma_ub_this_state[k],
- mode_lib->vba.swath_width_chroma_ub_this_state[k],
- mode_lib->vba.SwathHeightYThisState[k],
- mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+ v->DSCDelayPerState[i][k],
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
- &mode_lib->vba.LineTimesForPrefetch[k],
- &mode_lib->vba.PrefetchBW[k],
- &mode_lib->vba.LinesForMetaPTE[k],
- &mode_lib->vba.LinesForMetaAndDPTERow[k],
- &mode_lib->vba.VRatioPreY[i][j][k],
- &mode_lib->vba.VRatioPreC[i][j][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
- &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.prefetch_vmrow_bw[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+ &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup
@@ -3566,66 +3503,35 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[i][j],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLKState[i][j],
- mode_lib->vba.ReturnBWPerState[i][j],
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- mode_lib->vba.dpte_group_bytes,
- mode_lib->vba.meta_row_height,
- mode_lib->vba.meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[i][j],
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLKPerState[i],
- mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
- mode_lib->vba.DETBufferSizeYThisState,
- mode_lib->vba.DETBufferSizeCThisState,
- mode_lib->vba.SwathHeightYThisState,
- mode_lib->vba.SwathHeightCThisState,
- mode_lib->vba.LBBitPerPixel,
- mode_lib->vba.SwathWidthYThisState, // 24
- mode_lib->vba.SwathWidthCThisState,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.NoOfDPPThisState,
- mode_lib->vba.BytePerPixelInDETY,
- mode_lib->vba.BytePerPixelInDETC,
+ v->SOCCLKPerState[i],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->SwathWidthYThisState, // 24
+ v->SwathWidthCThisState,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
- mode_lib->vba.UnboundedRequestEnabledThisState,
- mode_lib->vba.CompressedBufferSizeInkByteThisState,
+ v->UnboundedRequestEnabledThisState,
+ v->CompressedBufferSizeInkByteThisState,
/* Output */
- &mode_lib->vba.Watermark, // Store the values in vba
- &mode_lib->vba.DRAMClockChangeSupport[i][j],
+ &v->DRAMClockChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
- &mode_lib->vba.FCLKChangeSupport[i][j],
+ &v->FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMarginPerState[i][j]);
}
}
} // End of Prefetch Check
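The call-site changes above replace long scalar argument lists with the vba_vars_st pointer v plus the state/surface indices, so the callee reads fields such as v->OutputFormat[k] directly instead of receiving each value as a parameter. A minimal, self-contained sketch of that calling-convention change follows; the struct contents and the calc_old/calc_new names are hypothetical stand-ins, not the real DML signatures.

	/* Hypothetical, pared-down stand-in for struct vba_vars_st. */
	struct vba_vars_st {
		double UrgLatency[4];
		int OutputFormat[8];
	};

	/* Old style: every per-surface value is threaded through as a scalar. */
	static double calc_old(double urg_latency, int output_format)
	{
		return urg_latency + output_format;
	}

	/* New style: pass the shared state block plus the indices and read from it. */
	static double calc_new(struct vba_vars_st *v, unsigned int i, unsigned int k)
	{
		return v->UrgLatency[i] + v->OutputFormat[k];
	}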
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 07f8f3b8626b..ad66e241f9ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -27,6 +27,8 @@
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
+#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
+
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
@@ -391,7 +393,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
} // CalculateBytePerPixelAndBlockSizes
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +457,18 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpSwathSizeBytesY;
+ unsigned int RoundedUpSwathSizeBytesC;
+ double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
-
- st_vars->TotalActiveDPP = 0;
- st_vars->NoChromaSurfaces = true;
+ unsigned int TotalActiveDPP = 0;
+ bool NoChromaSurfaces = true;
+ unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +503,43 @@ void dml32_CalculateSwathAndDETConfiguration(
DPPPerSurface,
/* Output */
- st_vars->SwathWidthdoubleDPP,
- st_vars->SwathWidthdoubleDPPChroma,
+ SwathWidthdoubleDPP,
+ SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
- st_vars->MaximumSwathHeightY,
- st_vars->MaximumSwathHeightC,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+ RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+ RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+ RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+ RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+ RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+ TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
- st_vars->NoChromaSurfaces = false;
+ NoChromaSurfaces = false;
}
}
@@ -540,10 +549,10 @@ void dml32_CalculateSwathAndDETConfiguration(
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
// - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
- *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+ *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
- *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+ *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
#ifdef __DML_VBA_DEBUG__
@@ -551,7 +560,7 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
- *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+ *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
@@ -566,8 +575,8 @@ void dml32_CalculateSwathAndDETConfiguration(
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
- st_vars->RoundedUpMaxSwathSizeBytesY,
- st_vars->RoundedUpMaxSwathSizeBytesC,
+ RoundedUpMaxSwathSizeBytesY,
+ RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
@@ -575,7 +584,7 @@ void dml32_CalculateSwathAndDETConfiguration(
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+ dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +595,42 @@ void dml32_CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+ DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
- st_vars->DETBufferSizeInKByteForSwathCalculation);
+ DETBufferSizeInKByteForSwathCalculation);
#endif
- if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
- if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+ if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
@@ -636,7 +645,7 @@ void dml32_CalculateSwathAndDETConfiguration(
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
- } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
@@ -654,11 +663,11 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+ k, RoundedUpMaxSwathSizeBytesC[k]);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -712,8 +721,8 @@ void dml32_CalculateSwathWidth(
unsigned int surface_width_ub_l;
unsigned int surface_height_ub_l;
- unsigned int surface_width_ub_c;
- unsigned int surface_height_ub_c;
+ unsigned int surface_width_ub_c = 0;
+ unsigned int surface_height_ub_c = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -777,21 +786,6 @@ void dml32_CalculateSwathWidth(
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
- dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
- dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
- dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
- dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
- dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
-#endif
if (!IsVertical(SourceRotation[k])) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
@@ -811,6 +805,7 @@ void dml32_CalculateSwathWidth(
Read256BytesBlockWidthY[k]);
}
if (BytePerPixC[k] > 0) {
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c,
dml_floor(ViewportXStartC[k] + SwathWidthC[k] +
@@ -841,6 +836,7 @@ void dml32_CalculateSwathWidth(
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
}
if (BytePerPixC[k] > 0) {
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c,
dml_floor(ViewportYStartC[k] + SwathWidthC[k] +
@@ -859,6 +855,16 @@ void dml32_CalculateSwathWidth(
}
#ifdef __DML_VBA_DEBUG__
+ dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
+ dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
+ dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
+ dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
+ dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
+ dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
+ dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%d swath_width_luma_ub=%0d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%d swath_width_chroma_ub=%0d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%d MaximumSwathHeightY=%0d\n", __func__, k, MaximumSwathHeightY[k]);
@@ -1175,6 +1181,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -1186,6 +1193,7 @@ void dml32_CalculateODMMode(
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
+ unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
@@ -1221,7 +1229,8 @@ void dml32_CalculateODMMode(
if (!(Output == dm_hdmi || Output == dm_dp || Output == dm_edp) && (ODMUse == dm_odm_combine_policy_4to1 ||
((SurfaceRequiredDISPCLKWithODMCombineTwoToOne > StateDispclk ||
- (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit))
+ || NumberOfDSCSlices > 8)))) {
if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
@@ -1232,7 +1241,8 @@ void dml32_CalculateODMMode(
} else if (Output != dm_hdmi && (ODMUse == dm_odm_combine_policy_2to1 ||
(((SurfaceRequiredDISPCLKWithoutODMCombine > StateDispclk &&
SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= StateDispclk) ||
- (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit))
+ || (NumberOfDSCSlices <= 8 && NumberOfDSCSlices > 4))))) {
if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
@@ -1246,6 +1256,29 @@ void dml32_CalculateODMMode(
else
*TotalAvailablePipesSupport = false;
}
+ if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
+ ODMUse != dm_odm_combine_policy_4to1) {
+ if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
+ *ODMMode == dm_odm_combine_mode_4to1) {
+ *ODMMode = dm_odm_combine_mode_4to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
+ *NumberOfDPP = 4;
+ } else {
+ *ODMMode = dm_odm_combine_mode_2to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
+ *NumberOfDPP = 2;
+ }
+ }
+ if (Output == dm_hdmi && OutFormat == dm_420 &&
+ HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ }
}
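The new DCN32_MAX_FMT_420_BUFFER_WIDTH checks force ODM combine when a 4:2:0 output is wider than one 4096-pixel formatter buffer: 2:1 combine up to twice that width, 4:1 up to four times, and anything wider (or HDMI 4:2:0 past 4096) is rejected. A condensed sketch of that decision follows; the enum, the is_hdmi flag, and pick_odm_for_420 are hypothetical simplifications of the logic above.

	#define MAX_420_BUFFER_WIDTH 4096u	/* mirrors DCN32_MAX_FMT_420_BUFFER_WIDTH */

	enum odm_mode { ODM_DISABLED, ODM_2TO1, ODM_4TO1 };

	/* Returns 0 and sets *mode when the width fits; -1 when no ODM config works. */
	static int pick_odm_for_420(unsigned int hactive, int is_hdmi, enum odm_mode *mode)
	{
		if (hactive <= MAX_420_BUFFER_WIDTH)
			return 0;				/* one buffer is enough */
		if (is_hdmi || hactive > 4 * MAX_420_BUFFER_WIDTH)
			return -1;				/* unsupported timing */
		*mode = (hactive > 2 * MAX_420_BUFFER_WIDTH) ? ODM_4TO1 : ODM_2TO1;
		return 0;
	}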
double dml32_CalculateRequiredDispclk(
@@ -1863,11 +1896,10 @@ void dml32_CalculateSurfaceSizeInMall(
if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
}
- *ExceededMALLSize = (TotalSurfaceSizeInMALL <= MALLAllocatedForDCN * 1024 * 1024 ? false : true);
+ *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024);
} // CalculateSurfaceSizeInMall
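The ExceededMALLSize change just above is a pure boolean simplification: (total <= limit ? false : true) is the same predicate as (total > limit). A trivial sketch, with hypothetical names and the sizes already expressed in bytes:

	#include <stdbool.h>

	static bool exceeds_mall(unsigned long long total_bytes,
				 unsigned long long mall_bytes)
	{
		/* Equivalent to: total_bytes <= mall_bytes ? false : true */
		return total_bytes > mall_bytes;
	}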
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1965,21 @@ void dml32_CalculateVMRowAndSwath(
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
+ unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+ unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+ bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
@@ -1954,15 +2001,15 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
- st_vars->PTEBufferSizeInRequestsForLuma[k] =
+ PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
- st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -1982,21 +2029,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForChroma[k],
+ PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
- &st_vars->MetaRowByteC[k],
- &st_vars->PixelPTEBytesPerRowC[k],
+ &MetaRowByteC[k],
+ &PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
- &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
- &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowC_one_row_per_frame[k],
+ &dpte_row_width_chroma_ub_one_row_per_frame[k],
+ &dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
@@ -2024,19 +2071,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
- st_vars->PixelPTEBytesPerRowC[k] = 0;
- st_vars->PDEAndMetaPTEBytesFrameC = 0;
- st_vars->MetaRowByteC[k] = 0;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForChroma[k] = 0;
+ PixelPTEBytesPerRowC[k] = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
- st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+ dpte_row_height_chroma_one_row_per_frame[k] = 0;
+ dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+ PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
- st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -2056,21 +2103,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForLuma[k],
+ PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
- &st_vars->MetaRowByteY[k],
- &st_vars->PixelPTEBytesPerRowY[k],
+ &MetaRowByteY[k],
+ &PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
- &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
- &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowY_one_row_per_frame[k],
+ &dpte_row_width_luma_ub_one_row_per_frame[k],
+ &dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
@@ -2098,19 +2145,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillY[k],
&MaxNumSwathY[k]);
- PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
- MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+ PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+ MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
- if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+ if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
- st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
- st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+ one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+ PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2165,7 @@ void dml32_CalculateVMRowAndSwath(
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
- st_vars->one_row_per_frame_fits_in_buffer,
+ one_row_per_frame_fits_in_buffer,
/* Output */
UsesMALLForStaticScreen); // boolen
@@ -2144,13 +2191,13 @@ void dml32_CalculateVMRowAndSwath(
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
- dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
- dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
- dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
- dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
- PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+ dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+ dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+ dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+ dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+ PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2205,7 @@ void dml32_CalculateVMRowAndSwath(
else
DCCMetaBufferSizeNotExceeded[k] = false;
- PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+ PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
@@ -2169,11 +2216,11 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
- st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+ MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
- st_vars->PixelPTEBytesPerRowY[k],
- st_vars->PixelPTEBytesPerRowC[k],
+ PixelPTEBytesPerRowY[k],
+ PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
@@ -2189,12 +2236,12 @@ void dml32_CalculateVMRowAndSwath(
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
@@ -3342,29 +3389,14 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -3405,72 +3437,100 @@ bool dml32_CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
-
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
- st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
- st_vars->Tsw_est1 = 0;
- st_vars->Tsw_est3 = 0;
-
- if (GPUVMEnable == true && HostVMEnable == true)
- st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler;
+ double LineTime;
+ double dst_y_prefetch_equ;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double trip_to_mem;
+ double Tvm_trips;
+ double Tr0_trips;
+ double Tvm_trips_rounded;
+ double Tr0_trips_rounded;
+ double Lsw_oto;
+ double Tpre_rounded;
+ double prefetch_bw_equ;
+ double Tvm_equ;
+ double Tr0_equ;
+ double Tdmbf;
+ double Tdmec;
+ double Tdmsks;
+ double prefetch_sw_bytes;
+ double bytes_pp;
+ double dep_bytes;
+ unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ double min_Lsw;
+ double Tsw_est1 = 0;
+ double Tsw_est3 = 0;
+
+ if (v->GPUVMEnable == true && v->HostVMEnable == true)
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
else
- st_vars->HostVMDynamicLevelsTrips = 0;
+ HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
- dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+ dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+ dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
- dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
- __func__, HostVMEnable, HostVMInefficiencyFactor);
+ dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+ __func__, v->HostVMEnable, HostVMInefficiencyFactor);
#endif
dml32_CalculateVUpdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->Dppclk,
myPipe->Dispclk,
myPipe->DCFClkDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
/* output */
- &st_vars->Tdmbf,
- &st_vars->Tdmec,
- &st_vars->Tdmsks,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
- st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
- st_vars->trip_to_mem = UrgentLatency;
- st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true)
- *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+ if (v->DynamicMetadataVMEnabled == true)
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
#ifdef __DML_VBA_ALLOW_DELTA__
- if (DynamicMetadataEnable == false)
+ if (v->DynamicMetadataEnable[k] == false)
*Tdmdl = 0.0;
#endif
- if (DynamicMetadataEnable == true) {
- if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+ if (v->DynamicMetadataEnable[k] == true) {
+ if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
- __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
- __func__, st_vars->Tdmsks);
+ __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
@@ -3481,22 +3541,22 @@ bool dml32_CalculatePrefetchSchedule(
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+ *Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+ v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
- *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3566,10 @@ bool dml32_CalculatePrefetchSchedule(
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+ dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
- dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+ dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
@@ -3517,14 +3577,14 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
#endif
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
- st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3592,132 @@ bool dml32_CalculatePrefetchSchedule(
MyError = false;
- st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
-
- if (GPUVMEnable == true) {
- st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
- } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
- 4.0 * st_vars->LineTime; // VBA_ERROR
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
+
+ if (v->GPUVMEnable == true) {
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
+ if (v->GPUVMMaxPageTableLevels >= 3) {
+ *Tno_bw = UrgentExtraLatency + trip_to_mem *
+ (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+ } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
+ Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+ 4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
- st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
- st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+ Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+ Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
- st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
- st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
-
- st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
- st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
- st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
-
- if (GPUVMEnable == true) {
- st_vars->Tvm_oto = dml_max3(
- st_vars->Tvm_trips,
- *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
- st_vars->LineTime / 4.0);
+ prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+ prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
+
+ min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+ min_Lsw = dml_max(min_Lsw, 1.0);
+ Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
+
+ if (v->GPUVMEnable == true) {
+ Tvm_oto = dml_max3(
+ Tvm_trips,
+ *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ LineTime / 4.0);
} else
- st_vars->Tvm_oto = st_vars->LineTime / 4.0;
-
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_oto = dml_max4(
- st_vars->Tr0_trips,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
- (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
- st_vars->LineTime / 4.0);
+ Tvm_oto = LineTime / 4.0;
+
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ Tr0_oto = dml_max4(
+ Tr0_trips,
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ (LineTime - Tvm_oto)/2.0,
+ LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
- dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+ dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
- st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
- st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
- st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+ dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
- dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+ dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
- dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+ dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
- dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
- dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+ dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+ dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
- dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
- dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
- dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
- dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
- dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
- dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
- dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+ dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+ dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+ dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+ dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+ dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+ dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+ dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+ dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
- st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
- dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+ dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
- __func__, VStartup * st_vars->LineTime);
+ __func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
- dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
- st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+ dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
- if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
- st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+ if (prefetch_sw_bytes < dep_bytes)
+ prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -3665,61 +3725,61 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (st_vars->dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
- if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+ if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
- st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+ + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+ Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
- && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+ if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+ && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
- if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
- PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
- (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+ if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
- st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+ + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+ Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
- (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
- st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+ (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+ LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
- PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
- (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+ PrefetchBandwidth4 = prefetch_sw_bytes /
+ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+ dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
- dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
- dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
- dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+ dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+ dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+ dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3792,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
@@ -3745,9 +3805,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
@@ -3758,9 +3818,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
- st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+ Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
- st_vars->Tr0_trips_rounded) {
+ Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
@@ -3770,80 +3830,80 @@ bool dml32_CalculatePrefetchSchedule(
}
if (Case1OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+ prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+ prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+ prefetch_bw_equ = PrefetchBandwidth3;
else
- st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+ prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
- dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+ dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
- if (st_vars->prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
- st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
- HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
- st_vars->Tvm_trips, st_vars->LineTime / 4);
+ if (prefetch_bw_equ > 0) {
+ if (v->GPUVMEnable == true) {
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor / prefetch_bw_equ,
+ Tvm_trips, LineTime / 4);
} else {
- st_vars->Tvm_equ = st_vars->LineTime / 4;
+ Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
- HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
- (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+ (LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
- st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
- st_vars->Tvm_equ = 0;
- st_vars->Tr0_equ = 0;
+ Tvm_equ = 0;
+ Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
- if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
- *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
} else {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
- *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
}
- *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
- dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+ dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
- dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+ dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
- if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
- *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+ if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3911,12 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
- st_vars->LinesToRequestPrefetchPixelData,
+ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
@@ -3870,7 +3930,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
}
- *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3939,11 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
@@ -3898,25 +3958,25 @@ bool dml32_CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
- / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
- / st_vars->LineTime;
+ / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+ / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
- st_vars->LinesToRequestPrefetchPixelData
+ LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
- * swath_width_chroma_ub / st_vars->LineTime;
+ * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
- __func__, st_vars->LinesToRequestPrefetchPixelData);
+ __func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
@@ -3925,15 +3985,15 @@ bool dml32_CalculatePrefetchSchedule(
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
- (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
- 2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
- dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+ (double)LinesToRequestPrefetchPixelData * LineTime +
+ 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+ (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
- st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
- ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+ TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+ ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
@@ -3941,7 +4001,7 @@ bool dml32_CalculatePrefetchSchedule(
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
- __func__, st_vars->dst_y_prefetch_equ);
+ __func__, dst_y_prefetch_equ);
#endif
}
@@ -3957,10 +4017,10 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
- (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -3977,7 +4037,7 @@ bool dml32_CalculatePrefetchSchedule(
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
- (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4060,12 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
+ LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -4159,59 +4219,28 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
@@ -4221,229 +4250,251 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
-
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
- st_vars->DRAMClockChangeSupportNumber = 0;
- st_vars->DRAMClockChangeMethod = 0;
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
- st_vars->MinActiveFCLKChangeMargin = 0.;
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
- st_vars->TotalPixelBW = 0.0;
- st_vars->TotalActiveWriteback = 0;
-
- Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
- Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+ unsigned int DRAMClockChangeSupportNumber = 0;
+ unsigned int LastSurfaceWithoutMargin;
+ unsigned int DRAMClockChangeMethod = 0;
+ bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+ double MinActiveFCLKChangeMargin = 0.;
+ double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+ double ActiveClockChangeLatencyHidingY;
+ double ActiveClockChangeLatencyHidingC;
+ double ActiveClockChangeLatencyHiding;
+ double EffectiveDETBufferSizeY;
+ double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+ double TotalPixelBW = 0.0;
+ bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeY;
+ double FullDETBufferingTimeC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double WritebackFCLKChangeLatencyMargin;
+ double WritebackLatencyHiding;
+ bool SameTimingForFCLKChange;
+
+ unsigned int TotalActiveWriteback = 0;
+ unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+ unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
+
+ v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+ v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
- Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
- Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
- Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
- dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
- dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
- dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
- dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
- dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+ dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+ dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+ dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+ dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
- __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+ __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
#endif
- st_vars->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (WritebackEnable[k] == true)
- st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+ TotalActiveWriteback = 0;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->WritebackEnable[k] == true)
+ TotalActiveWriteback = TotalActiveWriteback + 1;
}
- if (st_vars->TotalActiveWriteback <= 1) {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+ if (TotalActiveWriteback <= 1) {
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
- + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+ + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (st_vars->TotalActiveWriteback <= 1) {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ if (TotalActiveWriteback <= 1) {
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
- __func__, Watermark->WritebackDRAMClockChangeWatermark);
- dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
- dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
- dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+ __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+ dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+ dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+ dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
#endif
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
- SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+ SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
- st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
- dml_print("DML::%s: k=%d, LineBufferSize = %d\n", __func__, k, LineBufferSize);
- dml_print("DML::%s: k=%d, LBBitPerPixel = %d\n", __func__, k, LBBitPerPixel[k]);
- dml_print("DML::%s: k=%d, HRatio = %f\n", __func__, k, HRatio[k]);
- dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
+ dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+ dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal);
+ dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]);
+ dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]);
+ dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]);
#endif
- st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
- st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
- * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+ * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+ / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
- st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
- st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
- st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+ ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
- / PixelClock[k] / VRatio[k];
+ if (v->NumberOfActiveSurfaces > 1) {
+ ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
- st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
- st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
- / VRatioChroma[k];
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
- / PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
- / PixelClock[k] / VRatioChroma[k];
+ LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+ / v->VRatioChroma[k];
+ ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+ / v->PixelClock[k];
+ if (v->NumberOfActiveSurfaces > 1) {
+ ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatioChroma[k];
}
- st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
- st_vars->ActiveClockChangeLatencyHidingC);
+ ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+ ActiveClockChangeLatencyHidingC);
} else {
- st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+ ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->DRAMClockChangeWatermark;
- st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->FCLKChangeWatermark;
- st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.DRAMClockChangeWatermark;
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.FCLKChangeWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
- if (WritebackEnable[k]) {
- st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
- / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64)
- st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+ if (v->WritebackEnable[k]) {
+ WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64)
+ WritebackLatencyHiding = WritebackLatencyHiding / 2;
- st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
- - Watermark->WritebackDRAMClockChangeWatermark;
+ WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
+ - v->Watermark.WritebackDRAMClockChangeWatermark;
- st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
- - Watermark->WritebackFCLKChangeWatermark;
+ WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
+ - v->Watermark.WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
- st_vars->WritebackFCLKChangeLatencyMargin);
- st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
- st_vars->WritebackDRAMClockChangeLatencyMargin);
+ WritebackFCLKChangeLatencyMargin);
+ ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
- (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+ (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
0 :
(ActiveDRAMClockChangeLatencyMargin[k]
+ mmSOCParameters.DRAMClockChangeLatency);
}
- for (i = 0; i < NumberOfActiveSurfaces; ++i) {
- for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+ for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+ for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
if (i == j ||
- (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
- (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
- (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
- (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
- HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
- VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
- (DRRDisplay[i] || DRRDisplay[j]))) {
- st_vars->SynchronizedSurfaces[i][j] = true;
+ (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+ (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+ (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+ (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+ v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+ v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+ (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
+ SynchronizedSurfaces[i][j] = true;
} else {
- st_vars->SynchronizedSurfaces[i][j] = false;
+ SynchronizedSurfaces[i][j] = false;
}
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
- st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+ ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+ FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+ MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+ SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
- *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+ *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
- st_vars->SameTimingForFCLKChange = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->SameTimingForFCLKChange ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] <
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+ SameTimingForFCLKChange = true;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ (SameTimingForFCLKChange ||
+ ActiveFCLKChangeLatencyMargin[k] <
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
- st_vars->SameTimingForFCLKChange = false;
+ SameTimingForFCLKChange = false;
}
}
- if (st_vars->MinActiveFCLKChangeMargin > 0) {
+ if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
- } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+ } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
@@ -4451,95 +4502,95 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
*USRRetrainingSupport = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ (USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
- st_vars->DRAMClockChangeSupportNumber = 2;
- } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
- st_vars->DRAMClockChangeSupportNumber = 1;
- st_vars->LastSurfaceWithoutMargin = k;
- } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
- !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
- st_vars->DRAMClockChangeSupportNumber = 2;
+ DRAMClockChangeSupportNumber = 2;
+ } else if (DRAMClockChangeSupportNumber == 0) {
+ DRAMClockChangeSupportNumber = 1;
+ LastSurfaceWithoutMargin = k;
+ } else if (DRAMClockChangeSupportNumber == 1 &&
+ !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+ DRAMClockChangeSupportNumber = 2;
}
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
- st_vars->DRAMClockChangeMethod = 1;
- else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
- st_vars->DRAMClockChangeMethod = 2;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+ DRAMClockChangeMethod = 1;
+ else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+ DRAMClockChangeMethod = 2;
}
- if (st_vars->DRAMClockChangeMethod == 0) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeMethod == 0) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- } else if (st_vars->DRAMClockChangeMethod == 1) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ } else if (DRAMClockChangeMethod == 1) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
unsigned int dst_y_pstate;
unsigned int src_y_pstate_l;
unsigned int src_y_pstate_c;
unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
- dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
- src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
- src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
- sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+ dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+ src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
+ src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
+ sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l);
#endif
SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
if (BytePerPixelDETC[k] > 0) {
- src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
- src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
- sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+ src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
+ src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
+ sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c);
dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c);
#endif
}
@@ -4592,10 +4643,6 @@ void dml32_CalculateMinAndMaxPrefetchMode(
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
- } else if (AllowForPStateChangeOrStutterInVBlankFinal ==
- dm_prefetch_support_uclk_fclk_and_stutter_if_possible) {
- *MinPrefetchMode = 0;
- *MaxPrefetchMode = 3;
} else {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 37a314ce284b..55cead0d4237 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -82,7 +82,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double *DPPCLKUsingSingleDPP);
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -217,6 +216,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -228,6 +228,7 @@ void dml32_CalculateODMMode(
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
+ unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
@@ -362,7 +363,6 @@ void dml32_CalculateSurfaceSizeInMall(
bool *ExceededMALLSize);
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -715,29 +715,14 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -811,59 +796,28 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
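
The prototype changes above replace long per-surface parameter lists with a pointer to struct vba_vars_st (plus a surface index in the prefetch-schedule case), so the helpers read their inputs from the shared VBA state instead of receiving dozens of scalars. The following is a minimal, illustrative sketch of that calling convention; the type and field names are placeholders, not the driver's real definitions.

#include <stdio.h>

#define NUM_SURFACES 4

/* Stand-in for the (much larger) struct vba_vars_st. */
struct vba_state_example {
	double HRatio[NUM_SURFACES];
	double VRatio[NUM_SURFACES];
	unsigned int HTotal[NUM_SURFACES];
};

/* After the refactor: the helper takes the shared state plus an index and
 * picks out the per-surface values itself, instead of every caller
 * forwarding them one by one.
 */
static double example_helper(const struct vba_state_example *v, unsigned int k)
{
	return v->HRatio[k] * v->VRatio[k] * v->HTotal[k];
}

int main(void)
{
	struct vba_state_example v = {
		.HRatio = { 1.0, 2.0 },
		.VRatio = { 1.0, 1.5 },
		.HTotal = { 2200, 2200 },
	};

	printf("%f\n", example_helper(&v, 1));	/* 2.0 * 1.5 * 2200 */
	return 0;
}
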
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 84b4b00f29cb..dd90f241e906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -489,6 +489,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
@@ -498,6 +499,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
@@ -511,13 +519,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_21_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_21_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_21_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_21_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
}
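
The override hunks above all follow the same pattern: a non-zero *_ns value from dc->bb_overrides replaces the corresponding *_us field after a nanosecond-to-microsecond conversion, guarded by a check that the value actually differs. Below is a small self-contained sketch of that pattern; the struct and field names are illustrative stand-ins, not the driver's.

#include <stdio.h>

struct soc_bb_example { double fclk_change_latency_us; };
struct overrides_example { int fclk_clock_change_latency_ns; };

static void apply_latency_override(struct soc_bb_example *soc,
				   const struct overrides_example *ovr)
{
	/* Only override when a non-zero override exists and the value
	 * differs, mirroring the checks in dcn321_update_bw_bounding_box_fpu().
	 */
	if ((int)(soc->fclk_change_latency_us * 1000) != ovr->fclk_clock_change_latency_ns
	    && ovr->fclk_clock_change_latency_ns)
		soc->fclk_change_latency_us = ovr->fclk_clock_change_latency_ns / 1000.0;
}

int main(void)
{
	struct soc_bb_example soc = { .fclk_change_latency_us = 7.0 };
	struct overrides_example ovr = { .fclk_clock_change_latency_ns = 12000 };

	apply_latency_override(&soc, &ovr);
	printf("%.1f us\n", soc.fclk_change_latency_us);	/* prints 12.0 us */
	return 0;
}
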
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 5d27ff0ebb5f..f5400eda07a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,6 +35,8 @@
#include "dcn30/display_rq_dlg_calc_30.h"
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
#include "dcn32/display_mode_vba_32.h"
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
+const struct dml_funcs dml314_funcs = {
+ .validate = dml314_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml314_recalculate,
+ .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
.recalculate = dml32_recalculate,
@@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_DCN31_FPGA:
lib->funcs = dml31_funcs;
break;
+ case DML_PROJECT_DCN314:
+ lib->funcs = dml314_funcs;
+ break;
case DML_PROJECT_DCN32:
lib->funcs = dml32_funcs;
break;
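
For context, dml_funcs is a per-project vtable: dml_init_instance() selects the table once based on the project enum, and later callers dispatch through lib->funcs. The sketch below shows that pattern in isolation; the names and types are simplified placeholders rather than the actual DML definitions.

#include <stdio.h>

enum dml_project_example { PROJ_DCN31, PROJ_DCN314, PROJ_DCN32 };

struct dml_funcs_example {
	void (*recalculate)(void);
};

static void recalc_314(void) { printf("dml314 recalculate\n"); }
static void recalc_32(void)  { printf("dml32 recalculate\n"); }

static const struct dml_funcs_example dml314_example = { .recalculate = recalc_314 };
static const struct dml_funcs_example dml32_example  = { .recalculate = recalc_32 };

/* Mirrors the switch added to dml_init_instance(): pick the vtable once. */
static const struct dml_funcs_example *init_example(enum dml_project_example p)
{
	switch (p) {
	case PROJ_DCN314:
		return &dml314_example;
	case PROJ_DCN32:
	default:
		return &dml32_example;
	}
}

int main(void)
{
	const struct dml_funcs_example *funcs = init_example(PROJ_DCN314);

	funcs->recalculate();	/* dispatches to the DCN3.14 implementation */
	return 0;
}
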
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 2bdd6ed22611..b1878a1440e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,6 +41,7 @@ enum dml_project {
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
DML_PROJECT_DCN31_FPGA,
+ DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index e8b094006d95..f33a8879b05a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -26,6 +26,16 @@
#include "dc_features.h"
#include "display_mode_enums.h"
+/**
+ * DOC: overview
+ *
+ * Most of the DML code is automatically generated and tested via a hardware
+ * description language. Usually, we use the _vcs_dpi prefix in the code,
+ * where VCS means "Verilog Compiled Simulator" and DPI stands for "Direct
+ * Programming Interface". In other words, these structs can be used to
+ * interface Verilog with other languages such as C.
+ */
+
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
@@ -159,13 +169,20 @@ struct _vcs_dpi_voltage_scaling_st {
double dtbclk_mhz;
};
+/**
+ * _vcs_dpi_soc_bounding_box_st: SOC definitions
+ *
+ * This struct maintains the SOC Bounding Box information for the ASIC; it
+ * defines parameters such as clocks, voltages and performance limits.
+ * Usually, we load these values from VBIOS; if something goes wrong, we fall
+ * back to hard-coded values, which let the ASIC light up with limitations.
+ */
struct _vcs_dpi_soc_bounding_box_st {
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- /*
- * This is a temporary stash for updating @clock_limits with the PMFW
- * clock table. Do not use outside of *update_bw_boudning_box functions.
+ /**
+	 * @num_states: It represents the total number of Display Power
+	 * Management (DPM) states supported by the specific ASIC.
*/
- struct _vcs_dpi_voltage_scaling_st _clock_tmp[DC__VOLTAGE_STATES];
unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
@@ -231,6 +248,14 @@ struct _vcs_dpi_soc_bounding_box_st {
enum self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank;
};
+/**
+ * _vcs_dpi_ip_params_st: IP configuration for DCN blocks
+ *
+ * In this struct you can find the DCN configuration associated with the
+ * specific ASIC. For example, it records how many DPPs are available in the
+ * ASIC.
+ *
+ */
struct _vcs_dpi_ip_params_st {
bool use_min_dcfclk;
bool clamp_min_dcfclk;
@@ -283,6 +308,9 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_line_buffer_chroma_buffer_size;
unsigned int max_page_table_levels;
+ /**
+	 * @max_num_dpp: Maximum number of DPPs supported in the target ASIC.
+ */
unsigned int max_num_dpp;
unsigned int max_num_otg;
unsigned int cursor_chunk_size;
@@ -482,6 +510,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int htotal;
unsigned int vtotal;
unsigned int vfront_porch;
+ unsigned int vblank_nom;
unsigned int vactive;
unsigned int hactive;
unsigned int vstartup_start;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 503e7d984ff0..03924aed8d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -597,6 +597,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
mode_lib->vba.VFrontPorch[mode_lib->vba.NumberOfActivePlanes] = dst->vfront_porch;
+ mode_lib->vba.VBlankNom[mode_lib->vba.NumberOfActivePlanes] = dst->vblank_nom;
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_luma;
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_chroma;
mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 8460aefe7b6d..630f3395e90a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
-struct dml32_CalculateSwathAndDETConfiguration {
- unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
- unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpSwathSizeBytesY;
- unsigned int RoundedUpSwathSizeBytesC;
- double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
- double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- bool NoChromaSurfaces;
- unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
- unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
- unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
- bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
- unsigned int SurfaceWithMinActiveFCLKChangeMargin;
- unsigned int DRAMClockChangeSupportNumber;
- unsigned int LastSurfaceWithoutMargin;
- unsigned int DRAMClockChangeMethod;
- bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
- double MinActiveFCLKChangeMargin;
- double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
- double ActiveClockChangeLatencyHidingY;
- double ActiveClockChangeLatencyHidingC;
- double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
- double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
- double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
- double TotalPixelBW;
- bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double LinesInDETY[DC__NUM_DPP__MAX];
- double LinesInDETC[DC__NUM_DPP__MAX];
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double FullDETBufferingTimeY;
- double FullDETBufferingTimeC;
- double WritebackDRAMClockChangeLatencyMargin;
- double WritebackFCLKChangeLatencyMargin;
- double WritebackLatencyHiding;
- bool SameTimingForFCLKChange;
- unsigned int TotalActiveWriteback;
- unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
- unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler;
- double LineTime;
- double dst_y_prefetch_equ;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tvm_oto_lines;
- double Tr0_oto_lines;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE;
- double TimeForFetchingRowInVBlank;
- double LinesToRequestPrefetchPixelData;
- unsigned int HostVMDynamicLevelsTrips;
- double trip_to_mem;
- double Tvm_trips;
- double Tr0_trips;
- double Tvm_trips_rounded;
- double Tr0_trips_rounded;
- double Lsw_oto;
- double Tpre_rounded;
- double prefetch_bw_equ;
- double Tvm_equ;
- double Tr0_equ;
- double Tdmbf;
- double Tdmec;
- double Tdmsks;
- double prefetch_sw_bytes;
- double bytes_pp;
- double dep_bytes;
- unsigned int max_vratio_pre;
- double min_Lsw;
- double Tsw_est1;
- double Tsw_est3;
-};
-
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
- struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
- struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
- struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
};
struct vba_vars_st {
@@ -418,6 +312,7 @@ struct vba_vars_st {
unsigned int ActiveDPPs;
unsigned int LBLatencyHidingSourceLinesY;
unsigned int LBLatencyHidingSourceLinesC;
+	double ActiveDRAMClockChangeLatencyMarginPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX]; // DML doesn't save active margin per state
double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
double CachedActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; // Cache in dml_get_voltage_level for debug purposes only
double MinActiveDRAMClockChangeMargin;
@@ -757,10 +652,10 @@ struct vba_vars_st {
unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
bool ImmediateFlipRequiredFinal;
bool DCCProgrammingAssumesScanDirectionUnknownFinal;
bool EnoughWritebackUnits;
@@ -906,8 +801,6 @@ struct vba_vars_st {
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
double AlignedYPitch[DC__NUM_DPP__MAX];
double AlignedCPitch[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 479d7d83220c..072bd0539605 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -76,14 +76,9 @@ static inline double dml_floor(double a, double granularity)
static inline double dml_round(double a)
{
- double round_pt = 0.5;
- double ceil = dml_ceil(a, 1);
- double floor = dml_floor(a, 1);
+ const double round_pt = 0.5;
- if (a - floor >= round_pt)
- return ceil;
- else
- return floor;
+ return dml_floor(a + round_pt, 1);
}
/* float
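
The dml_round() change above folds the explicit ceil/floor comparison into a single floor of a + 0.5; for typical finite inputs the two forms agree. The sketch below compares them side by side using plain libm calls instead of the dml_* helpers (link with -lm), purely as an illustration of the equivalence.

#include <math.h>
#include <stdio.h>

/* Old form: compare the fractional part against the rounding point. */
static double round_old(double a)
{
	double c = ceil(a), f = floor(a);

	return (a - f >= 0.5) ? c : f;
}

/* New form: a single floor of a + 0.5, as in the patched dml_round(). */
static double round_new(double a)
{
	return floor(a + 0.5);
}

int main(void)
{
	const double samples[] = { 2.4, 2.5, 2.6, -2.5, 0.0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%.1f -> old %.0f, new %.0f\n",
		       samples[i], round_old(samples[i]), round_new(samples[i]));
	return 0;
}
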
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
deleted file mode 100644
index b4b51e51fc25..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
+++ /dev/null
@@ -1,1884 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "resource.h"
-#include "core_types.h"
-#include "dsc.h"
-#include "clk_mgr.h"
-
-#ifndef DC_LOGGER_INIT
-#define DC_LOGGER_INIT
-#undef DC_LOG_WARNING
-#define DC_LOG_WARNING
-#endif
-
-#define DML_WRAPPER_TRANSLATION_
-#include "dml_wrapper_translation.c"
-#undef DML_WRAPPER_TRANSLATION_
-
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
-static void build_clamping_params(struct dc_stream_state *stream)
-{
- stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
- stream->clamping.c_depth = stream->timing.display_color_depth;
- stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
-}
-
-static void get_pixel_clock_parameters(
- const struct pipe_ctx *pipe_ctx,
- struct pixel_clk_params *pixel_clk_params)
-{
- const struct dc_stream_state *stream = pipe_ctx->stream;
-
- /*TODO: is this halved for YCbCr 420? in that case we might want to move
- * the pixel clock normalization for hdmi up to here instead of doing it
- * in pll_adjust_pix_clk
- */
- pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
- pixel_clk_params->signal_type = pipe_ctx->stream->signal;
- pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
- /* TODO: un-hardcode*/
- pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
- LINK_RATE_REF_FREQ_IN_KHZ;
- pixel_clk_params->flags.ENABLE_SS = 0;
- pixel_clk_params->color_depth =
- stream->timing.display_color_depth;
- pixel_clk_params->flags.DISPLAY_BLANKED = 1;
- pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
- PIXEL_ENCODING_YCBCR420);
- pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- pixel_clk_params->color_depth = COLOR_DEPTH_888;
- }
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
- }
- if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- pixel_clk_params->requested_pix_clk_100hz *= 2;
-
-}
-
-static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
- struct bit_depth_reduction_params *fmt_bit_depth)
-{
- enum dc_dither_option option = stream->dither_option;
- enum dc_pixel_encoding pixel_encoding =
- stream->timing.pixel_encoding;
-
- memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
-
- if (option == DITHER_OPTION_DEFAULT) {
- switch (stream->timing.display_color_depth) {
- case COLOR_DEPTH_666:
- option = DITHER_OPTION_SPATIAL6;
- break;
- case COLOR_DEPTH_888:
- option = DITHER_OPTION_SPATIAL8;
- break;
- case COLOR_DEPTH_101010:
- option = DITHER_OPTION_SPATIAL10;
- break;
- default:
- option = DITHER_OPTION_DISABLE;
- }
- }
-
- if (option == DITHER_OPTION_DISABLE)
- return;
-
- if (option == DITHER_OPTION_TRUN6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
- } else if (option == DITHER_OPTION_TRUN8 ||
- option == DITHER_OPTION_TRUN8_SPATIAL6 ||
- option == DITHER_OPTION_TRUN8_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
- } else if (option == DITHER_OPTION_TRUN10 ||
- option == DITHER_OPTION_TRUN10_SPATIAL6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8 ||
- option == DITHER_OPTION_TRUN10_FM8 ||
- option == DITHER_OPTION_TRUN10_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
- }
-
- /* special case - Formatter can only reduce by 4 bits at most.
- * When reducing from 12 to 6 bits,
- * HW recommends we use trunc with round mode
- * (if we did nothing, trunc to 10 bits would be used)
- * note that any 12->10 bit reduction is ignored prior to DCE8,
- * as the input was 10 bits.
- */
- if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
- fmt_bit_depth->flags.TRUNCATE_MODE = 1;
- }
-
- /* spatial dither
- * note that spatial modes 1-3 are never used
- */
- if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL6 ||
- option == DITHER_OPTION_TRUN8_SPATIAL6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL8 ||
- option == DITHER_OPTION_SPATIAL8_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL10 ||
- option == DITHER_OPTION_SPATIAL10_FM8 ||
- option == DITHER_OPTION_SPATIAL10_FM6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- }
-
- if (option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_SPATIAL8 ||
- option == DITHER_OPTION_SPATIAL10) {
- fmt_bit_depth->flags.FRAME_RANDOM = 0;
- } else {
- fmt_bit_depth->flags.FRAME_RANDOM = 1;
- }
-
- //////////////////////
- //// temporal dither
- //////////////////////
- if (option == DITHER_OPTION_FM6 ||
- option == DITHER_OPTION_SPATIAL8_FM6 ||
- option == DITHER_OPTION_SPATIAL10_FM6 ||
- option == DITHER_OPTION_TRUN10_FM6 ||
- option == DITHER_OPTION_TRUN8_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
- } else if (option == DITHER_OPTION_FM8 ||
- option == DITHER_OPTION_SPATIAL10_FM8 ||
- option == DITHER_OPTION_TRUN10_FM8) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
- } else if (option == DITHER_OPTION_FM10) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
- }
-
- fmt_bit_depth->pixel_encoding = pixel_encoding;
-}
-
-/* Move this after the above function as VS complains about
- * declaration issues for resource_build_bit_depth_reduction_params.
- */
-
-static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
-{
-
- get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
-
- if (pipe_ctx->clock_source)
- pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings);
-
- pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &pipe_ctx->stream->bit_depth_params);
- build_clamping_params(pipe_ctx->stream);
-
- return DC_OK;
-}
-
-bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
-{
- int i;
-
- /* Validate DSC config, dsc count validation is already done */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dsc_config dsc_cfg;
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- /* Only need to validate top pipe */
- if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
- continue;
-
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
- + stream->timing.h_border_right) / opp_cnt;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
- + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
- dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
-
- if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
- return false;
- }
- return true;
-}
-
-enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
-{
- enum dc_status status = DC_OK;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
-
- if (!pipe_ctx)
- return DC_ERROR_UNEXPECTED;
-
-
- status = build_pipe_hw_param(pipe_ctx);
-
- return status;
-}
-
-void dml_acquire_dsc(const struct dc *dc,
- struct resource_context *res_ctx,
- struct display_stream_compressor **dsc,
- int pipe_idx)
-{
- int i;
- const struct resource_pool *pool = dc->res_pool;
- struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
-
- ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
- *dsc = NULL;
-
- /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
- if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
- *dsc = pool->dscs[pipe_idx];
- res_ctx->is_dsc_acquired[pipe_idx] = true;
- return;
- }
-
- /* Return old DSC to avoid the need for redo it */
- if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
- *dsc = dsc_old;
- res_ctx->is_dsc_acquired[dsc_old->inst] = true;
- return ;
- }
-
- /* Find first free DSC */
- for (i = 0; i < pool->res_cap->num_dsc; i++)
- if (!res_ctx->is_dsc_acquired[i]) {
- *dsc = pool->dscs[i];
- res_ctx->is_dsc_acquired[i] = true;
- break;
- }
-}
-
-static bool dml_split_stream_for_mpc_or_odm(
- const struct dc *dc,
- struct resource_context *res_ctx,
- struct pipe_ctx *pri_pipe,
- struct pipe_ctx *sec_pipe,
- bool odm)
-{
- int pipe_idx = sec_pipe->pipe_idx;
- const struct resource_pool *pool = dc->res_pool;
-
- *sec_pipe = *pri_pipe;
-
- sec_pipe->pipe_idx = pipe_idx;
- sec_pipe->plane_res.mi = pool->mis[pipe_idx];
- sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
- sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
- sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
- sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
- sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
- sec_pipe->stream_res.dsc = NULL;
- if (odm) {
- if (pri_pipe->next_odm_pipe) {
- ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
- sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
- sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
- }
- if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
- pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
- sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
- }
- if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
- pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
- sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
- }
- pri_pipe->next_odm_pipe = sec_pipe;
- sec_pipe->prev_odm_pipe = pri_pipe;
- ASSERT(sec_pipe->top_pipe == NULL);
-
- if (!sec_pipe->top_pipe)
- sec_pipe->stream_res.opp = pool->opps[pipe_idx];
- else
- sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
- if (sec_pipe->stream->timing.flags.DSC == 1) {
- dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
- ASSERT(sec_pipe->stream_res.dsc);
- if (sec_pipe->stream_res.dsc == NULL)
- return false;
- }
- } else {
- if (pri_pipe->bottom_pipe) {
- ASSERT(pri_pipe->bottom_pipe != sec_pipe);
- sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
- sec_pipe->bottom_pipe->top_pipe = sec_pipe;
- }
- pri_pipe->bottom_pipe = sec_pipe;
- sec_pipe->top_pipe = pri_pipe;
-
- ASSERT(pri_pipe->plane_state);
- }
-
- return true;
-}
-
-static struct pipe_ctx *dml_find_split_pipe(
- struct dc *dc,
- struct dc_state *context,
- int old_index)
-{
- struct pipe_ctx *pipe = NULL;
- int i;
-
- if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[old_index];
- pipe->pipe_idx = old_index;
- }
-
- if (!pipe)
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
- && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
- if (context->res_ctx.pipe_ctx[i].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[i];
- pipe->pipe_idx = i;
- break;
- }
- }
- }
-
- /*
- * May need to fix pipes getting tossed from 1 opp to another on flip
- * Add for debugging transient underflow during topology updates:
- * ASSERT(pipe);
- */
- if (!pipe)
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- if (context->res_ctx.pipe_ctx[i].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[i];
- pipe->pipe_idx = i;
- break;
- }
- }
-
- return pipe;
-}
-
-static void dml_release_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
- struct display_stream_compressor **dsc)
-{
- int i;
-
- for (i = 0; i < pool->res_cap->num_dsc; i++)
- if (pool->dscs[i] == *dsc) {
- res_ctx->is_dsc_acquired[i] = false;
- *dsc = NULL;
- break;
- }
-}
-
-static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
-{
- int mpc_split_count = 0;
- struct pipe_ctx *other_pipe = pipe->bottom_pipe;
-
- while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
- mpc_split_count++;
- other_pipe = other_pipe->bottom_pipe;
- }
- other_pipe = pipe->top_pipe;
- while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
- mpc_split_count++;
- other_pipe = other_pipe->top_pipe;
- }
-
- return mpc_split_count;
-}
-
-static bool dml_enough_pipes_for_subvp(struct dc *dc,
- struct dc_state *context)
-{
- int i = 0;
- int num_pipes = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream && pipe->plane_state)
- num_pipes++;
- }
-
- // Sub-VP only possible if the number of "real" pipes is
- // less than or equal to half the number of available pipes
- if (num_pipes * 2 > dc->res_pool->pipe_count)
- return false;
-
- return true;
-}
-
-static int dml_validate_apply_pipe_split_flags(
- struct dc *dc,
- struct dc_state *context,
- int vlevel,
- int *split,
- bool *merge)
-{
- int i, pipe_idx, vlevel_split;
- int plane_count = 0;
- bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- struct vba_vars_st *v = &context->bw_ctx.dml.vba;
- int max_mpc_comb = v->maxMpcComb;
-
- if (context->stream_count > 1) {
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
- avoid_split = true;
- } else if (dc->debug.force_single_disp_pipe_split)
- force_split = true;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /**
- * Workaround for avoiding pipe-split in cases where we'd split
- * planes that are too small, resulting in splits that aren't
- * valid for the scaler.
- */
- if (pipe->plane_state &&
- (pipe->plane_state->dst_rect.width <= 16 ||
- pipe->plane_state->dst_rect.height <= 16 ||
- pipe->plane_state->src_rect.width <= 16 ||
- pipe->plane_state->src_rect.height <= 16))
- avoid_split = true;
-
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
- }
- if (plane_count > dc->res_pool->pipe_count / 2)
- avoid_split = true;
-
- /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct dc_crtc_timing timing;
-
- if (!pipe->stream)
- continue;
- else {
- timing = pipe->stream->timing;
- if (timing.h_border_left + timing.h_border_right
- + timing.v_border_top + timing.v_border_bottom > 0) {
- avoid_split = true;
- break;
- }
- }
- }
-
- /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
- if (avoid_split) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
- if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
- v->ModeSupport[vlevel][0])
- break;
- /* Impossible to not split this pipe */
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- vlevel = vlevel_split;
- else
- max_mpc_comb = 0;
- pipe_idx++;
- }
- v->maxMpcComb = max_mpc_comb;
- }
-
- /* Split loop sets which pipe should be split based on dml outputs and dc flags */
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- int pipe_plane = v->pipe_plane[pipe_idx];
- bool split4mpc = context->stream_count == 1 && plane_count == 1
- && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
-
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
- split[i] = 4;
- else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
- split[i] = 2;
-
- if ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE))
- split[i] = 2;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- split[i] = 2;
- v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
- }
- if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
- split[i] = 4;
- v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
- }
- /*420 format workaround*/
- if (pipe->stream->timing.h_addressable > 7680 &&
- pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- split[i] = 4;
- }
-
- v->ODMCombineEnabled[pipe_plane] =
- v->ODMCombineEnablePerState[vlevel][pipe_plane];
-
- if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- if (dml_get_num_mpc_splits(pipe) == 1) {
- /*If need split for mpc but 2 way split already*/
- if (split[i] == 4)
- split[i] = 2; /* 2 -> 4 MPC */
- else if (split[i] == 2)
- split[i] = 0; /* 2 -> 2 MPC */
- else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
- merge[i] = true; /* 2 -> 1 MPC */
- } else if (dml_get_num_mpc_splits(pipe) == 3) {
- /*If need split for mpc but 4 way split already*/
- if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
- || !pipe->bottom_pipe)) {
- merge[i] = true; /* 4 -> 2 MPC */
- } else if (split[i] == 0 && pipe->top_pipe &&
- pipe->top_pipe->plane_state == pipe->plane_state)
- merge[i] = true; /* 4 -> 1 MPC */
- split[i] = 0;
- } else if (dml_get_num_mpc_splits(pipe)) {
- /* ODM -> MPC transition */
- if (pipe->prev_odm_pipe) {
- split[i] = 0;
- merge[i] = true;
- }
- }
- } else {
- if (dml_get_num_mpc_splits(pipe) == 1) {
- /*If need split for odm but 2 way split already*/
- if (split[i] == 4)
- split[i] = 2; /* 2 -> 4 ODM */
- else if (split[i] == 2)
- split[i] = 0; /* 2 -> 2 ODM */
- else if (pipe->prev_odm_pipe) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* exit ODM */
- }
- } else if (dml_get_num_mpc_splits(pipe) == 3) {
- /*If need split for odm but 4 way split already*/
- if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
- || !pipe->next_odm_pipe)) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* 4 -> 2 ODM */
- } else if (split[i] == 0 && pipe->prev_odm_pipe) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* exit ODM */
- }
- split[i] = 0;
- } else if (dml_get_num_mpc_splits(pipe)) {
- /* MPC -> ODM transition */
- ASSERT(0); /* NOT expected yet */
- if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
- split[i] = 0;
- merge[i] = true;
- }
- }
- }
-
- /* Adjust dppclk when split is forced, do not bother with dispclk */
- if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
- v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
- pipe_idx++;
- }
-
- return vlevel;
-}
-
-static void dml_set_phantom_stream_timing(struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *ref_pipe,
- struct dc_stream_state *phantom_stream)
-{
- // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
- uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
- dc->caps.subvp_fw_processing_delay_us +
- dc->caps.subvp_pstate_allow_width_us;
- uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
- (ref_pipe->stream->timing.pix_clk_100hz * 100) /
- (double)ref_pipe->stream->timing.h_total;
- uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;
-
- phantom_stream->dst.y = 0;
- phantom_stream->dst.height = phantom_vactive;
- phantom_stream->src.y = 0;
- phantom_stream->src.height = phantom_vactive;
-
- phantom_stream->timing.v_addressable = phantom_vactive;
- phantom_stream->timing.v_front_porch = 1;
- phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
- phantom_stream->timing.v_front_porch +
- phantom_stream->timing.v_sync_width +
- phantom_bp;
-}
-
-static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *ref_pipe)
-{
- struct dc_stream_state *phantom_stream = NULL;
-
- phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
-
- /* stream has limited viewport and small timing */
- memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
- memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
- memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
- dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);
-
- dc_add_stream_to_ctx(dc, context, phantom_stream);
- dc->hwss.apply_ctx_to_hw(dc, context);
- return phantom_stream;
-}
-
-static void dml_enable_phantom_plane(struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *phantom_stream,
- struct pipe_ctx *main_pipe)
-{
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_plane_state *prev_phantom_plane = NULL;
- struct pipe_ctx *curr_pipe = main_pipe;
-
- while (curr_pipe) {
- if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
- phantom_plane = prev_phantom_plane;
- else
- phantom_plane = dc_create_plane_state(dc);
-
- memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
- memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
- sizeof(phantom_plane->scaling_quality));
- memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
- memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
- memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
- memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
- sizeof(phantom_plane->plane_size));
- memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
- sizeof(phantom_plane->tiling_info));
- memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
- /* Currently compat_level is undefined in dc_state
- * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
- */
- phantom_plane->format = curr_pipe->plane_state->format;
- phantom_plane->rotation = curr_pipe->plane_state->rotation;
- phantom_plane->visible = curr_pipe->plane_state->visible;
-
- /* Shadow pipe has small viewport. */
- phantom_plane->clip_rect.y = 0;
- phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
-
- dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
-
- curr_pipe = curr_pipe->bottom_pipe;
- prev_phantom_plane = phantom_plane;
- }
-}
-
-static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct dc_stream_state *ref_stream = pipe->stream;
- // Only construct phantom stream for top pipes that have plane enabled
- if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
- struct dc_stream_state *phantom_stream = NULL;
-
- phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
- dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state && pipe->stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- pipe->stream->use_dynamic_meta = false;
- pipe->plane_state->flip_immediate = false;
- if (!resource_build_scaling_params(pipe)) {
- // Log / remove phantom pipes since failed to build scaling params
- }
- }
- }
-}
-
-static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i;
- bool removed_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- dc_rem_all_planes_for_stream(dc, pipe->stream, context);
- dc_remove_stream_from_ctx(dc, context, pipe->stream);
- removed_pipe = true;
- }
-
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
- }
- if (removed_pipe)
- dc->hwss.apply_ctx_to_hw(dc, context);
-}
-
-/*
- * If the input state contains no upstream planes for a particular pipe (i.e. only timing)
- * we need to populate some "conservative" plane information as DML cannot handle "no planes"
- */
-static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
-{
- pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
- pipe->src.source_scan = dm_horz;
- pipe->src.sw_mode = dm_sw_4kb_s;
- pipe->src.macro_tile_size = dm_64k_tile;
- pipe->src.viewport_width = timing->h_addressable;
- if (pipe->src.viewport_width > 1920)
- pipe->src.viewport_width = 1920;
- pipe->src.viewport_height = timing->v_addressable;
- if (pipe->src.viewport_height > 1080)
- pipe->src.viewport_height = 1080;
- pipe->src.surface_height_y = pipe->src.viewport_height;
- pipe->src.surface_width_y = pipe->src.viewport_width;
- pipe->src.surface_height_c = pipe->src.viewport_height;
- pipe->src.surface_width_c = pipe->src.viewport_width;
- pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
- pipe->src.source_format = dm_444_32;
- pipe->dest.recout_width = pipe->src.viewport_width;
- pipe->dest.recout_height = pipe->src.viewport_height;
- pipe->dest.full_recout_width = pipe->dest.recout_width;
- pipe->dest.full_recout_height = pipe->dest.recout_height;
- pipe->scale_ratio_depth.lb_depth = dm_lb_16;
- pipe->scale_ratio_depth.hscl_ratio = 1.0;
- pipe->scale_ratio_depth.vscl_ratio = 1.0;
- pipe->scale_ratio_depth.scl_enable = 0;
- pipe->scale_taps.htaps = 1;
- pipe->scale_taps.vtaps = 1;
- pipe->dest.vtotal_min = timing->v_total;
- pipe->dest.vtotal_max = timing->v_total;
-
- if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
- pipe->src.viewport_width /= 2;
- pipe->dest.recout_width /= 2;
- } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
- pipe->src.viewport_width /= 4;
- pipe->dest.recout_width /= 4;
- }
-
- pipe->src.dcc = false;
- pipe->src.dcc_rate = 1;
-}
-
-/*
- * If the pipe is not blending (i.e. pipe_ctx->top pipe == null) then its
- * hsplit group is equal to its own pipe ID
- * Otherwise, all pipes part of the same blending tree have the same hsplit group
- * ID as the top most pipe
- *
- * If the pipe ctx is ODM combined, then similar logic follows
- */
-static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
-{
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
-
- if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
- == dc_pipe_ctx->plane_state) {
- struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
- int split_idx = 0;
-
- while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
- == dc_pipe_ctx->plane_state) {
- first_pipe = first_pipe->top_pipe;
- split_idx++;
- }
-
- /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */
- if (split_idx == 0)
- e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
- else if (split_idx == 1)
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
- else if (split_idx == 2)
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;
-
- } else if (dc_pipe_ctx->prev_odm_pipe) {
- struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;
-
- while (first_pipe->prev_odm_pipe)
- first_pipe = first_pipe->prev_odm_pipe;
- e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
- }
-}
-
-static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
-{
- const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
- const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;
-
- e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
- e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
- || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
- || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
-
- /* stereo is not split */
- if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
- pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
- e2e_pipe->pipe.src.is_hsplit = false;
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
- }
-
- e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
- || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
- e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
- e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
- e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
- e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
- e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
- e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
- e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
- e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
- e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
- e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
- e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
-
- if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
- || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
- e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
- e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
- e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
- } else {
- e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
- e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
- }
- e2e_pipe->pipe.src.dcc = pln->dcc.enable;
- e2e_pipe->pipe.src.dcc_rate = 1;
- e2e_pipe->pipe.dest.recout_width = scl->recout.width;
- e2e_pipe->pipe.dest.recout_height = scl->recout.height;
- e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
- e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
- if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
- e2e_pipe->pipe.dest.full_recout_width *= 2;
- else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
- e2e_pipe->pipe.dest.full_recout_width *= 4;
- else {
- struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;
-
- while (split_pipe && split_pipe->plane_state == pln) {
- e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
- split_pipe = split_pipe->bottom_pipe;
- }
- split_pipe = dc_pipe_ctx->top_pipe;
- while (split_pipe && split_pipe->plane_state == pln) {
- e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
- split_pipe = split_pipe->top_pipe;
- }
- }
-
- e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
- e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.scl_enable =
- scl->ratios.vert.value != dc_fixpt_one.value
- || scl->ratios.horz.value != dc_fixpt_one.value
- || scl->ratios.vert_c.value != dc_fixpt_one.value
- || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
- || always_scale; /*support always scale*/
- e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
- e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
- e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
- e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
-
- /* Currently compat_level is not defined. Commenting it until further resolution
- * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
- swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
- &e2e_pipe->pipe.src.sw_mode);
- e2e_pipe->pipe.src.macro_tile_size =
- swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
- } else {
- gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
- pln->compat_level,
- &e2e_pipe->pipe.src.sw_mode);
- e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
- }*/
-
- e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
-}
-
-static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
-{
- /*
- * For graphic plane, cursor number is 1, nv12 is 0
- * bw calculations due to cursor on/off
- */
- if (dc_pipe_ctx->plane_state &&
- (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
- e2e_pipe->pipe.src.num_cursors = 0;
- else
- e2e_pipe->pipe.src.num_cursors = 1;
-
- e2e_pipe->pipe.src.cur0_src_width = 256;
- e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
-}
-
-static int populate_dml_pipes_from_context_base(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
-{
- int pipe_cnt, i;
- bool synchronized_vblank = true;
- struct resource_context *res_ctx = &context->res_ctx;
-
- for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (pipe_cnt < 0) {
- pipe_cnt = i;
- continue;
- }
-
- if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (dc->debug.disable_timing_sync ||
- (!resource_are_streams_timing_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream) &&
- !resource_are_vblanks_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream))) {
- synchronized_vblank = false;
- break;
- }
- }
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
-
- struct audio_check aud_check = {0};
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- /* todo:
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
- pipes[pipe_cnt].pipe.src.dcc = 0;
- pipes[pipe_cnt].pipe.src.vm = 0;*/
-
- pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
-
- pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
- /* todo: rotation?*/
- pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
- if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
- /* 1/2 vblank */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
- - timing->v_border_top - timing->v_border_bottom) / 2;
- /* 36 bytes dp, 32 hdmi */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
- dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
- }
- pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
-
- dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
- pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
- pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
-
- pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
-
- pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);
-
- populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
-
- pipes[pipe_cnt].dout.dp_lanes = 4;
- pipes[pipe_cnt].dout.is_virtual = 0;
- pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
- if (pipes[pipe_cnt].dout.output_type < 0) {
- pipes[pipe_cnt].dout.output_type = dm_dp;
- pipes[pipe_cnt].dout.is_virtual = 1;
- }
-
- populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);
-
- if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
- pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
-
- /* todo: default max for now, until there is logic reflecting this in dc*/
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- /*fill up the audio sample rate (unit in kHz)*/
- get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
- pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
-
- populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
-
- if (!res_ctx->pipe_ctx[i].plane_state) {
- populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
- } else {
- populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
- }
-
- pipe_cnt++;
- }
-
- /* populate writeback information */
- if (dc->res_pool)
- dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
-
- return pipe_cnt;
-}
-
-static int dml_populate_dml_pipes_from_context(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
-{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe = NULL; // Fix potentially uninitialized error from VS
-
- populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
-
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
- pipe_cnt++;
- }
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format)) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- }
-
- return pipe_cnt;
-}
-
-static void dml_full_validate_bw_helper(struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *vlevel,
- int *split,
- bool *merge,
- int *pipe_cnt)
-{
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- /*
- * DML favors voltage over p-state, but we're more interested in
- * supporting p-state over voltage. We can't support p-state in
- * prefetch mode > 0 so try capping the prefetch mode to start.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh_and_mclk_switch;
- *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
- /* This may adjust vlevel and maxMpcComb */
- if (*vlevel < context->bw_ctx.dml.soc.num_states)
- *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
-
- /* Conditions for setting up phantom pipes for SubVP:
- * 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
- * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
- * 4. Display configuration passes validation
- * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
- */
- if (!dc->debug.force_disable_subvp &&
- dml_enough_pipes_for_subvp(dc, context) &&
- *vlevel < context->bw_ctx.dml.soc.num_states &&
- (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
- dc->debug.force_subvp_mclk_switch)) {
-
- dml_add_phantom_pipes(dc, context);
-
- /* Create input to DML based on new context which includes phantom pipes
- * TODO: Input to DML should mark which pipes are phantom
- */
- *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
- *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
- if (*vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, MAX_PIPES * sizeof(*split));
- memset(merge, 0, MAX_PIPES * sizeof(*merge));
- *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
- }
-
- // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
- // remove phantom pipes and repopulate dml pipes
- if (*vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- dml_remove_phantom_pipes(dc, context);
- *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
- }
- }
-}
-
-static void dcn20_adjust_adaptive_sync_v_startup(
- const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
-{
- struct dc_crtc_timing patched_crtc_timing;
- uint32_t asic_blank_end = 0;
- uint32_t asic_blank_start = 0;
- uint32_t newVstartup = 0;
-
- patched_crtc_timing = *dc_crtc_timing;
-
- if (patched_crtc_timing.flags.INTERLACE == 1) {
- if (patched_crtc_timing.v_front_porch < 2)
- patched_crtc_timing.v_front_porch = 2;
- } else {
- if (patched_crtc_timing.v_front_porch < 1)
- patched_crtc_timing.v_front_porch = 1;
- }
-
- /* blank_start = frame end - front porch */
- asic_blank_start = patched_crtc_timing.v_total -
- patched_crtc_timing.v_front_porch;
-
- /* blank_end = blank_start - active */
- asic_blank_end = asic_blank_start -
- patched_crtc_timing.v_border_bottom -
- patched_crtc_timing.v_addressable -
- patched_crtc_timing.v_border_top;
-
- newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
-
- *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
-}
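A minimal standalone sketch of the arithmetic performed by dcn20_adjust_adaptive_sync_v_startup() above, using assumed 1080p-style timing (v_total 1125, v_addressable 1080, v_front_porch 4, no borders). The driver additionally clamps the front porch to at least 1 (2 when interlaced), which leaves these numbers unchanged:

#include <stdio.h>

int main(void)
{
	/* assumed timing values, for illustration only */
	unsigned int v_total = 1125, v_addressable = 1080, v_front_porch = 4;
	unsigned int v_border_top = 0, v_border_bottom = 0;
	int vstartup_start = 20;	/* assumed value handed in by DML */

	unsigned int blank_start = v_total - v_front_porch;		/* 1121 */
	unsigned int blank_end = blank_start - v_border_bottom
				 - v_addressable - v_border_top;	/* 41 */
	unsigned int new_vstartup = blank_end + (v_total - blank_start); /* 45 */

	if ((int)new_vstartup > vstartup_start)
		vstartup_start = new_vstartup;

	printf("adjusted vstartup_start = %d\n", vstartup_start);	/* 45 */
	return 0;
}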
-
-static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
-{
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal));
-}
-
-static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
-{
- int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
- return true;
- }
- return false;
-}
-
-static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- }
-}
-
-static bool dml_internal_validate(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *vlevel_out,
- bool fast_validate)
-{
- bool out = false;
- bool repopulate_pipes = false;
- int split[MAX_PIPES] = { 0 };
- bool merge[MAX_PIPES] = { false };
- bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- ASSERT(pipes);
- if (!pipes)
- return false;
-
- // For each full update, remove all existing phantom pipes first
- dml_remove_phantom_pipes(dc, context);
-
- dml_update_soc_for_wm_a(dc, context);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state) {
- // On initial pass through DML, we intend to use MALL for SS on all
- // (non-PSR) surfaces with none using MALL for P-State
-			// 'mall_plane_config' is not a member of 'dc_plane_state' - commented out until mall_plane_config is supported in dc_plane_state
- //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
- // pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
- }
- }
- pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (!fast_validate) {
- dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
- }
-
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- /*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
- *
- * We don't actually support prefetch mode 2, so require that we
- * at least support prefetch mode 1.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, sizeof(split));
- memset(merge, 0, sizeof(merge));
- vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
- }
- }
-
- dml_log_mode_support_params(&context->bw_ctx.dml);
-
- if (vlevel == context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
- if (!pipe->stream)
- continue;
-
- /* We only support full screen mpo with ODM */
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
- }
- pipe_idx++;
- }
-
- /* merge pipes if necessary */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /*skip pipes that don't need merging*/
- if (!merge[i])
- continue;
-
- /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
- if (pipe->prev_odm_pipe) {
- /*split off odm pipe*/
- pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
- if (pipe->next_odm_pipe)
- pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
-
- pipe->bottom_pipe = NULL;
- pipe->next_odm_pipe = NULL;
- pipe->plane_state = NULL;
- pipe->stream = NULL;
- pipe->top_pipe = NULL;
- pipe->prev_odm_pipe = NULL;
- if (pipe->stream_res.dsc)
- dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
- memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
- memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
- repopulate_pipes = true;
- } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
- struct pipe_ctx *top_pipe = pipe->top_pipe;
- struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
-
- top_pipe->bottom_pipe = bottom_pipe;
- if (bottom_pipe)
- bottom_pipe->top_pipe = top_pipe;
-
- pipe->top_pipe = NULL;
- pipe->bottom_pipe = NULL;
- pipe->plane_state = NULL;
- pipe->stream = NULL;
- memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
- memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
- repopulate_pipes = true;
- } else
- ASSERT(0); /* Should never try to merge master pipe */
-
- }
-
- for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *hsplit_pipe = NULL;
- bool odm;
- int old_index = -1;
-
- if (!pipe->stream || newly_split[i])
- continue;
-
- pipe_idx++;
- odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
-
- if (!pipe->plane_state && !odm)
- continue;
-
- if (split[i]) {
- if (odm) {
- if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (old_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->pipe_idx;
- } else {
- if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
- else if (old_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->pipe_idx;
- }
- hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
- ASSERT(hsplit_pipe);
- if (!hsplit_pipe)
- goto validate_fail;
-
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- pipe, hsplit_pipe, odm))
- goto validate_fail;
-
- newly_split[hsplit_pipe->pipe_idx] = true;
- repopulate_pipes = true;
- }
- if (split[i] == 4) {
- struct pipe_ctx *pipe_4to1;
-
- if (odm && old_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->pipe_idx;
- else
- old_index = -1;
- pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
- ASSERT(pipe_4to1);
- if (!pipe_4to1)
- goto validate_fail;
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- pipe, pipe_4to1, odm))
- goto validate_fail;
- newly_split[pipe_4to1->pipe_idx] = true;
-
- if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
- && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
- else
- old_index = -1;
- pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
- ASSERT(pipe_4to1);
- if (!pipe_4to1)
- goto validate_fail;
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- hsplit_pipe, pipe_4to1, odm))
- goto validate_fail;
- newly_split[pipe_4to1->pipe_idx] = true;
- }
- if (odm)
- dml_build_mapped_resource(dc, context, pipe->stream);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state) {
- if (!resource_build_scaling_params(pipe))
- goto validate_fail;
- }
- }
-
-	/* Validate the actual DSC count per stream */
- if (!dml_validate_dsc(dc, context)) {
- vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
- goto validate_fail;
- }
-
- if (repopulate_pipes)
- pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
- *vlevel_out = vlevel;
- *pipe_cnt_out = pipe_cnt;
-
- out = true;
- goto validate_out;
-
-validate_fail:
- out = false;
-
-validate_out:
- return out;
-}
-
-static void dml_calculate_dlg_params(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx;
- int plane_count;
-
- /* Writeback MCIF_WB arbitration parameters */
- if (dc->res_pool)
- dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
-
- context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
- context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
- context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
- context->bw_ctx.bw.dcn.clk.p_state_change_support =
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
- != dm_dram_clock_change_unsupported;
-
- context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
-	/* 'z9_support' is not a member of 'dc_clocks' - commented out until dc_clocks has this support
- * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
- DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
- */
- plane_count = 0;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].plane_state)
- plane_count++;
- }
-
- /* Commented out as per above error for now.
- if (plane_count == 0)
- context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
- */
- context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
- context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support =
- context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
- context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
- // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
- context->res_ctx.pipe_ctx[i].unbounded_req = false;
- } else {
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
- context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
- }
-
- if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
- pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
- pipe_idx++;
- }
-	/* save an original dppclock copy */
- context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
- context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
- context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
- context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
- context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
- - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
-
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].dlg_regs,
- &context->res_ctx.pipe_ctx[i].ttu_regs,
- pipes,
- pipe_cnt,
- pipe_idx,
- cstate_en,
- context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, true);
-
- context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].rq_regs,
- &pipes[pipe_idx].pipe);
- pipe_idx++;
- }
-}
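A hedged sketch of the compressed-buffer split at the end of dml_calculate_dlg_params() above: whatever part of the return buffer is not claimed as DET by active pipes is left as compbuf. The sizes below are assumptions for illustration, not real DCN values:

#include <stdio.h>

int main(void)
{
	/* assumed, illustrative sizes */
	int config_return_buffer_size_kb = 1280;	/* total return buffer */
	int det_buffer_size_kb = 384;			/* per-pipe DET */
	int active_pipes = 2;

	int compbuf_size_kb = config_return_buffer_size_kb
			      - det_buffer_size_kb * active_pipes;

	printf("compbuf_size_kb = %d\n", compbuf_size_kb);	/* 512 */
	return 0;
}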
-
-static void dml_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx, vlevel_temp = 0;
-
- double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
- dm_dram_clock_change_unsupported;
-
- /* Set B:
- * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
-	 * otherwise use an arbitrarily low value from the spreadsheet for DCFCLK, as lower is safer for watermark
- * calculations to cover bootup clocks.
- * DCFCLK: soc.clock_limits[2] when available
- * UCLK: soc.clock_limits[2] when available
- */
- if (context->bw_ctx.dml.soc.num_states > 2) {
- vlevel_temp = 2;
- dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
- } else
- dcfclk = 615; //DCFCLK Vmin_lv
-
- pipes[0].clks_cfg.voltage = vlevel_temp;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
-
- /* Set D:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW when available
- * UCLK : Min, as reported by PM FW when available
-	 * sr_enter_exit/sr_exit should be lower than the values used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
- */
-
- if (context->bw_ctx.dml.soc.num_states > 2) {
- vlevel_temp = 0;
- dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- } else
- dcfclk = 615; //DCFCLK Vmin_lv
-
- pipes[0].clks_cfg.voltage = vlevel_temp;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
- /* Set C, for Dummy P-State:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW, when available
- * UCLK : Min, as reported by PM FW, when available
- * pstate latency as per UCLK state dummy pstate latency
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts_margin = 160;
-
- if ((!pstate_en))
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
-		/* find the largest table entry below the DRAM speed; anything lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
- if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
-		/* The only difference between A and C is the p-state latency. If p-state is not supported
-		 * with the full p-state latency, we want to calculate DLG based on the dummy p-state latency.
-		 * Set A's p-state watermark was previously set to 0 when p-state is unsupported; keep that behavior for now.
-		 */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW, when available
- * UCLK: Min, as reported by PM FW, when available
- */
- dml_update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
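A standalone sketch of the dummy p-state table lookup used for watermark set C in dml_calculate_wm_and_dlg() above: walk the four-entry table from the top and stop at the first entry whose DRAM speed is below min_dram_speed_mts plus the margin, falling back to entry 0. The table contents and DRAM speed here are assumed:

#include <stdio.h>

/* simplified stand-in for the driver's dummy_pstate_entry */
struct dummy_pstate_entry {
	unsigned int dram_speed_mts;
	unsigned int dummy_pstate_latency_us;
};

int main(void)
{
	/* assumed, illustrative table and DRAM speed */
	struct dummy_pstate_entry table[4] = {
		{ 1600, 38 }, { 8000, 9 }, { 10000, 8 }, { 16000, 5 },
	};
	unsigned int min_dram_speed_mts = 8533, margin = 160;
	int i;

	for (i = 3; i > 0; i--)
		if (min_dram_speed_mts + margin > table[i].dram_speed_mts)
			break;

	printf("selected entry %d, latency %u us\n",
	       i, table[i].dummy_pstate_latency_us);	/* entry 1, 9 us */
	return 0;
}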
-
-bool dml_validate(struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
-{
- bool out = false;
-
- BW_VAL_TRACE_SETUP();
-
- int vlevel = 0;
- int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- BW_VAL_TRACE_COUNT();
-
- out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
-
- if (pipe_cnt == 0)
- goto validate_out;
-
- if (!out)
- goto validate_fail;
-
- BW_VAL_TRACE_END_VOLTAGE_LEVEL();
-
- if (fast_validate) {
- BW_VAL_TRACE_SKIP(fast);
- goto validate_out;
- }
-
- dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
-
- BW_VAL_TRACE_END_WATERMARKS();
-
- goto validate_out;
-
-validate_fail:
- DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
- dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
-
- BW_VAL_TRACE_SKIP(fail);
- out = false;
-
-validate_out:
- BW_VAL_TRACE_FINISH();
-
- return out;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
deleted file mode 100644
index 4ec5310a2962..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifdef DML_WRAPPER_TRANSLATION_
-
-static void gfx10array_mode_to_dml_params(
- enum array_mode_values array_mode,
- enum legacy_tiling_compat_level compat_level,
- unsigned int *sw_mode)
-{
- switch (array_mode) {
- case DC_ARRAY_LINEAR_ALLIGNED:
- case DC_ARRAY_LINEAR_GENERAL:
- *sw_mode = dm_sw_linear;
- break;
- case DC_ARRAY_2D_TILED_THIN1:
-// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed
-#if 0
- if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO)
- *sw_mode = dm_sw_gfx7_2d_thin_l_vp;
- else
- *sw_mode = dm_sw_gfx7_2d_thin_gl;
-#endif
- break;
- default:
- ASSERT(0); /* Not supported */
- break;
- }
-}
-
-static void swizzle_to_dml_params(
- enum swizzle_mode_values swizzle,
- unsigned int *sw_mode)
-{
- switch (swizzle) {
- case DC_SW_LINEAR:
- *sw_mode = dm_sw_linear;
- break;
- case DC_SW_4KB_S:
- *sw_mode = dm_sw_4kb_s;
- break;
- case DC_SW_4KB_S_X:
- *sw_mode = dm_sw_4kb_s_x;
- break;
- case DC_SW_4KB_D:
- *sw_mode = dm_sw_4kb_d;
- break;
- case DC_SW_4KB_D_X:
- *sw_mode = dm_sw_4kb_d_x;
- break;
- case DC_SW_64KB_S:
- *sw_mode = dm_sw_64kb_s;
- break;
- case DC_SW_64KB_S_X:
- *sw_mode = dm_sw_64kb_s_x;
- break;
- case DC_SW_64KB_S_T:
- *sw_mode = dm_sw_64kb_s_t;
- break;
- case DC_SW_64KB_D:
- *sw_mode = dm_sw_64kb_d;
- break;
- case DC_SW_64KB_D_X:
- *sw_mode = dm_sw_64kb_d_x;
- break;
- case DC_SW_64KB_D_T:
- *sw_mode = dm_sw_64kb_d_t;
- break;
- case DC_SW_64KB_R_X:
- *sw_mode = dm_sw_64kb_r_x;
- break;
- case DC_SW_VAR_S:
- *sw_mode = dm_sw_var_s;
- break;
- case DC_SW_VAR_S_X:
- *sw_mode = dm_sw_var_s_x;
- break;
- case DC_SW_VAR_D:
- *sw_mode = dm_sw_var_d;
- break;
- case DC_SW_VAR_D_X:
- *sw_mode = dm_sw_var_d_x;
- break;
-
- default:
- ASSERT(0); /* Not supported */
- break;
- }
-}
-
-static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest)
-{
- dest->hblank_start = timing->h_total - timing->h_front_porch;
- dest->hblank_end = dest->hblank_start
- - timing->h_addressable
- - timing->h_border_left
- - timing->h_border_right;
- dest->vblank_start = timing->v_total - timing->v_front_porch;
- dest->vblank_end = dest->vblank_start
- - timing->v_addressable
- - timing->v_border_top
- - timing->v_border_bottom;
- dest->htotal = timing->h_total;
- dest->vtotal = timing->v_total;
- dest->hactive = timing->h_addressable;
- dest->vactive = timing->v_addressable;
- dest->interlaced = timing->flags.INTERLACE;
- dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
- if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- dest->pixel_rate_mhz *= 2;
-}
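A standalone sketch of the translation done by dc_timing_to_dml_timing() above, run on assumed CEA-style 1080p60 numbers with borders taken as zero: DML wants blank start/end positions and a pixel rate in MHz rather than porch widths and a clock in units of 100 Hz:

#include <stdio.h>

int main(void)
{
	/* assumed 1080p60-style timing, borders zero */
	unsigned int h_total = 2200, h_addressable = 1920, h_front_porch = 88;
	unsigned int v_total = 1125, v_addressable = 1080, v_front_porch = 4;
	unsigned int pix_clk_100hz = 1485000;	/* 148.5 MHz */

	unsigned int hblank_start = h_total - h_front_porch;		/* 2112 */
	unsigned int hblank_end = hblank_start - h_addressable;	/* 192 */
	unsigned int vblank_start = v_total - v_front_porch;		/* 1121 */
	unsigned int vblank_end = vblank_start - v_addressable;	/* 41 */
	double pixel_rate_mhz = pix_clk_100hz / 10000.0;		/* 148.5 */

	printf("hblank %u..%u, vblank %u..%u, %.1f MHz\n",
	       hblank_end, hblank_start, vblank_end, vblank_start,
	       pixel_rate_mhz);
	return 0;
}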
-
-static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe)
-{
- int odm_split_count = 0;
- enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-
- // Traverse pipe tree to determine odm split count
- while (next_pipe) {
- odm_split_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_split_count++;
- pipe = pipe->prev_odm_pipe;
- }
-
- // Translate split to DML odm combine factor
- switch (odm_split_count) {
- case 1:
- combine_mode = dm_odm_combine_mode_2to1;
- break;
- case 3:
- combine_mode = dm_odm_combine_mode_4to1;
- break;
- default:
- combine_mode = dm_odm_combine_mode_disabled;
- }
-
- return combine_mode;
-}
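A toy, self-contained sketch of the counting rule in get_dml_odm_combine() above (the pipe type here is a simplified stand-in, not the driver's struct pipe_ctx): count the neighbours along the ODM chain in both directions and map 1 to 2to1, 3 to 4to1, anything else to disabled:

#include <stdio.h>

struct toy_pipe {
	struct toy_pipe *prev_odm_pipe;
	struct toy_pipe *next_odm_pipe;
};

static const char *odm_combine(const struct toy_pipe *pipe)
{
	int splits = 0;
	const struct toy_pipe *p;

	for (p = pipe->next_odm_pipe; p; p = p->next_odm_pipe)
		splits++;
	for (p = pipe->prev_odm_pipe; p; p = p->prev_odm_pipe)
		splits++;

	return splits == 1 ? "2to1" : splits == 3 ? "4to1" : "disabled";
}

int main(void)
{
	struct toy_pipe a = { 0 }, b = { 0 };

	a.next_odm_pipe = &b;	/* two-pipe ODM chain */
	b.prev_odm_pipe = &a;

	printf("combine mode: %s\n", odm_combine(&a));	/* 2to1 */
	return 0;
}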
-
-static int get_dml_output_type(enum signal_type dc_signal)
-{
- int dml_output_type = -1;
-
- switch (dc_signal) {
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- case SIGNAL_TYPE_DISPLAY_PORT:
- dml_output_type = dm_dp;
- break;
- case SIGNAL_TYPE_EDP:
- dml_output_type = dm_edp;
- break;
- case SIGNAL_TYPE_HDMI_TYPE_A:
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- dml_output_type = dm_hdmi;
- break;
- default:
- break;
- }
-
- return dml_output_type;
-}
-
-static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout)
-{
- int output_bpc = 0;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- output_bpc = 6;
- break;
- case COLOR_DEPTH_888:
- output_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- output_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- output_bpc = 12;
- break;
- case COLOR_DEPTH_141414:
- output_bpc = 14;
- break;
- case COLOR_DEPTH_161616:
- output_bpc = 16;
- break;
- case COLOR_DEPTH_999:
- output_bpc = 9;
- break;
- case COLOR_DEPTH_111111:
- output_bpc = 11;
- break;
- default:
- output_bpc = 8;
- break;
- }
-
- switch (timing->pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- dout->output_format = dm_444;
- dout->output_bpp = output_bpc * 3;
- break;
- case PIXEL_ENCODING_YCBCR420:
- dout->output_format = dm_420;
- dout->output_bpp = (output_bpc * 3.0) / 2;
- break;
- case PIXEL_ENCODING_YCBCR422:
- if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple)
- dout->output_format = dm_n422;
- else
- dout->output_format = dm_s422;
- dout->output_bpp = output_bpc * 2;
- break;
- default:
- dout->output_format = dm_444;
- dout->output_bpp = output_bpc * 3;
- }
-}
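A small sketch of the bits-per-pixel rule applied by populate_color_depth_and_encoding_from_timing() above, shown for an assumed 10-bit timing: 4:4:4 costs three components per pixel, 4:2:0 costs 1.5, and 4:2:2 costs 2:

#include <stdio.h>

int main(void)
{
	int output_bpc = 10;	/* assumed COLOR_DEPTH_101010 timing */

	double bpp_444 = output_bpc * 3.0;	/* 30.0 */
	double bpp_420 = output_bpc * 3.0 / 2;	/* 15.0 */
	double bpp_422 = output_bpc * 2.0;	/* 20.0 */

	printf("444 %.1f, 420 %.1f, 422 %.1f bits per pixel\n",
	       bpp_444, bpp_420, bpp_422);
	return 0;
}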
-
-static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format)
-{
- enum source_format_class dml_format = dm_444_32;
-
- switch (dc_format) {
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
- dml_format = dm_420_8;
- break;
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
- dml_format = dm_420_10;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
- dml_format = dm_444_64;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
- dml_format = dm_444_16;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
- dml_format = dm_444_8;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
- dml_format = dm_rgbe_alpha;
- break;
- default:
- dml_format = dm_444_32;
- break;
- }
-
- return dml_format;
-}
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b3d0a4ea2446..8919a2092ac5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -399,6 +399,10 @@ struct pipe_ctx {
struct dc_stream_state *stream;
struct plane_resource plane_res;
+
+ /**
+	 * @stream_res: Reference to DCN resource components such as OPP and DSC.
+ */
struct stream_resource stream_res;
struct link_resource link_res;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 6682d9e181c6..b304d450b038 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -194,6 +194,11 @@ enum dc_status dpcd_configure_lttpr_mode(
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
+bool dp_is_lttpr_present(struct dc_link *link);
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting);
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override);
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
bool dpcd_write_128b_132b_sst_payload_allocation_table(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 5d2b028e5dad..d9f1b0a4fbd4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -214,6 +214,7 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 68c2ed434d2c..cff5fd55a0ad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
+
+ bool dpm_present;
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index d89bd55f110f..cd2be729846b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -268,6 +268,20 @@ enum dc_lut_mode {
LUT_RAM_B
};
+enum symclk_state {
+ SYMCLK_OFF_TX_OFF,
+ SYMCLK_ON_TX_ON,
+ SYMCLK_ON_TX_OFF,
+};
+
+struct phy_state {
+ struct {
+ uint8_t otg : 1;
+ uint8_t reserved : 7;
+ } symclk_ref_cnts;
+ enum symclk_state symclk_state;
+};
+
/**
* speakersToChannels
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 5097037e3962..8d86159d9de0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -22,6 +22,16 @@
*
*/
+/**
+ * DOC: mpc-overview
+ *
+ * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * that performs blending of multiple planes, using global and per-pixel alpha.
+ * It also performs post-blending color correction operations according to the
+ * hardware capabilities, such as color transformation matrix and gamma 1D and
+ * 3D LUT.
+ */
+
#ifndef __DC_MPCC_H__
#define __DC_MPCC_H__
@@ -48,14 +58,39 @@ enum mpcc_blend_mode {
MPCC_BLEND_MODE_TOP_BOT_BLENDING
};
+/**
+ * enum mpcc_alpha_blend_mode - define the alpha blend mode regarding pixel
+ * alpha and plane alpha values
+ */
enum mpcc_alpha_blend_mode {
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA: per pixel alpha using DPP
+ * alpha value
+ */
MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN: per
+ * pixel alpha using DPP alpha value multiplied by a global gain (plane
+ * alpha)
+ */
MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA: global alpha value, ignores
+	 * pixel alpha and considers only plane alpha
+ */
MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
};
-/*
- * MPCC blending configuration
+/**
+ * struct mpcc_blnd_cfg - MPCC blending configuration
+ *
+ * @black_color: background color
+ * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
+ * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
+ * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
+ * @global_gain: used when blend mode considers both pixel alpha and plane
+ * alpha value and assumes the global alpha value.
+ * @global_alpha: plane alpha value
*/
struct mpcc_blnd_cfg {
struct tg_color black_color; /* background color */
@@ -107,8 +142,15 @@ struct mpc_dwb_flow_control {
int flow_ctrl_cnt1;
};
-/*
- * MPCC connection and blending configuration for a single MPCC instance.
+/**
+ * struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
+ * @mpcc_id: MPCC physical instance
+ * @dpp_id: DPP input to this MPCC
+ * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
+ * @blnd_cfg: the blending configuration for this MPCC
+ * @sm_cfg: stereo mix setting for this MPCC
+ * @shared_bottom: true if the MPCC outputs to both OPP and DWB endpoints, otherwise false.
+ *
* This struct is used as a node in an MPC tree.
*/
struct mpcc {
@@ -120,8 +162,12 @@ struct mpcc {
bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
};
-/*
- * MPC tree represents all MPCC connections for a pipe.
+/**
+ * struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
+ *
+ * @opp_id: the OPP instance that owns this MPC tree
+ * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
+ *
*/
struct mpc_tree {
int opp_id; /* The OPP instance that owns this MPC tree */
@@ -149,13 +195,18 @@ struct mpcc_state {
uint32_t busy;
};
+/**
+ * struct mpc_funcs - MPC control function hooks
+ */
struct mpc_funcs {
void (*read_mpcc_state)(
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
- /*
+ /**
+ * @insert_plane:
+ *
* Insert DPP into MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for OPP output
*
@@ -180,7 +231,9 @@ struct mpc_funcs {
int dpp_id,
int mpcc_id);
- /*
+ /**
+ * @remove_mpcc:
+ *
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
@@ -195,7 +248,9 @@ struct mpc_funcs {
struct mpc_tree *tree,
struct mpcc *mpcc);
- /*
+ /**
+ * @mpc_init:
+ *
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
@@ -208,7 +263,9 @@ struct mpc_funcs {
struct mpc *mpc,
unsigned int mpcc_id);
- /*
+ /**
+ * @update_blending:
+ *
* Update the blending configuration for a specified MPCC.
*
* Parameters:
@@ -223,7 +280,9 @@ struct mpc_funcs {
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id);
- /*
+ /**
+ * @cursor_lock:
+ *
* Lock cursor updates for the specified OPP.
* OPP defines the set of MPCC that are locked together for cursor.
*
@@ -239,8 +298,10 @@ struct mpc_funcs {
int opp_id,
bool lock);
- /*
- * Add DPP into 'secondary' MPC tree based on specified blending position.
+ /**
+ * @insert_plane_to_secondary:
+ *
+ * Add DPP into secondary MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for DWB output
*
* Parameters:
@@ -264,7 +325,9 @@ struct mpc_funcs {
int dpp_id,
int mpcc_id);
- /*
+ /**
+ * @remove_mpcc_from_secondary:
+ *
* Remove a specified DPP from the 'secondary' MPC tree.
*
* Parameters:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 4cfa733cf96f..72eef7a5ed83 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -137,7 +137,13 @@ struct crc_params {
bool enable;
};
+/**
+ * struct timing_generator - Entry point to Output Timing Generator feature.
+ */
struct timing_generator {
+ /**
+ * @funcs: Timing generator control functions
+ */
const struct timing_generator_funcs *funcs;
struct dc_bios *bp;
struct dc_context *ctx;
@@ -148,7 +154,9 @@ struct dc_crtc_timing;
struct drr_params;
-
+/**
+ * struct timing_generator_funcs - Control timing generator on a given device.
+ */
struct timing_generator_funcs {
bool (*validate_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
@@ -273,8 +281,8 @@ struct timing_generator_funcs {
const struct crc_params *params);
/**
- * Get CRCs for the given timing generator. Return false if CRCs are
- * not enabled (via configure_crc).
+ * @get_crc: Get CRCs for the given timing generator. Return false if
+ * CRCs are not enabled (via configure_crc).
*/
bool (*get_crc)(struct timing_generator *tg,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index ccb3c719fc4d..d04b68dad413 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,11 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum vline_select {
- VLINE0,
- VLINE1
-};
-
struct pipe_ctx;
struct dc_state;
struct dc_stream_status;
@@ -48,6 +43,7 @@ struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
struct dpp;
struct dce_hwseq;
+struct link_resource;
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
@@ -88,6 +84,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*power_down)(struct dc *dc);
+ void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -116,8 +113,7 @@ struct hw_sequencer_funcs {
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*setup_periodic_interrupt)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct dc_crtc_timing_adjust adjust);
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
@@ -218,6 +214,25 @@ struct hw_sequencer_funcs {
void (*set_pipe)(struct pipe_ctx *pipe_ctx);
+ void (*enable_dp_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+ void (*enable_tmds_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock);
+ void (*enable_lvds_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock);
+ void (*disable_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);
/* Idle Optimization Related */
@@ -245,6 +260,10 @@ struct hw_sequencer_funcs {
struct tg_color *color,
int mpcc_id);
+ void (*update_phantom_vp_position)(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
@@ -271,6 +290,11 @@ void get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_subvp_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 1cdea0efe5c1..a4d61bb724b6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -124,6 +124,8 @@ struct hwseq_private_funcs {
void (*dsc_pg_control)(struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on);
+ bool (*dsc_pg_status)(struct dce_hwseq *hws,
+ unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 3482a877b6af..89964c980b87 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -55,9 +55,6 @@ struct link_hwss_ext {
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
- void (*disable_dp_link_output)(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal);
void (*set_dp_link_test_pattern)(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params);
@@ -79,6 +76,9 @@ struct link_hwss {
void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*setup_stream_attribute)(struct pipe_ctx *pipe_ctx);
+ void (*disable_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
};
#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 58158764adc0..c37d1141febe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -219,9 +219,15 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context,
uint8_t disabled_master_pipe_idx);
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx);
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
const struct link_hwss *get_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
+bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
index 5e92019539c8..4227adbc646a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
@@ -130,7 +130,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
-void disable_dio_dp_link_output(struct dc_link *link,
+void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
@@ -174,10 +174,10 @@ static const struct link_hwss dio_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
.update_stream_allocation_table = update_dio_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
index 08f22b32df48..126d37f847a1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
@@ -40,7 +40,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
-void disable_dio_dp_link_output(struct dc_link *link,
+void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);
void set_dio_dp_link_test_pattern(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
index 89d4e8159138..64f7ea6a9aa3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
@@ -56,10 +56,10 @@ static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index db7b0b155374..7d3147175ca2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->disable(stream_enc);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
@@ -266,11 +266,11 @@ static const struct link_hwss hpo_dp_link_hwss = {
.setup_stream_encoder = setup_hpo_dp_stream_encoder,
.reset_stream_encoder = reset_hpo_dp_stream_encoder,
.setup_stream_attribute = setup_hpo_dp_stream_attribute,
+ .disable_link_output = disable_hpo_dp_link_output,
.ext = {
.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
.enable_dp_link_output = enable_hpo_dp_link_output,
- .disable_dp_link_output = disable_hpo_dp_link_output,
.set_dp_link_test_pattern = set_hpo_dp_link_test_pattern,
.set_dp_lane_settings = set_hpo_dp_lane_settings,
.update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 501173ce270e..9522fe0b36c9 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -36,10 +36,18 @@ void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx)
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
+
+void virtual_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+}
+
static const struct link_hwss virtual_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
.setup_stream_attribute = virtual_setup_stream_attribute,
+ .disable_link_output = virtual_disable_link_output,
};
const struct link_hwss *get_virtual_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index ced176d17bae..f34c45b19fcb 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -441,6 +441,7 @@ struct dmub_srv {
/* Feature capabilities reported by fw */
struct dmub_feature_caps feature_caps;
+ struct dmub_visual_confirm_color visual_confirm_color;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index d7f3619352f0..5d1aadade8a5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -234,8 +234,7 @@ union dmub_psr_debug_flags {
};
/**
- * DMUB feature capabilities.
- * After DMUB init, driver will query FW capabilities prior to enabling certain features.
+ * DMUB visual confirm color
*/
struct dmub_feature_caps {
/**
@@ -246,6 +245,16 @@ struct dmub_feature_caps {
uint8_t reserved[6];
};
+struct dmub_visual_confirm_color {
+ /**
+ * Maximum 10 bits color value
+ */
+ uint16_t color_r_cr;
+ uint16_t color_g_y;
+ uint16_t color_b_cb;
+ uint16_t panel_inst;
+};
+
#if defined(__cplusplus)
}
#endif
@@ -645,6 +654,10 @@ enum dmub_cmd_type {
*/
DMUB_CMD__QUERY_FEATURE_CAPS = 6,
/**
+ * Command type used to get visual confirm color.
+ */
+ DMUB_CMD__GET_VISUAL_CONFIRM_COLOR = 8,
+ /**
* Command type used for all PSR commands.
*/
DMUB_CMD__PSR = 64,
@@ -747,6 +760,11 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
+enum dmub_cmd_header_sub_type {
+ DMUB_CMD__SUB_TYPE_GENERAL = 0,
+ DMUB_CMD__SUB_TYPE_CURSOR_POSITION = 1
+};
+
#pragma pack(push, 1)
/**
@@ -976,8 +994,17 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
uint16_t vtotal;
uint8_t main_pipe_index;
uint8_t phantom_pipe_index;
+ /* Since the microschedule is calculated in terms of OTG lines,
+ * include any scaling factors to make sure we get an accurate
+ * conversion when programming MALL_START_LINE (which is in terms
+ * of HUBP lines). If 4K is being downscaled to 1080p, the scale factor
+ * is 1/2 (numerator = 1, denominator = 2).
+ */
+ uint8_t scale_factor_numerator;
+ uint8_t scale_factor_denominator;
uint8_t is_drr;
- uint8_t padding;
+ uint8_t main_split_pipe_index;
+ uint8_t phantom_split_pipe_index;
} subvp_data;
struct {
@@ -999,7 +1026,11 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
} vblank_data;
} pipe_config;
- enum mclk_switch_mode mode;
+ /* - subvp_data in the union (pipe_config) takes up 27 bytes.
+ * - Make the "mode" field a uint8_t instead of enum so we only use 1 byte (only
+ * for the DMCUB command, cast to enum once we populate the DMCUB subvp state).
+ */
+ uint8_t mode; // enum mclk_switch_mode
};
/**
@@ -2766,6 +2797,31 @@ struct dmub_rb_cmd_query_feature_caps {
struct dmub_cmd_query_feature_caps_data query_feature_caps_data;
};
+/**
+ * Data passed from driver to FW in a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+struct dmub_cmd_visual_confirm_color_data {
+ /**
+ * Visual confirm color.
+ */
+ struct dmub_visual_confirm_color visual_confirm_color;
+};
+
+/**
+ * Definition of a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+struct dmub_rb_cmd_get_visual_confirm_color {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+ struct dmub_cmd_visual_confirm_color_data visual_confirm_color_data;
+};
+
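A minimal sketch of how a driver might build the new command, assuming the usual dmub_cmd_header fields (type, payload_bytes) from the existing header layout; the ring-buffer queue/execute helpers are omitted because their names are not part of this diff, and panel_inst is only an illustrative parameter.

/* Sketch only: populate a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR request.
 * Field names outside this diff (header.type, header.payload_bytes) are
 * assumed from the existing dmub_cmd_header layout.
 */
static void example_build_get_visual_confirm_color(union dmub_rb_cmd *cmd,
						   uint8_t panel_inst)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd->visual_confirm_color.header.payload_bytes =
		sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd->visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst =
		panel_inst;

	/* After DMUB FW executes the command, the driver reads the 10-bit
	 * color_r_cr/color_g_y/color_b_cb values back out of the same
	 * visual_confirm_color_data member.
	 */
}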
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
@@ -3138,6 +3194,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_rb_cmd_query_feature_caps query_feature_caps;
+
+ /**
+ * Definition of a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+ struct dmub_rb_cmd_get_visual_confirm_color visual_confirm_color;
struct dmub_rb_cmd_drr_update drr_update;
struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index ab06c7fc7452..c3089c673975 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -215,6 +215,7 @@ enum {
#define DEVICE_ID_NV_143F 0x143F
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
+#define DEVICE_ID_VGH_1435 0x1435
#define VANGOGH_A0 0x01
#define VANGOGH_UNKNOWN 0xFF
@@ -244,13 +245,15 @@ enum {
#define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
#define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 05096c644a60..a7ba5bd8dc16 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -128,8 +128,8 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
-static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
-static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 79fabc51c991..d1e91d31d151 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -83,6 +83,7 @@ enum link_training_result {
};
enum lttpr_mode {
+ LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
LTTPR_MODE_TRANSPARENT,
LTTPR_MODE_NON_TRANSPARENT,
@@ -246,8 +247,16 @@ union dpcd_training_lane_set {
};
+/* AMD's copy of various payload data for MST. We have two copies of the payload table (one in DRM,
+ * one in DC) since DRM's MST helpers can't be accessed here. This stream allocation table should
+ * _ONLY_ be filled out from DM and then passed to DC; do NOT use these for _any_ kind of atomic
+ * state calculations in DM, or you will break something.
+ */
+
+struct drm_dp_mst_port;
+
/* DP MST stream allocation (payload bandwidth number) */
-struct dp_mst_stream_allocation {
+struct dc_dp_mst_stream_allocation {
uint8_t vcp_id;
/* number of slots required for the DP stream in
* transport packet */
@@ -255,11 +264,11 @@ struct dp_mst_stream_allocation {
};
/* DP MST stream allocation table */
-struct dp_mst_stream_allocation_table {
+struct dc_dp_mst_stream_allocation_table {
/* number of DP video streams */
int stream_count;
/* array of stream allocations */
- struct dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+ struct dc_dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
#endif /*__DAL_LINK_SERVICE_TYPES_H__*/
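As a usage note, a hedged sketch of DM-side code filling the renamed table before handing it to DC, per the comment in the hunk above; only the names visible in this diff (dc_dp_mst_stream_allocation_table, stream_count, stream_allocations, vcp_id) are taken from the patch, and the per-stream slot-count member is deliberately left as a comment.

/* Sketch only, per the comment above: fill from DM, pass to DC, never use
 * this table for DRM atomic state calculations.
 */
static void example_fill_mst_table(struct dc_dp_mst_stream_allocation_table *table,
				   uint8_t vcp_id)
{
	table->stream_count = 1;
	table->stream_allocations[0].vcp_id = vcp_id;
	/* The per-stream slot count member is filled here as well; its exact
	 * name falls outside the context shown in this diff.
	 */
}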
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f093b49c5e6e..3bf08a60c45c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -119,13 +119,15 @@ enum dc_log_type {
LOG_HDMI_RETIMER_REDRIVER,
LOG_DSC,
LOG_SMU_MSG,
+ LOG_DC2RESERVED4,
+ LOG_DC2RESERVED5,
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
LOG_DP2,
- LOG_SECTION_TOTAL_COUNT
+ LOG_DC2RESERVED12,
};
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 859ffd8725c5..04f7656906ca 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index da09ba7589f7..0f39ab9dc5b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
- /* FreeSync HDR */
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
- *payload_size = 0x0A;
-
+ *payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
- *payload_size = 0x10;
- infopacket->hb2 = *payload_size - 1; //-1 for checksum
+ infopacket->hb2 = 0x10;
+ *payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index f21554a1c86c..3973110f149c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -3129,6 +3129,8 @@
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x15cc
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define mmGCVM_DEBUG 0x15cd
+#define mmGCVM_DEBUG_BASE_IDX 0
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x15ce
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x15cf
@@ -3151,6 +3153,8 @@
#define mmGCVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
#define mmGCVM_L2_CACHE_PARITY_CNTL 0x15d8
#define mmGCVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define mmGCVM_L2_IH_LOG_CNTL 0x15d9
+#define mmGCVM_L2_IH_LOG_CNTL_BASE_IDX 0
#define mmGCVM_L2_CNTL5 0x15dc
#define mmGCVM_L2_CNTL5_BASE_IDX 0
#define mmGCVM_L2_GCR_CNTL 0x15dd
@@ -9796,14 +9800,118 @@
// addressBlock: gc_pwrdec
// base address: 0x3c000
+#define mmCGTS_RD_CTRL_REG 0x5004
+#define mmCGTS_RD_CTRL_REG_BASE_IDX 1
+#define mmCGTS_RD_REG 0x5005
+#define mmCGTS_RD_REG_BASE_IDX 1
+#define mmCGTS_TCC_DISABLE 0x5006
+#define mmCGTS_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE 0x5007
+#define mmCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_STATUS_REG 0x5008
+#define mmCGTS_STATUS_REG_BASE_IDX 1
+#define mmCGTT_SPI_CGTSSM_CLK_CTRL 0x5009
+#define mmCGTT_SPI_CGTSSM_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_PS_CLK_CTRL 0x507d
+#define mmCGTT_SPI_PS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPIS_CLK_CTRL 0x507e
+#define mmCGTT_SPIS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_CLK_CTRL 0x5080
+#define mmCGTT_SPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PC_CLK_CTRL 0x5081
+#define mmCGTT_PC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_BCI_CLK_CTRL 0x5082
+#define mmCGTT_BCI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_VGT_CLK_CTRL 0x5084
+#define mmCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_IA_CLK_CTRL 0x5085
+#define mmCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_WD_CLK_CTRL 0x5086
+#define mmCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GS_NGG_CLK_CTRL 0x5087
+#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PA_CLK_CTRL 0x5088
+#define mmCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL0 0x5089
+#define mmCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL1 0x508a
+#define mmCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL2 0x508b
+#define mmCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SQ_CLK_CTRL 0x508c
+#define mmCGTT_SQ_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SQG_CLK_CTRL 0x508d
+#define mmCGTT_SQG_CLK_CTRL_BASE_IDX 1
#define mmSQ_ALU_CLK_CTRL 0x508e
#define mmSQ_ALU_CLK_CTRL_BASE_IDX 1
#define mmSQ_TEX_CLK_CTRL 0x508f
#define mmSQ_TEX_CLK_CTRL_BASE_IDX 1
#define mmSQ_LDS_CLK_CTRL 0x5090
#define mmSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL0 0x5094
+#define mmCGTT_SX_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL1 0x5095
+#define mmCGTT_SX_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL2 0x5096
+#define mmCGTT_SX_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL3 0x5097
+#define mmCGTT_SX_CLK_CTRL3_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL4 0x5098
+#define mmCGTT_SX_CLK_CTRL4_BASE_IDX 1
+#define mmTD_CGTT_CTRL 0x509c
+#define mmTD_CGTT_CTRL_BASE_IDX 1
+#define mmTA_CGTT_CTRL 0x509d
+#define mmTA_CGTT_CTRL_BASE_IDX 1
+#define mmCGTT_TCPI_CLK_CTRL 0x5109
+#define mmCGTT_TCPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GDS_CLK_CTRL 0x50a0
+#define mmCGTT_GDS_CLK_CTRL_BASE_IDX 1
+#define mmDB_CGTT_CLK_CTRL_0 0x50a4
+#define mmDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define mmCB_CGTT_SCLK_CTRL 0x50a8
+#define mmCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2C_CGTT_SCLK_CTRL 0x50fc
+#define mmGL2C_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2A_CGTT_SCLK_CTRL 0x50ac
+#define mmGL2A_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2A_CGTT_SCLK_CTRL_1 0x50ad
+#define mmGL2A_CGTT_SCLK_CTRL_1_BASE_IDX 1
+#define mmCGTT_CP_CLK_CTRL 0x50b0
+#define mmCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPF_CLK_CTRL 0x50b1
+#define mmCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPC_CLK_CTRL 0x50b2
+#define mmCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_RLC_CLK_CTRL 0x50b5
+#define mmCGTT_RLC_CLK_CTRL_BASE_IDX 1
#define mmRLC_GFX_RM_CNTL 0x50b6
#define mmRLC_GFX_RM_CNTL_BASE_IDX 1
+#define mmRMI_CGTT_SCLK_CTRL 0x50c0
+#define mmRMI_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmCGTT_TCPF_CLK_CTRL 0x5111
+#define mmCGTT_TCPF_CLK_CTRL_BASE_IDX 1
+#define mmGCR_CGTT_SCLK_CTRL 0x50c2
+#define mmGCR_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmUTCL1_CGTT_CLK_CTRL 0x50c3
+#define mmUTCL1_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGCEA_CGTT_CLK_CTRL 0x50c4
+#define mmGCEA_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmSE_CAC_CGTT_CLK_CTRL 0x50d0
+#define mmSE_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGC_CAC_CGTT_CLK_CTRL 0x50d8
+#define mmGC_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGRBM_CGTT_CLK_CNTL 0x50e0
+#define mmGRBM_CGTT_CLK_CNTL_BASE_IDX 1
+#define mmGUS_CGTT_CLK_CTRL 0x50f4
+#define mmGUS_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL0 0x50f8
+#define mmCGTT_PH_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL1 0x50f9
+#define mmCGTT_PH_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL2 0x50fa
+#define mmCGTT_PH_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL3 0x50fb
+#define mmCGTT_PH_CLK_CTRL3_BASE_IDX 1
// addressBlock: gc_hypdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index a827b0ff8905..d4e8ff22ecb8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -34547,6 +34547,503 @@
// addressBlock: gc_pwrdec
+//CGTS_RD_CTRL_REG
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x4
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000000FL
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x000000F0L
+//CGTS_RD_REG
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
+#define CGTS_RD_REG__READ_DATA_MASK 0xFFFFFFFFL
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_STATUS_REG
+#define CGTS_STATUS_REG__SA0_QUAD0_MGCG_ENABLED__SHIFT 0x0
+#define CGTS_STATUS_REG__SA0_QUAD0_CG_STATUS__SHIFT 0x1
+#define CGTS_STATUS_REG__SA1_QUAD0_MGCG_ENABLED__SHIFT 0x8
+#define CGTS_STATUS_REG__SA1_QUAD0_CG_STATUS__SHIFT 0x9
+#define CGTS_STATUS_REG__SA0_QUAD0_MGCG_ENABLED_MASK 0x00000001L
+#define CGTS_STATUS_REG__SA0_QUAD0_CG_STATUS_MASK 0x00000006L
+#define CGTS_STATUS_REG__SA1_QUAD0_MGCG_ENABLED_MASK 0x00000100L
+#define CGTS_STATUS_REG__SA1_QUAD0_CG_STATUS_MASK 0x00000600L
+//CGTT_SPI_CGTSSM_CLK_CTRL
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+//CGTT_SPI_PS_CLK_CTRL
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPIS_CLK_CTRL
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPI_CLK_CTRL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PC_CLK_CTRL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE__SHIFT 0x11
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0xd
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0xe
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE_MASK 0x00020000L
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x00002000L
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x00004000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+//CGTT_BCI_CLK_CTRL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE__SHIFT 0x1d
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE__SHIFT 0x1e
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE_MASK 0x20000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE_MASK 0x40000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__DBR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__DBR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQ_CLK_CTRL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
//SQ_ALU_CLK_CTRL
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
@@ -34562,12 +35059,982 @@
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//CGTT_SX_CLK_CTRL0
+#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL1
+#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL2
+#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL3
+#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL4
+#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
+//TD_CGTT_CTRL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPI_CLK_CTRL
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0xf
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x17
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x18
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x00007000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00008000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x00800000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x01000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x40000000L
+//CGTT_GDS_CLK_CTRL
+#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GDS_CLK_CTRL__UNUSED__SHIFT 0xc
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GDS_CLK_CTRL__UNUSED_MASK 0x0000F000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2C_CGTT_SCLK_CTRL
+#define GL2C_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GL2C_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2C_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GL2C_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2A_CGTT_SCLK_CTRL
+#define GL2A_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GL2A_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2A_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GL2A_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2A_CGTT_SCLK_CTRL_1
+#define GL2A_CGTT_SCLK_CTRL_1__ON_DELAY__SHIFT 0x0
+#define GL2A_CGTT_SCLK_CTRL_1__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2A_CGTT_SCLK_CTRL_1__ON_DELAY_MASK 0x0000000FL
+#define GL2A_CGTT_SCLK_CTRL_1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1a
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT__SHIFT 0x1b
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP__SHIFT 0x1c
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x04000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT_MASK 0x08000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP_MASK 0x10000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__RESERVED__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_RLC_CLK_CTRL__RESERVED_MASK 0x0000000FL
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
//RLC_GFX_RM_CNTL
#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
-
+//RMI_CGTT_SCLK_CTRL
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPF_CLK_CTRL
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0xf
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x17
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x18
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x00007000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00008000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x00800000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x01000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x40000000L
+//GCR_CGTT_SCLK_CTRL
+#define GCR_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCR_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GCR_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCR_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//UTCL1_CGTT_CLK_CTRL
+#define UTCL1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define UTCL1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GCEA_CGTT_CLK_CTRL
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCEA_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GCEA_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCEA_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GCEA_CGTT_CLK_CTRL__SPARE1_MASK 0x0F800000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//SE_CAC_CGTT_CLK_CTRL
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GC_CAC_CGTT_CLK_CTRL
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GRBM_CGTT_CLK_CNTL
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0x0000000FL
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+//GUS_CGTT_CLK_CTRL
+#define GUS_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GUS_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GUS_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_DRAM__SHIFT 0x13
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GUS_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_DRAM__SHIFT 0x1b
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GUS_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GUS_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GUS_CGTT_CLK_CTRL__SPARE0_MASK 0x0007F000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_DRAM_MASK 0x00080000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GUS_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_DRAM_MASK 0x08000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL0
+#define CGTT_PH_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PH_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL1
+#define CGTT_PH_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL2
+#define CGTT_PH_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL3
+#define CGTT_PH_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
// addressBlock: gc_hypdec
//CP_HYP_PFP_UCODE_ADDR
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
new file mode 100644
index 000000000000..3b95a59b196c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
@@ -0,0 +1,12086 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_11_0_3_OFFSET_HEADER
+#define _gc_11_0_3_OFFSET_HEADER
+
+
+
+// addressBlock: gc_sdma0_sdma0dec
+// base address: 0x4980
+#define regSDMA0_DEC_START 0x0000
+#define regSDMA0_DEC_START_BASE_IDX 0
+#define regSDMA0_F32_MISC_CNTL 0x000b
+#define regSDMA0_F32_MISC_CNTL_BASE_IDX 0
+#define regSDMA0_GLOBAL_TIMESTAMP_LO 0x000f
+#define regSDMA0_GLOBAL_TIMESTAMP_LO_BASE_IDX 0
+#define regSDMA0_GLOBAL_TIMESTAMP_HI 0x0010
+#define regSDMA0_GLOBAL_TIMESTAMP_HI_BASE_IDX 0
+#define regSDMA0_POWER_CNTL 0x001a
+#define regSDMA0_POWER_CNTL_BASE_IDX 0
+#define regSDMA0_CNTL 0x001c
+#define regSDMA0_CNTL_BASE_IDX 0
+#define regSDMA0_CHICKEN_BITS 0x001d
+#define regSDMA0_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA0_GB_ADDR_CONFIG 0x001e
+#define regSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA0_GB_ADDR_CONFIG_READ 0x001f
+#define regSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA0_RB_RPTR_FETCH 0x0020
+#define regSDMA0_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA0_RB_RPTR_FETCH_HI 0x0021
+#define regSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0022
+#define regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA0_IB_OFFSET_FETCH 0x0023
+#define regSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA0_PROGRAM 0x0024
+#define regSDMA0_PROGRAM_BASE_IDX 0
+#define regSDMA0_STATUS_REG 0x0025
+#define regSDMA0_STATUS_REG_BASE_IDX 0
+#define regSDMA0_STATUS1_REG 0x0026
+#define regSDMA0_STATUS1_REG_BASE_IDX 0
+#define regSDMA0_CNTL1 0x0027
+#define regSDMA0_CNTL1_BASE_IDX 0
+#define regSDMA0_HBM_PAGE_CONFIG 0x0028
+#define regSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA0_UCODE_CHECKSUM 0x0029
+#define regSDMA0_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA0_FREEZE 0x002b
+#define regSDMA0_FREEZE_BASE_IDX 0
+#define regSDMA0_PROCESS_QUANTUM0 0x002c
+#define regSDMA0_PROCESS_QUANTUM0_BASE_IDX 0
+#define regSDMA0_PROCESS_QUANTUM1 0x002d
+#define regSDMA0_PROCESS_QUANTUM1_BASE_IDX 0
+#define regSDMA0_WATCHDOG_CNTL 0x002e
+#define regSDMA0_WATCHDOG_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE_STATUS0 0x002f
+#define regSDMA0_QUEUE_STATUS0_BASE_IDX 0
+#define regSDMA0_EDC_CONFIG 0x0032
+#define regSDMA0_EDC_CONFIG_BASE_IDX 0
+#define regSDMA0_BA_THRESHOLD 0x0033
+#define regSDMA0_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA0_ID 0x0034
+#define regSDMA0_ID_BASE_IDX 0
+#define regSDMA0_VERSION 0x0035
+#define regSDMA0_VERSION_BASE_IDX 0
+#define regSDMA0_EDC_COUNTER 0x0036
+#define regSDMA0_EDC_COUNTER_BASE_IDX 0
+#define regSDMA0_EDC_COUNTER_CLEAR 0x0037
+#define regSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define regSDMA0_STATUS2_REG 0x0038
+#define regSDMA0_STATUS2_REG_BASE_IDX 0
+#define regSDMA0_ATOMIC_CNTL 0x0039
+#define regSDMA0_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA0_ATOMIC_PREOP_LO 0x003a
+#define regSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA0_ATOMIC_PREOP_HI 0x003b
+#define regSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA0_UTCL1_CNTL 0x003c
+#define regSDMA0_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA0_UTCL1_WATERMK 0x003d
+#define regSDMA0_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA0_UTCL1_TIMEOUT 0x003e
+#define regSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA0_UTCL1_PAGE 0x003f
+#define regSDMA0_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_STATUS 0x0040
+#define regSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_STATUS 0x0041
+#define regSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA0_UTCL1_INV0 0x0042
+#define regSDMA0_UTCL1_INV0_BASE_IDX 0
+#define regSDMA0_UTCL1_INV1 0x0043
+#define regSDMA0_UTCL1_INV1_BASE_IDX 0
+#define regSDMA0_UTCL1_INV2 0x0044
+#define regSDMA0_UTCL1_INV2_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_XNACK0 0x0045
+#define regSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_XNACK1 0x0046
+#define regSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_XNACK0 0x0047
+#define regSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_XNACK1 0x0048
+#define regSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA0_RELAX_ORDERING_LUT 0x004a
+#define regSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA0_CHICKEN_BITS_2 0x004b
+#define regSDMA0_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA0_STATUS3_REG 0x004c
+#define regSDMA0_STATUS3_REG_BASE_IDX 0
+#define regSDMA0_PHYSICAL_ADDR_LO 0x004d
+#define regSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_PHYSICAL_ADDR_HI 0x004e
+#define regSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_GLOBAL_QUANTUM 0x004f
+#define regSDMA0_GLOBAL_QUANTUM_BASE_IDX 0
+#define regSDMA0_ERROR_LOG 0x0050
+#define regSDMA0_ERROR_LOG_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG0 0x0051
+#define regSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG1 0x0052
+#define regSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG2 0x0053
+#define regSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG3 0x0054
+#define regSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA0_F32_COUNTER 0x0055
+#define regSDMA0_F32_COUNTER_BASE_IDX 0
+#define regSDMA0_CRD_CNTL 0x005b
+#define regSDMA0_CRD_CNTL_BASE_IDX 0
+#define regSDMA0_RLC_CGCG_CTRL 0x005c
+#define regSDMA0_RLC_CGCG_CTRL_BASE_IDX 0
+#define regSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
+#define regSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA0_AQL_STATUS 0x005f
+#define regSDMA0_AQL_STATUS_BASE_IDX 0
+#define regSDMA0_EA_DBIT_ADDR_DATA 0x0060
+#define regSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA0_EA_DBIT_ADDR_INDEX 0x0061
+#define regSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA0_TLBI_GCR_CNTL 0x0062
+#define regSDMA0_TLBI_GCR_CNTL_BASE_IDX 0
+#define regSDMA0_TILING_CONFIG 0x0063
+#define regSDMA0_TILING_CONFIG_BASE_IDX 0
+#define regSDMA0_HASH 0x0064
+#define regSDMA0_HASH_BASE_IDX 0
+#define regSDMA0_INT_STATUS 0x0070
+#define regSDMA0_INT_STATUS_BASE_IDX 0
+#define regSDMA0_GPU_IOV_VIOLATION_LOG2 0x0071
+#define regSDMA0_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA0_HOLE_ADDR_LO 0x0072
+#define regSDMA0_HOLE_ADDR_LO_BASE_IDX 0
+#define regSDMA0_HOLE_ADDR_HI 0x0073
+#define regSDMA0_HOLE_ADDR_HI_BASE_IDX 0
+#define regSDMA0_CLOCK_GATING_STATUS 0x0075
+#define regSDMA0_CLOCK_GATING_STATUS_BASE_IDX 0
+#define regSDMA0_STATUS4_REG 0x0076
+#define regSDMA0_STATUS4_REG_BASE_IDX 0
+#define regSDMA0_SCRATCH_RAM_DATA 0x0077
+#define regSDMA0_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA0_SCRATCH_RAM_ADDR 0x0078
+#define regSDMA0_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA0_TIMESTAMP_CNTL 0x0079
+#define regSDMA0_TIMESTAMP_CNTL_BASE_IDX 0
+#define regSDMA0_STATUS5_REG 0x007a
+#define regSDMA0_STATUS5_REG_BASE_IDX 0
+#define regSDMA0_QUEUE_RESET_REQ 0x007b
+#define regSDMA0_QUEUE_RESET_REQ_BASE_IDX 0
+#define regSDMA0_STATUS6_REG 0x007c
+#define regSDMA0_STATUS6_REG_BASE_IDX 0
+#define regSDMA0_UCODE1_CHECKSUM 0x007d
+#define regSDMA0_UCODE1_CHECKSUM_BASE_IDX 0
+#define regSDMA0_CE_CTRL 0x007e
+#define regSDMA0_CE_CTRL_BASE_IDX 0
+#define regSDMA0_FED_STATUS 0x007f
+#define regSDMA0_FED_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_CNTL 0x0080
+#define regSDMA0_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_BASE 0x0081
+#define regSDMA0_QUEUE0_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_BASE_HI 0x0082
+#define regSDMA0_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR 0x0083
+#define regSDMA0_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_HI 0x0084
+#define regSDMA0_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR 0x0085
+#define regSDMA0_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_HI 0x0086
+#define regSDMA0_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_HI 0x0088
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_LO 0x0089
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_CNTL 0x008a
+#define regSDMA0_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_RPTR 0x008b
+#define regSDMA0_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_OFFSET 0x008c
+#define regSDMA0_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_BASE_LO 0x008d
+#define regSDMA0_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_BASE_HI 0x008e
+#define regSDMA0_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_SIZE 0x008f
+#define regSDMA0_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE0_SKIP_CNTL 0x0090
+#define regSDMA0_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_CONTEXT_STATUS 0x0091
+#define regSDMA0_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL 0x0092
+#define regSDMA0_QUEUE0_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL_LOG 0x00a9
+#define regSDMA0_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL_OFFSET 0x00ab
+#define regSDMA0_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE0_CSA_ADDR_LO 0x00ac
+#define regSDMA0_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_CSA_ADDR_HI 0x00ad
+#define regSDMA0_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_SCHEDULE_CNTL 0x00ae
+#define regSDMA0_QUEUE0_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_SUB_REMAIN 0x00af
+#define regSDMA0_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE0_PREEMPT 0x00b0
+#define regSDMA0_QUEUE0_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE0_DUMMY_REG 0x00b1
+#define regSDMA0_QUEUE0_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_AQL_CNTL 0x00b4
+#define regSDMA0_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_MINOR_PTR_UPDATE 0x00b5
+#define regSDMA0_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_PREEMPT 0x00b6
+#define regSDMA0_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA0 0x00c0
+#define regSDMA0_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA1 0x00c1
+#define regSDMA0_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA2 0x00c2
+#define regSDMA0_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA3 0x00c3
+#define regSDMA0_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA4 0x00c4
+#define regSDMA0_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA5 0x00c5
+#define regSDMA0_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA6 0x00c6
+#define regSDMA0_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA7 0x00c7
+#define regSDMA0_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA8 0x00c8
+#define regSDMA0_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA9 0x00c9
+#define regSDMA0_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA10 0x00ca
+#define regSDMA0_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_CNTL 0x00cb
+#define regSDMA0_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_CNTL 0x00d8
+#define regSDMA0_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_BASE 0x00d9
+#define regSDMA0_QUEUE1_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_BASE_HI 0x00da
+#define regSDMA0_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR 0x00db
+#define regSDMA0_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_HI 0x00dc
+#define regSDMA0_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR 0x00dd
+#define regSDMA0_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_HI 0x00de
+#define regSDMA0_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_HI 0x00e0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_LO 0x00e1
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_CNTL 0x00e2
+#define regSDMA0_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_RPTR 0x00e3
+#define regSDMA0_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_OFFSET 0x00e4
+#define regSDMA0_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_BASE_LO 0x00e5
+#define regSDMA0_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_BASE_HI 0x00e6
+#define regSDMA0_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_SIZE 0x00e7
+#define regSDMA0_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE1_SKIP_CNTL 0x00e8
+#define regSDMA0_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_CONTEXT_STATUS 0x00e9
+#define regSDMA0_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL 0x00ea
+#define regSDMA0_QUEUE1_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL_LOG 0x0101
+#define regSDMA0_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL_OFFSET 0x0103
+#define regSDMA0_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE1_CSA_ADDR_LO 0x0104
+#define regSDMA0_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_CSA_ADDR_HI 0x0105
+#define regSDMA0_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_SCHEDULE_CNTL 0x0106
+#define regSDMA0_QUEUE1_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_SUB_REMAIN 0x0107
+#define regSDMA0_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE1_PREEMPT 0x0108
+#define regSDMA0_QUEUE1_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE1_DUMMY_REG 0x0109
+#define regSDMA0_QUEUE1_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x010a
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x010b
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_AQL_CNTL 0x010c
+#define regSDMA0_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_MINOR_PTR_UPDATE 0x010d
+#define regSDMA0_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_PREEMPT 0x010e
+#define regSDMA0_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA0 0x0118
+#define regSDMA0_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA1 0x0119
+#define regSDMA0_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA2 0x011a
+#define regSDMA0_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA3 0x011b
+#define regSDMA0_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA4 0x011c
+#define regSDMA0_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA5 0x011d
+#define regSDMA0_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA6 0x011e
+#define regSDMA0_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA7 0x011f
+#define regSDMA0_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA8 0x0120
+#define regSDMA0_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA9 0x0121
+#define regSDMA0_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA10 0x0122
+#define regSDMA0_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_CNTL 0x0123
+#define regSDMA0_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_CNTL 0x0130
+#define regSDMA0_QUEUE2_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_BASE 0x0131
+#define regSDMA0_QUEUE2_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_BASE_HI 0x0132
+#define regSDMA0_QUEUE2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR 0x0133
+#define regSDMA0_QUEUE2_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_HI 0x0134
+#define regSDMA0_QUEUE2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR 0x0135
+#define regSDMA0_QUEUE2_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_HI 0x0136
+#define regSDMA0_QUEUE2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_HI 0x0138
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_LO 0x0139
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_CNTL 0x013a
+#define regSDMA0_QUEUE2_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_RPTR 0x013b
+#define regSDMA0_QUEUE2_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_OFFSET 0x013c
+#define regSDMA0_QUEUE2_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_BASE_LO 0x013d
+#define regSDMA0_QUEUE2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_BASE_HI 0x013e
+#define regSDMA0_QUEUE2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_SIZE 0x013f
+#define regSDMA0_QUEUE2_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE2_SKIP_CNTL 0x0140
+#define regSDMA0_QUEUE2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_CONTEXT_STATUS 0x0141
+#define regSDMA0_QUEUE2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL 0x0142
+#define regSDMA0_QUEUE2_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL_LOG 0x0159
+#define regSDMA0_QUEUE2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL_OFFSET 0x015b
+#define regSDMA0_QUEUE2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE2_CSA_ADDR_LO 0x015c
+#define regSDMA0_QUEUE2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_CSA_ADDR_HI 0x015d
+#define regSDMA0_QUEUE2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_SCHEDULE_CNTL 0x015e
+#define regSDMA0_QUEUE2_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_SUB_REMAIN 0x015f
+#define regSDMA0_QUEUE2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE2_PREEMPT 0x0160
+#define regSDMA0_QUEUE2_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE2_DUMMY_REG 0x0161
+#define regSDMA0_QUEUE2_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI 0x0162
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO 0x0163
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_AQL_CNTL 0x0164
+#define regSDMA0_QUEUE2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_MINOR_PTR_UPDATE 0x0165
+#define regSDMA0_QUEUE2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_PREEMPT 0x0166
+#define regSDMA0_QUEUE2_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA0 0x0170
+#define regSDMA0_QUEUE2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA1 0x0171
+#define regSDMA0_QUEUE2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA2 0x0172
+#define regSDMA0_QUEUE2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA3 0x0173
+#define regSDMA0_QUEUE2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA4 0x0174
+#define regSDMA0_QUEUE2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA5 0x0175
+#define regSDMA0_QUEUE2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA6 0x0176
+#define regSDMA0_QUEUE2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA7 0x0177
+#define regSDMA0_QUEUE2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA8 0x0178
+#define regSDMA0_QUEUE2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA9 0x0179
+#define regSDMA0_QUEUE2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA10 0x017a
+#define regSDMA0_QUEUE2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_CNTL 0x017b
+#define regSDMA0_QUEUE2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_CNTL 0x0188
+#define regSDMA0_QUEUE3_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_BASE 0x0189
+#define regSDMA0_QUEUE3_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_BASE_HI 0x018a
+#define regSDMA0_QUEUE3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR 0x018b
+#define regSDMA0_QUEUE3_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_HI 0x018c
+#define regSDMA0_QUEUE3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR 0x018d
+#define regSDMA0_QUEUE3_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_HI 0x018e
+#define regSDMA0_QUEUE3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_HI 0x0190
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_LO 0x0191
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_CNTL 0x0192
+#define regSDMA0_QUEUE3_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_RPTR 0x0193
+#define regSDMA0_QUEUE3_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_OFFSET 0x0194
+#define regSDMA0_QUEUE3_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_BASE_LO 0x0195
+#define regSDMA0_QUEUE3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_BASE_HI 0x0196
+#define regSDMA0_QUEUE3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_SIZE 0x0197
+#define regSDMA0_QUEUE3_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE3_SKIP_CNTL 0x0198
+#define regSDMA0_QUEUE3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_CONTEXT_STATUS 0x0199
+#define regSDMA0_QUEUE3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL 0x019a
+#define regSDMA0_QUEUE3_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL_LOG 0x01b1
+#define regSDMA0_QUEUE3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL_OFFSET 0x01b3
+#define regSDMA0_QUEUE3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE3_CSA_ADDR_LO 0x01b4
+#define regSDMA0_QUEUE3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_CSA_ADDR_HI 0x01b5
+#define regSDMA0_QUEUE3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_SCHEDULE_CNTL 0x01b6
+#define regSDMA0_QUEUE3_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_SUB_REMAIN 0x01b7
+#define regSDMA0_QUEUE3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE3_PREEMPT 0x01b8
+#define regSDMA0_QUEUE3_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE3_DUMMY_REG 0x01b9
+#define regSDMA0_QUEUE3_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_AQL_CNTL 0x01bc
+#define regSDMA0_QUEUE3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_MINOR_PTR_UPDATE 0x01bd
+#define regSDMA0_QUEUE3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_PREEMPT 0x01be
+#define regSDMA0_QUEUE3_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA0 0x01c8
+#define regSDMA0_QUEUE3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA1 0x01c9
+#define regSDMA0_QUEUE3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA2 0x01ca
+#define regSDMA0_QUEUE3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA3 0x01cb
+#define regSDMA0_QUEUE3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA4 0x01cc
+#define regSDMA0_QUEUE3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA5 0x01cd
+#define regSDMA0_QUEUE3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA6 0x01ce
+#define regSDMA0_QUEUE3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA7 0x01cf
+#define regSDMA0_QUEUE3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA8 0x01d0
+#define regSDMA0_QUEUE3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA9 0x01d1
+#define regSDMA0_QUEUE3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA10 0x01d2
+#define regSDMA0_QUEUE3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_CNTL 0x01d3
+#define regSDMA0_QUEUE3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_CNTL 0x01e0
+#define regSDMA0_QUEUE4_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_BASE 0x01e1
+#define regSDMA0_QUEUE4_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_BASE_HI 0x01e2
+#define regSDMA0_QUEUE4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR 0x01e3
+#define regSDMA0_QUEUE4_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_HI 0x01e4
+#define regSDMA0_QUEUE4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR 0x01e5
+#define regSDMA0_QUEUE4_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_HI 0x01e6
+#define regSDMA0_QUEUE4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_HI 0x01e8
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_LO 0x01e9
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_CNTL 0x01ea
+#define regSDMA0_QUEUE4_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_RPTR 0x01eb
+#define regSDMA0_QUEUE4_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_OFFSET 0x01ec
+#define regSDMA0_QUEUE4_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_BASE_LO 0x01ed
+#define regSDMA0_QUEUE4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_BASE_HI 0x01ee
+#define regSDMA0_QUEUE4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_SIZE 0x01ef
+#define regSDMA0_QUEUE4_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE4_SKIP_CNTL 0x01f0
+#define regSDMA0_QUEUE4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_CONTEXT_STATUS 0x01f1
+#define regSDMA0_QUEUE4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL 0x01f2
+#define regSDMA0_QUEUE4_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL_LOG 0x0209
+#define regSDMA0_QUEUE4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL_OFFSET 0x020b
+#define regSDMA0_QUEUE4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE4_CSA_ADDR_LO 0x020c
+#define regSDMA0_QUEUE4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_CSA_ADDR_HI 0x020d
+#define regSDMA0_QUEUE4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_SCHEDULE_CNTL 0x020e
+#define regSDMA0_QUEUE4_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_SUB_REMAIN 0x020f
+#define regSDMA0_QUEUE4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE4_PREEMPT 0x0210
+#define regSDMA0_QUEUE4_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE4_DUMMY_REG 0x0211
+#define regSDMA0_QUEUE4_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI 0x0212
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO 0x0213
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_AQL_CNTL 0x0214
+#define regSDMA0_QUEUE4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_MINOR_PTR_UPDATE 0x0215
+#define regSDMA0_QUEUE4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_PREEMPT 0x0216
+#define regSDMA0_QUEUE4_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA0 0x0220
+#define regSDMA0_QUEUE4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA1 0x0221
+#define regSDMA0_QUEUE4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA2 0x0222
+#define regSDMA0_QUEUE4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA3 0x0223
+#define regSDMA0_QUEUE4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA4 0x0224
+#define regSDMA0_QUEUE4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA5 0x0225
+#define regSDMA0_QUEUE4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA6 0x0226
+#define regSDMA0_QUEUE4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA7 0x0227
+#define regSDMA0_QUEUE4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA8 0x0228
+#define regSDMA0_QUEUE4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA9 0x0229
+#define regSDMA0_QUEUE4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA10 0x022a
+#define regSDMA0_QUEUE4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_CNTL 0x022b
+#define regSDMA0_QUEUE4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_CNTL 0x0238
+#define regSDMA0_QUEUE5_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_BASE 0x0239
+#define regSDMA0_QUEUE5_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_BASE_HI 0x023a
+#define regSDMA0_QUEUE5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR 0x023b
+#define regSDMA0_QUEUE5_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_HI 0x023c
+#define regSDMA0_QUEUE5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR 0x023d
+#define regSDMA0_QUEUE5_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_HI 0x023e
+#define regSDMA0_QUEUE5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_HI 0x0240
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_LO 0x0241
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_CNTL 0x0242
+#define regSDMA0_QUEUE5_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_RPTR 0x0243
+#define regSDMA0_QUEUE5_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_OFFSET 0x0244
+#define regSDMA0_QUEUE5_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_BASE_LO 0x0245
+#define regSDMA0_QUEUE5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_BASE_HI 0x0246
+#define regSDMA0_QUEUE5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_SIZE 0x0247
+#define regSDMA0_QUEUE5_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE5_SKIP_CNTL 0x0248
+#define regSDMA0_QUEUE5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_CONTEXT_STATUS 0x0249
+#define regSDMA0_QUEUE5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL 0x024a
+#define regSDMA0_QUEUE5_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL_LOG 0x0261
+#define regSDMA0_QUEUE5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL_OFFSET 0x0263
+#define regSDMA0_QUEUE5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE5_CSA_ADDR_LO 0x0264
+#define regSDMA0_QUEUE5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_CSA_ADDR_HI 0x0265
+#define regSDMA0_QUEUE5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_SCHEDULE_CNTL 0x0266
+#define regSDMA0_QUEUE5_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_SUB_REMAIN 0x0267
+#define regSDMA0_QUEUE5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE5_PREEMPT 0x0268
+#define regSDMA0_QUEUE5_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE5_DUMMY_REG 0x0269
+#define regSDMA0_QUEUE5_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI 0x026a
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO 0x026b
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_AQL_CNTL 0x026c
+#define regSDMA0_QUEUE5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_MINOR_PTR_UPDATE 0x026d
+#define regSDMA0_QUEUE5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_PREEMPT 0x026e
+#define regSDMA0_QUEUE5_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA0 0x0278
+#define regSDMA0_QUEUE5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA1 0x0279
+#define regSDMA0_QUEUE5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA2 0x027a
+#define regSDMA0_QUEUE5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA3 0x027b
+#define regSDMA0_QUEUE5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA4 0x027c
+#define regSDMA0_QUEUE5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA5 0x027d
+#define regSDMA0_QUEUE5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA6 0x027e
+#define regSDMA0_QUEUE5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA7 0x027f
+#define regSDMA0_QUEUE5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA8 0x0280
+#define regSDMA0_QUEUE5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA9 0x0281
+#define regSDMA0_QUEUE5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA10 0x0282
+#define regSDMA0_QUEUE5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_CNTL 0x0283
+#define regSDMA0_QUEUE5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_CNTL 0x0290
+#define regSDMA0_QUEUE6_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_BASE 0x0291
+#define regSDMA0_QUEUE6_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_BASE_HI 0x0292
+#define regSDMA0_QUEUE6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR 0x0293
+#define regSDMA0_QUEUE6_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_HI 0x0294
+#define regSDMA0_QUEUE6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR 0x0295
+#define regSDMA0_QUEUE6_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_HI 0x0296
+#define regSDMA0_QUEUE6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_HI 0x0298
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_LO 0x0299
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_CNTL 0x029a
+#define regSDMA0_QUEUE6_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_RPTR 0x029b
+#define regSDMA0_QUEUE6_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_OFFSET 0x029c
+#define regSDMA0_QUEUE6_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_BASE_LO 0x029d
+#define regSDMA0_QUEUE6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_BASE_HI 0x029e
+#define regSDMA0_QUEUE6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_SIZE 0x029f
+#define regSDMA0_QUEUE6_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE6_SKIP_CNTL 0x02a0
+#define regSDMA0_QUEUE6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_CONTEXT_STATUS 0x02a1
+#define regSDMA0_QUEUE6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL 0x02a2
+#define regSDMA0_QUEUE6_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL_LOG 0x02b9
+#define regSDMA0_QUEUE6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL_OFFSET 0x02bb
+#define regSDMA0_QUEUE6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE6_CSA_ADDR_LO 0x02bc
+#define regSDMA0_QUEUE6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_CSA_ADDR_HI 0x02bd
+#define regSDMA0_QUEUE6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_SCHEDULE_CNTL 0x02be
+#define regSDMA0_QUEUE6_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_SUB_REMAIN 0x02bf
+#define regSDMA0_QUEUE6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE6_PREEMPT 0x02c0
+#define regSDMA0_QUEUE6_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE6_DUMMY_REG 0x02c1
+#define regSDMA0_QUEUE6_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_AQL_CNTL 0x02c4
+#define regSDMA0_QUEUE6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_MINOR_PTR_UPDATE 0x02c5
+#define regSDMA0_QUEUE6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_PREEMPT 0x02c6
+#define regSDMA0_QUEUE6_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA0 0x02d0
+#define regSDMA0_QUEUE6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA1 0x02d1
+#define regSDMA0_QUEUE6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA2 0x02d2
+#define regSDMA0_QUEUE6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA3 0x02d3
+#define regSDMA0_QUEUE6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA4 0x02d4
+#define regSDMA0_QUEUE6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA5 0x02d5
+#define regSDMA0_QUEUE6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA6 0x02d6
+#define regSDMA0_QUEUE6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA7 0x02d7
+#define regSDMA0_QUEUE6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA8 0x02d8
+#define regSDMA0_QUEUE6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA9 0x02d9
+#define regSDMA0_QUEUE6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA10 0x02da
+#define regSDMA0_QUEUE6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_CNTL 0x02db
+#define regSDMA0_QUEUE6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_CNTL 0x02e8
+#define regSDMA0_QUEUE7_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_BASE 0x02e9
+#define regSDMA0_QUEUE7_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_BASE_HI 0x02ea
+#define regSDMA0_QUEUE7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR 0x02eb
+#define regSDMA0_QUEUE7_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_HI 0x02ec
+#define regSDMA0_QUEUE7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR 0x02ed
+#define regSDMA0_QUEUE7_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_HI 0x02ee
+#define regSDMA0_QUEUE7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_HI 0x02f0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_LO 0x02f1
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_CNTL 0x02f2
+#define regSDMA0_QUEUE7_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_RPTR 0x02f3
+#define regSDMA0_QUEUE7_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_OFFSET 0x02f4
+#define regSDMA0_QUEUE7_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_BASE_LO 0x02f5
+#define regSDMA0_QUEUE7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_BASE_HI 0x02f6
+#define regSDMA0_QUEUE7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_SIZE 0x02f7
+#define regSDMA0_QUEUE7_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE7_SKIP_CNTL 0x02f8
+#define regSDMA0_QUEUE7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_CONTEXT_STATUS 0x02f9
+#define regSDMA0_QUEUE7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL 0x02fa
+#define regSDMA0_QUEUE7_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL_LOG 0x0311
+#define regSDMA0_QUEUE7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL_OFFSET 0x0313
+#define regSDMA0_QUEUE7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE7_CSA_ADDR_LO 0x0314
+#define regSDMA0_QUEUE7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_CSA_ADDR_HI 0x0315
+#define regSDMA0_QUEUE7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_SCHEDULE_CNTL 0x0316
+#define regSDMA0_QUEUE7_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_SUB_REMAIN 0x0317
+#define regSDMA0_QUEUE7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE7_PREEMPT 0x0318
+#define regSDMA0_QUEUE7_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE7_DUMMY_REG 0x0319
+#define regSDMA0_QUEUE7_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI 0x031a
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO 0x031b
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_AQL_CNTL 0x031c
+#define regSDMA0_QUEUE7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_MINOR_PTR_UPDATE 0x031d
+#define regSDMA0_QUEUE7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_PREEMPT 0x031e
+#define regSDMA0_QUEUE7_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA0 0x0328
+#define regSDMA0_QUEUE7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA1 0x0329
+#define regSDMA0_QUEUE7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA2 0x032a
+#define regSDMA0_QUEUE7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA3 0x032b
+#define regSDMA0_QUEUE7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA4 0x032c
+#define regSDMA0_QUEUE7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA5 0x032d
+#define regSDMA0_QUEUE7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA6 0x032e
+#define regSDMA0_QUEUE7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA7 0x032f
+#define regSDMA0_QUEUE7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA8 0x0330
+#define regSDMA0_QUEUE7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA9 0x0331
+#define regSDMA0_QUEUE7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA10 0x0332
+#define regSDMA0_QUEUE7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_CNTL 0x0333
+#define regSDMA0_QUEUE7_MIDCMD_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sdma0_sdma1dec
+// base address: 0x6180
+#define regSDMA1_DEC_START 0x0600
+#define regSDMA1_DEC_START_BASE_IDX 0
+#define regSDMA1_F32_MISC_CNTL 0x060b
+#define regSDMA1_F32_MISC_CNTL_BASE_IDX 0
+#define regSDMA1_GLOBAL_TIMESTAMP_LO 0x060f
+#define regSDMA1_GLOBAL_TIMESTAMP_LO_BASE_IDX 0
+#define regSDMA1_GLOBAL_TIMESTAMP_HI 0x0610
+#define regSDMA1_GLOBAL_TIMESTAMP_HI_BASE_IDX 0
+#define regSDMA1_POWER_CNTL 0x061a
+#define regSDMA1_POWER_CNTL_BASE_IDX 0
+#define regSDMA1_CNTL 0x061c
+#define regSDMA1_CNTL_BASE_IDX 0
+#define regSDMA1_CHICKEN_BITS 0x061d
+#define regSDMA1_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA1_GB_ADDR_CONFIG 0x061e
+#define regSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA1_GB_ADDR_CONFIG_READ 0x061f
+#define regSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA1_RB_RPTR_FETCH 0x0620
+#define regSDMA1_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA1_RB_RPTR_FETCH_HI 0x0621
+#define regSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0622
+#define regSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA1_IB_OFFSET_FETCH 0x0623
+#define regSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA1_PROGRAM 0x0624
+#define regSDMA1_PROGRAM_BASE_IDX 0
+#define regSDMA1_STATUS_REG 0x0625
+#define regSDMA1_STATUS_REG_BASE_IDX 0
+#define regSDMA1_STATUS1_REG 0x0626
+#define regSDMA1_STATUS1_REG_BASE_IDX 0
+#define regSDMA1_CNTL1 0x0627
+#define regSDMA1_CNTL1_BASE_IDX 0
+#define regSDMA1_HBM_PAGE_CONFIG 0x0628
+#define regSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA1_UCODE_CHECKSUM 0x0629
+#define regSDMA1_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA1_FREEZE 0x062b
+#define regSDMA1_FREEZE_BASE_IDX 0
+#define regSDMA1_PROCESS_QUANTUM0 0x062c
+#define regSDMA1_PROCESS_QUANTUM0_BASE_IDX 0
+#define regSDMA1_PROCESS_QUANTUM1 0x062d
+#define regSDMA1_PROCESS_QUANTUM1_BASE_IDX 0
+#define regSDMA1_WATCHDOG_CNTL 0x062e
+#define regSDMA1_WATCHDOG_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE_STATUS0 0x062f
+#define regSDMA1_QUEUE_STATUS0_BASE_IDX 0
+#define regSDMA1_EDC_CONFIG 0x0632
+#define regSDMA1_EDC_CONFIG_BASE_IDX 0
+#define regSDMA1_BA_THRESHOLD 0x0633
+#define regSDMA1_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA1_ID 0x0634
+#define regSDMA1_ID_BASE_IDX 0
+#define regSDMA1_VERSION 0x0635
+#define regSDMA1_VERSION_BASE_IDX 0
+#define regSDMA1_EDC_COUNTER 0x0636
+#define regSDMA1_EDC_COUNTER_BASE_IDX 0
+#define regSDMA1_EDC_COUNTER_CLEAR 0x0637
+#define regSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define regSDMA1_STATUS2_REG 0x0638
+#define regSDMA1_STATUS2_REG_BASE_IDX 0
+#define regSDMA1_ATOMIC_CNTL 0x0639
+#define regSDMA1_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA1_ATOMIC_PREOP_LO 0x063a
+#define regSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA1_ATOMIC_PREOP_HI 0x063b
+#define regSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA1_UTCL1_CNTL 0x063c
+#define regSDMA1_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA1_UTCL1_WATERMK 0x063d
+#define regSDMA1_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA1_UTCL1_TIMEOUT 0x063e
+#define regSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA1_UTCL1_PAGE 0x063f
+#define regSDMA1_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_STATUS 0x0640
+#define regSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_STATUS 0x0641
+#define regSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA1_UTCL1_INV0 0x0642
+#define regSDMA1_UTCL1_INV0_BASE_IDX 0
+#define regSDMA1_UTCL1_INV1 0x0643
+#define regSDMA1_UTCL1_INV1_BASE_IDX 0
+#define regSDMA1_UTCL1_INV2 0x0644
+#define regSDMA1_UTCL1_INV2_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_XNACK0 0x0645
+#define regSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_XNACK1 0x0646
+#define regSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_XNACK0 0x0647
+#define regSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_XNACK1 0x0648
+#define regSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA1_RELAX_ORDERING_LUT 0x064a
+#define regSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA1_CHICKEN_BITS_2 0x064b
+#define regSDMA1_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA1_STATUS3_REG 0x064c
+#define regSDMA1_STATUS3_REG_BASE_IDX 0
+#define regSDMA1_PHYSICAL_ADDR_LO 0x064d
+#define regSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_PHYSICAL_ADDR_HI 0x064e
+#define regSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_GLOBAL_QUANTUM 0x064f
+#define regSDMA1_GLOBAL_QUANTUM_BASE_IDX 0
+#define regSDMA1_ERROR_LOG 0x0650
+#define regSDMA1_ERROR_LOG_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG0 0x0651
+#define regSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG1 0x0652
+#define regSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG2 0x0653
+#define regSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG3 0x0654
+#define regSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA1_F32_COUNTER 0x0655
+#define regSDMA1_F32_COUNTER_BASE_IDX 0
+#define regSDMA1_CRD_CNTL 0x065b
+#define regSDMA1_CRD_CNTL_BASE_IDX 0
+#define regSDMA1_RLC_CGCG_CTRL 0x065c
+#define regSDMA1_RLC_CGCG_CTRL_BASE_IDX 0
+#define regSDMA1_GPU_IOV_VIOLATION_LOG 0x065d
+#define regSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA1_AQL_STATUS 0x065f
+#define regSDMA1_AQL_STATUS_BASE_IDX 0
+#define regSDMA1_EA_DBIT_ADDR_DATA 0x0660
+#define regSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA1_EA_DBIT_ADDR_INDEX 0x0661
+#define regSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA1_TLBI_GCR_CNTL 0x0662
+#define regSDMA1_TLBI_GCR_CNTL_BASE_IDX 0
+#define regSDMA1_TILING_CONFIG 0x0663
+#define regSDMA1_TILING_CONFIG_BASE_IDX 0
+#define regSDMA1_HASH 0x0664
+#define regSDMA1_HASH_BASE_IDX 0
+#define regSDMA1_INT_STATUS 0x0670
+#define regSDMA1_INT_STATUS_BASE_IDX 0
+#define regSDMA1_GPU_IOV_VIOLATION_LOG2 0x0671
+#define regSDMA1_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA1_HOLE_ADDR_LO 0x0672
+#define regSDMA1_HOLE_ADDR_LO_BASE_IDX 0
+#define regSDMA1_HOLE_ADDR_HI 0x0673
+#define regSDMA1_HOLE_ADDR_HI_BASE_IDX 0
+#define regSDMA1_CLOCK_GATING_STATUS 0x0675
+#define regSDMA1_CLOCK_GATING_STATUS_BASE_IDX 0
+#define regSDMA1_STATUS4_REG 0x0676
+#define regSDMA1_STATUS4_REG_BASE_IDX 0
+#define regSDMA1_SCRATCH_RAM_DATA 0x0677
+#define regSDMA1_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA1_SCRATCH_RAM_ADDR 0x0678
+#define regSDMA1_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA1_TIMESTAMP_CNTL 0x0679
+#define regSDMA1_TIMESTAMP_CNTL_BASE_IDX 0
+#define regSDMA1_STATUS5_REG 0x067a
+#define regSDMA1_STATUS5_REG_BASE_IDX 0
+#define regSDMA1_QUEUE_RESET_REQ 0x067b
+#define regSDMA1_QUEUE_RESET_REQ_BASE_IDX 0
+#define regSDMA1_STATUS6_REG 0x067c
+#define regSDMA1_STATUS6_REG_BASE_IDX 0
+#define regSDMA1_UCODE1_CHECKSUM 0x067d
+#define regSDMA1_UCODE1_CHECKSUM_BASE_IDX 0
+#define regSDMA1_CE_CTRL 0x067e
+#define regSDMA1_CE_CTRL_BASE_IDX 0
+#define regSDMA1_FED_STATUS 0x067f
+#define regSDMA1_FED_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_CNTL 0x0680
+#define regSDMA1_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_BASE 0x0681
+#define regSDMA1_QUEUE0_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_BASE_HI 0x0682
+#define regSDMA1_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR 0x0683
+#define regSDMA1_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_HI 0x0684
+#define regSDMA1_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR 0x0685
+#define regSDMA1_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_HI 0x0686
+#define regSDMA1_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_HI 0x0688
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_LO 0x0689
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_CNTL 0x068a
+#define regSDMA1_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_RPTR 0x068b
+#define regSDMA1_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_OFFSET 0x068c
+#define regSDMA1_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_BASE_LO 0x068d
+#define regSDMA1_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_BASE_HI 0x068e
+#define regSDMA1_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_SIZE 0x068f
+#define regSDMA1_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE0_SKIP_CNTL 0x0690
+#define regSDMA1_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_CONTEXT_STATUS 0x0691
+#define regSDMA1_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL 0x0692
+#define regSDMA1_QUEUE0_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL_LOG 0x06a9
+#define regSDMA1_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL_OFFSET 0x06ab
+#define regSDMA1_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE0_CSA_ADDR_LO 0x06ac
+#define regSDMA1_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_CSA_ADDR_HI 0x06ad
+#define regSDMA1_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_SCHEDULE_CNTL 0x06ae
+#define regSDMA1_QUEUE0_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_SUB_REMAIN 0x06af
+#define regSDMA1_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE0_PREEMPT 0x06b0
+#define regSDMA1_QUEUE0_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE0_DUMMY_REG 0x06b1
+#define regSDMA1_QUEUE0_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x06b2
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x06b3
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_AQL_CNTL 0x06b4
+#define regSDMA1_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_MINOR_PTR_UPDATE 0x06b5
+#define regSDMA1_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_PREEMPT 0x06b6
+#define regSDMA1_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA0 0x06c0
+#define regSDMA1_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA1 0x06c1
+#define regSDMA1_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA2 0x06c2
+#define regSDMA1_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA3 0x06c3
+#define regSDMA1_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA4 0x06c4
+#define regSDMA1_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA5 0x06c5
+#define regSDMA1_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA6 0x06c6
+#define regSDMA1_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA7 0x06c7
+#define regSDMA1_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA8 0x06c8
+#define regSDMA1_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA9 0x06c9
+#define regSDMA1_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA10 0x06ca
+#define regSDMA1_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_CNTL 0x06cb
+#define regSDMA1_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_CNTL 0x06d8
+#define regSDMA1_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_BASE 0x06d9
+#define regSDMA1_QUEUE1_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_BASE_HI 0x06da
+#define regSDMA1_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR 0x06db
+#define regSDMA1_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_HI 0x06dc
+#define regSDMA1_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR 0x06dd
+#define regSDMA1_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_HI 0x06de
+#define regSDMA1_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_HI 0x06e0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_LO 0x06e1
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_CNTL 0x06e2
+#define regSDMA1_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_RPTR 0x06e3
+#define regSDMA1_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_OFFSET 0x06e4
+#define regSDMA1_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_BASE_LO 0x06e5
+#define regSDMA1_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_BASE_HI 0x06e6
+#define regSDMA1_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_SIZE 0x06e7
+#define regSDMA1_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE1_SKIP_CNTL 0x06e8
+#define regSDMA1_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_CONTEXT_STATUS 0x06e9
+#define regSDMA1_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL 0x06ea
+#define regSDMA1_QUEUE1_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL_LOG 0x0701
+#define regSDMA1_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL_OFFSET 0x0703
+#define regSDMA1_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE1_CSA_ADDR_LO 0x0704
+#define regSDMA1_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_CSA_ADDR_HI 0x0705
+#define regSDMA1_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_SCHEDULE_CNTL 0x0706
+#define regSDMA1_QUEUE1_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_SUB_REMAIN 0x0707
+#define regSDMA1_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE1_PREEMPT 0x0708
+#define regSDMA1_QUEUE1_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE1_DUMMY_REG 0x0709
+#define regSDMA1_QUEUE1_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x070a
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x070b
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_AQL_CNTL 0x070c
+#define regSDMA1_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_MINOR_PTR_UPDATE 0x070d
+#define regSDMA1_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_PREEMPT 0x070e
+#define regSDMA1_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA0 0x0718
+#define regSDMA1_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA1 0x0719
+#define regSDMA1_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA2 0x071a
+#define regSDMA1_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA3 0x071b
+#define regSDMA1_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA4 0x071c
+#define regSDMA1_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA5 0x071d
+#define regSDMA1_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA6 0x071e
+#define regSDMA1_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA7 0x071f
+#define regSDMA1_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA8 0x0720
+#define regSDMA1_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA9 0x0721
+#define regSDMA1_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA10 0x0722
+#define regSDMA1_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_CNTL 0x0723
+#define regSDMA1_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_CNTL 0x0730
+#define regSDMA1_QUEUE2_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_BASE 0x0731
+#define regSDMA1_QUEUE2_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_BASE_HI 0x0732
+#define regSDMA1_QUEUE2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR 0x0733
+#define regSDMA1_QUEUE2_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_HI 0x0734
+#define regSDMA1_QUEUE2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR 0x0735
+#define regSDMA1_QUEUE2_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_HI 0x0736
+#define regSDMA1_QUEUE2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_HI 0x0738
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_LO 0x0739
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_CNTL 0x073a
+#define regSDMA1_QUEUE2_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_RPTR 0x073b
+#define regSDMA1_QUEUE2_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_OFFSET 0x073c
+#define regSDMA1_QUEUE2_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_BASE_LO 0x073d
+#define regSDMA1_QUEUE2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_BASE_HI 0x073e
+#define regSDMA1_QUEUE2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_SIZE 0x073f
+#define regSDMA1_QUEUE2_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE2_SKIP_CNTL 0x0740
+#define regSDMA1_QUEUE2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_CONTEXT_STATUS 0x0741
+#define regSDMA1_QUEUE2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL 0x0742
+#define regSDMA1_QUEUE2_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL_LOG 0x0759
+#define regSDMA1_QUEUE2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL_OFFSET 0x075b
+#define regSDMA1_QUEUE2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE2_CSA_ADDR_LO 0x075c
+#define regSDMA1_QUEUE2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_CSA_ADDR_HI 0x075d
+#define regSDMA1_QUEUE2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_SCHEDULE_CNTL 0x075e
+#define regSDMA1_QUEUE2_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_SUB_REMAIN 0x075f
+#define regSDMA1_QUEUE2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE2_PREEMPT 0x0760
+#define regSDMA1_QUEUE2_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE2_DUMMY_REG 0x0761
+#define regSDMA1_QUEUE2_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI 0x0762
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO 0x0763
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_AQL_CNTL 0x0764
+#define regSDMA1_QUEUE2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_MINOR_PTR_UPDATE 0x0765
+#define regSDMA1_QUEUE2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_PREEMPT 0x0766
+#define regSDMA1_QUEUE2_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA0 0x0770
+#define regSDMA1_QUEUE2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA1 0x0771
+#define regSDMA1_QUEUE2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA2 0x0772
+#define regSDMA1_QUEUE2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA3 0x0773
+#define regSDMA1_QUEUE2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA4 0x0774
+#define regSDMA1_QUEUE2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA5 0x0775
+#define regSDMA1_QUEUE2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA6 0x0776
+#define regSDMA1_QUEUE2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA7 0x0777
+#define regSDMA1_QUEUE2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA8 0x0778
+#define regSDMA1_QUEUE2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA9 0x0779
+#define regSDMA1_QUEUE2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA10 0x077a
+#define regSDMA1_QUEUE2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_CNTL 0x077b
+#define regSDMA1_QUEUE2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_CNTL 0x0788
+#define regSDMA1_QUEUE3_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_BASE 0x0789
+#define regSDMA1_QUEUE3_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_BASE_HI 0x078a
+#define regSDMA1_QUEUE3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR 0x078b
+#define regSDMA1_QUEUE3_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_HI 0x078c
+#define regSDMA1_QUEUE3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR 0x078d
+#define regSDMA1_QUEUE3_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_HI 0x078e
+#define regSDMA1_QUEUE3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_HI 0x0790
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_LO 0x0791
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_CNTL 0x0792
+#define regSDMA1_QUEUE3_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_RPTR 0x0793
+#define regSDMA1_QUEUE3_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_OFFSET 0x0794
+#define regSDMA1_QUEUE3_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_BASE_LO 0x0795
+#define regSDMA1_QUEUE3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_BASE_HI 0x0796
+#define regSDMA1_QUEUE3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_SIZE 0x0797
+#define regSDMA1_QUEUE3_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE3_SKIP_CNTL 0x0798
+#define regSDMA1_QUEUE3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_CONTEXT_STATUS 0x0799
+#define regSDMA1_QUEUE3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL 0x079a
+#define regSDMA1_QUEUE3_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL_LOG 0x07b1
+#define regSDMA1_QUEUE3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL_OFFSET 0x07b3
+#define regSDMA1_QUEUE3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE3_CSA_ADDR_LO 0x07b4
+#define regSDMA1_QUEUE3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_CSA_ADDR_HI 0x07b5
+#define regSDMA1_QUEUE3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_SCHEDULE_CNTL 0x07b6
+#define regSDMA1_QUEUE3_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_SUB_REMAIN 0x07b7
+#define regSDMA1_QUEUE3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE3_PREEMPT 0x07b8
+#define regSDMA1_QUEUE3_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE3_DUMMY_REG 0x07b9
+#define regSDMA1_QUEUE3_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI 0x07ba
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO 0x07bb
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_AQL_CNTL 0x07bc
+#define regSDMA1_QUEUE3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_MINOR_PTR_UPDATE 0x07bd
+#define regSDMA1_QUEUE3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_PREEMPT 0x07be
+#define regSDMA1_QUEUE3_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA0 0x07c8
+#define regSDMA1_QUEUE3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA1 0x07c9
+#define regSDMA1_QUEUE3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA2 0x07ca
+#define regSDMA1_QUEUE3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA3 0x07cb
+#define regSDMA1_QUEUE3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA4 0x07cc
+#define regSDMA1_QUEUE3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA5 0x07cd
+#define regSDMA1_QUEUE3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA6 0x07ce
+#define regSDMA1_QUEUE3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA7 0x07cf
+#define regSDMA1_QUEUE3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA8 0x07d0
+#define regSDMA1_QUEUE3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA9 0x07d1
+#define regSDMA1_QUEUE3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA10 0x07d2
+#define regSDMA1_QUEUE3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_CNTL 0x07d3
+#define regSDMA1_QUEUE3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_CNTL 0x07e0
+#define regSDMA1_QUEUE4_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_BASE 0x07e1
+#define regSDMA1_QUEUE4_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_BASE_HI 0x07e2
+#define regSDMA1_QUEUE4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR 0x07e3
+#define regSDMA1_QUEUE4_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_HI 0x07e4
+#define regSDMA1_QUEUE4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR 0x07e5
+#define regSDMA1_QUEUE4_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_HI 0x07e6
+#define regSDMA1_QUEUE4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_HI 0x07e8
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_LO 0x07e9
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_CNTL 0x07ea
+#define regSDMA1_QUEUE4_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_RPTR 0x07eb
+#define regSDMA1_QUEUE4_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_OFFSET 0x07ec
+#define regSDMA1_QUEUE4_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_BASE_LO 0x07ed
+#define regSDMA1_QUEUE4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_BASE_HI 0x07ee
+#define regSDMA1_QUEUE4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_SIZE 0x07ef
+#define regSDMA1_QUEUE4_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE4_SKIP_CNTL 0x07f0
+#define regSDMA1_QUEUE4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_CONTEXT_STATUS 0x07f1
+#define regSDMA1_QUEUE4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL 0x07f2
+#define regSDMA1_QUEUE4_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL_LOG 0x0809
+#define regSDMA1_QUEUE4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL_OFFSET 0x080b
+#define regSDMA1_QUEUE4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE4_CSA_ADDR_LO 0x080c
+#define regSDMA1_QUEUE4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_CSA_ADDR_HI 0x080d
+#define regSDMA1_QUEUE4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_SCHEDULE_CNTL 0x080e
+#define regSDMA1_QUEUE4_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_SUB_REMAIN 0x080f
+#define regSDMA1_QUEUE4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE4_PREEMPT 0x0810
+#define regSDMA1_QUEUE4_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE4_DUMMY_REG 0x0811
+#define regSDMA1_QUEUE4_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI 0x0812
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO 0x0813
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_AQL_CNTL 0x0814
+#define regSDMA1_QUEUE4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_MINOR_PTR_UPDATE 0x0815
+#define regSDMA1_QUEUE4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_PREEMPT 0x0816
+#define regSDMA1_QUEUE4_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA0 0x0820
+#define regSDMA1_QUEUE4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA1 0x0821
+#define regSDMA1_QUEUE4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA2 0x0822
+#define regSDMA1_QUEUE4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA3 0x0823
+#define regSDMA1_QUEUE4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA4 0x0824
+#define regSDMA1_QUEUE4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA5 0x0825
+#define regSDMA1_QUEUE4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA6 0x0826
+#define regSDMA1_QUEUE4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA7 0x0827
+#define regSDMA1_QUEUE4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA8 0x0828
+#define regSDMA1_QUEUE4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA9 0x0829
+#define regSDMA1_QUEUE4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA10 0x082a
+#define regSDMA1_QUEUE4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_CNTL 0x082b
+#define regSDMA1_QUEUE4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_CNTL 0x0838
+#define regSDMA1_QUEUE5_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_BASE 0x0839
+#define regSDMA1_QUEUE5_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_BASE_HI 0x083a
+#define regSDMA1_QUEUE5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR 0x083b
+#define regSDMA1_QUEUE5_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_HI 0x083c
+#define regSDMA1_QUEUE5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR 0x083d
+#define regSDMA1_QUEUE5_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_HI 0x083e
+#define regSDMA1_QUEUE5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_HI 0x0840
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_LO 0x0841
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_CNTL 0x0842
+#define regSDMA1_QUEUE5_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_RPTR 0x0843
+#define regSDMA1_QUEUE5_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_OFFSET 0x0844
+#define regSDMA1_QUEUE5_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_BASE_LO 0x0845
+#define regSDMA1_QUEUE5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_BASE_HI 0x0846
+#define regSDMA1_QUEUE5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_SIZE 0x0847
+#define regSDMA1_QUEUE5_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE5_SKIP_CNTL 0x0848
+#define regSDMA1_QUEUE5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_CONTEXT_STATUS 0x0849
+#define regSDMA1_QUEUE5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL 0x084a
+#define regSDMA1_QUEUE5_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL_LOG 0x0861
+#define regSDMA1_QUEUE5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL_OFFSET 0x0863
+#define regSDMA1_QUEUE5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE5_CSA_ADDR_LO 0x0864
+#define regSDMA1_QUEUE5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_CSA_ADDR_HI 0x0865
+#define regSDMA1_QUEUE5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_SCHEDULE_CNTL 0x0866
+#define regSDMA1_QUEUE5_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_SUB_REMAIN 0x0867
+#define regSDMA1_QUEUE5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE5_PREEMPT 0x0868
+#define regSDMA1_QUEUE5_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE5_DUMMY_REG 0x0869
+#define regSDMA1_QUEUE5_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI 0x086a
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO 0x086b
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_AQL_CNTL 0x086c
+#define regSDMA1_QUEUE5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_MINOR_PTR_UPDATE 0x086d
+#define regSDMA1_QUEUE5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_PREEMPT 0x086e
+#define regSDMA1_QUEUE5_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA0 0x0878
+#define regSDMA1_QUEUE5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA1 0x0879
+#define regSDMA1_QUEUE5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA2 0x087a
+#define regSDMA1_QUEUE5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA3 0x087b
+#define regSDMA1_QUEUE5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA4 0x087c
+#define regSDMA1_QUEUE5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA5 0x087d
+#define regSDMA1_QUEUE5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA6 0x087e
+#define regSDMA1_QUEUE5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA7 0x087f
+#define regSDMA1_QUEUE5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA8 0x0880
+#define regSDMA1_QUEUE5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA9 0x0881
+#define regSDMA1_QUEUE5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA10 0x0882
+#define regSDMA1_QUEUE5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_CNTL 0x0883
+#define regSDMA1_QUEUE5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_CNTL 0x0890
+#define regSDMA1_QUEUE6_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_BASE 0x0891
+#define regSDMA1_QUEUE6_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_BASE_HI 0x0892
+#define regSDMA1_QUEUE6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR 0x0893
+#define regSDMA1_QUEUE6_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_HI 0x0894
+#define regSDMA1_QUEUE6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR 0x0895
+#define regSDMA1_QUEUE6_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_HI 0x0896
+#define regSDMA1_QUEUE6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_HI 0x0898
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_LO 0x0899
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_CNTL 0x089a
+#define regSDMA1_QUEUE6_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_RPTR 0x089b
+#define regSDMA1_QUEUE6_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_OFFSET 0x089c
+#define regSDMA1_QUEUE6_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_BASE_LO 0x089d
+#define regSDMA1_QUEUE6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_BASE_HI 0x089e
+#define regSDMA1_QUEUE6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_SIZE 0x089f
+#define regSDMA1_QUEUE6_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE6_SKIP_CNTL 0x08a0
+#define regSDMA1_QUEUE6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_CONTEXT_STATUS 0x08a1
+#define regSDMA1_QUEUE6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL 0x08a2
+#define regSDMA1_QUEUE6_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL_LOG 0x08b9
+#define regSDMA1_QUEUE6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL_OFFSET 0x08bb
+#define regSDMA1_QUEUE6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE6_CSA_ADDR_LO 0x08bc
+#define regSDMA1_QUEUE6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_CSA_ADDR_HI 0x08bd
+#define regSDMA1_QUEUE6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_SCHEDULE_CNTL 0x08be
+#define regSDMA1_QUEUE6_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_SUB_REMAIN 0x08bf
+#define regSDMA1_QUEUE6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE6_PREEMPT 0x08c0
+#define regSDMA1_QUEUE6_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE6_DUMMY_REG 0x08c1
+#define regSDMA1_QUEUE6_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI 0x08c2
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO 0x08c3
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_AQL_CNTL 0x08c4
+#define regSDMA1_QUEUE6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_MINOR_PTR_UPDATE 0x08c5
+#define regSDMA1_QUEUE6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_PREEMPT 0x08c6
+#define regSDMA1_QUEUE6_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA0 0x08d0
+#define regSDMA1_QUEUE6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA1 0x08d1
+#define regSDMA1_QUEUE6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA2 0x08d2
+#define regSDMA1_QUEUE6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA3 0x08d3
+#define regSDMA1_QUEUE6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA4 0x08d4
+#define regSDMA1_QUEUE6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA5 0x08d5
+#define regSDMA1_QUEUE6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA6 0x08d6
+#define regSDMA1_QUEUE6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA7 0x08d7
+#define regSDMA1_QUEUE6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA8 0x08d8
+#define regSDMA1_QUEUE6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA9 0x08d9
+#define regSDMA1_QUEUE6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA10 0x08da
+#define regSDMA1_QUEUE6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_CNTL 0x08db
+#define regSDMA1_QUEUE6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_CNTL 0x08e8
+#define regSDMA1_QUEUE7_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_BASE 0x08e9
+#define regSDMA1_QUEUE7_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_BASE_HI 0x08ea
+#define regSDMA1_QUEUE7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR 0x08eb
+#define regSDMA1_QUEUE7_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_HI 0x08ec
+#define regSDMA1_QUEUE7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR 0x08ed
+#define regSDMA1_QUEUE7_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_HI 0x08ee
+#define regSDMA1_QUEUE7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_HI 0x08f0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_LO 0x08f1
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_CNTL 0x08f2
+#define regSDMA1_QUEUE7_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_RPTR 0x08f3
+#define regSDMA1_QUEUE7_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_OFFSET 0x08f4
+#define regSDMA1_QUEUE7_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_BASE_LO 0x08f5
+#define regSDMA1_QUEUE7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_BASE_HI 0x08f6
+#define regSDMA1_QUEUE7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_SIZE 0x08f7
+#define regSDMA1_QUEUE7_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE7_SKIP_CNTL 0x08f8
+#define regSDMA1_QUEUE7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_CONTEXT_STATUS 0x08f9
+#define regSDMA1_QUEUE7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL 0x08fa
+#define regSDMA1_QUEUE7_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL_LOG 0x0911
+#define regSDMA1_QUEUE7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL_OFFSET 0x0913
+#define regSDMA1_QUEUE7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE7_CSA_ADDR_LO 0x0914
+#define regSDMA1_QUEUE7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_CSA_ADDR_HI 0x0915
+#define regSDMA1_QUEUE7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_SCHEDULE_CNTL 0x0916
+#define regSDMA1_QUEUE7_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_SUB_REMAIN 0x0917
+#define regSDMA1_QUEUE7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE7_PREEMPT 0x0918
+#define regSDMA1_QUEUE7_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE7_DUMMY_REG 0x0919
+#define regSDMA1_QUEUE7_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI 0x091a
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO 0x091b
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_AQL_CNTL 0x091c
+#define regSDMA1_QUEUE7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_MINOR_PTR_UPDATE 0x091d
+#define regSDMA1_QUEUE7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_PREEMPT 0x091e
+#define regSDMA1_QUEUE7_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA0 0x0928
+#define regSDMA1_QUEUE7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA1 0x0929
+#define regSDMA1_QUEUE7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA2 0x092a
+#define regSDMA1_QUEUE7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA3 0x092b
+#define regSDMA1_QUEUE7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA4 0x092c
+#define regSDMA1_QUEUE7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA5 0x092d
+#define regSDMA1_QUEUE7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA6 0x092e
+#define regSDMA1_QUEUE7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA7 0x092f
+#define regSDMA1_QUEUE7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA8 0x0930
+#define regSDMA1_QUEUE7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA9 0x0931
+#define regSDMA1_QUEUE7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA10 0x0932
+#define regSDMA1_QUEUE7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_CNTL 0x0933
+#define regSDMA1_QUEUE7_MIDCMD_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sdma0_sdma0hypdec
+// base address: 0x3e200
+#define regSDMA0_UCODE_ADDR 0x5880
+#define regSDMA0_UCODE_ADDR_BASE_IDX 1
+#define regSDMA0_UCODE_DATA 0x5881
+#define regSDMA0_UCODE_DATA_BASE_IDX 1
+#define regSDMA0_UCODE_SELFLOAD_CONTROL 0x5882
+#define regSDMA0_UCODE_SELFLOAD_CONTROL_BASE_IDX 1
+#define regSDMA0_BROADCAST_UCODE_ADDR 0x5886
+#define regSDMA0_BROADCAST_UCODE_ADDR_BASE_IDX 1
+#define regSDMA0_BROADCAST_UCODE_DATA 0x5887
+#define regSDMA0_BROADCAST_UCODE_DATA_BASE_IDX 1
+#define regSDMA0_VM_CTX_LO 0x588c
+#define regSDMA0_VM_CTX_LO_BASE_IDX 1
+#define regSDMA0_VM_CTX_HI 0x588d
+#define regSDMA0_VM_CTX_HI_BASE_IDX 1
+#define regSDMA0_ACTIVE_FCN_ID 0x588e
+#define regSDMA0_ACTIVE_FCN_ID_BASE_IDX 1
+#define regSDMA0_VM_CTX_CNTL 0x588f
+#define regSDMA0_VM_CTX_CNTL_BASE_IDX 1
+#define regSDMA0_VIRT_RESET_REQ 0x5890
+#define regSDMA0_VIRT_RESET_REQ_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE0 0x5891
+#define regSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE1 0x5892
+#define regSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE2 0x5893
+#define regSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE0 0x5894
+#define regSDMA0_PUB_REG_TYPE0_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE1 0x5895
+#define regSDMA0_PUB_REG_TYPE1_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE2 0x5896
+#define regSDMA0_PUB_REG_TYPE2_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE3 0x5897
+#define regSDMA0_PUB_REG_TYPE3_BASE_IDX 1
+#define regSDMA0_VM_CNTL 0x5899
+#define regSDMA0_VM_CNTL_BASE_IDX 1
+#define regSDMA0_F32_CNTL 0x589a
+#define regSDMA0_F32_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1hypdec
+// base address: 0x3e280
+#define regSDMA1_UCODE_ADDR 0x58a0
+#define regSDMA1_UCODE_ADDR_BASE_IDX 1
+#define regSDMA1_UCODE_DATA 0x58a1
+#define regSDMA1_UCODE_DATA_BASE_IDX 1
+#define regSDMA1_UCODE_SELFLOAD_CONTROL 0x58a2
+#define regSDMA1_UCODE_SELFLOAD_CONTROL_BASE_IDX 1
+#define regSDMA1_BROADCAST_UCODE_ADDR 0x58a6
+#define regSDMA1_BROADCAST_UCODE_ADDR_BASE_IDX 1
+#define regSDMA1_BROADCAST_UCODE_DATA 0x58a7
+#define regSDMA1_BROADCAST_UCODE_DATA_BASE_IDX 1
+#define regSDMA1_VM_CTX_LO 0x58ac
+#define regSDMA1_VM_CTX_LO_BASE_IDX 1
+#define regSDMA1_VM_CTX_HI 0x58ad
+#define regSDMA1_VM_CTX_HI_BASE_IDX 1
+#define regSDMA1_ACTIVE_FCN_ID 0x58ae
+#define regSDMA1_ACTIVE_FCN_ID_BASE_IDX 1
+#define regSDMA1_VM_CTX_CNTL 0x58af
+#define regSDMA1_VM_CTX_CNTL_BASE_IDX 1
+#define regSDMA1_VIRT_RESET_REQ 0x58b0
+#define regSDMA1_VIRT_RESET_REQ_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE0 0x58b1
+#define regSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE1 0x58b2
+#define regSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE2 0x58b3
+#define regSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE0 0x58b4
+#define regSDMA1_PUB_REG_TYPE0_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE1 0x58b5
+#define regSDMA1_PUB_REG_TYPE1_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE2 0x58b6
+#define regSDMA1_PUB_REG_TYPE2_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE3 0x58b7
+#define regSDMA1_PUB_REG_TYPE3_BASE_IDX 1
+#define regSDMA1_VM_CNTL 0x58b9
+#define regSDMA1_VM_CNTL_BASE_IDX 1
+#define regSDMA1_F32_CNTL 0x58ba
+#define regSDMA1_F32_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma0perfsdec
+// base address: 0x37880
+#define regSDMA0_PERFCNT_PERFCOUNTER0_CFG 0x3e20
+#define regSDMA0_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER1_CFG 0x3e21
+#define regSDMA0_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x3e22
+#define regSDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSDMA0_PERFCNT_MISC_CNTL 0x3e23
+#define regSDMA0_PERFCNT_MISC_CNTL_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_SELECT 0x3e24
+#define regSDMA0_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_SELECT1 0x3e25
+#define regSDMA0_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_SELECT 0x3e26
+#define regSDMA0_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_SELECT1 0x3e27
+#define regSDMA0_PERFCOUNTER1_SELECT1_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1perfsdec
+// base address: 0x378b0
+#define regSDMA1_PERFCNT_PERFCOUNTER0_CFG 0x3e2c
+#define regSDMA1_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER1_CFG 0x3e2d
+#define regSDMA1_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x3e2e
+#define regSDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSDMA1_PERFCNT_MISC_CNTL 0x3e2f
+#define regSDMA1_PERFCNT_MISC_CNTL_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_SELECT 0x3e30
+#define regSDMA1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_SELECT1 0x3e31
+#define regSDMA1_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_SELECT 0x3e32
+#define regSDMA1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_SELECT1 0x3e33
+#define regSDMA1_PERFCOUNTER1_SELECT1_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma0perfddec
+// base address: 0x35980
+#define regSDMA0_PERFCNT_PERFCOUNTER_LO 0x3660
+#define regSDMA0_PERFCNT_PERFCOUNTER_LO_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER_HI 0x3661
+#define regSDMA0_PERFCNT_PERFCOUNTER_HI_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_LO 0x3662
+#define regSDMA0_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_HI 0x3663
+#define regSDMA0_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_LO 0x3664
+#define regSDMA0_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_HI 0x3665
+#define regSDMA0_PERFCOUNTER1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1perfddec
+// base address: 0x359b0
+#define regSDMA1_PERFCNT_PERFCOUNTER_LO 0x366c
+#define regSDMA1_PERFCNT_PERFCOUNTER_LO_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER_HI 0x366d
+#define regSDMA1_PERFCNT_PERFCOUNTER_HI_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_LO 0x366e
+#define regSDMA1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_HI 0x366f
+#define regSDMA1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_LO 0x3670
+#define regSDMA1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_HI 0x3671
+#define regSDMA1_PERFCOUNTER1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define regGRBM_CNTL 0x0da0
+#define regGRBM_CNTL_BASE_IDX 0
+#define regGRBM_SKEW_CNTL 0x0da1
+#define regGRBM_SKEW_CNTL_BASE_IDX 0
+#define regGRBM_STATUS2 0x0da2
+#define regGRBM_STATUS2_BASE_IDX 0
+#define regGRBM_PWR_CNTL 0x0da3
+#define regGRBM_PWR_CNTL_BASE_IDX 0
+#define regGRBM_STATUS 0x0da4
+#define regGRBM_STATUS_BASE_IDX 0
+#define regGRBM_STATUS_SE0 0x0da5
+#define regGRBM_STATUS_SE0_BASE_IDX 0
+#define regGRBM_STATUS_SE1 0x0da6
+#define regGRBM_STATUS_SE1_BASE_IDX 0
+#define regGRBM_STATUS3 0x0da7
+#define regGRBM_STATUS3_BASE_IDX 0
+#define regGRBM_SOFT_RESET 0x0da8
+#define regGRBM_SOFT_RESET_BASE_IDX 0
+#define regGRBM_GFX_CLKEN_CNTL 0x0dac
+#define regGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define regGRBM_WAIT_IDLE_CLOCKS 0x0dad
+#define regGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define regGRBM_STATUS_SE2 0x0dae
+#define regGRBM_STATUS_SE2_BASE_IDX 0
+#define regGRBM_READ_ERROR 0x0db6
+#define regGRBM_READ_ERROR_BASE_IDX 0
+#define regGRBM_READ_ERROR2 0x0db7
+#define regGRBM_READ_ERROR2_BASE_IDX 0
+#define regGRBM_INT_CNTL 0x0db8
+#define regGRBM_INT_CNTL_BASE_IDX 0
+#define regGRBM_TRAP_OP 0x0db9
+#define regGRBM_TRAP_OP_BASE_IDX 0
+#define regGRBM_TRAP_ADDR 0x0dba
+#define regGRBM_TRAP_ADDR_BASE_IDX 0
+#define regGRBM_TRAP_ADDR_MSK 0x0dbb
+#define regGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define regGRBM_TRAP_WD 0x0dbc
+#define regGRBM_TRAP_WD_BASE_IDX 0
+#define regGRBM_TRAP_WD_MSK 0x0dbd
+#define regGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define regGRBM_DSM_BYPASS 0x0dbe
+#define regGRBM_DSM_BYPASS_BASE_IDX 0
+#define regGRBM_WRITE_ERROR 0x0dbf
+#define regGRBM_WRITE_ERROR_BASE_IDX 0
+#define regGRBM_CHIP_REVISION 0x0dc1
+#define regGRBM_CHIP_REVISION_BASE_IDX 0
+#define regGRBM_RSMU_CFG 0x0dc3
+#define regGRBM_RSMU_CFG_BASE_IDX 0
+#define regGRBM_IH_CREDIT 0x0dc4
+#define regGRBM_IH_CREDIT_BASE_IDX 0
+#define regGRBM_PWR_CNTL2 0x0dc5
+#define regGRBM_PWR_CNTL2_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_START 0x0dc6
+#define regGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_END 0x0dc7
+#define regGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define regGRBM_RSMU_READ_ERROR 0x0dc8
+#define regGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define regGRBM_INVALID_PIPE 0x0dc9
+#define regGRBM_INVALID_PIPE_BASE_IDX 0
+#define regGRBM_FENCE_RANGE0 0x0dca
+#define regGRBM_FENCE_RANGE0_BASE_IDX 0
+#define regGRBM_FENCE_RANGE1 0x0dcb
+#define regGRBM_FENCE_RANGE1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG0 0x0de0
+#define regGRBM_SCRATCH_REG0_BASE_IDX 0
+#define regGRBM_SCRATCH_REG1 0x0de1
+#define regGRBM_SCRATCH_REG1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG2 0x0de2
+#define regGRBM_SCRATCH_REG2_BASE_IDX 0
+#define regGRBM_SCRATCH_REG3 0x0de3
+#define regGRBM_SCRATCH_REG3_BASE_IDX 0
+#define regGRBM_SCRATCH_REG4 0x0de4
+#define regGRBM_SCRATCH_REG4_BASE_IDX 0
+#define regGRBM_SCRATCH_REG5 0x0de5
+#define regGRBM_SCRATCH_REG5_BASE_IDX 0
+#define regGRBM_SCRATCH_REG6 0x0de6
+#define regGRBM_SCRATCH_REG6_BASE_IDX 0
+#define regGRBM_SCRATCH_REG7 0x0de7
+#define regGRBM_SCRATCH_REG7_BASE_IDX 0
+#define regVIOLATION_DATA_ASYNC_VF_PROG 0x0df1
+#define regVIOLATION_DATA_ASYNC_VF_PROG_BASE_IDX 0
+
+
+// addressBlock: gc_cpdec
+// base address: 0x8200
+#define regCP_CPC_DEBUG_CNTL 0x0e20
+#define regCP_CPC_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPF_DEBUG_CNTL 0x0e22
+#define regCP_CPF_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPC_STATUS 0x0e24
+#define regCP_CPC_STATUS_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT 0x0e25
+#define regCP_CPC_BUSY_STAT_BASE_IDX 0
+#define regCP_CPC_STALLED_STAT1 0x0e26
+#define regCP_CPC_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPF_STATUS 0x0e27
+#define regCP_CPF_STATUS_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT 0x0e28
+#define regCP_CPF_BUSY_STAT_BASE_IDX 0
+#define regCP_CPF_STALLED_STAT1 0x0e29
+#define regCP_CPF_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT2 0x0e2a
+#define regCP_CPC_BUSY_STAT2_BASE_IDX 0
+#define regCP_CPC_GRBM_FREE_COUNT 0x0e2b
+#define regCP_CPC_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPC_PRIV_VIOLATION_ADDR 0x0e2c
+#define regCP_CPC_PRIV_VIOLATION_ADDR_BASE_IDX 0
+#define regCP_MEC_ME1_HEADER_DUMP 0x0e2e
+#define regCP_MEC_ME1_HEADER_DUMP_BASE_IDX 0
+#define regCP_MEC_ME2_HEADER_DUMP 0x0e2f
+#define regCP_MEC_ME2_HEADER_DUMP_BASE_IDX 0
+#define regCP_CPC_SCRATCH_INDEX 0x0e30
+#define regCP_CPC_SCRATCH_INDEX_BASE_IDX 0
+#define regCP_CPC_SCRATCH_DATA 0x0e31
+#define regCP_CPC_SCRATCH_DATA_BASE_IDX 0
+#define regCP_CPF_GRBM_FREE_COUNT 0x0e32
+#define regCP_CPF_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT2 0x0e33
+#define regCP_CPF_BUSY_STAT2_BASE_IDX 0
+#define regCP_CPC_HALT_HYST_COUNT 0x0e47
+#define regCP_CPC_HALT_HYST_COUNT_BASE_IDX 0
+#define regCP_STALLED_STAT3 0x0f3c
+#define regCP_STALLED_STAT3_BASE_IDX 0
+#define regCP_STALLED_STAT1 0x0f3d
+#define regCP_STALLED_STAT1_BASE_IDX 0
+#define regCP_STALLED_STAT2 0x0f3e
+#define regCP_STALLED_STAT2_BASE_IDX 0
+#define regCP_BUSY_STAT 0x0f3f
+#define regCP_BUSY_STAT_BASE_IDX 0
+#define regCP_STAT 0x0f40
+#define regCP_STAT_BASE_IDX 0
+#define regCP_ME_HEADER_DUMP 0x0f41
+#define regCP_ME_HEADER_DUMP_BASE_IDX 0
+#define regCP_PFP_HEADER_DUMP 0x0f42
+#define regCP_PFP_HEADER_DUMP_BASE_IDX 0
+#define regCP_GRBM_FREE_COUNT 0x0f43
+#define regCP_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_PFP_INSTR_PNTR 0x0f45
+#define regCP_PFP_INSTR_PNTR_BASE_IDX 0
+#define regCP_ME_INSTR_PNTR 0x0f46
+#define regCP_ME_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC1_INSTR_PNTR 0x0f48
+#define regCP_MEC1_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC2_INSTR_PNTR 0x0f49
+#define regCP_MEC2_INSTR_PNTR_BASE_IDX 0
+#define regCP_CSF_STAT 0x0f54
+#define regCP_CSF_STAT_BASE_IDX 0
+#define regCP_CNTX_STAT 0x0f58
+#define regCP_CNTX_STAT_BASE_IDX 0
+#define regCP_ME_PREEMPTION 0x0f59
+#define regCP_ME_PREEMPTION_BASE_IDX 0
+#define regCP_RB1_RPTR 0x0f5f
+#define regCP_RB1_RPTR_BASE_IDX 0
+#define regCP_RB0_RPTR 0x0f60
+#define regCP_RB0_RPTR_BASE_IDX 0
+#define regCP_RB_RPTR 0x0f60
+#define regCP_RB_RPTR_BASE_IDX 0
+#define regCP_RB_WPTR_DELAY 0x0f61
+#define regCP_RB_WPTR_DELAY_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_CNTL 0x0f62
+#define regCP_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_ROQ1_THRESHOLDS 0x0f75
+#define regCP_ROQ1_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ2_THRESHOLDS 0x0f76
+#define regCP_ROQ2_THRESHOLDS_BASE_IDX 0
+#define regCP_STQ_THRESHOLDS 0x0f77
+#define regCP_STQ_THRESHOLDS_BASE_IDX 0
+#define regCP_MEQ_THRESHOLDS 0x0f79
+#define regCP_MEQ_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_AVAIL 0x0f7a
+#define regCP_ROQ_AVAIL_BASE_IDX 0
+#define regCP_STQ_AVAIL 0x0f7b
+#define regCP_STQ_AVAIL_BASE_IDX 0
+#define regCP_ROQ2_AVAIL 0x0f7c
+#define regCP_ROQ2_AVAIL_BASE_IDX 0
+#define regCP_MEQ_AVAIL 0x0f7d
+#define regCP_MEQ_AVAIL_BASE_IDX 0
+#define regCP_CMD_INDEX 0x0f7e
+#define regCP_CMD_INDEX_BASE_IDX 0
+#define regCP_CMD_DATA 0x0f7f
+#define regCP_CMD_DATA_BASE_IDX 0
+#define regCP_ROQ_RB_STAT 0x0f80
+#define regCP_ROQ_RB_STAT_BASE_IDX 0
+#define regCP_ROQ_IB1_STAT 0x0f81
+#define regCP_ROQ_IB1_STAT_BASE_IDX 0
+#define regCP_ROQ_IB2_STAT 0x0f82
+#define regCP_ROQ_IB2_STAT_BASE_IDX 0
+#define regCP_STQ_STAT 0x0f83
+#define regCP_STQ_STAT_BASE_IDX 0
+#define regCP_STQ_WR_STAT 0x0f84
+#define regCP_STQ_WR_STAT_BASE_IDX 0
+#define regCP_MEQ_STAT 0x0f85
+#define regCP_MEQ_STAT_BASE_IDX 0
+#define regCP_ROQ3_THRESHOLDS 0x0f8c
+#define regCP_ROQ3_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_DB_STAT 0x0f8d
+#define regCP_ROQ_DB_STAT_BASE_IDX 0
+#define regCP_INT_STAT_DEBUG 0x0f97
+#define regCP_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_DEBUG_CNTL 0x0f98
+#define regCP_DEBUG_CNTL_BASE_IDX 0
+#define regCP_PRIV_VIOLATION_ADDR 0x0f9a
+#define regCP_PRIV_VIOLATION_ADDR_BASE_IDX 0
+
+
+// addressBlock: gc_padec
+// base address: 0x8800
+#define regVGT_DMA_DATA_FIFO_DEPTH 0x0fcd
+#define regVGT_DMA_DATA_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DMA_REQ_FIFO_DEPTH 0x0fce
+#define regVGT_DMA_REQ_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DRAW_INIT_FIFO_DEPTH 0x0fcf
+#define regVGT_DRAW_INIT_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_MC_LAT_CNTL 0x0fd6
+#define regVGT_MC_LAT_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS_2 0x0fd7
+#define regIA_UTCL1_STATUS_2_BASE_IDX 0
+#define regWD_CNTL_STATUS 0x0fdf
+#define regWD_CNTL_STATUS_BASE_IDX 0
+#define regCC_GC_PRIM_CONFIG 0x0fe0
+#define regCC_GC_PRIM_CONFIG_BASE_IDX 0
+#define regWD_QOS 0x0fe2
+#define regWD_QOS_BASE_IDX 0
+#define regWD_UTCL1_CNTL 0x0fe3
+#define regWD_UTCL1_CNTL_BASE_IDX 0
+#define regWD_UTCL1_STATUS 0x0fe4
+#define regWD_UTCL1_STATUS_BASE_IDX 0
+#define regIA_UTCL1_CNTL 0x0fe6
+#define regIA_UTCL1_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS 0x0fe7
+#define regIA_UTCL1_STATUS_BASE_IDX 0
+#define regCC_GC_SA_UNIT_DISABLE 0x0fe9
+#define regCC_GC_SA_UNIT_DISABLE_BASE_IDX 0
+#define regGE_RATE_CNTL_1 0x0ff4
+#define regGE_RATE_CNTL_1_BASE_IDX 0
+#define regGE_RATE_CNTL_2 0x0ff5
+#define regGE_RATE_CNTL_2_BASE_IDX 0
+#define regVGT_SYS_CONFIG 0x1003
+#define regVGT_SYS_CONFIG_BASE_IDX 0
+#define regGE_PRIV_CONTROL 0x1004
+#define regGE_PRIV_CONTROL_BASE_IDX 0
+#define regGE_STATUS 0x1005
+#define regGE_STATUS_BASE_IDX 0
+#define regVGT_GS_MAX_WAVE_ID 0x1009
+#define regVGT_GS_MAX_WAVE_ID_BASE_IDX 0
+#define regGFX_PIPE_CONTROL 0x100d
+#define regGFX_PIPE_CONTROL_BASE_IDX 0
+#define regCC_GC_SHADER_ARRAY_CONFIG 0x100f
+#define regCC_GC_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define regGE2_SE_CNTL_STATUS 0x1011
+#define regGE2_SE_CNTL_STATUS_BASE_IDX 0
+#define regVGT_RESET_DEBUG 0x1014
+#define regVGT_RESET_DEBUG_BASE_IDX 0
+#define regGE_SPI_IF_SAFE_REG 0x1018
+#define regGE_SPI_IF_SAFE_REG_BASE_IDX 0
+#define regGE_PA_IF_SAFE_REG 0x1019
+#define regGE_PA_IF_SAFE_REG_BASE_IDX 0
+#define regPA_CL_CNTL_STATUS 0x1024
+#define regPA_CL_CNTL_STATUS_BASE_IDX 0
+#define regPA_CL_ENHANCE 0x1025
+#define regPA_CL_ENHANCE_BASE_IDX 0
+#define regPA_CL_RESET_DEBUG 0x1026
+#define regPA_CL_RESET_DEBUG_BASE_IDX 0
+#define regPA_SU_CNTL_STATUS 0x1034
+#define regPA_SU_CNTL_STATUS_BASE_IDX 0
+#define regPA_SC_FIFO_DEPTH_CNTL 0x1035
+#define regPA_SC_FIFO_DEPTH_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define regSQ_CONFIG 0x10a0
+#define regSQ_CONFIG_BASE_IDX 0
+#define regSQC_CONFIG 0x10a1
+#define regSQC_CONFIG_BASE_IDX 0
+#define regLDS_CONFIG 0x10a2
+#define regLDS_CONFIG_BASE_IDX 0
+#define regSQ_RANDOM_WAVE_PRI 0x10a3
+#define regSQ_RANDOM_WAVE_PRI_BASE_IDX 0
+#define regSQG_STATUS 0x10a4
+#define regSQG_STATUS_BASE_IDX 0
+#define regSQ_FIFO_SIZES 0x10a5
+#define regSQ_FIFO_SIZES_BASE_IDX 0
+#define regSQ_DSM_CNTL 0x10a6
+#define regSQ_DSM_CNTL_BASE_IDX 0
+#define regSQ_DSM_CNTL2 0x10a7
+#define regSQ_DSM_CNTL2_BASE_IDX 0
+#define regSP_CONFIG 0x10ab
+#define regSP_CONFIG_BASE_IDX 0
+#define regSQ_ARB_CONFIG 0x10ac
+#define regSQ_ARB_CONFIG_BASE_IDX 0
+#define regSQ_DEBUG_HOST_TRAP_STATUS 0x10b6
+#define regSQ_DEBUG_HOST_TRAP_STATUS_BASE_IDX 0
+#define regSQG_GL1H_STATUS 0x10b9
+#define regSQG_GL1H_STATUS_BASE_IDX 0
+#define regSQG_CONFIG 0x10ba
+#define regSQG_CONFIG_BASE_IDX 0
+#define regSQ_PERF_SNAPSHOT_CTRL 0x10bb
+#define regSQ_PERF_SNAPSHOT_CTRL_BASE_IDX 0
+#define regCC_GC_SHADER_RATE_CONFIG 0x10bc
+#define regCC_GC_SHADER_RATE_CONFIG_BASE_IDX 0
+#define regSQ_INTERRUPT_AUTO_MASK 0x10be
+#define regSQ_INTERRUPT_AUTO_MASK_BASE_IDX 0
+#define regSQ_INTERRUPT_MSG_CTRL 0x10bf
+#define regSQ_INTERRUPT_MSG_CTRL_BASE_IDX 0
+#define regSQ_WATCH0_ADDR_H 0x10d0
+#define regSQ_WATCH0_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH0_ADDR_L 0x10d1
+#define regSQ_WATCH0_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH0_CNTL 0x10d2
+#define regSQ_WATCH0_CNTL_BASE_IDX 0
+#define regSQ_WATCH1_ADDR_H 0x10d3
+#define regSQ_WATCH1_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH1_ADDR_L 0x10d4
+#define regSQ_WATCH1_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH1_CNTL 0x10d5
+#define regSQ_WATCH1_CNTL_BASE_IDX 0
+#define regSQ_WATCH2_ADDR_H 0x10d6
+#define regSQ_WATCH2_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH2_ADDR_L 0x10d7
+#define regSQ_WATCH2_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH2_CNTL 0x10d8
+#define regSQ_WATCH2_CNTL_BASE_IDX 0
+#define regSQ_WATCH3_ADDR_H 0x10d9
+#define regSQ_WATCH3_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH3_ADDR_L 0x10da
+#define regSQ_WATCH3_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH3_CNTL 0x10db
+#define regSQ_WATCH3_CNTL_BASE_IDX 0
+#define regSQ_IND_INDEX 0x1118
+#define regSQ_IND_INDEX_BASE_IDX 0
+#define regSQ_IND_DATA 0x1119
+#define regSQ_IND_DATA_BASE_IDX 0
+#define regSQ_CMD 0x111b
+#define regSQ_CMD_BASE_IDX 0
+#define regSQC_MISC_CONFIG 0x1179
+#define regSQC_MISC_CONFIG_BASE_IDX 0
+
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define regSX_DEBUG_BUSY 0x11b4
+#define regSX_DEBUG_BUSY_BASE_IDX 0
+#define regSX_DEBUG_BUSY_2 0x11b5
+#define regSX_DEBUG_BUSY_2_BASE_IDX 0
+#define regSX_DEBUG_BUSY_3 0x11b6
+#define regSX_DEBUG_BUSY_3_BASE_IDX 0
+#define regSX_DEBUG_BUSY_4 0x11b7
+#define regSX_DEBUG_BUSY_4_BASE_IDX 0
+#define regSX_DEBUG_1 0x11b8
+#define regSX_DEBUG_1_BASE_IDX 0
+#define regSX_DEBUG_BUSY_5 0x11b9
+#define regSX_DEBUG_BUSY_5_BASE_IDX 0
+#define regSX_DEBUG_BUSY_6 0x11ba
+#define regSX_DEBUG_BUSY_6_BASE_IDX 0
+#define regSX_DEBUG_BUSY_7 0x11bb
+#define regSX_DEBUG_BUSY_7_BASE_IDX 0
+#define regSX_DEBUG_BUSY_8 0x11bc
+#define regSX_DEBUG_BUSY_8_BASE_IDX 0
+#define regSX_DEBUG_BUSY_9 0x11bd
+#define regSX_DEBUG_BUSY_9_BASE_IDX 0
+#define regSX_DEBUG_BUSY_10 0x11be
+#define regSX_DEBUG_BUSY_10_BASE_IDX 0
+#define regSPI_PS_MAX_WAVE_ID 0x11da
+#define regSPI_PS_MAX_WAVE_ID_BASE_IDX 0
+#define regSPI_GFX_CNTL 0x11dc
+#define regSPI_GFX_CNTL_BASE_IDX 0
+#define regSPI_DEBUG_READ 0x11e2
+#define regSPI_DEBUG_READ_BASE_IDX 0
+#define regSPI_DSM_CNTL 0x11e3
+#define regSPI_DSM_CNTL_BASE_IDX 0
+#define regSPI_DSM_CNTL2 0x11e4
+#define regSPI_DSM_CNTL2_BASE_IDX 0
+#define regSPI_EDC_CNT 0x11e5
+#define regSPI_EDC_CNT_BASE_IDX 0
+#define regSPI_DEBUG_BUSY 0x11f0
+#define regSPI_DEBUG_BUSY_BASE_IDX 0
+#define regSPI_CONFIG_PS_CU_EN 0x11f2
+#define regSPI_CONFIG_PS_CU_EN_BASE_IDX 0
+#define regSPI_WF_LIFETIME_CNTL 0x124a
+#define regSPI_WF_LIFETIME_CNTL_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_0 0x124b
+#define regSPI_WF_LIFETIME_LIMIT_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_1 0x124c
+#define regSPI_WF_LIFETIME_LIMIT_1_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_2 0x124d
+#define regSPI_WF_LIFETIME_LIMIT_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_3 0x124e
+#define regSPI_WF_LIFETIME_LIMIT_3_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_4 0x124f
+#define regSPI_WF_LIFETIME_LIMIT_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_5 0x1250
+#define regSPI_WF_LIFETIME_LIMIT_5_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_0 0x1255
+#define regSPI_WF_LIFETIME_STATUS_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_2 0x1257
+#define regSPI_WF_LIFETIME_STATUS_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_4 0x1259
+#define regSPI_WF_LIFETIME_STATUS_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_6 0x125b
+#define regSPI_WF_LIFETIME_STATUS_6_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_7 0x125c
+#define regSPI_WF_LIFETIME_STATUS_7_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_9 0x125e
+#define regSPI_WF_LIFETIME_STATUS_9_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_11 0x1260
+#define regSPI_WF_LIFETIME_STATUS_11_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_13 0x1262
+#define regSPI_WF_LIFETIME_STATUS_13_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_14 0x1263
+#define regSPI_WF_LIFETIME_STATUS_14_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_15 0x1264
+#define regSPI_WF_LIFETIME_STATUS_15_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_16 0x1265
+#define regSPI_WF_LIFETIME_STATUS_16_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_17 0x1266
+#define regSPI_WF_LIFETIME_STATUS_17_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_18 0x1267
+#define regSPI_WF_LIFETIME_STATUS_18_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_19 0x1268
+#define regSPI_WF_LIFETIME_STATUS_19_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_20 0x1269
+#define regSPI_WF_LIFETIME_STATUS_20_BASE_IDX 0
+#define regSPI_WF_LIFETIME_DEBUG 0x126a
+#define regSPI_WF_LIFETIME_DEBUG_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_21 0x126b
+#define regSPI_WF_LIFETIME_STATUS_21_BASE_IDX 0
+#define regSPI_LB_CTR_CTRL 0x1274
+#define regSPI_LB_CTR_CTRL_BASE_IDX 0
+#define regSPI_LB_WGP_MASK 0x1275
+#define regSPI_LB_WGP_MASK_BASE_IDX 0
+#define regSPI_LB_DATA_REG 0x1276
+#define regSPI_LB_DATA_REG_BASE_IDX 0
+#define regSPI_PG_ENABLE_STATIC_WGP_MASK 0x1277
+#define regSPI_PG_ENABLE_STATIC_WGP_MASK_BASE_IDX 0
+#define regSPI_GDS_CREDITS 0x1278
+#define regSPI_GDS_CREDITS_BASE_IDX 0
+#define regSPI_SX_EXPORT_BUFFER_SIZES 0x1279
+#define regSPI_SX_EXPORT_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES 0x127a
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_STATUS 0x127b
+#define regSPI_CSQ_WF_ACTIVE_STATUS_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0 0x127c
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1 0x127d
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2 0x127e
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3 0x127f
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3_BASE_IDX 0
+#define regSPI_LB_DATA_WAVES 0x1284
+#define regSPI_LB_DATA_WAVES_BASE_IDX 0
+#define regSPI_LB_DATA_PERWGP_WAVE_HSGS 0x1285
+#define regSPI_LB_DATA_PERWGP_WAVE_HSGS_BASE_IDX 0
+#define regSPI_LB_DATA_PERWGP_WAVE_CS 0x1287
+#define regSPI_LB_DATA_PERWGP_WAVE_CS_BASE_IDX 0
+#define regSPIS_DEBUG_READ 0x128a
+#define regSPIS_DEBUG_READ_BASE_IDX 0
+#define regBCI_DEBUG_READ 0x128b
+#define regBCI_DEBUG_READ_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO 0x128c
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI 0x128d
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO 0x128e
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI 0x128f
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN 0x1290
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO 0x1291
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI 0x1292
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO 0x1293
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI 0x1294
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN 0x1295
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define regTD_CNTL 0x12c5
+#define regTD_CNTL_BASE_IDX 0
+#define regTD_STATUS 0x12c6
+#define regTD_STATUS_BASE_IDX 0
+#define regTD_POWER_CNTL 0x12ca
+#define regTD_POWER_CNTL_BASE_IDX 0
+#define regTD_CNTL2 0x12cb
+#define regTD_CNTL2_BASE_IDX 0
+#define regTD_DSM_CNTL 0x12cf
+#define regTD_DSM_CNTL_BASE_IDX 0
+#define regTD_DSM_CNTL2 0x12d0
+#define regTD_DSM_CNTL2_BASE_IDX 0
+#define regTD_SCRATCH 0x12d3
+#define regTD_SCRATCH_BASE_IDX 0
+#define regTA_CNTL 0x12e1
+#define regTA_CNTL_BASE_IDX 0
+#define regTA_CNTL_AUX 0x12e2
+#define regTA_CNTL_AUX_BASE_IDX 0
+#define regTA_CNTL2 0x12e5
+#define regTA_CNTL2_BASE_IDX 0
+#define regTA_STATUS 0x12e8
+#define regTA_STATUS_BASE_IDX 0
+#define regTA_SCRATCH 0x1304
+#define regTA_SCRATCH_BASE_IDX 0
+
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define regGDS_CONFIG 0x1360
+#define regGDS_CONFIG_BASE_IDX 0
+#define regGDS_CNTL_STATUS 0x1361
+#define regGDS_CNTL_STATUS_BASE_IDX 0
+#define regGDS_ENHANCE 0x1362
+#define regGDS_ENHANCE_BASE_IDX 0
+#define regGDS_PROTECTION_FAULT 0x1363
+#define regGDS_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_VM_PROTECTION_FAULT 0x1364
+#define regGDS_VM_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_EDC_CNT 0x1365
+#define regGDS_EDC_CNT_BASE_IDX 0
+#define regGDS_EDC_GRBM_CNT 0x1366
+#define regGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_DED 0x1367
+#define regGDS_EDC_OA_DED_BASE_IDX 0
+#define regGDS_DSM_CNTL 0x136a
+#define regGDS_DSM_CNTL_BASE_IDX 0
+#define regGDS_EDC_OA_PHY_CNT 0x136b
+#define regGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_PIPE_CNT 0x136c
+#define regGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+#define regGDS_DSM_CNTL2 0x136d
+#define regGDS_DSM_CNTL2_BASE_IDX 0
+
+
+// addressBlock: gc_rbdec
+// base address: 0x9800
+#define regDB_DEBUG 0x13ac
+#define regDB_DEBUG_BASE_IDX 0
+#define regDB_DEBUG2 0x13ad
+#define regDB_DEBUG2_BASE_IDX 0
+#define regDB_DEBUG3 0x13ae
+#define regDB_DEBUG3_BASE_IDX 0
+#define regDB_DEBUG4 0x13af
+#define regDB_DEBUG4_BASE_IDX 0
+#define regDB_ETILE_STUTTER_CONTROL 0x13b0
+#define regDB_ETILE_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_LTILE_STUTTER_CONTROL 0x13b1
+#define regDB_LTILE_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_EQUAD_STUTTER_CONTROL 0x13b2
+#define regDB_EQUAD_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_LQUAD_STUTTER_CONTROL 0x13b3
+#define regDB_LQUAD_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_CREDIT_LIMIT 0x13b4
+#define regDB_CREDIT_LIMIT_BASE_IDX 0
+#define regDB_WATERMARKS 0x13b5
+#define regDB_WATERMARKS_BASE_IDX 0
+#define regDB_SUBTILE_CONTROL 0x13b6
+#define regDB_SUBTILE_CONTROL_BASE_IDX 0
+#define regDB_FREE_CACHELINES 0x13b7
+#define regDB_FREE_CACHELINES_BASE_IDX 0
+#define regDB_FIFO_DEPTH1 0x13b8
+#define regDB_FIFO_DEPTH1_BASE_IDX 0
+#define regDB_FIFO_DEPTH2 0x13b9
+#define regDB_FIFO_DEPTH2_BASE_IDX 0
+#define regDB_LAST_OF_BURST_CONFIG 0x13ba
+#define regDB_LAST_OF_BURST_CONFIG_BASE_IDX 0
+#define regDB_RING_CONTROL 0x13bb
+#define regDB_RING_CONTROL_BASE_IDX 0
+#define regDB_MEM_ARB_WATERMARKS 0x13bc
+#define regDB_MEM_ARB_WATERMARKS_BASE_IDX 0
+#define regDB_FIFO_DEPTH3 0x13bd
+#define regDB_FIFO_DEPTH3_BASE_IDX 0
+#define regDB_DEBUG6 0x13be
+#define regDB_DEBUG6_BASE_IDX 0
+#define regDB_EXCEPTION_CONTROL 0x13bf
+#define regDB_EXCEPTION_CONTROL_BASE_IDX 0
+#define regDB_DEBUG7 0x13d0
+#define regDB_DEBUG7_BASE_IDX 0
+#define regDB_DEBUG5 0x13d1
+#define regDB_DEBUG5_BASE_IDX 0
+#define regDB_FGCG_SRAMS_CLK_CTRL 0x13d7
+#define regDB_FGCG_SRAMS_CLK_CTRL_BASE_IDX 0
+#define regDB_FGCG_INTERFACES_CLK_CTRL 0x13d8
+#define regDB_FGCG_INTERFACES_CLK_CTRL_BASE_IDX 0
+#define regDB_FIFO_DEPTH4 0x13d9
+#define regDB_FIFO_DEPTH4_BASE_IDX 0
+#define regCC_RB_REDUNDANCY 0x13dc
+#define regCC_RB_REDUNDANCY_BASE_IDX 0
+#define regCC_RB_BACKEND_DISABLE 0x13dd
+#define regCC_RB_BACKEND_DISABLE_BASE_IDX 0
+#define regGB_ADDR_CONFIG 0x13de
+#define regGB_ADDR_CONFIG_BASE_IDX 0
+#define regGB_BACKEND_MAP 0x13df
+#define regGB_BACKEND_MAP_BASE_IDX 0
+#define regGB_GPU_ID 0x13e0
+#define regGB_GPU_ID_BASE_IDX 0
+#define regCC_RB_DAISY_CHAIN 0x13e1
+#define regCC_RB_DAISY_CHAIN_BASE_IDX 0
+#define regGB_ADDR_CONFIG_READ 0x13e2
+#define regGB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regCB_HW_CONTROL_4 0x1422
+#define regCB_HW_CONTROL_4_BASE_IDX 0
+#define regCB_HW_CONTROL_3 0x1423
+#define regCB_HW_CONTROL_3_BASE_IDX 0
+#define regCB_HW_CONTROL 0x1424
+#define regCB_HW_CONTROL_BASE_IDX 0
+#define regCB_HW_CONTROL_1 0x1425
+#define regCB_HW_CONTROL_1_BASE_IDX 0
+#define regCB_HW_CONTROL_2 0x1426
+#define regCB_HW_CONTROL_2_BASE_IDX 0
+#define regCB_DCC_CONFIG 0x1427
+#define regCB_DCC_CONFIG_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_RD 0x1428
+#define regCB_HW_MEM_ARBITER_RD_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_WR 0x1429
+#define regCB_HW_MEM_ARBITER_WR_BASE_IDX 0
+#define regCB_FGCG_SRAM_OVERRIDE 0x142a
+#define regCB_FGCG_SRAM_OVERRIDE_BASE_IDX 0
+#define regCB_DCC_CONFIG2 0x142b
+#define regCB_DCC_CONFIG2_BASE_IDX 0
+#define regCHICKEN_BITS 0x142d
+#define regCHICKEN_BITS_BASE_IDX 0
+#define regCB_CACHE_EVICT_POINTS 0x142e
+#define regCB_CACHE_EVICT_POINTS_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec
+// base address: 0xa800
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0 0x17a0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1 0x17a1
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0 0x17a2
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1 0x17a3
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_RD_GRP2VC_MAP 0x17a4
+#define regGCEA_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_WR_GRP2VC_MAP 0x17a5
+#define regGCEA_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_RD_LAZY 0x17a6
+#define regGCEA_DRAM_RD_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_WR_LAZY 0x17a7
+#define regGCEA_DRAM_WR_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_RD_CAM_CNTL 0x17a8
+#define regGCEA_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_WR_CAM_CNTL 0x17a9
+#define regGCEA_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_PAGE_BURST 0x17aa
+#define regGCEA_DRAM_PAGE_BURST_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_AGE 0x17ab
+#define regGCEA_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_AGE 0x17ac
+#define regGCEA_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUEUING 0x17ad
+#define regGCEA_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUEUING 0x17ae
+#define regGCEA_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_FIXED 0x17af
+#define regGCEA_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_FIXED 0x17b0
+#define regGCEA_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_URGENCY 0x17b1
+#define regGCEA_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_URGENCY 0x17b2
+#define regGCEA_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1 0x17b3
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2 0x17b4
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3 0x17b5
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1 0x17b6
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2 0x17b7
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3 0x17b8
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP0 0x187d
+#define regGCEA_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP1 0x187e
+#define regGCEA_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP0 0x187f
+#define regGCEA_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP1 0x1880
+#define regGCEA_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_RD_COMBINE_FLUSH 0x1881
+#define regGCEA_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_WR_COMBINE_FLUSH 0x1882
+#define regGCEA_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_GROUP_BURST 0x1883
+#define regGCEA_IO_GROUP_BURST_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_AGE 0x1884
+#define regGCEA_IO_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_AGE 0x1885
+#define regGCEA_IO_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUEUING 0x1886
+#define regGCEA_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUEUING 0x1887
+#define regGCEA_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_FIXED 0x1888
+#define regGCEA_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_FIXED 0x1889
+#define regGCEA_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY 0x188a
+#define regGCEA_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY 0x188b
+#define regGCEA_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING 0x188c
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING 0x188d
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI1 0x188e
+#define regGCEA_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI2 0x188f
+#define regGCEA_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI3 0x1890
+#define regGCEA_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI1 0x1891
+#define regGCEA_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI2 0x1892
+#define regGCEA_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI3 0x1893
+#define regGCEA_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_SDP_ARB_DRAM 0x1894
+#define regGCEA_SDP_ARB_DRAM_BASE_IDX 0
+#define regGCEA_SDP_ARB_FINAL 0x1896
+#define regGCEA_SDP_ARB_FINAL_BASE_IDX 0
+#define regGCEA_SDP_DRAM_PRIORITY 0x1897
+#define regGCEA_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_IO_PRIORITY 0x1899
+#define regGCEA_SDP_IO_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_CREDITS 0x189a
+#define regGCEA_SDP_CREDITS_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE0 0x189b
+#define regGCEA_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE1 0x189c
+#define regGCEA_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE0 0x189d
+#define regGCEA_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE1 0x189e
+#define regGCEA_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCD_RESERVE0 0x189f
+#define regGCEA_SDP_VCD_RESERVE0_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec2
+// base address: 0x9c00
+#define regGCEA_SDP_VCD_RESERVE1 0x14a0
+#define regGCEA_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_REQ_CNTL 0x14a1
+#define regGCEA_SDP_REQ_CNTL_BASE_IDX 0
+#define regGCEA_MISC 0x14a2
+#define regGCEA_MISC_BASE_IDX 0
+#define regGCEA_LATENCY_SAMPLING 0x14a3
+#define regGCEA_LATENCY_SAMPLING_BASE_IDX 0
+#define regGCEA_MAM_CTRL2 0x14a9
+#define regGCEA_MAM_CTRL2_BASE_IDX 0
+#define regGCEA_MAM_CTRL 0x14ab
+#define regGCEA_MAM_CTRL_BASE_IDX 0
+#define regGCEA_EDC_CNT 0x14b2
+#define regGCEA_EDC_CNT_BASE_IDX 0
+#define regGCEA_EDC_CNT2 0x14b3
+#define regGCEA_EDC_CNT2_BASE_IDX 0
+#define regGCEA_DSM_CNTL 0x14b4
+#define regGCEA_DSM_CNTL_BASE_IDX 0
+#define regGCEA_DSM_CNTLA 0x14b5
+#define regGCEA_DSM_CNTLA_BASE_IDX 0
+#define regGCEA_DSM_CNTLB 0x14b6
+#define regGCEA_DSM_CNTLB_BASE_IDX 0
+#define regGCEA_DSM_CNTL2 0x14b7
+#define regGCEA_DSM_CNTL2_BASE_IDX 0
+#define regGCEA_DSM_CNTL2A 0x14b8
+#define regGCEA_DSM_CNTL2A_BASE_IDX 0
+#define regGCEA_DSM_CNTL2B 0x14b9
+#define regGCEA_DSM_CNTL2B_BASE_IDX 0
+#define regGCEA_GL2C_XBR_CREDITS 0x14ba
+#define regGCEA_GL2C_XBR_CREDITS_BASE_IDX 0
+#define regGCEA_GL2C_XBR_MAXBURST 0x14bb
+#define regGCEA_GL2C_XBR_MAXBURST_BASE_IDX 0
+#define regGCEA_PROBE_CNTL 0x14bc
+#define regGCEA_PROBE_CNTL_BASE_IDX 0
+#define regGCEA_PROBE_MAP 0x14bd
+#define regGCEA_PROBE_MAP_BASE_IDX 0
+#define regGCEA_ERR_STATUS 0x14be
+#define regGCEA_ERR_STATUS_BASE_IDX 0
+#define regGCEA_MISC2 0x14bf
+#define regGCEA_MISC2_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec3
+// base address: 0x9dc0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0 0x1512
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1 0x1513
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0 0x1514
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1 0x1515
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS 0x1516
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0
+#define regGCEA_RRET_MEM_RESERVE 0x1518
+#define regGCEA_RRET_MEM_RESERVE_BASE_IDX 0
+#define regGCEA_EDC_CNT3 0x151a
+#define regGCEA_EDC_CNT3_BASE_IDX 0
+#define regGCEA_SDP_ENABLE 0x151e
+#define regGCEA_SDP_ENABLE_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec2
+// base address: 0x9c80
+#define regSPI_PQEV_CTRL 0x14c0
+#define regSPI_PQEV_CTRL_BASE_IDX 0
+#define regSPI_EXP_THROTTLE_CTRL 0x14c3
+#define regSPI_EXP_THROTTLE_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_rmi_rmidec
+// base address: 0x2e200
+#define regRMI_GENERAL_CNTL 0x1880
+#define regRMI_GENERAL_CNTL_BASE_IDX 1
+#define regRMI_GENERAL_CNTL1 0x1881
+#define regRMI_GENERAL_CNTL1_BASE_IDX 1
+#define regRMI_GENERAL_STATUS 0x1882
+#define regRMI_GENERAL_STATUS_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS0 0x1883
+#define regRMI_SUBBLOCK_STATUS0_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS1 0x1884
+#define regRMI_SUBBLOCK_STATUS1_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS2 0x1885
+#define regRMI_SUBBLOCK_STATUS2_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS3 0x1886
+#define regRMI_SUBBLOCK_STATUS3_BASE_IDX 1
+#define regRMI_XBAR_CONFIG 0x1887
+#define regRMI_XBAR_CONFIG_BASE_IDX 1
+#define regRMI_PROBE_POP_LOGIC_CNTL 0x1888
+#define regRMI_PROBE_POP_LOGIC_CNTL_BASE_IDX 1
+#define regRMI_UTC_XNACK_N_MISC_CNTL 0x1889
+#define regRMI_UTC_XNACK_N_MISC_CNTL_BASE_IDX 1
+#define regRMI_DEMUX_CNTL 0x188a
+#define regRMI_DEMUX_CNTL_BASE_IDX 1
+#define regRMI_UTCL1_CNTL1 0x188b
+#define regRMI_UTCL1_CNTL1_BASE_IDX 1
+#define regRMI_UTCL1_CNTL2 0x188c
+#define regRMI_UTCL1_CNTL2_BASE_IDX 1
+#define regRMI_UTC_UNIT_CONFIG 0x188d
+#define regRMI_UTC_UNIT_CONFIG_BASE_IDX 1
+#define regRMI_TCIW_FORMATTER0_CNTL 0x188e
+#define regRMI_TCIW_FORMATTER0_CNTL_BASE_IDX 1
+#define regRMI_TCIW_FORMATTER1_CNTL 0x188f
+#define regRMI_TCIW_FORMATTER1_CNTL_BASE_IDX 1
+#define regRMI_SCOREBOARD_CNTL 0x1890
+#define regRMI_SCOREBOARD_CNTL_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS0 0x1891
+#define regRMI_SCOREBOARD_STATUS0_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS1 0x1892
+#define regRMI_SCOREBOARD_STATUS1_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS2 0x1893
+#define regRMI_SCOREBOARD_STATUS2_BASE_IDX 1
+#define regRMI_XBAR_ARBITER_CONFIG 0x1894
+#define regRMI_XBAR_ARBITER_CONFIG_BASE_IDX 1
+#define regRMI_XBAR_ARBITER_CONFIG_1 0x1895
+#define regRMI_XBAR_ARBITER_CONFIG_1_BASE_IDX 1
+#define regRMI_CLOCK_CNTRL 0x1896
+#define regRMI_CLOCK_CNTRL_BASE_IDX 1
+#define regRMI_UTCL1_STATUS 0x1897
+#define regRMI_UTCL1_STATUS_BASE_IDX 1
+#define regRMI_RB_GLX_CID_MAP 0x1898
+#define regRMI_RB_GLX_CID_MAP_BASE_IDX 1
+#define regRMI_XNACK_DEBUG 0x189e
+#define regRMI_XNACK_DEBUG_BASE_IDX 1
+#define regRMI_SPARE 0x189f
+#define regRMI_SPARE_BASE_IDX 1
+#define regRMI_SPARE_1 0x18a0
+#define regRMI_SPARE_1_BASE_IDX 1
+#define regRMI_SPARE_2 0x18a1
+#define regRMI_SPARE_2_BASE_IDX 1
+#define regCC_RMI_REDUNDANCY 0x18a2
+#define regCC_RMI_REDUNDANCY_BASE_IDX 1
+
+
+// addressBlock: gc_pmmdec
+// base address: 0x9f80
+#define regGCR_PIO_CNTL 0x1580
+#define regGCR_PIO_CNTL_BASE_IDX 0
+#define regGCR_PIO_DATA 0x1581
+#define regGCR_PIO_DATA_BASE_IDX 0
+#define regPMM_CNTL 0x1582
+#define regPMM_CNTL_BASE_IDX 0
+#define regPMM_STATUS 0x1583
+#define regPMM_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_utcl1dec
+// base address: 0x9fb0
+#define regUTCL1_CTRL_1 0x158c
+#define regUTCL1_CTRL_1_BASE_IDX 0
+#define regUTCL1_ALOG 0x158f
+#define regUTCL1_ALOG_BASE_IDX 0
+#define regUTCL1_STATUS 0x1594
+#define regUTCL1_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_gcvmsharedpfdec
+// base address: 0xa000
+#define regGCMC_VM_NB_MMIOBASE 0x15a0
+#define regGCMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define regGCMC_VM_NB_MMIOLIMIT 0x15a1
+#define regGCMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define regGCMC_VM_NB_PCI_CTRL 0x15a2
+#define regGCMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define regGCMC_VM_NB_PCI_ARB 0x15a3
+#define regGCMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define regGCMC_VM_NB_TOP_OF_DRAM_SLOT1 0x15a4
+#define regGCMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define regGCMC_VM_NB_LOWER_TOP_OF_DRAM2 0x15a5
+#define regGCMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define regGCMC_VM_NB_UPPER_TOP_OF_DRAM2 0x15a6
+#define regGCMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define regGCMC_VM_FB_OFFSET 0x15a7
+#define regGCMC_VM_FB_OFFSET_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x15a8
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x15a9
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define regGCMC_VM_STEERING 0x15aa
+#define regGCMC_VM_STEERING_BASE_IDX 0
+#define regGCMC_SHARED_VIRT_RESET_REQ 0x15ab
+#define regGCMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regGCMC_MEM_POWER_LS 0x15ac
+#define regGCMC_MEM_POWER_LS_BASE_IDX 0
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x15ad
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x15ae
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_START 0x15af
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_END 0x15b0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_APT_CNTL 0x15b1
+#define regGCMC_VM_APT_CNTL_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_START 0x15b2
+#define regGCMC_VM_LOCAL_FB_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_END 0x15b3
+#define regGCMC_VM_LOCAL_FB_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL 0x15b4
+#define regGCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define regGCUTCL2_ICG_CTRL 0x15b5
+#define regGCUTCL2_ICG_CTRL_BASE_IDX 0
+#define regGCMC_SHARED_ACTIVE_FCN_ID 0x15b6
+#define regGCMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define regGCUTCL2_CGTT_BUSY_CTRL 0x15b7
+#define regGCUTCL2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regGCMC_VM_FB_NOALLOC_CNTL 0x15b8
+#define regGCMC_VM_FB_NOALLOC_CNTL_BASE_IDX 0
+#define regGCUTCL2_HARVEST_BYPASS_GROUPS 0x15b9
+#define regGCUTCL2_HARVEST_BYPASS_GROUPS_BASE_IDX 0
+#define regGCUTCL2_GROUP_RET_FAULT_STATUS 0x15bb
+#define regGCUTCL2_GROUP_RET_FAULT_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2pfdec
+// base address: 0xa080
+#define regGCVM_L2_CNTL 0x15c0
+#define regGCVM_L2_CNTL_BASE_IDX 0
+#define regGCVM_L2_CNTL2 0x15c1
+#define regGCVM_L2_CNTL2_BASE_IDX 0
+#define regGCVM_L2_CNTL3 0x15c2
+#define regGCVM_L2_CNTL3_BASE_IDX 0
+#define regGCVM_L2_STATUS 0x15c3
+#define regGCVM_L2_STATUS_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_CNTL 0x15c4
+#define regGCVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x15c5
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x15c6
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_CNTL 0x15c7
+#define regGCVM_INVALIDATE_CNTL_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_CNTL 0x15c8
+#define regGCVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_CNTL2 0x15c9
+#define regGCVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL3 0x15ca
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL4 0x15cb
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_STATUS 0x15cc
+#define regGCVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_LO32 0x15cd
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_HI32 0x15ce
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x15cf
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x15d0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x15d2
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x15d3
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x15d4
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x15d5
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x15d6
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x15d7
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define regGCVM_L2_CNTL4 0x15d8
+#define regGCVM_L2_CNTL4_BASE_IDX 0
+#define regGCVM_L2_MM_GROUP_RT_CLASSES 0x15d9
+#define regGCVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID 0x15da
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID2 0x15db
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define regGCVM_L2_CACHE_PARITY_CNTL 0x15dc
+#define regGCVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define regGCVM_L2_ICG_CTRL 0x15dd
+#define regGCVM_L2_ICG_CTRL_BASE_IDX 0
+#define regGCVM_L2_CNTL5 0x15de
+#define regGCVM_L2_CNTL5_BASE_IDX 0
+#define regGCVM_L2_GCR_CNTL 0x15df
+#define regGCVM_L2_GCR_CNTL_BASE_IDX 0
+#define regGCVML2_WALKER_MACRO_THROTTLE_TIME 0x15e0
+#define regGCVML2_WALKER_MACRO_THROTTLE_TIME_BASE_IDX 0
+#define regGCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT 0x15e1
+#define regGCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT_BASE_IDX 0
+#define regGCVML2_WALKER_MICRO_THROTTLE_TIME 0x15e2
+#define regGCVML2_WALKER_MICRO_THROTTLE_TIME_BASE_IDX 0
+#define regGCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT 0x15e3
+#define regGCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT_BASE_IDX 0
+#define regGCVM_L2_CGTT_BUSY_CTRL 0x15e4
+#define regGCVM_L2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regGCVM_L2_PTE_CACHE_DUMP_CNTL 0x15e5
+#define regGCVM_L2_PTE_CACHE_DUMP_CNTL_BASE_IDX 0
+#define regGCVM_L2_PTE_CACHE_DUMP_READ 0x15e6
+#define regGCVM_L2_PTE_CACHE_DUMP_READ_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_MASKS 0x15e9
+#define regGCVM_L2_BANK_SELECT_MASKS_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC 0x15ea
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC 0x15eb
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC 0x15ec
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC_BASE_IDX 0
+#define regGCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT 0x15ed
+#define regGCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT_BASE_IDX 0
+#define regGCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ 0x15ee
+#define regGCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ_BASE_IDX 0
+
+
+// addressBlock: gc_gcatcl2dec
+// base address: 0xa300
+#define regGC_ATC_L2_CNTL 0x1660
+#define regGC_ATC_L2_CNTL_BASE_IDX 0
+#define regGC_ATC_L2_CNTL2 0x1661
+#define regGC_ATC_L2_CNTL2_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA0 0x1664
+#define regGC_ATC_L2_CACHE_DATA0_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA1 0x1665
+#define regGC_ATC_L2_CACHE_DATA1_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA2 0x1666
+#define regGC_ATC_L2_CACHE_DATA2_BASE_IDX 0
+#define regGC_ATC_L2_CNTL3 0x1667
+#define regGC_ATC_L2_CNTL3_BASE_IDX 0
+#define regGC_ATC_L2_STATUS 0x1668
+#define regGC_ATC_L2_STATUS_BASE_IDX 0
+#define regGC_ATC_L2_STATUS2 0x1669
+#define regGC_ATC_L2_STATUS2_BASE_IDX 0
+#define regGC_ATC_L2_MISC_CG 0x166a
+#define regGC_ATC_L2_MISC_CG_BASE_IDX 0
+#define regGC_ATC_L2_MEM_POWER_LS 0x166b
+#define regGC_ATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define regGC_ATC_L2_SDPPORT_CTRL 0x166f
+#define regGC_ATC_L2_SDPPORT_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_gcl2tlbpfdec
+// base address: 0xa380
+#define regGCL2TLB_TLB0_STATUS 0x1681
+#define regGCL2TLB_TLB0_STATUS_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO 0x1683
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI 0x1684
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO 0x1685
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI 0x1686
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI_BASE_IDX 0
+
+
+// addressBlock: gc_gcvmsharedvcdec
+// base address: 0xa3a0
+#define regGCMC_VM_FB_LOCATION_BASE 0x1688
+#define regGCMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regGCMC_VM_FB_LOCATION_TOP 0x1689
+#define regGCMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define regGCMC_VM_AGP_TOP 0x168a
+#define regGCMC_VM_AGP_TOP_BASE_IDX 0
+#define regGCMC_VM_AGP_BOT 0x168b
+#define regGCMC_VM_AGP_BOT_BASE_IDX 0
+#define regGCMC_VM_AGP_BASE 0x168c
+#define regGCMC_VM_AGP_BASE_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x168d
+#define regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x168e
+#define regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define regGCMC_VM_MX_L1_TLB_CNTL 0x168f
+#define regGCMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2vcdec
+// base address: 0xa3e0
+#define regGCVM_CONTEXT0_CNTL 0x1698
+#define regGCVM_CONTEXT0_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT1_CNTL 0x1699
+#define regGCVM_CONTEXT1_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT2_CNTL 0x169a
+#define regGCVM_CONTEXT2_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT3_CNTL 0x169b
+#define regGCVM_CONTEXT3_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT4_CNTL 0x169c
+#define regGCVM_CONTEXT4_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT5_CNTL 0x169d
+#define regGCVM_CONTEXT5_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT6_CNTL 0x169e
+#define regGCVM_CONTEXT6_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT7_CNTL 0x169f
+#define regGCVM_CONTEXT7_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT8_CNTL 0x16a0
+#define regGCVM_CONTEXT8_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT9_CNTL 0x16a1
+#define regGCVM_CONTEXT9_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT10_CNTL 0x16a2
+#define regGCVM_CONTEXT10_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT11_CNTL 0x16a3
+#define regGCVM_CONTEXT11_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT12_CNTL 0x16a4
+#define regGCVM_CONTEXT12_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT13_CNTL 0x16a5
+#define regGCVM_CONTEXT13_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT14_CNTL 0x16a6
+#define regGCVM_CONTEXT14_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT15_CNTL 0x16a7
+#define regGCVM_CONTEXT15_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXTS_DISABLE 0x16a8
+#define regGCVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_SEM 0x16a9
+#define regGCVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_SEM 0x16aa
+#define regGCVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_SEM 0x16ab
+#define regGCVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_SEM 0x16ac
+#define regGCVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_SEM 0x16ad
+#define regGCVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_SEM 0x16ae
+#define regGCVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_SEM 0x16af
+#define regGCVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_SEM 0x16b0
+#define regGCVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_SEM 0x16b1
+#define regGCVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_SEM 0x16b2
+#define regGCVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_SEM 0x16b3
+#define regGCVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_SEM 0x16b4
+#define regGCVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_SEM 0x16b5
+#define regGCVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_SEM 0x16b6
+#define regGCVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_SEM 0x16b7
+#define regGCVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_SEM 0x16b8
+#define regGCVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_SEM 0x16b9
+#define regGCVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_SEM 0x16ba
+#define regGCVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_REQ 0x16bb
+#define regGCVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_REQ 0x16bc
+#define regGCVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_REQ 0x16bd
+#define regGCVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_REQ 0x16be
+#define regGCVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_REQ 0x16bf
+#define regGCVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_REQ 0x16c0
+#define regGCVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_REQ 0x16c1
+#define regGCVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_REQ 0x16c2
+#define regGCVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_REQ 0x16c3
+#define regGCVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_REQ 0x16c4
+#define regGCVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_REQ 0x16c5
+#define regGCVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_REQ 0x16c6
+#define regGCVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_REQ 0x16c7
+#define regGCVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_REQ 0x16c8
+#define regGCVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_REQ 0x16c9
+#define regGCVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_REQ 0x16ca
+#define regGCVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_REQ 0x16cb
+#define regGCVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_REQ 0x16cc
+#define regGCVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ACK 0x16cd
+#define regGCVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ACK 0x16ce
+#define regGCVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ACK 0x16cf
+#define regGCVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ACK 0x16d0
+#define regGCVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ACK 0x16d1
+#define regGCVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ACK 0x16d2
+#define regGCVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ACK 0x16d3
+#define regGCVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ACK 0x16d4
+#define regGCVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ACK 0x16d5
+#define regGCVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ACK 0x16d6
+#define regGCVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ACK 0x16d7
+#define regGCVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ACK 0x16d8
+#define regGCVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ACK 0x16d9
+#define regGCVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ACK 0x16da
+#define regGCVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ACK 0x16db
+#define regGCVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ACK 0x16dc
+#define regGCVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ACK 0x16dd
+#define regGCVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ACK 0x16de
+#define regGCVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x16df
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x16e0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x16e1
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x16e2
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x16e3
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x16e4
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x16e5
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x16e6
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x16e7
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x16e8
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x16e9
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x16ea
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x16eb
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x16ec
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x16ed
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x16ee
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x16ef
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x16f0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x16f1
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x16f2
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x16f3
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x16f4
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x16f5
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x16f6
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x16f7
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x16f8
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x16f9
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x16fa
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x16fb
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x16fc
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x16fd
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x16fe
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x16ff
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x1700
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x1701
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x1702
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x1703
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x1704
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x1705
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x1706
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x1707
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x1708
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x1709
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x170a
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x170b
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x170c
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x170d
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x170e
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x170f
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x1710
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x1711
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x1712
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x1713
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x1714
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x1715
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x1716
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x1717
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x1718
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x1719
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x171a
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x171b
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x171c
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x171d
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x171e
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x171f
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x1720
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x1721
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x1722
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x1723
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x1724
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x1725
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x1726
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x1727
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x1728
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x1729
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x172a
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x172b
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x172c
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x172d
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x172e
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x172f
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x1730
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x1731
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x1732
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x1733
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x1734
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x1735
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x1736
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x1737
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x1738
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x1739
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x173a
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x173b
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x173c
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x173d
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x173e
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x173f
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x1740
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x1741
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x1742
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x1743
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x1744
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x1745
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x1746
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x1747
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x1748
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x1749
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x174a
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x174b
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x174c
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x174d
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x174e
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x174f
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x1750
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x1751
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x1752
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x1753
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x1754
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x1755
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x1756
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x1757
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x1758
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x1759
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x175a
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x175b
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x175c
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x175d
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x175e
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x175f
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x1760
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x1761
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x1762
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1763
+#define regGCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1764
+#define regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1765
+#define regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1766
+#define regGCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1767
+#define regGCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1768
+#define regGCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1769
+#define regGCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176a
+#define regGCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176b
+#define regGCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176c
+#define regGCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176d
+#define regGCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176e
+#define regGCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176f
+#define regGCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1770
+#define regGCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1771
+#define regGCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1772
+#define regGCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1773
+#define regGCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2perfddec
+// base address: 0x35380
+#define regGCVML2_PERFCOUNTER2_0_LO 0x34e0
+#define regGCVML2_PERFCOUNTER2_0_LO_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_LO 0x34e1
+#define regGCVML2_PERFCOUNTER2_1_LO_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_HI 0x34e2
+#define regGCVML2_PERFCOUNTER2_0_HI_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_HI 0x34e3
+#define regGCVML2_PERFCOUNTER2_1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2prdec
+// base address: 0x35390
+#define regGCMC_VM_L2_PERFCOUNTER_LO 0x34e4
+#define regGCMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER_HI 0x34e5
+#define regGCMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_LO 0x34e6
+#define regGCUTCL2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_HI 0x34e7
+#define regGCUTCL2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2perfddec
+// base address: 0x353d0
+#define regGC_ATC_L2_PERFCOUNTER2_LO 0x34f4
+#define regGC_ATC_L2_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_HI 0x34f5
+#define regGC_ATC_L2_PERFCOUNTER2_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2pfcntrdec
+// base address: 0x353e0
+#define regGC_ATC_L2_PERFCOUNTER_LO 0x34f8
+#define regGC_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER_HI 0x34f9
+#define regGC_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbprdec
+// base address: 0x353e8
+#define regGCL2TLB_PERFCOUNTER_LO 0x34fa
+#define regGCL2TLB_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER_HI 0x34fb
+#define regGCL2TLB_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2perfsdec
+// base address: 0x37480
+#define regGCVML2_PERFCOUNTER2_0_SELECT 0x3d20
+#define regGCVML2_PERFCOUNTER2_0_SELECT_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_SELECT 0x3d21
+#define regGCVML2_PERFCOUNTER2_1_SELECT_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_SELECT1 0x3d22
+#define regGCVML2_PERFCOUNTER2_0_SELECT1_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_SELECT1 0x3d23
+#define regGCVML2_PERFCOUNTER2_1_SELECT1_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_MODE 0x3d24
+#define regGCVML2_PERFCOUNTER2_0_MODE_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_MODE 0x3d25
+#define regGCVML2_PERFCOUNTER2_1_MODE_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2pldec
+// base address: 0x374c0
+#define regGCMC_VM_L2_PERFCOUNTER0_CFG 0x3d30
+#define regGCMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER1_CFG 0x3d31
+#define regGCMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER2_CFG 0x3d32
+#define regGCMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER3_CFG 0x3d33
+#define regGCMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER4_CFG 0x3d34
+#define regGCMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER5_CFG 0x3d35
+#define regGCMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER6_CFG 0x3d36
+#define regGCMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER7_CFG 0x3d37
+#define regGCMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3d38
+#define regGCMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER0_CFG 0x3d39
+#define regGCUTCL2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER1_CFG 0x3d3a
+#define regGCUTCL2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER2_CFG 0x3d3b
+#define regGCUTCL2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER3_CFG 0x3d3c
+#define regGCUTCL2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_RSLT_CNTL 0x3d3d
+#define regGCUTCL2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2perfsdec
+// base address: 0x37500
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT 0x3d40
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT1 0x3d41
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_MODE 0x3d42
+#define regGC_ATC_L2_PERFCOUNTER2_MODE_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2pfcntldec
+// base address: 0x37510
+#define regGC_ATC_L2_PERFCOUNTER0_CFG 0x3d44
+#define regGC_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER1_CFG 0x3d45
+#define regGC_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x3d46
+#define regGC_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbpldec
+// base address: 0x37528
+#define regGCL2TLB_PERFCOUNTER0_CFG 0x3d4a
+#define regGCL2TLB_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER1_CFG 0x3d4b
+#define regGCL2TLB_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER2_CFG 0x3d4c
+#define regGCL2TLB_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER3_CFG 0x3d4d
+#define regGCL2TLB_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL 0x3d4e
+#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2pspdec
+// base address: 0x3f900
+#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41
+#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID_BASE_IDX 1
+#define regGCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE 0x5e43
+#define regGCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE_BASE_IDX 1
+#define regGCVM_IOMMU_CONTROL_REGISTER 0x5e44
+#define regGCVM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define regGCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x5e45
+#define regGCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define regGCVM_IOMMU_MMIO_CNTRL_1 0x5e46
+#define regGCVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_0 0x5e47
+#define regGCMC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_1 0x5e48
+#define regGCMC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_2 0x5e49
+#define regGCMC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_3 0x5e4a
+#define regGCMC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_4 0x5e4b
+#define regGCMC_VM_MARC_BASE_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_5 0x5e4c
+#define regGCMC_VM_MARC_BASE_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_6 0x5e4d
+#define regGCMC_VM_MARC_BASE_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_7 0x5e4e
+#define regGCMC_VM_MARC_BASE_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_8 0x5e4f
+#define regGCMC_VM_MARC_BASE_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_9 0x5e50
+#define regGCMC_VM_MARC_BASE_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_10 0x5e51
+#define regGCMC_VM_MARC_BASE_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_11 0x5e52
+#define regGCMC_VM_MARC_BASE_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_12 0x5e53
+#define regGCMC_VM_MARC_BASE_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_13 0x5e54
+#define regGCMC_VM_MARC_BASE_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_14 0x5e55
+#define regGCMC_VM_MARC_BASE_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_15 0x5e56
+#define regGCMC_VM_MARC_BASE_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_0 0x5e57
+#define regGCMC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_1 0x5e58
+#define regGCMC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_2 0x5e59
+#define regGCMC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_3 0x5e5a
+#define regGCMC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_4 0x5e5b
+#define regGCMC_VM_MARC_BASE_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_5 0x5e5c
+#define regGCMC_VM_MARC_BASE_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_6 0x5e5d
+#define regGCMC_VM_MARC_BASE_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_7 0x5e5e
+#define regGCMC_VM_MARC_BASE_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_8 0x5e5f
+#define regGCMC_VM_MARC_BASE_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_9 0x5e60
+#define regGCMC_VM_MARC_BASE_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_10 0x5e61
+#define regGCMC_VM_MARC_BASE_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_11 0x5e62
+#define regGCMC_VM_MARC_BASE_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_12 0x5e63
+#define regGCMC_VM_MARC_BASE_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_13 0x5e64
+#define regGCMC_VM_MARC_BASE_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_14 0x5e65
+#define regGCMC_VM_MARC_BASE_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_15 0x5e66
+#define regGCMC_VM_MARC_BASE_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_0 0x5e67
+#define regGCMC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_1 0x5e68
+#define regGCMC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_2 0x5e69
+#define regGCMC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_3 0x5e6a
+#define regGCMC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_4 0x5e6b
+#define regGCMC_VM_MARC_RELOC_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_5 0x5e6c
+#define regGCMC_VM_MARC_RELOC_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_6 0x5e6d
+#define regGCMC_VM_MARC_RELOC_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_7 0x5e6e
+#define regGCMC_VM_MARC_RELOC_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_8 0x5e6f
+#define regGCMC_VM_MARC_RELOC_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_9 0x5e70
+#define regGCMC_VM_MARC_RELOC_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_10 0x5e71
+#define regGCMC_VM_MARC_RELOC_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_11 0x5e72
+#define regGCMC_VM_MARC_RELOC_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_12 0x5e73
+#define regGCMC_VM_MARC_RELOC_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_13 0x5e74
+#define regGCMC_VM_MARC_RELOC_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_14 0x5e75
+#define regGCMC_VM_MARC_RELOC_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_15 0x5e76
+#define regGCMC_VM_MARC_RELOC_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_0 0x5e77
+#define regGCMC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_1 0x5e78
+#define regGCMC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_2 0x5e79
+#define regGCMC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_3 0x5e7a
+#define regGCMC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_4 0x5e7b
+#define regGCMC_VM_MARC_RELOC_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_5 0x5e7c
+#define regGCMC_VM_MARC_RELOC_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_6 0x5e7d
+#define regGCMC_VM_MARC_RELOC_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_7 0x5e7e
+#define regGCMC_VM_MARC_RELOC_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_8 0x5e7f
+#define regGCMC_VM_MARC_RELOC_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_9 0x5e80
+#define regGCMC_VM_MARC_RELOC_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_10 0x5e81
+#define regGCMC_VM_MARC_RELOC_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_11 0x5e82
+#define regGCMC_VM_MARC_RELOC_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_12 0x5e83
+#define regGCMC_VM_MARC_RELOC_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_13 0x5e84
+#define regGCMC_VM_MARC_RELOC_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_14 0x5e85
+#define regGCMC_VM_MARC_RELOC_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_15 0x5e86
+#define regGCMC_VM_MARC_RELOC_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_0 0x5e87
+#define regGCMC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_1 0x5e88
+#define regGCMC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_2 0x5e89
+#define regGCMC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_3 0x5e8a
+#define regGCMC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_4 0x5e8b
+#define regGCMC_VM_MARC_LEN_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_5 0x5e8c
+#define regGCMC_VM_MARC_LEN_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_6 0x5e8d
+#define regGCMC_VM_MARC_LEN_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_7 0x5e8e
+#define regGCMC_VM_MARC_LEN_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_8 0x5e8f
+#define regGCMC_VM_MARC_LEN_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_9 0x5e90
+#define regGCMC_VM_MARC_LEN_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_10 0x5e91
+#define regGCMC_VM_MARC_LEN_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_11 0x5e92
+#define regGCMC_VM_MARC_LEN_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_12 0x5e93
+#define regGCMC_VM_MARC_LEN_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_13 0x5e94
+#define regGCMC_VM_MARC_LEN_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_14 0x5e95
+#define regGCMC_VM_MARC_LEN_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_15 0x5e96
+#define regGCMC_VM_MARC_LEN_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_0 0x5e97
+#define regGCMC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_1 0x5e98
+#define regGCMC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_2 0x5e99
+#define regGCMC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_3 0x5e9a
+#define regGCMC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_4 0x5e9b
+#define regGCMC_VM_MARC_LEN_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_5 0x5e9c
+#define regGCMC_VM_MARC_LEN_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_6 0x5e9d
+#define regGCMC_VM_MARC_LEN_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_7 0x5e9e
+#define regGCMC_VM_MARC_LEN_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_8 0x5e9f
+#define regGCMC_VM_MARC_LEN_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_9 0x5ea0
+#define regGCMC_VM_MARC_LEN_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_10 0x5ea1
+#define regGCMC_VM_MARC_LEN_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_11 0x5ea2
+#define regGCMC_VM_MARC_LEN_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_12 0x5ea3
+#define regGCMC_VM_MARC_LEN_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_13 0x5ea4
+#define regGCMC_VM_MARC_LEN_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_14 0x5ea5
+#define regGCMC_VM_MARC_LEN_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_15 0x5ea6
+#define regGCMC_VM_MARC_LEN_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_0 0x5ea7
+#define regGCMC_VM_MARC_PFVF_MAPPING_0_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_1 0x5ea8
+#define regGCMC_VM_MARC_PFVF_MAPPING_1_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_2 0x5ea9
+#define regGCMC_VM_MARC_PFVF_MAPPING_2_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_3 0x5eaa
+#define regGCMC_VM_MARC_PFVF_MAPPING_3_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_4 0x5eab
+#define regGCMC_VM_MARC_PFVF_MAPPING_4_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_5 0x5eac
+#define regGCMC_VM_MARC_PFVF_MAPPING_5_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_6 0x5ead
+#define regGCMC_VM_MARC_PFVF_MAPPING_6_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_7 0x5eae
+#define regGCMC_VM_MARC_PFVF_MAPPING_7_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_8 0x5eaf
+#define regGCMC_VM_MARC_PFVF_MAPPING_8_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_9 0x5eb0
+#define regGCMC_VM_MARC_PFVF_MAPPING_9_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_10 0x5eb1
+#define regGCMC_VM_MARC_PFVF_MAPPING_10_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_11 0x5eb2
+#define regGCMC_VM_MARC_PFVF_MAPPING_11_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_12 0x5eb3
+#define regGCMC_VM_MARC_PFVF_MAPPING_12_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_13 0x5eb4
+#define regGCMC_VM_MARC_PFVF_MAPPING_13_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_14 0x5eb5
+#define regGCMC_VM_MARC_PFVF_MAPPING_14_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_15 0x5eb6
+#define regGCMC_VM_MARC_PFVF_MAPPING_15_BASE_IDX 1
+#define regGCUTC_TRANSLATION_FAULT_CNTL0 0x5eb7
+#define regGCUTC_TRANSLATION_FAULT_CNTL0_BASE_IDX 1
+#define regGCUTC_TRANSLATION_FAULT_CNTL1 0x5eb8
+#define regGCUTC_TRANSLATION_FAULT_CNTL1_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbpspdec
+// base address: 0x3fb10
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL 0x5ec4
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_shdec
+// base address: 0xb000
+#define regSPI_SHADER_PGM_RSRC4_PS 0x19a1
+#define regSPI_SHADER_PGM_RSRC4_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_PS 0x19a6
+#define regSPI_SHADER_PGM_CHKSUM_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_PS 0x19a7
+#define regSPI_SHADER_PGM_RSRC3_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_PS 0x19a8
+#define regSPI_SHADER_PGM_LO_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_PS 0x19a9
+#define regSPI_SHADER_PGM_HI_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_PS 0x19aa
+#define regSPI_SHADER_PGM_RSRC1_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_PS 0x19ab
+#define regSPI_SHADER_PGM_RSRC2_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_0 0x19ac
+#define regSPI_SHADER_USER_DATA_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_1 0x19ad
+#define regSPI_SHADER_USER_DATA_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_2 0x19ae
+#define regSPI_SHADER_USER_DATA_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_3 0x19af
+#define regSPI_SHADER_USER_DATA_PS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_4 0x19b0
+#define regSPI_SHADER_USER_DATA_PS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_5 0x19b1
+#define regSPI_SHADER_USER_DATA_PS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_6 0x19b2
+#define regSPI_SHADER_USER_DATA_PS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_7 0x19b3
+#define regSPI_SHADER_USER_DATA_PS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_8 0x19b4
+#define regSPI_SHADER_USER_DATA_PS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_9 0x19b5
+#define regSPI_SHADER_USER_DATA_PS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_10 0x19b6
+#define regSPI_SHADER_USER_DATA_PS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_11 0x19b7
+#define regSPI_SHADER_USER_DATA_PS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_12 0x19b8
+#define regSPI_SHADER_USER_DATA_PS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_13 0x19b9
+#define regSPI_SHADER_USER_DATA_PS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_14 0x19ba
+#define regSPI_SHADER_USER_DATA_PS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_15 0x19bb
+#define regSPI_SHADER_USER_DATA_PS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_16 0x19bc
+#define regSPI_SHADER_USER_DATA_PS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_17 0x19bd
+#define regSPI_SHADER_USER_DATA_PS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_18 0x19be
+#define regSPI_SHADER_USER_DATA_PS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_19 0x19bf
+#define regSPI_SHADER_USER_DATA_PS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_20 0x19c0
+#define regSPI_SHADER_USER_DATA_PS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_21 0x19c1
+#define regSPI_SHADER_USER_DATA_PS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_22 0x19c2
+#define regSPI_SHADER_USER_DATA_PS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_23 0x19c3
+#define regSPI_SHADER_USER_DATA_PS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_24 0x19c4
+#define regSPI_SHADER_USER_DATA_PS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_25 0x19c5
+#define regSPI_SHADER_USER_DATA_PS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_26 0x19c6
+#define regSPI_SHADER_USER_DATA_PS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_27 0x19c7
+#define regSPI_SHADER_USER_DATA_PS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_28 0x19c8
+#define regSPI_SHADER_USER_DATA_PS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_29 0x19c9
+#define regSPI_SHADER_USER_DATA_PS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_30 0x19ca
+#define regSPI_SHADER_USER_DATA_PS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_31 0x19cb
+#define regSPI_SHADER_USER_DATA_PS_31_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_PS 0x19d0
+#define regSPI_SHADER_REQ_CTRL_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_0 0x19d2
+#define regSPI_SHADER_USER_ACCUM_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_1 0x19d3
+#define regSPI_SHADER_USER_ACCUM_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_2 0x19d4
+#define regSPI_SHADER_USER_ACCUM_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_3 0x19d5
+#define regSPI_SHADER_USER_ACCUM_PS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_GS 0x1a20
+#define regSPI_SHADER_PGM_CHKSUM_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_GS 0x1a21
+#define regSPI_SHADER_PGM_RSRC4_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS 0x1a22
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS 0x1a23
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES_GS 0x1a24
+#define regSPI_SHADER_PGM_LO_ES_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES_GS 0x1a25
+#define regSPI_SHADER_PGM_HI_ES_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_GS 0x1a27
+#define regSPI_SHADER_PGM_RSRC3_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_GS 0x1a28
+#define regSPI_SHADER_PGM_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_GS 0x1a29
+#define regSPI_SHADER_PGM_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_GS 0x1a2a
+#define regSPI_SHADER_PGM_RSRC1_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_GS 0x1a2b
+#define regSPI_SHADER_PGM_RSRC2_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_0 0x1a2c
+#define regSPI_SHADER_USER_DATA_GS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_1 0x1a2d
+#define regSPI_SHADER_USER_DATA_GS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_2 0x1a2e
+#define regSPI_SHADER_USER_DATA_GS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_3 0x1a2f
+#define regSPI_SHADER_USER_DATA_GS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_4 0x1a30
+#define regSPI_SHADER_USER_DATA_GS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_5 0x1a31
+#define regSPI_SHADER_USER_DATA_GS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_6 0x1a32
+#define regSPI_SHADER_USER_DATA_GS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_7 0x1a33
+#define regSPI_SHADER_USER_DATA_GS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_8 0x1a34
+#define regSPI_SHADER_USER_DATA_GS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_9 0x1a35
+#define regSPI_SHADER_USER_DATA_GS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_10 0x1a36
+#define regSPI_SHADER_USER_DATA_GS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_11 0x1a37
+#define regSPI_SHADER_USER_DATA_GS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_12 0x1a38
+#define regSPI_SHADER_USER_DATA_GS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_13 0x1a39
+#define regSPI_SHADER_USER_DATA_GS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_14 0x1a3a
+#define regSPI_SHADER_USER_DATA_GS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_15 0x1a3b
+#define regSPI_SHADER_USER_DATA_GS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_16 0x1a3c
+#define regSPI_SHADER_USER_DATA_GS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_17 0x1a3d
+#define regSPI_SHADER_USER_DATA_GS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_18 0x1a3e
+#define regSPI_SHADER_USER_DATA_GS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_19 0x1a3f
+#define regSPI_SHADER_USER_DATA_GS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_20 0x1a40
+#define regSPI_SHADER_USER_DATA_GS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_21 0x1a41
+#define regSPI_SHADER_USER_DATA_GS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_22 0x1a42
+#define regSPI_SHADER_USER_DATA_GS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_23 0x1a43
+#define regSPI_SHADER_USER_DATA_GS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_24 0x1a44
+#define regSPI_SHADER_USER_DATA_GS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_25 0x1a45
+#define regSPI_SHADER_USER_DATA_GS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_26 0x1a46
+#define regSPI_SHADER_USER_DATA_GS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_27 0x1a47
+#define regSPI_SHADER_USER_DATA_GS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_28 0x1a48
+#define regSPI_SHADER_USER_DATA_GS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_29 0x1a49
+#define regSPI_SHADER_USER_DATA_GS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_30 0x1a4a
+#define regSPI_SHADER_USER_DATA_GS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_31 0x1a4b
+#define regSPI_SHADER_USER_DATA_GS_31_BASE_IDX 0
+#define regSPI_SHADER_GS_MESHLET_DIM 0x1a4c
+#define regSPI_SHADER_GS_MESHLET_DIM_BASE_IDX 0
+#define regSPI_SHADER_GS_MESHLET_EXP_ALLOC 0x1a4d
+#define regSPI_SHADER_GS_MESHLET_EXP_ALLOC_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_ESGS 0x1a50
+#define regSPI_SHADER_REQ_CTRL_ESGS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_0 0x1a52
+#define regSPI_SHADER_USER_ACCUM_ESGS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_1 0x1a53
+#define regSPI_SHADER_USER_ACCUM_ESGS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_2 0x1a54
+#define regSPI_SHADER_USER_ACCUM_ESGS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_3 0x1a55
+#define regSPI_SHADER_USER_ACCUM_ESGS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES 0x1a68
+#define regSPI_SHADER_PGM_LO_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES 0x1a69
+#define regSPI_SHADER_PGM_HI_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_HS 0x1aa0
+#define regSPI_SHADER_PGM_CHKSUM_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_HS 0x1aa1
+#define regSPI_SHADER_PGM_RSRC4_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS 0x1aa2
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS 0x1aa3
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS_HS 0x1aa4
+#define regSPI_SHADER_PGM_LO_LS_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS_HS 0x1aa5
+#define regSPI_SHADER_PGM_HI_LS_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_HS 0x1aa7
+#define regSPI_SHADER_PGM_RSRC3_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_HS 0x1aa8
+#define regSPI_SHADER_PGM_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_HS 0x1aa9
+#define regSPI_SHADER_PGM_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_HS 0x1aaa
+#define regSPI_SHADER_PGM_RSRC1_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_HS 0x1aab
+#define regSPI_SHADER_PGM_RSRC2_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_0 0x1aac
+#define regSPI_SHADER_USER_DATA_HS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_1 0x1aad
+#define regSPI_SHADER_USER_DATA_HS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_2 0x1aae
+#define regSPI_SHADER_USER_DATA_HS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_3 0x1aaf
+#define regSPI_SHADER_USER_DATA_HS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_4 0x1ab0
+#define regSPI_SHADER_USER_DATA_HS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_5 0x1ab1
+#define regSPI_SHADER_USER_DATA_HS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_6 0x1ab2
+#define regSPI_SHADER_USER_DATA_HS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_7 0x1ab3
+#define regSPI_SHADER_USER_DATA_HS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_8 0x1ab4
+#define regSPI_SHADER_USER_DATA_HS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_9 0x1ab5
+#define regSPI_SHADER_USER_DATA_HS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_10 0x1ab6
+#define regSPI_SHADER_USER_DATA_HS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_11 0x1ab7
+#define regSPI_SHADER_USER_DATA_HS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_12 0x1ab8
+#define regSPI_SHADER_USER_DATA_HS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_13 0x1ab9
+#define regSPI_SHADER_USER_DATA_HS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_14 0x1aba
+#define regSPI_SHADER_USER_DATA_HS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_15 0x1abb
+#define regSPI_SHADER_USER_DATA_HS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_16 0x1abc
+#define regSPI_SHADER_USER_DATA_HS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_17 0x1abd
+#define regSPI_SHADER_USER_DATA_HS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_18 0x1abe
+#define regSPI_SHADER_USER_DATA_HS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_19 0x1abf
+#define regSPI_SHADER_USER_DATA_HS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_20 0x1ac0
+#define regSPI_SHADER_USER_DATA_HS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_21 0x1ac1
+#define regSPI_SHADER_USER_DATA_HS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_22 0x1ac2
+#define regSPI_SHADER_USER_DATA_HS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_23 0x1ac3
+#define regSPI_SHADER_USER_DATA_HS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_24 0x1ac4
+#define regSPI_SHADER_USER_DATA_HS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_25 0x1ac5
+#define regSPI_SHADER_USER_DATA_HS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_26 0x1ac6
+#define regSPI_SHADER_USER_DATA_HS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_27 0x1ac7
+#define regSPI_SHADER_USER_DATA_HS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_28 0x1ac8
+#define regSPI_SHADER_USER_DATA_HS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_29 0x1ac9
+#define regSPI_SHADER_USER_DATA_HS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_30 0x1aca
+#define regSPI_SHADER_USER_DATA_HS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_31 0x1acb
+#define regSPI_SHADER_USER_DATA_HS_31_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_LSHS 0x1ad0
+#define regSPI_SHADER_REQ_CTRL_LSHS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_0 0x1ad2
+#define regSPI_SHADER_USER_ACCUM_LSHS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_1 0x1ad3
+#define regSPI_SHADER_USER_ACCUM_LSHS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_2 0x1ad4
+#define regSPI_SHADER_USER_ACCUM_LSHS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_3 0x1ad5
+#define regSPI_SHADER_USER_ACCUM_LSHS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS 0x1ae8
+#define regSPI_SHADER_PGM_LO_LS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS 0x1ae9
+#define regSPI_SHADER_PGM_HI_LS_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INITIATOR 0x1ba0
+#define regCOMPUTE_DISPATCH_INITIATOR_BASE_IDX 0
+#define regCOMPUTE_DIM_X 0x1ba1
+#define regCOMPUTE_DIM_X_BASE_IDX 0
+#define regCOMPUTE_DIM_Y 0x1ba2
+#define regCOMPUTE_DIM_Y_BASE_IDX 0
+#define regCOMPUTE_DIM_Z 0x1ba3
+#define regCOMPUTE_DIM_Z_BASE_IDX 0
+#define regCOMPUTE_START_X 0x1ba4
+#define regCOMPUTE_START_X_BASE_IDX 0
+#define regCOMPUTE_START_Y 0x1ba5
+#define regCOMPUTE_START_Y_BASE_IDX 0
+#define regCOMPUTE_START_Z 0x1ba6
+#define regCOMPUTE_START_Z_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_X 0x1ba7
+#define regCOMPUTE_NUM_THREAD_X_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Y 0x1ba8
+#define regCOMPUTE_NUM_THREAD_Y_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Z 0x1ba9
+#define regCOMPUTE_NUM_THREAD_Z_BASE_IDX 0
+#define regCOMPUTE_PIPELINESTAT_ENABLE 0x1baa
+#define regCOMPUTE_PIPELINESTAT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PERFCOUNT_ENABLE 0x1bab
+#define regCOMPUTE_PERFCOUNT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PGM_LO 0x1bac
+#define regCOMPUTE_PGM_LO_BASE_IDX 0
+#define regCOMPUTE_PGM_HI 0x1bad
+#define regCOMPUTE_PGM_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO 0x1bae
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI 0x1baf
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO 0x1bb0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI 0x1bb1
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC1 0x1bb2
+#define regCOMPUTE_PGM_RSRC1_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC2 0x1bb3
+#define regCOMPUTE_PGM_RSRC2_BASE_IDX 0
+#define regCOMPUTE_VMID 0x1bb4
+#define regCOMPUTE_VMID_BASE_IDX 0
+#define regCOMPUTE_RESOURCE_LIMITS 0x1bb5
+#define regCOMPUTE_RESOURCE_LIMITS_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE0 0x1bb6
+#define regCOMPUTE_DESTINATION_EN_SE0_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0 0x1bb6
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE1 0x1bb7
+#define regCOMPUTE_DESTINATION_EN_SE1_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1 0x1bb7
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1_BASE_IDX 0
+#define regCOMPUTE_TMPRING_SIZE 0x1bb8
+#define regCOMPUTE_TMPRING_SIZE_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE2 0x1bb9
+#define regCOMPUTE_DESTINATION_EN_SE2_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2 0x1bb9
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE3 0x1bba
+#define regCOMPUTE_DESTINATION_EN_SE3_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3 0x1bba
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define regCOMPUTE_RESTART_X 0x1bbb
+#define regCOMPUTE_RESTART_X_BASE_IDX 0
+#define regCOMPUTE_RESTART_Y 0x1bbc
+#define regCOMPUTE_RESTART_Y_BASE_IDX 0
+#define regCOMPUTE_RESTART_Z 0x1bbd
+#define regCOMPUTE_RESTART_Z_BASE_IDX 0
+#define regCOMPUTE_THREAD_TRACE_ENABLE 0x1bbe
+#define regCOMPUTE_THREAD_TRACE_ENABLE_BASE_IDX 0
+#define regCOMPUTE_MISC_RESERVED 0x1bbf
+#define regCOMPUTE_MISC_RESERVED_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_ID 0x1bc0
+#define regCOMPUTE_DISPATCH_ID_BASE_IDX 0
+#define regCOMPUTE_THREADGROUP_ID 0x1bc1
+#define regCOMPUTE_THREADGROUP_ID_BASE_IDX 0
+#define regCOMPUTE_REQ_CTRL 0x1bc2
+#define regCOMPUTE_REQ_CTRL_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_0 0x1bc4
+#define regCOMPUTE_USER_ACCUM_0_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_1 0x1bc5
+#define regCOMPUTE_USER_ACCUM_1_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_2 0x1bc6
+#define regCOMPUTE_USER_ACCUM_2_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_3 0x1bc7
+#define regCOMPUTE_USER_ACCUM_3_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC3 0x1bc8
+#define regCOMPUTE_PGM_RSRC3_BASE_IDX 0
+#define regCOMPUTE_DDID_INDEX 0x1bc9
+#define regCOMPUTE_DDID_INDEX_BASE_IDX 0
+#define regCOMPUTE_SHADER_CHKSUM 0x1bca
+#define regCOMPUTE_SHADER_CHKSUM_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE4 0x1bcb
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE5 0x1bcc
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE6 0x1bcd
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE7 0x1bce
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INTERLEAVE 0x1bcf
+#define regCOMPUTE_DISPATCH_INTERLEAVE_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH 0x1bd0
+#define regCOMPUTE_RELAUNCH_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO 0x1bd1
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI 0x1bd2
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH2 0x1bd3
+#define regCOMPUTE_RELAUNCH2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_0 0x1be0
+#define regCOMPUTE_USER_DATA_0_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_1 0x1be1
+#define regCOMPUTE_USER_DATA_1_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_2 0x1be2
+#define regCOMPUTE_USER_DATA_2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_3 0x1be3
+#define regCOMPUTE_USER_DATA_3_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_4 0x1be4
+#define regCOMPUTE_USER_DATA_4_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_5 0x1be5
+#define regCOMPUTE_USER_DATA_5_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_6 0x1be6
+#define regCOMPUTE_USER_DATA_6_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_7 0x1be7
+#define regCOMPUTE_USER_DATA_7_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_8 0x1be8
+#define regCOMPUTE_USER_DATA_8_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_9 0x1be9
+#define regCOMPUTE_USER_DATA_9_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_10 0x1bea
+#define regCOMPUTE_USER_DATA_10_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_11 0x1beb
+#define regCOMPUTE_USER_DATA_11_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_12 0x1bec
+#define regCOMPUTE_USER_DATA_12_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_13 0x1bed
+#define regCOMPUTE_USER_DATA_13_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_14 0x1bee
+#define regCOMPUTE_USER_DATA_14_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_15 0x1bef
+#define regCOMPUTE_USER_DATA_15_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_TUNNEL 0x1c1d
+#define regCOMPUTE_DISPATCH_TUNNEL_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_END 0x1c1e
+#define regCOMPUTE_DISPATCH_END_BASE_IDX 0
+#define regCOMPUTE_NOWHERE 0x1c1f
+#define regCOMPUTE_NOWHERE_BASE_IDX 0
+#define regSH_RESERVED_REG0 0x1c20
+#define regSH_RESERVED_REG0_BASE_IDX 0
+#define regSH_RESERVED_REG1 0x1c21
+#define regSH_RESERVED_REG1_BASE_IDX 0
+
+
+// addressBlock: gc_cppdec
+// base address: 0xc080
+#define regCP_CU_MASK_ADDR_LO 0x1dd2
+#define regCP_CU_MASK_ADDR_LO_BASE_IDX 0
+#define regCP_CU_MASK_ADDR_HI 0x1dd3
+#define regCP_CU_MASK_ADDR_HI_BASE_IDX 0
+#define regCP_CU_MASK_CNTL 0x1dd4
+#define regCP_CU_MASK_CNTL_BASE_IDX 0
+#define regCP_EOPQ_WAIT_TIME 0x1dd5
+#define regCP_EOPQ_WAIT_TIME_BASE_IDX 0
+#define regCP_CPC_MGCG_SYNC_CNTL 0x1dd6
+#define regCP_CPC_MGCG_SYNC_CNTL_BASE_IDX 0
+#define regCPC_INT_INFO 0x1dd7
+#define regCPC_INT_INFO_BASE_IDX 0
+#define regCP_VIRT_STATUS 0x1dd8
+#define regCP_VIRT_STATUS_BASE_IDX 0
+#define regCPC_INT_ADDR 0x1dd9
+#define regCPC_INT_ADDR_BASE_IDX 0
+#define regCPC_INT_PASID 0x1dda
+#define regCPC_INT_PASID_BASE_IDX 0
+#define regCP_GFX_ERROR 0x1ddb
+#define regCP_GFX_ERROR_BASE_IDX 0
+#define regCPG_UTCL1_CNTL 0x1ddc
+#define regCPG_UTCL1_CNTL_BASE_IDX 0
+#define regCPC_UTCL1_CNTL 0x1ddd
+#define regCPC_UTCL1_CNTL_BASE_IDX 0
+#define regCPF_UTCL1_CNTL 0x1dde
+#define regCPF_UTCL1_CNTL_BASE_IDX 0
+#define regCP_AQL_SMM_STATUS 0x1ddf
+#define regCP_AQL_SMM_STATUS_BASE_IDX 0
+#define regCP_RB0_BASE 0x1de0
+#define regCP_RB0_BASE_BASE_IDX 0
+#define regCP_RB_BASE 0x1de0
+#define regCP_RB_BASE_BASE_IDX 0
+#define regCP_RB0_CNTL 0x1de1
+#define regCP_RB0_CNTL_BASE_IDX 0
+#define regCP_RB_CNTL 0x1de1
+#define regCP_RB_CNTL_BASE_IDX 0
+#define regCP_RB_RPTR_WR 0x1de2
+#define regCP_RB_RPTR_WR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR 0x1de3
+#define regCP_RB0_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR 0x1de3
+#define regCP_RB_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR_HI 0x1de4
+#define regCP_RB0_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR_HI 0x1de4
+#define regCP_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB0_BUFSZ_MASK 0x1de5
+#define regCP_RB0_BUFSZ_MASK_BASE_IDX 0
+#define regCP_RB_BUFSZ_MASK 0x1de5
+#define regCP_RB_BUFSZ_MASK_BASE_IDX 0
+#define regGC_PRIV_MODE 0x1de8
+#define regGC_PRIV_MODE_BASE_IDX 0
+#define regCP_INT_CNTL 0x1de9
+#define regCP_INT_CNTL_BASE_IDX 0
+#define regCP_INT_STATUS 0x1dea
+#define regCP_INT_STATUS_BASE_IDX 0
+#define regCP_DEVICE_ID 0x1deb
+#define regCP_DEVICE_ID_BASE_IDX 0
+#define regCP_ME0_PIPE_PRIORITY_CNTS 0x1dec
+#define regCP_ME0_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_RING_PRIORITY_CNTS 0x1dec
+#define regCP_RING_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME0_PIPE0_PRIORITY 0x1ded
+#define regCP_ME0_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_RING0_PRIORITY 0x1ded
+#define regCP_RING0_PRIORITY_BASE_IDX 0
+#define regCP_ME0_PIPE1_PRIORITY 0x1dee
+#define regCP_ME0_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_RING1_PRIORITY 0x1dee
+#define regCP_RING1_PRIORITY_BASE_IDX 0
+#define regCP_FATAL_ERROR 0x1df0
+#define regCP_FATAL_ERROR_BASE_IDX 0
+#define regCP_RB_VMID 0x1df1
+#define regCP_RB_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE0_VMID 0x1df2
+#define regCP_ME0_PIPE0_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE1_VMID 0x1df3
+#define regCP_ME0_PIPE1_VMID_BASE_IDX 0
+#define regCP_RB0_WPTR 0x1df4
+#define regCP_RB0_WPTR_BASE_IDX 0
+#define regCP_RB_WPTR 0x1df4
+#define regCP_RB_WPTR_BASE_IDX 0
+#define regCP_RB0_WPTR_HI 0x1df5
+#define regCP_RB0_WPTR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_HI 0x1df5
+#define regCP_RB_WPTR_HI_BASE_IDX 0
+#define regCP_RB1_WPTR 0x1df6
+#define regCP_RB1_WPTR_BASE_IDX 0
+#define regCP_RB1_WPTR_HI 0x1df7
+#define regCP_RB1_WPTR_HI_BASE_IDX 0
+#define regCP_PROCESS_QUANTUM 0x1df9
+#define regCP_PROCESS_QUANTUM_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_LOWER 0x1dfa
+#define regCP_RB_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_UPPER 0x1dfb
+#define regCP_RB_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_LOWER 0x1dfc
+#define regCP_MEC_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_UPPER 0x1dfd
+#define regCP_MEC_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCPG_UTCL1_ERROR 0x1dfe
+#define regCPG_UTCL1_ERROR_BASE_IDX 0
+#define regCPC_UTCL1_ERROR 0x1dff
+#define regCPC_UTCL1_ERROR_BASE_IDX 0
+#define regCP_RB1_BASE 0x1e00
+#define regCP_RB1_BASE_BASE_IDX 0
+#define regCP_RB1_CNTL 0x1e01
+#define regCP_RB1_CNTL_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR 0x1e02
+#define regCP_RB1_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR_HI 0x1e03
+#define regCP_RB1_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB1_BUFSZ_MASK 0x1e04
+#define regCP_RB1_BUFSZ_MASK_BASE_IDX 0
+#define regCP_INT_CNTL_RING0 0x1e0a
+#define regCP_INT_CNTL_RING0_BASE_IDX 0
+#define regCP_INT_CNTL_RING1 0x1e0b
+#define regCP_INT_CNTL_RING1_BASE_IDX 0
+#define regCP_INT_STATUS_RING0 0x1e0d
+#define regCP_INT_STATUS_RING0_BASE_IDX 0
+#define regCP_INT_STATUS_RING1 0x1e0e
+#define regCP_INT_STATUS_RING1_BASE_IDX 0
+#define regCP_ME_F32_INTERRUPT 0x1e13
+#define regCP_ME_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PFP_F32_INTERRUPT 0x1e14
+#define regCP_PFP_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC1_F32_INTERRUPT 0x1e16
+#define regCP_MEC1_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC2_F32_INTERRUPT 0x1e17
+#define regCP_MEC2_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PWR_CNTL 0x1e18
+#define regCP_PWR_CNTL_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE 0x1e1a
+#define regCP_ECC_FIRSTOCCURRENCE_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING0 0x1e1b
+#define regCP_ECC_FIRSTOCCURRENCE_RING0_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING1 0x1e1c
+#define regCP_ECC_FIRSTOCCURRENCE_RING1_BASE_IDX 0
+#define regGB_EDC_MODE 0x1e1e
+#define regGB_EDC_MODE_BASE_IDX 0
+#define regCP_DEBUG 0x1e1f
+#define regCP_DEBUG_BASE_IDX 0
+#define regCP_CPF_DEBUG 0x1e20
+#define regCP_CPF_DEBUG_BASE_IDX 0
+#define regCP_CPC_DEBUG 0x1e21
+#define regCP_CPC_DEBUG_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL 0x1e23
+#define regCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL1 0x1e24
+#define regCP_PQ_WPTR_POLL_CNTL1_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_CNTL 0x1e25
+#define regCP_ME1_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_CNTL 0x1e26
+#define regCP_ME1_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_CNTL 0x1e27
+#define regCP_ME1_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_CNTL 0x1e28
+#define regCP_ME1_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_CNTL 0x1e29
+#define regCP_ME2_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_CNTL 0x1e2a
+#define regCP_ME2_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_CNTL 0x1e2b
+#define regCP_ME2_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_CNTL 0x1e2c
+#define regCP_ME2_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_STATUS 0x1e2d
+#define regCP_ME1_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_STATUS 0x1e2e
+#define regCP_ME1_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_STATUS 0x1e2f
+#define regCP_ME1_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_STATUS 0x1e30
+#define regCP_ME1_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_STATUS 0x1e31
+#define regCP_ME2_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_STATUS 0x1e32
+#define regCP_ME2_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_STATUS 0x1e33
+#define regCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_STATUS 0x1e34
+#define regCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_INT_STAT_DEBUG 0x1e35
+#define regCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_ME2_INT_STAT_DEBUG 0x1e36
+#define regCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_GFX_QUEUE_INDEX 0x1e37
+#define regCP_GFX_QUEUE_INDEX_BASE_IDX 0
+#define regCC_GC_EDC_CONFIG 0x1e38
+#define regCC_GC_EDC_CONFIG_BASE_IDX 0
+#define regCP_ME1_PIPE_PRIORITY_CNTS 0x1e39
+#define regCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME1_PIPE0_PRIORITY 0x1e3a
+#define regCP_ME1_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE1_PRIORITY 0x1e3b
+#define regCP_ME1_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE2_PRIORITY 0x1e3c
+#define regCP_ME1_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE3_PRIORITY 0x1e3d
+#define regCP_ME1_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE_PRIORITY_CNTS 0x1e3e
+#define regCP_ME2_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME2_PIPE0_PRIORITY 0x1e3f
+#define regCP_ME2_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE1_PRIORITY 0x1e40
+#define regCP_ME2_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE2_PRIORITY 0x1e41
+#define regCP_ME2_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE3_PRIORITY 0x1e42
+#define regCP_ME2_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START 0x1e44
+#define regCP_PFP_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START 0x1e45
+#define regCP_ME_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC1_PRGRM_CNTR_START 0x1e46
+#define regCP_MEC1_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC2_PRGRM_CNTR_START 0x1e47
+#define regCP_MEC2_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START 0x1e49
+#define regCP_PFP_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START 0x1e4a
+#define regCP_ME_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC1_INTR_ROUTINE_START 0x1e4b
+#define regCP_MEC1_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC2_INTR_ROUTINE_START 0x1e4c
+#define regCP_MEC2_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_CONTEXT_CNTL 0x1e4d
+#define regCP_CONTEXT_CNTL_BASE_IDX 0
+#define regCP_MAX_CONTEXT 0x1e4e
+#define regCP_MAX_CONTEXT_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME1 0x1e4f
+#define regCP_IQ_WAIT_TIME1_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME2 0x1e50
+#define regCP_IQ_WAIT_TIME2_BASE_IDX 0
+#define regCP_RB0_BASE_HI 0x1e51
+#define regCP_RB0_BASE_HI_BASE_IDX 0
+#define regCP_RB1_BASE_HI 0x1e52
+#define regCP_RB1_BASE_HI_BASE_IDX 0
+#define regCP_VMID_RESET 0x1e53
+#define regCP_VMID_RESET_BASE_IDX 0
+#define regCPC_INT_CNTL 0x1e54
+#define regCPC_INT_CNTL_BASE_IDX 0
+#define regCPC_INT_STATUS 0x1e55
+#define regCPC_INT_STATUS_BASE_IDX 0
+#define regCP_VMID_PREEMPT 0x1e56
+#define regCP_VMID_PREEMPT_BASE_IDX 0
+#define regCPC_INT_CNTX_ID 0x1e57
+#define regCPC_INT_CNTX_ID_BASE_IDX 0
+#define regCP_PQ_STATUS 0x1e58
+#define regCP_PQ_STATUS_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START_HI 0x1e59
+#define regCP_PFP_PRGRM_CNTR_START_HI_BASE_IDX 0
+#define regCP_MAX_DRAW_COUNT 0x1e5c
+#define regCP_MAX_DRAW_COUNT_BASE_IDX 0
+#define regCP_MEC1_F32_INT_DIS 0x1e5d
+#define regCP_MEC1_F32_INT_DIS_BASE_IDX 0
+#define regCP_MEC2_F32_INT_DIS 0x1e5e
+#define regCP_MEC2_F32_INT_DIS_BASE_IDX 0
+#define regCP_VMID_STATUS 0x1e5f
+#define regCP_VMID_STATUS_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO 0x1e60
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI 0x1e61
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_CONTROL 0x1e62
+#define regCPC_SUSPEND_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCPC_SUSPEND_CNTL_STACK_OFFSET 0x1e63
+#define regCPC_SUSPEND_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCPC_SUSPEND_CNTL_STACK_SIZE 0x1e64
+#define regCPC_SUSPEND_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCPC_SUSPEND_WG_STATE_OFFSET 0x1e65
+#define regCPC_SUSPEND_WG_STATE_OFFSET_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_SIZE 0x1e66
+#define regCPC_SUSPEND_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCPC_OS_PIPES 0x1e67
+#define regCPC_OS_PIPES_BASE_IDX 0
+#define regCP_SUSPEND_RESUME_REQ 0x1e68
+#define regCP_SUSPEND_RESUME_REQ_BASE_IDX 0
+#define regCP_SUSPEND_CNTL 0x1e69
+#define regCP_SUSPEND_CNTL_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME3 0x1e6a
+#define regCP_IQ_WAIT_TIME3_BASE_IDX 0
+#define regCPC_DDID_BASE_ADDR_LO 0x1e6b
+#define regCPC_DDID_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_DDID_BASE_ADDR_LO 0x1e6b
+#define regCP_DDID_BASE_ADDR_LO_BASE_IDX 0
+#define regCPC_DDID_BASE_ADDR_HI 0x1e6c
+#define regCPC_DDID_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_DDID_BASE_ADDR_HI 0x1e6c
+#define regCP_DDID_BASE_ADDR_HI_BASE_IDX 0
+#define regCPC_DDID_CNTL 0x1e6d
+#define regCPC_DDID_CNTL_BASE_IDX 0
+#define regCP_DDID_CNTL 0x1e6d
+#define regCP_DDID_CNTL_BASE_IDX 0
+#define regCP_GFX_DDID_INFLIGHT_COUNT 0x1e6e
+#define regCP_GFX_DDID_INFLIGHT_COUNT_BASE_IDX 0
+#define regCP_GFX_DDID_WPTR 0x1e6f
+#define regCP_GFX_DDID_WPTR_BASE_IDX 0
+#define regCP_GFX_DDID_RPTR 0x1e70
+#define regCP_GFX_DDID_RPTR_BASE_IDX 0
+#define regCP_GFX_DDID_DELTA_RPT_COUNT 0x1e71
+#define regCP_GFX_DDID_DELTA_RPT_COUNT_BASE_IDX 0
+#define regCP_GFX_HPD_STATUS0 0x1e72
+#define regCP_GFX_HPD_STATUS0_BASE_IDX 0
+#define regCP_GFX_HPD_CONTROL0 0x1e73
+#define regCP_GFX_HPD_CONTROL0_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_LO 0x1e74
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_LO_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_HI 0x1e75
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_HI_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_LO 0x1e76
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_LO_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_HI 0x1e77
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_HI_BASE_IDX 0
+#define regCP_GFX_INDEX_MUTEX 0x1e78
+#define regCP_GFX_INDEX_MUTEX_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START_HI 0x1e79
+#define regCP_ME_PRGRM_CNTR_START_HI_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START_HI 0x1e7a
+#define regCP_PFP_INTR_ROUTINE_START_HI_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START_HI 0x1e7b
+#define regCP_ME_INTR_ROUTINE_START_HI_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR 0x1e7e
+#define regCP_GFX_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR_HI 0x1e7f
+#define regCP_GFX_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_GFX_HQD_ACTIVE 0x1e80
+#define regCP_GFX_HQD_ACTIVE_BASE_IDX 0
+#define regCP_GFX_HQD_VMID 0x1e81
+#define regCP_GFX_HQD_VMID_BASE_IDX 0
+#define regCP_GFX_HQD_QUEUE_PRIORITY 0x1e84
+#define regCP_GFX_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_GFX_HQD_QUANTUM 0x1e85
+#define regCP_GFX_HQD_QUANTUM_BASE_IDX 0
+#define regCP_GFX_HQD_BASE 0x1e86
+#define regCP_GFX_HQD_BASE_BASE_IDX 0
+#define regCP_GFX_HQD_BASE_HI 0x1e87
+#define regCP_GFX_HQD_BASE_HI_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR 0x1e88
+#define regCP_GFX_HQD_RPTR_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR_ADDR 0x1e89
+#define regCP_GFX_HQD_RPTR_ADDR_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR_ADDR_HI 0x1e8a
+#define regCP_GFX_HQD_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_LO 0x1e8b
+#define regCP_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_HI 0x1e8c
+#define regCP_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL 0x1e8d
+#define regCP_RB_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_GFX_HQD_OFFSET 0x1e8e
+#define regCP_GFX_HQD_OFFSET_BASE_IDX 0
+#define regCP_GFX_HQD_CNTL 0x1e8f
+#define regCP_GFX_HQD_CNTL_BASE_IDX 0
+#define regCP_GFX_HQD_CSMD_RPTR 0x1e90
+#define regCP_GFX_HQD_CSMD_RPTR_BASE_IDX 0
+#define regCP_GFX_HQD_WPTR 0x1e91
+#define regCP_GFX_HQD_WPTR_BASE_IDX 0
+#define regCP_GFX_HQD_WPTR_HI 0x1e92
+#define regCP_GFX_HQD_WPTR_HI_BASE_IDX 0
+#define regCP_GFX_HQD_DEQUEUE_REQUEST 0x1e93
+#define regCP_GFX_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_GFX_HQD_MAPPED 0x1e94
+#define regCP_GFX_HQD_MAPPED_BASE_IDX 0
+#define regCP_GFX_HQD_QUE_MGR_CONTROL 0x1e95
+#define regCP_GFX_HQD_QUE_MGR_CONTROL_BASE_IDX 0
+#define regCP_GFX_HQD_IQ_TIMER 0x1e96
+#define regCP_GFX_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_GFX_HQD_HQ_STATUS0 0x1e98
+#define regCP_GFX_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_GFX_HQD_HQ_CONTROL0 0x1e99
+#define regCP_GFX_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_GFX_MQD_CONTROL 0x1e9a
+#define regCP_GFX_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_CONTROL 0x1e9f
+#define regCP_HQD_GFX_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_STATUS 0x1ea0
+#define regCP_HQD_GFX_STATUS_BASE_IDX 0
+#define regCP_DMA_WATCH0_ADDR_LO 0x1ec0
+#define regCP_DMA_WATCH0_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH0_ADDR_HI 0x1ec1
+#define regCP_DMA_WATCH0_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH0_MASK 0x1ec2
+#define regCP_DMA_WATCH0_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH0_CNTL 0x1ec3
+#define regCP_DMA_WATCH0_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH1_ADDR_LO 0x1ec4
+#define regCP_DMA_WATCH1_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH1_ADDR_HI 0x1ec5
+#define regCP_DMA_WATCH1_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH1_MASK 0x1ec6
+#define regCP_DMA_WATCH1_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH1_CNTL 0x1ec7
+#define regCP_DMA_WATCH1_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH2_ADDR_LO 0x1ec8
+#define regCP_DMA_WATCH2_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH2_ADDR_HI 0x1ec9
+#define regCP_DMA_WATCH2_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH2_MASK 0x1eca
+#define regCP_DMA_WATCH2_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH2_CNTL 0x1ecb
+#define regCP_DMA_WATCH2_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH3_ADDR_LO 0x1ecc
+#define regCP_DMA_WATCH3_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH3_ADDR_HI 0x1ecd
+#define regCP_DMA_WATCH3_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH3_MASK 0x1ece
+#define regCP_DMA_WATCH3_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH3_CNTL 0x1ecf
+#define regCP_DMA_WATCH3_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT_ADDR_LO 0x1ed0
+#define regCP_DMA_WATCH_STAT_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT_ADDR_HI 0x1ed1
+#define regCP_DMA_WATCH_STAT_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT 0x1ed2
+#define regCP_DMA_WATCH_STAT_BASE_IDX 0
+#define regCP_PFP_JT_STAT 0x1ed3
+#define regCP_PFP_JT_STAT_BASE_IDX 0
+#define regCP_MEC_JT_STAT 0x1ed5
+#define regCP_MEC_JT_STAT_BASE_IDX 0
+#define regCP_CPC_BUSY_HYSTERESIS 0x1edb
+#define regCP_CPC_BUSY_HYSTERESIS_BASE_IDX 0
+#define regCP_CPF_BUSY_HYSTERESIS1 0x1edc
+#define regCP_CPF_BUSY_HYSTERESIS1_BASE_IDX 0
+#define regCP_CPF_BUSY_HYSTERESIS2 0x1edd
+#define regCP_CPF_BUSY_HYSTERESIS2_BASE_IDX 0
+#define regCP_CPG_BUSY_HYSTERESIS1 0x1ede
+#define regCP_CPG_BUSY_HYSTERESIS1_BASE_IDX 0
+#define regCP_CPG_BUSY_HYSTERESIS2 0x1edf
+#define regCP_CPG_BUSY_HYSTERESIS2_BASE_IDX 0
+#define regCP_RB_DOORBELL_CLEAR 0x1f28
+#define regCP_RB_DOORBELL_CLEAR_BASE_IDX 0
+#define regCP_RB0_ACTIVE 0x1f40
+#define regCP_RB0_ACTIVE_BASE_IDX 0
+#define regCP_RB_ACTIVE 0x1f40
+#define regCP_RB_ACTIVE_BASE_IDX 0
+#define regCP_RB1_ACTIVE 0x1f41
+#define regCP_RB1_ACTIVE_BASE_IDX 0
+#define regCP_RB_STATUS 0x1f43
+#define regCP_RB_STATUS_BASE_IDX 0
+#define regCPG_RCIU_CAM_INDEX 0x1f44
+#define regCPG_RCIU_CAM_INDEX_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA 0x1f45
+#define regCPG_RCIU_CAM_DATA_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE0 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE0_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE1 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE1_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE2 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE2_BASE_IDX 0
+#define regCP_GPU_TIMESTAMP_OFFSET_LO 0x1f4c
+#define regCP_GPU_TIMESTAMP_OFFSET_LO_BASE_IDX 0
+#define regCP_GPU_TIMESTAMP_OFFSET_HI 0x1f4d
+#define regCP_GPU_TIMESTAMP_OFFSET_HI_BASE_IDX 0
+#define regCP_SDMA_DMA_DONE 0x1f4e
+#define regCP_SDMA_DMA_DONE_BASE_IDX 0
+#define regCP_PFP_SDMA_CS 0x1f4f
+#define regCP_PFP_SDMA_CS_BASE_IDX 0
+#define regCP_ME_SDMA_CS 0x1f50
+#define regCP_ME_SDMA_CS_BASE_IDX 0
+#define regCPF_GCR_CNTL 0x1f53
+#define regCPF_GCR_CNTL_BASE_IDX 0
+#define regCPG_UTCL1_STATUS 0x1f54
+#define regCPG_UTCL1_STATUS_BASE_IDX 0
+#define regCPC_UTCL1_STATUS 0x1f55
+#define regCPC_UTCL1_STATUS_BASE_IDX 0
+#define regCPF_UTCL1_STATUS 0x1f56
+#define regCPF_UTCL1_STATUS_BASE_IDX 0
+#define regCP_SD_CNTL 0x1f57
+#define regCP_SD_CNTL_BASE_IDX 0
+#define regCP_SOFT_RESET_CNTL 0x1f59
+#define regCP_SOFT_RESET_CNTL_BASE_IDX 0
+#define regCP_CPC_GFX_CNTL 0x1f5a
+#define regCP_CPC_GFX_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec
+// base address: 0xc700
+#define regSPI_ARB_PRIORITY 0x1f60
+#define regSPI_ARB_PRIORITY_BASE_IDX 0
+#define regSPI_ARB_CYCLES_0 0x1f61
+#define regSPI_ARB_CYCLES_0_BASE_IDX 0
+#define regSPI_ARB_CYCLES_1 0x1f62
+#define regSPI_ARB_CYCLES_1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_GFX 0x1f67
+#define regSPI_WCL_PIPE_PERCENT_GFX_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_HP3D 0x1f68
+#define regSPI_WCL_PIPE_PERCENT_HP3D_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS0 0x1f69
+#define regSPI_WCL_PIPE_PERCENT_CS0_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS1 0x1f6a
+#define regSPI_WCL_PIPE_PERCENT_CS1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS2 0x1f6b
+#define regSPI_WCL_PIPE_PERCENT_CS2_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS3 0x1f6c
+#define regSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS4 0x1f6d
+#define regSPI_WCL_PIPE_PERCENT_CS4_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS5 0x1f6e
+#define regSPI_WCL_PIPE_PERCENT_CS5_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS6 0x1f6f
+#define regSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS7 0x1f70
+#define regSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0
+#define regSPI_USER_ACCUM_VMID_CNTL 0x1f71
+#define regSPI_USER_ACCUM_VMID_CNTL_BASE_IDX 0
+#define regSPI_GDBG_PER_VMID_CNTL 0x1f72
+#define regSPI_GDBG_PER_VMID_CNTL_BASE_IDX 0
+#define regSPI_COMPUTE_QUEUE_RESET 0x1f73
+#define regSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
+#define regSPI_COMPUTE_WF_CTX_SAVE 0x1f74
+#define regSPI_COMPUTE_WF_CTX_SAVE_BASE_IDX 0
+
+
+// addressBlock: gc_cpphqddec
+// base address: 0xc800
+#define regCP_HPD_UTCL1_CNTL 0x1fa3
+#define regCP_HPD_UTCL1_CNTL_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR 0x1fa7
+#define regCP_HPD_UTCL1_ERROR_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR_ADDR 0x1fa8
+#define regCP_HPD_UTCL1_ERROR_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR 0x1fa9
+#define regCP_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR_HI 0x1faa
+#define regCP_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_ACTIVE 0x1fab
+#define regCP_HQD_ACTIVE_BASE_IDX 0
+#define regCP_HQD_VMID 0x1fac
+#define regCP_HQD_VMID_BASE_IDX 0
+#define regCP_HQD_PERSISTENT_STATE 0x1fad
+#define regCP_HQD_PERSISTENT_STATE_BASE_IDX 0
+#define regCP_HQD_PIPE_PRIORITY 0x1fae
+#define regCP_HQD_PIPE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUEUE_PRIORITY 0x1faf
+#define regCP_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUANTUM 0x1fb0
+#define regCP_HQD_QUANTUM_BASE_IDX 0
+#define regCP_HQD_PQ_BASE 0x1fb1
+#define regCP_HQD_PQ_BASE_BASE_IDX 0
+#define regCP_HQD_PQ_BASE_HI 0x1fb2
+#define regCP_HQD_PQ_BASE_HI_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR 0x1fb3
+#define regCP_HQD_PQ_RPTR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR 0x1fb4
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x1fb5
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR 0x1fb6
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x1fb7
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_DOORBELL_CONTROL 0x1fb8
+#define regCP_HQD_PQ_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_CONTROL 0x1fba
+#define regCP_HQD_PQ_CONTROL_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR 0x1fbb
+#define regCP_HQD_IB_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR_HI 0x1fbc
+#define regCP_HQD_IB_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_IB_RPTR 0x1fbd
+#define regCP_HQD_IB_RPTR_BASE_IDX 0
+#define regCP_HQD_IB_CONTROL 0x1fbe
+#define regCP_HQD_IB_CONTROL_BASE_IDX 0
+#define regCP_HQD_IQ_TIMER 0x1fbf
+#define regCP_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_HQD_IQ_RPTR 0x1fc0
+#define regCP_HQD_IQ_RPTR_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_REQUEST 0x1fc1
+#define regCP_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_HQD_DMA_OFFLOAD 0x1fc2
+#define regCP_HQD_DMA_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_OFFLOAD 0x1fc2
+#define regCP_HQD_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_SEMA_CMD 0x1fc3
+#define regCP_HQD_SEMA_CMD_BASE_IDX 0
+#define regCP_HQD_MSG_TYPE 0x1fc4
+#define regCP_HQD_MSG_TYPE_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_LO 0x1fc5
+#define regCP_HQD_ATOMIC0_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_HI 0x1fc6
+#define regCP_HQD_ATOMIC0_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_LO 0x1fc7
+#define regCP_HQD_ATOMIC1_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_HI 0x1fc8
+#define regCP_HQD_ATOMIC1_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER0 0x1fc9
+#define regCP_HQD_HQ_SCHEDULER0_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS0 0x1fc9
+#define regCP_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL0 0x1fca
+#define regCP_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER1 0x1fca
+#define regCP_HQD_HQ_SCHEDULER1_BASE_IDX 0
+#define regCP_MQD_CONTROL 0x1fcb
+#define regCP_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS1 0x1fcc
+#define regCP_HQD_HQ_STATUS1_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL1 0x1fcd
+#define regCP_HQD_HQ_CONTROL1_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR 0x1fce
+#define regCP_HQD_EOP_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR_HI 0x1fcf
+#define regCP_HQD_EOP_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_EOP_CONTROL 0x1fd0
+#define regCP_HQD_EOP_CONTROL_BASE_IDX 0
+#define regCP_HQD_EOP_RPTR 0x1fd1
+#define regCP_HQD_EOP_RPTR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR 0x1fd2
+#define regCP_HQD_EOP_WPTR_BASE_IDX 0
+#define regCP_HQD_EOP_EVENTS 0x1fd3
+#define regCP_HQD_EOP_EVENTS_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO 0x1fd4
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI 0x1fd5
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_CONTROL 0x1fd6
+#define regCP_HQD_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_OFFSET 0x1fd7
+#define regCP_HQD_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_SIZE 0x1fd8
+#define regCP_HQD_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCP_HQD_WG_STATE_OFFSET 0x1fd9
+#define regCP_HQD_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_SIZE 0x1fda
+#define regCP_HQD_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCP_HQD_GDS_RESOURCE_STATE 0x1fdb
+#define regCP_HQD_GDS_RESOURCE_STATE_BASE_IDX 0
+#define regCP_HQD_ERROR 0x1fdc
+#define regCP_HQD_ERROR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR_MEM 0x1fdd
+#define regCP_HQD_EOP_WPTR_MEM_BASE_IDX 0
+#define regCP_HQD_AQL_CONTROL 0x1fde
+#define regCP_HQD_AQL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_LO 0x1fdf
+#define regCP_HQD_PQ_WPTR_LO_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_HI 0x1fe0
+#define regCP_HQD_PQ_WPTR_HI_BASE_IDX 0
+#define regCP_HQD_SUSPEND_CNTL_STACK_OFFSET 0x1fe1
+#define regCP_HQD_SUSPEND_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT 0x1fe2
+#define regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT_BASE_IDX 0
+#define regCP_HQD_SUSPEND_WG_STATE_OFFSET 0x1fe3
+#define regCP_HQD_SUSPEND_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_DDID_RPTR 0x1fe4
+#define regCP_HQD_DDID_RPTR_BASE_IDX 0
+#define regCP_HQD_DDID_WPTR 0x1fe5
+#define regCP_HQD_DDID_WPTR_BASE_IDX 0
+#define regCP_HQD_DDID_INFLIGHT_COUNT 0x1fe6
+#define regCP_HQD_DDID_INFLIGHT_COUNT_BASE_IDX 0
+#define regCP_HQD_DDID_DELTA_RPT_COUNT 0x1fe7
+#define regCP_HQD_DDID_DELTA_RPT_COUNT_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_STATUS 0x1fe8
+#define regCP_HQD_DEQUEUE_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_tcpdec
+// base address: 0xca80
+#define regTCP_WATCH0_ADDR_H 0x2048
+#define regTCP_WATCH0_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH0_ADDR_L 0x2049
+#define regTCP_WATCH0_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH0_CNTL 0x204a
+#define regTCP_WATCH0_CNTL_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_H 0x204b
+#define regTCP_WATCH1_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_L 0x204c
+#define regTCP_WATCH1_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH1_CNTL 0x204d
+#define regTCP_WATCH1_CNTL_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_H 0x204e
+#define regTCP_WATCH2_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_L 0x204f
+#define regTCP_WATCH2_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH2_CNTL 0x2050
+#define regTCP_WATCH2_CNTL_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_H 0x2051
+#define regTCP_WATCH3_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_L 0x2052
+#define regTCP_WATCH3_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH3_CNTL 0x2053
+#define regTCP_WATCH3_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_gdspdec
+// base address: 0xcc00
+#define regGDS_VMID0_BASE 0x20a0
+#define regGDS_VMID0_BASE_BASE_IDX 0
+#define regGDS_VMID0_SIZE 0x20a1
+#define regGDS_VMID0_SIZE_BASE_IDX 0
+#define regGDS_VMID1_BASE 0x20a2
+#define regGDS_VMID1_BASE_BASE_IDX 0
+#define regGDS_VMID1_SIZE 0x20a3
+#define regGDS_VMID1_SIZE_BASE_IDX 0
+#define regGDS_VMID2_BASE 0x20a4
+#define regGDS_VMID2_BASE_BASE_IDX 0
+#define regGDS_VMID2_SIZE 0x20a5
+#define regGDS_VMID2_SIZE_BASE_IDX 0
+#define regGDS_VMID3_BASE 0x20a6
+#define regGDS_VMID3_BASE_BASE_IDX 0
+#define regGDS_VMID3_SIZE 0x20a7
+#define regGDS_VMID3_SIZE_BASE_IDX 0
+#define regGDS_VMID4_BASE 0x20a8
+#define regGDS_VMID4_BASE_BASE_IDX 0
+#define regGDS_VMID4_SIZE 0x20a9
+#define regGDS_VMID4_SIZE_BASE_IDX 0
+#define regGDS_VMID5_BASE 0x20aa
+#define regGDS_VMID5_BASE_BASE_IDX 0
+#define regGDS_VMID5_SIZE 0x20ab
+#define regGDS_VMID5_SIZE_BASE_IDX 0
+#define regGDS_VMID6_BASE 0x20ac
+#define regGDS_VMID6_BASE_BASE_IDX 0
+#define regGDS_VMID6_SIZE 0x20ad
+#define regGDS_VMID6_SIZE_BASE_IDX 0
+#define regGDS_VMID7_BASE 0x20ae
+#define regGDS_VMID7_BASE_BASE_IDX 0
+#define regGDS_VMID7_SIZE 0x20af
+#define regGDS_VMID7_SIZE_BASE_IDX 0
+#define regGDS_VMID8_BASE 0x20b0
+#define regGDS_VMID8_BASE_BASE_IDX 0
+#define regGDS_VMID8_SIZE 0x20b1
+#define regGDS_VMID8_SIZE_BASE_IDX 0
+#define regGDS_VMID9_BASE 0x20b2
+#define regGDS_VMID9_BASE_BASE_IDX 0
+#define regGDS_VMID9_SIZE 0x20b3
+#define regGDS_VMID9_SIZE_BASE_IDX 0
+#define regGDS_VMID10_BASE 0x20b4
+#define regGDS_VMID10_BASE_BASE_IDX 0
+#define regGDS_VMID10_SIZE 0x20b5
+#define regGDS_VMID10_SIZE_BASE_IDX 0
+#define regGDS_VMID11_BASE 0x20b6
+#define regGDS_VMID11_BASE_BASE_IDX 0
+#define regGDS_VMID11_SIZE 0x20b7
+#define regGDS_VMID11_SIZE_BASE_IDX 0
+#define regGDS_VMID12_BASE 0x20b8
+#define regGDS_VMID12_BASE_BASE_IDX 0
+#define regGDS_VMID12_SIZE 0x20b9
+#define regGDS_VMID12_SIZE_BASE_IDX 0
+#define regGDS_VMID13_BASE 0x20ba
+#define regGDS_VMID13_BASE_BASE_IDX 0
+#define regGDS_VMID13_SIZE 0x20bb
+#define regGDS_VMID13_SIZE_BASE_IDX 0
+#define regGDS_VMID14_BASE 0x20bc
+#define regGDS_VMID14_BASE_BASE_IDX 0
+#define regGDS_VMID14_SIZE 0x20bd
+#define regGDS_VMID14_SIZE_BASE_IDX 0
+#define regGDS_VMID15_BASE 0x20be
+#define regGDS_VMID15_BASE_BASE_IDX 0
+#define regGDS_VMID15_SIZE 0x20bf
+#define regGDS_VMID15_SIZE_BASE_IDX 0
+#define regGDS_GWS_VMID0 0x20c0
+#define regGDS_GWS_VMID0_BASE_IDX 0
+#define regGDS_GWS_VMID1 0x20c1
+#define regGDS_GWS_VMID1_BASE_IDX 0
+#define regGDS_GWS_VMID2 0x20c2
+#define regGDS_GWS_VMID2_BASE_IDX 0
+#define regGDS_GWS_VMID3 0x20c3
+#define regGDS_GWS_VMID3_BASE_IDX 0
+#define regGDS_GWS_VMID4 0x20c4
+#define regGDS_GWS_VMID4_BASE_IDX 0
+#define regGDS_GWS_VMID5 0x20c5
+#define regGDS_GWS_VMID5_BASE_IDX 0
+#define regGDS_GWS_VMID6 0x20c6
+#define regGDS_GWS_VMID6_BASE_IDX 0
+#define regGDS_GWS_VMID7 0x20c7
+#define regGDS_GWS_VMID7_BASE_IDX 0
+#define regGDS_GWS_VMID8 0x20c8
+#define regGDS_GWS_VMID8_BASE_IDX 0
+#define regGDS_GWS_VMID9 0x20c9
+#define regGDS_GWS_VMID9_BASE_IDX 0
+#define regGDS_GWS_VMID10 0x20ca
+#define regGDS_GWS_VMID10_BASE_IDX 0
+#define regGDS_GWS_VMID11 0x20cb
+#define regGDS_GWS_VMID11_BASE_IDX 0
+#define regGDS_GWS_VMID12 0x20cc
+#define regGDS_GWS_VMID12_BASE_IDX 0
+#define regGDS_GWS_VMID13 0x20cd
+#define regGDS_GWS_VMID13_BASE_IDX 0
+#define regGDS_GWS_VMID14 0x20ce
+#define regGDS_GWS_VMID14_BASE_IDX 0
+#define regGDS_GWS_VMID15 0x20cf
+#define regGDS_GWS_VMID15_BASE_IDX 0
+#define regGDS_OA_VMID0 0x20d0
+#define regGDS_OA_VMID0_BASE_IDX 0
+#define regGDS_OA_VMID1 0x20d1
+#define regGDS_OA_VMID1_BASE_IDX 0
+#define regGDS_OA_VMID2 0x20d2
+#define regGDS_OA_VMID2_BASE_IDX 0
+#define regGDS_OA_VMID3 0x20d3
+#define regGDS_OA_VMID3_BASE_IDX 0
+#define regGDS_OA_VMID4 0x20d4
+#define regGDS_OA_VMID4_BASE_IDX 0
+#define regGDS_OA_VMID5 0x20d5
+#define regGDS_OA_VMID5_BASE_IDX 0
+#define regGDS_OA_VMID6 0x20d6
+#define regGDS_OA_VMID6_BASE_IDX 0
+#define regGDS_OA_VMID7 0x20d7
+#define regGDS_OA_VMID7_BASE_IDX 0
+#define regGDS_OA_VMID8 0x20d8
+#define regGDS_OA_VMID8_BASE_IDX 0
+#define regGDS_OA_VMID9 0x20d9
+#define regGDS_OA_VMID9_BASE_IDX 0
+#define regGDS_OA_VMID10 0x20da
+#define regGDS_OA_VMID10_BASE_IDX 0
+#define regGDS_OA_VMID11 0x20db
+#define regGDS_OA_VMID11_BASE_IDX 0
+#define regGDS_OA_VMID12 0x20dc
+#define regGDS_OA_VMID12_BASE_IDX 0
+#define regGDS_OA_VMID13 0x20dd
+#define regGDS_OA_VMID13_BASE_IDX 0
+#define regGDS_OA_VMID14 0x20de
+#define regGDS_OA_VMID14_BASE_IDX 0
+#define regGDS_OA_VMID15 0x20df
+#define regGDS_OA_VMID15_BASE_IDX 0
+#define regGDS_GWS_RESET0 0x20e4
+#define regGDS_GWS_RESET0_BASE_IDX 0
+#define regGDS_GWS_RESET1 0x20e5
+#define regGDS_GWS_RESET1_BASE_IDX 0
+#define regGDS_GWS_RESOURCE_RESET 0x20e6
+#define regGDS_GWS_RESOURCE_RESET_BASE_IDX 0
+#define regGDS_COMPUTE_MAX_WAVE_ID 0x20e8
+#define regGDS_COMPUTE_MAX_WAVE_ID_BASE_IDX 0
+#define regGDS_OA_RESET_MASK 0x20e9
+#define regGDS_OA_RESET_MASK_BASE_IDX 0
+#define regGDS_OA_RESET 0x20ea
+#define regGDS_OA_RESET_BASE_IDX 0
+#define regGDS_CS_CTXSW_STATUS 0x20ed
+#define regGDS_CS_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT0 0x20ee
+#define regGDS_CS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT1 0x20ef
+#define regGDS_CS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT2 0x20f0
+#define regGDS_CS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT3 0x20f1
+#define regGDS_CS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_GFX_CTXSW_STATUS 0x20f2
+#define regGDS_GFX_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT0 0x20f7
+#define regGDS_PS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT1 0x20f8
+#define regGDS_PS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT2 0x20f9
+#define regGDS_PS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT3 0x20fa
+#define regGDS_PS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS_CTXSW_IDX 0x20fb
+#define regGDS_PS_CTXSW_IDX_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT0 0x2117
+#define regGDS_GS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT1 0x2118
+#define regGDS_GS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT2 0x2119
+#define regGDS_GS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT3 0x211a
+#define regGDS_GS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_MEMORY_CLEAN 0x211f
+#define regGDS_MEMORY_CLEAN_BASE_IDX 0
+
+
+// addressBlock: gc_rasdec
+// base address: 0xce00
+#define regRAS_SIGNATURE_CONTROL 0x2120
+#define regRAS_SIGNATURE_CONTROL_BASE_IDX 0
+#define regRAS_SIGNATURE_MASK 0x2121
+#define regRAS_SIGNATURE_MASK_BASE_IDX 0
+#define regRAS_SX_SIGNATURE0 0x2122
+#define regRAS_SX_SIGNATURE0_BASE_IDX 0
+#define regRAS_SX_SIGNATURE1 0x2123
+#define regRAS_SX_SIGNATURE1_BASE_IDX 0
+#define regRAS_SX_SIGNATURE2 0x2124
+#define regRAS_SX_SIGNATURE2_BASE_IDX 0
+#define regRAS_SX_SIGNATURE3 0x2125
+#define regRAS_SX_SIGNATURE3_BASE_IDX 0
+#define regRAS_DB_SIGNATURE0 0x212b
+#define regRAS_DB_SIGNATURE0_BASE_IDX 0
+#define regRAS_PA_SIGNATURE0 0x212c
+#define regRAS_PA_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE0 0x212f
+#define regRAS_SC_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE1 0x2130
+#define regRAS_SC_SIGNATURE1_BASE_IDX 0
+#define regRAS_SC_SIGNATURE2 0x2131
+#define regRAS_SC_SIGNATURE2_BASE_IDX 0
+#define regRAS_SC_SIGNATURE3 0x2132
+#define regRAS_SC_SIGNATURE3_BASE_IDX 0
+#define regRAS_SC_SIGNATURE4 0x2133
+#define regRAS_SC_SIGNATURE4_BASE_IDX 0
+#define regRAS_SC_SIGNATURE5 0x2134
+#define regRAS_SC_SIGNATURE5_BASE_IDX 0
+#define regRAS_SC_SIGNATURE6 0x2135
+#define regRAS_SC_SIGNATURE6_BASE_IDX 0
+#define regRAS_SC_SIGNATURE7 0x2136
+#define regRAS_SC_SIGNATURE7_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE0 0x2139
+#define regRAS_SPI_SIGNATURE0_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE1 0x213a
+#define regRAS_SPI_SIGNATURE1_BASE_IDX 0
+#define regRAS_CB_SIGNATURE0 0x213d
+#define regRAS_CB_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE0 0x213e
+#define regRAS_BCI_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE1 0x213f
+#define regRAS_BCI_SIGNATURE1_BASE_IDX 0
+
+
+// addressBlock: gc_gusdec
+// base address: 0x33000
+#define regGUS_IO_RD_COMBINE_FLUSH 0x2c00
+#define regGUS_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_IO_WR_COMBINE_FLUSH 0x2c01
+#define regGUS_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_IO_RD_PRI_AGE_RATE 0x2c02
+#define regGUS_IO_RD_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_IO_WR_PRI_AGE_RATE 0x2c03
+#define regGUS_IO_WR_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_IO_RD_PRI_AGE_COEFF 0x2c04
+#define regGUS_IO_RD_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_IO_WR_PRI_AGE_COEFF 0x2c05
+#define regGUS_IO_WR_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUEUING 0x2c06
+#define regGUS_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUEUING 0x2c07
+#define regGUS_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define regGUS_IO_RD_PRI_FIXED 0x2c08
+#define regGUS_IO_RD_PRI_FIXED_BASE_IDX 1
+#define regGUS_IO_WR_PRI_FIXED 0x2c09
+#define regGUS_IO_WR_PRI_FIXED_BASE_IDX 1
+#define regGUS_IO_RD_PRI_URGENCY_COEFF 0x2c0a
+#define regGUS_IO_RD_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_IO_WR_PRI_URGENCY_COEFF 0x2c0b
+#define regGUS_IO_WR_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_IO_RD_PRI_URGENCY_MODE 0x2c0c
+#define regGUS_IO_RD_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_IO_WR_PRI_URGENCY_MODE 0x2c0d
+#define regGUS_IO_WR_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI1 0x2c0e
+#define regGUS_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI2 0x2c0f
+#define regGUS_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI3 0x2c10
+#define regGUS_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI4 0x2c11
+#define regGUS_IO_RD_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI1 0x2c12
+#define regGUS_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI2 0x2c13
+#define regGUS_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI3 0x2c14
+#define regGUS_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI4 0x2c15
+#define regGUS_IO_WR_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI1 0x2c16
+#define regGUS_IO_RD_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI2 0x2c17
+#define regGUS_IO_RD_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI3 0x2c18
+#define regGUS_IO_RD_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI4 0x2c19
+#define regGUS_IO_RD_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI1 0x2c1a
+#define regGUS_IO_WR_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI2 0x2c1b
+#define regGUS_IO_WR_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI3 0x2c1c
+#define regGUS_IO_WR_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI4 0x2c1d
+#define regGUS_IO_WR_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_DRAM_COMBINE_FLUSH 0x2c1e
+#define regGUS_DRAM_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_DRAM_COMBINE_RD_WR_EN 0x2c1f
+#define regGUS_DRAM_COMBINE_RD_WR_EN_BASE_IDX 1
+#define regGUS_DRAM_PRI_AGE_RATE 0x2c20
+#define regGUS_DRAM_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_DRAM_PRI_AGE_COEFF 0x2c21
+#define regGUS_DRAM_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUEUING 0x2c22
+#define regGUS_DRAM_PRI_QUEUING_BASE_IDX 1
+#define regGUS_DRAM_PRI_FIXED 0x2c23
+#define regGUS_DRAM_PRI_FIXED_BASE_IDX 1
+#define regGUS_DRAM_PRI_URGENCY_COEFF 0x2c24
+#define regGUS_DRAM_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_DRAM_PRI_URGENCY_MODE 0x2c25
+#define regGUS_DRAM_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI1 0x2c26
+#define regGUS_DRAM_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI2 0x2c27
+#define regGUS_DRAM_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI3 0x2c28
+#define regGUS_DRAM_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI4 0x2c29
+#define regGUS_DRAM_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI5 0x2c2a
+#define regGUS_DRAM_PRI_QUANT_PRI5_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI1 0x2c2b
+#define regGUS_DRAM_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI2 0x2c2c
+#define regGUS_DRAM_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI3 0x2c2d
+#define regGUS_DRAM_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI4 0x2c2e
+#define regGUS_DRAM_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI5 0x2c2f
+#define regGUS_DRAM_PRI_QUANT1_PRI5_BASE_IDX 1
+#define regGUS_IO_GROUP_BURST 0x2c30
+#define regGUS_IO_GROUP_BURST_BASE_IDX 1
+#define regGUS_DRAM_GROUP_BURST 0x2c31
+#define regGUS_DRAM_GROUP_BURST_BASE_IDX 1
+#define regGUS_SDP_ARB_FINAL 0x2c32
+#define regGUS_SDP_ARB_FINAL_BASE_IDX 1
+#define regGUS_SDP_QOS_VC_PRIORITY 0x2c33
+#define regGUS_SDP_QOS_VC_PRIORITY_BASE_IDX 1
+#define regGUS_SDP_CREDITS 0x2c34
+#define regGUS_SDP_CREDITS_BASE_IDX 1
+#define regGUS_SDP_TAG_RESERVE0 0x2c35
+#define regGUS_SDP_TAG_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_TAG_RESERVE1 0x2c36
+#define regGUS_SDP_TAG_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_VCC_RESERVE0 0x2c37
+#define regGUS_SDP_VCC_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_VCC_RESERVE1 0x2c38
+#define regGUS_SDP_VCC_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_VCD_RESERVE0 0x2c39
+#define regGUS_SDP_VCD_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_VCD_RESERVE1 0x2c3a
+#define regGUS_SDP_VCD_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_REQ_CNTL 0x2c3b
+#define regGUS_SDP_REQ_CNTL_BASE_IDX 1
+#define regGUS_MISC 0x2c3c
+#define regGUS_MISC_BASE_IDX 1
+#define regGUS_LATENCY_SAMPLING 0x2c3d
+#define regGUS_LATENCY_SAMPLING_BASE_IDX 1
+#define regGUS_ERR_STATUS 0x2c3e
+#define regGUS_ERR_STATUS_BASE_IDX 1
+#define regGUS_MISC2 0x2c3f
+#define regGUS_MISC2_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_CMDCREDITS0 0x2c40
+#define regGUS_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_CMDCREDITS1 0x2c41
+#define regGUS_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_DATACREDITS0 0x2c42
+#define regGUS_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_DATACREDITS1 0x2c43
+#define regGUS_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_MISCCREDITS 0x2c44
+#define regGUS_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 1
+#define regGUS_SDP_ENABLE 0x2c45
+#define regGUS_SDP_ENABLE_BASE_IDX 1
+#define regGUS_L1_CH0_CMD_IN 0x2c46
+#define regGUS_L1_CH0_CMD_IN_BASE_IDX 1
+#define regGUS_L1_CH0_CMD_OUT 0x2c47
+#define regGUS_L1_CH0_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_IN 0x2c48
+#define regGUS_L1_CH0_DATA_IN_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_OUT 0x2c49
+#define regGUS_L1_CH0_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_U_IN 0x2c4a
+#define regGUS_L1_CH0_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_U_OUT 0x2c4b
+#define regGUS_L1_CH0_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_CMD_IN 0x2c4c
+#define regGUS_L1_CH1_CMD_IN_BASE_IDX 1
+#define regGUS_L1_CH1_CMD_OUT 0x2c4d
+#define regGUS_L1_CH1_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_IN 0x2c4e
+#define regGUS_L1_CH1_DATA_IN_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_OUT 0x2c4f
+#define regGUS_L1_CH1_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_U_IN 0x2c50
+#define regGUS_L1_CH1_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_U_OUT 0x2c51
+#define regGUS_L1_CH1_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_CMD_IN 0x2c52
+#define regGUS_L1_SA0_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA0_CMD_OUT 0x2c53
+#define regGUS_L1_SA0_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_IN 0x2c54
+#define regGUS_L1_SA0_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_OUT 0x2c55
+#define regGUS_L1_SA0_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_U_IN 0x2c56
+#define regGUS_L1_SA0_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_U_OUT 0x2c57
+#define regGUS_L1_SA0_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_CMD_IN 0x2c58
+#define regGUS_L1_SA1_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA1_CMD_OUT 0x2c59
+#define regGUS_L1_SA1_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_IN 0x2c5a
+#define regGUS_L1_SA1_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_OUT 0x2c5b
+#define regGUS_L1_SA1_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_U_IN 0x2c5c
+#define regGUS_L1_SA1_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_U_OUT 0x2c5d
+#define regGUS_L1_SA1_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_CMD_IN 0x2c5e
+#define regGUS_L1_SA2_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA2_CMD_OUT 0x2c5f
+#define regGUS_L1_SA2_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_IN 0x2c60
+#define regGUS_L1_SA2_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_OUT 0x2c61
+#define regGUS_L1_SA2_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_U_IN 0x2c62
+#define regGUS_L1_SA2_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_U_OUT 0x2c63
+#define regGUS_L1_SA2_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_CMD_IN 0x2c64
+#define regGUS_L1_SA3_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA3_CMD_OUT 0x2c65
+#define regGUS_L1_SA3_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_IN 0x2c66
+#define regGUS_L1_SA3_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_OUT 0x2c67
+#define regGUS_L1_SA3_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_U_IN 0x2c68
+#define regGUS_L1_SA3_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_U_OUT 0x2c69
+#define regGUS_L1_SA3_DATA_U_OUT_BASE_IDX 1
+#define regGUS_MISC3 0x2c6a
+#define regGUS_MISC3_BASE_IDX 1
+#define regGUS_WRRSP_FIFO_CNTL 0x2c6b
+#define regGUS_WRRSP_FIFO_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gfxdec0
+// base address: 0x28000
+#define regDB_RENDER_CONTROL 0x0000
+#define regDB_RENDER_CONTROL_BASE_IDX 1
+#define regDB_COUNT_CONTROL 0x0001
+#define regDB_COUNT_CONTROL_BASE_IDX 1
+#define regDB_DEPTH_VIEW 0x0002
+#define regDB_DEPTH_VIEW_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE 0x0003
+#define regDB_RENDER_OVERRIDE_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE2 0x0004
+#define regDB_RENDER_OVERRIDE2_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE 0x0005
+#define regDB_HTILE_DATA_BASE_BASE_IDX 1
+#define regDB_DEPTH_SIZE_XY 0x0007
+#define regDB_DEPTH_SIZE_XY_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MIN 0x0008
+#define regDB_DEPTH_BOUNDS_MIN_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MAX 0x0009
+#define regDB_DEPTH_BOUNDS_MAX_BASE_IDX 1
+#define regDB_STENCIL_CLEAR 0x000a
+#define regDB_STENCIL_CLEAR_BASE_IDX 1
+#define regDB_DEPTH_CLEAR 0x000b
+#define regDB_DEPTH_CLEAR_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_TL 0x000c
+#define regPA_SC_SCREEN_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_BR 0x000d
+#define regPA_SC_SCREEN_SCISSOR_BR_BASE_IDX 1
+#define regDB_RESERVED_REG_2 0x000f
+#define regDB_RESERVED_REG_2_BASE_IDX 1
+#define regDB_Z_INFO 0x0010
+#define regDB_Z_INFO_BASE_IDX 1
+#define regDB_STENCIL_INFO 0x0011
+#define regDB_STENCIL_INFO_BASE_IDX 1
+#define regDB_Z_READ_BASE 0x0012
+#define regDB_Z_READ_BASE_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE 0x0013
+#define regDB_STENCIL_READ_BASE_BASE_IDX 1
+#define regDB_Z_WRITE_BASE 0x0014
+#define regDB_Z_WRITE_BASE_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE 0x0015
+#define regDB_STENCIL_WRITE_BASE_BASE_IDX 1
+#define regDB_RESERVED_REG_1 0x0016
+#define regDB_RESERVED_REG_1_BASE_IDX 1
+#define regDB_RESERVED_REG_3 0x0017
+#define regDB_RESERVED_REG_3_BASE_IDX 1
+#define regDB_Z_READ_BASE_HI 0x001a
+#define regDB_Z_READ_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE_HI 0x001b
+#define regDB_STENCIL_READ_BASE_HI_BASE_IDX 1
+#define regDB_Z_WRITE_BASE_HI 0x001c
+#define regDB_Z_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE_HI 0x001d
+#define regDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE_HI 0x001e
+#define regDB_HTILE_DATA_BASE_HI_BASE_IDX 1
+#define regDB_RMI_L2_CACHE_CONTROL 0x001f
+#define regDB_RMI_L2_CACHE_CONTROL_BASE_IDX 1
+#define regTA_BC_BASE_ADDR 0x0020
+#define regTA_BC_BASE_ADDR_BASE_IDX 1
+#define regTA_BC_BASE_ADDR_HI 0x0021
+#define regTA_BC_BASE_ADDR_HI_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_0 0x007a
+#define regCOHER_DEST_BASE_HI_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_1 0x007b
+#define regCOHER_DEST_BASE_HI_1_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_2 0x007c
+#define regCOHER_DEST_BASE_HI_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_3 0x007d
+#define regCOHER_DEST_BASE_HI_3_BASE_IDX 1
+#define regCOHER_DEST_BASE_2 0x007e
+#define regCOHER_DEST_BASE_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_3 0x007f
+#define regCOHER_DEST_BASE_3_BASE_IDX 1
+#define regPA_SC_WINDOW_OFFSET 0x0080
+#define regPA_SC_WINDOW_OFFSET_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_TL 0x0081
+#define regPA_SC_WINDOW_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_BR 0x0082
+#define regPA_SC_WINDOW_SCISSOR_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_RULE 0x0083
+#define regPA_SC_CLIPRECT_RULE_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_TL 0x0084
+#define regPA_SC_CLIPRECT_0_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_BR 0x0085
+#define regPA_SC_CLIPRECT_0_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_TL 0x0086
+#define regPA_SC_CLIPRECT_1_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_BR 0x0087
+#define regPA_SC_CLIPRECT_1_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_TL 0x0088
+#define regPA_SC_CLIPRECT_2_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_BR 0x0089
+#define regPA_SC_CLIPRECT_2_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_TL 0x008a
+#define regPA_SC_CLIPRECT_3_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_BR 0x008b
+#define regPA_SC_CLIPRECT_3_BR_BASE_IDX 1
+#define regPA_SC_EDGERULE 0x008c
+#define regPA_SC_EDGERULE_BASE_IDX 1
+#define regPA_SU_HARDWARE_SCREEN_OFFSET 0x008d
+#define regPA_SU_HARDWARE_SCREEN_OFFSET_BASE_IDX 1
+#define regCB_TARGET_MASK 0x008e
+#define regCB_TARGET_MASK_BASE_IDX 1
+#define regCB_SHADER_MASK 0x008f
+#define regCB_SHADER_MASK_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_TL 0x0090
+#define regPA_SC_GENERIC_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_BR 0x0091
+#define regPA_SC_GENERIC_SCISSOR_BR_BASE_IDX 1
+#define regCOHER_DEST_BASE_0 0x0092
+#define regCOHER_DEST_BASE_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_1 0x0093
+#define regCOHER_DEST_BASE_1_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_TL 0x0094
+#define regPA_SC_VPORT_SCISSOR_0_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_BR 0x0095
+#define regPA_SC_VPORT_SCISSOR_0_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_TL 0x0096
+#define regPA_SC_VPORT_SCISSOR_1_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_BR 0x0097
+#define regPA_SC_VPORT_SCISSOR_1_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_TL 0x0098
+#define regPA_SC_VPORT_SCISSOR_2_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_BR 0x0099
+#define regPA_SC_VPORT_SCISSOR_2_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_TL 0x009a
+#define regPA_SC_VPORT_SCISSOR_3_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_BR 0x009b
+#define regPA_SC_VPORT_SCISSOR_3_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_TL 0x009c
+#define regPA_SC_VPORT_SCISSOR_4_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_BR 0x009d
+#define regPA_SC_VPORT_SCISSOR_4_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_TL 0x009e
+#define regPA_SC_VPORT_SCISSOR_5_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_BR 0x009f
+#define regPA_SC_VPORT_SCISSOR_5_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_TL 0x00a0
+#define regPA_SC_VPORT_SCISSOR_6_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_BR 0x00a1
+#define regPA_SC_VPORT_SCISSOR_6_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_TL 0x00a2
+#define regPA_SC_VPORT_SCISSOR_7_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_BR 0x00a3
+#define regPA_SC_VPORT_SCISSOR_7_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_TL 0x00a4
+#define regPA_SC_VPORT_SCISSOR_8_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_BR 0x00a5
+#define regPA_SC_VPORT_SCISSOR_8_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_TL 0x00a6
+#define regPA_SC_VPORT_SCISSOR_9_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_BR 0x00a7
+#define regPA_SC_VPORT_SCISSOR_9_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_TL 0x00a8
+#define regPA_SC_VPORT_SCISSOR_10_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_BR 0x00a9
+#define regPA_SC_VPORT_SCISSOR_10_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_TL 0x00aa
+#define regPA_SC_VPORT_SCISSOR_11_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_BR 0x00ab
+#define regPA_SC_VPORT_SCISSOR_11_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_TL 0x00ac
+#define regPA_SC_VPORT_SCISSOR_12_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_BR 0x00ad
+#define regPA_SC_VPORT_SCISSOR_12_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_TL 0x00ae
+#define regPA_SC_VPORT_SCISSOR_13_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_BR 0x00af
+#define regPA_SC_VPORT_SCISSOR_13_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_TL 0x00b0
+#define regPA_SC_VPORT_SCISSOR_14_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_BR 0x00b1
+#define regPA_SC_VPORT_SCISSOR_14_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_TL 0x00b2
+#define regPA_SC_VPORT_SCISSOR_15_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_BR 0x00b3
+#define regPA_SC_VPORT_SCISSOR_15_BR_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_0 0x00b4
+#define regPA_SC_VPORT_ZMIN_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_0 0x00b5
+#define regPA_SC_VPORT_ZMAX_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_1 0x00b6
+#define regPA_SC_VPORT_ZMIN_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_1 0x00b7
+#define regPA_SC_VPORT_ZMAX_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_2 0x00b8
+#define regPA_SC_VPORT_ZMIN_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_2 0x00b9
+#define regPA_SC_VPORT_ZMAX_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_3 0x00ba
+#define regPA_SC_VPORT_ZMIN_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_3 0x00bb
+#define regPA_SC_VPORT_ZMAX_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_4 0x00bc
+#define regPA_SC_VPORT_ZMIN_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_4 0x00bd
+#define regPA_SC_VPORT_ZMAX_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_5 0x00be
+#define regPA_SC_VPORT_ZMIN_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_5 0x00bf
+#define regPA_SC_VPORT_ZMAX_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_6 0x00c0
+#define regPA_SC_VPORT_ZMIN_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_6 0x00c1
+#define regPA_SC_VPORT_ZMAX_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_7 0x00c2
+#define regPA_SC_VPORT_ZMIN_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_7 0x00c3
+#define regPA_SC_VPORT_ZMAX_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_8 0x00c4
+#define regPA_SC_VPORT_ZMIN_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_8 0x00c5
+#define regPA_SC_VPORT_ZMAX_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_9 0x00c6
+#define regPA_SC_VPORT_ZMIN_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_9 0x00c7
+#define regPA_SC_VPORT_ZMAX_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_10 0x00c8
+#define regPA_SC_VPORT_ZMIN_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_10 0x00c9
+#define regPA_SC_VPORT_ZMAX_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_11 0x00ca
+#define regPA_SC_VPORT_ZMIN_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_11 0x00cb
+#define regPA_SC_VPORT_ZMAX_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_12 0x00cc
+#define regPA_SC_VPORT_ZMIN_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_12 0x00cd
+#define regPA_SC_VPORT_ZMAX_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_13 0x00ce
+#define regPA_SC_VPORT_ZMIN_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_13 0x00cf
+#define regPA_SC_VPORT_ZMAX_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_14 0x00d0
+#define regPA_SC_VPORT_ZMIN_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_14 0x00d1
+#define regPA_SC_VPORT_ZMAX_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_15 0x00d2
+#define regPA_SC_VPORT_ZMIN_15_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_15 0x00d3
+#define regPA_SC_VPORT_ZMAX_15_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG 0x00d4
+#define regPA_SC_RASTER_CONFIG_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG_1 0x00d5
+#define regPA_SC_RASTER_CONFIG_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_CONTROL 0x00d6
+#define regPA_SC_SCREEN_EXTENT_CONTROL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_OVERRIDE 0x00d7
+#define regPA_SC_TILE_STEERING_OVERRIDE_BASE_IDX 1
+#define regCP_PERFMON_CNTX_CNTL 0x00d8
+#define regCP_PERFMON_CNTX_CNTL_BASE_IDX 1
+#define regCP_PIPEID 0x00d9
+#define regCP_PIPEID_BASE_IDX 1
+#define regCP_RINGID 0x00d9
+#define regCP_RINGID_BASE_IDX 1
+#define regCP_VMID 0x00da
+#define regCP_VMID_BASE_IDX 1
+#define regCONTEXT_RESERVED_REG0 0x00db
+#define regCONTEXT_RESERVED_REG0_BASE_IDX 1
+#define regCONTEXT_RESERVED_REG1 0x00dc
+#define regCONTEXT_RESERVED_REG1_BASE_IDX 1
+#define regPA_SC_VRS_OVERRIDE_CNTL 0x00f4
+#define regPA_SC_VRS_OVERRIDE_CNTL_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE 0x00f5
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_EXT 0x00f6
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_EXT_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_SIZE_XY 0x00f7
+#define regPA_SC_VRS_RATE_FEEDBACK_SIZE_XY_BASE_IDX 1
+#define regPA_SC_VRS_RATE_CACHE_CNTL 0x00f9
+#define regPA_SC_VRS_RATE_CACHE_CNTL_BASE_IDX 1
+#define regPA_SC_VRS_RATE_BASE 0x00fc
+#define regPA_SC_VRS_RATE_BASE_BASE_IDX 1
+#define regPA_SC_VRS_RATE_BASE_EXT 0x00fd
+#define regPA_SC_VRS_RATE_BASE_EXT_BASE_IDX 1
+#define regPA_SC_VRS_RATE_SIZE_XY 0x00fe
+#define regPA_SC_VRS_RATE_SIZE_XY_BASE_IDX 1
+#define regVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
+#define regVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
+#define regCB_RMI_GL2_CACHE_CONTROL 0x0104
+#define regCB_RMI_GL2_CACHE_CONTROL_BASE_IDX 1
+#define regCB_BLEND_RED 0x0105
+#define regCB_BLEND_RED_BASE_IDX 1
+#define regCB_BLEND_GREEN 0x0106
+#define regCB_BLEND_GREEN_BASE_IDX 1
+#define regCB_BLEND_BLUE 0x0107
+#define regCB_BLEND_BLUE_BASE_IDX 1
+#define regCB_BLEND_ALPHA 0x0108
+#define regCB_BLEND_ALPHA_BASE_IDX 1
+#define regCB_FDCC_CONTROL 0x0109
+#define regCB_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COVERAGE_OUT_CONTROL 0x010a
+#define regCB_COVERAGE_OUT_CONTROL_BASE_IDX 1
+#define regDB_STENCIL_CONTROL 0x010b
+#define regDB_STENCIL_CONTROL_BASE_IDX 1
+#define regDB_STENCILREFMASK 0x010c
+#define regDB_STENCILREFMASK_BASE_IDX 1
+#define regDB_STENCILREFMASK_BF 0x010d
+#define regDB_STENCILREFMASK_BF_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE 0x010f
+#define regPA_CL_VPORT_XSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET 0x0110
+#define regPA_CL_VPORT_XOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE 0x0111
+#define regPA_CL_VPORT_YSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET 0x0112
+#define regPA_CL_VPORT_YOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE 0x0113
+#define regPA_CL_VPORT_ZSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET 0x0114
+#define regPA_CL_VPORT_ZOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_1 0x0115
+#define regPA_CL_VPORT_XSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_1 0x0116
+#define regPA_CL_VPORT_XOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_1 0x0117
+#define regPA_CL_VPORT_YSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_1 0x0118
+#define regPA_CL_VPORT_YOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_1 0x0119
+#define regPA_CL_VPORT_ZSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_1 0x011a
+#define regPA_CL_VPORT_ZOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_2 0x011b
+#define regPA_CL_VPORT_XSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_2 0x011c
+#define regPA_CL_VPORT_XOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_2 0x011d
+#define regPA_CL_VPORT_YSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_2 0x011e
+#define regPA_CL_VPORT_YOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_2 0x011f
+#define regPA_CL_VPORT_ZSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_2 0x0120
+#define regPA_CL_VPORT_ZOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_3 0x0121
+#define regPA_CL_VPORT_XSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_3 0x0122
+#define regPA_CL_VPORT_XOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_3 0x0123
+#define regPA_CL_VPORT_YSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_3 0x0124
+#define regPA_CL_VPORT_YOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_3 0x0125
+#define regPA_CL_VPORT_ZSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_3 0x0126
+#define regPA_CL_VPORT_ZOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_4 0x0127
+#define regPA_CL_VPORT_XSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_4 0x0128
+#define regPA_CL_VPORT_XOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_4 0x0129
+#define regPA_CL_VPORT_YSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_4 0x012a
+#define regPA_CL_VPORT_YOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_4 0x012b
+#define regPA_CL_VPORT_ZSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_4 0x012c
+#define regPA_CL_VPORT_ZOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_5 0x012d
+#define regPA_CL_VPORT_XSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_5 0x012e
+#define regPA_CL_VPORT_XOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_5 0x012f
+#define regPA_CL_VPORT_YSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_5 0x0130
+#define regPA_CL_VPORT_YOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_5 0x0131
+#define regPA_CL_VPORT_ZSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_5 0x0132
+#define regPA_CL_VPORT_ZOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_6 0x0133
+#define regPA_CL_VPORT_XSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_6 0x0134
+#define regPA_CL_VPORT_XOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_6 0x0135
+#define regPA_CL_VPORT_YSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_6 0x0136
+#define regPA_CL_VPORT_YOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_6 0x0137
+#define regPA_CL_VPORT_ZSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_6 0x0138
+#define regPA_CL_VPORT_ZOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_7 0x0139
+#define regPA_CL_VPORT_XSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_7 0x013a
+#define regPA_CL_VPORT_XOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_7 0x013b
+#define regPA_CL_VPORT_YSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_7 0x013c
+#define regPA_CL_VPORT_YOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_7 0x013d
+#define regPA_CL_VPORT_ZSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_7 0x013e
+#define regPA_CL_VPORT_ZOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_8 0x013f
+#define regPA_CL_VPORT_XSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_8 0x0140
+#define regPA_CL_VPORT_XOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_8 0x0141
+#define regPA_CL_VPORT_YSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_8 0x0142
+#define regPA_CL_VPORT_YOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_8 0x0143
+#define regPA_CL_VPORT_ZSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_8 0x0144
+#define regPA_CL_VPORT_ZOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_9 0x0145
+#define regPA_CL_VPORT_XSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_9 0x0146
+#define regPA_CL_VPORT_XOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_9 0x0147
+#define regPA_CL_VPORT_YSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_9 0x0148
+#define regPA_CL_VPORT_YOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_9 0x0149
+#define regPA_CL_VPORT_ZSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_9 0x014a
+#define regPA_CL_VPORT_ZOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_10 0x014b
+#define regPA_CL_VPORT_XSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_10 0x014c
+#define regPA_CL_VPORT_XOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_10 0x014d
+#define regPA_CL_VPORT_YSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_10 0x014e
+#define regPA_CL_VPORT_YOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_10 0x014f
+#define regPA_CL_VPORT_ZSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_10 0x0150
+#define regPA_CL_VPORT_ZOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_11 0x0151
+#define regPA_CL_VPORT_XSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_11 0x0152
+#define regPA_CL_VPORT_XOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_11 0x0153
+#define regPA_CL_VPORT_YSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_11 0x0154
+#define regPA_CL_VPORT_YOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_11 0x0155
+#define regPA_CL_VPORT_ZSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_11 0x0156
+#define regPA_CL_VPORT_ZOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_12 0x0157
+#define regPA_CL_VPORT_XSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_12 0x0158
+#define regPA_CL_VPORT_XOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_12 0x0159
+#define regPA_CL_VPORT_YSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_12 0x015a
+#define regPA_CL_VPORT_YOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_12 0x015b
+#define regPA_CL_VPORT_ZSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_12 0x015c
+#define regPA_CL_VPORT_ZOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_13 0x015d
+#define regPA_CL_VPORT_XSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_13 0x015e
+#define regPA_CL_VPORT_XOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_13 0x015f
+#define regPA_CL_VPORT_YSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_13 0x0160
+#define regPA_CL_VPORT_YOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_13 0x0161
+#define regPA_CL_VPORT_ZSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_13 0x0162
+#define regPA_CL_VPORT_ZOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_14 0x0163
+#define regPA_CL_VPORT_XSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_14 0x0164
+#define regPA_CL_VPORT_XOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_14 0x0165
+#define regPA_CL_VPORT_YSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_14 0x0166
+#define regPA_CL_VPORT_YOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_14 0x0167
+#define regPA_CL_VPORT_ZSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_14 0x0168
+#define regPA_CL_VPORT_ZOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_15 0x0169
+#define regPA_CL_VPORT_XSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_15 0x016a
+#define regPA_CL_VPORT_XOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_15 0x016b
+#define regPA_CL_VPORT_YSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_15 0x016c
+#define regPA_CL_VPORT_YOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_15 0x016d
+#define regPA_CL_VPORT_ZSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_15 0x016e
+#define regPA_CL_VPORT_ZOFFSET_15_BASE_IDX 1
+#define regPA_CL_UCP_0_X 0x016f
+#define regPA_CL_UCP_0_X_BASE_IDX 1
+#define regPA_CL_UCP_0_Y 0x0170
+#define regPA_CL_UCP_0_Y_BASE_IDX 1
+#define regPA_CL_UCP_0_Z 0x0171
+#define regPA_CL_UCP_0_Z_BASE_IDX 1
+#define regPA_CL_UCP_0_W 0x0172
+#define regPA_CL_UCP_0_W_BASE_IDX 1
+#define regPA_CL_UCP_1_X 0x0173
+#define regPA_CL_UCP_1_X_BASE_IDX 1
+#define regPA_CL_UCP_1_Y 0x0174
+#define regPA_CL_UCP_1_Y_BASE_IDX 1
+#define regPA_CL_UCP_1_Z 0x0175
+#define regPA_CL_UCP_1_Z_BASE_IDX 1
+#define regPA_CL_UCP_1_W 0x0176
+#define regPA_CL_UCP_1_W_BASE_IDX 1
+#define regPA_CL_UCP_2_X 0x0177
+#define regPA_CL_UCP_2_X_BASE_IDX 1
+#define regPA_CL_UCP_2_Y 0x0178
+#define regPA_CL_UCP_2_Y_BASE_IDX 1
+#define regPA_CL_UCP_2_Z 0x0179
+#define regPA_CL_UCP_2_Z_BASE_IDX 1
+#define regPA_CL_UCP_2_W 0x017a
+#define regPA_CL_UCP_2_W_BASE_IDX 1
+#define regPA_CL_UCP_3_X 0x017b
+#define regPA_CL_UCP_3_X_BASE_IDX 1
+#define regPA_CL_UCP_3_Y 0x017c
+#define regPA_CL_UCP_3_Y_BASE_IDX 1
+#define regPA_CL_UCP_3_Z 0x017d
+#define regPA_CL_UCP_3_Z_BASE_IDX 1
+#define regPA_CL_UCP_3_W 0x017e
+#define regPA_CL_UCP_3_W_BASE_IDX 1
+#define regPA_CL_UCP_4_X 0x017f
+#define regPA_CL_UCP_4_X_BASE_IDX 1
+#define regPA_CL_UCP_4_Y 0x0180
+#define regPA_CL_UCP_4_Y_BASE_IDX 1
+#define regPA_CL_UCP_4_Z 0x0181
+#define regPA_CL_UCP_4_Z_BASE_IDX 1
+#define regPA_CL_UCP_4_W 0x0182
+#define regPA_CL_UCP_4_W_BASE_IDX 1
+#define regPA_CL_UCP_5_X 0x0183
+#define regPA_CL_UCP_5_X_BASE_IDX 1
+#define regPA_CL_UCP_5_Y 0x0184
+#define regPA_CL_UCP_5_Y_BASE_IDX 1
+#define regPA_CL_UCP_5_Z 0x0185
+#define regPA_CL_UCP_5_Z_BASE_IDX 1
+#define regPA_CL_UCP_5_W 0x0186
+#define regPA_CL_UCP_5_W_BASE_IDX 1
+#define regPA_CL_PROG_NEAR_CLIP_Z 0x0187
+#define regPA_CL_PROG_NEAR_CLIP_Z_BASE_IDX 1
+#define regPA_RATE_CNTL 0x0188
+#define regPA_RATE_CNTL_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_0 0x0191
+#define regSPI_PS_INPUT_CNTL_0_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_1 0x0192
+#define regSPI_PS_INPUT_CNTL_1_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_2 0x0193
+#define regSPI_PS_INPUT_CNTL_2_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_3 0x0194
+#define regSPI_PS_INPUT_CNTL_3_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_4 0x0195
+#define regSPI_PS_INPUT_CNTL_4_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_5 0x0196
+#define regSPI_PS_INPUT_CNTL_5_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_6 0x0197
+#define regSPI_PS_INPUT_CNTL_6_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_7 0x0198
+#define regSPI_PS_INPUT_CNTL_7_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_8 0x0199
+#define regSPI_PS_INPUT_CNTL_8_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_9 0x019a
+#define regSPI_PS_INPUT_CNTL_9_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_10 0x019b
+#define regSPI_PS_INPUT_CNTL_10_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_11 0x019c
+#define regSPI_PS_INPUT_CNTL_11_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_12 0x019d
+#define regSPI_PS_INPUT_CNTL_12_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_13 0x019e
+#define regSPI_PS_INPUT_CNTL_13_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_14 0x019f
+#define regSPI_PS_INPUT_CNTL_14_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_15 0x01a0
+#define regSPI_PS_INPUT_CNTL_15_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_16 0x01a1
+#define regSPI_PS_INPUT_CNTL_16_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_17 0x01a2
+#define regSPI_PS_INPUT_CNTL_17_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_18 0x01a3
+#define regSPI_PS_INPUT_CNTL_18_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_19 0x01a4
+#define regSPI_PS_INPUT_CNTL_19_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_20 0x01a5
+#define regSPI_PS_INPUT_CNTL_20_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_21 0x01a6
+#define regSPI_PS_INPUT_CNTL_21_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_22 0x01a7
+#define regSPI_PS_INPUT_CNTL_22_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_23 0x01a8
+#define regSPI_PS_INPUT_CNTL_23_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_24 0x01a9
+#define regSPI_PS_INPUT_CNTL_24_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_25 0x01aa
+#define regSPI_PS_INPUT_CNTL_25_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_26 0x01ab
+#define regSPI_PS_INPUT_CNTL_26_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_27 0x01ac
+#define regSPI_PS_INPUT_CNTL_27_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_28 0x01ad
+#define regSPI_PS_INPUT_CNTL_28_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_29 0x01ae
+#define regSPI_PS_INPUT_CNTL_29_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_30 0x01af
+#define regSPI_PS_INPUT_CNTL_30_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_31 0x01b0
+#define regSPI_PS_INPUT_CNTL_31_BASE_IDX 1
+#define regSPI_VS_OUT_CONFIG 0x01b1
+#define regSPI_VS_OUT_CONFIG_BASE_IDX 1
+#define regSPI_PS_INPUT_ENA 0x01b3
+#define regSPI_PS_INPUT_ENA_BASE_IDX 1
+#define regSPI_PS_INPUT_ADDR 0x01b4
+#define regSPI_PS_INPUT_ADDR_BASE_IDX 1
+#define regSPI_INTERP_CONTROL_0 0x01b5
+#define regSPI_INTERP_CONTROL_0_BASE_IDX 1
+#define regSPI_PS_IN_CONTROL 0x01b6
+#define regSPI_PS_IN_CONTROL_BASE_IDX 1
+#define regSPI_BARYC_CNTL 0x01b8
+#define regSPI_BARYC_CNTL_BASE_IDX 1
+#define regSPI_TMPRING_SIZE 0x01ba
+#define regSPI_TMPRING_SIZE_BASE_IDX 1
+#define regSPI_GFX_SCRATCH_BASE_LO 0x01bb
+#define regSPI_GFX_SCRATCH_BASE_LO_BASE_IDX 1
+#define regSPI_GFX_SCRATCH_BASE_HI 0x01bc
+#define regSPI_GFX_SCRATCH_BASE_HI_BASE_IDX 1
+#define regSPI_SHADER_IDX_FORMAT 0x01c2
+#define regSPI_SHADER_IDX_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_POS_FORMAT 0x01c3
+#define regSPI_SHADER_POS_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_Z_FORMAT 0x01c4
+#define regSPI_SHADER_Z_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_COL_FORMAT 0x01c5
+#define regSPI_SHADER_COL_FORMAT_BASE_IDX 1
+#define regSX_PS_DOWNCONVERT_CONTROL 0x01d4
+#define regSX_PS_DOWNCONVERT_CONTROL_BASE_IDX 1
+#define regSX_PS_DOWNCONVERT 0x01d5
+#define regSX_PS_DOWNCONVERT_BASE_IDX 1
+#define regSX_BLEND_OPT_EPSILON 0x01d6
+#define regSX_BLEND_OPT_EPSILON_BASE_IDX 1
+#define regSX_BLEND_OPT_CONTROL 0x01d7
+#define regSX_BLEND_OPT_CONTROL_BASE_IDX 1
+#define regSX_MRT0_BLEND_OPT 0x01d8
+#define regSX_MRT0_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT1_BLEND_OPT 0x01d9
+#define regSX_MRT1_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT2_BLEND_OPT 0x01da
+#define regSX_MRT2_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT3_BLEND_OPT 0x01db
+#define regSX_MRT3_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT4_BLEND_OPT 0x01dc
+#define regSX_MRT4_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT5_BLEND_OPT 0x01dd
+#define regSX_MRT5_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT6_BLEND_OPT 0x01de
+#define regSX_MRT6_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT7_BLEND_OPT 0x01df
+#define regSX_MRT7_BLEND_OPT_BASE_IDX 1
+#define regCB_BLEND0_CONTROL 0x01e0
+#define regCB_BLEND0_CONTROL_BASE_IDX 1
+#define regCB_BLEND1_CONTROL 0x01e1
+#define regCB_BLEND1_CONTROL_BASE_IDX 1
+#define regCB_BLEND2_CONTROL 0x01e2
+#define regCB_BLEND2_CONTROL_BASE_IDX 1
+#define regCB_BLEND3_CONTROL 0x01e3
+#define regCB_BLEND3_CONTROL_BASE_IDX 1
+#define regCB_BLEND4_CONTROL 0x01e4
+#define regCB_BLEND4_CONTROL_BASE_IDX 1
+#define regCB_BLEND5_CONTROL 0x01e5
+#define regCB_BLEND5_CONTROL_BASE_IDX 1
+#define regCB_BLEND6_CONTROL 0x01e6
+#define regCB_BLEND6_CONTROL_BASE_IDX 1
+#define regCB_BLEND7_CONTROL 0x01e7
+#define regCB_BLEND7_CONTROL_BASE_IDX 1
+#define regGFX_COPY_STATE 0x01f4
+#define regGFX_COPY_STATE_BASE_IDX 1
+#define regPA_CL_POINT_X_RAD 0x01f5
+#define regPA_CL_POINT_X_RAD_BASE_IDX 1
+#define regPA_CL_POINT_Y_RAD 0x01f6
+#define regPA_CL_POINT_Y_RAD_BASE_IDX 1
+#define regPA_CL_POINT_SIZE 0x01f7
+#define regPA_CL_POINT_SIZE_BASE_IDX 1
+#define regPA_CL_POINT_CULL_RAD 0x01f8
+#define regPA_CL_POINT_CULL_RAD_BASE_IDX 1
+#define regVGT_DMA_BASE_HI 0x01f9
+#define regVGT_DMA_BASE_HI_BASE_IDX 1
+#define regVGT_DMA_BASE 0x01fa
+#define regVGT_DMA_BASE_BASE_IDX 1
+#define regVGT_DRAW_INITIATOR 0x01fc
+#define regVGT_DRAW_INITIATOR_BASE_IDX 1
+#define regVGT_EVENT_ADDRESS_REG 0x01fe
+#define regVGT_EVENT_ADDRESS_REG_BASE_IDX 1
+#define regGE_MAX_OUTPUT_PER_SUBGROUP 0x01ff
+#define regGE_MAX_OUTPUT_PER_SUBGROUP_BASE_IDX 1
+#define regDB_DEPTH_CONTROL 0x0200
+#define regDB_DEPTH_CONTROL_BASE_IDX 1
+#define regDB_EQAA 0x0201
+#define regDB_EQAA_BASE_IDX 1
+#define regCB_COLOR_CONTROL 0x0202
+#define regCB_COLOR_CONTROL_BASE_IDX 1
+#define regDB_SHADER_CONTROL 0x0203
+#define regDB_SHADER_CONTROL_BASE_IDX 1
+#define regPA_CL_CLIP_CNTL 0x0204
+#define regPA_CL_CLIP_CNTL_BASE_IDX 1
+#define regPA_SU_SC_MODE_CNTL 0x0205
+#define regPA_SU_SC_MODE_CNTL_BASE_IDX 1
+#define regPA_CL_VTE_CNTL 0x0206
+#define regPA_CL_VTE_CNTL_BASE_IDX 1
+#define regPA_CL_VS_OUT_CNTL 0x0207
+#define regPA_CL_VS_OUT_CNTL_BASE_IDX 1
+#define regPA_CL_NANINF_CNTL 0x0208
+#define regPA_CL_NANINF_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_CNTL 0x0209
+#define regPA_SU_LINE_STIPPLE_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_SCALE 0x020a
+#define regPA_SU_LINE_STIPPLE_SCALE_BASE_IDX 1
+#define regPA_SU_PRIM_FILTER_CNTL 0x020b
+#define regPA_SU_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL 0x020c
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_CL_NGG_CNTL 0x020e
+#define regPA_CL_NGG_CNTL_BASE_IDX 1
+#define regPA_SU_OVER_RASTERIZATION_CNTL 0x020f
+#define regPA_SU_OVER_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_STEREO_CNTL 0x0210
+#define regPA_STEREO_CNTL_BASE_IDX 1
+#define regPA_STATE_STEREO_X 0x0211
+#define regPA_STATE_STEREO_X_BASE_IDX 1
+#define regPA_CL_VRS_CNTL 0x0212
+#define regPA_CL_VRS_CNTL_BASE_IDX 1
+#define regPA_SU_POINT_SIZE 0x0280
+#define regPA_SU_POINT_SIZE_BASE_IDX 1
+#define regPA_SU_POINT_MINMAX 0x0281
+#define regPA_SU_POINT_MINMAX_BASE_IDX 1
+#define regPA_SU_LINE_CNTL 0x0282
+#define regPA_SU_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE 0x0283
+#define regPA_SC_LINE_STIPPLE_BASE_IDX 1
+#define regVGT_HOS_MAX_TESS_LEVEL 0x0286
+#define regVGT_HOS_MAX_TESS_LEVEL_BASE_IDX 1
+#define regVGT_HOS_MIN_TESS_LEVEL 0x0287
+#define regVGT_HOS_MIN_TESS_LEVEL_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_0 0x0292
+#define regPA_SC_MODE_CNTL_0_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_1 0x0293
+#define regPA_SC_MODE_CNTL_1_BASE_IDX 1
+#define regVGT_ENHANCE 0x0294
+#define regVGT_ENHANCE_BASE_IDX 1
+#define regIA_ENHANCE 0x029c
+#define regIA_ENHANCE_BASE_IDX 1
+#define regVGT_DMA_SIZE 0x029d
+#define regVGT_DMA_SIZE_BASE_IDX 1
+#define regVGT_DMA_MAX_SIZE 0x029e
+#define regVGT_DMA_MAX_SIZE_BASE_IDX 1
+#define regVGT_DMA_INDEX_TYPE 0x029f
+#define regVGT_DMA_INDEX_TYPE_BASE_IDX 1
+#define regWD_ENHANCE 0x02a0
+#define regWD_ENHANCE_BASE_IDX 1
+#define regVGT_PRIMITIVEID_EN 0x02a1
+#define regVGT_PRIMITIVEID_EN_BASE_IDX 1
+#define regVGT_DMA_NUM_INSTANCES 0x02a2
+#define regVGT_DMA_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_PRIMITIVEID_RESET 0x02a3
+#define regVGT_PRIMITIVEID_RESET_BASE_IDX 1
+#define regVGT_EVENT_INITIATOR 0x02a4
+#define regVGT_EVENT_INITIATOR_BASE_IDX 1
+#define regVGT_DRAW_PAYLOAD_CNTL 0x02a6
+#define regVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
+#define regVGT_ESGS_RING_ITEMSIZE 0x02ab
+#define regVGT_ESGS_RING_ITEMSIZE_BASE_IDX 1
+#define regVGT_REUSE_OFF 0x02ad
+#define regVGT_REUSE_OFF_BASE_IDX 1
+#define regDB_HTILE_SURFACE 0x02af
+#define regDB_HTILE_SURFACE_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE0 0x02b0
+#define regDB_SRESULTS_COMPARE_STATE0_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE1 0x02b1
+#define regDB_SRESULTS_COMPARE_STATE1_BASE_IDX 1
+#define regDB_PRELOAD_CONTROL 0x02b2
+#define regDB_PRELOAD_CONTROL_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0x02ca
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0x02cb
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0x02cc
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE_BASE_IDX 1
+#define regVGT_GS_MAX_VERT_OUT 0x02ce
+#define regVGT_GS_MAX_VERT_OUT_BASE_IDX 1
+#define regGE_NGG_SUBGRP_CNTL 0x02d3
+#define regGE_NGG_SUBGRP_CNTL_BASE_IDX 1
+#define regVGT_TESS_DISTRIBUTION 0x02d4
+#define regVGT_TESS_DISTRIBUTION_BASE_IDX 1
+#define regVGT_SHADER_STAGES_EN 0x02d5
+#define regVGT_SHADER_STAGES_EN_BASE_IDX 1
+#define regVGT_LS_HS_CONFIG 0x02d6
+#define regVGT_LS_HS_CONFIG_BASE_IDX 1
+#define regVGT_TF_PARAM 0x02db
+#define regVGT_TF_PARAM_BASE_IDX 1
+#define regDB_ALPHA_TO_MASK 0x02dc
+#define regDB_ALPHA_TO_MASK_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL 0x02de
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_CLAMP 0x02df
+#define regPA_SU_POLY_OFFSET_CLAMP_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE 0x02e0
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET 0x02e1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_SCALE 0x02e2
+#define regPA_SU_POLY_OFFSET_BACK_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET 0x02e3
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET_BASE_IDX 1
+#define regVGT_GS_INSTANCE_CNT 0x02e4
+#define regVGT_GS_INSTANCE_CNT_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_0 0x02f5
+#define regPA_SC_CENTROID_PRIORITY_0_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_1 0x02f6
+#define regPA_SC_CENTROID_PRIORITY_1_BASE_IDX 1
+#define regPA_SC_LINE_CNTL 0x02f7
+#define regPA_SC_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_AA_CONFIG 0x02f8
+#define regPA_SC_AA_CONFIG_BASE_IDX 1
+#define regPA_SU_VTX_CNTL 0x02f9
+#define regPA_SU_VTX_CNTL_BASE_IDX 1
+#define regPA_CL_GB_VERT_CLIP_ADJ 0x02fa
+#define regPA_CL_GB_VERT_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_VERT_DISC_ADJ 0x02fb
+#define regPA_CL_GB_VERT_DISC_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_CLIP_ADJ 0x02fc
+#define regPA_CL_GB_HORZ_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_DISC_ADJ 0x02fd
+#define regPA_CL_GB_HORZ_DISC_ADJ_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0x02fe
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0x02ff
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0x0300
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0x0301
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0x0302
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0x0303
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0x0304
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0x0305
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0x0306
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0x0307
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0x0308
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0x0309
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0x030a
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0x030b
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0x030c
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0x030d
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y0_X1Y0 0x030e
+#define regPA_SC_AA_MASK_X0Y0_X1Y0_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y1_X1Y1 0x030f
+#define regPA_SC_AA_MASK_X0Y1_X1Y1_BASE_IDX 1
+#define regPA_SC_SHADER_CONTROL 0x0310
+#define regPA_SC_SHADER_CONTROL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_0 0x0311
+#define regPA_SC_BINNER_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_1 0x0312
+#define regPA_SC_BINNER_CNTL_1_BASE_IDX 1
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL 0x0313
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_SC_NGG_MODE_CNTL 0x0314
+#define regPA_SC_NGG_MODE_CNTL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_2 0x0315
+#define regPA_SC_BINNER_CNTL_2_BASE_IDX 1
+#define regCB_COLOR0_BASE 0x0318
+#define regCB_COLOR0_BASE_BASE_IDX 1
+#define regCB_COLOR0_VIEW 0x031b
+#define regCB_COLOR0_VIEW_BASE_IDX 1
+#define regCB_COLOR0_INFO 0x031c
+#define regCB_COLOR0_INFO_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB 0x031d
+#define regCB_COLOR0_ATTRIB_BASE_IDX 1
+#define regCB_COLOR0_FDCC_CONTROL 0x031e
+#define regCB_COLOR0_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE 0x0325
+#define regCB_COLOR0_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR1_BASE 0x0327
+#define regCB_COLOR1_BASE_BASE_IDX 1
+#define regCB_COLOR1_VIEW 0x032a
+#define regCB_COLOR1_VIEW_BASE_IDX 1
+#define regCB_COLOR1_INFO 0x032b
+#define regCB_COLOR1_INFO_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB 0x032c
+#define regCB_COLOR1_ATTRIB_BASE_IDX 1
+#define regCB_COLOR1_FDCC_CONTROL 0x032d
+#define regCB_COLOR1_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE 0x0334
+#define regCB_COLOR1_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR2_BASE 0x0336
+#define regCB_COLOR2_BASE_BASE_IDX 1
+#define regCB_COLOR2_VIEW 0x0339
+#define regCB_COLOR2_VIEW_BASE_IDX 1
+#define regCB_COLOR2_INFO 0x033a
+#define regCB_COLOR2_INFO_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB 0x033b
+#define regCB_COLOR2_ATTRIB_BASE_IDX 1
+#define regCB_COLOR2_FDCC_CONTROL 0x033c
+#define regCB_COLOR2_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE 0x0343
+#define regCB_COLOR2_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR3_BASE 0x0345
+#define regCB_COLOR3_BASE_BASE_IDX 1
+#define regCB_COLOR3_VIEW 0x0348
+#define regCB_COLOR3_VIEW_BASE_IDX 1
+#define regCB_COLOR3_INFO 0x0349
+#define regCB_COLOR3_INFO_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB 0x034a
+#define regCB_COLOR3_ATTRIB_BASE_IDX 1
+#define regCB_COLOR3_FDCC_CONTROL 0x034b
+#define regCB_COLOR3_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE 0x0352
+#define regCB_COLOR3_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR4_BASE 0x0354
+#define regCB_COLOR4_BASE_BASE_IDX 1
+#define regCB_COLOR4_VIEW 0x0357
+#define regCB_COLOR4_VIEW_BASE_IDX 1
+#define regCB_COLOR4_INFO 0x0358
+#define regCB_COLOR4_INFO_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB 0x0359
+#define regCB_COLOR4_ATTRIB_BASE_IDX 1
+#define regCB_COLOR4_FDCC_CONTROL 0x035a
+#define regCB_COLOR4_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE 0x0361
+#define regCB_COLOR4_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR5_BASE 0x0363
+#define regCB_COLOR5_BASE_BASE_IDX 1
+#define regCB_COLOR5_VIEW 0x0366
+#define regCB_COLOR5_VIEW_BASE_IDX 1
+#define regCB_COLOR5_INFO 0x0367
+#define regCB_COLOR5_INFO_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB 0x0368
+#define regCB_COLOR5_ATTRIB_BASE_IDX 1
+#define regCB_COLOR5_FDCC_CONTROL 0x0369
+#define regCB_COLOR5_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE 0x0370
+#define regCB_COLOR5_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR6_BASE 0x0372
+#define regCB_COLOR6_BASE_BASE_IDX 1
+#define regCB_COLOR6_VIEW 0x0375
+#define regCB_COLOR6_VIEW_BASE_IDX 1
+#define regCB_COLOR6_INFO 0x0376
+#define regCB_COLOR6_INFO_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB 0x0377
+#define regCB_COLOR6_ATTRIB_BASE_IDX 1
+#define regCB_COLOR6_FDCC_CONTROL 0x0378
+#define regCB_COLOR6_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE 0x037f
+#define regCB_COLOR6_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR7_BASE 0x0381
+#define regCB_COLOR7_BASE_BASE_IDX 1
+#define regCB_COLOR7_VIEW 0x0384
+#define regCB_COLOR7_VIEW_BASE_IDX 1
+#define regCB_COLOR7_INFO 0x0385
+#define regCB_COLOR7_INFO_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB 0x0386
+#define regCB_COLOR7_ATTRIB_BASE_IDX 1
+#define regCB_COLOR7_FDCC_CONTROL 0x0387
+#define regCB_COLOR7_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE 0x038e
+#define regCB_COLOR7_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR0_BASE_EXT 0x0390
+#define regCB_COLOR0_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_BASE_EXT 0x0391
+#define regCB_COLOR1_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_BASE_EXT 0x0392
+#define regCB_COLOR2_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_BASE_EXT 0x0393
+#define regCB_COLOR3_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_BASE_EXT 0x0394
+#define regCB_COLOR4_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_BASE_EXT 0x0395
+#define regCB_COLOR5_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_BASE_EXT 0x0396
+#define regCB_COLOR6_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_BASE_EXT 0x0397
+#define regCB_COLOR7_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE_EXT 0x03a8
+#define regCB_COLOR0_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE_EXT 0x03a9
+#define regCB_COLOR1_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE_EXT 0x03aa
+#define regCB_COLOR2_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE_EXT 0x03ab
+#define regCB_COLOR3_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE_EXT 0x03ac
+#define regCB_COLOR4_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE_EXT 0x03ad
+#define regCB_COLOR5_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE_EXT 0x03ae
+#define regCB_COLOR6_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE_EXT 0x03af
+#define regCB_COLOR7_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB2 0x03b0
+#define regCB_COLOR0_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB2 0x03b1
+#define regCB_COLOR1_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB2 0x03b2
+#define regCB_COLOR2_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB2 0x03b3
+#define regCB_COLOR3_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB2 0x03b4
+#define regCB_COLOR4_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB2 0x03b5
+#define regCB_COLOR5_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB2 0x03b6
+#define regCB_COLOR6_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB2 0x03b7
+#define regCB_COLOR7_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB3 0x03b8
+#define regCB_COLOR0_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB3 0x03b9
+#define regCB_COLOR1_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB3 0x03ba
+#define regCB_COLOR2_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB3 0x03bb
+#define regCB_COLOR3_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB3 0x03bc
+#define regCB_COLOR4_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB3 0x03bd
+#define regCB_COLOR5_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB3 0x03be
+#define regCB_COLOR6_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB3 0x03bf
+#define regCB_COLOR7_ATTRIB3_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_cpdec
+// base address: 0x2a000
+#define regCONFIG_RESERVED_REG0 0x0800
+#define regCONFIG_RESERVED_REG0_BASE_IDX 1
+#define regCONFIG_RESERVED_REG1 0x0801
+#define regCONFIG_RESERVED_REG1_BASE_IDX 1
+#define regCP_MEC_CNTL 0x0802
+#define regCP_MEC_CNTL_BASE_IDX 1
+#define regCP_ME_CNTL 0x0803
+#define regCP_ME_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_grbmdec
+// base address: 0x2a400
+#define regGRBM_GFX_CNTL 0x0900
+#define regGRBM_GFX_CNTL_BASE_IDX 1
+#define regGRBM_NOWHERE 0x0901
+#define regGRBM_NOWHERE_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_padec
+// base address: 0x2a500
+#define regPA_SC_VRS_SURFACE_CNTL 0x0940
+#define regPA_SC_VRS_SURFACE_CNTL_BASE_IDX 1
+#define regPA_SC_ENHANCE 0x0941
+#define regPA_SC_ENHANCE_BASE_IDX 1
+#define regPA_SC_ENHANCE_1 0x0942
+#define regPA_SC_ENHANCE_1_BASE_IDX 1
+#define regPA_SC_ENHANCE_2 0x0943
+#define regPA_SC_ENHANCE_2_BASE_IDX 1
+#define regPA_SC_ENHANCE_3 0x0944
+#define regPA_SC_ENHANCE_3_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_OVERRIDE 0x0946
+#define regPA_SC_BINNER_CNTL_OVERRIDE_BASE_IDX 1
+#define regPA_SC_PBB_OVERRIDE_FLAG 0x0947
+#define regPA_SC_PBB_OVERRIDE_FLAG_BASE_IDX 1
+#define regPA_SC_DSM_CNTL 0x0948
+#define regPA_SC_DSM_CNTL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE 0x0949
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE_BASE_IDX 1
+#define regPA_SC_FIFO_SIZE 0x094a
+#define regPA_SC_FIFO_SIZE_BASE_IDX 1
+#define regPA_SC_IF_FIFO_SIZE 0x094b
+#define regPA_SC_IF_FIFO_SIZE_BASE_IDX 1
+#define regPA_SC_PACKER_WAVE_ID_CNTL 0x094c
+#define regPA_SC_PACKER_WAVE_ID_CNTL_BASE_IDX 1
+#define regPA_SC_ATM_CNTL 0x094d
+#define regPA_SC_ATM_CNTL_BASE_IDX 1
+#define regPA_SC_PKR_WAVE_TABLE_CNTL 0x094e
+#define regPA_SC_PKR_WAVE_TABLE_CNTL_BASE_IDX 1
+#define regPA_SC_FORCE_EOV_MAX_CNTS 0x094f
+#define regPA_SC_FORCE_EOV_MAX_CNTS_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_0 0x0950
+#define regPA_SC_BINNER_EVENT_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_1 0x0951
+#define regPA_SC_BINNER_EVENT_CNTL_1_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_2 0x0952
+#define regPA_SC_BINNER_EVENT_CNTL_2_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_3 0x0953
+#define regPA_SC_BINNER_EVENT_CNTL_3_BASE_IDX 1
+#define regPA_SC_BINNER_TIMEOUT_COUNTER 0x0954
+#define regPA_SC_BINNER_TIMEOUT_COUNTER_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_0 0x0955
+#define regPA_SC_BINNER_PERF_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_1 0x0956
+#define regPA_SC_BINNER_PERF_CNTL_1_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_2 0x0957
+#define regPA_SC_BINNER_PERF_CNTL_2_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_3 0x0958
+#define regPA_SC_BINNER_PERF_CNTL_3_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK 0x095b
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK 0x095c
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_LOCK 0x095d
+#define regPA_SC_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_PH_INTERFACE_FIFO_SIZE 0x095e
+#define regPA_PH_INTERFACE_FIFO_SIZE_BASE_IDX 1
+#define regPA_PH_ENHANCE 0x095f
+#define regPA_PH_ENHANCE_BASE_IDX 1
+#define regPA_SC_VRS_SURFACE_CNTL_1 0x0960
+#define regPA_SC_VRS_SURFACE_CNTL_1_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_sqdec
+// base address: 0x2a780
+#define regSQ_RUNTIME_CONFIG 0x09e0
+#define regSQ_RUNTIME_CONFIG_BASE_IDX 1
+#define regSQ_DEBUG_STS_GLOBAL 0x09e1
+#define regSQ_DEBUG_STS_GLOBAL_BASE_IDX 1
+#define regSQ_DEBUG_STS_GLOBAL2 0x09e2
+#define regSQ_DEBUG_STS_GLOBAL2_BASE_IDX 1
+#define regSH_MEM_BASES 0x09e3
+#define regSH_MEM_BASES_BASE_IDX 1
+#define regSH_MEM_CONFIG 0x09e4
+#define regSH_MEM_CONFIG_BASE_IDX 1
+#define regSQ_DEBUG 0x09e5
+#define regSQ_DEBUG_BASE_IDX 1
+#define regSQ_SHADER_TBA_LO 0x09e6
+#define regSQ_SHADER_TBA_LO_BASE_IDX 1
+#define regSQ_SHADER_TBA_HI 0x09e7
+#define regSQ_SHADER_TBA_HI_BASE_IDX 1
+#define regSQ_SHADER_TMA_LO 0x09e8
+#define regSQ_SHADER_TMA_LO_BASE_IDX 1
+#define regSQ_SHADER_TMA_HI 0x09e9
+#define regSQ_SHADER_TMA_HI_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_cpdec
+// base address: 0x2e000
+#define regCP_DEBUG_2 0x1800
+#define regCP_DEBUG_2_BASE_IDX 1
+#define regCP_FETCHER_SOURCE 0x1801
+#define regCP_FETCHER_SOURCE_BASE_IDX 1
+#define regCP_DFY_CNTL 0x1804
+#define regCP_DFY_CNTL_BASE_IDX 1
+#define regCP_DFY_STAT 0x1805
+#define regCP_DFY_STAT_BASE_IDX 1
+#define regCP_DFY_ADDR_HI 0x1806
+#define regCP_DFY_ADDR_HI_BASE_IDX 1
+#define regCP_DFY_ADDR_LO 0x1807
+#define regCP_DFY_ADDR_LO_BASE_IDX 1
+#define regCP_DFY_DATA_0 0x1808
+#define regCP_DFY_DATA_0_BASE_IDX 1
+#define regCP_DFY_DATA_1 0x1809
+#define regCP_DFY_DATA_1_BASE_IDX 1
+#define regCP_DFY_DATA_2 0x180a
+#define regCP_DFY_DATA_2_BASE_IDX 1
+#define regCP_DFY_DATA_3 0x180b
+#define regCP_DFY_DATA_3_BASE_IDX 1
+#define regCP_DFY_DATA_4 0x180c
+#define regCP_DFY_DATA_4_BASE_IDX 1
+#define regCP_DFY_DATA_5 0x180d
+#define regCP_DFY_DATA_5_BASE_IDX 1
+#define regCP_DFY_DATA_6 0x180e
+#define regCP_DFY_DATA_6_BASE_IDX 1
+#define regCP_DFY_DATA_7 0x180f
+#define regCP_DFY_DATA_7_BASE_IDX 1
+#define regCP_DFY_DATA_8 0x1810
+#define regCP_DFY_DATA_8_BASE_IDX 1
+#define regCP_DFY_DATA_9 0x1811
+#define regCP_DFY_DATA_9_BASE_IDX 1
+#define regCP_DFY_DATA_10 0x1812
+#define regCP_DFY_DATA_10_BASE_IDX 1
+#define regCP_DFY_DATA_11 0x1813
+#define regCP_DFY_DATA_11_BASE_IDX 1
+#define regCP_DFY_DATA_12 0x1814
+#define regCP_DFY_DATA_12_BASE_IDX 1
+#define regCP_DFY_DATA_13 0x1815
+#define regCP_DFY_DATA_13_BASE_IDX 1
+#define regCP_DFY_DATA_14 0x1816
+#define regCP_DFY_DATA_14_BASE_IDX 1
+#define regCP_DFY_DATA_15 0x1817
+#define regCP_DFY_DATA_15_BASE_IDX 1
+#define regCP_DFY_CMD 0x1818
+#define regCP_DFY_CMD_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_cpphqddec
+// base address: 0x2e080
+#define regCP_HPD_MES_ROQ_OFFSETS 0x1821
+#define regCP_HPD_MES_ROQ_OFFSETS_BASE_IDX 1
+#define regCP_HPD_ROQ_OFFSETS 0x1821
+#define regCP_HPD_ROQ_OFFSETS_BASE_IDX 1
+#define regCP_HPD_STATUS0 0x1822
+#define regCP_HPD_STATUS0_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_didtdec
+// base address: 0x2e400
+#define regDIDT_INDEX_AUTO_INCR_EN 0x1900
+#define regDIDT_INDEX_AUTO_INCR_EN_BASE_IDX 1
+#define regDIDT_EDC_CTRL 0x1901
+#define regDIDT_EDC_CTRL_BASE_IDX 1
+#define regDIDT_EDC_THROTTLE_CTRL 0x1902
+#define regDIDT_EDC_THROTTLE_CTRL_BASE_IDX 1
+#define regDIDT_EDC_THRESHOLD 0x1903
+#define regDIDT_EDC_THRESHOLD_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_1_2 0x1904
+#define regDIDT_EDC_STALL_PATTERN_1_2_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_3_4 0x1905
+#define regDIDT_EDC_STALL_PATTERN_3_4_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_5_6 0x1906
+#define regDIDT_EDC_STALL_PATTERN_5_6_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_7 0x1907
+#define regDIDT_EDC_STALL_PATTERN_7_BASE_IDX 1
+#define regDIDT_EDC_STATUS 0x1908
+#define regDIDT_EDC_STATUS_BASE_IDX 1
+#define regDIDT_EDC_DYNAMIC_THRESHOLD_RO 0x1909
+#define regDIDT_EDC_DYNAMIC_THRESHOLD_RO_BASE_IDX 1
+#define regDIDT_EDC_OVERFLOW 0x190a
+#define regDIDT_EDC_OVERFLOW_BASE_IDX 1
+#define regDIDT_EDC_ROLLING_POWER_DELTA 0x190b
+#define regDIDT_EDC_ROLLING_POWER_DELTA_BASE_IDX 1
+#define regDIDT_IND_INDEX 0x190c
+#define regDIDT_IND_INDEX_BASE_IDX 1
+#define regDIDT_IND_DATA 0x190d
+#define regDIDT_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_spidec
+// base address: 0x2e500
+#define regSPI_CDBG_SYS_GFX 0x1940
+#define regSPI_CDBG_SYS_GFX_BASE_IDX 1
+#define regSPI_CDBG_SYS_HP3D 0x1941
+#define regSPI_CDBG_SYS_HP3D_BASE_IDX 1
+#define regSPI_CDBG_SYS_CS0 0x1942
+#define regSPI_CDBG_SYS_CS0_BASE_IDX 1
+#define regSPI_GDBG_WAVE_CNTL 0x1943
+#define regSPI_GDBG_WAVE_CNTL_BASE_IDX 1
+#define regSPI_GDBG_TRAP_CONFIG 0x1944
+#define regSPI_GDBG_TRAP_CONFIG_BASE_IDX 1
+#define regSPI_GDBG_WAVE_CNTL3 0x1945
+#define regSPI_GDBG_WAVE_CNTL3_BASE_IDX 1
+#define regSPI_RESET_DEBUG 0x1946
+#define regSPI_RESET_DEBUG_BASE_IDX 1
+#define regSPI_ARB_CNTL_0 0x1949
+#define regSPI_ARB_CNTL_0_BASE_IDX 1
+#define regSPI_FEATURE_CTRL 0x194a
+#define regSPI_FEATURE_CTRL_BASE_IDX 1
+#define regSPI_SHADER_RSRC_LIMIT_CTRL 0x194b
+#define regSPI_SHADER_RSRC_LIMIT_CTRL_BASE_IDX 1
+#define regSPI_COMPUTE_WF_CTX_SAVE_STATUS 0x194e
+#define regSPI_COMPUTE_WF_CTX_SAVE_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_tcpdec
+// base address: 0x2e680
+#define regTCP_INVALIDATE 0x19a0
+#define regTCP_INVALIDATE_BASE_IDX 1
+#define regTCP_STATUS 0x19a1
+#define regTCP_STATUS_BASE_IDX 1
+#define regTCP_CNTL 0x19a2
+#define regTCP_CNTL_BASE_IDX 1
+#define regTCP_CNTL2 0x19a3
+#define regTCP_CNTL2_BASE_IDX 1
+#define regTCP_CREDIT 0x19a4
+#define regTCP_CREDIT_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_gdsdec
+// base address: 0x2e6c0
+#define regGDS_ENHANCE2 0x19b0
+#define regGDS_ENHANCE2_BASE_IDX 1
+#define regGDS_OA_CGPG_RESTORE 0x19b1
+#define regGDS_OA_CGPG_RESTORE_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_utcl1dec
+// base address: 0x2e600
+#define regUTCL1_CTRL_0 0x1980
+#define regUTCL1_CTRL_0_BASE_IDX 1
+#define regUTCL1_UTCL0_INVREQ_DISABLE 0x1984
+#define regUTCL1_UTCL0_INVREQ_DISABLE_BASE_IDX 1
+#define regUTCL1_CTRL_2 0x1985
+#define regUTCL1_CTRL_2_BASE_IDX 1
+#define regUTCL1_FIFO_SIZING 0x1986
+#define regUTCL1_FIFO_SIZING_BASE_IDX 1
+#define regGCRD_SA0_TARGETS_DISABLE 0x1987
+#define regGCRD_SA0_TARGETS_DISABLE_BASE_IDX 1
+#define regGCRD_SA1_TARGETS_DISABLE 0x1989
+#define regGCRD_SA1_TARGETS_DISABLE_BASE_IDX 1
+#define regGCRD_CREDIT_SAFE 0x198a
+#define regGCRD_CREDIT_SAFE_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_pmmdec
+// base address: 0x2e640
+#define regGCR_GENERAL_CNTL 0x1990
+#define regGCR_GENERAL_CNTL_BASE_IDX 1
+#define regGCR_TARGET_DISABLE 0x1991
+#define regGCR_TARGET_DISABLE_BASE_IDX 1
+#define regGCR_CMD_STATUS 0x1992
+#define regGCR_CMD_STATUS_BASE_IDX 1
+#define regGCR_SPARE 0x1993
+#define regGCR_SPARE_BASE_IDX 1
+#define regPMM_CNTL2 0x1999
+#define regPMM_CNTL2_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_gccacdec
+// base address: 0x2eb40
+#define regGC_CAC_CTRL_1 0x1ad0
+#define regGC_CAC_CTRL_1_BASE_IDX 1
+#define regGC_CAC_CTRL_2 0x1ad1
+#define regGC_CAC_CTRL_2_BASE_IDX 1
+#define regGC_CAC_AGGR_LOWER 0x1ad2
+#define regGC_CAC_AGGR_LOWER_BASE_IDX 1
+#define regGC_CAC_AGGR_UPPER 0x1ad3
+#define regGC_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE0_CAC_AGGR_LOWER 0x1ad4
+#define regSE0_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE0_CAC_AGGR_UPPER 0x1ad5
+#define regSE0_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE1_CAC_AGGR_LOWER 0x1ad6
+#define regSE1_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE1_CAC_AGGR_UPPER 0x1ad7
+#define regSE1_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE2_CAC_AGGR_LOWER 0x1ad8
+#define regSE2_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE2_CAC_AGGR_UPPER 0x1ad9
+#define regSE2_CAC_AGGR_UPPER_BASE_IDX 1
+#define regGC_CAC_AGGR_GFXCLK_CYCLE 0x1ae4
+#define regGC_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE0_CAC_AGGR_GFXCLK_CYCLE 0x1ae5
+#define regSE0_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE1_CAC_AGGR_GFXCLK_CYCLE 0x1ae6
+#define regSE1_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE2_CAC_AGGR_GFXCLK_CYCLE 0x1ae7
+#define regSE2_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regGC_EDC_CTRL 0x1aed
+#define regGC_EDC_CTRL_BASE_IDX 1
+#define regGC_EDC_THRESHOLD 0x1aee
+#define regGC_EDC_THRESHOLD_BASE_IDX 1
+#define regGC_EDC_STRETCH_CTRL 0x1aef
+#define regGC_EDC_STRETCH_CTRL_BASE_IDX 1
+#define regGC_EDC_STRETCH_THRESHOLD 0x1af0
+#define regGC_EDC_STRETCH_THRESHOLD_BASE_IDX 1
+#define regEDC_HYSTERESIS_CNTL 0x1af1
+#define regEDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGC_THROTTLE_CTRL 0x1af2
+#define regGC_THROTTLE_CTRL_BASE_IDX 1
+#define regGC_THROTTLE_CTRL1 0x1af3
+#define regGC_THROTTLE_CTRL1_BASE_IDX 1
+#define regPCC_STALL_PATTERN_CTRL 0x1af4
+#define regPCC_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_CTRL 0x1af5
+#define regPWRBRK_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regPCC_STALL_PATTERN_1_2 0x1af6
+#define regPCC_STALL_PATTERN_1_2_BASE_IDX 1
+#define regPCC_STALL_PATTERN_3_4 0x1af7
+#define regPCC_STALL_PATTERN_3_4_BASE_IDX 1
+#define regPCC_STALL_PATTERN_5_6 0x1af8
+#define regPCC_STALL_PATTERN_5_6_BASE_IDX 1
+#define regPCC_STALL_PATTERN_7 0x1af9
+#define regPCC_STALL_PATTERN_7_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_1_2 0x1afa
+#define regPWRBRK_STALL_PATTERN_1_2_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_3_4 0x1afb
+#define regPWRBRK_STALL_PATTERN_3_4_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_5_6 0x1afc
+#define regPWRBRK_STALL_PATTERN_5_6_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_7 0x1afd
+#define regPWRBRK_STALL_PATTERN_7_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_CTRL 0x1afe
+#define regDIDT_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_1_2 0x1aff
+#define regDIDT_STALL_PATTERN_1_2_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_3_4 0x1b00
+#define regDIDT_STALL_PATTERN_3_4_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_5_6 0x1b01
+#define regDIDT_STALL_PATTERN_5_6_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_7 0x1b02
+#define regDIDT_STALL_PATTERN_7_BASE_IDX 1
+#define regPCC_PWRBRK_HYSTERESIS_CTRL 0x1b03
+#define regPCC_PWRBRK_HYSTERESIS_CTRL_BASE_IDX 1
+#define regEDC_STRETCH_PERF_COUNTER 0x1b04
+#define regEDC_STRETCH_PERF_COUNTER_BASE_IDX 1
+#define regEDC_UNSTRETCH_PERF_COUNTER 0x1b05
+#define regEDC_UNSTRETCH_PERF_COUNTER_BASE_IDX 1
+#define regEDC_STRETCH_NUM_PERF_COUNTER 0x1b06
+#define regEDC_STRETCH_NUM_PERF_COUNTER_BASE_IDX 1
+#define regGC_EDC_STATUS 0x1b07
+#define regGC_EDC_STATUS_BASE_IDX 1
+#define regGC_EDC_OVERFLOW 0x1b08
+#define regGC_EDC_OVERFLOW_BASE_IDX 1
+#define regGC_EDC_ROLLING_POWER_DELTA 0x1b09
+#define regGC_EDC_ROLLING_POWER_DELTA_BASE_IDX 1
+#define regGC_THROTTLE_STATUS 0x1b0a
+#define regGC_THROTTLE_STATUS_BASE_IDX 1
+#define regEDC_PERF_COUNTER 0x1b0b
+#define regEDC_PERF_COUNTER_BASE_IDX 1
+#define regPCC_PERF_COUNTER 0x1b0c
+#define regPCC_PERF_COUNTER_BASE_IDX 1
+#define regPWRBRK_PERF_COUNTER 0x1b0d
+#define regPWRBRK_PERF_COUNTER_BASE_IDX 1
+#define regEDC_HYSTERESIS_STAT 0x1b0e
+#define regEDC_HYSTERESIS_STAT_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CP_0 0x1b10
+#define regGC_CAC_WEIGHT_CP_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CP_1 0x1b11
+#define regGC_CAC_WEIGHT_CP_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_0 0x1b12
+#define regGC_CAC_WEIGHT_EA_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_1 0x1b13
+#define regGC_CAC_WEIGHT_EA_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_2 0x1b14
+#define regGC_CAC_WEIGHT_EA_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_0 0x1b15
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_1 0x1b16
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_2 0x1b17
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_3 0x1b18
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_4 0x1b19
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_4_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_0 0x1b1a
+#define regGC_CAC_WEIGHT_UTCL2_VML2_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_1 0x1b1b
+#define regGC_CAC_WEIGHT_UTCL2_VML2_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_2 0x1b1c
+#define regGC_CAC_WEIGHT_UTCL2_VML2_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_0 0x1b1d
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_1 0x1b1e
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_2 0x1b1f
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_0 0x1b20
+#define regGC_CAC_WEIGHT_GDS_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_1 0x1b21
+#define regGC_CAC_WEIGHT_GDS_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_2 0x1b22
+#define regGC_CAC_WEIGHT_GDS_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_0 0x1b23
+#define regGC_CAC_WEIGHT_GE_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_1 0x1b24
+#define regGC_CAC_WEIGHT_GE_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_2 0x1b25
+#define regGC_CAC_WEIGHT_GE_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_3 0x1b26
+#define regGC_CAC_WEIGHT_GE_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PMM_0 0x1b2e
+#define regGC_CAC_WEIGHT_PMM_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_0 0x1b2f
+#define regGC_CAC_WEIGHT_GL2C_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_1 0x1b30
+#define regGC_CAC_WEIGHT_GL2C_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_2 0x1b31
+#define regGC_CAC_WEIGHT_GL2C_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_0 0x1b32
+#define regGC_CAC_WEIGHT_PH_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_1 0x1b33
+#define regGC_CAC_WEIGHT_PH_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_2 0x1b34
+#define regGC_CAC_WEIGHT_PH_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_3 0x1b35
+#define regGC_CAC_WEIGHT_PH_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_0 0x1b36
+#define regGC_CAC_WEIGHT_SDMA_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_1 0x1b37
+#define regGC_CAC_WEIGHT_SDMA_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_2 0x1b38
+#define regGC_CAC_WEIGHT_SDMA_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_3 0x1b39
+#define regGC_CAC_WEIGHT_SDMA_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_4 0x1b3a
+#define regGC_CAC_WEIGHT_SDMA_4_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_5 0x1b3b
+#define regGC_CAC_WEIGHT_SDMA_5_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CHC_0 0x1b3c
+#define regGC_CAC_WEIGHT_CHC_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CHC_1 0x1b3d
+#define regGC_CAC_WEIGHT_CHC_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GUS_0 0x1b3e
+#define regGC_CAC_WEIGHT_GUS_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GUS_1 0x1b3f
+#define regGC_CAC_WEIGHT_GUS_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_RLC_0 0x1b40
+#define regGC_CAC_WEIGHT_RLC_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GRBM_0 0x1b44
+#define regGC_CAC_WEIGHT_GRBM_0_BASE_IDX 1
+#define regGC_EDC_CLK_MONITOR_CTRL 0x1b56
+#define regGC_EDC_CLK_MONITOR_CTRL_BASE_IDX 1
+#define regGC_CAC_IND_INDEX 0x1b58
+#define regGC_CAC_IND_INDEX_BASE_IDX 1
+#define regGC_CAC_IND_DATA 0x1b59
+#define regGC_CAC_IND_DATA_BASE_IDX 1
+#define regSE_CAC_CTRL_1 0x1b70
+#define regSE_CAC_CTRL_1_BASE_IDX 1
+#define regSE_CAC_CTRL_2 0x1b71
+#define regSE_CAC_CTRL_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TA_0 0x1b72
+#define regSE_CAC_WEIGHT_TA_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_0 0x1b73
+#define regSE_CAC_WEIGHT_TD_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_1 0x1b74
+#define regSE_CAC_WEIGHT_TD_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_2 0x1b75
+#define regSE_CAC_WEIGHT_TD_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_3 0x1b76
+#define regSE_CAC_WEIGHT_TD_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_4 0x1b77
+#define regSE_CAC_WEIGHT_TD_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_5 0x1b78
+#define regSE_CAC_WEIGHT_TD_5_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_0 0x1b79
+#define regSE_CAC_WEIGHT_TCP_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_1 0x1b7a
+#define regSE_CAC_WEIGHT_TCP_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_2 0x1b7b
+#define regSE_CAC_WEIGHT_TCP_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_3 0x1b7c
+#define regSE_CAC_WEIGHT_TCP_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_0 0x1b7d
+#define regSE_CAC_WEIGHT_SQ_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_1 0x1b7e
+#define regSE_CAC_WEIGHT_SQ_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_2 0x1b7f
+#define regSE_CAC_WEIGHT_SQ_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SP_0 0x1b80
+#define regSE_CAC_WEIGHT_SP_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SP_1 0x1b81
+#define regSE_CAC_WEIGHT_SP_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_0 0x1b82
+#define regSE_CAC_WEIGHT_LDS_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_1 0x1b83
+#define regSE_CAC_WEIGHT_LDS_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_2 0x1b84
+#define regSE_CAC_WEIGHT_LDS_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_3 0x1b85
+#define regSE_CAC_WEIGHT_LDS_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQC_0 0x1b87
+#define regSE_CAC_WEIGHT_SQC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQC_1 0x1b88
+#define regSE_CAC_WEIGHT_SQC_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CU_0 0x1b89
+#define regSE_CAC_WEIGHT_CU_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_BCI_0 0x1b8a
+#define regSE_CAC_WEIGHT_BCI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_0 0x1b8b
+#define regSE_CAC_WEIGHT_CB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_1 0x1b8c
+#define regSE_CAC_WEIGHT_CB_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_2 0x1b8d
+#define regSE_CAC_WEIGHT_CB_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_3 0x1b8e
+#define regSE_CAC_WEIGHT_CB_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_4 0x1b8f
+#define regSE_CAC_WEIGHT_CB_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_5 0x1b90
+#define regSE_CAC_WEIGHT_CB_5_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_6 0x1b91
+#define regSE_CAC_WEIGHT_CB_6_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_7 0x1b92
+#define regSE_CAC_WEIGHT_CB_7_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_8 0x1b93
+#define regSE_CAC_WEIGHT_CB_8_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_9 0x1b94
+#define regSE_CAC_WEIGHT_CB_9_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_10 0x1b95
+#define regSE_CAC_WEIGHT_CB_10_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_11 0x1b96
+#define regSE_CAC_WEIGHT_CB_11_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_0 0x1b97
+#define regSE_CAC_WEIGHT_DB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_1 0x1b98
+#define regSE_CAC_WEIGHT_DB_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_2 0x1b99
+#define regSE_CAC_WEIGHT_DB_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_3 0x1b9a
+#define regSE_CAC_WEIGHT_DB_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_4 0x1b9b
+#define regSE_CAC_WEIGHT_DB_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_RMI_0 0x1b9c
+#define regSE_CAC_WEIGHT_RMI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_RMI_1 0x1b9d
+#define regSE_CAC_WEIGHT_RMI_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SX_0 0x1b9e
+#define regSE_CAC_WEIGHT_SX_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SXRB_0 0x1b9f
+#define regSE_CAC_WEIGHT_SXRB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_UTCL1_0 0x1ba0
+#define regSE_CAC_WEIGHT_UTCL1_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_0 0x1ba1
+#define regSE_CAC_WEIGHT_GL1C_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_1 0x1ba2
+#define regSE_CAC_WEIGHT_GL1C_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_2 0x1ba3
+#define regSE_CAC_WEIGHT_GL1C_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_0 0x1ba4
+#define regSE_CAC_WEIGHT_SPI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_1 0x1ba5
+#define regSE_CAC_WEIGHT_SPI_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_2 0x1ba6
+#define regSE_CAC_WEIGHT_SPI_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PC_0 0x1ba7
+#define regSE_CAC_WEIGHT_PC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_0 0x1ba8
+#define regSE_CAC_WEIGHT_PA_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_1 0x1ba9
+#define regSE_CAC_WEIGHT_PA_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_2 0x1baa
+#define regSE_CAC_WEIGHT_PA_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_3 0x1bab
+#define regSE_CAC_WEIGHT_PA_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_0 0x1bac
+#define regSE_CAC_WEIGHT_SC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_1 0x1bad
+#define regSE_CAC_WEIGHT_SC_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_2 0x1bae
+#define regSE_CAC_WEIGHT_SC_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_3 0x1baf
+#define regSE_CAC_WEIGHT_SC_3_BASE_IDX 1
+#define regSE_CAC_WINDOW_AGGR_VALUE 0x1bb0
+#define regSE_CAC_WINDOW_AGGR_VALUE_BASE_IDX 1
+#define regSE_CAC_WINDOW_GFXCLK_CYCLE 0x1bb1
+#define regSE_CAC_WINDOW_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE_CAC_IND_INDEX 0x1bce
+#define regSE_CAC_IND_INDEX_BASE_IDX 1
+#define regSE_CAC_IND_DATA 0x1bcf
+#define regSE_CAC_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly2_spidec
+// base address: 0x2f000
+#define regSPI_RESOURCE_RESERVE_CU_0 0x1c00
+#define regSPI_RESOURCE_RESERVE_CU_0_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_1 0x1c01
+#define regSPI_RESOURCE_RESERVE_CU_1_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_2 0x1c02
+#define regSPI_RESOURCE_RESERVE_CU_2_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_3 0x1c03
+#define regSPI_RESOURCE_RESERVE_CU_3_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_4 0x1c04
+#define regSPI_RESOURCE_RESERVE_CU_4_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_5 0x1c05
+#define regSPI_RESOURCE_RESERVE_CU_5_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_6 0x1c06
+#define regSPI_RESOURCE_RESERVE_CU_6_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_7 0x1c07
+#define regSPI_RESOURCE_RESERVE_CU_7_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_8 0x1c08
+#define regSPI_RESOURCE_RESERVE_CU_8_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_9 0x1c09
+#define regSPI_RESOURCE_RESERVE_CU_9_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_10 0x1c0a
+#define regSPI_RESOURCE_RESERVE_CU_10_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_11 0x1c0b
+#define regSPI_RESOURCE_RESERVE_CU_11_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_12 0x1c0c
+#define regSPI_RESOURCE_RESERVE_CU_12_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_13 0x1c0d
+#define regSPI_RESOURCE_RESERVE_CU_13_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_14 0x1c0e
+#define regSPI_RESOURCE_RESERVE_CU_14_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_15 0x1c0f
+#define regSPI_RESOURCE_RESERVE_CU_15_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_0 0x1c10
+#define regSPI_RESOURCE_RESERVE_EN_CU_0_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_1 0x1c11
+#define regSPI_RESOURCE_RESERVE_EN_CU_1_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_2 0x1c12
+#define regSPI_RESOURCE_RESERVE_EN_CU_2_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_3 0x1c13
+#define regSPI_RESOURCE_RESERVE_EN_CU_3_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_4 0x1c14
+#define regSPI_RESOURCE_RESERVE_EN_CU_4_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_5 0x1c15
+#define regSPI_RESOURCE_RESERVE_EN_CU_5_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_6 0x1c16
+#define regSPI_RESOURCE_RESERVE_EN_CU_6_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_7 0x1c17
+#define regSPI_RESOURCE_RESERVE_EN_CU_7_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_8 0x1c18
+#define regSPI_RESOURCE_RESERVE_EN_CU_8_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_9 0x1c19
+#define regSPI_RESOURCE_RESERVE_EN_CU_9_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_10 0x1c1a
+#define regSPI_RESOURCE_RESERVE_EN_CU_10_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_11 0x1c1b
+#define regSPI_RESOURCE_RESERVE_EN_CU_11_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_12 0x1c1c
+#define regSPI_RESOURCE_RESERVE_EN_CU_12_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_13 0x1c1d
+#define regSPI_RESOURCE_RESERVE_EN_CU_13_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_14 0x1c1e
+#define regSPI_RESOURCE_RESERVE_EN_CU_14_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_15 0x1c1f
+#define regSPI_RESOURCE_RESERVE_EN_CU_15_BASE_IDX 1
+
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define regCP_EOP_DONE_ADDR_LO 0x2000
+#define regCP_EOP_DONE_ADDR_LO_BASE_IDX 1
+#define regCP_EOP_DONE_ADDR_HI 0x2001
+#define regCP_EOP_DONE_ADDR_HI_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_LO 0x2002
+#define regCP_EOP_DONE_DATA_LO_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_HI 0x2003
+#define regCP_EOP_DONE_DATA_HI_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_LO 0x2004
+#define regCP_EOP_LAST_FENCE_LO_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_HI 0x2005
+#define regCP_EOP_LAST_FENCE_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_LO 0x2018
+#define regCP_PIPE_STATS_ADDR_LO_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_HI 0x2019
+#define regCP_PIPE_STATS_ADDR_HI_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_LO 0x201a
+#define regCP_VGT_IAVERT_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_HI 0x201b
+#define regCP_VGT_IAVERT_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_LO 0x201c
+#define regCP_VGT_IAPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_HI 0x201d
+#define regCP_VGT_IAPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_LO 0x201e
+#define regCP_VGT_GSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_HI 0x201f
+#define regCP_VGT_GSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_LO 0x2020
+#define regCP_VGT_VSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_HI 0x2021
+#define regCP_VGT_VSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_LO 0x2022
+#define regCP_VGT_GSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_HI 0x2023
+#define regCP_VGT_GSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_LO 0x2024
+#define regCP_VGT_HSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_HI 0x2025
+#define regCP_VGT_HSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_LO 0x2026
+#define regCP_VGT_DSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_HI 0x2027
+#define regCP_VGT_DSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_LO 0x2028
+#define regCP_PA_CINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_HI 0x2029
+#define regCP_PA_CINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_LO 0x202a
+#define regCP_PA_CPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_HI 0x202b
+#define regCP_PA_CPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_LO 0x202c
+#define regCP_SC_PSINVOC_COUNT0_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_HI 0x202d
+#define regCP_SC_PSINVOC_COUNT0_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_LO 0x202e
+#define regCP_SC_PSINVOC_COUNT1_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_HI 0x202f
+#define regCP_SC_PSINVOC_COUNT1_HI_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_LO 0x2030
+#define regCP_VGT_CSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_HI 0x2031
+#define regCP_VGT_CSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_ASINVOC_COUNT_LO 0x2032
+#define regCP_VGT_ASINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_ASINVOC_COUNT_HI 0x2033
+#define regCP_VGT_ASINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_CONTROL 0x203d
+#define regCP_PIPE_STATS_CONTROL_BASE_IDX 1
+#define regSCRATCH_REG0 0x2040
+#define regSCRATCH_REG0_BASE_IDX 1
+#define regSCRATCH_REG1 0x2041
+#define regSCRATCH_REG1_BASE_IDX 1
+#define regSCRATCH_REG2 0x2042
+#define regSCRATCH_REG2_BASE_IDX 1
+#define regSCRATCH_REG3 0x2043
+#define regSCRATCH_REG3_BASE_IDX 1
+#define regSCRATCH_REG4 0x2044
+#define regSCRATCH_REG4_BASE_IDX 1
+#define regSCRATCH_REG5 0x2045
+#define regSCRATCH_REG5_BASE_IDX 1
+#define regSCRATCH_REG6 0x2046
+#define regSCRATCH_REG6_BASE_IDX 1
+#define regSCRATCH_REG7 0x2047
+#define regSCRATCH_REG7_BASE_IDX 1
+#define regSCRATCH_REG_ATOMIC 0x2048
+#define regSCRATCH_REG_ATOMIC_BASE_IDX 1
+#define regSCRATCH_REG_CMPSWAP_ATOMIC 0x2048
+#define regSCRATCH_REG_CMPSWAP_ATOMIC_BASE_IDX 1
+#define regCP_APPEND_DDID_CNT 0x204b
+#define regCP_APPEND_DDID_CNT_BASE_IDX 1
+#define regCP_APPEND_DATA_HI 0x204c
+#define regCP_APPEND_DATA_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_HI 0x204d
+#define regCP_APPEND_LAST_CS_FENCE_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_HI 0x204e
+#define regCP_APPEND_LAST_PS_FENCE_HI_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_LO 0x2052
+#define regCP_PFP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_HI 0x2053
+#define regCP_PFP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO 0x2054
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI 0x2055
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO 0x2056
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI 0x2057
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_APPEND_ADDR_LO 0x2058
+#define regCP_APPEND_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_ADDR_HI 0x2059
+#define regCP_APPEND_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_DATA 0x205a
+#define regCP_APPEND_DATA_BASE_IDX 1
+#define regCP_APPEND_DATA_LO 0x205a
+#define regCP_APPEND_DATA_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_LO 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_LO 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_LO 0x205d
+#define regCP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_LO 0x205d
+#define regCP_ME_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_HI 0x205e
+#define regCP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_HI 0x205e
+#define regCP_ME_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_MC_WADDR_LO 0x2069
+#define regCP_ME_MC_WADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_WADDR_HI 0x206a
+#define regCP_ME_MC_WADDR_HI_BASE_IDX 1
+#define regCP_ME_MC_WDATA_LO 0x206b
+#define regCP_ME_MC_WDATA_LO_BASE_IDX 1
+#define regCP_ME_MC_WDATA_HI 0x206c
+#define regCP_ME_MC_WDATA_HI_BASE_IDX 1
+#define regCP_ME_MC_RADDR_LO 0x206d
+#define regCP_ME_MC_RADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_RADDR_HI 0x206e
+#define regCP_ME_MC_RADDR_HI_BASE_IDX 1
+#define regCP_SEM_WAIT_TIMER 0x206f
+#define regCP_SEM_WAIT_TIMER_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_LO 0x2070
+#define regCP_SIG_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_HI 0x2071
+#define regCP_SIG_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_WAIT_REG_MEM_TIMEOUT 0x2074
+#define regCP_WAIT_REG_MEM_TIMEOUT_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_LO 0x2075
+#define regCP_WAIT_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_HI 0x2076
+#define regCP_WAIT_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CONTROL 0x2077
+#define regCP_DMA_PFP_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_CONTROL 0x2078
+#define regCP_DMA_ME_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR 0x2080
+#define regCP_DMA_ME_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR_HI 0x2081
+#define regCP_DMA_ME_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR 0x2082
+#define regCP_DMA_ME_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR_HI 0x2083
+#define regCP_DMA_ME_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_COMMAND 0x2084
+#define regCP_DMA_ME_COMMAND_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR 0x2085
+#define regCP_DMA_PFP_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR_HI 0x2086
+#define regCP_DMA_PFP_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR 0x2087
+#define regCP_DMA_PFP_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR_HI 0x2088
+#define regCP_DMA_PFP_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_COMMAND 0x2089
+#define regCP_DMA_PFP_COMMAND_BASE_IDX 1
+#define regCP_DMA_CNTL 0x208a
+#define regCP_DMA_CNTL_BASE_IDX 1
+#define regCP_DMA_READ_TAGS 0x208b
+#define regCP_DMA_READ_TAGS_BASE_IDX 1
+#define regCP_PFP_IB_CONTROL 0x208d
+#define regCP_PFP_IB_CONTROL_BASE_IDX 1
+#define regCP_PFP_LOAD_CONTROL 0x208e
+#define regCP_PFP_LOAD_CONTROL_BASE_IDX 1
+#define regCP_SCRATCH_INDEX 0x208f
+#define regCP_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_SCRATCH_DATA 0x2090
+#define regCP_SCRATCH_DATA_BASE_IDX 1
+#define regCP_RB_OFFSET 0x2091
+#define regCP_RB_OFFSET_BASE_IDX 1
+#define regCP_IB1_OFFSET 0x2092
+#define regCP_IB1_OFFSET_BASE_IDX 1
+#define regCP_IB2_OFFSET 0x2093
+#define regCP_IB2_OFFSET_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_BEGIN 0x2094
+#define regCP_IB1_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_END 0x2095
+#define regCP_IB1_PREAMBLE_END_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_BEGIN 0x2096
+#define regCP_IB2_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_END 0x2097
+#define regCP_IB2_PREAMBLE_END_BASE_IDX 1
+#define regCP_DMA_ME_CMD_ADDR_LO 0x209c
+#define regCP_DMA_ME_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_DMA_ME_CMD_ADDR_HI 0x209d
+#define regCP_DMA_ME_CMD_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CMD_ADDR_LO 0x209e
+#define regCP_DMA_PFP_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_DMA_PFP_CMD_ADDR_HI 0x209f
+#define regCP_DMA_PFP_CMD_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_CMD_ADDR_LO 0x20a0
+#define regCP_APPEND_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_CMD_ADDR_HI 0x20a1
+#define regCP_APPEND_CMD_ADDR_HI_BASE_IDX 1
+#define regUCONFIG_RESERVED_REG0 0x20a2
+#define regUCONFIG_RESERVED_REG0_BASE_IDX 1
+#define regUCONFIG_RESERVED_REG1 0x20a3
+#define regUCONFIG_RESERVED_REG1_BASE_IDX 1
+#define regCP_PA_MSPRIM_COUNT_LO 0x20a4
+#define regCP_PA_MSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_MSPRIM_COUNT_HI 0x20a5
+#define regCP_PA_MSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_GE_MSINVOC_COUNT_LO 0x20a6
+#define regCP_GE_MSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_GE_MSINVOC_COUNT_HI 0x20a7
+#define regCP_GE_MSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_IB1_CMD_BUFSZ 0x20c0
+#define regCP_IB1_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB2_CMD_BUFSZ 0x20c1
+#define regCP_IB2_CMD_BUFSZ_BASE_IDX 1
+#define regCP_ST_CMD_BUFSZ 0x20c2
+#define regCP_ST_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB1_BASE_LO 0x20cc
+#define regCP_IB1_BASE_LO_BASE_IDX 1
+#define regCP_IB1_BASE_HI 0x20cd
+#define regCP_IB1_BASE_HI_BASE_IDX 1
+#define regCP_IB1_BUFSZ 0x20ce
+#define regCP_IB1_BUFSZ_BASE_IDX 1
+#define regCP_IB2_BASE_LO 0x20cf
+#define regCP_IB2_BASE_LO_BASE_IDX 1
+#define regCP_IB2_BASE_HI 0x20d0
+#define regCP_IB2_BASE_HI_BASE_IDX 1
+#define regCP_IB2_BUFSZ 0x20d1
+#define regCP_IB2_BUFSZ_BASE_IDX 1
+#define regCP_ST_BASE_LO 0x20d2
+#define regCP_ST_BASE_LO_BASE_IDX 1
+#define regCP_ST_BASE_HI 0x20d3
+#define regCP_ST_BASE_HI_BASE_IDX 1
+#define regCP_ST_BUFSZ 0x20d4
+#define regCP_ST_BUFSZ_BASE_IDX 1
+#define regCP_EOP_DONE_EVENT_CNTL 0x20d5
+#define regCP_EOP_DONE_EVENT_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_CNTL 0x20d6
+#define regCP_EOP_DONE_DATA_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_CNTX_ID 0x20d7
+#define regCP_EOP_DONE_CNTX_ID_BASE_IDX 1
+#define regCP_DB_BASE_LO 0x20d8
+#define regCP_DB_BASE_LO_BASE_IDX 1
+#define regCP_DB_BASE_HI 0x20d9
+#define regCP_DB_BASE_HI_BASE_IDX 1
+#define regCP_DB_BUFSZ 0x20da
+#define regCP_DB_BUFSZ_BASE_IDX 1
+#define regCP_DB_CMD_BUFSZ 0x20db
+#define regCP_DB_CMD_BUFSZ_BASE_IDX 1
+#define regCP_PFP_COMPLETION_STATUS 0x20ec
+#define regCP_PFP_COMPLETION_STATUS_BASE_IDX 1
+#define regCP_PRED_NOT_VISIBLE 0x20ee
+#define regCP_PRED_NOT_VISIBLE_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR 0x20f0
+#define regCP_PFP_METADATA_BASE_ADDR_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR_HI 0x20f1
+#define regCP_PFP_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR 0x20f4
+#define regCP_DRAW_INDX_INDR_ADDR_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR_HI 0x20f5
+#define regCP_DRAW_INDX_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR 0x20f6
+#define regCP_DISPATCH_INDR_ADDR_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR_HI 0x20f7
+#define regCP_DISPATCH_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR 0x20f8
+#define regCP_INDEX_BASE_ADDR_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR_HI 0x20f9
+#define regCP_INDEX_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_TYPE 0x20fa
+#define regCP_INDEX_TYPE_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR 0x20fb
+#define regCP_GDS_BKUP_ADDR_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR_HI 0x20fc
+#define regCP_GDS_BKUP_ADDR_HI_BASE_IDX 1
+#define regCP_SAMPLE_STATUS 0x20fd
+#define regCP_SAMPLE_STATUS_BASE_IDX 1
+#define regCP_ME_COHER_CNTL 0x20fe
+#define regCP_ME_COHER_CNTL_BASE_IDX 1
+#define regCP_ME_COHER_SIZE 0x20ff
+#define regCP_ME_COHER_SIZE_BASE_IDX 1
+#define regCP_ME_COHER_SIZE_HI 0x2100
+#define regCP_ME_COHER_SIZE_HI_BASE_IDX 1
+#define regCP_ME_COHER_BASE 0x2101
+#define regCP_ME_COHER_BASE_BASE_IDX 1
+#define regCP_ME_COHER_BASE_HI 0x2102
+#define regCP_ME_COHER_BASE_HI_BASE_IDX 1
+#define regCP_ME_COHER_STATUS 0x2103
+#define regCP_ME_COHER_STATUS_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_0 0x2140
+#define regRLC_GPM_PERF_COUNT_0_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_1 0x2141
+#define regRLC_GPM_PERF_COUNT_1_BASE_IDX 1
+#define regGRBM_GFX_INDEX 0x2200
+#define regGRBM_GFX_INDEX_BASE_IDX 1
+#define regVGT_PRIMITIVE_TYPE 0x2242
+#define regVGT_PRIMITIVE_TYPE_BASE_IDX 1
+#define regVGT_INDEX_TYPE 0x2243
+#define regVGT_INDEX_TYPE_BASE_IDX 1
+#define regGE_MIN_VTX_INDX 0x2249
+#define regGE_MIN_VTX_INDX_BASE_IDX 1
+#define regGE_INDX_OFFSET 0x224a
+#define regGE_INDX_OFFSET_BASE_IDX 1
+#define regGE_MULTI_PRIM_IB_RESET_EN 0x224b
+#define regGE_MULTI_PRIM_IB_RESET_EN_BASE_IDX 1
+#define regVGT_NUM_INDICES 0x224c
+#define regVGT_NUM_INDICES_BASE_IDX 1
+#define regVGT_NUM_INSTANCES 0x224d
+#define regVGT_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_TF_RING_SIZE 0x224e
+#define regVGT_TF_RING_SIZE_BASE_IDX 1
+#define regVGT_HS_OFFCHIP_PARAM 0x224f
+#define regVGT_HS_OFFCHIP_PARAM_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE 0x2250
+#define regVGT_TF_MEMORY_BASE_BASE_IDX 1
+#define regGE_MAX_VTX_INDX 0x2259
+#define regGE_MAX_VTX_INDX_BASE_IDX 1
+#define regVGT_INSTANCE_BASE_ID 0x225a
+#define regVGT_INSTANCE_BASE_ID_BASE_IDX 1
+#define regGE_CNTL 0x225b
+#define regGE_CNTL_BASE_IDX 1
+#define regGE_USER_VGPR1 0x225c
+#define regGE_USER_VGPR1_BASE_IDX 1
+#define regGE_USER_VGPR2 0x225d
+#define regGE_USER_VGPR2_BASE_IDX 1
+#define regGE_USER_VGPR3 0x225e
+#define regGE_USER_VGPR3_BASE_IDX 1
+#define regGE_STEREO_CNTL 0x225f
+#define regGE_STEREO_CNTL_BASE_IDX 1
+#define regGE_PC_ALLOC 0x2260
+#define regGE_PC_ALLOC_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE_HI 0x2261
+#define regVGT_TF_MEMORY_BASE_HI_BASE_IDX 1
+#define regGE_USER_VGPR_EN 0x2262
+#define regGE_USER_VGPR_EN_BASE_IDX 1
+#define regGE_GS_FAST_LAUNCH_WG_DIM 0x2264
+#define regGE_GS_FAST_LAUNCH_WG_DIM_BASE_IDX 1
+#define regGE_GS_FAST_LAUNCH_WG_DIM_1 0x2265
+#define regGE_GS_FAST_LAUNCH_WG_DIM_1_BASE_IDX 1
+#define regVGT_GS_OUT_PRIM_TYPE 0x2266
+#define regVGT_GS_OUT_PRIM_TYPE_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_VALUE 0x2280
+#define regPA_SU_LINE_STIPPLE_VALUE_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE_STATE 0x2281
+#define regPA_SC_LINE_STIPPLE_STATE_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_0 0x2284
+#define regPA_SC_SCREEN_EXTENT_MIN_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_0 0x2285
+#define regPA_SC_SCREEN_EXTENT_MAX_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_1 0x2286
+#define regPA_SC_SCREEN_EXTENT_MIN_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_1 0x228b
+#define regPA_SC_SCREEN_EXTENT_MAX_1_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN 0x22a0
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_H 0x22a1
+#define regPA_SC_P3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_V 0x22a2
+#define regPA_SC_P3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE 0x22a3
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT 0x22a4
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN 0x22a8
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_H 0x22a9
+#define regPA_SC_HP3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_V 0x22aa
+#define regPA_SC_HP3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE 0x22ab
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT 0x22ac
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_EN 0x22b0
+#define regPA_SC_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_H 0x22b1
+#define regPA_SC_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_V 0x22b2
+#define regPA_SC_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE 0x22b3
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_COUNT 0x22b4
+#define regPA_SC_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_0 0x2340
+#define regSQ_THREAD_TRACE_USERDATA_0_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_1 0x2341
+#define regSQ_THREAD_TRACE_USERDATA_1_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_2 0x2342
+#define regSQ_THREAD_TRACE_USERDATA_2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_3 0x2343
+#define regSQ_THREAD_TRACE_USERDATA_3_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_4 0x2344
+#define regSQ_THREAD_TRACE_USERDATA_4_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_5 0x2345
+#define regSQ_THREAD_TRACE_USERDATA_5_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_6 0x2346
+#define regSQ_THREAD_TRACE_USERDATA_6_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_7 0x2347
+#define regSQ_THREAD_TRACE_USERDATA_7_BASE_IDX 1
+#define regSQC_CACHES 0x2348
+#define regSQC_CACHES_BASE_IDX 1
+#define regTA_CS_BC_BASE_ADDR 0x2380
+#define regTA_CS_BC_BASE_ADDR_BASE_IDX 1
+#define regTA_CS_BC_BASE_ADDR_HI 0x2381
+#define regTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_LOW 0x23c0
+#define regDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_HI 0x23c1
+#define regDB_OCCLUSION_COUNT0_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_LOW 0x23c2
+#define regDB_OCCLUSION_COUNT1_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_HI 0x23c3
+#define regDB_OCCLUSION_COUNT1_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_LOW 0x23c4
+#define regDB_OCCLUSION_COUNT2_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_HI 0x23c5
+#define regDB_OCCLUSION_COUNT2_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_LOW 0x23c6
+#define regDB_OCCLUSION_COUNT3_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_HI 0x23c7
+#define regDB_OCCLUSION_COUNT3_HI_BASE_IDX 1
+#define regGDS_RD_ADDR 0x2400
+#define regGDS_RD_ADDR_BASE_IDX 1
+#define regGDS_RD_DATA 0x2401
+#define regGDS_RD_DATA_BASE_IDX 1
+#define regGDS_RD_BURST_ADDR 0x2402
+#define regGDS_RD_BURST_ADDR_BASE_IDX 1
+#define regGDS_RD_BURST_COUNT 0x2403
+#define regGDS_RD_BURST_COUNT_BASE_IDX 1
+#define regGDS_RD_BURST_DATA 0x2404
+#define regGDS_RD_BURST_DATA_BASE_IDX 1
+#define regGDS_WR_ADDR 0x2405
+#define regGDS_WR_ADDR_BASE_IDX 1
+#define regGDS_WR_DATA 0x2406
+#define regGDS_WR_DATA_BASE_IDX 1
+#define regGDS_WR_BURST_ADDR 0x2407
+#define regGDS_WR_BURST_ADDR_BASE_IDX 1
+#define regGDS_WR_BURST_DATA 0x2408
+#define regGDS_WR_BURST_DATA_BASE_IDX 1
+#define regGDS_WRITE_COMPLETE 0x2409
+#define regGDS_WRITE_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_CNTL 0x240a
+#define regGDS_ATOM_CNTL_BASE_IDX 1
+#define regGDS_ATOM_COMPLETE 0x240b
+#define regGDS_ATOM_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_BASE 0x240c
+#define regGDS_ATOM_BASE_BASE_IDX 1
+#define regGDS_ATOM_SIZE 0x240d
+#define regGDS_ATOM_SIZE_BASE_IDX 1
+#define regGDS_ATOM_OFFSET0 0x240e
+#define regGDS_ATOM_OFFSET0_BASE_IDX 1
+#define regGDS_ATOM_OFFSET1 0x240f
+#define regGDS_ATOM_OFFSET1_BASE_IDX 1
+#define regGDS_ATOM_DST 0x2410
+#define regGDS_ATOM_DST_BASE_IDX 1
+#define regGDS_ATOM_OP 0x2411
+#define regGDS_ATOM_OP_BASE_IDX 1
+#define regGDS_ATOM_SRC0 0x2412
+#define regGDS_ATOM_SRC0_BASE_IDX 1
+#define regGDS_ATOM_SRC0_U 0x2413
+#define regGDS_ATOM_SRC0_U_BASE_IDX 1
+#define regGDS_ATOM_SRC1 0x2414
+#define regGDS_ATOM_SRC1_BASE_IDX 1
+#define regGDS_ATOM_SRC1_U 0x2415
+#define regGDS_ATOM_SRC1_U_BASE_IDX 1
+#define regGDS_ATOM_READ0 0x2416
+#define regGDS_ATOM_READ0_BASE_IDX 1
+#define regGDS_ATOM_READ0_U 0x2417
+#define regGDS_ATOM_READ0_U_BASE_IDX 1
+#define regGDS_ATOM_READ1 0x2418
+#define regGDS_ATOM_READ1_BASE_IDX 1
+#define regGDS_ATOM_READ1_U 0x2419
+#define regGDS_ATOM_READ1_U_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNTL 0x241a
+#define regGDS_GWS_RESOURCE_CNTL_BASE_IDX 1
+#define regGDS_GWS_RESOURCE 0x241b
+#define regGDS_GWS_RESOURCE_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNT 0x241c
+#define regGDS_GWS_RESOURCE_CNT_BASE_IDX 1
+#define regGDS_OA_CNTL 0x241d
+#define regGDS_OA_CNTL_BASE_IDX 1
+#define regGDS_OA_COUNTER 0x241e
+#define regGDS_OA_COUNTER_BASE_IDX 1
+#define regGDS_OA_ADDRESS 0x241f
+#define regGDS_OA_ADDRESS_BASE_IDX 1
+#define regGDS_OA_INCDEC 0x2420
+#define regGDS_OA_INCDEC_BASE_IDX 1
+#define regGDS_OA_RING_SIZE 0x2421
+#define regGDS_OA_RING_SIZE_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_0 0x2422
+#define regGDS_STRMOUT_DWORDS_WRITTEN_0_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_1 0x2423
+#define regGDS_STRMOUT_DWORDS_WRITTEN_1_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_2 0x2424
+#define regGDS_STRMOUT_DWORDS_WRITTEN_2_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_3 0x2425
+#define regGDS_STRMOUT_DWORDS_WRITTEN_3_BASE_IDX 1
+#define regGDS_GS_0 0x2426
+#define regGDS_GS_0_BASE_IDX 1
+#define regGDS_GS_1 0x2427
+#define regGDS_GS_1_BASE_IDX 1
+#define regGDS_GS_2 0x2428
+#define regGDS_GS_2_BASE_IDX 1
+#define regGDS_GS_3 0x2429
+#define regGDS_GS_3_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_LO 0x242a
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_HI 0x242b
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_LO 0x242c
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_HI 0x242d
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_LO 0x242e
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_HI 0x242f
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_LO 0x2430
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_HI 0x2431
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_LO 0x2432
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_HI 0x2433
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_LO 0x2434
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_HI 0x2435
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_LO 0x2436
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_HI 0x2437
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_LO 0x2438
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_HI 0x2439
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_HI_BASE_IDX 1
+#define regSPI_CONFIG_CNTL 0x2440
+#define regSPI_CONFIG_CNTL_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_1 0x2441
+#define regSPI_CONFIG_CNTL_1_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_2 0x2442
+#define regSPI_CONFIG_CNTL_2_BASE_IDX 1
+#define regSPI_WAVE_LIMIT_CNTL 0x2443
+#define regSPI_WAVE_LIMIT_CNTL_BASE_IDX 1
+#define regSPI_GS_THROTTLE_CNTL1 0x2444
+#define regSPI_GS_THROTTLE_CNTL1_BASE_IDX 1
+#define regSPI_GS_THROTTLE_CNTL2 0x2445
+#define regSPI_GS_THROTTLE_CNTL2_BASE_IDX 1
+#define regSPI_ATTRIBUTE_RING_BASE 0x2446
+#define regSPI_ATTRIBUTE_RING_BASE_BASE_IDX 1
+#define regSPI_ATTRIBUTE_RING_SIZE 0x2447
+#define regSPI_ATTRIBUTE_RING_SIZE_BASE_IDX 1
+
+
+// addressBlock: gc_cprs64dec
+// base address: 0x32000
+#define regCP_MES_PRGRM_CNTR_START 0x2800
+#define regCP_MES_PRGRM_CNTR_START_BASE_IDX 1
+#define regCP_MES_INTR_ROUTINE_START 0x2801
+#define regCP_MES_INTR_ROUTINE_START_BASE_IDX 1
+#define regCP_MES_MTVEC_LO 0x2801
+#define regCP_MES_MTVEC_LO_BASE_IDX 1
+#define regCP_MES_INTR_ROUTINE_START_HI 0x2802
+#define regCP_MES_INTR_ROUTINE_START_HI_BASE_IDX 1
+#define regCP_MES_MTVEC_HI 0x2802
+#define regCP_MES_MTVEC_HI_BASE_IDX 1
+#define regCP_MES_CNTL 0x2807
+#define regCP_MES_CNTL_BASE_IDX 1
+#define regCP_MES_PIPE_PRIORITY_CNTS 0x2808
+#define regCP_MES_PIPE_PRIORITY_CNTS_BASE_IDX 1
+#define regCP_MES_PIPE0_PRIORITY 0x2809
+#define regCP_MES_PIPE0_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE1_PRIORITY 0x280a
+#define regCP_MES_PIPE1_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE2_PRIORITY 0x280b
+#define regCP_MES_PIPE2_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE3_PRIORITY 0x280c
+#define regCP_MES_PIPE3_PRIORITY_BASE_IDX 1
+#define regCP_MES_HEADER_DUMP 0x280d
+#define regCP_MES_HEADER_DUMP_BASE_IDX 1
+#define regCP_MES_MIE_LO 0x280e
+#define regCP_MES_MIE_LO_BASE_IDX 1
+#define regCP_MES_MIE_HI 0x280f
+#define regCP_MES_MIE_HI_BASE_IDX 1
+#define regCP_MES_INTERRUPT 0x2810
+#define regCP_MES_INTERRUPT_BASE_IDX 1
+#define regCP_MES_SCRATCH_INDEX 0x2811
+#define regCP_MES_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_MES_SCRATCH_DATA 0x2812
+#define regCP_MES_SCRATCH_DATA_BASE_IDX 1
+#define regCP_MES_INSTR_PNTR 0x2813
+#define regCP_MES_INSTR_PNTR_BASE_IDX 1
+#define regCP_MES_MSCRATCH_HI 0x2814
+#define regCP_MES_MSCRATCH_HI_BASE_IDX 1
+#define regCP_MES_MSCRATCH_LO 0x2815
+#define regCP_MES_MSCRATCH_LO_BASE_IDX 1
+#define regCP_MES_MSTATUS_LO 0x2816
+#define regCP_MES_MSTATUS_LO_BASE_IDX 1
+#define regCP_MES_MSTATUS_HI 0x2817
+#define regCP_MES_MSTATUS_HI_BASE_IDX 1
+#define regCP_MES_MEPC_LO 0x2818
+#define regCP_MES_MEPC_LO_BASE_IDX 1
+#define regCP_MES_MEPC_HI 0x2819
+#define regCP_MES_MEPC_HI_BASE_IDX 1
+#define regCP_MES_MCAUSE_LO 0x281a
+#define regCP_MES_MCAUSE_LO_BASE_IDX 1
+#define regCP_MES_MCAUSE_HI 0x281b
+#define regCP_MES_MCAUSE_HI_BASE_IDX 1
+#define regCP_MES_MBADADDR_LO 0x281c
+#define regCP_MES_MBADADDR_LO_BASE_IDX 1
+#define regCP_MES_MBADADDR_HI 0x281d
+#define regCP_MES_MBADADDR_HI_BASE_IDX 1
+#define regCP_MES_MIP_LO 0x281e
+#define regCP_MES_MIP_LO_BASE_IDX 1
+#define regCP_MES_MIP_HI 0x281f
+#define regCP_MES_MIP_HI_BASE_IDX 1
+#define regCP_MES_IC_OP_CNTL 0x2820
+#define regCP_MES_IC_OP_CNTL_BASE_IDX 1
+#define regCP_MES_MCYCLE_LO 0x2826
+#define regCP_MES_MCYCLE_LO_BASE_IDX 1
+#define regCP_MES_MCYCLE_HI 0x2827
+#define regCP_MES_MCYCLE_HI_BASE_IDX 1
+#define regCP_MES_MTIME_LO 0x2828
+#define regCP_MES_MTIME_LO_BASE_IDX 1
+#define regCP_MES_MTIME_HI 0x2829
+#define regCP_MES_MTIME_HI_BASE_IDX 1
+#define regCP_MES_MINSTRET_LO 0x282a
+#define regCP_MES_MINSTRET_LO_BASE_IDX 1
+#define regCP_MES_MINSTRET_HI 0x282b
+#define regCP_MES_MINSTRET_HI_BASE_IDX 1
+#define regCP_MES_MISA_LO 0x282c
+#define regCP_MES_MISA_LO_BASE_IDX 1
+#define regCP_MES_MISA_HI 0x282d
+#define regCP_MES_MISA_HI_BASE_IDX 1
+#define regCP_MES_MVENDORID_LO 0x282e
+#define regCP_MES_MVENDORID_LO_BASE_IDX 1
+#define regCP_MES_MVENDORID_HI 0x282f
+#define regCP_MES_MVENDORID_HI_BASE_IDX 1
+#define regCP_MES_MARCHID_LO 0x2830
+#define regCP_MES_MARCHID_LO_BASE_IDX 1
+#define regCP_MES_MARCHID_HI 0x2831
+#define regCP_MES_MARCHID_HI_BASE_IDX 1
+#define regCP_MES_MIMPID_LO 0x2832
+#define regCP_MES_MIMPID_LO_BASE_IDX 1
+#define regCP_MES_MIMPID_HI 0x2833
+#define regCP_MES_MIMPID_HI_BASE_IDX 1
+#define regCP_MES_MHARTID_LO 0x2834
+#define regCP_MES_MHARTID_LO_BASE_IDX 1
+#define regCP_MES_MHARTID_HI 0x2835
+#define regCP_MES_MHARTID_HI_BASE_IDX 1
+#define regCP_MES_DC_BASE_CNTL 0x2836
+#define regCP_MES_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_DC_OP_CNTL 0x2837
+#define regCP_MES_DC_OP_CNTL_BASE_IDX 1
+#define regCP_MES_MTIMECMP_LO 0x2838
+#define regCP_MES_MTIMECMP_LO_BASE_IDX 1
+#define regCP_MES_MTIMECMP_HI 0x2839
+#define regCP_MES_MTIMECMP_HI_BASE_IDX 1
+#define regCP_MES_PROCESS_QUANTUM_PIPE0 0x283a
+#define regCP_MES_PROCESS_QUANTUM_PIPE0_BASE_IDX 1
+#define regCP_MES_PROCESS_QUANTUM_PIPE1 0x283b
+#define regCP_MES_PROCESS_QUANTUM_PIPE1_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL1 0x283c
+#define regCP_MES_DOORBELL_CONTROL1_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL2 0x283d
+#define regCP_MES_DOORBELL_CONTROL2_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL3 0x283e
+#define regCP_MES_DOORBELL_CONTROL3_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL4 0x283f
+#define regCP_MES_DOORBELL_CONTROL4_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL5 0x2840
+#define regCP_MES_DOORBELL_CONTROL5_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL6 0x2841
+#define regCP_MES_DOORBELL_CONTROL6_BASE_IDX 1
+#define regCP_MES_GP0_LO 0x2843
+#define regCP_MES_GP0_LO_BASE_IDX 1
+#define regCP_MES_GP0_HI 0x2844
+#define regCP_MES_GP0_HI_BASE_IDX 1
+#define regCP_MES_GP1_LO 0x2845
+#define regCP_MES_GP1_LO_BASE_IDX 1
+#define regCP_MES_GP1_HI 0x2846
+#define regCP_MES_GP1_HI_BASE_IDX 1
+#define regCP_MES_GP2_LO 0x2847
+#define regCP_MES_GP2_LO_BASE_IDX 1
+#define regCP_MES_GP2_HI 0x2848
+#define regCP_MES_GP2_HI_BASE_IDX 1
+#define regCP_MES_GP3_LO 0x2849
+#define regCP_MES_GP3_LO_BASE_IDX 1
+#define regCP_MES_GP3_HI 0x284a
+#define regCP_MES_GP3_HI_BASE_IDX 1
+#define regCP_MES_GP4_LO 0x284b
+#define regCP_MES_GP4_LO_BASE_IDX 1
+#define regCP_MES_GP4_HI 0x284c
+#define regCP_MES_GP4_HI_BASE_IDX 1
+#define regCP_MES_GP5_LO 0x284d
+#define regCP_MES_GP5_LO_BASE_IDX 1
+#define regCP_MES_GP5_HI 0x284e
+#define regCP_MES_GP5_HI_BASE_IDX 1
+#define regCP_MES_GP6_LO 0x284f
+#define regCP_MES_GP6_LO_BASE_IDX 1
+#define regCP_MES_GP6_HI 0x2850
+#define regCP_MES_GP6_HI_BASE_IDX 1
+#define regCP_MES_GP7_LO 0x2851
+#define regCP_MES_GP7_LO_BASE_IDX 1
+#define regCP_MES_GP7_HI 0x2852
+#define regCP_MES_GP7_HI_BASE_IDX 1
+#define regCP_MES_GP8_LO 0x2853
+#define regCP_MES_GP8_LO_BASE_IDX 1
+#define regCP_MES_GP8_HI 0x2854
+#define regCP_MES_GP8_HI_BASE_IDX 1
+#define regCP_MES_GP9_LO 0x2855
+#define regCP_MES_GP9_LO_BASE_IDX 1
+#define regCP_MES_GP9_HI 0x2856
+#define regCP_MES_GP9_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_BASE0_LO 0x2883
+#define regCP_MES_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_BASE0_HI 0x2884
+#define regCP_MES_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_MASK0_LO 0x2885
+#define regCP_MES_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_MASK0_HI 0x2886
+#define regCP_MES_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_APERTURE 0x2887
+#define regCP_MES_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_BASE_LO 0x2888
+#define regCP_MES_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_BASE_HI 0x2889
+#define regCP_MES_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_MASK_LO 0x288a
+#define regCP_MES_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_MASK_HI 0x288b
+#define regCP_MES_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_APERTURE 0x288c
+#define regCP_MES_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_APERTURE 0x288d
+#define regCP_MES_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_BASE_LO 0x288e
+#define regCP_MES_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_BASE_HI 0x288f
+#define regCP_MES_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_MES_PERFCOUNT_CNTL 0x2899
+#define regCP_MES_PERFCOUNT_CNTL_BASE_IDX 1
+#define regCP_MES_PENDING_INTERRUPT 0x289a
+#define regCP_MES_PENDING_INTERRUPT_BASE_IDX 1
+#define regCP_MES_PRGRM_CNTR_START_HI 0x289d
+#define regCP_MES_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_16 0x289f
+#define regCP_MES_INTERRUPT_DATA_16_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_17 0x28a0
+#define regCP_MES_INTERRUPT_DATA_17_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_18 0x28a1
+#define regCP_MES_INTERRUPT_DATA_18_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_19 0x28a2
+#define regCP_MES_INTERRUPT_DATA_19_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_20 0x28a3
+#define regCP_MES_INTERRUPT_DATA_20_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_21 0x28a4
+#define regCP_MES_INTERRUPT_DATA_21_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_22 0x28a5
+#define regCP_MES_INTERRUPT_DATA_22_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_23 0x28a6
+#define regCP_MES_INTERRUPT_DATA_23_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_24 0x28a7
+#define regCP_MES_INTERRUPT_DATA_24_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_25 0x28a8
+#define regCP_MES_INTERRUPT_DATA_25_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_26 0x28a9
+#define regCP_MES_INTERRUPT_DATA_26_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_27 0x28aa
+#define regCP_MES_INTERRUPT_DATA_27_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_28 0x28ab
+#define regCP_MES_INTERRUPT_DATA_28_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_29 0x28ac
+#define regCP_MES_INTERRUPT_DATA_29_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_30 0x28ad
+#define regCP_MES_INTERRUPT_DATA_30_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_31 0x28ae
+#define regCP_MES_INTERRUPT_DATA_31_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_BASE 0x28af
+#define regCP_MES_DC_APERTURE0_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_MASK 0x28b0
+#define regCP_MES_DC_APERTURE0_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_CNTL 0x28b1
+#define regCP_MES_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_BASE 0x28b2
+#define regCP_MES_DC_APERTURE1_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_MASK 0x28b3
+#define regCP_MES_DC_APERTURE1_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_CNTL 0x28b4
+#define regCP_MES_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_BASE 0x28b5
+#define regCP_MES_DC_APERTURE2_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_MASK 0x28b6
+#define regCP_MES_DC_APERTURE2_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_CNTL 0x28b7
+#define regCP_MES_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_BASE 0x28b8
+#define regCP_MES_DC_APERTURE3_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_MASK 0x28b9
+#define regCP_MES_DC_APERTURE3_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_CNTL 0x28ba
+#define regCP_MES_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_BASE 0x28bb
+#define regCP_MES_DC_APERTURE4_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_MASK 0x28bc
+#define regCP_MES_DC_APERTURE4_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_CNTL 0x28bd
+#define regCP_MES_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_BASE 0x28be
+#define regCP_MES_DC_APERTURE5_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_MASK 0x28bf
+#define regCP_MES_DC_APERTURE5_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_CNTL 0x28c0
+#define regCP_MES_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_BASE 0x28c1
+#define regCP_MES_DC_APERTURE6_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_MASK 0x28c2
+#define regCP_MES_DC_APERTURE6_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_CNTL 0x28c3
+#define regCP_MES_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_BASE 0x28c4
+#define regCP_MES_DC_APERTURE7_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_MASK 0x28c5
+#define regCP_MES_DC_APERTURE7_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_CNTL 0x28c6
+#define regCP_MES_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_BASE 0x28c7
+#define regCP_MES_DC_APERTURE8_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_MASK 0x28c8
+#define regCP_MES_DC_APERTURE8_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_CNTL 0x28c9
+#define regCP_MES_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_BASE 0x28ca
+#define regCP_MES_DC_APERTURE9_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_MASK 0x28cb
+#define regCP_MES_DC_APERTURE9_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_CNTL 0x28cc
+#define regCP_MES_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_BASE 0x28cd
+#define regCP_MES_DC_APERTURE10_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_MASK 0x28ce
+#define regCP_MES_DC_APERTURE10_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_CNTL 0x28cf
+#define regCP_MES_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_BASE 0x28d0
+#define regCP_MES_DC_APERTURE11_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_MASK 0x28d1
+#define regCP_MES_DC_APERTURE11_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_CNTL 0x28d2
+#define regCP_MES_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_BASE 0x28d3
+#define regCP_MES_DC_APERTURE12_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_MASK 0x28d4
+#define regCP_MES_DC_APERTURE12_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_CNTL 0x28d5
+#define regCP_MES_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_BASE 0x28d6
+#define regCP_MES_DC_APERTURE13_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_MASK 0x28d7
+#define regCP_MES_DC_APERTURE13_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_CNTL 0x28d8
+#define regCP_MES_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_BASE 0x28d9
+#define regCP_MES_DC_APERTURE14_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_MASK 0x28da
+#define regCP_MES_DC_APERTURE14_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_CNTL 0x28db
+#define regCP_MES_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_BASE 0x28dc
+#define regCP_MES_DC_APERTURE15_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_MASK 0x28dd
+#define regCP_MES_DC_APERTURE15_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_CNTL 0x28de
+#define regCP_MES_DC_APERTURE15_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_PRGRM_CNTR_START 0x2900
+#define regCP_MEC_RS64_PRGRM_CNTR_START_BASE_IDX 1
+#define regCP_MEC_MTVEC_LO 0x2901
+#define regCP_MEC_MTVEC_LO_BASE_IDX 1
+#define regCP_MEC_MTVEC_HI 0x2902
+#define regCP_MEC_MTVEC_HI_BASE_IDX 1
+#define regCP_MEC_ISA_CNTL 0x2903
+#define regCP_MEC_ISA_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_CNTL 0x2904
+#define regCP_MEC_RS64_CNTL_BASE_IDX 1
+#define regCP_MEC_MIE_LO 0x2905
+#define regCP_MEC_MIE_LO_BASE_IDX 1
+#define regCP_MEC_MIE_HI 0x2906
+#define regCP_MEC_MIE_HI_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT 0x2907
+#define regCP_MEC_RS64_INTERRUPT_BASE_IDX 1
+#define regCP_MEC_RS64_INSTR_PNTR 0x2908
+#define regCP_MEC_RS64_INSTR_PNTR_BASE_IDX 1
+#define regCP_MEC_MIP_LO 0x2909
+#define regCP_MEC_MIP_LO_BASE_IDX 1
+#define regCP_MEC_MIP_HI 0x290a
+#define regCP_MEC_MIP_HI_BASE_IDX 1
+#define regCP_MEC_DC_BASE_CNTL 0x290b
+#define regCP_MEC_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_OP_CNTL 0x290c
+#define regCP_MEC_DC_OP_CNTL_BASE_IDX 1
+#define regCP_MEC_MTIMECMP_LO 0x290d
+#define regCP_MEC_MTIMECMP_LO_BASE_IDX 1
+#define regCP_MEC_MTIMECMP_HI 0x290e
+#define regCP_MEC_MTIMECMP_HI_BASE_IDX 1
+#define regCP_MEC_GP0_LO 0x2910
+#define regCP_MEC_GP0_LO_BASE_IDX 1
+#define regCP_MEC_GP0_HI 0x2911
+#define regCP_MEC_GP0_HI_BASE_IDX 1
+#define regCP_MEC_GP1_LO 0x2912
+#define regCP_MEC_GP1_LO_BASE_IDX 1
+#define regCP_MEC_GP1_HI 0x2913
+#define regCP_MEC_GP1_HI_BASE_IDX 1
+#define regCP_MEC_GP2_LO 0x2914
+#define regCP_MEC_GP2_LO_BASE_IDX 1
+#define regCP_MEC_GP2_HI 0x2915
+#define regCP_MEC_GP2_HI_BASE_IDX 1
+#define regCP_MEC_GP3_LO 0x2916
+#define regCP_MEC_GP3_LO_BASE_IDX 1
+#define regCP_MEC_GP3_HI 0x2917
+#define regCP_MEC_GP3_HI_BASE_IDX 1
+#define regCP_MEC_GP4_LO 0x2918
+#define regCP_MEC_GP4_LO_BASE_IDX 1
+#define regCP_MEC_GP4_HI 0x2919
+#define regCP_MEC_GP4_HI_BASE_IDX 1
+#define regCP_MEC_GP5_LO 0x291a
+#define regCP_MEC_GP5_LO_BASE_IDX 1
+#define regCP_MEC_GP5_HI 0x291b
+#define regCP_MEC_GP5_HI_BASE_IDX 1
+#define regCP_MEC_GP6_LO 0x291c
+#define regCP_MEC_GP6_LO_BASE_IDX 1
+#define regCP_MEC_GP6_HI 0x291d
+#define regCP_MEC_GP6_HI_BASE_IDX 1
+#define regCP_MEC_GP7_LO 0x291e
+#define regCP_MEC_GP7_LO_BASE_IDX 1
+#define regCP_MEC_GP7_HI 0x291f
+#define regCP_MEC_GP7_HI_BASE_IDX 1
+#define regCP_MEC_GP8_LO 0x2920
+#define regCP_MEC_GP8_LO_BASE_IDX 1
+#define regCP_MEC_GP8_HI 0x2921
+#define regCP_MEC_GP8_HI_BASE_IDX 1
+#define regCP_MEC_GP9_LO 0x2922
+#define regCP_MEC_GP9_LO_BASE_IDX 1
+#define regCP_MEC_GP9_HI 0x2923
+#define regCP_MEC_GP9_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_BASE0_LO 0x2927
+#define regCP_MEC_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_BASE0_HI 0x2928
+#define regCP_MEC_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_MASK0_LO 0x2929
+#define regCP_MEC_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_MASK0_HI 0x292a
+#define regCP_MEC_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_APERTURE 0x292b
+#define regCP_MEC_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_BASE_LO 0x292c
+#define regCP_MEC_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_BASE_HI 0x292d
+#define regCP_MEC_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_MASK_LO 0x292e
+#define regCP_MEC_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_MASK_HI 0x292f
+#define regCP_MEC_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_APERTURE 0x2930
+#define regCP_MEC_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_APERTURE 0x2931
+#define regCP_MEC_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_BASE_LO 0x2932
+#define regCP_MEC_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_BASE_HI 0x2933
+#define regCP_MEC_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_MEC_RS64_PERFCOUNT_CNTL 0x2934
+#define regCP_MEC_RS64_PERFCOUNT_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_PENDING_INTERRUPT 0x2935
+#define regCP_MEC_RS64_PENDING_INTERRUPT_BASE_IDX 1
+#define regCP_MEC_RS64_PRGRM_CNTR_START_HI 0x2938
+#define regCP_MEC_RS64_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_16 0x293a
+#define regCP_MEC_RS64_INTERRUPT_DATA_16_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_17 0x293b
+#define regCP_MEC_RS64_INTERRUPT_DATA_17_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_18 0x293c
+#define regCP_MEC_RS64_INTERRUPT_DATA_18_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_19 0x293d
+#define regCP_MEC_RS64_INTERRUPT_DATA_19_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_20 0x293e
+#define regCP_MEC_RS64_INTERRUPT_DATA_20_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_21 0x293f
+#define regCP_MEC_RS64_INTERRUPT_DATA_21_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_22 0x2940
+#define regCP_MEC_RS64_INTERRUPT_DATA_22_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_23 0x2941
+#define regCP_MEC_RS64_INTERRUPT_DATA_23_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_24 0x2942
+#define regCP_MEC_RS64_INTERRUPT_DATA_24_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_25 0x2943
+#define regCP_MEC_RS64_INTERRUPT_DATA_25_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_26 0x2944
+#define regCP_MEC_RS64_INTERRUPT_DATA_26_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_27 0x2945
+#define regCP_MEC_RS64_INTERRUPT_DATA_27_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_28 0x2946
+#define regCP_MEC_RS64_INTERRUPT_DATA_28_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_29 0x2947
+#define regCP_MEC_RS64_INTERRUPT_DATA_29_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_30 0x2948
+#define regCP_MEC_RS64_INTERRUPT_DATA_30_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_31 0x2949
+#define regCP_MEC_RS64_INTERRUPT_DATA_31_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_BASE 0x294a
+#define regCP_MEC_DC_APERTURE0_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_MASK 0x294b
+#define regCP_MEC_DC_APERTURE0_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_CNTL 0x294c
+#define regCP_MEC_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_BASE 0x294d
+#define regCP_MEC_DC_APERTURE1_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_MASK 0x294e
+#define regCP_MEC_DC_APERTURE1_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_CNTL 0x294f
+#define regCP_MEC_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_BASE 0x2950
+#define regCP_MEC_DC_APERTURE2_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_MASK 0x2951
+#define regCP_MEC_DC_APERTURE2_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_CNTL 0x2952
+#define regCP_MEC_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_BASE 0x2953
+#define regCP_MEC_DC_APERTURE3_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_MASK 0x2954
+#define regCP_MEC_DC_APERTURE3_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_CNTL 0x2955
+#define regCP_MEC_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_BASE 0x2956
+#define regCP_MEC_DC_APERTURE4_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_MASK 0x2957
+#define regCP_MEC_DC_APERTURE4_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_CNTL 0x2958
+#define regCP_MEC_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_BASE 0x2959
+#define regCP_MEC_DC_APERTURE5_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_MASK 0x295a
+#define regCP_MEC_DC_APERTURE5_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_CNTL 0x295b
+#define regCP_MEC_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_BASE 0x295c
+#define regCP_MEC_DC_APERTURE6_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_MASK 0x295d
+#define regCP_MEC_DC_APERTURE6_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_CNTL 0x295e
+#define regCP_MEC_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_BASE 0x295f
+#define regCP_MEC_DC_APERTURE7_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_MASK 0x2960
+#define regCP_MEC_DC_APERTURE7_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_CNTL 0x2961
+#define regCP_MEC_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_BASE 0x2962
+#define regCP_MEC_DC_APERTURE8_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_MASK 0x2963
+#define regCP_MEC_DC_APERTURE8_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_CNTL 0x2964
+#define regCP_MEC_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_BASE 0x2965
+#define regCP_MEC_DC_APERTURE9_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_MASK 0x2966
+#define regCP_MEC_DC_APERTURE9_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_CNTL 0x2967
+#define regCP_MEC_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_BASE 0x2968
+#define regCP_MEC_DC_APERTURE10_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_MASK 0x2969
+#define regCP_MEC_DC_APERTURE10_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_CNTL 0x296a
+#define regCP_MEC_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_BASE 0x296b
+#define regCP_MEC_DC_APERTURE11_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_MASK 0x296c
+#define regCP_MEC_DC_APERTURE11_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_CNTL 0x296d
+#define regCP_MEC_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_BASE 0x296e
+#define regCP_MEC_DC_APERTURE12_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_MASK 0x296f
+#define regCP_MEC_DC_APERTURE12_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_CNTL 0x2970
+#define regCP_MEC_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_BASE 0x2971
+#define regCP_MEC_DC_APERTURE13_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_MASK 0x2972
+#define regCP_MEC_DC_APERTURE13_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_CNTL 0x2973
+#define regCP_MEC_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_BASE 0x2974
+#define regCP_MEC_DC_APERTURE14_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_MASK 0x2975
+#define regCP_MEC_DC_APERTURE14_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_CNTL 0x2976
+#define regCP_MEC_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_BASE 0x2977
+#define regCP_MEC_DC_APERTURE15_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_MASK 0x2978
+#define regCP_MEC_DC_APERTURE15_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_CNTL 0x2979
+#define regCP_MEC_DC_APERTURE15_CNTL_BASE_IDX 1
+#define regCP_CPC_IC_OP_CNTL 0x297a
+#define regCP_CPC_IC_OP_CNTL_BASE_IDX 1
+#define regCP_GFX_CNTL 0x2a00
+#define regCP_GFX_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_INTERRUPT0 0x2a01
+#define regCP_GFX_RS64_INTERRUPT0_BASE_IDX 1
+#define regCP_GFX_RS64_INTR_EN0 0x2a02
+#define regCP_GFX_RS64_INTR_EN0_BASE_IDX 1
+#define regCP_GFX_RS64_INTR_EN1 0x2a03
+#define regCP_GFX_RS64_INTR_EN1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE_CNTL 0x2a08
+#define regCP_GFX_RS64_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_DC_OP_CNTL 0x2a09
+#define regCP_GFX_RS64_DC_OP_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_BASE0_LO 0x2a0a
+#define regCP_GFX_RS64_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_BASE0_HI 0x2a0b
+#define regCP_GFX_RS64_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_MASK0_LO 0x2a0c
+#define regCP_GFX_RS64_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_MASK0_HI 0x2a0d
+#define regCP_GFX_RS64_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_APERTURE 0x2a0e
+#define regCP_GFX_RS64_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_LO 0x2a0f
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_HI 0x2a10
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_LO 0x2a11
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_HI 0x2a12
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_APERTURE 0x2a13
+#define regCP_GFX_RS64_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_APERTURE 0x2a14
+#define regCP_GFX_RS64_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_LO 0x2a15
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_HI 0x2a16
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_GFX_RS64_PERFCOUNT_CNTL0 0x2a1a
+#define regCP_GFX_RS64_PERFCOUNT_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_PERFCOUNT_CNTL1 0x2a1b
+#define regCP_GFX_RS64_PERFCOUNT_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_LO0 0x2a1c
+#define regCP_GFX_RS64_MIP_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_LO1 0x2a1d
+#define regCP_GFX_RS64_MIP_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_HI0 0x2a1e
+#define regCP_GFX_RS64_MIP_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_HI1 0x2a1f
+#define regCP_GFX_RS64_MIP_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_LO0 0x2a20
+#define regCP_GFX_RS64_MTIMECMP_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_LO1 0x2a21
+#define regCP_GFX_RS64_MTIMECMP_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_HI0 0x2a22
+#define regCP_GFX_RS64_MTIMECMP_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_HI1 0x2a23
+#define regCP_GFX_RS64_MTIMECMP_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_LO0 0x2a24
+#define regCP_GFX_RS64_GP0_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_LO1 0x2a25
+#define regCP_GFX_RS64_GP0_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_HI0 0x2a26
+#define regCP_GFX_RS64_GP0_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_HI1 0x2a27
+#define regCP_GFX_RS64_GP0_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_LO0 0x2a28
+#define regCP_GFX_RS64_GP1_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_LO1 0x2a29
+#define regCP_GFX_RS64_GP1_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_HI0 0x2a2a
+#define regCP_GFX_RS64_GP1_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_HI1 0x2a2b
+#define regCP_GFX_RS64_GP1_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_LO0 0x2a2c
+#define regCP_GFX_RS64_GP2_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_LO1 0x2a2d
+#define regCP_GFX_RS64_GP2_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_HI0 0x2a2e
+#define regCP_GFX_RS64_GP2_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_HI1 0x2a2f
+#define regCP_GFX_RS64_GP2_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_LO0 0x2a30
+#define regCP_GFX_RS64_GP3_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_LO1 0x2a31
+#define regCP_GFX_RS64_GP3_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_HI0 0x2a32
+#define regCP_GFX_RS64_GP3_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_HI1 0x2a33
+#define regCP_GFX_RS64_GP3_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_LO0 0x2a34
+#define regCP_GFX_RS64_GP4_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_LO1 0x2a35
+#define regCP_GFX_RS64_GP4_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_HI0 0x2a36
+#define regCP_GFX_RS64_GP4_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_HI1 0x2a37
+#define regCP_GFX_RS64_GP4_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_LO0 0x2a38
+#define regCP_GFX_RS64_GP5_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_LO1 0x2a39
+#define regCP_GFX_RS64_GP5_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_HI0 0x2a3a
+#define regCP_GFX_RS64_GP5_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_HI1 0x2a3b
+#define regCP_GFX_RS64_GP5_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP6_LO 0x2a3c
+#define regCP_GFX_RS64_GP6_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP6_HI 0x2a3d
+#define regCP_GFX_RS64_GP6_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP7_LO 0x2a3e
+#define regCP_GFX_RS64_GP7_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP7_HI 0x2a3f
+#define regCP_GFX_RS64_GP7_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP8_LO 0x2a40
+#define regCP_GFX_RS64_GP8_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP8_HI 0x2a41
+#define regCP_GFX_RS64_GP8_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP9_LO 0x2a42
+#define regCP_GFX_RS64_GP9_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP9_HI 0x2a43
+#define regCP_GFX_RS64_GP9_HI_BASE_IDX 1
+#define regCP_GFX_RS64_INSTR_PNTR0 0x2a44
+#define regCP_GFX_RS64_INSTR_PNTR0_BASE_IDX 1
+#define regCP_GFX_RS64_INSTR_PNTR1 0x2a45
+#define regCP_GFX_RS64_INSTR_PNTR1_BASE_IDX 1
+#define regCP_GFX_RS64_PENDING_INTERRUPT0 0x2a46
+#define regCP_GFX_RS64_PENDING_INTERRUPT0_BASE_IDX 1
+#define regCP_GFX_RS64_PENDING_INTERRUPT1 0x2a47
+#define regCP_GFX_RS64_PENDING_INTERRUPT1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_BASE0 0x2a49
+#define regCP_GFX_RS64_DC_APERTURE0_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_MASK0 0x2a4a
+#define regCP_GFX_RS64_DC_APERTURE0_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL0 0x2a4b
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_BASE0 0x2a4c
+#define regCP_GFX_RS64_DC_APERTURE1_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_MASK0 0x2a4d
+#define regCP_GFX_RS64_DC_APERTURE1_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL0 0x2a4e
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_BASE0 0x2a4f
+#define regCP_GFX_RS64_DC_APERTURE2_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_MASK0 0x2a50
+#define regCP_GFX_RS64_DC_APERTURE2_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL0 0x2a51
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_BASE0 0x2a52
+#define regCP_GFX_RS64_DC_APERTURE3_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_MASK0 0x2a53
+#define regCP_GFX_RS64_DC_APERTURE3_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL0 0x2a54
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_BASE0 0x2a55
+#define regCP_GFX_RS64_DC_APERTURE4_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_MASK0 0x2a56
+#define regCP_GFX_RS64_DC_APERTURE4_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL0 0x2a57
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_BASE0 0x2a58
+#define regCP_GFX_RS64_DC_APERTURE5_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_MASK0 0x2a59
+#define regCP_GFX_RS64_DC_APERTURE5_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL0 0x2a5a
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_BASE0 0x2a5b
+#define regCP_GFX_RS64_DC_APERTURE6_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_MASK0 0x2a5c
+#define regCP_GFX_RS64_DC_APERTURE6_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL0 0x2a5d
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_BASE0 0x2a5e
+#define regCP_GFX_RS64_DC_APERTURE7_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_MASK0 0x2a5f
+#define regCP_GFX_RS64_DC_APERTURE7_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL0 0x2a60
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_BASE0 0x2a61
+#define regCP_GFX_RS64_DC_APERTURE8_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_MASK0 0x2a62
+#define regCP_GFX_RS64_DC_APERTURE8_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL0 0x2a63
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_BASE0 0x2a64
+#define regCP_GFX_RS64_DC_APERTURE9_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_MASK0 0x2a65
+#define regCP_GFX_RS64_DC_APERTURE9_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL0 0x2a66
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_BASE0 0x2a67
+#define regCP_GFX_RS64_DC_APERTURE10_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_MASK0 0x2a68
+#define regCP_GFX_RS64_DC_APERTURE10_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL0 0x2a69
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_BASE0 0x2a6a
+#define regCP_GFX_RS64_DC_APERTURE11_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_MASK0 0x2a6b
+#define regCP_GFX_RS64_DC_APERTURE11_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL0 0x2a6c
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_BASE0 0x2a6d
+#define regCP_GFX_RS64_DC_APERTURE12_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_MASK0 0x2a6e
+#define regCP_GFX_RS64_DC_APERTURE12_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL0 0x2a6f
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_BASE0 0x2a70
+#define regCP_GFX_RS64_DC_APERTURE13_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK0 0x2a71
+#define regCP_GFX_RS64_DC_APERTURE13_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL0 0x2a72
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_BASE0 0x2a73
+#define regCP_GFX_RS64_DC_APERTURE14_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_MASK0 0x2a74
+#define regCP_GFX_RS64_DC_APERTURE14_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL0 0x2a75
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_BASE0 0x2a76
+#define regCP_GFX_RS64_DC_APERTURE15_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_MASK0 0x2a77
+#define regCP_GFX_RS64_DC_APERTURE15_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL0 0x2a78
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_BASE1 0x2a79
+#define regCP_GFX_RS64_DC_APERTURE0_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_MASK1 0x2a7a
+#define regCP_GFX_RS64_DC_APERTURE0_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL1 0x2a7b
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_BASE1 0x2a7c
+#define regCP_GFX_RS64_DC_APERTURE1_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_MASK1 0x2a7d
+#define regCP_GFX_RS64_DC_APERTURE1_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL1 0x2a7e
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_BASE1 0x2a7f
+#define regCP_GFX_RS64_DC_APERTURE2_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_MASK1 0x2a80
+#define regCP_GFX_RS64_DC_APERTURE2_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL1 0x2a81
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_BASE1 0x2a82
+#define regCP_GFX_RS64_DC_APERTURE3_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_MASK1 0x2a83
+#define regCP_GFX_RS64_DC_APERTURE3_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL1 0x2a84
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_BASE1 0x2a85
+#define regCP_GFX_RS64_DC_APERTURE4_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_MASK1 0x2a86
+#define regCP_GFX_RS64_DC_APERTURE4_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL1 0x2a87
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_BASE1 0x2a88
+#define regCP_GFX_RS64_DC_APERTURE5_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_MASK1 0x2a89
+#define regCP_GFX_RS64_DC_APERTURE5_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL1 0x2a8a
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_BASE1 0x2a8b
+#define regCP_GFX_RS64_DC_APERTURE6_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_MASK1 0x2a8c
+#define regCP_GFX_RS64_DC_APERTURE6_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL1 0x2a8d
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_BASE1 0x2a8e
+#define regCP_GFX_RS64_DC_APERTURE7_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_MASK1 0x2a8f
+#define regCP_GFX_RS64_DC_APERTURE7_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL1 0x2a90
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_BASE1 0x2a91
+#define regCP_GFX_RS64_DC_APERTURE8_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_MASK1 0x2a92
+#define regCP_GFX_RS64_DC_APERTURE8_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL1 0x2a93
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_BASE1 0x2a94
+#define regCP_GFX_RS64_DC_APERTURE9_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_MASK1 0x2a95
+#define regCP_GFX_RS64_DC_APERTURE9_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL1 0x2a96
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_BASE1 0x2a97
+#define regCP_GFX_RS64_DC_APERTURE10_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_MASK1 0x2a98
+#define regCP_GFX_RS64_DC_APERTURE10_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL1 0x2a99
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_BASE1 0x2a9a
+#define regCP_GFX_RS64_DC_APERTURE11_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_MASK1 0x2a9b
+#define regCP_GFX_RS64_DC_APERTURE11_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL1 0x2a9c
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_BASE1 0x2a9d
+#define regCP_GFX_RS64_DC_APERTURE12_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_MASK1 0x2a9e
+#define regCP_GFX_RS64_DC_APERTURE12_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL1 0x2a9f
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_BASE1 0x2aa0
+#define regCP_GFX_RS64_DC_APERTURE13_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK1 0x2aa1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL1 0x2aa2
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_BASE1 0x2aa3
+#define regCP_GFX_RS64_DC_APERTURE14_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_MASK1 0x2aa4
+#define regCP_GFX_RS64_DC_APERTURE14_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL1 0x2aa5
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_BASE1 0x2aa6
+#define regCP_GFX_RS64_DC_APERTURE15_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_MASK1 0x2aa7
+#define regCP_GFX_RS64_DC_APERTURE15_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL1 0x2aa8
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_INTERRUPT1 0x2aac
+#define regCP_GFX_RS64_INTERRUPT1_BASE_IDX 1
+
+
+// addressBlock: gc_gl1dec
+// base address: 0x33400
+#define regGL1_ARB_CTRL 0x2d00
+#define regGL1_ARB_CTRL_BASE_IDX 1
+#define regGL1_DRAM_BURST_MASK 0x2d02
+#define regGL1_DRAM_BURST_MASK_BASE_IDX 1
+#define regGL1_ARB_STATUS 0x2d03
+#define regGL1_ARB_STATUS_BASE_IDX 1
+#define regGL1_DRAM_BURST_CTRL 0x2d04
+#define regGL1_DRAM_BURST_CTRL_BASE_IDX 1
+#define regGL1I_GL1R_REP_FGCG_OVERRIDE 0x2d05
+#define regGL1I_GL1R_REP_FGCG_OVERRIDE_BASE_IDX 1
+#define regGL1C_CTRL 0x2d40
+#define regGL1C_CTRL_BASE_IDX 1
+#define regGL1C_STATUS 0x2d41
+#define regGL1C_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_CNTL2 0x2d43
+#define regGL1C_UTCL0_CNTL2_BASE_IDX 1
+#define regGL1C_UTCL0_STATUS 0x2d44
+#define regGL1C_UTCL0_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_RETRY 0x2d45
+#define regGL1C_UTCL0_RETRY_BASE_IDX 1
+#define regGL1C_CTRL2 0x2d46
+#define regGL1C_CTRL2_BASE_IDX 1
+
+
+// addressBlock: gc_chdec
+// base address: 0x33600
+#define regCH_ARB_CTRL 0x2d80
+#define regCH_ARB_CTRL_BASE_IDX 1
+#define regCH_DRAM_BURST_MASK 0x2d82
+#define regCH_DRAM_BURST_MASK_BASE_IDX 1
+#define regCH_ARB_STATUS 0x2d83
+#define regCH_ARB_STATUS_BASE_IDX 1
+#define regCH_DRAM_BURST_CTRL 0x2d84
+#define regCH_DRAM_BURST_CTRL_BASE_IDX 1
+#define regCHA_CHC_CREDITS 0x2d88
+#define regCHA_CHC_CREDITS_BASE_IDX 1
+#define regCHA_CLIENT_FREE_DELAY 0x2d89
+#define regCHA_CLIENT_FREE_DELAY_BASE_IDX 1
+#define regCHI_CHR_REP_FGCG_OVERRIDE 0x2d8c
+#define regCHI_CHR_REP_FGCG_OVERRIDE_BASE_IDX 1
+#define regCH_VC5_ENABLE 0x2d94
+#define regCH_VC5_ENABLE_BASE_IDX 1
+#define regCHC_CTRL 0x2dc0
+#define regCHC_CTRL_BASE_IDX 1
+#define regCHC_STATUS 0x2dc1
+#define regCHC_STATUS_BASE_IDX 1
+#define regCHCG_CTRL 0x2dc2
+#define regCHCG_CTRL_BASE_IDX 1
+#define regCHCG_STATUS 0x2dc3
+#define regCHCG_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_gl2dec
+// base address: 0x33800
+#define regGL2C_CTRL 0x2e00
+#define regGL2C_CTRL_BASE_IDX 1
+#define regGL2C_CTRL2 0x2e01
+#define regGL2C_CTRL2_BASE_IDX 1
+#define regGL2C_STATUS 0x2e02
+#define regGL2C_STATUS_BASE_IDX 1
+#define regGL2C_ADDR_MATCH_MASK 0x2e03
+#define regGL2C_ADDR_MATCH_MASK_BASE_IDX 1
+#define regGL2C_ADDR_MATCH_SIZE 0x2e04
+#define regGL2C_ADDR_MATCH_SIZE_BASE_IDX 1
+#define regGL2C_WBINVL2 0x2e05
+#define regGL2C_WBINVL2_BASE_IDX 1
+#define regGL2C_SOFT_RESET 0x2e06
+#define regGL2C_SOFT_RESET_BASE_IDX 1
+#define regGL2C_CM_CTRL0 0x2e07
+#define regGL2C_CM_CTRL0_BASE_IDX 1
+#define regGL2C_CM_CTRL1 0x2e08
+#define regGL2C_CM_CTRL1_BASE_IDX 1
+#define regGL2C_CM_STALL 0x2e09
+#define regGL2C_CM_STALL_BASE_IDX 1
+#define regGL2C_CM_CTRL2 0x2e0b
+#define regGL2C_CM_CTRL2_BASE_IDX 1
+#define regGL2C_CTRL3 0x2e0c
+#define regGL2C_CTRL3_BASE_IDX 1
+#define regGL2C_LB_CTR_CTRL 0x2e0d
+#define regGL2C_LB_CTR_CTRL_BASE_IDX 1
+#define regGL2C_LB_DATA0 0x2e0e
+#define regGL2C_LB_DATA0_BASE_IDX 1
+#define regGL2C_LB_DATA1 0x2e0f
+#define regGL2C_LB_DATA1_BASE_IDX 1
+#define regGL2C_LB_DATA2 0x2e10
+#define regGL2C_LB_DATA2_BASE_IDX 1
+#define regGL2C_LB_DATA3 0x2e11
+#define regGL2C_LB_DATA3_BASE_IDX 1
+#define regGL2C_LB_CTR_SEL0 0x2e12
+#define regGL2C_LB_CTR_SEL0_BASE_IDX 1
+#define regGL2C_LB_CTR_SEL1 0x2e13
+#define regGL2C_LB_CTR_SEL1_BASE_IDX 1
+#define regGL2C_CTRL4 0x2e17
+#define regGL2C_CTRL4_BASE_IDX 1
+#define regGL2C_DISCARD_STALL_CTRL 0x2e18
+#define regGL2C_DISCARD_STALL_CTRL_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_CTRL 0x2e20
+#define regGL2A_ADDR_MATCH_CTRL_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_MASK 0x2e21
+#define regGL2A_ADDR_MATCH_MASK_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_SIZE 0x2e22
+#define regGL2A_ADDR_MATCH_SIZE_BASE_IDX 1
+#define regGL2A_PRIORITY_CTRL 0x2e23
+#define regGL2A_PRIORITY_CTRL_BASE_IDX 1
+#define regGL2A_CTRL 0x2e24
+#define regGL2A_CTRL_BASE_IDX 1
+#define regGL2A_RESP_THROTTLE_CTRL 0x2e2a
+#define regGL2A_RESP_THROTTLE_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_gl1hdec
+// base address: 0x33900
+#define regGL1H_ARB_CTRL 0x2e40
+#define regGL1H_ARB_CTRL_BASE_IDX 1
+#define regGL1H_GL1_CREDITS 0x2e41
+#define regGL1H_GL1_CREDITS_BASE_IDX 1
+#define regGL1H_BURST_MASK 0x2e42
+#define regGL1H_BURST_MASK_BASE_IDX 1
+#define regGL1H_BURST_CTRL 0x2e43
+#define regGL1H_BURST_CTRL_BASE_IDX 1
+#define regGL1H_ARB_STATUS 0x2e44
+#define regGL1H_ARB_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_perfddec
+// base address: 0x34000
+#define regCPG_PERFCOUNTER1_LO 0x3000
+#define regCPG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER1_HI 0x3001
+#define regCPG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_LO 0x3002
+#define regCPG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_HI 0x3003
+#define regCPG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_LO 0x3004
+#define regCPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_HI 0x3005
+#define regCPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_LO 0x3006
+#define regCPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_HI 0x3007
+#define regCPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_LO 0x3008
+#define regCPF_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_HI 0x3009
+#define regCPF_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_LO 0x300a
+#define regCPF_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_HI 0x300b
+#define regCPF_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_LATENCY_STATS_DATA 0x300c
+#define regCPF_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPG_LATENCY_STATS_DATA 0x300d
+#define regCPG_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPC_LATENCY_STATS_DATA 0x300e
+#define regCPC_LATENCY_STATS_DATA_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_LO 0x3040
+#define regGRBM_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_HI 0x3041
+#define regGRBM_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_LO 0x3043
+#define regGRBM_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_HI 0x3044
+#define regGRBM_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_LO 0x3045
+#define regGRBM_SE0_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_HI 0x3046
+#define regGRBM_SE0_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_LO 0x3047
+#define regGRBM_SE1_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_HI 0x3048
+#define regGRBM_SE1_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_LO 0x3049
+#define regGRBM_SE2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_HI 0x304a
+#define regGRBM_SE2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_LO 0x304b
+#define regGRBM_SE3_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_HI 0x304c
+#define regGRBM_SE3_PERFCOUNTER_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_LO 0x30a4
+#define regGE1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_HI 0x30a5
+#define regGE1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_LO 0x30a6
+#define regGE1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_HI 0x30a7
+#define regGE1_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_LO 0x30a8
+#define regGE1_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_HI 0x30a9
+#define regGE1_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_LO 0x30aa
+#define regGE1_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_HI 0x30ab
+#define regGE1_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_LO 0x30ac
+#define regGE2_DIST_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_HI 0x30ad
+#define regGE2_DIST_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_LO 0x30ae
+#define regGE2_DIST_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_HI 0x30af
+#define regGE2_DIST_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_LO 0x30b0
+#define regGE2_DIST_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_HI 0x30b1
+#define regGE2_DIST_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_LO 0x30b2
+#define regGE2_DIST_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_HI 0x30b3
+#define regGE2_DIST_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_LO 0x30b4
+#define regGE2_SE_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_HI 0x30b5
+#define regGE2_SE_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_LO 0x30b6
+#define regGE2_SE_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_HI 0x30b7
+#define regGE2_SE_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_LO 0x30b8
+#define regGE2_SE_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_HI 0x30b9
+#define regGE2_SE_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_LO 0x30ba
+#define regGE2_SE_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_HI 0x30bb
+#define regGE2_SE_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_LO 0x3100
+#define regPA_SU_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_HI 0x3101
+#define regPA_SU_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_LO 0x3102
+#define regPA_SU_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_HI 0x3103
+#define regPA_SU_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_LO 0x3104
+#define regPA_SU_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_HI 0x3105
+#define regPA_SU_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_LO 0x3106
+#define regPA_SU_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_HI 0x3107
+#define regPA_SU_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_LO 0x3140
+#define regPA_SC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_HI 0x3141
+#define regPA_SC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_LO 0x3142
+#define regPA_SC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_HI 0x3143
+#define regPA_SC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_LO 0x3144
+#define regPA_SC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_HI 0x3145
+#define regPA_SC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_LO 0x3146
+#define regPA_SC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_HI 0x3147
+#define regPA_SC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_LO 0x3148
+#define regPA_SC_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_HI 0x3149
+#define regPA_SC_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_LO 0x314a
+#define regPA_SC_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_HI 0x314b
+#define regPA_SC_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_LO 0x314c
+#define regPA_SC_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_HI 0x314d
+#define regPA_SC_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_LO 0x314e
+#define regPA_SC_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_HI 0x314f
+#define regPA_SC_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_HI 0x3180
+#define regSPI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_LO 0x3181
+#define regSPI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_HI 0x3182
+#define regSPI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_LO 0x3183
+#define regSPI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_HI 0x3184
+#define regSPI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_LO 0x3185
+#define regSPI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_HI 0x3186
+#define regSPI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_LO 0x3187
+#define regSPI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_HI 0x3188
+#define regSPI_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_LO 0x3189
+#define regSPI_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_HI 0x318a
+#define regSPI_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_LO 0x318b
+#define regSPI_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER0_HI 0x318c
+#define regPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER0_LO 0x318d
+#define regPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER1_HI 0x318e
+#define regPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER1_LO 0x318f
+#define regPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER2_HI 0x3190
+#define regPC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER2_LO 0x3191
+#define regPC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER3_HI 0x3192
+#define regPC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER3_LO 0x3193
+#define regPC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_LO 0x31c0
+#define regSQ_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_LO 0x31c2
+#define regSQ_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_LO 0x31c4
+#define regSQ_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_LO 0x31c6
+#define regSQ_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_LO 0x31c8
+#define regSQ_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_LO 0x31ca
+#define regSQ_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_LO 0x31cc
+#define regSQ_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_LO 0x31ce
+#define regSQ_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_LO 0x31e4
+#define regSQG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_HI 0x31e5
+#define regSQG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_LO 0x31e6
+#define regSQG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_HI 0x31e7
+#define regSQG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_LO 0x31e8
+#define regSQG_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_HI 0x31e9
+#define regSQG_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_LO 0x31ea
+#define regSQG_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_HI 0x31eb
+#define regSQG_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_LO 0x31ec
+#define regSQG_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_HI 0x31ed
+#define regSQG_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_LO 0x31ee
+#define regSQG_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_HI 0x31ef
+#define regSQG_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_LO 0x31f0
+#define regSQG_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_HI 0x31f1
+#define regSQG_PERFCOUNTER6_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_LO 0x31f2
+#define regSQG_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_HI 0x31f3
+#define regSQG_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER0_LO 0x3240
+#define regSX_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER0_HI 0x3241
+#define regSX_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER1_LO 0x3242
+#define regSX_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER1_HI 0x3243
+#define regSX_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER2_LO 0x3244
+#define regSX_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER2_HI 0x3245
+#define regSX_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER3_LO 0x3246
+#define regSX_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER3_HI 0x3247
+#define regSX_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_LO 0x3260
+#define regGCEA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_HI 0x3261
+#define regGCEA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_LO 0x3262
+#define regGCEA_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_HI 0x3263
+#define regGCEA_PERFCOUNTER_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_LO 0x3280
+#define regGDS_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_HI 0x3281
+#define regGDS_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_LO 0x3282
+#define regGDS_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_HI 0x3283
+#define regGDS_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_LO 0x3284
+#define regGDS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_HI 0x3285
+#define regGDS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_LO 0x3286
+#define regGDS_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_HI 0x3287
+#define regGDS_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER0_LO 0x32c0
+#define regTA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER0_HI 0x32c1
+#define regTA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER1_LO 0x32c2
+#define regTA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER1_HI 0x32c3
+#define regTA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER0_LO 0x3300
+#define regTD_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER0_HI 0x3301
+#define regTD_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER1_LO 0x3302
+#define regTD_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER1_HI 0x3303
+#define regTD_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_LO 0x3340
+#define regTCP_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_HI 0x3341
+#define regTCP_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_LO 0x3342
+#define regTCP_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_HI 0x3343
+#define regTCP_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_LO 0x3344
+#define regTCP_PERFCOUNTER2_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_HI 0x3345
+#define regTCP_PERFCOUNTER2_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_LO 0x3346
+#define regTCP_PERFCOUNTER3_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_HI 0x3347
+#define regTCP_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER 0x3348
+#define regTCP_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER2 0x3349
+#define regTCP_PERFCOUNTER_FILTER2_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER_EN 0x334a
+#define regTCP_PERFCOUNTER_FILTER_EN_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_LO 0x3380
+#define regGL2C_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_HI 0x3381
+#define regGL2C_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_LO 0x3382
+#define regGL2C_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_HI 0x3383
+#define regGL2C_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_LO 0x3384
+#define regGL2C_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_HI 0x3385
+#define regGL2C_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_LO 0x3386
+#define regGL2C_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_HI 0x3387
+#define regGL2C_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_LO 0x3390
+#define regGL2A_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_HI 0x3391
+#define regGL2A_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_LO 0x3392
+#define regGL2A_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_HI 0x3393
+#define regGL2A_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_LO 0x3394
+#define regGL2A_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_HI 0x3395
+#define regGL2A_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_LO 0x3396
+#define regGL2A_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_HI 0x3397
+#define regGL2A_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_LO 0x33a0
+#define regGL1C_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_HI 0x33a1
+#define regGL1C_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_LO 0x33a2
+#define regGL1C_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_HI 0x33a3
+#define regGL1C_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_LO 0x33a4
+#define regGL1C_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_HI 0x33a5
+#define regGL1C_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_LO 0x33a6
+#define regGL1C_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_HI 0x33a7
+#define regGL1C_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_LO 0x33c0
+#define regCHC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_HI 0x33c1
+#define regCHC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_LO 0x33c2
+#define regCHC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_HI 0x33c3
+#define regCHC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_LO 0x33c4
+#define regCHC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_HI 0x33c5
+#define regCHC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_LO 0x33c6
+#define regCHC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_HI 0x33c7
+#define regCHC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_LO 0x33c8
+#define regCHCG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_HI 0x33c9
+#define regCHCG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_LO 0x33ca
+#define regCHCG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_HI 0x33cb
+#define regCHCG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_LO 0x33cc
+#define regCHCG_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_HI 0x33cd
+#define regCHCG_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_LO 0x33ce
+#define regCHCG_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_HI 0x33cf
+#define regCHCG_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER0_LO 0x3406
+#define regCB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER0_HI 0x3407
+#define regCB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER1_LO 0x3408
+#define regCB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER1_HI 0x3409
+#define regCB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER2_LO 0x340a
+#define regCB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER2_HI 0x340b
+#define regCB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER3_LO 0x340c
+#define regCB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER3_HI 0x340d
+#define regCB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER0_LO 0x3440
+#define regDB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER0_HI 0x3441
+#define regDB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER1_LO 0x3442
+#define regDB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER1_HI 0x3443
+#define regDB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER2_LO 0x3444
+#define regDB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER2_HI 0x3445
+#define regDB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER3_LO 0x3446
+#define regDB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER3_HI 0x3447
+#define regDB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_LO 0x3480
+#define regRLC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_HI 0x3481
+#define regRLC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_LO 0x3482
+#define regRLC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_HI 0x3483
+#define regRLC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_LO 0x34c0
+#define regRMI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_HI 0x34c1
+#define regRMI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_LO 0x34c2
+#define regRMI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_HI 0x34c3
+#define regRMI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_LO 0x34c4
+#define regRMI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_HI 0x34c5
+#define regRMI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_LO 0x34c6
+#define regRMI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_HI 0x34c7
+#define regRMI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_LO 0x3520
+#define regGCR_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_HI 0x3521
+#define regGCR_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_LO 0x3522
+#define regGCR_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_HI 0x3523
+#define regGCR_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_LO 0x3580
+#define regPA_PH_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_HI 0x3581
+#define regPA_PH_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_LO 0x3582
+#define regPA_PH_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_HI 0x3583
+#define regPA_PH_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_LO 0x3584
+#define regPA_PH_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_HI 0x3585
+#define regPA_PH_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_LO 0x3586
+#define regPA_PH_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_HI 0x3587
+#define regPA_PH_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_LO 0x3588
+#define regPA_PH_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_HI 0x3589
+#define regPA_PH_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_LO 0x358a
+#define regPA_PH_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_HI 0x358b
+#define regPA_PH_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_LO 0x358c
+#define regPA_PH_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_HI 0x358d
+#define regPA_PH_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_LO 0x358e
+#define regPA_PH_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_HI 0x358f
+#define regPA_PH_PERFCOUNTER7_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_LO 0x35a0
+#define regUTCL1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_HI 0x35a1
+#define regUTCL1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_LO 0x35a2
+#define regUTCL1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_HI 0x35a3
+#define regUTCL1_PERFCOUNTER1_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_LO 0x35a4
+#define regUTCL1_PERFCOUNTER2_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_HI 0x35a5
+#define regUTCL1_PERFCOUNTER2_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_LO 0x35a6
+#define regUTCL1_PERFCOUNTER3_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_HI 0x35a7
+#define regUTCL1_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_LO 0x35c0
+#define regGL1A_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_HI 0x35c1
+#define regGL1A_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_LO 0x35c2
+#define regGL1A_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_HI 0x35c3
+#define regGL1A_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_LO 0x35c4
+#define regGL1A_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_HI 0x35c5
+#define regGL1A_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_LO 0x35c6
+#define regGL1A_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_HI 0x35c7
+#define regGL1A_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_LO 0x35d0
+#define regGL1H_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_HI 0x35d1
+#define regGL1H_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_LO 0x35d2
+#define regGL1H_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_HI 0x35d3
+#define regGL1H_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_LO 0x35d4
+#define regGL1H_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_HI 0x35d5
+#define regGL1H_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_LO 0x35d6
+#define regGL1H_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_HI 0x35d7
+#define regGL1H_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_LO 0x3600
+#define regCHA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_HI 0x3601
+#define regCHA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_LO 0x3602
+#define regCHA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_HI 0x3603
+#define regCHA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_LO 0x3604
+#define regCHA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_HI 0x3605
+#define regCHA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_LO 0x3606
+#define regCHA_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_HI 0x3607
+#define regCHA_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_LO 0x3640
+#define regGUS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_HI 0x3641
+#define regGUS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGUS_PERFCOUNTER_LO 0x3642
+#define regGUS_PERFCOUNTER_LO_BASE_IDX 1
+#define regGUS_PERFCOUNTER_HI 0x3643
+#define regGUS_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_perfsdec
+// base address: 0x36000
+#define regCPG_PERFCOUNTER1_SELECT 0x3800
+#define regCPG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT1 0x3801
+#define regCPG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT 0x3802
+#define regCPG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_SELECT 0x3803
+#define regCPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT1 0x3804
+#define regCPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_SELECT 0x3805
+#define regCPF_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT1 0x3806
+#define regCPF_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT 0x3807
+#define regCPF_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCP_PERFMON_CNTL 0x3808
+#define regCP_PERFMON_CNTL_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT 0x3809
+#define regCPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT 0x380a
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT 0x380b
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPF_LATENCY_STATS_SELECT 0x380c
+#define regCPF_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPG_LATENCY_STATS_SELECT 0x380d
+#define regCPG_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_LATENCY_STATS_SELECT 0x380e
+#define regCPC_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_TC_PERF_COUNTER_WINDOW_SELECT 0x380f
+#define regCPC_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT 0x3810
+#define regCP_DRAW_OBJECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT_COUNTER 0x3811
+#define regCP_DRAW_OBJECT_COUNTER_BASE_IDX 1
+#define regCP_DRAW_WINDOW_MASK_HI 0x3812
+#define regCP_DRAW_WINDOW_MASK_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_HI 0x3813
+#define regCP_DRAW_WINDOW_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_LO 0x3814
+#define regCP_DRAW_WINDOW_LO_BASE_IDX 1
+#define regCP_DRAW_WINDOW_CNTL 0x3815
+#define regCP_DRAW_WINDOW_CNTL_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT 0x3840
+#define regGRBM_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT 0x3841
+#define regGRBM_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_SELECT 0x3842
+#define regGRBM_SE0_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_SELECT 0x3843
+#define regGRBM_SE1_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_SELECT 0x3844
+#define regGRBM_SE2_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_SELECT 0x3845
+#define regGRBM_SE3_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT_HI 0x384d
+#define regGRBM_PERFCOUNTER0_SELECT_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT_HI 0x384e
+#define regGRBM_PERFCOUNTER1_SELECT_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_SELECT 0x38a4
+#define regGE1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_SELECT1 0x38a5
+#define regGE1_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_SELECT 0x38a6
+#define regGE1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_SELECT1 0x38a7
+#define regGE1_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_SELECT 0x38a8
+#define regGE1_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_SELECT1 0x38a9
+#define regGE1_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_SELECT 0x38aa
+#define regGE1_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_SELECT1 0x38ab
+#define regGE1_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_SELECT 0x38ac
+#define regGE2_DIST_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_SELECT1 0x38ad
+#define regGE2_DIST_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_SELECT 0x38ae
+#define regGE2_DIST_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_SELECT1 0x38af
+#define regGE2_DIST_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_SELECT 0x38b0
+#define regGE2_DIST_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_SELECT1 0x38b1
+#define regGE2_DIST_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_SELECT 0x38b2
+#define regGE2_DIST_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_SELECT1 0x38b3
+#define regGE2_DIST_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_SELECT 0x38b4
+#define regGE2_SE_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_SELECT1 0x38b5
+#define regGE2_SE_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_SELECT 0x38b6
+#define regGE2_SE_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_SELECT1 0x38b7
+#define regGE2_SE_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_SELECT 0x38b8
+#define regGE2_SE_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_SELECT1 0x38b9
+#define regGE2_SE_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_SELECT 0x38ba
+#define regGE2_SE_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_SELECT1 0x38bb
+#define regGE2_SE_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT 0x3900
+#define regPA_SU_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT1 0x3901
+#define regPA_SU_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT 0x3902
+#define regPA_SU_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT1 0x3903
+#define regPA_SU_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT 0x3904
+#define regPA_SU_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT1 0x3905
+#define regPA_SU_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT 0x3906
+#define regPA_SU_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT1 0x3907
+#define regPA_SU_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT 0x3940
+#define regPA_SC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT1 0x3941
+#define regPA_SC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_SELECT 0x3942
+#define regPA_SC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_SELECT 0x3943
+#define regPA_SC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_SELECT 0x3944
+#define regPA_SC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_SELECT 0x3945
+#define regPA_SC_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_SELECT 0x3946
+#define regPA_SC_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_SELECT 0x3947
+#define regPA_SC_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_SELECT 0x3948
+#define regPA_SC_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT 0x3980
+#define regSPI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT 0x3981
+#define regSPI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT 0x3982
+#define regSPI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT 0x3983
+#define regSPI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT1 0x3984
+#define regSPI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT1 0x3985
+#define regSPI_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT1 0x3986
+#define regSPI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT1 0x3987
+#define regSPI_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_SELECT 0x3988
+#define regSPI_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_SELECT 0x3989
+#define regSPI_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER_BINS 0x398a
+#define regSPI_PERFCOUNTER_BINS_BASE_IDX 1
+#define regPC_PERFCOUNTER0_SELECT 0x398c
+#define regPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER1_SELECT 0x398d
+#define regPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER2_SELECT 0x398e
+#define regPC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER3_SELECT 0x398f
+#define regPC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER0_SELECT1 0x3990
+#define regPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER1_SELECT1 0x3991
+#define regPC_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER2_SELECT1 0x3992
+#define regPC_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER3_SELECT1 0x3993
+#define regPC_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_SELECT 0x39c0
+#define regSQ_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_SELECT 0x39c1
+#define regSQ_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_SELECT 0x39c2
+#define regSQ_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_SELECT 0x39c3
+#define regSQ_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_SELECT 0x39c4
+#define regSQ_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_SELECT 0x39c5
+#define regSQ_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_SELECT 0x39c6
+#define regSQ_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_SELECT 0x39c7
+#define regSQ_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER8_SELECT 0x39c8
+#define regSQ_PERFCOUNTER8_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER9_SELECT 0x39c9
+#define regSQ_PERFCOUNTER9_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER10_SELECT 0x39ca
+#define regSQ_PERFCOUNTER10_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER11_SELECT 0x39cb
+#define regSQ_PERFCOUNTER11_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER12_SELECT 0x39cc
+#define regSQ_PERFCOUNTER12_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER13_SELECT 0x39cd
+#define regSQ_PERFCOUNTER13_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER14_SELECT 0x39ce
+#define regSQ_PERFCOUNTER14_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER15_SELECT 0x39cf
+#define regSQ_PERFCOUNTER15_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_SELECT 0x39d0
+#define regSQG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_SELECT 0x39d1
+#define regSQG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_SELECT 0x39d2
+#define regSQG_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_SELECT 0x39d3
+#define regSQG_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_SELECT 0x39d4
+#define regSQG_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_SELECT 0x39d5
+#define regSQG_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_SELECT 0x39d6
+#define regSQG_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_SELECT 0x39d7
+#define regSQG_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER_CTRL 0x39d8
+#define regSQG_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQG_PERFCOUNTER_CTRL2 0x39da
+#define regSQG_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSQG_PERF_SAMPLE_FINISH 0x39db
+#define regSQG_PERF_SAMPLE_FINISH_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL 0x39e0
+#define regSQ_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL2 0x39e2
+#define regSQ_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF0_BASE 0x39e8
+#define regSQ_THREAD_TRACE_BUF0_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF0_SIZE 0x39e9
+#define regSQ_THREAD_TRACE_BUF0_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF1_BASE 0x39ea
+#define regSQ_THREAD_TRACE_BUF1_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF1_SIZE 0x39eb
+#define regSQ_THREAD_TRACE_BUF1_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_CTRL 0x39ec
+#define regSQ_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regSQ_THREAD_TRACE_MASK 0x39ed
+#define regSQ_THREAD_TRACE_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_TOKEN_MASK 0x39ee
+#define regSQ_THREAD_TRACE_TOKEN_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_WPTR 0x39ef
+#define regSQ_THREAD_TRACE_WPTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS 0x39f4
+#define regSQ_THREAD_TRACE_STATUS_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS2 0x39f5
+#define regSQ_THREAD_TRACE_STATUS2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_GFX_DRAW_CNTR 0x39f6
+#define regSQ_THREAD_TRACE_GFX_DRAW_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_GFX_MARKER_CNTR 0x39f7
+#define regSQ_THREAD_TRACE_GFX_MARKER_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HP3D_DRAW_CNTR 0x39f8
+#define regSQ_THREAD_TRACE_HP3D_DRAW_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HP3D_MARKER_CNTR 0x39f9
+#define regSQ_THREAD_TRACE_HP3D_MARKER_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_DROPPED_CNTR 0x39fa
+#define regSQ_THREAD_TRACE_DROPPED_CNTR_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_SELECT 0x3a00
+#define regGCEA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_SELECT1 0x3a01
+#define regGCEA_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_MODE 0x3a02
+#define regGCEA_PERFCOUNTER2_MODE_BASE_IDX 1
+#define regGCEA_PERFCOUNTER0_CFG 0x3a03
+#define regGCEA_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCEA_PERFCOUNTER1_CFG 0x3a04
+#define regGCEA_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_RSLT_CNTL 0x3a05
+#define regGCEA_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT 0x3a40
+#define regSX_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT 0x3a41
+#define regSX_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER2_SELECT 0x3a42
+#define regSX_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER3_SELECT 0x3a43
+#define regSX_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT1 0x3a44
+#define regSX_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT1 0x3a45
+#define regSX_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT 0x3a80
+#define regGDS_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT 0x3a81
+#define regGDS_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT 0x3a82
+#define regGDS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT 0x3a83
+#define regGDS_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT1 0x3a84
+#define regGDS_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT1 0x3a85
+#define regGDS_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT1 0x3a86
+#define regGDS_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT1 0x3a87
+#define regGDS_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT 0x3ac0
+#define regTA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT1 0x3ac1
+#define regTA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER1_SELECT 0x3ac2
+#define regTA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT 0x3b00
+#define regTD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT1 0x3b01
+#define regTD_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTD_PERFCOUNTER1_SELECT 0x3b02
+#define regTD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT 0x3b40
+#define regTCP_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT1 0x3b41
+#define regTCP_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT 0x3b42
+#define regTCP_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT1 0x3b43
+#define regTCP_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_SELECT 0x3b44
+#define regTCP_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_SELECT 0x3b45
+#define regTCP_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_SELECT 0x3b80
+#define regGL2C_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_SELECT1 0x3b81
+#define regGL2C_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_SELECT 0x3b82
+#define regGL2C_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_SELECT1 0x3b83
+#define regGL2C_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_SELECT 0x3b84
+#define regGL2C_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_SELECT 0x3b85
+#define regGL2C_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_SELECT 0x3b90
+#define regGL2A_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_SELECT1 0x3b91
+#define regGL2A_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_SELECT 0x3b92
+#define regGL2A_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_SELECT1 0x3b93
+#define regGL2A_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_SELECT 0x3b94
+#define regGL2A_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_SELECT 0x3b95
+#define regGL2A_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_SELECT 0x3ba0
+#define regGL1C_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_SELECT1 0x3ba1
+#define regGL1C_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_SELECT 0x3ba2
+#define regGL1C_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_SELECT 0x3ba3
+#define regGL1C_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_SELECT 0x3ba4
+#define regGL1C_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_SELECT 0x3bc0
+#define regCHC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_SELECT1 0x3bc1
+#define regCHC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_SELECT 0x3bc2
+#define regCHC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_SELECT 0x3bc3
+#define regCHC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_SELECT 0x3bc4
+#define regCHC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_SELECT 0x3bc6
+#define regCHCG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_SELECT1 0x3bc7
+#define regCHCG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_SELECT 0x3bc8
+#define regCHCG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_SELECT 0x3bc9
+#define regCHCG_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_SELECT 0x3bca
+#define regCHCG_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER_FILTER 0x3c00
+#define regCB_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT 0x3c01
+#define regCB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT1 0x3c02
+#define regCB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCB_PERFCOUNTER1_SELECT 0x3c03
+#define regCB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER2_SELECT 0x3c04
+#define regCB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER3_SELECT 0x3c05
+#define regCB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT 0x3c40
+#define regDB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT1 0x3c41
+#define regDB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT 0x3c42
+#define regDB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT1 0x3c43
+#define regDB_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER2_SELECT 0x3c44
+#define regDB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER3_SELECT 0x3c46
+#define regDB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRLC_SPM_PERFMON_CNTL 0x3c80
+#define regRLC_SPM_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_LO 0x3c81
+#define regRLC_SPM_PERFMON_RING_BASE_LO_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_HI 0x3c82
+#define regRLC_SPM_PERFMON_RING_BASE_HI_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_SIZE 0x3c83
+#define regRLC_SPM_PERFMON_RING_SIZE_BASE_IDX 1
+#define regRLC_SPM_RING_WRPTR 0x3c84
+#define regRLC_SPM_RING_WRPTR_BASE_IDX 1
+#define regRLC_SPM_RING_RDPTR 0x3c85
+#define regRLC_SPM_RING_RDPTR_BASE_IDX 1
+#define regRLC_SPM_SEGMENT_THRESHOLD 0x3c86
+#define regRLC_SPM_SEGMENT_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE 0x3c87
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR 0x3c88
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA 0x3c89
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_ADDR 0x3c8a
+#define regRLC_SPM_SE_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_DATA 0x3c8b
+#define regRLC_SPM_SE_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_ADDR 0x3c92
+#define regRLC_SPM_ACCUM_DATARAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_DATA 0x3c93
+#define regRLC_SPM_ACCUM_DATARAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SWA_DATARAM_ADDR 0x3c94
+#define regRLC_SPM_ACCUM_SWA_DATARAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SWA_DATARAM_DATA 0x3c95
+#define regRLC_SPM_ACCUM_SWA_DATARAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR 0x3c96
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_DATA 0x3c97
+#define regRLC_SPM_ACCUM_CTRLRAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET 0x3c98
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET_BASE_IDX 1
+#define regRLC_SPM_ACCUM_STATUS 0x3c99
+#define regRLC_SPM_ACCUM_STATUS_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRL 0x3c9a
+#define regRLC_SPM_ACCUM_CTRL_BASE_IDX 1
+#define regRLC_SPM_ACCUM_MODE 0x3c9b
+#define regRLC_SPM_ACCUM_MODE_BASE_IDX 1
+#define regRLC_SPM_ACCUM_THRESHOLD 0x3c9c
+#define regRLC_SPM_ACCUM_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SAMPLES_REQUESTED 0x3c9d
+#define regRLC_SPM_ACCUM_SAMPLES_REQUESTED_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_WRCOUNT 0x3c9e
+#define regRLC_SPM_ACCUM_DATARAM_WRCOUNT_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS 0x3c9f
+#define regRLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS_BASE_IDX 1
+#define regRLC_SPM_PAUSE 0x3ca2
+#define regRLC_SPM_PAUSE_BASE_IDX 1
+#define regRLC_SPM_STATUS 0x3ca3
+#define regRLC_SPM_STATUS_BASE_IDX 1
+#define regRLC_SPM_GFXCLOCK_LOWCOUNT 0x3ca4
+#define regRLC_SPM_GFXCLOCK_LOWCOUNT_BASE_IDX 1
+#define regRLC_SPM_GFXCLOCK_HIGHCOUNT 0x3ca5
+#define regRLC_SPM_GFXCLOCK_HIGHCOUNT_BASE_IDX 1
+#define regRLC_SPM_MODE 0x3cad
+#define regRLC_SPM_MODE_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_DATA_LO 0x3cae
+#define regRLC_SPM_RSPM_REQ_DATA_LO_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_DATA_HI 0x3caf
+#define regRLC_SPM_RSPM_REQ_DATA_HI_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_OP 0x3cb0
+#define regRLC_SPM_RSPM_REQ_OP_BASE_IDX 1
+#define regRLC_SPM_RSPM_RET_DATA 0x3cb1
+#define regRLC_SPM_RSPM_RET_DATA_BASE_IDX 1
+#define regRLC_SPM_RSPM_RET_OP 0x3cb2
+#define regRLC_SPM_RSPM_RET_OP_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_DATA_LO 0x3cb3
+#define regRLC_SPM_SE_RSPM_REQ_DATA_LO_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_DATA_HI 0x3cb4
+#define regRLC_SPM_SE_RSPM_REQ_DATA_HI_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_OP 0x3cb5
+#define regRLC_SPM_SE_RSPM_REQ_OP_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_RET_DATA 0x3cb6
+#define regRLC_SPM_SE_RSPM_RET_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_RET_OP 0x3cb7
+#define regRLC_SPM_SE_RSPM_RET_OP_BASE_IDX 1
+#define regRLC_SPM_RSPM_CMD 0x3cb8
+#define regRLC_SPM_RSPM_CMD_BASE_IDX 1
+#define regRLC_SPM_RSPM_CMD_ACK 0x3cb9
+#define regRLC_SPM_RSPM_CMD_ACK_BASE_IDX 1
+#define regRLC_SPM_SPARE 0x3cbf
+#define regRLC_SPM_SPARE_BASE_IDX 1
+#define regRLC_PERFMON_CNTL 0x3cc0
+#define regRLC_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_SELECT 0x3cc1
+#define regRLC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_SELECT 0x3cc2
+#define regRLC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_CNTL 0x3cc3
+#define regRLC_GPU_IOV_PERF_CNT_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR 0x3cc4
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA 0x3cc5
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR 0x3cc6
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA 0x3cc7
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT 0x3d00
+#define regRMI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT1 0x3d01
+#define regRMI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_SELECT 0x3d02
+#define regRMI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT 0x3d03
+#define regRMI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT1 0x3d04
+#define regRMI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_SELECT 0x3d05
+#define regRMI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRMI_PERF_COUNTER_CNTL 0x3d06
+#define regRMI_PERF_COUNTER_CNTL_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_SELECT 0x3d60
+#define regGCR_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_SELECT1 0x3d61
+#define regGCR_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_SELECT 0x3d62
+#define regGCR_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_SELECT 0x3d80
+#define regPA_PH_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_SELECT1 0x3d81
+#define regPA_PH_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_SELECT 0x3d82
+#define regPA_PH_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_SELECT 0x3d83
+#define regPA_PH_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_SELECT 0x3d84
+#define regPA_PH_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_SELECT 0x3d85
+#define regPA_PH_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_SELECT 0x3d86
+#define regPA_PH_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_SELECT 0x3d87
+#define regPA_PH_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_SELECT 0x3d88
+#define regPA_PH_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_SELECT1 0x3d90
+#define regPA_PH_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_SELECT1 0x3d91
+#define regPA_PH_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_SELECT1 0x3d92
+#define regPA_PH_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_SELECT 0x3da0
+#define regUTCL1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_SELECT 0x3da1
+#define regUTCL1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_SELECT 0x3da2
+#define regUTCL1_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_SELECT 0x3da3
+#define regUTCL1_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_SELECT 0x3dc0
+#define regGL1A_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_SELECT1 0x3dc1
+#define regGL1A_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_SELECT 0x3dc2
+#define regGL1A_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_SELECT 0x3dc3
+#define regGL1A_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_SELECT 0x3dc4
+#define regGL1A_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_SELECT 0x3dd0
+#define regGL1H_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_SELECT1 0x3dd1
+#define regGL1H_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_SELECT 0x3dd2
+#define regGL1H_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_SELECT 0x3dd3
+#define regGL1H_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_SELECT 0x3dd4
+#define regGL1H_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_SELECT 0x3de0
+#define regCHA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_SELECT1 0x3de1
+#define regCHA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_SELECT 0x3de2
+#define regCHA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_SELECT 0x3de3
+#define regCHA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_SELECT 0x3de4
+#define regCHA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_SELECT 0x3e00
+#define regGUS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_SELECT1 0x3e01
+#define regGUS_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_MODE 0x3e02
+#define regGUS_PERFCOUNTER2_MODE_BASE_IDX 1
+#define regGUS_PERFCOUNTER0_CFG 0x3e03
+#define regGUS_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGUS_PERFCOUNTER1_CFG 0x3e04
+#define regGUS_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGUS_PERFCOUNTER_RSLT_CNTL 0x3e05
+#define regGUS_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gdfll_gdfll_dec
+// base address: 0x3a000
+#define regGDFLL_EDC_HYSTERESIS_CNTL 0x4828
+#define regGDFLL_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_EDC_HYSTERESIS_STAT 0x4829
+#define regGDFLL_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: gc_gdfll_se_gdfll_dec
+// base address: 0x3a300
+#define regGDFLL_SE_EDC_HYSTERESIS_CNTL 0x48e8
+#define regGDFLL_SE_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_SE_EDC_HYSTERESIS_STAT 0x48e9
+#define regGDFLL_SE_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfs_grtavfs_dec
+// base address: 0x3ac00
+#define regGRTAVFS_RTAVFS_REG_ADDR 0x4b00
+#define regGRTAVFS_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_WR_DATA 0x4b01
+#define regGRTAVFS_RTAVFS_WR_DATA_BASE_IDX 1
+#define regGRTAVFS_GENERAL_0 0x4b02
+#define regGRTAVFS_GENERAL_0_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_RD_DATA 0x4b03
+#define regGRTAVFS_RTAVFS_RD_DATA_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_REG_CTRL 0x4b04
+#define regGRTAVFS_RTAVFS_REG_CTRL_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_REG_STATUS 0x4b05
+#define regGRTAVFS_RTAVFS_REG_STATUS_BASE_IDX 1
+#define regGRTAVFS_TARG_FREQ 0x4b06
+#define regGRTAVFS_TARG_FREQ_BASE_IDX 1
+#define regGRTAVFS_TARG_VOLT 0x4b07
+#define regGRTAVFS_TARG_VOLT_BASE_IDX 1
+#define regGRTAVFS_SOFT_RESET 0x4b0c
+#define regGRTAVFS_SOFT_RESET_BASE_IDX 1
+#define regGRTAVFS_PSM_CNTL 0x4b0d
+#define regGRTAVFS_PSM_CNTL_BASE_IDX 1
+#define regGRTAVFS_CLK_CNTL 0x4b0e
+#define regGRTAVFS_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfs_se_grtavfs_dec
+// base address: 0x3ad00
+#define regGRTAVFS_SE_RTAVFS_REG_ADDR 0x4b40
+#define regGRTAVFS_SE_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_WR_DATA 0x4b41
+#define regGRTAVFS_SE_RTAVFS_WR_DATA_BASE_IDX 1
+#define regGRTAVFS_SE_GENERAL_0 0x4b42
+#define regGRTAVFS_SE_GENERAL_0_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_RD_DATA 0x4b43
+#define regGRTAVFS_SE_RTAVFS_RD_DATA_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_REG_CTRL 0x4b44
+#define regGRTAVFS_SE_RTAVFS_REG_CTRL_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_REG_STATUS 0x4b45
+#define regGRTAVFS_SE_RTAVFS_REG_STATUS_BASE_IDX 1
+#define regGRTAVFS_SE_TARG_FREQ 0x4b46
+#define regGRTAVFS_SE_TARG_FREQ_BASE_IDX 1
+#define regGRTAVFS_SE_TARG_VOLT 0x4b47
+#define regGRTAVFS_SE_TARG_VOLT_BASE_IDX 1
+#define regGRTAVFS_SE_SOFT_RESET 0x4b4c
+#define regGRTAVFS_SE_SOFT_RESET_BASE_IDX 1
+#define regGRTAVFS_SE_PSM_CNTL 0x4b4d
+#define regGRTAVFS_SE_PSM_CNTL_BASE_IDX 1
+#define regGRTAVFS_SE_CLK_CNTL 0x4b4e
+#define regGRTAVFS_SE_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfsdec
+// base address: 0x3ac00
+#define regRTAVFS_RTAVFS_REG_ADDR 0x4b00
+#define regRTAVFS_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regRTAVFS_RTAVFS_WR_DATA 0x4b01
+#define regRTAVFS_RTAVFS_WR_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_hypdec
+// base address: 0x3e000
+#define regGFX_PIPE_PRIORITY 0x587f
+#define regGFX_PIPE_PRIORITY_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_ENABLE 0x5b00
+#define regRLC_GPU_IOV_VF_ENABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG6 0x5b06
+#define regRLC_GPU_IOV_CFG_REG6_BASE_IDX 1
+#define regRLC_SDMA0_STATUS 0x5b18
+#define regRLC_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_SDMA1_STATUS 0x5b19
+#define regRLC_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_SDMA2_STATUS 0x5b1a
+#define regRLC_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_SDMA3_STATUS 0x5b1b
+#define regRLC_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_SDMA0_BUSY_STATUS 0x5b1c
+#define regRLC_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA1_BUSY_STATUS 0x5b1d
+#define regRLC_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA2_BUSY_STATUS 0x5b1e
+#define regRLC_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA3_BUSY_STATUS 0x5b1f
+#define regRLC_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG8 0x5b20
+#define regRLC_GPU_IOV_CFG_REG8_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_0 0x5b25
+#define regRLC_RLCV_TIMER_INT_0_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_1 0x5b26
+#define regRLC_RLCV_TIMER_INT_1_BASE_IDX 1
+#define regRLC_RLCV_TIMER_CTRL 0x5b27
+#define regRLC_RLCV_TIMER_CTRL_BASE_IDX 1
+#define regRLC_RLCV_TIMER_STAT 0x5b28
+#define regRLC_RLCV_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS 0x5b2a
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET 0x5b2b
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR 0x5b2c
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_MASK 0x5b2d
+#define regRLC_GPU_IOV_VF_MASK_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_0 0x5b2e
+#define regRLC_HYP_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_1 0x5b2f
+#define regRLC_HYP_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_BUSY_CLK_CNTL 0x5b30
+#define regRLC_BUSY_CLK_CNTL_BASE_IDX 1
+#define regRLC_CLK_CNTL 0x5b31
+#define regRLC_CLK_CNTL_BASE_IDX 1
+#define regRLC_PACE_TIMER_STAT 0x5b33
+#define regRLC_PACE_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_BLOCK 0x5b34
+#define regRLC_GPU_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG1 0x5b35
+#define regRLC_GPU_IOV_CFG_REG1_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG2 0x5b36
+#define regRLC_GPU_IOV_CFG_REG2_BASE_IDX 1
+#define regRLC_GPU_IOV_VM_BUSY_STATUS 0x5b37
+#define regRLC_GPU_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_0 0x5b38
+#define regRLC_GPU_IOV_SCH_0_BASE_IDX 1
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID 0x5b39
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_3 0x5b3a
+#define regRLC_GPU_IOV_SCH_3_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_1 0x5b3b
+#define regRLC_GPU_IOV_SCH_1_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_2 0x5b3c
+#define regRLC_GPU_IOV_SCH_2_BASE_IDX 1
+#define regRLC_PACE_INT_FORCE 0x5b3d
+#define regRLC_PACE_INT_FORCE_BASE_IDX 1
+#define regRLC_PACE_INT_CLEAR 0x5b3e
+#define regRLC_PACE_INT_CLEAR_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_STAT 0x5b3f
+#define regRLC_GPU_IOV_INT_STAT_BASE_IDX 1
+#define regRLC_IH_COOKIE 0x5b41
+#define regRLC_IH_COOKIE_BASE_IDX 1
+#define regRLC_IH_COOKIE_CNTL 0x5b42
+#define regRLC_IH_COOKIE_CNTL_BASE_IDX 1
+#define regRLC_HYP_RLCG_UCODE_CHKSUM 0x5b43
+#define regRLC_HYP_RLCG_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_HYP_RLCP_UCODE_CHKSUM 0x5b44
+#define regRLC_HYP_RLCP_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_HYP_RLCV_UCODE_CHKSUM 0x5b45
+#define regRLC_HYP_RLCV_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_CNTL 0x5b46
+#define regRLC_GPU_IOV_F32_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_RESET 0x5b47
+#define regRLC_GPU_IOV_F32_RESET_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_ADDR 0x5b48
+#define regRLC_GPU_IOV_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_DATA 0x5b49
+#define regRLC_GPU_IOV_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_SMU_RESPONSE 0x5b4a
+#define regRLC_GPU_IOV_SMU_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_INVALIDATE_CACHE 0x5b4b
+#define regRLC_GPU_IOV_F32_INVALIDATE_CACHE_BASE_IDX 1
+#define regRLC_GPU_IOV_VIRT_RESET_REQ 0x5b4c
+#define regRLC_GPU_IOV_VIRT_RESET_REQ_BASE_IDX 1
+#define regRLC_GPU_IOV_RLC_RESPONSE 0x5b4d
+#define regRLC_GPU_IOV_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_DISABLE 0x5b4e
+#define regRLC_GPU_IOV_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_FORCE 0x5b4f
+#define regRLC_GPU_IOV_INT_FORCE_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_ADDR 0x5b50
+#define regRLC_GPU_IOV_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_DATA 0x5b51
+#define regRLC_GPU_IOV_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_2 0x5b52
+#define regRLC_HYP_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_3 0x5b53
+#define regRLC_HYP_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_LX6_SCRATCH_ADDR 0x5b59
+#define regRLC_LX6_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_LX6_CORE1_SCRATCH_ADDR 0x5b5b
+#define regRLC_LX6_CORE1_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_ADDR 0x5b60
+#define regRLC_GPM_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_DATA 0x5b61
+#define regRLC_GPM_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPM_IRAM_ADDR 0x5b62
+#define regRLC_GPM_IRAM_ADDR_BASE_IDX 1
+#define regRLC_GPM_IRAM_DATA 0x5b63
+#define regRLC_GPM_IRAM_DATA_BASE_IDX 1
+#define regRLC_RLCP_IRAM_ADDR 0x5b64
+#define regRLC_RLCP_IRAM_ADDR_BASE_IDX 1
+#define regRLC_RLCP_IRAM_DATA 0x5b65
+#define regRLC_RLCP_IRAM_DATA_BASE_IDX 1
+#define regRLC_RLCV_IRAM_ADDR 0x5b66
+#define regRLC_RLCV_IRAM_ADDR_BASE_IDX 1
+#define regRLC_RLCV_IRAM_DATA 0x5b67
+#define regRLC_RLCV_IRAM_DATA_BASE_IDX 1
+#define regRLC_LX6_DRAM_ADDR 0x5b68
+#define regRLC_LX6_DRAM_ADDR_BASE_IDX 1
+#define regRLC_LX6_DRAM_DATA 0x5b69
+#define regRLC_LX6_DRAM_DATA_BASE_IDX 1
+#define regRLC_LX6_IRAM_ADDR 0x5b6a
+#define regRLC_LX6_IRAM_ADDR_BASE_IDX 1
+#define regRLC_LX6_IRAM_DATA 0x5b6b
+#define regRLC_LX6_IRAM_DATA_BASE_IDX 1
+#define regRLC_PACE_UCODE_ADDR 0x5b6c
+#define regRLC_PACE_UCODE_ADDR_BASE_IDX 1
+#define regRLC_PACE_UCODE_DATA 0x5b6d
+#define regRLC_PACE_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_ADDR 0x5b6e
+#define regRLC_GPM_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_DATA 0x5b6f
+#define regRLC_GPM_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_SRM_DRAM_ADDR 0x5b71
+#define regRLC_SRM_DRAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_DRAM_DATA 0x5b72
+#define regRLC_SRM_DRAM_DATA_BASE_IDX 1
+#define regRLC_SRM_ARAM_ADDR 0x5b73
+#define regRLC_SRM_ARAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_ARAM_DATA 0x5b74
+#define regRLC_SRM_ARAM_DATA_BASE_IDX 1
+#define regRLC_PACE_SCRATCH_ADDR 0x5b77
+#define regRLC_PACE_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_PACE_SCRATCH_DATA 0x5b78
+#define regRLC_PACE_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_GTS_OFFSET_LSB 0x5b79
+#define regRLC_GTS_OFFSET_LSB_BASE_IDX 1
+#define regRLC_GTS_OFFSET_MSB 0x5b7a
+#define regRLC_GTS_OFFSET_MSB_BASE_IDX 1
+#define regGL2_PIPE_STEER_0 0x5b80
+#define regGL2_PIPE_STEER_0_BASE_IDX 1
+#define regGL2_PIPE_STEER_1 0x5b81
+#define regGL2_PIPE_STEER_1_BASE_IDX 1
+#define regGL2_PIPE_STEER_2 0x5b82
+#define regGL2_PIPE_STEER_2_BASE_IDX 1
+#define regGL2_PIPE_STEER_3 0x5b83
+#define regGL2_PIPE_STEER_3_BASE_IDX 1
+#define regGL1_PIPE_STEER 0x5b84
+#define regGL1_PIPE_STEER_BASE_IDX 1
+#define regCH_PIPE_STEER 0x5b88
+#define regCH_PIPE_STEER_BASE_IDX 1
+#define regGC_USER_SHADER_ARRAY_CONFIG 0x5b90
+#define regGC_USER_SHADER_ARRAY_CONFIG_BASE_IDX 1
+#define regGC_USER_PRIM_CONFIG 0x5b91
+#define regGC_USER_PRIM_CONFIG_BASE_IDX 1
+#define regGC_USER_SA_UNIT_DISABLE 0x5b92
+#define regGC_USER_SA_UNIT_DISABLE_BASE_IDX 1
+#define regGC_USER_RB_REDUNDANCY 0x5b93
+#define regGC_USER_RB_REDUNDANCY_BASE_IDX 1
+#define regGC_USER_RB_BACKEND_DISABLE 0x5b94
+#define regGC_USER_RB_BACKEND_DISABLE_BASE_IDX 1
+#define regGC_USER_RMI_REDUNDANCY 0x5b95
+#define regGC_USER_RMI_REDUNDANCY_BASE_IDX 1
+#define regCGTS_USER_TCC_DISABLE 0x5b96
+#define regCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define regGC_USER_SHADER_RATE_CONFIG 0x5b97
+#define regGC_USER_SHADER_RATE_CONFIG_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_STATUS 0x5bc0
+#define regRLC_GPU_IOV_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_STATUS 0x5bc1
+#define regRLC_GPU_IOV_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_STATUS 0x5bc2
+#define regRLC_GPU_IOV_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_STATUS 0x5bc3
+#define regRLC_GPU_IOV_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_STATUS 0x5bc4
+#define regRLC_GPU_IOV_SDMA4_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_STATUS 0x5bc5
+#define regRLC_GPU_IOV_SDMA5_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_STATUS 0x5bc6
+#define regRLC_GPU_IOV_SDMA6_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_STATUS 0x5bc7
+#define regRLC_GPU_IOV_SDMA7_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS 0x5bc8
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS 0x5bc9
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS 0x5bca
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS 0x5bcb
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS 0x5bcc
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS 0x5bcd
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS 0x5bce
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS 0x5bcf
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_cphypdec
+// base address: 0x3e000
+#define regCP_HYP_PFP_UCODE_ADDR 0x5814
+#define regCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_PFP_UCODE_ADDR 0x5814
+#define regCP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_DATA 0x5815
+#define regCP_HYP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_PFP_UCODE_DATA 0x5815
+#define regCP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_ADDR 0x5816
+#define regCP_HYP_ME_UCODE_ADDR_BASE_IDX 1
+#define regCP_ME_RAM_RADDR 0x5816
+#define regCP_ME_RAM_RADDR_BASE_IDX 1
+#define regCP_ME_RAM_WADDR 0x5816
+#define regCP_ME_RAM_WADDR_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_DATA 0x5817
+#define regCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+#define regCP_ME_RAM_DATA 0x5817
+#define regCP_ME_RAM_DATA_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_ADDR 0x581a
+#define regCP_HYP_MEC1_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_ADDR 0x581a
+#define regCP_MEC_ME1_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_DATA 0x581b
+#define regCP_HYP_MEC1_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_DATA 0x581b
+#define regCP_MEC_ME1_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_ADDR 0x581c
+#define regCP_HYP_MEC2_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_ADDR 0x581c
+#define regCP_MEC_ME2_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_DATA 0x581d
+#define regCP_HYP_MEC2_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_DATA 0x581d
+#define regCP_MEC_ME2_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_CHKSUM 0x581e
+#define regCP_HYP_PFP_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_CHKSUM 0x5820
+#define regCP_HYP_ME_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM 0x5821
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM 0x5822
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_PFP_IC_BASE_LO 0x5840
+#define regCP_PFP_IC_BASE_LO_BASE_IDX 1
+#define regCP_PFP_IC_BASE_HI 0x5841
+#define regCP_PFP_IC_BASE_HI_BASE_IDX 1
+#define regCP_PFP_IC_BASE_CNTL 0x5842
+#define regCP_PFP_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_PFP_IC_OP_CNTL 0x5843
+#define regCP_PFP_IC_OP_CNTL_BASE_IDX 1
+#define regCP_ME_IC_BASE_LO 0x5844
+#define regCP_ME_IC_BASE_LO_BASE_IDX 1
+#define regCP_ME_IC_BASE_HI 0x5845
+#define regCP_ME_IC_BASE_HI_BASE_IDX 1
+#define regCP_ME_IC_BASE_CNTL 0x5846
+#define regCP_ME_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_ME_IC_OP_CNTL 0x5847
+#define regCP_ME_IC_OP_CNTL_BASE_IDX 1
+#define regCP_CPC_IC_BASE_LO 0x584c
+#define regCP_CPC_IC_BASE_LO_BASE_IDX 1
+#define regCP_CPC_IC_BASE_HI 0x584d
+#define regCP_CPC_IC_BASE_HI_BASE_IDX 1
+#define regCP_CPC_IC_BASE_CNTL 0x584e
+#define regCP_CPC_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_IC_BASE_LO 0x5850
+#define regCP_MES_IC_BASE_LO_BASE_IDX 1
+#define regCP_MES_MIBASE_LO 0x5850
+#define regCP_MES_MIBASE_LO_BASE_IDX 1
+#define regCP_MES_IC_BASE_HI 0x5851
+#define regCP_MES_IC_BASE_HI_BASE_IDX 1
+#define regCP_MES_MIBASE_HI 0x5851
+#define regCP_MES_MIBASE_HI_BASE_IDX 1
+#define regCP_MES_IC_BASE_CNTL 0x5852
+#define regCP_MES_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_DC_BASE_LO 0x5854
+#define regCP_MES_DC_BASE_LO_BASE_IDX 1
+#define regCP_MES_MDBASE_LO 0x5854
+#define regCP_MES_MDBASE_LO_BASE_IDX 1
+#define regCP_MES_DC_BASE_HI 0x5855
+#define regCP_MES_DC_BASE_HI_BASE_IDX 1
+#define regCP_MES_MDBASE_HI 0x5855
+#define regCP_MES_MDBASE_HI_BASE_IDX 1
+#define regCP_MES_MIBOUND_LO 0x585b
+#define regCP_MES_MIBOUND_LO_BASE_IDX 1
+#define regCP_MES_MIBOUND_HI 0x585c
+#define regCP_MES_MIBOUND_HI_BASE_IDX 1
+#define regCP_MES_MDBOUND_LO 0x585d
+#define regCP_MES_MDBOUND_LO_BASE_IDX 1
+#define regCP_MES_MDBOUND_HI 0x585e
+#define regCP_MES_MDBOUND_HI_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE0_LO 0x5863
+#define regCP_GFX_RS64_DC_BASE0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE1_LO 0x5864
+#define regCP_GFX_RS64_DC_BASE1_LO_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE0_HI 0x5865
+#define regCP_GFX_RS64_DC_BASE0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE1_HI 0x5866
+#define regCP_GFX_RS64_DC_BASE1_HI_BASE_IDX 1
+#define regCP_GFX_RS64_MIBOUND_LO 0x586c
+#define regCP_GFX_RS64_MIBOUND_LO_BASE_IDX 1
+#define regCP_GFX_RS64_MIBOUND_HI 0x586d
+#define regCP_GFX_RS64_MIBOUND_HI_BASE_IDX 1
+#define regCP_MEC_DC_BASE_LO 0x5870
+#define regCP_MEC_DC_BASE_LO_BASE_IDX 1
+#define regCP_MEC_MDBASE_LO 0x5870
+#define regCP_MEC_MDBASE_LO_BASE_IDX 1
+#define regCP_MEC_DC_BASE_HI 0x5871
+#define regCP_MEC_DC_BASE_HI_BASE_IDX 1
+#define regCP_MEC_MDBASE_HI 0x5871
+#define regCP_MEC_MDBASE_HI_BASE_IDX 1
+#define regCP_MEC_MIBOUND_LO 0x5872
+#define regCP_MEC_MIBOUND_LO_BASE_IDX 1
+#define regCP_MEC_MIBOUND_HI 0x5873
+#define regCP_MEC_MIBOUND_HI_BASE_IDX 1
+#define regCP_MEC_MDBOUND_LO 0x5874
+#define regCP_MEC_MDBOUND_LO_BASE_IDX 1
+#define regCP_MEC_MDBOUND_HI 0x5875
+#define regCP_MEC_MDBOUND_HI_BASE_IDX 1
+
+
+// addressBlock: gc_grbm_hypdec
+// base address: 0x3e800
+#define regGRBM_GFX_INDEX_SR_SELECT 0x5a00
+#define regGRBM_GFX_INDEX_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_INDEX_SR_DATA 0x5a01
+#define regGRBM_GFX_INDEX_SR_DATA_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_SELECT 0x5a02
+#define regGRBM_GFX_CNTL_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_DATA 0x5a03
+#define regGRBM_GFX_CNTL_SR_DATA_BASE_IDX 1
+#define regGC_IH_COOKIE_0_PTR 0x5a07
+#define regGC_IH_COOKIE_0_PTR_BASE_IDX 1
+#define regGRBM_SE_REMAP_CNTL 0x5a08
+#define regGRBM_SE_REMAP_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcvmsharedhvdec
+// base address: 0x3ea00
+#define regGCMC_VM_FB_SIZE_OFFSET_VF0 0x5a80
+#define regGCMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF1 0x5a81
+#define regGCMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF2 0x5a82
+#define regGCMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF3 0x5a83
+#define regGCMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF4 0x5a84
+#define regGCMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF5 0x5a85
+#define regGCMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF6 0x5a86
+#define regGCMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF7 0x5a87
+#define regGCMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF8 0x5a88
+#define regGCMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF9 0x5a89
+#define regGCMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF10 0x5a8a
+#define regGCMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF11 0x5a8b
+#define regGCMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF12 0x5a8c
+#define regGCMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF13 0x5a8d
+#define regGCMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF14 0x5a8e
+#define regGCMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF15 0x5a8f
+#define regGCMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+
+
+// addressBlock: gc_rlcdec
+// base address: 0x3b000
+#define regRLC_CNTL 0x4c00
+#define regRLC_CNTL_BASE_IDX 1
+#define regRLC_F32_UCODE_VERSION 0x4c03
+#define regRLC_F32_UCODE_VERSION_BASE_IDX 1
+#define regRLC_STAT 0x4c04
+#define regRLC_STAT_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_LSB 0x4c0c
+#define regRLC_REFCLOCK_TIMESTAMP_LSB_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_MSB 0x4c0d
+#define regRLC_REFCLOCK_TIMESTAMP_MSB_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_0 0x4c0e
+#define regRLC_GPM_TIMER_INT_0_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_1 0x4c0f
+#define regRLC_GPM_TIMER_INT_1_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_2 0x4c10
+#define regRLC_GPM_TIMER_INT_2_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_3 0x4c11
+#define regRLC_GPM_TIMER_INT_3_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_4 0x4c12
+#define regRLC_GPM_TIMER_INT_4_BASE_IDX 1
+#define regRLC_GPM_TIMER_CTRL 0x4c13
+#define regRLC_GPM_TIMER_CTRL_BASE_IDX 1
+#define regRLC_GPM_TIMER_STAT 0x4c14
+#define regRLC_GPM_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_STAT 0x4c16
+#define regRLC_GPM_LEGACY_INT_STAT_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_CLEAR 0x4c17
+#define regRLC_GPM_LEGACY_INT_CLEAR_BASE_IDX 1
+#define regRLC_INT_STAT 0x4c18
+#define regRLC_INT_STAT_BASE_IDX 1
+#define regRLC_MGCG_CTRL 0x4c1a
+#define regRLC_MGCG_CTRL_BASE_IDX 1
+#define regRLC_JUMP_TABLE_RESTORE 0x4c1e
+#define regRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
+#define regRLC_PG_DELAY_2 0x4c1f
+#define regRLC_PG_DELAY_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB 0x4c24
+#define regRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB 0x4c25
+#define regRLC_GPU_CLOCK_COUNT_MSB_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT 0x4c26
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_BASE_IDX 1
+#define regRLC_UCODE_CNTL 0x4c27
+#define regRLC_UCODE_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_RESET 0x4c28
+#define regRLC_GPM_THREAD_RESET_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T0 0x4c29
+#define regRLC_GPM_CP_DMA_COMPLETE_T0_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T1 0x4c2a
+#define regRLC_GPM_CP_DMA_COMPLETE_T1_BASE_IDX 1
+#define regRLC_GPM_THREAD_INVALIDATE_CACHE 0x4c2b
+#define regRLC_GPM_THREAD_INVALIDATE_CACHE_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_LSB 0x4c30
+#define regRLC_CLK_COUNT_GFXCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_MSB 0x4c31
+#define regRLC_CLK_COUNT_GFXCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_LSB 0x4c32
+#define regRLC_CLK_COUNT_REFCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_MSB 0x4c33
+#define regRLC_CLK_COUNT_REFCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_CTRL 0x4c34
+#define regRLC_CLK_COUNT_CTRL_BASE_IDX 1
+#define regRLC_CLK_COUNT_STAT 0x4c35
+#define regRLC_CLK_COUNT_STAT_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_CNTL 0x4c36
+#define regRLC_RLCG_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_STAT 0x4c37
+#define regRLC_RLCG_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_0_DATA_LO 0x4c38
+#define regRLC_RLCG_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_0_DATA_HI 0x4c39
+#define regRLC_RLCG_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_1_DATA_LO 0x4c3a
+#define regRLC_RLCG_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_1_DATA_HI 0x4c3b
+#define regRLC_RLCG_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_2_DATA_LO 0x4c3c
+#define regRLC_RLCG_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_2_DATA_HI 0x4c3d
+#define regRLC_RLCG_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_3_DATA_LO 0x4c3e
+#define regRLC_RLCG_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_3_DATA_HI 0x4c3f
+#define regRLC_RLCG_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32_RES_SEL 0x4c41
+#define regRLC_GPU_CLOCK_32_RES_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32 0x4c42
+#define regRLC_GPU_CLOCK_32_BASE_IDX 1
+#define regRLC_PG_CNTL 0x4c43
+#define regRLC_PG_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_PRIORITY 0x4c44
+#define regRLC_GPM_THREAD_PRIORITY_BASE_IDX 1
+#define regRLC_GPM_THREAD_ENABLE 0x4c45
+#define regRLC_GPM_THREAD_ENABLE_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_RANGE 0x4c47
+#define regRLC_RLCG_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_CGTT_MGCG_OVERRIDE 0x4c48
+#define regRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL 0x4c49
+#define regRLC_CGCG_CGLS_CTRL_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL 0x4c4a
+#define regRLC_CGCG_RAMP_CTRL_BASE_IDX 1
+#define regRLC_DYN_PG_STATUS 0x4c4b
+#define regRLC_DYN_PG_STATUS_BASE_IDX 1
+#define regRLC_DYN_PG_REQUEST 0x4c4c
+#define regRLC_DYN_PG_REQUEST_BASE_IDX 1
+#define regRLC_PG_DELAY 0x4c4d
+#define regRLC_PG_DELAY_BASE_IDX 1
+#define regRLC_WGP_STATUS 0x4c4e
+#define regRLC_WGP_STATUS_BASE_IDX 1
+#define regRLC_PG_ALWAYS_ON_WGP_MASK 0x4c53
+#define regRLC_PG_ALWAYS_ON_WGP_MASK_BASE_IDX 1
+#define regRLC_MAX_PG_WGP 0x4c54
+#define regRLC_MAX_PG_WGP_BASE_IDX 1
+#define regRLC_AUTO_PG_CTRL 0x4c55
+#define regRLC_AUTO_PG_CTRL_BASE_IDX 1
+#define regRLC_SERDES_RD_INDEX 0x4c59
+#define regRLC_SERDES_RD_INDEX_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_0 0x4c5a
+#define regRLC_SERDES_RD_DATA_0_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_1 0x4c5b
+#define regRLC_SERDES_RD_DATA_1_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_2 0x4c5c
+#define regRLC_SERDES_RD_DATA_2_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_3 0x4c5d
+#define regRLC_SERDES_RD_DATA_3_BASE_IDX 1
+#define regRLC_SERDES_MASK 0x4c5e
+#define regRLC_SERDES_MASK_BASE_IDX 1
+#define regRLC_SERDES_CTRL 0x4c5f
+#define regRLC_SERDES_CTRL_BASE_IDX 1
+#define regRLC_SERDES_DATA 0x4c60
+#define regRLC_SERDES_DATA_BASE_IDX 1
+#define regRLC_SERDES_BUSY 0x4c61
+#define regRLC_SERDES_BUSY_BASE_IDX 1
+#define regRLC_GPM_GENERAL_0 0x4c63
+#define regRLC_GPM_GENERAL_0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_1 0x4c64
+#define regRLC_GPM_GENERAL_1_BASE_IDX 1
+#define regRLC_GPM_GENERAL_2 0x4c65
+#define regRLC_GPM_GENERAL_2_BASE_IDX 1
+#define regRLC_GPM_GENERAL_3 0x4c66
+#define regRLC_GPM_GENERAL_3_BASE_IDX 1
+#define regRLC_GPM_GENERAL_4 0x4c67
+#define regRLC_GPM_GENERAL_4_BASE_IDX 1
+#define regRLC_GPM_GENERAL_5 0x4c68
+#define regRLC_GPM_GENERAL_5_BASE_IDX 1
+#define regRLC_GPM_GENERAL_6 0x4c69
+#define regRLC_GPM_GENERAL_6_BASE_IDX 1
+#define regRLC_GPM_GENERAL_7 0x4c6a
+#define regRLC_GPM_GENERAL_7_BASE_IDX 1
+#define regRLC_STATIC_PG_STATUS 0x4c6e
+#define regRLC_STATIC_PG_STATUS_BASE_IDX 1
+#define regRLC_GPM_GENERAL_16 0x4c76
+#define regRLC_GPM_GENERAL_16_BASE_IDX 1
+#define regRLC_PG_DELAY_3 0x4c78
+#define regRLC_PG_DELAY_3_BASE_IDX 1
+#define regRLC_GPR_REG1 0x4c79
+#define regRLC_GPR_REG1_BASE_IDX 1
+#define regRLC_GPR_REG2 0x4c7a
+#define regRLC_GPR_REG2_BASE_IDX 1
+#define regRLC_GPM_INT_DISABLE_TH0 0x4c7c
+#define regRLC_GPM_INT_DISABLE_TH0_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_DISABLE 0x4c7d
+#define regRLC_GPM_LEGACY_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPM_INT_FORCE_TH0 0x4c7e
+#define regRLC_GPM_INT_FORCE_TH0_BASE_IDX 1
+#define regRLC_SRM_CNTL 0x4c80
+#define regRLC_SRM_CNTL_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND_STATUS 0x4c88
+#define regRLC_SRM_GPM_COMMAND_STATUS_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_0 0x4c8b
+#define regRLC_SRM_INDEX_CNTL_ADDR_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_1 0x4c8c
+#define regRLC_SRM_INDEX_CNTL_ADDR_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_2 0x4c8d
+#define regRLC_SRM_INDEX_CNTL_ADDR_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_3 0x4c8e
+#define regRLC_SRM_INDEX_CNTL_ADDR_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_4 0x4c8f
+#define regRLC_SRM_INDEX_CNTL_ADDR_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_5 0x4c90
+#define regRLC_SRM_INDEX_CNTL_ADDR_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_6 0x4c91
+#define regRLC_SRM_INDEX_CNTL_ADDR_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_7 0x4c92
+#define regRLC_SRM_INDEX_CNTL_ADDR_7_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_0 0x4c93
+#define regRLC_SRM_INDEX_CNTL_DATA_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_1 0x4c94
+#define regRLC_SRM_INDEX_CNTL_DATA_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_2 0x4c95
+#define regRLC_SRM_INDEX_CNTL_DATA_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_3 0x4c96
+#define regRLC_SRM_INDEX_CNTL_DATA_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_4 0x4c97
+#define regRLC_SRM_INDEX_CNTL_DATA_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_5 0x4c98
+#define regRLC_SRM_INDEX_CNTL_DATA_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_6 0x4c99
+#define regRLC_SRM_INDEX_CNTL_DATA_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_7 0x4c9a
+#define regRLC_SRM_INDEX_CNTL_DATA_7_BASE_IDX 1
+#define regRLC_SRM_STAT 0x4c9b
+#define regRLC_SRM_STAT_BASE_IDX 1
+#define regRLC_GPM_GENERAL_8 0x4cad
+#define regRLC_GPM_GENERAL_8_BASE_IDX 1
+#define regRLC_GPM_GENERAL_9 0x4cae
+#define regRLC_GPM_GENERAL_9_BASE_IDX 1
+#define regRLC_GPM_GENERAL_10 0x4caf
+#define regRLC_GPM_GENERAL_10_BASE_IDX 1
+#define regRLC_GPM_GENERAL_11 0x4cb0
+#define regRLC_GPM_GENERAL_11_BASE_IDX 1
+#define regRLC_GPM_GENERAL_12 0x4cb1
+#define regRLC_GPM_GENERAL_12_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_0 0x4cb2
+#define regRLC_GPM_UTCL1_CNTL_0_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_1 0x4cb3
+#define regRLC_GPM_UTCL1_CNTL_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_2 0x4cb4
+#define regRLC_GPM_UTCL1_CNTL_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_CNTL 0x4cb5
+#define regRLC_SPM_UTCL1_CNTL_BASE_IDX 1
+#define regRLC_UTCL1_STATUS_2 0x4cb6
+#define regRLC_UTCL1_STATUS_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_1 0x4cbc
+#define regRLC_SPM_UTCL1_ERROR_1_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_2 0x4cbd
+#define regRLC_SPM_UTCL1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_1 0x4cbe
+#define regRLC_GPM_UTCL1_TH0_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_2 0x4cc0
+#define regRLC_GPM_UTCL1_TH0_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1 0x4cc1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_2 0x4cc2
+#define regRLC_GPM_UTCL1_TH1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_1 0x4cc3
+#define regRLC_GPM_UTCL1_TH2_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_2 0x4cc4
+#define regRLC_GPM_UTCL1_TH2_ERROR_2_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL_3D 0x4cc5
+#define regRLC_CGCG_CGLS_CTRL_3D_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL_3D 0x4cc6
+#define regRLC_CGCG_RAMP_CTRL_3D_BASE_IDX 1
+#define regRLC_SEMAPHORE_0 0x4cc7
+#define regRLC_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_SEMAPHORE_1 0x4cc8
+#define regRLC_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_SEMAPHORE_2 0x4cc9
+#define regRLC_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_SEMAPHORE_3 0x4cca
+#define regRLC_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_PACE_INT_STAT 0x4ccc
+#define regRLC_PACE_INT_STAT_BASE_IDX 1
+#define regRLC_UTCL1_STATUS 0x4cd4
+#define regRLC_UTCL1_STATUS_BASE_IDX 1
+#define regRLC_R2I_CNTL_0 0x4cd5
+#define regRLC_R2I_CNTL_0_BASE_IDX 1
+#define regRLC_R2I_CNTL_1 0x4cd6
+#define regRLC_R2I_CNTL_1_BASE_IDX 1
+#define regRLC_R2I_CNTL_2 0x4cd7
+#define regRLC_R2I_CNTL_2_BASE_IDX 1
+#define regRLC_R2I_CNTL_3 0x4cd8
+#define regRLC_R2I_CNTL_3_BASE_IDX 1
+#define regRLC_GPM_INT_STAT_TH0 0x4cdc
+#define regRLC_GPM_INT_STAT_TH0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_13 0x4cdd
+#define regRLC_GPM_GENERAL_13_BASE_IDX 1
+#define regRLC_GPM_GENERAL_14 0x4cde
+#define regRLC_GPM_GENERAL_14_BASE_IDX 1
+#define regRLC_GPM_GENERAL_15 0x4cdf
+#define regRLC_GPM_GENERAL_15_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1 0x4cea
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_2 0x4ceb
+#define regRLC_GPU_CLOCK_COUNT_LSB_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_2 0x4cec
+#define regRLC_GPU_CLOCK_COUNT_MSB_2_BASE_IDX 1
+#define regRLC_PACE_INT_DISABLE 0x4ced
+#define regRLC_PACE_INT_DISABLE_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2 0x4cef
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_RANGE 0x4cf0
+#define regRLC_RLCV_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_CNTL 0x4cf1
+#define regRLC_RLCV_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_STAT 0x4cf2
+#define regRLC_RLCV_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_0_DATA_LO 0x4cf3
+#define regRLC_RLCV_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_0_DATA_HI 0x4cf4
+#define regRLC_RLCV_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_1_DATA_LO 0x4cf5
+#define regRLC_RLCV_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_1_DATA_HI 0x4cf6
+#define regRLC_RLCV_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_2_DATA_LO 0x4cf7
+#define regRLC_RLCV_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_2_DATA_HI 0x4cf8
+#define regRLC_RLCV_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_3_DATA_LO 0x4cf9
+#define regRLC_RLCV_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_3_DATA_HI 0x4cfa
+#define regRLC_RLCV_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_1 0x4cfb
+#define regRLC_GPU_CLOCK_COUNT_LSB_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_1 0x4cfc
+#define regRLC_GPU_CLOCK_COUNT_MSB_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT 0x4d00
+#define regRLC_RLCV_SPARE_INT_BASE_IDX 1
+#define regRLC_FIREWALL_VIOLATION 0x4d02
+#define regRLC_FIREWALL_VIOLATION_BASE_IDX 1
+#define regRLC_PACE_TIMER_INT_0 0x4d04
+#define regRLC_PACE_TIMER_INT_0_BASE_IDX 1
+#define regRLC_PACE_TIMER_INT_1 0x4d05
+#define regRLC_PACE_TIMER_INT_1_BASE_IDX 1
+#define regRLC_PACE_TIMER_CTRL 0x4d06
+#define regRLC_PACE_TIMER_CTRL_BASE_IDX 1
+#define regRLC_SMU_CLK_REQ 0x4d08
+#define regRLC_SMU_CLK_REQ_BASE_IDX 1
+#define regRLC_CP_STAT_INVAL_STAT 0x4d09
+#define regRLC_CP_STAT_INVAL_STAT_BASE_IDX 1
+#define regRLC_CP_STAT_INVAL_CTRL 0x4d0a
+#define regRLC_CP_STAT_INVAL_CTRL_BASE_IDX 1
+#define regRLC_SPARE 0x4d0b
+#define regRLC_SPARE_BASE_IDX 1
+#define regRLC_SPP_CTRL 0x4d0c
+#define regRLC_SPP_CTRL_BASE_IDX 1
+#define regRLC_SPP_SHADER_PROFILE_EN 0x4d0d
+#define regRLC_SPP_SHADER_PROFILE_EN_BASE_IDX 1
+#define regRLC_SPP_SSF_CAPTURE_EN 0x4d0e
+#define regRLC_SPP_SSF_CAPTURE_EN_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_0 0x4d0f
+#define regRLC_SPP_SSF_THRESHOLD_0_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_1 0x4d10
+#define regRLC_SPP_SSF_THRESHOLD_1_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_2 0x4d11
+#define regRLC_SPP_SSF_THRESHOLD_2_BASE_IDX 1
+#define regRLC_SPP_INFLIGHT_RD_ADDR 0x4d12
+#define regRLC_SPP_INFLIGHT_RD_ADDR_BASE_IDX 1
+#define regRLC_SPP_INFLIGHT_RD_DATA 0x4d13
+#define regRLC_SPP_INFLIGHT_RD_DATA_BASE_IDX 1
+#define regRLC_SPP_PROF_INFO_1 0x4d18
+#define regRLC_SPP_PROF_INFO_1_BASE_IDX 1
+#define regRLC_SPP_PROF_INFO_2 0x4d19
+#define regRLC_SPP_PROF_INFO_2_BASE_IDX 1
+#define regRLC_SPP_GLOBAL_SH_ID 0x4d1a
+#define regRLC_SPP_GLOBAL_SH_ID_BASE_IDX 1
+#define regRLC_SPP_GLOBAL_SH_ID_VALID 0x4d1b
+#define regRLC_SPP_GLOBAL_SH_ID_VALID_BASE_IDX 1
+#define regRLC_SPP_STATUS 0x4d1c
+#define regRLC_SPP_STATUS_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_0 0x4d1d
+#define regRLC_SPP_PVT_STAT_0_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_1 0x4d1e
+#define regRLC_SPP_PVT_STAT_1_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_2 0x4d1f
+#define regRLC_SPP_PVT_STAT_2_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_3 0x4d20
+#define regRLC_SPP_PVT_STAT_3_BASE_IDX 1
+#define regRLC_SPP_PVT_LEVEL_MAX 0x4d21
+#define regRLC_SPP_PVT_LEVEL_MAX_BASE_IDX 1
+#define regRLC_SPP_STALL_STATE_UPDATE 0x4d22
+#define regRLC_SPP_STALL_STATE_UPDATE_BASE_IDX 1
+#define regRLC_SPP_PBB_INFO 0x4d23
+#define regRLC_SPP_PBB_INFO_BASE_IDX 1
+#define regRLC_SPP_RESET 0x4d24
+#define regRLC_SPP_RESET_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_RANGE 0x4d26
+#define regRLC_RLCP_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_CNTL 0x4d27
+#define regRLC_RLCP_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_STAT 0x4d28
+#define regRLC_RLCP_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_0_DATA_LO 0x4d29
+#define regRLC_RLCP_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_0_DATA_HI 0x4d2a
+#define regRLC_RLCP_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_1_DATA_LO 0x4d2b
+#define regRLC_RLCP_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_1_DATA_HI 0x4d2c
+#define regRLC_RLCP_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_2_DATA_LO 0x4d2d
+#define regRLC_RLCP_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_2_DATA_HI 0x4d2e
+#define regRLC_RLCP_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_3_DATA_LO 0x4d2f
+#define regRLC_RLCP_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_3_DATA_HI 0x4d30
+#define regRLC_RLCP_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_CAC_MASK_CNTL 0x4d45
+#define regRLC_CAC_MASK_CNTL_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_CNTR_CTRL 0x4d48
+#define regRLC_POWER_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_CNTR_CTRL 0x4d49
+#define regRLC_CLK_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_CNTR_CTRL 0x4d4a
+#define regRLC_DS_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_CNTR_CTRL 0x4d4b
+#define regRLC_ULV_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_CNTR_CTRL 0x4d4c
+#define regRLC_PCC_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_CNTR_CTRL 0x4d4d
+#define regRLC_GENERAL_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_EVENT_CNTR 0x4d50
+#define regRLC_POWER_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_EVENT_CNTR 0x4d51
+#define regRLC_CLK_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_EVENT_CNTR 0x4d52
+#define regRLC_DS_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_EVENT_CNTR 0x4d53
+#define regRLC_ULV_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_EVENT_CNTR 0x4d54
+#define regRLC_PCC_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_EVENT_CNTR 0x4d55
+#define regRLC_GENERAL_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_REF_CNTR 0x4d58
+#define regRLC_POWER_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_REF_CNTR 0x4d59
+#define regRLC_CLK_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_REF_CNTR 0x4d5a
+#define regRLC_DS_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_REF_CNTR 0x4d5b
+#define regRLC_ULV_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_REF_CNTR 0x4d5c
+#define regRLC_PCC_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_REF_CNTR 0x4d5d
+#define regRLC_GENERAL_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_CTRL 0x4d5e
+#define regRLC_GFX_IH_CLIENT_CTRL_BASE_IDX 1
+#define regRLC_GFX_IH_ARBITER_STAT 0x4d5f
+#define regRLC_GFX_IH_ARBITER_STAT_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SE_STAT_L 0x4d60
+#define regRLC_GFX_IH_CLIENT_SE_STAT_L_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SE_STAT_H 0x4d61
+#define regRLC_GFX_IH_CLIENT_SE_STAT_H_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SDMA_STAT 0x4d62
+#define regRLC_GFX_IH_CLIENT_SDMA_STAT_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_OTHER_STAT 0x4d63
+#define regRLC_GFX_IH_CLIENT_OTHER_STAT_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_DELAY_IND_ADDR 0x4d64
+#define regRLC_SPM_GLOBAL_DELAY_IND_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_DELAY_IND_DATA 0x4d65
+#define regRLC_SPM_GLOBAL_DELAY_IND_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_DELAY_IND_ADDR 0x4d66
+#define regRLC_SPM_SE_DELAY_IND_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_DELAY_IND_DATA 0x4d67
+#define regRLC_SPM_SE_DELAY_IND_DATA_BASE_IDX 1
+#define regRLC_LX6_CNTL 0x4d80
+#define regRLC_LX6_CNTL_BASE_IDX 1
+#define regRLC_XT_CORE_STATUS 0x4dd4
+#define regRLC_XT_CORE_STATUS_BASE_IDX 1
+#define regRLC_XT_CORE_INTERRUPT 0x4dd5
+#define regRLC_XT_CORE_INTERRUPT_BASE_IDX 1
+#define regRLC_XT_CORE_FAULT_INFO 0x4dd6
+#define regRLC_XT_CORE_FAULT_INFO_BASE_IDX 1
+#define regRLC_XT_CORE_ALT_RESET_VEC 0x4dd7
+#define regRLC_XT_CORE_ALT_RESET_VEC_BASE_IDX 1
+#define regRLC_XT_CORE_RESERVED 0x4dd8
+#define regRLC_XT_CORE_RESERVED_BASE_IDX 1
+#define regRLC_XT_INT_VEC_FORCE 0x4dd9
+#define regRLC_XT_INT_VEC_FORCE_BASE_IDX 1
+#define regRLC_XT_INT_VEC_CLEAR 0x4dda
+#define regRLC_XT_INT_VEC_CLEAR_BASE_IDX 1
+#define regRLC_XT_INT_VEC_MUX_SEL 0x4ddb
+#define regRLC_XT_INT_VEC_MUX_SEL_BASE_IDX 1
+#define regRLC_XT_INT_VEC_MUX_INT_SEL 0x4ddc
+#define regRLC_XT_INT_VEC_MUX_INT_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_SPM_LSB 0x4de4
+#define regRLC_GPU_CLOCK_COUNT_SPM_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_SPM_MSB 0x4de5
+#define regRLC_GPU_CLOCK_COUNT_SPM_MSB_BASE_IDX 1
+#define regRLC_SPM_THREAD_TRACE_CTRL 0x4de6
+#define regRLC_SPM_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regRLC_SPP_CAM_ADDR 0x4de8
+#define regRLC_SPP_CAM_ADDR_BASE_IDX 1
+#define regRLC_SPP_CAM_DATA 0x4de9
+#define regRLC_SPP_CAM_DATA_BASE_IDX 1
+#define regRLC_SPP_CAM_EXT_ADDR 0x4dea
+#define regRLC_SPP_CAM_EXT_ADDR_BASE_IDX 1
+#define regRLC_SPP_CAM_EXT_DATA 0x4deb
+#define regRLC_SPP_CAM_EXT_DATA_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_CTRL 0x4df1
+#define regRLC_CPAXI_DOORBELL_MON_CTRL_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_STAT 0x4df2
+#define regRLC_CPAXI_DOORBELL_MON_STAT_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_DATA_LSB 0x4df3
+#define regRLC_CPAXI_DOORBELL_MON_DATA_LSB_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_DATA_MSB 0x4df4
+#define regRLC_CPAXI_DOORBELL_MON_DATA_MSB_BASE_IDX 1
+#define regRLC_XT_DOORBELL_RANGE 0x4df5
+#define regRLC_XT_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_XT_DOORBELL_CNTL 0x4df6
+#define regRLC_XT_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_XT_DOORBELL_STAT 0x4df7
+#define regRLC_XT_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_XT_DOORBELL_0_DATA_LO 0x4df8
+#define regRLC_XT_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_0_DATA_HI 0x4df9
+#define regRLC_XT_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_1_DATA_LO 0x4dfa
+#define regRLC_XT_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_1_DATA_HI 0x4dfb
+#define regRLC_XT_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_2_DATA_LO 0x4dfc
+#define regRLC_XT_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_2_DATA_HI 0x4dfd
+#define regRLC_XT_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_3_DATA_LO 0x4dfe
+#define regRLC_XT_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_3_DATA_HI 0x4dff
+#define regRLC_XT_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_MEM_SLP_CNTL 0x4e00
+#define regRLC_MEM_SLP_CNTL_BASE_IDX 1
+#define regSMU_RLC_RESPONSE 0x4e01
+#define regSMU_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_RLCV_SAFE_MODE 0x4e02
+#define regRLC_RLCV_SAFE_MODE_BASE_IDX 1
+#define regRLC_SMU_SAFE_MODE 0x4e03
+#define regRLC_SMU_SAFE_MODE_BASE_IDX 1
+#define regRLC_RLCV_COMMAND 0x4e04
+#define regRLC_RLCV_COMMAND_BASE_IDX 1
+#define regRLC_SMU_MESSAGE 0x4e05
+#define regRLC_SMU_MESSAGE_BASE_IDX 1
+#define regRLC_SMU_MESSAGE_1 0x4e06
+#define regRLC_SMU_MESSAGE_1_BASE_IDX 1
+#define regRLC_SMU_MESSAGE_2 0x4e07
+#define regRLC_SMU_MESSAGE_2_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND 0x4e08
+#define regRLC_SRM_GPM_COMMAND_BASE_IDX 1
+#define regRLC_SRM_GPM_ABORT 0x4e09
+#define regRLC_SRM_GPM_ABORT_BASE_IDX 1
+#define regRLC_SMU_COMMAND 0x4e0a
+#define regRLC_SMU_COMMAND_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_1 0x4e0b
+#define regRLC_SMU_ARGUMENT_1_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_2 0x4e0c
+#define regRLC_SMU_ARGUMENT_2_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_3 0x4e0d
+#define regRLC_SMU_ARGUMENT_3_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_4 0x4e0e
+#define regRLC_SMU_ARGUMENT_4_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_5 0x4e0f
+#define regRLC_SMU_ARGUMENT_5_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_ADDR_HI 0x4e10
+#define regRLC_IMU_BOOTLOAD_ADDR_HI_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_ADDR_LO 0x4e11
+#define regRLC_IMU_BOOTLOAD_ADDR_LO_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_SIZE 0x4e12
+#define regRLC_IMU_BOOTLOAD_SIZE_BASE_IDX 1
+#define regRLC_IMU_MISC 0x4e16
+#define regRLC_IMU_MISC_BASE_IDX 1
+#define regRLC_IMU_RESET_VECTOR 0x4e17
+#define regRLC_IMU_RESET_VECTOR_BASE_IDX 1
+
+
+// addressBlock: gc_rlcsdec
+// base address: 0x3b980
+#define regRLC_RLCS_DEC_START 0x4e60
+#define regRLC_RLCS_DEC_START_BASE_IDX 1
+#define regRLC_RLCS_DEC_DUMP_ADDR 0x4e61
+#define regRLC_RLCS_DEC_DUMP_ADDR_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_1 0x4e62
+#define regRLC_RLCS_EXCEPTION_REG_1_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_2 0x4e63
+#define regRLC_RLCS_EXCEPTION_REG_2_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_3 0x4e64
+#define regRLC_RLCS_EXCEPTION_REG_3_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_4 0x4e65
+#define regRLC_RLCS_EXCEPTION_REG_4_BASE_IDX 1
+#define regRLC_RLCS_CGCG_REQUEST 0x4e66
+#define regRLC_RLCS_CGCG_REQUEST_BASE_IDX 1
+#define regRLC_RLCS_CGCG_STATUS 0x4e67
+#define regRLC_RLCS_CGCG_STATUS_BASE_IDX 1
+#define regRLC_RLCS_SOC_DS_CNTL 0x4e68
+#define regRLC_RLCS_SOC_DS_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_DS_CNTL 0x4e69
+#define regRLC_RLCS_GFX_DS_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_DS_ALLOW_MASK_CNTL 0x4e6a
+#define regRLC_RLCS_GFX_DS_ALLOW_MASK_CNTL_BASE_IDX 1
+#define regRLC_GPM_STAT 0x4e6b
+#define regRLC_GPM_STAT_BASE_IDX 1
+#define regRLC_RLCS_GPM_STAT 0x4e6b
+#define regRLC_RLCS_GPM_STAT_BASE_IDX 1
+#define regRLC_RLCS_ABORTED_PD_SEQUENCE 0x4e6c
+#define regRLC_RLCS_ABORTED_PD_SEQUENCE_BASE_IDX 1
+#define regRLC_RLCS_DIDT_FORCE_STALL 0x4e6d
+#define regRLC_RLCS_DIDT_FORCE_STALL_BASE_IDX 1
+#define regRLC_RLCS_IOV_CMD_STATUS 0x4e6e
+#define regRLC_RLCS_IOV_CMD_STATUS_BASE_IDX 1
+#define regRLC_RLCS_IOV_CNTX_LOC_SIZE 0x4e6f
+#define regRLC_RLCS_IOV_CNTX_LOC_SIZE_BASE_IDX 1
+#define regRLC_RLCS_IOV_SCH_BLOCK 0x4e70
+#define regRLC_RLCS_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_RLCS_IOV_VM_BUSY_STATUS 0x4e71
+#define regRLC_RLCS_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_RLCS_GPM_STAT_2 0x4e72
+#define regRLC_RLCS_GPM_STAT_2_BASE_IDX 1
+#define regRLC_RLCS_GRBM_SOFT_RESET 0x4e73
+#define regRLC_RLCS_GRBM_SOFT_RESET_BASE_IDX 1
+#define regRLC_RLCS_PG_CHANGE_STATUS 0x4e74
+#define regRLC_RLCS_PG_CHANGE_STATUS_BASE_IDX 1
+#define regRLC_RLCS_PG_CHANGE_READ 0x4e75
+#define regRLC_RLCS_PG_CHANGE_READ_BASE_IDX 1
+#define regRLC_RLCS_IH_SEMAPHORE 0x4e76
+#define regRLC_RLCS_IH_SEMAPHORE_BASE_IDX 1
+#define regRLC_RLCS_IH_COOKIE_SEMAPHORE 0x4e77
+#define regRLC_RLCS_IH_COOKIE_SEMAPHORE_BASE_IDX 1
+#define regRLC_RLCS_WGP_STATUS 0x4e78
+#define regRLC_RLCS_WGP_STATUS_BASE_IDX 1
+#define regRLC_RLCS_WGP_READ 0x4e79
+#define regRLC_RLCS_WGP_READ_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_CTRL_1 0x4e7a
+#define regRLC_RLCS_CP_INT_CTRL_1_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_CTRL_2 0x4e7b
+#define regRLC_RLCS_CP_INT_CTRL_2_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_INFO_1 0x4e7c
+#define regRLC_RLCS_CP_INT_INFO_1_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_INFO_2 0x4e7d
+#define regRLC_RLCS_CP_INT_INFO_2_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_CTRL 0x4e7e
+#define regRLC_RLCS_SPM_INT_CTRL_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_INFO_1 0x4e7f
+#define regRLC_RLCS_SPM_INT_INFO_1_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_INFO_2 0x4e80
+#define regRLC_RLCS_SPM_INT_INFO_2_BASE_IDX 1
+#define regRLC_RLCS_DSM_TRIG 0x4e81
+#define regRLC_RLCS_DSM_TRIG_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_STATUS 0x4e82
+#define regRLC_RLCS_BOOTLOAD_STATUS_BASE_IDX 1
+#define regRLC_RLCS_POWER_BRAKE_CNTL 0x4e83
+#define regRLC_RLCS_POWER_BRAKE_CNTL_BASE_IDX 1
+#define regRLC_RLCS_POWER_BRAKE_CNTL_TH1 0x4e84
+#define regRLC_RLCS_POWER_BRAKE_CNTL_TH1_BASE_IDX 1
+#define regRLC_RLCS_GRBM_IDLE_BUSY_STAT 0x4e85
+#define regRLC_RLCS_GRBM_IDLE_BUSY_STAT_BASE_IDX 1
+#define regRLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL 0x4e86
+#define regRLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL_BASE_IDX 1
+#define regRLC_RLCS_CMP_IDLE_CNTL 0x4e87
+#define regRLC_RLCS_CMP_IDLE_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_0 0x4e88
+#define regRLC_RLCS_GENERAL_0_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_1 0x4e89
+#define regRLC_RLCS_GENERAL_1_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_2 0x4e8a
+#define regRLC_RLCS_GENERAL_2_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_3 0x4e8b
+#define regRLC_RLCS_GENERAL_3_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_4 0x4e8c
+#define regRLC_RLCS_GENERAL_4_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_5 0x4e8d
+#define regRLC_RLCS_GENERAL_5_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_6 0x4e8e
+#define regRLC_RLCS_GENERAL_6_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_7 0x4e8f
+#define regRLC_RLCS_GENERAL_7_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_8 0x4e90
+#define regRLC_RLCS_GENERAL_8_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_9 0x4e91
+#define regRLC_RLCS_GENERAL_9_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_10 0x4e92
+#define regRLC_RLCS_GENERAL_10_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_11 0x4e93
+#define regRLC_RLCS_GENERAL_11_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_12 0x4e94
+#define regRLC_RLCS_GENERAL_12_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_13 0x4e95
+#define regRLC_RLCS_GENERAL_13_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_14 0x4e96
+#define regRLC_RLCS_GENERAL_14_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_15 0x4e97
+#define regRLC_RLCS_GENERAL_15_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_16 0x4e98
+#define regRLC_RLCS_GENERAL_16_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_1 0x4ec5
+#define regRLC_RLCS_AUXILIARY_REG_1_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_2 0x4ec6
+#define regRLC_RLCS_AUXILIARY_REG_2_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_3 0x4ec7
+#define regRLC_RLCS_AUXILIARY_REG_3_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_4 0x4ec8
+#define regRLC_RLCS_AUXILIARY_REG_4_BASE_IDX 1
+#define regRLC_RLCS_SPM_SQTT_MODE 0x4ec9
+#define regRLC_RLCS_SPM_SQTT_MODE_BASE_IDX 1
+#define regRLC_RLCS_CP_DMA_SRCID_OVER 0x4eca
+#define regRLC_RLCS_CP_DMA_SRCID_OVER_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS1 0x4ecb
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS1_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS2 0x4ecc
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS2_BASE_IDX 1
+#define regRLC_RLCS_IMU_VIDCHG_CNTL 0x4ecd
+#define regRLC_RLCS_IMU_VIDCHG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_EDC_INT_CNTL 0x4ece
+#define regRLC_RLCS_EDC_INT_CNTL_BASE_IDX 1
+#define regRLC_RLCS_KMD_LOG_CNTL1 0x4ecf
+#define regRLC_RLCS_KMD_LOG_CNTL1_BASE_IDX 1
+#define regRLC_RLCS_KMD_LOG_CNTL2 0x4ed0
+#define regRLC_RLCS_KMD_LOG_CNTL2_BASE_IDX 1
+#define regRLC_RLCS_GPM_LEGACY_INT_STAT 0x4ed1
+#define regRLC_RLCS_GPM_LEGACY_INT_STAT_BASE_IDX 1
+#define regRLC_RLCS_GPM_LEGACY_INT_DISABLE 0x4ed2
+#define regRLC_RLCS_GPM_LEGACY_INT_DISABLE_BASE_IDX 1
+#define regRLC_RLCS_SRM_SRCID_CNTL 0x4ed3
+#define regRLC_RLCS_SRM_SRCID_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_0 0x4ed4
+#define regRLC_RLCS_GCR_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_1 0x4ed5
+#define regRLC_RLCS_GCR_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_2 0x4ed6
+#define regRLC_RLCS_GCR_DATA_2_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_3 0x4ed7
+#define regRLC_RLCS_GCR_DATA_3_BASE_IDX 1
+#define regRLC_RLCS_GCR_STATUS 0x4ed8
+#define regRLC_RLCS_GCR_STATUS_BASE_IDX 1
+#define regRLC_RLCS_PERFMON_CLK_CNTL_UCODE 0x4ed9
+#define regRLC_RLCS_PERFMON_CLK_CNTL_UCODE_BASE_IDX 1
+#define regRLC_RLCS_UTCL2_CNTL 0x4eda
+#define regRLC_RLCS_UTCL2_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA0 0x4edb
+#define regRLC_RLCS_IMU_RLC_MSG_DATA0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA1 0x4edc
+#define regRLC_RLCS_IMU_RLC_MSG_DATA1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA2 0x4edd
+#define regRLC_RLCS_IMU_RLC_MSG_DATA2_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA3 0x4ede
+#define regRLC_RLCS_IMU_RLC_MSG_DATA3_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA4 0x4edf
+#define regRLC_RLCS_IMU_RLC_MSG_DATA4_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_CONTROL 0x4ee0
+#define regRLC_RLCS_IMU_RLC_MSG_CONTROL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_CNTL 0x4ee1
+#define regRLC_RLCS_IMU_RLC_MSG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_DATA0 0x4ee2
+#define regRLC_RLCS_RLC_IMU_MSG_DATA0_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_CONTROL 0x4ee3
+#define regRLC_RLCS_RLC_IMU_MSG_CONTROL_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_CNTL 0x4ee4
+#define regRLC_RLCS_RLC_IMU_MSG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_0 0x4ee5
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_1 0x4ee6
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MUTEX_CNTL 0x4ee7
+#define regRLC_RLCS_IMU_RLC_MUTEX_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_STATUS 0x4ee8
+#define regRLC_RLCS_IMU_RLC_STATUS_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_STATUS 0x4ee9
+#define regRLC_RLCS_RLC_IMU_STATUS_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_DATA_1 0x4eea
+#define regRLC_RLCS_IMU_RAM_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_1_LSB 0x4eeb
+#define regRLC_RLCS_IMU_RAM_ADDR_1_LSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_1_MSB 0x4eec
+#define regRLC_RLCS_IMU_RAM_ADDR_1_MSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_DATA_0 0x4eed
+#define regRLC_RLCS_IMU_RAM_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_0_LSB 0x4eee
+#define regRLC_RLCS_IMU_RAM_ADDR_0_LSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_0_MSB 0x4eef
+#define regRLC_RLCS_IMU_RAM_ADDR_0_MSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_CNTL 0x4ef0
+#define regRLC_RLCS_IMU_RAM_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_GFX_DOORBELL_FENCE 0x4ef1
+#define regRLC_RLCS_IMU_GFX_DOORBELL_FENCE_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_CNTL_1 0x4ef3
+#define regRLC_RLCS_SDMA_INT_CNTL_1_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_CNTL_2 0x4ef4
+#define regRLC_RLCS_SDMA_INT_CNTL_2_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_STAT 0x4ef5
+#define regRLC_RLCS_SDMA_INT_STAT_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_INFO 0x4ef6
+#define regRLC_RLCS_SDMA_INT_INFO_BASE_IDX 1
+#define regRLC_RLCS_PMM_CGCG_CNTL 0x4ef7
+#define regRLC_RLCS_PMM_CGCG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_MEM_POWER_CTRL_LO 0x4ef8
+#define regRLC_RLCS_GFX_MEM_POWER_CTRL_LO_BASE_IDX 1
+#define regRLC_RLCS_GFX_RM_CNTL 0x4efa
+#define regRLC_RLCS_GFX_RM_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_1 0x4efb
+#define regRLC_RLCS_IH_CTRL_1_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_2 0x4efc
+#define regRLC_RLCS_IH_CTRL_2_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_3 0x4efd
+#define regRLC_RLCS_IH_CTRL_3_BASE_IDX 1
+#define regRLC_RLCS_IH_STATUS 0x4efe
+#define regRLC_RLCS_IH_STATUS_BASE_IDX 1
+#define regRLC_RLCS_DEC_END 0x4fff
+#define regRLC_RLCS_DEC_END_BASE_IDX 1
+
+
+// addressBlock: gc_pfvfdec_rlc
+// base address: 0x2a600
+#define regRLC_SAFE_MODE 0x0980
+#define regRLC_SAFE_MODE_BASE_IDX 1
+#define regRLC_SPM_SAMPLE_CNT 0x0981
+#define regRLC_SPM_SAMPLE_CNT_BASE_IDX 1
+#define regRLC_SPM_MC_CNTL 0x0982
+#define regRLC_SPM_MC_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_CNTL 0x0983
+#define regRLC_SPM_INT_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_STATUS 0x0984
+#define regRLC_SPM_INT_STATUS_BASE_IDX 1
+#define regRLC_SPM_INT_INFO_1 0x0985
+#define regRLC_SPM_INT_INFO_1_BASE_IDX 1
+#define regRLC_SPM_INT_INFO_2 0x0986
+#define regRLC_SPM_INT_INFO_2_BASE_IDX 1
+#define regRLC_CSIB_ADDR_LO 0x0987
+#define regRLC_CSIB_ADDR_LO_BASE_IDX 1
+#define regRLC_CSIB_ADDR_HI 0x0988
+#define regRLC_CSIB_ADDR_HI_BASE_IDX 1
+#define regRLC_CSIB_LENGTH 0x0989
+#define regRLC_CSIB_LENGTH_BASE_IDX 1
+#define regRLC_CP_SCHEDULERS 0x098a
+#define regRLC_CP_SCHEDULERS_BASE_IDX 1
+#define regRLC_CP_EOF_INT 0x098b
+#define regRLC_CP_EOF_INT_BASE_IDX 1
+#define regRLC_CP_EOF_INT_CNT 0x098c
+#define regRLC_CP_EOF_INT_CNT_BASE_IDX 1
+#define regRLC_SPARE_INT_0 0x098d
+#define regRLC_SPARE_INT_0_BASE_IDX 1
+#define regRLC_SPARE_INT_1 0x098e
+#define regRLC_SPARE_INT_1_BASE_IDX 1
+#define regRLC_SPARE_INT_2 0x098f
+#define regRLC_SPARE_INT_2_BASE_IDX 1
+#define regRLC_PACE_SPARE_INT 0x0990
+#define regRLC_PACE_SPARE_INT_BASE_IDX 1
+#define regRLC_PACE_SPARE_INT_1 0x0991
+#define regRLC_PACE_SPARE_INT_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT_1 0x0992
+#define regRLC_RLCV_SPARE_INT_1_BASE_IDX 1
+
+
+// addressBlock: gc_pwrdec
+// base address: 0x3c000
+#define regCGTS_TCC_DISABLE 0x5006
+#define regCGTS_TCC_DISABLE_BASE_IDX 1
+#define regCGTX_SPI_DEBUG_CLK_CTRL 0x507f
+#define regCGTX_SPI_DEBUG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_VGT_CLK_CTRL 0x5084
+#define regCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define regCGTT_IA_CLK_CTRL 0x5085
+#define regCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_WD_CLK_CTRL 0x5086
+#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define regCGTT_GS_NGG_CLK_CTRL 0x5087
+#define regCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_PA_CLK_CTRL 0x5088
+#define regCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL0 0x5089
+#define regCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL1 0x508a
+#define regCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL2 0x508b
+#define regCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_SQG_CLK_CTRL 0x508d
+#define regCGTT_SQG_CLK_CTRL_BASE_IDX 1
+#define regSQ_ALU_CLK_CTRL 0x508e
+#define regSQ_ALU_CLK_CTRL_BASE_IDX 1
+#define regSQ_TEX_CLK_CTRL 0x508f
+#define regSQ_TEX_CLK_CTRL_BASE_IDX 1
+#define regSQ_LDS_CLK_CTRL 0x5090
+#define regSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define regICG_SP_CLK_CTRL 0x5093
+#define regICG_SP_CLK_CTRL_BASE_IDX 1
+#define regTA_CGTT_CTRL 0x509d
+#define regTA_CGTT_CTRL_BASE_IDX 1
+#define regDB_CGTT_CLK_CTRL_0 0x50a4
+#define regDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define regCB_CGTT_SCLK_CTRL 0x50a8
+#define regCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regGFX_ICG_GL2A_CTRL 0x50ac
+#define regGFX_ICG_GL2A_CTRL_BASE_IDX 1
+#define regCGTT_CP_CLK_CTRL 0x50b0
+#define regCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPF_CLK_CTRL 0x50b1
+#define regCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPC_CLK_CTRL 0x50b2
+#define regCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_RLC_CLK_CTRL 0x50b5
+#define regCGTT_RLC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL3 0x50bc
+#define regCGTT_SC_CLK_CTRL3_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL4 0x50bd
+#define regCGTT_SC_CLK_CTRL4_BASE_IDX 1
+#define regGCEA_ICG_CTRL 0x50c4
+#define regGCEA_ICG_CTRL_BASE_IDX 1
+#define regGL1I_GL1R_MGCG_OVERRIDE 0x50e4
+#define regGL1I_GL1R_MGCG_OVERRIDE_BASE_IDX 1
+#define regGL1H_ICG_CTRL 0x50e8
+#define regGL1H_ICG_CTRL_BASE_IDX 1
+#define regCHI_CHR_MGCG_OVERRIDE 0x50e9
+#define regCHI_CHR_MGCG_OVERRIDE_BASE_IDX 1
+#define regICG_GL1C_CLK_CTRL 0x50ec
+#define regICG_GL1C_CLK_CTRL_BASE_IDX 1
+#define regICG_GL1A_CTRL 0x50f0
+#define regICG_GL1A_CTRL_BASE_IDX 1
+#define regICG_CHA_CTRL 0x50f1
+#define regICG_CHA_CTRL_BASE_IDX 1
+#define regGUS_ICG_CTRL 0x50f4
+#define regGUS_ICG_CTRL_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL0 0x50f8
+#define regCGTT_PH_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL1 0x50f9
+#define regCGTT_PH_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL2 0x50fa
+#define regCGTT_PH_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL3 0x50fb
+#define regCGTT_PH_CLK_CTRL3_BASE_IDX 1
+#define regGFX_ICG_GL2C_CTRL 0x50fc
+#define regGFX_ICG_GL2C_CTRL_BASE_IDX 1
+#define regGFX_ICG_GL2C_CTRL1 0x50fd
+#define regGFX_ICG_GL2C_CTRL1_BASE_IDX 1
+#define regICG_LDS_CLK_CTRL 0x5114
+#define regICG_LDS_CLK_CTRL_BASE_IDX 1
+#define regGFX_ICG_UTCL1_CTRL 0x511c
+#define regGFX_ICG_UTCL1_CTRL_BASE_IDX 1
+#define regICG_CHC_CLK_CTRL 0x5140
+#define regICG_CHC_CLK_CTRL_BASE_IDX 1
+#define regICG_CHCG_CLK_CTRL 0x5144
+#define regICG_CHCG_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_pspdec
+// base address: 0x3f000
+#define regCP_MES_DM_INDEX_ADDR 0x5c00
+#define regCP_MES_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_MES_DM_INDEX_DATA 0x5c01
+#define regCP_MES_DM_INDEX_DATA_BASE_IDX 1
+#define regCP_MEC_DM_INDEX_ADDR 0x5c02
+#define regCP_MEC_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_MEC_DM_INDEX_DATA 0x5c03
+#define regCP_MEC_DM_INDEX_DATA_BASE_IDX 1
+#define regCP_GFX_RS64_DM_INDEX_ADDR 0x5c04
+#define regCP_GFX_RS64_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_GFX_RS64_DM_INDEX_DATA 0x5c05
+#define regCP_GFX_RS64_DM_INDEX_DATA_BASE_IDX 1
+#define regCPG_PSP_DEBUG 0x5c10
+#define regCPG_PSP_DEBUG_BASE_IDX 1
+#define regCPC_PSP_DEBUG 0x5c11
+#define regCPC_PSP_DEBUG_BASE_IDX 1
+#define regGRBM_IOV_ERROR_FIFO 0x5e07
+#define regGRBM_IOV_ERROR_FIFO_BASE_IDX 1
+#define regGRBM_SEC_CNTL 0x5e0d
+#define regGRBM_SEC_CNTL_BASE_IDX 1
+#define regGRBM_CAM_INDEX 0x5e10
+#define regGRBM_CAM_INDEX_BASE_IDX 1
+#define regGRBM_HYP_CAM_INDEX 0x5e10
+#define regGRBM_HYP_CAM_INDEX_BASE_IDX 1
+#define regGRBM_CAM_DATA 0x5e11
+#define regGRBM_CAM_DATA_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA 0x5e11
+#define regGRBM_HYP_CAM_DATA_BASE_IDX 1
+#define regGRBM_CAM_DATA_UPPER 0x5e12
+#define regGRBM_CAM_DATA_UPPER_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA_UPPER 0x5e12
+#define regGRBM_HYP_CAM_DATA_UPPER_BASE_IDX 1
+#define regRLC_FWL_FIRST_VIOL_ADDR 0x5f26
+#define regRLC_FWL_FIRST_VIOL_ADDR_BASE_IDX 1
+
+
+// addressBlock: gc_gfx_imu_gfx_imudec
+// base address: 0x38000
+#define regGFX_IMU_C2PMSG_0 0x4000
+#define regGFX_IMU_C2PMSG_0_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_1 0x4001
+#define regGFX_IMU_C2PMSG_1_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_2 0x4002
+#define regGFX_IMU_C2PMSG_2_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_3 0x4003
+#define regGFX_IMU_C2PMSG_3_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_4 0x4004
+#define regGFX_IMU_C2PMSG_4_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_5 0x4005
+#define regGFX_IMU_C2PMSG_5_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_6 0x4006
+#define regGFX_IMU_C2PMSG_6_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_7 0x4007
+#define regGFX_IMU_C2PMSG_7_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_8 0x4008
+#define regGFX_IMU_C2PMSG_8_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_9 0x4009
+#define regGFX_IMU_C2PMSG_9_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_10 0x400a
+#define regGFX_IMU_C2PMSG_10_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_11 0x400b
+#define regGFX_IMU_C2PMSG_11_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_12 0x400c
+#define regGFX_IMU_C2PMSG_12_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_13 0x400d
+#define regGFX_IMU_C2PMSG_13_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_14 0x400e
+#define regGFX_IMU_C2PMSG_14_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_15 0x400f
+#define regGFX_IMU_C2PMSG_15_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_16 0x4010
+#define regGFX_IMU_C2PMSG_16_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_17 0x4011
+#define regGFX_IMU_C2PMSG_17_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_18 0x4012
+#define regGFX_IMU_C2PMSG_18_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_19 0x4013
+#define regGFX_IMU_C2PMSG_19_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_20 0x4014
+#define regGFX_IMU_C2PMSG_20_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_21 0x4015
+#define regGFX_IMU_C2PMSG_21_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_22 0x4016
+#define regGFX_IMU_C2PMSG_22_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_23 0x4017
+#define regGFX_IMU_C2PMSG_23_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_24 0x4018
+#define regGFX_IMU_C2PMSG_24_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_25 0x4019
+#define regGFX_IMU_C2PMSG_25_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_26 0x401a
+#define regGFX_IMU_C2PMSG_26_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_27 0x401b
+#define regGFX_IMU_C2PMSG_27_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_28 0x401c
+#define regGFX_IMU_C2PMSG_28_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_29 0x401d
+#define regGFX_IMU_C2PMSG_29_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_30 0x401e
+#define regGFX_IMU_C2PMSG_30_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_31 0x401f
+#define regGFX_IMU_C2PMSG_31_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_32 0x4020
+#define regGFX_IMU_C2PMSG_32_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_33 0x4021
+#define regGFX_IMU_C2PMSG_33_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_34 0x4022
+#define regGFX_IMU_C2PMSG_34_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_35 0x4023
+#define regGFX_IMU_C2PMSG_35_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_36 0x4024
+#define regGFX_IMU_C2PMSG_36_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_37 0x4025
+#define regGFX_IMU_C2PMSG_37_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_38 0x4026
+#define regGFX_IMU_C2PMSG_38_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_39 0x4027
+#define regGFX_IMU_C2PMSG_39_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_40 0x4028
+#define regGFX_IMU_C2PMSG_40_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_41 0x4029
+#define regGFX_IMU_C2PMSG_41_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_42 0x402a
+#define regGFX_IMU_C2PMSG_42_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_43 0x402b
+#define regGFX_IMU_C2PMSG_43_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_44 0x402c
+#define regGFX_IMU_C2PMSG_44_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_45 0x402d
+#define regGFX_IMU_C2PMSG_45_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_46 0x402e
+#define regGFX_IMU_C2PMSG_46_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_47 0x402f
+#define regGFX_IMU_C2PMSG_47_BASE_IDX 1
+#define regGFX_IMU_MSG_FLAGS 0x403f
+#define regGFX_IMU_MSG_FLAGS_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL0 0x4040
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL0_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL1 0x4041
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL1_BASE_IDX 1
+#define regGFX_IMU_PWRMGT_IRQ_CTRL 0x4042
+#define regGFX_IMU_PWRMGT_IRQ_CTRL_BASE_IDX 1
+#define regGFX_IMU_MP1_MUTEX 0x4043
+#define regGFX_IMU_MP1_MUTEX_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_4 0x4046
+#define regGFX_IMU_RLC_DATA_4_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_3 0x4047
+#define regGFX_IMU_RLC_DATA_3_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_2 0x4048
+#define regGFX_IMU_RLC_DATA_2_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_1 0x4049
+#define regGFX_IMU_RLC_DATA_1_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_0 0x404a
+#define regGFX_IMU_RLC_DATA_0_BASE_IDX 1
+#define regGFX_IMU_RLC_CMD 0x404b
+#define regGFX_IMU_RLC_CMD_BASE_IDX 1
+#define regGFX_IMU_RLC_MUTEX 0x404c
+#define regGFX_IMU_RLC_MUTEX_BASE_IDX 1
+#define regGFX_IMU_RLC_MSG_STATUS 0x404f
+#define regGFX_IMU_RLC_MSG_STATUS_BASE_IDX 1
+#define regRLC_GFX_IMU_DATA_0 0x4052
+#define regRLC_GFX_IMU_DATA_0_BASE_IDX 1
+#define regRLC_GFX_IMU_CMD 0x4053
+#define regRLC_GFX_IMU_CMD_BASE_IDX 1
+#define regGFX_IMU_RLC_STATUS 0x4054
+#define regGFX_IMU_RLC_STATUS_BASE_IDX 1
+#define regGFX_IMU_STATUS 0x4055
+#define regGFX_IMU_STATUS_BASE_IDX 1
+#define regGFX_IMU_SOC_DATA 0x4059
+#define regGFX_IMU_SOC_DATA_BASE_IDX 1
+#define regGFX_IMU_SOC_ADDR 0x405a
+#define regGFX_IMU_SOC_ADDR_BASE_IDX 1
+#define regGFX_IMU_SOC_REQ 0x405b
+#define regGFX_IMU_SOC_REQ_BASE_IDX 1
+#define regGFX_IMU_VF_CTRL 0x405c
+#define regGFX_IMU_VF_CTRL_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY 0x4060
+#define regGFX_IMU_TELEMETRY_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY_DATA 0x4061
+#define regGFX_IMU_TELEMETRY_DATA_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY_TEMPERATURE 0x4062
+#define regGFX_IMU_TELEMETRY_TEMPERATURE_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_0 0x4068
+#define regGFX_IMU_SCRATCH_0_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_1 0x4069
+#define regGFX_IMU_SCRATCH_1_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_2 0x406a
+#define regGFX_IMU_SCRATCH_2_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_3 0x406b
+#define regGFX_IMU_SCRATCH_3_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_4 0x406c
+#define regGFX_IMU_SCRATCH_4_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_5 0x406d
+#define regGFX_IMU_SCRATCH_5_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_6 0x406e
+#define regGFX_IMU_SCRATCH_6_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_7 0x406f
+#define regGFX_IMU_SCRATCH_7_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_8 0x4070
+#define regGFX_IMU_SCRATCH_8_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_9 0x4071
+#define regGFX_IMU_SCRATCH_9_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_10 0x4072
+#define regGFX_IMU_SCRATCH_10_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_11 0x4073
+#define regGFX_IMU_SCRATCH_11_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_12 0x4074
+#define regGFX_IMU_SCRATCH_12_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_13 0x4075
+#define regGFX_IMU_SCRATCH_13_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_14 0x4076
+#define regGFX_IMU_SCRATCH_14_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_15 0x4077
+#define regGFX_IMU_SCRATCH_15_BASE_IDX 1
+#define regGFX_IMU_FW_GTS_LO 0x4078
+#define regGFX_IMU_FW_GTS_LO_BASE_IDX 1
+#define regGFX_IMU_FW_GTS_HI 0x4079
+#define regGFX_IMU_FW_GTS_HI_BASE_IDX 1
+#define regGFX_IMU_GTS_OFFSET_LO 0x407a
+#define regGFX_IMU_GTS_OFFSET_LO_BASE_IDX 1
+#define regGFX_IMU_GTS_OFFSET_HI 0x407b
+#define regGFX_IMU_GTS_OFFSET_HI_BASE_IDX 1
+#define regGFX_IMU_RLC_GTS_OFFSET_LO 0x407c
+#define regGFX_IMU_RLC_GTS_OFFSET_LO_BASE_IDX 1
+#define regGFX_IMU_RLC_GTS_OFFSET_HI 0x407d
+#define regGFX_IMU_RLC_GTS_OFFSET_HI_BASE_IDX 1
+#define regGFX_IMU_CORE_INT_STATUS 0x407f
+#define regGFX_IMU_CORE_INT_STATUS_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_MASK 0x4080
+#define regGFX_IMU_PIC_INT_MASK_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_LVL 0x4081
+#define regGFX_IMU_PIC_INT_LVL_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_EDGE 0x4082
+#define regGFX_IMU_PIC_INT_EDGE_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_0 0x4083
+#define regGFX_IMU_PIC_INT_PRI_0_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_1 0x4084
+#define regGFX_IMU_PIC_INT_PRI_1_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_2 0x4085
+#define regGFX_IMU_PIC_INT_PRI_2_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_3 0x4086
+#define regGFX_IMU_PIC_INT_PRI_3_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_4 0x4087
+#define regGFX_IMU_PIC_INT_PRI_4_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_5 0x4088
+#define regGFX_IMU_PIC_INT_PRI_5_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_6 0x4089
+#define regGFX_IMU_PIC_INT_PRI_6_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_7 0x408a
+#define regGFX_IMU_PIC_INT_PRI_7_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_STATUS 0x408b
+#define regGFX_IMU_PIC_INT_STATUS_BASE_IDX 1
+#define regGFX_IMU_PIC_INTR 0x408c
+#define regGFX_IMU_PIC_INTR_BASE_IDX 1
+#define regGFX_IMU_PIC_INTR_ID 0x408d
+#define regGFX_IMU_PIC_INTR_ID_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_1 0x4090
+#define regGFX_IMU_IH_CTRL_1_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_2 0x4091
+#define regGFX_IMU_IH_CTRL_2_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_3 0x4092
+#define regGFX_IMU_IH_CTRL_3_BASE_IDX 1
+#define regGFX_IMU_IH_STATUS 0x4093
+#define regGFX_IMU_IH_STATUS_BASE_IDX 1
+#define regGFX_IMU_FUSESTRAP 0x4094
+#define regGFX_IMU_FUSESTRAP_BASE_IDX 1
+#define regGFX_IMU_SMUIO_VIDCHG_CTRL 0x4098
+#define regGFX_IMU_SMUIO_VIDCHG_CTRL_BASE_IDX 1
+#define regGFX_IMU_GFXCLK_BYPASS_CTRL 0x409c
+#define regGFX_IMU_GFXCLK_BYPASS_CTRL_BASE_IDX 1
+#define regGFX_IMU_CLK_CTRL 0x409d
+#define regGFX_IMU_CLK_CTRL_BASE_IDX 1
+#define regGFX_IMU_DOORBELL_CONTROL 0x409e
+#define regGFX_IMU_DOORBELL_CONTROL_BASE_IDX 1
+#define regGFX_IMU_RLC_CG_CTRL 0x40a0
+#define regGFX_IMU_RLC_CG_CTRL_BASE_IDX 1
+#define regGFX_IMU_RLC_THROTTLE_GFX 0x40a1
+#define regGFX_IMU_RLC_THROTTLE_GFX_BASE_IDX 1
+#define regGFX_IMU_RLC_RESET_VECTOR 0x40a2
+#define regGFX_IMU_RLC_RESET_VECTOR_BASE_IDX 1
+#define regGFX_IMU_RLC_OVERRIDE 0x40a3
+#define regGFX_IMU_RLC_OVERRIDE_BASE_IDX 1
+#define regGFX_IMU_DPM_CONTROL 0x40a8
+#define regGFX_IMU_DPM_CONTROL_BASE_IDX 1
+#define regGFX_IMU_DPM_ACC 0x40a9
+#define regGFX_IMU_DPM_ACC_BASE_IDX 1
+#define regGFX_IMU_DPM_REF_COUNTER 0x40aa
+#define regGFX_IMU_DPM_REF_COUNTER_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_INDEX 0x40ac
+#define regGFX_IMU_RLC_RAM_INDEX_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_ADDR_HIGH 0x40ad
+#define regGFX_IMU_RLC_RAM_ADDR_HIGH_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_ADDR_LOW 0x40ae
+#define regGFX_IMU_RLC_RAM_ADDR_LOW_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_DATA 0x40af
+#define regGFX_IMU_RLC_RAM_DATA_BASE_IDX 1
+#define regGFX_IMU_FENCE_CTRL 0x40b0
+#define regGFX_IMU_FENCE_CTRL_BASE_IDX 1
+#define regGFX_IMU_FENCE_LOG_INIT 0x40b1
+#define regGFX_IMU_FENCE_LOG_INIT_BASE_IDX 1
+#define regGFX_IMU_FENCE_LOG_ADDR 0x40b2
+#define regGFX_IMU_FENCE_LOG_ADDR_BASE_IDX 1
+#define regGFX_IMU_PROGRAM_CTR 0x40b5
+#define regGFX_IMU_PROGRAM_CTR_BASE_IDX 1
+#define regGFX_IMU_CORE_CTRL 0x40b6
+#define regGFX_IMU_CORE_CTRL_BASE_IDX 1
+#define regGFX_IMU_CORE_STATUS 0x40b7
+#define regGFX_IMU_CORE_STATUS_BASE_IDX 1
+#define regGFX_IMU_PWROKRAW 0x40b8
+#define regGFX_IMU_PWROKRAW_BASE_IDX 1
+#define regGFX_IMU_PWROK 0x40b9
+#define regGFX_IMU_PWROK_BASE_IDX 1
+#define regGFX_IMU_GAP_PWROK 0x40ba
+#define regGFX_IMU_GAP_PWROK_BASE_IDX 1
+#define regGFX_IMU_RESETn 0x40bb
+#define regGFX_IMU_RESETn_BASE_IDX 1
+#define regGFX_IMU_GFX_RESET_CTRL 0x40bc
+#define regGFX_IMU_GFX_RESET_CTRL_BASE_IDX 1
+#define regGFX_IMU_AEB_OVERRIDE 0x40bd
+#define regGFX_IMU_AEB_OVERRIDE_BASE_IDX 1
+#define regGFX_IMU_VDCI_RESET_CTRL 0x40be
+#define regGFX_IMU_VDCI_RESET_CTRL_BASE_IDX 1
+#define regGFX_IMU_GFX_ISO_CTRL 0x40bf
+#define regGFX_IMU_GFX_ISO_CTRL_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CTRL0 0x40c0
+#define regGFX_IMU_TIMER0_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CTRL1 0x40c1
+#define regGFX_IMU_TIMER0_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP_AUTOINC 0x40c2
+#define regGFX_IMU_TIMER0_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP_INTEN 0x40c3
+#define regGFX_IMU_TIMER0_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP0 0x40c4
+#define regGFX_IMU_TIMER0_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP1 0x40c5
+#define regGFX_IMU_TIMER0_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP3 0x40c7
+#define regGFX_IMU_TIMER0_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER0_VALUE 0x40c8
+#define regGFX_IMU_TIMER0_VALUE_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CTRL0 0x40c9
+#define regGFX_IMU_TIMER1_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CTRL1 0x40ca
+#define regGFX_IMU_TIMER1_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP_AUTOINC 0x40cb
+#define regGFX_IMU_TIMER1_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP_INTEN 0x40cc
+#define regGFX_IMU_TIMER1_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP0 0x40cd
+#define regGFX_IMU_TIMER1_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP1 0x40ce
+#define regGFX_IMU_TIMER1_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP3 0x40d0
+#define regGFX_IMU_TIMER1_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER1_VALUE 0x40d1
+#define regGFX_IMU_TIMER1_VALUE_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CTRL0 0x40d2
+#define regGFX_IMU_TIMER2_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CTRL1 0x40d3
+#define regGFX_IMU_TIMER2_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP_AUTOINC 0x40d4
+#define regGFX_IMU_TIMER2_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP_INTEN 0x40d5
+#define regGFX_IMU_TIMER2_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP0 0x40d6
+#define regGFX_IMU_TIMER2_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP1 0x40d7
+#define regGFX_IMU_TIMER2_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP3 0x40d9
+#define regGFX_IMU_TIMER2_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER2_VALUE 0x40da
+#define regGFX_IMU_TIMER2_VALUE_BASE_IDX 1
+#define regGFX_IMU_FUSE_CTRL 0x40e0
+#define regGFX_IMU_FUSE_CTRL_BASE_IDX 1
+#define regGFX_IMU_D_RAM_ADDR 0x40fc
+#define regGFX_IMU_D_RAM_ADDR_BASE_IDX 1
+#define regGFX_IMU_D_RAM_DATA 0x40fd
+#define regGFX_IMU_D_RAM_DATA_BASE_IDX 1
+#define regGFX_IMU_GFX_IH_GASKET_CTRL 0x40ff
+#define regGFX_IMU_GFX_IH_GASKET_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_gfx_imu_gfx_imu_pspdec
+// base address: 0x3fe00
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_HI 0x5f81
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_HI_BASE_IDX 1
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_LO 0x5f82
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_LO_BASE_IDX 1
+#define regGFX_IMU_RLC_BOOTLOADER_SIZE 0x5f83
+#define regGFX_IMU_RLC_BOOTLOADER_SIZE_BASE_IDX 1
+#define regGFX_IMU_I_RAM_ADDR 0x5f90
+#define regGFX_IMU_I_RAM_ADDR_BASE_IDX 1
+#define regGFX_IMU_I_RAM_DATA 0x5f91
+#define regGFX_IMU_I_RAM_DATA_BASE_IDX 1
+
+
+// addressBlock: gccacind
+// base address: 0x0
+#define ixGC_CAC_ID 0x0000
+#define ixGC_CAC_CNTL 0x0001
+#define ixGC_CAC_ACC_CP0 0x0010
+#define ixGC_CAC_ACC_CP1 0x0011
+#define ixGC_CAC_ACC_CP2 0x0012
+#define ixGC_CAC_ACC_EA0 0x0013
+#define ixGC_CAC_ACC_EA1 0x0014
+#define ixGC_CAC_ACC_EA2 0x0015
+#define ixGC_CAC_ACC_EA3 0x0016
+#define ixGC_CAC_ACC_EA4 0x0017
+#define ixGC_CAC_ACC_EA5 0x0018
+#define ixGC_CAC_ACC_UTCL2_ROUTER0 0x0019
+#define ixGC_CAC_ACC_UTCL2_ROUTER1 0x001a
+#define ixGC_CAC_ACC_UTCL2_ROUTER2 0x001b
+#define ixGC_CAC_ACC_UTCL2_ROUTER3 0x001c
+#define ixGC_CAC_ACC_UTCL2_ROUTER4 0x001d
+#define ixGC_CAC_ACC_UTCL2_ROUTER5 0x001e
+#define ixGC_CAC_ACC_UTCL2_ROUTER6 0x001f
+#define ixGC_CAC_ACC_UTCL2_ROUTER7 0x0020
+#define ixGC_CAC_ACC_UTCL2_ROUTER8 0x0021
+#define ixGC_CAC_ACC_UTCL2_ROUTER9 0x0022
+#define ixGC_CAC_ACC_UTCL2_VML20 0x0023
+#define ixGC_CAC_ACC_UTCL2_VML21 0x0024
+#define ixGC_CAC_ACC_UTCL2_VML22 0x0025
+#define ixGC_CAC_ACC_UTCL2_VML23 0x0026
+#define ixGC_CAC_ACC_UTCL2_VML24 0x0027
+#define ixGC_CAC_ACC_UTCL2_WALKER0 0x0028
+#define ixGC_CAC_ACC_UTCL2_WALKER1 0x0029
+#define ixGC_CAC_ACC_UTCL2_WALKER2 0x002a
+#define ixGC_CAC_ACC_UTCL2_WALKER3 0x002b
+#define ixGC_CAC_ACC_UTCL2_WALKER4 0x002c
+#define ixGC_CAC_ACC_GDS0 0x002d
+#define ixGC_CAC_ACC_GDS1 0x002e
+#define ixGC_CAC_ACC_GDS2 0x002f
+#define ixGC_CAC_ACC_GDS3 0x0030
+#define ixGC_CAC_ACC_GDS4 0x0031
+#define ixGC_CAC_ACC_GE0 0x0032
+#define ixGC_CAC_ACC_GE1 0x0033
+#define ixGC_CAC_ACC_GE2 0x0034
+#define ixGC_CAC_ACC_GE3 0x0035
+#define ixGC_CAC_ACC_GE4 0x0036
+#define ixGC_CAC_ACC_GE5 0x0037
+#define ixGC_CAC_ACC_GE6 0x0038
+#define ixGC_CAC_ACC_GE7 0x0039
+#define ixGC_CAC_ACC_GE8 0x003a
+#define ixGC_CAC_ACC_GE9 0x003b
+#define ixGC_CAC_ACC_GE10 0x003c
+#define ixGC_CAC_ACC_GE11 0x003d
+#define ixGC_CAC_ACC_GE12 0x003e
+#define ixGC_CAC_ACC_GE13 0x003f
+#define ixGC_CAC_ACC_GE14 0x0040
+#define ixGC_CAC_ACC_GE15 0x0041
+#define ixGC_CAC_ACC_GE16 0x0042
+#define ixGC_CAC_ACC_GE17 0x0043
+#define ixGC_CAC_ACC_GE18 0x0044
+#define ixGC_CAC_ACC_GE19 0x0045
+#define ixGC_CAC_ACC_GE20 0x0046
+#define ixGC_CAC_ACC_PMM0 0x0047
+#define ixGC_CAC_ACC_GL2C0 0x0048
+#define ixGC_CAC_ACC_GL2C1 0x0049
+#define ixGC_CAC_ACC_GL2C2 0x004a
+#define ixGC_CAC_ACC_GL2C3 0x004b
+#define ixGC_CAC_ACC_GL2C4 0x004c
+#define ixGC_CAC_ACC_PH0 0x004d
+#define ixGC_CAC_ACC_PH1 0x004e
+#define ixGC_CAC_ACC_PH2 0x004f
+#define ixGC_CAC_ACC_PH3 0x0050
+#define ixGC_CAC_ACC_PH4 0x0051
+#define ixGC_CAC_ACC_PH5 0x0052
+#define ixGC_CAC_ACC_PH6 0x0053
+#define ixGC_CAC_ACC_PH7 0x0054
+#define ixGC_CAC_ACC_SDMA0 0x0055
+#define ixGC_CAC_ACC_SDMA1 0x0056
+#define ixGC_CAC_ACC_SDMA2 0x0057
+#define ixGC_CAC_ACC_SDMA3 0x0058
+#define ixGC_CAC_ACC_SDMA4 0x0059
+#define ixGC_CAC_ACC_SDMA5 0x005a
+#define ixGC_CAC_ACC_SDMA6 0x005b
+#define ixGC_CAC_ACC_SDMA7 0x005c
+#define ixGC_CAC_ACC_SDMA8 0x005d
+#define ixGC_CAC_ACC_SDMA9 0x005e
+#define ixGC_CAC_ACC_SDMA10 0x005f
+#define ixGC_CAC_ACC_SDMA11 0x0060
+#define ixGC_CAC_ACC_CHC0 0x0061
+#define ixGC_CAC_ACC_CHC1 0x0062
+#define ixGC_CAC_ACC_CHC2 0x0063
+#define ixGC_CAC_ACC_GUS0 0x0064
+#define ixGC_CAC_ACC_GUS1 0x0065
+#define ixGC_CAC_ACC_GUS2 0x0066
+#define ixGC_CAC_ACC_RLC0 0x0067
+#define ixGC_CAC_ACC_UTCL2_ATCL20 0x0068
+#define ixGC_CAC_ACC_UTCL2_ATCL21 0x0069
+#define ixGC_CAC_ACC_UTCL2_ATCL22 0x006a
+#define ixGC_CAC_ACC_UTCL2_ATCL23 0x006b
+#define ixGC_CAC_ACC_UTCL2_ATCL24 0x006c
+#define ixRELEASE_TO_STALL_LUT_1_8 0x0100
+#define ixRELEASE_TO_STALL_LUT_9_16 0x0101
+#define ixRELEASE_TO_STALL_LUT_17_20 0x0102
+#define ixSTALL_TO_RELEASE_LUT_1_4 0x0103
+#define ixSTALL_TO_RELEASE_LUT_5_7 0x0104
+#define ixSTALL_TO_PWRBRK_LUT_1_4 0x0105
+#define ixSTALL_TO_PWRBRK_LUT_5_7 0x0106
+#define ixPWRBRK_STALL_TO_RELEASE_LUT_1_4 0x0107
+#define ixPWRBRK_STALL_TO_RELEASE_LUT_5_7 0x0108
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_1_8 0x0109
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_9_16 0x010a
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_17_20 0x010b
+#define ixFIXED_PATTERN_PERF_COUNTER_1 0x010c
+#define ixFIXED_PATTERN_PERF_COUNTER_2 0x010d
+#define ixFIXED_PATTERN_PERF_COUNTER_3 0x010e
+#define ixFIXED_PATTERN_PERF_COUNTER_4 0x010f
+#define ixFIXED_PATTERN_PERF_COUNTER_5 0x0110
+#define ixFIXED_PATTERN_PERF_COUNTER_6 0x0111
+#define ixFIXED_PATTERN_PERF_COUNTER_7 0x0112
+#define ixFIXED_PATTERN_PERF_COUNTER_8 0x0113
+#define ixFIXED_PATTERN_PERF_COUNTER_9 0x0114
+#define ixFIXED_PATTERN_PERF_COUNTER_10 0x0115
+#define ixHW_LUT_UPDATE_STATUS 0x0116
+
+
+// addressBlock: secacind
+// base address: 0x0
+#define ixSE_CAC_ID 0x0000
+#define ixSE_CAC_CNTL 0x0001
+
+
+// addressBlock: grtavfsind
+// base address: 0x0
+#define ixRTAVFS_REG0 0x0000
+#define ixRTAVFS_REG1 0x0001
+#define ixRTAVFS_REG2 0x0002
+#define ixRTAVFS_REG3 0x0003
+#define ixRTAVFS_REG4 0x0004
+#define ixRTAVFS_REG5 0x0005
+#define ixRTAVFS_REG6 0x0006
+#define ixRTAVFS_REG7 0x0007
+#define ixRTAVFS_REG8 0x0008
+#define ixRTAVFS_REG9 0x0009
+#define ixRTAVFS_REG10 0x000a
+#define ixRTAVFS_REG11 0x000b
+#define ixRTAVFS_REG12 0x000c
+#define ixRTAVFS_REG13 0x000d
+#define ixRTAVFS_REG14 0x000e
+#define ixRTAVFS_REG15 0x000f
+#define ixRTAVFS_REG16 0x0010
+#define ixRTAVFS_REG17 0x0011
+#define ixRTAVFS_REG18 0x0012
+#define ixRTAVFS_REG19 0x0013
+#define ixRTAVFS_REG20 0x0014
+#define ixRTAVFS_REG21 0x0015
+#define ixRTAVFS_REG22 0x0016
+#define ixRTAVFS_REG23 0x0017
+#define ixRTAVFS_REG24 0x0018
+#define ixRTAVFS_REG25 0x0019
+#define ixRTAVFS_REG26 0x001a
+#define ixRTAVFS_REG27 0x001b
+#define ixRTAVFS_REG28 0x001c
+#define ixRTAVFS_REG29 0x001d
+#define ixRTAVFS_REG30 0x001e
+#define ixRTAVFS_REG31 0x001f
+#define ixRTAVFS_REG32 0x0020
+#define ixRTAVFS_REG33 0x0021
+#define ixRTAVFS_REG34 0x0022
+#define ixRTAVFS_REG35 0x0023
+#define ixRTAVFS_REG36 0x0024
+#define ixRTAVFS_REG37 0x0025
+#define ixRTAVFS_REG38 0x0026
+#define ixRTAVFS_REG39 0x0027
+#define ixRTAVFS_REG40 0x0028
+#define ixRTAVFS_REG41 0x0029
+#define ixRTAVFS_REG42 0x002a
+#define ixRTAVFS_REG43 0x002b
+#define ixRTAVFS_REG44 0x002c
+#define ixRTAVFS_REG45 0x002d
+#define ixRTAVFS_REG46 0x002e
+#define ixRTAVFS_REG47 0x002f
+#define ixRTAVFS_REG48 0x0030
+#define ixRTAVFS_REG49 0x0031
+#define ixRTAVFS_REG50 0x0032
+#define ixRTAVFS_REG51 0x0033
+#define ixRTAVFS_REG52 0x0034
+#define ixRTAVFS_REG53 0x0035
+#define ixRTAVFS_REG54 0x0036
+#define ixRTAVFS_REG55 0x0037
+#define ixRTAVFS_REG56 0x0038
+#define ixRTAVFS_REG57 0x0039
+#define ixRTAVFS_REG58 0x003a
+#define ixRTAVFS_REG59 0x003b
+#define ixRTAVFS_REG60 0x003c
+#define ixRTAVFS_REG61 0x003d
+#define ixRTAVFS_REG62 0x003e
+#define ixRTAVFS_REG63 0x003f
+#define ixRTAVFS_REG64 0x0040
+#define ixRTAVFS_REG65 0x0041
+#define ixRTAVFS_REG66 0x0042
+#define ixRTAVFS_REG67 0x0043
+#define ixRTAVFS_REG68 0x0044
+#define ixRTAVFS_REG69 0x0045
+#define ixRTAVFS_REG70 0x0046
+#define ixRTAVFS_REG71 0x0047
+#define ixRTAVFS_REG72 0x0048
+#define ixRTAVFS_REG73 0x0049
+#define ixRTAVFS_REG74 0x004a
+#define ixRTAVFS_REG75 0x004b
+#define ixRTAVFS_REG76 0x004c
+#define ixRTAVFS_REG77 0x004d
+#define ixRTAVFS_REG78 0x004e
+#define ixRTAVFS_REG79 0x004f
+#define ixRTAVFS_REG80 0x0050
+#define ixRTAVFS_REG81 0x0051
+#define ixRTAVFS_REG82 0x0052
+#define ixRTAVFS_REG83 0x0053
+#define ixRTAVFS_REG84 0x0054
+#define ixRTAVFS_REG85 0x0055
+#define ixRTAVFS_REG86 0x0056
+#define ixRTAVFS_REG87 0x0057
+#define ixRTAVFS_REG88 0x0058
+#define ixRTAVFS_REG89 0x0059
+#define ixRTAVFS_REG90 0x005a
+#define ixRTAVFS_REG91 0x005b
+#define ixRTAVFS_REG92 0x005c
+#define ixRTAVFS_REG93 0x005d
+#define ixRTAVFS_REG94 0x005e
+#define ixRTAVFS_REG95 0x005f
+#define ixRTAVFS_REG96 0x0060
+#define ixRTAVFS_REG97 0x0061
+#define ixRTAVFS_REG98 0x0062
+#define ixRTAVFS_REG99 0x0063
+#define ixRTAVFS_REG100 0x0064
+#define ixRTAVFS_REG101 0x0065
+#define ixRTAVFS_REG102 0x0066
+#define ixRTAVFS_REG103 0x0067
+#define ixRTAVFS_REG104 0x0068
+#define ixRTAVFS_REG105 0x0069
+#define ixRTAVFS_REG106 0x006a
+#define ixRTAVFS_REG107 0x006b
+#define ixRTAVFS_REG108 0x006c
+#define ixRTAVFS_REG109 0x006d
+#define ixRTAVFS_REG110 0x006e
+#define ixRTAVFS_REG111 0x006f
+#define ixRTAVFS_REG112 0x0070
+#define ixRTAVFS_REG113 0x0071
+#define ixRTAVFS_REG114 0x0072
+#define ixRTAVFS_REG115 0x0073
+#define ixRTAVFS_REG116 0x0074
+#define ixRTAVFS_REG117 0x0075
+#define ixRTAVFS_REG118 0x0076
+#define ixRTAVFS_REG119 0x0077
+#define ixRTAVFS_REG120 0x0078
+#define ixRTAVFS_REG121 0x0079
+#define ixRTAVFS_REG122 0x007a
+#define ixRTAVFS_REG123 0x007b
+#define ixRTAVFS_REG124 0x007c
+#define ixRTAVFS_REG125 0x007d
+#define ixRTAVFS_REG126 0x007e
+#define ixRTAVFS_REG127 0x007f
+#define ixRTAVFS_REG128 0x0080
+#define ixRTAVFS_REG129 0x0081
+#define ixRTAVFS_REG130 0x0082
+#define ixRTAVFS_REG131 0x0083
+#define ixRTAVFS_REG132 0x0084
+#define ixRTAVFS_REG133 0x0085
+#define ixRTAVFS_REG134 0x0086
+#define ixRTAVFS_REG135 0x0087
+#define ixRTAVFS_REG136 0x0088
+#define ixRTAVFS_REG137 0x0089
+#define ixRTAVFS_REG138 0x008a
+#define ixRTAVFS_REG139 0x008b
+#define ixRTAVFS_REG140 0x008c
+#define ixRTAVFS_REG141 0x008d
+#define ixRTAVFS_REG142 0x008e
+#define ixRTAVFS_REG143 0x008f
+#define ixRTAVFS_REG144 0x0090
+#define ixRTAVFS_REG145 0x0091
+#define ixRTAVFS_REG146 0x0092
+#define ixRTAVFS_REG147 0x0093
+#define ixRTAVFS_REG148 0x0094
+#define ixRTAVFS_REG149 0x0095
+#define ixRTAVFS_REG150 0x0096
+#define ixRTAVFS_REG151 0x0097
+#define ixRTAVFS_REG152 0x0098
+#define ixRTAVFS_REG153 0x0099
+#define ixRTAVFS_REG154 0x009a
+#define ixRTAVFS_REG155 0x009b
+#define ixRTAVFS_REG156 0x009c
+#define ixRTAVFS_REG157 0x009d
+#define ixRTAVFS_REG158 0x009e
+#define ixRTAVFS_REG159 0x009f
+#define ixRTAVFS_REG160 0x00a0
+#define ixRTAVFS_REG161 0x00a1
+#define ixRTAVFS_REG162 0x00a2
+#define ixRTAVFS_REG163 0x00a3
+#define ixRTAVFS_REG164 0x00a4
+#define ixRTAVFS_REG165 0x00a5
+#define ixRTAVFS_REG166 0x00a6
+#define ixRTAVFS_REG167 0x00a7
+#define ixRTAVFS_REG168 0x00a8
+#define ixRTAVFS_REG169 0x00a9
+#define ixRTAVFS_REG170 0x00aa
+#define ixRTAVFS_REG171 0x00ab
+#define ixRTAVFS_REG172 0x00ac
+#define ixRTAVFS_REG173 0x00ad
+#define ixRTAVFS_REG174 0x00ae
+#define ixRTAVFS_REG175 0x00af
+#define ixRTAVFS_REG176 0x00b0
+#define ixRTAVFS_REG177 0x00b1
+#define ixRTAVFS_REG178 0x00b2
+#define ixRTAVFS_REG179 0x00b3
+#define ixRTAVFS_REG180 0x00b4
+#define ixRTAVFS_REG181 0x00b5
+#define ixRTAVFS_REG182 0x00b6
+#define ixRTAVFS_REG183 0x00b7
+#define ixRTAVFS_REG184 0x00b8
+#define ixRTAVFS_REG185 0x00b9
+#define ixRTAVFS_REG186 0x00ba
+#define ixRTAVFS_REG187 0x00bb
+#define ixRTAVFS_REG189 0x00bd
+#define ixRTAVFS_REG190 0x00be
+#define ixRTAVFS_REG191 0x00bf
+#define ixRTAVFS_REG192 0x00c0
+#define ixRTAVFS_REG193 0x00c1
+#define ixRTAVFS_REG194 0x00c2
+
+
+// addressBlock: sqind
+// base address: 0x0
+#define ixSQ_DEBUG_STS_LOCAL 0x0008
+#define ixSQ_DEBUG_CTRL_LOCAL 0x0009
+#define ixSQ_WAVE_ACTIVE 0x000a
+#define ixSQ_WAVE_VALID_AND_IDLE 0x000b
+#define ixSQ_WAVE_MODE 0x0101
+#define ixSQ_WAVE_STATUS 0x0102
+#define ixSQ_WAVE_TRAPSTS 0x0103
+#define ixSQ_WAVE_GPR_ALLOC 0x0105
+#define ixSQ_WAVE_LDS_ALLOC 0x0106
+#define ixSQ_WAVE_IB_STS 0x0107
+#define ixSQ_WAVE_PC_LO 0x0108
+#define ixSQ_WAVE_PC_HI 0x0109
+#define ixSQ_WAVE_IB_DBG1 0x010d
+#define ixSQ_WAVE_FLUSH_IB 0x010e
+#define ixSQ_WAVE_FLAT_SCRATCH_LO 0x0114
+#define ixSQ_WAVE_FLAT_SCRATCH_HI 0x0115
+#define ixSQ_WAVE_HW_ID1 0x0117
+#define ixSQ_WAVE_HW_ID2 0x0118
+#define ixSQ_WAVE_POPS_PACKER 0x0119
+#define ixSQ_WAVE_SCHED_MODE 0x011a
+#define ixSQ_WAVE_IB_STS2 0x011c
+#define ixSQ_WAVE_SHADER_CYCLES 0x011d
+#define ixSQ_WAVE_TTMP0 0x026c
+#define ixSQ_WAVE_TTMP1 0x026d
+#define ixSQ_WAVE_TTMP2 0x026e
+#define ixSQ_WAVE_TTMP3 0x026f
+#define ixSQ_WAVE_TTMP4 0x0270
+#define ixSQ_WAVE_TTMP5 0x0271
+#define ixSQ_WAVE_TTMP6 0x0272
+#define ixSQ_WAVE_TTMP7 0x0273
+#define ixSQ_WAVE_TTMP8 0x0274
+#define ixSQ_WAVE_TTMP9 0x0275
+#define ixSQ_WAVE_TTMP10 0x0276
+#define ixSQ_WAVE_TTMP11 0x0277
+#define ixSQ_WAVE_TTMP12 0x0278
+#define ixSQ_WAVE_TTMP13 0x0279
+#define ixSQ_WAVE_TTMP14 0x027a
+#define ixSQ_WAVE_TTMP15 0x027b
+#define ixSQ_WAVE_M0 0x027d
+#define ixSQ_WAVE_EXEC_LO 0x027e
+#define ixSQ_WAVE_EXEC_HI 0x027f
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
new file mode 100644
index 000000000000..ae3ef8a9e702
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
@@ -0,0 +1,44640 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_11_0_3_SH_MASK_HEADER
+#define _gc_11_0_3_SH_MASK_HEADER
+
+
+// addressBlock: gc_sdma0_sdma0dec
+//SDMA0_DEC_START
+#define SDMA0_DEC_START__START__SHIFT 0x0
+#define SDMA0_DEC_START__START_MASK 0xFFFFFFFFL
+//SDMA0_F32_MISC_CNTL
+#define SDMA0_F32_MISC_CNTL__F32_WAKEUP__SHIFT 0x0
+#define SDMA0_F32_MISC_CNTL__F32_WAKEUP_MASK 0x00000001L
+//SDMA0_GLOBAL_TIMESTAMP_LO
+#define SDMA0_GLOBAL_TIMESTAMP_LO__DATA__SHIFT 0x0
+#define SDMA0_GLOBAL_TIMESTAMP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_GLOBAL_TIMESTAMP_HI
+#define SDMA0_GLOBAL_TIMESTAMP_HI__DATA__SHIFT 0x0
+#define SDMA0_GLOBAL_TIMESTAMP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_POWER_CNTL
+#define SDMA0_POWER_CNTL__LS_ENABLE__SHIFT 0x8
+#define SDMA0_POWER_CNTL__LS_ENABLE_MASK 0x00000100L
+//SDMA0_CNTL
+#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA0_CNTL__PIO_DONE_ACK_ENABLE__SHIFT 0x6
+#define SDMA0_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE__SHIFT 0x8
+#define SDMA0_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x9
+#define SDMA0_CNTL__CP_MES_INT_ENABLE__SHIFT 0xa
+#define SDMA0_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA0_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA0_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA0_CNTL__CH_PERFCNT_ENABLE__SHIFT 0x10
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA0_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA0_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA0_CNTL__PIO_DONE_ACK_ENABLE_MASK 0x00000040L
+#define SDMA0_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE_MASK 0x00000100L
+#define SDMA0_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000200L
+#define SDMA0_CNTL__CP_MES_INT_ENABLE_MASK 0x00000400L
+#define SDMA0_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA0_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA0_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA0_CNTL__CH_PERFCNT_ENABLE_MASK 0x00010000L
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA0_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA0_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA0_CHICKEN_BITS__BACK_COMPAT_ENABLE__SHIFT 0x3
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x5
+#define SDMA0_CHICKEN_BITS__RD_BURST__SHIFT 0x6
+#define SDMA0_CHICKEN_BITS__WR_BURST__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE__SHIFT 0xa
+#define SDMA0_CHICKEN_BITS__WR_COMBINE_256B_ENABLE__SHIFT 0xe
+#define SDMA0_CHICKEN_BITS__RD_COMBINE_256B_ENABLE__SHIFT 0xf
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA0_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG__SHIFT 0x13
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG__SHIFT 0x15
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG__SHIFT 0x16
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x18
+#define SDMA0_CHICKEN_BITS__SW_FREEZE_ENABLE__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x1a
+#define SDMA0_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA0_CHICKEN_BITS__BACK_COMPAT_ENABLE_MASK 0x00000008L
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00000020L
+#define SDMA0_CHICKEN_BITS__RD_BURST_MASK 0x000000C0L
+#define SDMA0_CHICKEN_BITS__WR_BURST_MASK 0x00000300L
+#define SDMA0_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE_MASK 0x00003C00L
+#define SDMA0_CHICKEN_BITS__WR_COMBINE_256B_ENABLE_MASK 0x00004000L
+#define SDMA0_CHICKEN_BITS__RD_COMBINE_256B_ENABLE_MASK 0x00008000L
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA0_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG_MASK 0x00080000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG_MASK 0x00100000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG_MASK 0x00200000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG_MASK 0x00400000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG_MASK 0x00800000L
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x01000000L
+#define SDMA0_CHICKEN_BITS__SW_FREEZE_ENABLE_MASK 0x02000000L
+#define SDMA0_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL_MASK 0x04000000L
+#define SDMA0_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA0_GB_ADDR_CONFIG
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA0_GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA0_GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA0_GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA0_GB_ADDR_CONFIG_READ
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA0_RB_RPTR_FETCH
+#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA0_RB_RPTR_FETCH_HI
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA0_IB_OFFSET_FETCH
+#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PROGRAM
+#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA0_STATUS_REG
+#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA0_STATUS_REG__CGCG_FENCE__SHIFT 0xb
+#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA0_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA0_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA0_STATUS_REG__CGCG_FENCE_MASK 0x00000800L
+#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA0_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA0_STATUS1_REG
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA0_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA0_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xd
+#define SDMA0_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define SDMA0_STATUS1_REG__SEC_INTR_STATUS__SHIFT 0x11
+#define SDMA0_STATUS1_REG__WPTR_POLL_IDLE__SHIFT 0x12
+#define SDMA0_STATUS1_REG__SDMA_IDLE__SHIFT 0x13
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA0_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define SDMA0_STATUS1_REG__EX_START_MASK 0x00002000L
+#define SDMA0_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00004000L
+#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+#define SDMA0_STATUS1_REG__SEC_INTR_STATUS_MASK 0x00020000L
+#define SDMA0_STATUS1_REG__WPTR_POLL_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS1_REG__SDMA_IDLE_MASK 0x00080000L
+//SDMA0_CNTL1
+#define SDMA0_CNTL1__WPTR_POLL_FREQUENCY__SHIFT 0x2
+#define SDMA0_CNTL1__WPTR_POLL_FREQUENCY_MASK 0x0000FFFCL
+//SDMA0_HBM_PAGE_CONFIG
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA0_UCODE_CHECKSUM
+#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_FREEZE
+#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA0_PROCESS_QUANTUM0
+#define SDMA0_PROCESS_QUANTUM0__PROCESS0_QUANTUM__SHIFT 0x0
+#define SDMA0_PROCESS_QUANTUM0__PROCESS1_QUANTUM__SHIFT 0x8
+#define SDMA0_PROCESS_QUANTUM0__PROCESS2_QUANTUM__SHIFT 0x10
+#define SDMA0_PROCESS_QUANTUM0__PROCESS3_QUANTUM__SHIFT 0x18
+#define SDMA0_PROCESS_QUANTUM0__PROCESS0_QUANTUM_MASK 0x000000FFL
+#define SDMA0_PROCESS_QUANTUM0__PROCESS1_QUANTUM_MASK 0x0000FF00L
+#define SDMA0_PROCESS_QUANTUM0__PROCESS2_QUANTUM_MASK 0x00FF0000L
+#define SDMA0_PROCESS_QUANTUM0__PROCESS3_QUANTUM_MASK 0xFF000000L
+//SDMA0_PROCESS_QUANTUM1
+#define SDMA0_PROCESS_QUANTUM1__PROCESS4_QUANTUM__SHIFT 0x0
+#define SDMA0_PROCESS_QUANTUM1__PROCESS5_QUANTUM__SHIFT 0x8
+#define SDMA0_PROCESS_QUANTUM1__PROCESS6_QUANTUM__SHIFT 0x10
+#define SDMA0_PROCESS_QUANTUM1__PROCESS7_QUANTUM__SHIFT 0x18
+#define SDMA0_PROCESS_QUANTUM1__PROCESS4_QUANTUM_MASK 0x000000FFL
+#define SDMA0_PROCESS_QUANTUM1__PROCESS5_QUANTUM_MASK 0x0000FF00L
+#define SDMA0_PROCESS_QUANTUM1__PROCESS6_QUANTUM_MASK 0x00FF0000L
+#define SDMA0_PROCESS_QUANTUM1__PROCESS7_QUANTUM_MASK 0xFF000000L
+//SDMA0_WATCHDOG_CNTL
+#define SDMA0_WATCHDOG_CNTL__QUEUE_HANG_COUNT__SHIFT 0x0
+#define SDMA0_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT__SHIFT 0x8
+#define SDMA0_WATCHDOG_CNTL__QUEUE_HANG_COUNT_MASK 0x000000FFL
+#define SDMA0_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT_MASK 0x0000FF00L
+//SDMA0_QUEUE_STATUS0
+#define SDMA0_QUEUE_STATUS0__QUEUE0_STATUS__SHIFT 0x0
+#define SDMA0_QUEUE_STATUS0__QUEUE1_STATUS__SHIFT 0x4
+#define SDMA0_QUEUE_STATUS0__QUEUE2_STATUS__SHIFT 0x8
+#define SDMA0_QUEUE_STATUS0__QUEUE3_STATUS__SHIFT 0xc
+#define SDMA0_QUEUE_STATUS0__QUEUE4_STATUS__SHIFT 0x10
+#define SDMA0_QUEUE_STATUS0__QUEUE5_STATUS__SHIFT 0x14
+#define SDMA0_QUEUE_STATUS0__QUEUE6_STATUS__SHIFT 0x18
+#define SDMA0_QUEUE_STATUS0__QUEUE7_STATUS__SHIFT 0x1c
+#define SDMA0_QUEUE_STATUS0__QUEUE0_STATUS_MASK 0x0000000FL
+#define SDMA0_QUEUE_STATUS0__QUEUE1_STATUS_MASK 0x000000F0L
+#define SDMA0_QUEUE_STATUS0__QUEUE2_STATUS_MASK 0x00000F00L
+#define SDMA0_QUEUE_STATUS0__QUEUE3_STATUS_MASK 0x0000F000L
+#define SDMA0_QUEUE_STATUS0__QUEUE4_STATUS_MASK 0x000F0000L
+#define SDMA0_QUEUE_STATUS0__QUEUE5_STATUS_MASK 0x00F00000L
+#define SDMA0_QUEUE_STATUS0__QUEUE6_STATUS_MASK 0x0F000000L
+#define SDMA0_QUEUE_STATUS0__QUEUE7_STATUS_MASK 0xF0000000L
+//SDMA0_EDC_CONFIG
+#define SDMA0_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA0_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA0_BA_THRESHOLD
+#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA0_ID
+#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA0_VERSION
+#define SDMA0_VERSION__MINVER__SHIFT 0x0
+#define SDMA0_VERSION__MAJVER__SHIFT 0x8
+#define SDMA0_VERSION__REV__SHIFT 0x10
+#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA0_VERSION__REV_MASK 0x003F0000L
+//SDMA0_EDC_COUNTER
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA0_EDC_COUNTER_CLEAR
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA0_STATUS2_REG
+#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS2_REG__TH0F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS2_REG__TH0F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA0_ATOMIC_CNTL
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA0_ATOMIC_PREOP_LO
+#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_ATOMIC_PREOP_HI
+#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_CNTL
+#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x0
+#define SDMA0_UTCL1_CNTL__PAGE_WAIT_DELAY__SHIFT 0x5
+#define SDMA0_UTCL1_CNTL__RESP_MODE__SHIFT 0x9
+#define SDMA0_UTCL1_CNTL__FORCE_INVALIDATION__SHIFT 0xe
+#define SDMA0_UTCL1_CNTL__FORCE_INVREQ_HEAVY__SHIFT 0xf
+#define SDMA0_UTCL1_CNTL__WR_EXE_PERMS_CTRL__SHIFT 0x10
+#define SDMA0_UTCL1_CNTL__RD_EXE_PERMS_CTRL__SHIFT 0x11
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0x12
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x0000001FL
+#define SDMA0_UTCL1_CNTL__PAGE_WAIT_DELAY_MASK 0x000001E0L
+#define SDMA0_UTCL1_CNTL__RESP_MODE_MASK 0x00000600L
+#define SDMA0_UTCL1_CNTL__FORCE_INVALIDATION_MASK 0x00004000L
+#define SDMA0_UTCL1_CNTL__FORCE_INVREQ_HEAVY_MASK 0x00008000L
+#define SDMA0_UTCL1_CNTL__WR_EXE_PERMS_CTRL_MASK 0x00010000L
+#define SDMA0_UTCL1_CNTL__RD_EXE_PERMS_CTRL_MASK 0x00020000L
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x003C0000L
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x3F000000L
+//SDMA0_UTCL1_WATERMK
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK__SHIFT 0x0
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP__SHIFT 0x4
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK__SHIFT 0x6
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP__SHIFT 0xa
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK__SHIFT 0xc
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP__SHIFT 0x10
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK__SHIFT 0x12
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP__SHIFT 0x16
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK_MASK 0x0000000FL
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP_MASK 0x00000030L
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK_MASK 0x000003C0L
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP_MASK 0x00000C00L
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK_MASK 0x0000F000L
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP_MASK 0x00030000L
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK_MASK 0x003C0000L
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP_MASK 0x00C00000L
+//SDMA0_UTCL1_TIMEOUT
+#define SDMA0_UTCL1_TIMEOUT__XNACK_LIMIT__SHIFT 0x0
+#define SDMA0_UTCL1_TIMEOUT__XNACK_LIMIT_MASK 0x0000FFFFL
+//SDMA0_UTCL1_PAGE
+#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA0_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0xa
+#define SDMA0_UTCL1_PAGE__USE_IO__SHIFT 0xb
+#define SDMA0_UTCL1_PAGE__RD_L2_POLICY__SHIFT 0xc
+#define SDMA0_UTCL1_PAGE__WR_L2_POLICY__SHIFT 0xe
+#define SDMA0_UTCL1_PAGE__DMA_PAGE_SIZE__SHIFT 0x10
+#define SDMA0_UTCL1_PAGE__USE_BC__SHIFT 0x16
+#define SDMA0_UTCL1_PAGE__ADDR_IS_PA__SHIFT 0x17
+#define SDMA0_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0x18
+#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA0_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000003C0L
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000400L
+#define SDMA0_UTCL1_PAGE__USE_IO_MASK 0x00000800L
+#define SDMA0_UTCL1_PAGE__RD_L2_POLICY_MASK 0x00003000L
+#define SDMA0_UTCL1_PAGE__WR_L2_POLICY_MASK 0x0000C000L
+#define SDMA0_UTCL1_PAGE__DMA_PAGE_SIZE_MASK 0x003F0000L
+#define SDMA0_UTCL1_PAGE__USE_BC_MASK 0x00400000L
+#define SDMA0_UTCL1_PAGE__ADDR_IS_PA_MASK 0x00800000L
+#define SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK 0x01000000L
+//SDMA0_UTCL1_RD_STATUS
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_RD_STATUS__RESERVED0__SHIFT 0x5
+#define SDMA0_UTCL1_RD_STATUS__RESERVED1__SHIFT 0x6
+#define SDMA0_UTCL1_RD_STATUS__META_Q_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_RD_STATUS__RESERVED2__SHIFT 0xd
+#define SDMA0_UTCL1_RD_STATUS__RESERVED3__SHIFT 0xe
+#define SDMA0_UTCL1_RD_STATUS__META_Q_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_RD_STATUS__RD_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA0_UTCL1_RD_STATUS__RD_REQRET_IDLE__SHIFT 0x11
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_IDLE__SHIFT 0x12
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_TYPE__SHIFT 0x13
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REG_READY__SHIFT 0x17
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA0_UTCL1_RD_STATUS__RESERVED4__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_IN_RTR__SHIFT 0x1c
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_RD_STATUS__INV_BUSY__SHIFT 0x1e
+#define SDMA0_UTCL1_RD_STATUS__DBIT_REQ_IDLE__SHIFT 0x1f
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED0_MASK 0x00000020L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED1_MASK 0x00000040L
+#define SDMA0_UTCL1_RD_STATUS__META_Q_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED2_MASK 0x00002000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED3_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_STATUS__META_Q_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_STATUS__RD_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQRET_IDLE_MASK 0x00020000L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_IDLE_MASK 0x00040000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_TYPE_MASK 0x00180000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED4_MASK 0x04000000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_IN_RTR_MASK 0x10000000L
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_RD_STATUS__INV_BUSY_MASK 0x40000000L
+#define SDMA0_UTCL1_RD_STATUS__DBIT_REQ_IDLE_MASK 0x80000000L
+//SDMA0_UTCL1_WR_STATUS
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_WR_STATUS__RESERVED0__SHIFT 0x7
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0xf
+#define SDMA0_UTCL1_WR_STATUS__WR_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA0_UTCL1_WR_STATUS__WR_REQRET_IDLE__SHIFT 0x11
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_IDLE__SHIFT 0x12
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_TYPE__SHIFT 0x13
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REG_READY__SHIFT 0x17
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL__SHIFT 0x1a
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_RTR__SHIFT 0x1c
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR__SHIFT 0x1e
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR__SHIFT 0x1f
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_WR_STATUS__RESERVED0_MASK 0x00000080L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_STATUS__WR_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQRET_IDLE_MASK 0x00020000L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_IDLE_MASK 0x00040000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_TYPE_MASK 0x00180000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL_MASK 0x04000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_RTR_MASK 0x10000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR_MASK 0x40000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR_MASK 0x80000000L
+//SDMA0_UTCL1_INV0
+#define SDMA0_UTCL1_INV0__INV_PROC_BUSY__SHIFT 0x0
+#define SDMA0_UTCL1_INV0__GPUVM_FRAG_SIZE__SHIFT 0x1
+#define SDMA0_UTCL1_INV0__GPUVM_VMID__SHIFT 0x7
+#define SDMA0_UTCL1_INV0__GPUVM_MODE__SHIFT 0xb
+#define SDMA0_UTCL1_INV0__GPUVM_HIGH__SHIFT 0xd
+#define SDMA0_UTCL1_INV0__GPUVM_TAG__SHIFT 0xe
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_HIGH__SHIFT 0x12
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_LOW__SHIFT 0x16
+#define SDMA0_UTCL1_INV0__INV_TYPE__SHIFT 0x1a
+#define SDMA0_UTCL1_INV0__INV_PROC_BUSY_MASK 0x00000001L
+#define SDMA0_UTCL1_INV0__GPUVM_FRAG_SIZE_MASK 0x0000007EL
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_MASK 0x00000780L
+#define SDMA0_UTCL1_INV0__GPUVM_MODE_MASK 0x00001800L
+#define SDMA0_UTCL1_INV0__GPUVM_HIGH_MASK 0x00002000L
+#define SDMA0_UTCL1_INV0__GPUVM_TAG_MASK 0x0003C000L
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_HIGH_MASK 0x003C0000L
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_LOW_MASK 0x03C00000L
+#define SDMA0_UTCL1_INV0__INV_TYPE_MASK 0x0C000000L
+//SDMA0_UTCL1_INV1
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_INV2
+#define SDMA0_UTCL1_INV2__CPF_VMID__SHIFT 0x0
+#define SDMA0_UTCL1_INV2__CPF_FLUSH_TYPE__SHIFT 0x10
+#define SDMA0_UTCL1_INV2__CPF_FRAG_SIZE__SHIFT 0x11
+#define SDMA0_UTCL1_INV2__CPF_VMID_MASK 0x0000FFFFL
+#define SDMA0_UTCL1_INV2__CPF_FLUSH_TYPE_MASK 0x00010000L
+#define SDMA0_UTCL1_INV2__CPF_FRAG_SIZE_MASK 0x007E0000L
+//SDMA0_UTCL1_RD_XNACK0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK1
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA0_UTCL1_WR_XNACK0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_WR_XNACK1
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA0_RELAX_ORDERING_LUT
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS_2
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA0_CHICKEN_BITS_2__UCODE_BUF_DS_EN__SHIFT 0x6
+#define SDMA0_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP__SHIFT 0x7
+#define SDMA0_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS_2__RESERVED_14_12__SHIFT 0xc
+#define SDMA0_CHICKEN_BITS_2__RESERVED_15__SHIFT 0xf
+#define SDMA0_CHICKEN_BITS_2__RB_FIFO_WATERMARK__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS_2__IB_FIFO_WATERMARK__SHIFT 0x12
+#define SDMA0_CHICKEN_BITS_2__RESERVED_22_20__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS_2__CH_RD_WATERMARK__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB__SHIFT 0x1e
+#define SDMA0_CHICKEN_BITS_2__PIO_VFID_SOURCE__SHIFT 0x1f
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA0_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+#define SDMA0_CHICKEN_BITS_2__UCODE_BUF_DS_EN_MASK 0x00000040L
+#define SDMA0_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP_MASK 0x00000080L
+#define SDMA0_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING_MASK 0x00000F00L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_14_12_MASK 0x00007000L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_15_MASK 0x00008000L
+#define SDMA0_CHICKEN_BITS_2__RB_FIFO_WATERMARK_MASK 0x00030000L
+#define SDMA0_CHICKEN_BITS_2__IB_FIFO_WATERMARK_MASK 0x000C0000L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_22_20_MASK 0x00700000L
+#define SDMA0_CHICKEN_BITS_2__CH_RD_WATERMARK_MASK 0x01800000L
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_MASK 0x3E000000L
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB_MASK 0x40000000L
+#define SDMA0_CHICKEN_BITS_2__PIO_VFID_SOURCE_MASK 0x80000000L
+//SDMA0_STATUS3_REG
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA0_STATUS3_REG__AQL_PREV_CMD_IDLE__SHIFT 0x15
+#define SDMA0_STATUS3_REG__TLBI_IDLE__SHIFT 0x16
+#define SDMA0_STATUS3_REG__GCR_IDLE__SHIFT 0x17
+#define SDMA0_STATUS3_REG__INVREQ_IDLE__SHIFT 0x18
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x19
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x1a
+#define SDMA0_STATUS3_REG__TMZ_MTYPE_STATUS__SHIFT 0x1e
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA0_STATUS3_REG__AQL_PREV_CMD_IDLE_MASK 0x00200000L
+#define SDMA0_STATUS3_REG__TLBI_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS3_REG__GCR_IDLE_MASK 0x00800000L
+#define SDMA0_STATUS3_REG__INVREQ_IDLE_MASK 0x01000000L
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x02000000L
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x3C000000L
+#define SDMA0_STATUS3_REG__TMZ_MTYPE_STATUS_MASK 0xC0000000L
+//SDMA0_PHYSICAL_ADDR_LO
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA0_PHYSICAL_ADDR_HI
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA0_GLOBAL_QUANTUM
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM__SHIFT 0x0
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM__SHIFT 0x8
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM_MASK 0x000000FFL
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM_MASK 0x0000FF00L
+//SDMA0_ERROR_LOG
+#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA0_PUB_DUMMY_REG0
+#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG1
+#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG2
+#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG3
+#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_F32_COUNTER
+#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_CRD_CNTL
+#define SDMA0_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA0_CRD_CNTL__CH_WRREQ_CREDIT__SHIFT 0x13
+#define SDMA0_CRD_CNTL__CH_RDREQ_CREDIT__SHIFT 0x19
+#define SDMA0_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+#define SDMA0_CRD_CNTL__CH_WRREQ_CREDIT_MASK 0x01F80000L
+#define SDMA0_CRD_CNTL__CH_RDREQ_CREDIT_MASK 0x7E000000L
+//SDMA0_RLC_CGCG_CTRL
+#define SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE__SHIFT 0x1
+#define SDMA0_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS__SHIFT 0x10
+#define SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS_MASK 0xFFFF0000L
+//SDMA0_GPU_IOV_VIOLATION_LOG
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA0_AQL_STATUS
+#define SDMA0_AQL_STATUS__COMPLETE_SIGNAL_EMPTY__SHIFT 0x0
+#define SDMA0_AQL_STATUS__INVALID_CMD_EMPTY__SHIFT 0x1
+#define SDMA0_AQL_STATUS__COMPLETE_SIGNAL_EMPTY_MASK 0x00000001L
+#define SDMA0_AQL_STATUS__INVALID_CMD_EMPTY_MASK 0x00000002L
+//SDMA0_EA_DBIT_ADDR_DATA
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_EA_DBIT_ADDR_INDEX
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA0_TLBI_GCR_CNTL
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CMD_DW__SHIFT 0x0
+#define SDMA0_TLBI_GCR_CNTL__GCR_CMD_DW__SHIFT 0x4
+#define SDMA0_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE__SHIFT 0x8
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CREDIT__SHIFT 0x10
+#define SDMA0_TLBI_GCR_CNTL__GCR_CREDIT__SHIFT 0x18
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CMD_DW_MASK 0x0000000FL
+#define SDMA0_TLBI_GCR_CNTL__GCR_CMD_DW_MASK 0x000000F0L
+#define SDMA0_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE_MASK 0x00000F00L
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CREDIT_MASK 0x00FF0000L
+#define SDMA0_TLBI_GCR_CNTL__GCR_CREDIT_MASK 0xFF000000L
+//SDMA0_TILING_CONFIG
+#define SDMA0_TILING_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define SDMA0_TILING_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//SDMA0_HASH
+#define SDMA0_HASH__CHANNEL_BITS__SHIFT 0x0
+#define SDMA0_HASH__BANK_BITS__SHIFT 0x4
+#define SDMA0_HASH__CHANNEL_XOR_COUNT__SHIFT 0x8
+#define SDMA0_HASH__BANK_XOR_COUNT__SHIFT 0xc
+#define SDMA0_HASH__CHANNEL_BITS_MASK 0x00000007L
+#define SDMA0_HASH__BANK_BITS_MASK 0x00000070L
+#define SDMA0_HASH__CHANNEL_XOR_COUNT_MASK 0x00000700L
+#define SDMA0_HASH__BANK_XOR_COUNT_MASK 0x00007000L
+//SDMA0_INT_STATUS
+#define SDMA0_INT_STATUS__DATA__SHIFT 0x0
+#define SDMA0_INT_STATUS__DATA_MASK 0xFFFFFFFFL
+//SDMA0_GPU_IOV_VIOLATION_LOG2
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA0_HOLE_ADDR_LO
+#define SDMA0_HOLE_ADDR_LO__VALUE__SHIFT 0x0
+#define SDMA0_HOLE_ADDR_LO__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_HOLE_ADDR_HI
+#define SDMA0_HOLE_ADDR_HI__VALUE__SHIFT 0x0
+#define SDMA0_HOLE_ADDR_HI__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_CLOCK_GATING_STATUS
+#define SDMA0_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS__SHIFT 0x0
+#define SDMA0_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS__SHIFT 0x2
+#define SDMA0_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS__SHIFT 0x3
+#define SDMA0_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS__SHIFT 0x4
+#define SDMA0_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS__SHIFT 0x5
+#define SDMA0_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS__SHIFT 0x6
+#define SDMA0_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS_MASK 0x00000001L
+#define SDMA0_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS_MASK 0x00000004L
+#define SDMA0_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS_MASK 0x00000008L
+#define SDMA0_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS_MASK 0x00000010L
+#define SDMA0_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS_MASK 0x00000020L
+#define SDMA0_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS_MASK 0x00000040L
+//SDMA0_STATUS4_REG
+#define SDMA0_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA0_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA0_STATUS4_REG__CH_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA0_STATUS4_REG__CH_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA0_STATUS4_REG__GCR_OUTSTANDING__SHIFT 0x6
+#define SDMA0_STATUS4_REG__TLBI_OUTSTANDING__SHIFT 0x7
+#define SDMA0_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x8
+#define SDMA0_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x9
+#define SDMA0_STATUS4_REG__REG_POLLING__SHIFT 0xa
+#define SDMA0_STATUS4_REG__MEM_POLLING__SHIFT 0xb
+#define SDMA0_STATUS4_REG__RESERVED_13_12__SHIFT 0xc
+#define SDMA0_STATUS4_REG__RESERVED_15_14__SHIFT 0xe
+#define SDMA0_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA0_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x14
+#define SDMA0_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x15
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_FAULT__SHIFT 0x16
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_NULL__SHIFT 0x17
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT__SHIFT 0x18
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_FAULT__SHIFT 0x19
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_NULL__SHIFT 0x1a
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT__SHIFT 0x1b
+#define SDMA0_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA0_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA0_STATUS4_REG__CH_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA0_STATUS4_REG__CH_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA0_STATUS4_REG__GCR_OUTSTANDING_MASK 0x00000040L
+#define SDMA0_STATUS4_REG__TLBI_OUTSTANDING_MASK 0x00000080L
+#define SDMA0_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000100L
+#define SDMA0_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000200L
+#define SDMA0_STATUS4_REG__REG_POLLING_MASK 0x00000400L
+#define SDMA0_STATUS4_REG__MEM_POLLING_MASK 0x00000800L
+#define SDMA0_STATUS4_REG__RESERVED_13_12_MASK 0x00003000L
+#define SDMA0_STATUS4_REG__RESERVED_15_14_MASK 0x0000C000L
+#define SDMA0_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA0_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00100000L
+#define SDMA0_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00200000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_FAULT_MASK 0x00400000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_NULL_MASK 0x00800000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT_MASK 0x01000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_FAULT_MASK 0x02000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_NULL_MASK 0x04000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT_MASK 0x08000000L
+//SDMA0_SCRATCH_RAM_DATA
+#define SDMA0_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA0_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA0_SCRATCH_RAM_ADDR
+#define SDMA0_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA0_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA0_TIMESTAMP_CNTL
+#define SDMA0_TIMESTAMP_CNTL__CAPTURE__SHIFT 0x0
+#define SDMA0_TIMESTAMP_CNTL__CAPTURE_MASK 0x00000001L
+//SDMA0_STATUS5_REG
+#define SDMA0_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS__SHIFT 0x0
+#define SDMA0_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS__SHIFT 0x1
+#define SDMA0_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS__SHIFT 0x2
+#define SDMA0_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS__SHIFT 0x3
+#define SDMA0_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS__SHIFT 0x4
+#define SDMA0_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS__SHIFT 0x5
+#define SDMA0_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS__SHIFT 0x6
+#define SDMA0_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS__SHIFT 0x7
+#define SDMA0_STATUS5_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA0_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x14
+#define SDMA0_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x15
+#define SDMA0_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x16
+#define SDMA0_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x17
+#define SDMA0_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x18
+#define SDMA0_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x19
+#define SDMA0_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1a
+#define SDMA0_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1b
+#define SDMA0_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS_MASK 0x00000001L
+#define SDMA0_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS_MASK 0x00000002L
+#define SDMA0_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS_MASK 0x00000004L
+#define SDMA0_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS_MASK 0x00000008L
+#define SDMA0_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS_MASK 0x00000010L
+#define SDMA0_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS_MASK 0x00000020L
+#define SDMA0_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS_MASK 0x00000040L
+#define SDMA0_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS_MASK 0x00000080L
+#define SDMA0_STATUS5_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA0_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00100000L
+#define SDMA0_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00200000L
+#define SDMA0_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00400000L
+#define SDMA0_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00800000L
+#define SDMA0_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION_MASK 0x01000000L
+#define SDMA0_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION_MASK 0x02000000L
+#define SDMA0_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION_MASK 0x04000000L
+#define SDMA0_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION_MASK 0x08000000L
+//SDMA0_QUEUE_RESET_REQ
+#define SDMA0_QUEUE_RESET_REQ__QUEUE0_RESET__SHIFT 0x0
+#define SDMA0_QUEUE_RESET_REQ__QUEUE1_RESET__SHIFT 0x1
+#define SDMA0_QUEUE_RESET_REQ__QUEUE2_RESET__SHIFT 0x2
+#define SDMA0_QUEUE_RESET_REQ__QUEUE3_RESET__SHIFT 0x3
+#define SDMA0_QUEUE_RESET_REQ__QUEUE4_RESET__SHIFT 0x4
+#define SDMA0_QUEUE_RESET_REQ__QUEUE5_RESET__SHIFT 0x5
+#define SDMA0_QUEUE_RESET_REQ__QUEUE6_RESET__SHIFT 0x6
+#define SDMA0_QUEUE_RESET_REQ__QUEUE7_RESET__SHIFT 0x7
+#define SDMA0_QUEUE_RESET_REQ__RESERVED__SHIFT 0x8
+#define SDMA0_QUEUE_RESET_REQ__QUEUE0_RESET_MASK 0x00000001L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE1_RESET_MASK 0x00000002L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE2_RESET_MASK 0x00000004L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE3_RESET_MASK 0x00000008L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE4_RESET_MASK 0x00000010L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE5_RESET_MASK 0x00000020L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE6_RESET_MASK 0x00000040L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE7_RESET_MASK 0x00000080L
+#define SDMA0_QUEUE_RESET_REQ__RESERVED_MASK 0xFFFFFF00L
+//SDMA0_STATUS6_REG
+#define SDMA0_STATUS6_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS6_REG__TH1F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS6_REG__TH1_EXCEPTION__SHIFT 0x10
+#define SDMA0_STATUS6_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS6_REG__TH1F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA0_STATUS6_REG__TH1_EXCEPTION_MASK 0xFFFF0000L
+//SDMA0_UCODE1_CHECKSUM
+#define SDMA0_UCODE1_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE1_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_CE_CTRL
+#define SDMA0_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA0_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA0_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA0_CE_CTRL__CE_DCC_READ_128B_ENABLE__SHIFT 0x8
+#define SDMA0_CE_CTRL__RESERVED__SHIFT 0x9
+#define SDMA0_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA0_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA0_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA0_CE_CTRL__CE_DCC_READ_128B_ENABLE_MASK 0x00000100L
+#define SDMA0_CE_CTRL__RESERVED_MASK 0xFFFFFE00L
+//SDMA0_FED_STATUS
+#define SDMA0_FED_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA0_FED_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA0_FED_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA0_FED_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA0_FED_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA0_FED_STATUS__COPY_METADATA_ECC__SHIFT 0x5
+#define SDMA0_FED_STATUS__SELFLOAD_UCODE_ECC__SHIFT 0x6
+#define SDMA0_FED_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA0_FED_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA0_FED_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA0_FED_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA0_FED_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA0_FED_STATUS__COPY_METADATA_ECC_MASK 0x00000020L
+#define SDMA0_FED_STATUS__SELFLOAD_UCODE_ECC_MASK 0x00000040L
+//SDMA0_QUEUE0_RB_CNTL
+#define SDMA0_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE0_RB_BASE
+#define SDMA0_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_BASE_HI
+#define SDMA0_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE0_RB_RPTR
+#define SDMA0_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_HI
+#define SDMA0_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR
+#define SDMA0_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_HI
+#define SDMA0_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_IB_CNTL
+#define SDMA0_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE0_IB_RPTR
+#define SDMA0_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE0_IB_OFFSET
+#define SDMA0_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE0_IB_BASE_LO
+#define SDMA0_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE0_IB_BASE_HI
+#define SDMA0_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_IB_SIZE
+#define SDMA0_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE0_SKIP_CNTL
+#define SDMA0_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE0_CONTEXT_STATUS
+#define SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE0_CONTEXT_STATUS__USE_IB__SHIFT 0x1
+#define SDMA0_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__USE_IB_MASK 0x00000002L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE0_DOORBELL
+#define SDMA0_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE0_DOORBELL_LOG
+#define SDMA0_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_DOORBELL_OFFSET
+#define SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE0_CSA_ADDR_LO
+#define SDMA0_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_CSA_ADDR_HI
+#define SDMA0_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_SCHEDULE_CNTL
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE0_IB_SUB_REMAIN
+#define SDMA0_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE0_PREEMPT
+#define SDMA0_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE0_DUMMY_REG
+#define SDMA0_QUEUE0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_RB_AQL_CNTL
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE0_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE0_RB_PREEMPT
+#define SDMA0_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE0_MIDCMD_DATA0
+#define SDMA0_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA1
+#define SDMA0_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA2
+#define SDMA0_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA3
+#define SDMA0_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA4
+#define SDMA0_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA5
+#define SDMA0_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA6
+#define SDMA0_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA7
+#define SDMA0_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA8
+#define SDMA0_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA9
+#define SDMA0_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA10
+#define SDMA0_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_CNTL
+#define SDMA0_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE1_RB_CNTL
+#define SDMA0_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE1_RB_BASE
+#define SDMA0_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_BASE_HI
+#define SDMA0_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE1_RB_RPTR
+#define SDMA0_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_HI
+#define SDMA0_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR
+#define SDMA0_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_HI
+#define SDMA0_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_IB_CNTL
+#define SDMA0_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE1_IB_RPTR
+#define SDMA0_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE1_IB_OFFSET
+#define SDMA0_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE1_IB_BASE_LO
+#define SDMA0_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE1_IB_BASE_HI
+#define SDMA0_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_IB_SIZE
+#define SDMA0_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE1_SKIP_CNTL
+#define SDMA0_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE1_CONTEXT_STATUS
+#define SDMA0_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE1_DOORBELL
+#define SDMA0_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE1_DOORBELL_LOG
+#define SDMA0_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_DOORBELL_OFFSET
+#define SDMA0_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE1_CSA_ADDR_LO
+#define SDMA0_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_CSA_ADDR_HI
+#define SDMA0_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_SCHEDULE_CNTL
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE1_IB_SUB_REMAIN
+#define SDMA0_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE1_PREEMPT
+#define SDMA0_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE1_DUMMY_REG
+#define SDMA0_QUEUE1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_RB_AQL_CNTL
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE1_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE1_RB_PREEMPT
+#define SDMA0_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE1_MIDCMD_DATA0
+#define SDMA0_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA1
+#define SDMA0_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA2
+#define SDMA0_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA3
+#define SDMA0_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA4
+#define SDMA0_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA5
+#define SDMA0_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA6
+#define SDMA0_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA7
+#define SDMA0_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA8
+#define SDMA0_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA9
+#define SDMA0_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA10
+#define SDMA0_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_CNTL
+#define SDMA0_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE2_RB_CNTL
+#define SDMA0_QUEUE2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE2_RB_BASE
+#define SDMA0_QUEUE2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_BASE_HI
+#define SDMA0_QUEUE2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE2_RB_RPTR
+#define SDMA0_QUEUE2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_HI
+#define SDMA0_QUEUE2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR
+#define SDMA0_QUEUE2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_HI
+#define SDMA0_QUEUE2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_IB_CNTL
+#define SDMA0_QUEUE2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE2_IB_RPTR
+#define SDMA0_QUEUE2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE2_IB_OFFSET
+#define SDMA0_QUEUE2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE2_IB_BASE_LO
+#define SDMA0_QUEUE2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE2_IB_BASE_HI
+#define SDMA0_QUEUE2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_IB_SIZE
+#define SDMA0_QUEUE2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE2_SKIP_CNTL
+#define SDMA0_QUEUE2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE2_CONTEXT_STATUS
+#define SDMA0_QUEUE2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE2_DOORBELL
+#define SDMA0_QUEUE2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE2_DOORBELL_LOG
+#define SDMA0_QUEUE2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_DOORBELL_OFFSET
+#define SDMA0_QUEUE2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE2_CSA_ADDR_LO
+#define SDMA0_QUEUE2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_CSA_ADDR_HI
+#define SDMA0_QUEUE2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_SCHEDULE_CNTL
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE2_IB_SUB_REMAIN
+#define SDMA0_QUEUE2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE2_PREEMPT
+#define SDMA0_QUEUE2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE2_DUMMY_REG
+#define SDMA0_QUEUE2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_RB_AQL_CNTL
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE2_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE2_RB_PREEMPT
+#define SDMA0_QUEUE2_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE2_MIDCMD_DATA0
+#define SDMA0_QUEUE2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA1
+#define SDMA0_QUEUE2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA2
+#define SDMA0_QUEUE2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA3
+#define SDMA0_QUEUE2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA4
+#define SDMA0_QUEUE2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA5
+#define SDMA0_QUEUE2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA6
+#define SDMA0_QUEUE2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA7
+#define SDMA0_QUEUE2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA8
+#define SDMA0_QUEUE2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA9
+#define SDMA0_QUEUE2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA10
+#define SDMA0_QUEUE2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_CNTL
+#define SDMA0_QUEUE2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE3_RB_CNTL
+#define SDMA0_QUEUE3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE3_RB_BASE
+#define SDMA0_QUEUE3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_BASE_HI
+#define SDMA0_QUEUE3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE3_RB_RPTR
+#define SDMA0_QUEUE3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_HI
+#define SDMA0_QUEUE3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR
+#define SDMA0_QUEUE3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_HI
+#define SDMA0_QUEUE3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_IB_CNTL
+#define SDMA0_QUEUE3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE3_IB_RPTR
+#define SDMA0_QUEUE3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE3_IB_OFFSET
+#define SDMA0_QUEUE3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE3_IB_BASE_LO
+#define SDMA0_QUEUE3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE3_IB_BASE_HI
+#define SDMA0_QUEUE3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_IB_SIZE
+#define SDMA0_QUEUE3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE3_SKIP_CNTL
+#define SDMA0_QUEUE3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE3_CONTEXT_STATUS
+#define SDMA0_QUEUE3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE3_DOORBELL
+#define SDMA0_QUEUE3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE3_DOORBELL_LOG
+#define SDMA0_QUEUE3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_DOORBELL_OFFSET
+#define SDMA0_QUEUE3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE3_CSA_ADDR_LO
+#define SDMA0_QUEUE3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_CSA_ADDR_HI
+#define SDMA0_QUEUE3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_SCHEDULE_CNTL
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE3_IB_SUB_REMAIN
+#define SDMA0_QUEUE3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE3_PREEMPT
+#define SDMA0_QUEUE3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE3_DUMMY_REG
+#define SDMA0_QUEUE3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_RB_AQL_CNTL
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE3_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE3_RB_PREEMPT
+#define SDMA0_QUEUE3_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE3_MIDCMD_DATA0
+#define SDMA0_QUEUE3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA1
+#define SDMA0_QUEUE3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA2
+#define SDMA0_QUEUE3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA3
+#define SDMA0_QUEUE3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA4
+#define SDMA0_QUEUE3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA5
+#define SDMA0_QUEUE3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA6
+#define SDMA0_QUEUE3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA7
+#define SDMA0_QUEUE3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA8
+#define SDMA0_QUEUE3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA9
+#define SDMA0_QUEUE3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA10
+#define SDMA0_QUEUE3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_CNTL
+#define SDMA0_QUEUE3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE4_RB_CNTL
+#define SDMA0_QUEUE4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE4_RB_BASE
+#define SDMA0_QUEUE4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_BASE_HI
+#define SDMA0_QUEUE4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE4_RB_RPTR
+#define SDMA0_QUEUE4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_HI
+#define SDMA0_QUEUE4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR
+#define SDMA0_QUEUE4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_HI
+#define SDMA0_QUEUE4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_IB_CNTL
+#define SDMA0_QUEUE4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE4_IB_RPTR
+#define SDMA0_QUEUE4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE4_IB_OFFSET
+#define SDMA0_QUEUE4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE4_IB_BASE_LO
+#define SDMA0_QUEUE4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE4_IB_BASE_HI
+#define SDMA0_QUEUE4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_IB_SIZE
+#define SDMA0_QUEUE4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE4_SKIP_CNTL
+#define SDMA0_QUEUE4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE4_CONTEXT_STATUS
+#define SDMA0_QUEUE4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE4_DOORBELL
+#define SDMA0_QUEUE4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE4_DOORBELL_LOG
+#define SDMA0_QUEUE4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_DOORBELL_OFFSET
+#define SDMA0_QUEUE4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE4_CSA_ADDR_LO
+#define SDMA0_QUEUE4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_CSA_ADDR_HI
+#define SDMA0_QUEUE4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_SCHEDULE_CNTL
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE4_IB_SUB_REMAIN
+#define SDMA0_QUEUE4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE4_PREEMPT
+#define SDMA0_QUEUE4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE4_DUMMY_REG
+#define SDMA0_QUEUE4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_RB_AQL_CNTL
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE4_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE4_RB_PREEMPT
+#define SDMA0_QUEUE4_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE4_MIDCMD_DATA0
+#define SDMA0_QUEUE4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA1
+#define SDMA0_QUEUE4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA2
+#define SDMA0_QUEUE4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA3
+#define SDMA0_QUEUE4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA4
+#define SDMA0_QUEUE4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA5
+#define SDMA0_QUEUE4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA6
+#define SDMA0_QUEUE4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA7
+#define SDMA0_QUEUE4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA8
+#define SDMA0_QUEUE4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA9
+#define SDMA0_QUEUE4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA10
+#define SDMA0_QUEUE4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_CNTL
+#define SDMA0_QUEUE4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE5_RB_CNTL
+#define SDMA0_QUEUE5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE5_RB_BASE
+#define SDMA0_QUEUE5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_BASE_HI
+#define SDMA0_QUEUE5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE5_RB_RPTR
+#define SDMA0_QUEUE5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_HI
+#define SDMA0_QUEUE5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR
+#define SDMA0_QUEUE5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_HI
+#define SDMA0_QUEUE5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_IB_CNTL
+#define SDMA0_QUEUE5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE5_IB_RPTR
+#define SDMA0_QUEUE5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE5_IB_OFFSET
+#define SDMA0_QUEUE5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE5_IB_BASE_LO
+#define SDMA0_QUEUE5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE5_IB_BASE_HI
+#define SDMA0_QUEUE5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_IB_SIZE
+#define SDMA0_QUEUE5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE5_SKIP_CNTL
+#define SDMA0_QUEUE5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE5_CONTEXT_STATUS
+#define SDMA0_QUEUE5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE5_DOORBELL
+#define SDMA0_QUEUE5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE5_DOORBELL_LOG
+#define SDMA0_QUEUE5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_DOORBELL_OFFSET
+#define SDMA0_QUEUE5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE5_CSA_ADDR_LO
+#define SDMA0_QUEUE5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_CSA_ADDR_HI
+#define SDMA0_QUEUE5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_SCHEDULE_CNTL
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE5_IB_SUB_REMAIN
+#define SDMA0_QUEUE5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE5_PREEMPT
+#define SDMA0_QUEUE5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE5_DUMMY_REG
+#define SDMA0_QUEUE5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_RB_AQL_CNTL
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE5_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE5_RB_PREEMPT
+#define SDMA0_QUEUE5_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE5_MIDCMD_DATA0
+#define SDMA0_QUEUE5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA1
+#define SDMA0_QUEUE5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA2
+#define SDMA0_QUEUE5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA3
+#define SDMA0_QUEUE5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA4
+#define SDMA0_QUEUE5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA5
+#define SDMA0_QUEUE5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA6
+#define SDMA0_QUEUE5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA7
+#define SDMA0_QUEUE5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA8
+#define SDMA0_QUEUE5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA9
+#define SDMA0_QUEUE5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA10
+#define SDMA0_QUEUE5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_CNTL
+#define SDMA0_QUEUE5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE6_RB_CNTL
+#define SDMA0_QUEUE6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE6_RB_BASE
+#define SDMA0_QUEUE6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_BASE_HI
+#define SDMA0_QUEUE6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE6_RB_RPTR
+#define SDMA0_QUEUE6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_HI
+#define SDMA0_QUEUE6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR
+#define SDMA0_QUEUE6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_HI
+#define SDMA0_QUEUE6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_IB_CNTL
+#define SDMA0_QUEUE6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE6_IB_RPTR
+#define SDMA0_QUEUE6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE6_IB_OFFSET
+#define SDMA0_QUEUE6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE6_IB_BASE_LO
+#define SDMA0_QUEUE6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE6_IB_BASE_HI
+#define SDMA0_QUEUE6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_IB_SIZE
+#define SDMA0_QUEUE6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE6_SKIP_CNTL
+#define SDMA0_QUEUE6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE6_CONTEXT_STATUS
+#define SDMA0_QUEUE6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE6_DOORBELL
+#define SDMA0_QUEUE6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE6_DOORBELL_LOG
+#define SDMA0_QUEUE6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_DOORBELL_OFFSET
+#define SDMA0_QUEUE6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE6_CSA_ADDR_LO
+#define SDMA0_QUEUE6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_CSA_ADDR_HI
+#define SDMA0_QUEUE6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_SCHEDULE_CNTL
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE6_IB_SUB_REMAIN
+#define SDMA0_QUEUE6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE6_PREEMPT
+#define SDMA0_QUEUE6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE6_DUMMY_REG
+#define SDMA0_QUEUE6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_RB_AQL_CNTL
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE6_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE6_RB_PREEMPT
+#define SDMA0_QUEUE6_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE6_MIDCMD_DATA0
+#define SDMA0_QUEUE6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA1
+#define SDMA0_QUEUE6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA2
+#define SDMA0_QUEUE6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA3
+#define SDMA0_QUEUE6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA4
+#define SDMA0_QUEUE6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA5
+#define SDMA0_QUEUE6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA6
+#define SDMA0_QUEUE6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA7
+#define SDMA0_QUEUE6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA8
+#define SDMA0_QUEUE6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA9
+#define SDMA0_QUEUE6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA10
+#define SDMA0_QUEUE6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_CNTL
+#define SDMA0_QUEUE6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE7_RB_CNTL
+#define SDMA0_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE7_RB_BASE
+#define SDMA0_QUEUE7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_BASE_HI
+#define SDMA0_QUEUE7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE7_RB_RPTR
+#define SDMA0_QUEUE7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_HI
+#define SDMA0_QUEUE7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR
+#define SDMA0_QUEUE7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_HI
+#define SDMA0_QUEUE7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_IB_CNTL
+#define SDMA0_QUEUE7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE7_IB_RPTR
+#define SDMA0_QUEUE7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE7_IB_OFFSET
+#define SDMA0_QUEUE7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE7_IB_BASE_LO
+#define SDMA0_QUEUE7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE7_IB_BASE_HI
+#define SDMA0_QUEUE7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_IB_SIZE
+#define SDMA0_QUEUE7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE7_SKIP_CNTL
+#define SDMA0_QUEUE7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE7_CONTEXT_STATUS
+#define SDMA0_QUEUE7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE7_DOORBELL
+#define SDMA0_QUEUE7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE7_DOORBELL_LOG
+#define SDMA0_QUEUE7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_DOORBELL_OFFSET
+#define SDMA0_QUEUE7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE7_CSA_ADDR_LO
+#define SDMA0_QUEUE7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_CSA_ADDR_HI
+#define SDMA0_QUEUE7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_SCHEDULE_CNTL
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE7_IB_SUB_REMAIN
+#define SDMA0_QUEUE7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE7_PREEMPT
+#define SDMA0_QUEUE7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE7_DUMMY_REG
+#define SDMA0_QUEUE7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_RB_AQL_CNTL
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE7_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE7_RB_PREEMPT
+#define SDMA0_QUEUE7_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE7_MIDCMD_DATA0
+#define SDMA0_QUEUE7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA1
+#define SDMA0_QUEUE7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA2
+#define SDMA0_QUEUE7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA3
+#define SDMA0_QUEUE7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA4
+#define SDMA0_QUEUE7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA5
+#define SDMA0_QUEUE7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA6
+#define SDMA0_QUEUE7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA7
+#define SDMA0_QUEUE7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA8
+#define SDMA0_QUEUE7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA9
+#define SDMA0_QUEUE7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA10
+#define SDMA0_QUEUE7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_CNTL
+#define SDMA0_QUEUE7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+
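+/*
+ * Illustrative sketch (editor's addition, not part of the generated header):
+ * each register above is described by *__SHIFT / *_MASK pairs that are
+ * consumed with the usual mask-then-shift bitfield idiom. The helpers below
+ * are hypothetical examples of that idiom; sdma_get_field() and
+ * sdma_set_field() are not definitions from this file or from the driver.
+ */
+
+/* Extract a field from a raw 32-bit register value: mask first, then shift. */
+static inline unsigned int sdma_get_field(unsigned int reg,
+					   unsigned int mask,
+					   unsigned int shift)
+{
+	return (reg & mask) >> shift;
+}
+
+/* Update a field: clear the old bits, then OR in the new value, shifted. */
+static inline unsigned int sdma_set_field(unsigned int reg,
+					   unsigned int mask,
+					   unsigned int shift,
+					   unsigned int val)
+{
+	return (reg & ~mask) | ((val << shift) & mask);
+}
+
+/*
+ * Usage example with definitions from the block above, assuming 'rb_cntl'
+ * holds a value read from SDMA0_QUEUE7_RB_CNTL:
+ *
+ *	rb_cntl = sdma_set_field(rb_cntl, SDMA0_QUEUE7_RB_CNTL__RB_ENABLE_MASK,
+ *				 SDMA0_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT, 1);
+ */
+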
+// addressBlock: gc_sdma0_sdma1dec
+//SDMA1_DEC_START
+#define SDMA1_DEC_START__START__SHIFT 0x0
+#define SDMA1_DEC_START__START_MASK 0xFFFFFFFFL
+//SDMA1_F32_MISC_CNTL
+#define SDMA1_F32_MISC_CNTL__F32_WAKEUP__SHIFT 0x0
+#define SDMA1_F32_MISC_CNTL__F32_WAKEUP_MASK 0x00000001L
+//SDMA1_GLOBAL_TIMESTAMP_LO
+#define SDMA1_GLOBAL_TIMESTAMP_LO__DATA__SHIFT 0x0
+#define SDMA1_GLOBAL_TIMESTAMP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_GLOBAL_TIMESTAMP_HI
+#define SDMA1_GLOBAL_TIMESTAMP_HI__DATA__SHIFT 0x0
+#define SDMA1_GLOBAL_TIMESTAMP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_POWER_CNTL
+#define SDMA1_POWER_CNTL__LS_ENABLE__SHIFT 0x8
+#define SDMA1_POWER_CNTL__LS_ENABLE_MASK 0x00000100L
+//SDMA1_CNTL
+#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA1_CNTL__PIO_DONE_ACK_ENABLE__SHIFT 0x6
+#define SDMA1_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE__SHIFT 0x8
+#define SDMA1_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x9
+#define SDMA1_CNTL__CP_MES_INT_ENABLE__SHIFT 0xa
+#define SDMA1_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA1_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA1_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA1_CNTL__CH_PERFCNT_ENABLE__SHIFT 0x10
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA1_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA1_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA1_CNTL__PIO_DONE_ACK_ENABLE_MASK 0x00000040L
+#define SDMA1_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE_MASK 0x00000100L
+#define SDMA1_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000200L
+#define SDMA1_CNTL__CP_MES_INT_ENABLE_MASK 0x00000400L
+#define SDMA1_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA1_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA1_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA1_CNTL__CH_PERFCNT_ENABLE_MASK 0x00010000L
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA1_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA1_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA1_CHICKEN_BITS__BACK_COMPAT_ENABLE__SHIFT 0x3
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x5
+#define SDMA1_CHICKEN_BITS__RD_BURST__SHIFT 0x6
+#define SDMA1_CHICKEN_BITS__WR_BURST__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE__SHIFT 0xa
+#define SDMA1_CHICKEN_BITS__WR_COMBINE_256B_ENABLE__SHIFT 0xe
+#define SDMA1_CHICKEN_BITS__RD_COMBINE_256B_ENABLE__SHIFT 0xf
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA1_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG__SHIFT 0x13
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG__SHIFT 0x15
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG__SHIFT 0x16
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x18
+#define SDMA1_CHICKEN_BITS__SW_FREEZE_ENABLE__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x1a
+#define SDMA1_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA1_CHICKEN_BITS__BACK_COMPAT_ENABLE_MASK 0x00000008L
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00000020L
+#define SDMA1_CHICKEN_BITS__RD_BURST_MASK 0x000000C0L
+#define SDMA1_CHICKEN_BITS__WR_BURST_MASK 0x00000300L
+#define SDMA1_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE_MASK 0x00003C00L
+#define SDMA1_CHICKEN_BITS__WR_COMBINE_256B_ENABLE_MASK 0x00004000L
+#define SDMA1_CHICKEN_BITS__RD_COMBINE_256B_ENABLE_MASK 0x00008000L
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA1_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG_MASK 0x00080000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG_MASK 0x00100000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG_MASK 0x00200000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG_MASK 0x00400000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG_MASK 0x00800000L
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x01000000L
+#define SDMA1_CHICKEN_BITS__SW_FREEZE_ENABLE_MASK 0x02000000L
+#define SDMA1_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL_MASK 0x04000000L
+#define SDMA1_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA1_GB_ADDR_CONFIG
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA1_GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA1_GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA1_GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA1_GB_ADDR_CONFIG_READ
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA1_RB_RPTR_FETCH
+#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA1_RB_RPTR_FETCH_HI
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA1_IB_OFFSET_FETCH
+#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PROGRAM
+#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA1_STATUS_REG
+#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA1_STATUS_REG__CGCG_FENCE__SHIFT 0xb
+#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA1_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA1_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA1_STATUS_REG__CGCG_FENCE_MASK 0x00000800L
+#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA1_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA1_STATUS1_REG
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA1_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA1_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xd
+#define SDMA1_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define SDMA1_STATUS1_REG__SEC_INTR_STATUS__SHIFT 0x11
+#define SDMA1_STATUS1_REG__WPTR_POLL_IDLE__SHIFT 0x12
+#define SDMA1_STATUS1_REG__SDMA_IDLE__SHIFT 0x13
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA1_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define SDMA1_STATUS1_REG__EX_START_MASK 0x00002000L
+#define SDMA1_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00004000L
+#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+#define SDMA1_STATUS1_REG__SEC_INTR_STATUS_MASK 0x00020000L
+#define SDMA1_STATUS1_REG__WPTR_POLL_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS1_REG__SDMA_IDLE_MASK 0x00080000L
+//SDMA1_CNTL1
+#define SDMA1_CNTL1__WPTR_POLL_FREQUENCY__SHIFT 0x2
+#define SDMA1_CNTL1__WPTR_POLL_FREQUENCY_MASK 0x0000FFFCL
+//SDMA1_HBM_PAGE_CONFIG
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA1_UCODE_CHECKSUM
+#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_FREEZE
+#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA1_PROCESS_QUANTUM0
+#define SDMA1_PROCESS_QUANTUM0__PROCESS0_QUANTUM__SHIFT 0x0
+#define SDMA1_PROCESS_QUANTUM0__PROCESS1_QUANTUM__SHIFT 0x8
+#define SDMA1_PROCESS_QUANTUM0__PROCESS2_QUANTUM__SHIFT 0x10
+#define SDMA1_PROCESS_QUANTUM0__PROCESS3_QUANTUM__SHIFT 0x18
+#define SDMA1_PROCESS_QUANTUM0__PROCESS0_QUANTUM_MASK 0x000000FFL
+#define SDMA1_PROCESS_QUANTUM0__PROCESS1_QUANTUM_MASK 0x0000FF00L
+#define SDMA1_PROCESS_QUANTUM0__PROCESS2_QUANTUM_MASK 0x00FF0000L
+#define SDMA1_PROCESS_QUANTUM0__PROCESS3_QUANTUM_MASK 0xFF000000L
+//SDMA1_PROCESS_QUANTUM1
+#define SDMA1_PROCESS_QUANTUM1__PROCESS4_QUANTUM__SHIFT 0x0
+#define SDMA1_PROCESS_QUANTUM1__PROCESS5_QUANTUM__SHIFT 0x8
+#define SDMA1_PROCESS_QUANTUM1__PROCESS6_QUANTUM__SHIFT 0x10
+#define SDMA1_PROCESS_QUANTUM1__PROCESS7_QUANTUM__SHIFT 0x18
+#define SDMA1_PROCESS_QUANTUM1__PROCESS4_QUANTUM_MASK 0x000000FFL
+#define SDMA1_PROCESS_QUANTUM1__PROCESS5_QUANTUM_MASK 0x0000FF00L
+#define SDMA1_PROCESS_QUANTUM1__PROCESS6_QUANTUM_MASK 0x00FF0000L
+#define SDMA1_PROCESS_QUANTUM1__PROCESS7_QUANTUM_MASK 0xFF000000L
+//SDMA1_WATCHDOG_CNTL
+#define SDMA1_WATCHDOG_CNTL__QUEUE_HANG_COUNT__SHIFT 0x0
+#define SDMA1_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT__SHIFT 0x8
+#define SDMA1_WATCHDOG_CNTL__QUEUE_HANG_COUNT_MASK 0x000000FFL
+#define SDMA1_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT_MASK 0x0000FF00L
+//SDMA1_QUEUE_STATUS0
+#define SDMA1_QUEUE_STATUS0__QUEUE0_STATUS__SHIFT 0x0
+#define SDMA1_QUEUE_STATUS0__QUEUE1_STATUS__SHIFT 0x4
+#define SDMA1_QUEUE_STATUS0__QUEUE2_STATUS__SHIFT 0x8
+#define SDMA1_QUEUE_STATUS0__QUEUE3_STATUS__SHIFT 0xc
+#define SDMA1_QUEUE_STATUS0__QUEUE4_STATUS__SHIFT 0x10
+#define SDMA1_QUEUE_STATUS0__QUEUE5_STATUS__SHIFT 0x14
+#define SDMA1_QUEUE_STATUS0__QUEUE6_STATUS__SHIFT 0x18
+#define SDMA1_QUEUE_STATUS0__QUEUE7_STATUS__SHIFT 0x1c
+#define SDMA1_QUEUE_STATUS0__QUEUE0_STATUS_MASK 0x0000000FL
+#define SDMA1_QUEUE_STATUS0__QUEUE1_STATUS_MASK 0x000000F0L
+#define SDMA1_QUEUE_STATUS0__QUEUE2_STATUS_MASK 0x00000F00L
+#define SDMA1_QUEUE_STATUS0__QUEUE3_STATUS_MASK 0x0000F000L
+#define SDMA1_QUEUE_STATUS0__QUEUE4_STATUS_MASK 0x000F0000L
+#define SDMA1_QUEUE_STATUS0__QUEUE5_STATUS_MASK 0x00F00000L
+#define SDMA1_QUEUE_STATUS0__QUEUE6_STATUS_MASK 0x0F000000L
+#define SDMA1_QUEUE_STATUS0__QUEUE7_STATUS_MASK 0xF0000000L
+//SDMA1_EDC_CONFIG
+#define SDMA1_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA1_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA1_BA_THRESHOLD
+#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA1_ID
+#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA1_VERSION
+#define SDMA1_VERSION__MINVER__SHIFT 0x0
+#define SDMA1_VERSION__MAJVER__SHIFT 0x8
+#define SDMA1_VERSION__REV__SHIFT 0x10
+#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA1_VERSION__REV_MASK 0x003F0000L
+//SDMA1_EDC_COUNTER
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA1_EDC_COUNTER_CLEAR
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA1_STATUS2_REG
+#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS2_REG__TH0F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS2_REG__TH0F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA1_ATOMIC_CNTL
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA1_ATOMIC_PREOP_LO
+#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_ATOMIC_PREOP_HI
+#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_CNTL
+#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x0
+#define SDMA1_UTCL1_CNTL__PAGE_WAIT_DELAY__SHIFT 0x5
+#define SDMA1_UTCL1_CNTL__RESP_MODE__SHIFT 0x9
+#define SDMA1_UTCL1_CNTL__FORCE_INVALIDATION__SHIFT 0xe
+#define SDMA1_UTCL1_CNTL__FORCE_INVREQ_HEAVY__SHIFT 0xf
+#define SDMA1_UTCL1_CNTL__WR_EXE_PERMS_CTRL__SHIFT 0x10
+#define SDMA1_UTCL1_CNTL__RD_EXE_PERMS_CTRL__SHIFT 0x11
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0x12
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x0000001FL
+#define SDMA1_UTCL1_CNTL__PAGE_WAIT_DELAY_MASK 0x000001E0L
+#define SDMA1_UTCL1_CNTL__RESP_MODE_MASK 0x00000600L
+#define SDMA1_UTCL1_CNTL__FORCE_INVALIDATION_MASK 0x00004000L
+#define SDMA1_UTCL1_CNTL__FORCE_INVREQ_HEAVY_MASK 0x00008000L
+#define SDMA1_UTCL1_CNTL__WR_EXE_PERMS_CTRL_MASK 0x00010000L
+#define SDMA1_UTCL1_CNTL__RD_EXE_PERMS_CTRL_MASK 0x00020000L
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x003C0000L
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x3F000000L
+//SDMA1_UTCL1_WATERMK
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK__SHIFT 0x0
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP__SHIFT 0x4
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK__SHIFT 0x6
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP__SHIFT 0xa
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK__SHIFT 0xc
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP__SHIFT 0x10
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK__SHIFT 0x12
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP__SHIFT 0x16
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK_MASK 0x0000000FL
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP_MASK 0x00000030L
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK_MASK 0x000003C0L
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP_MASK 0x00000C00L
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK_MASK 0x0000F000L
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP_MASK 0x00030000L
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK_MASK 0x003C0000L
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP_MASK 0x00C00000L
+//SDMA1_UTCL1_TIMEOUT
+#define SDMA1_UTCL1_TIMEOUT__XNACK_LIMIT__SHIFT 0x0
+#define SDMA1_UTCL1_TIMEOUT__XNACK_LIMIT_MASK 0x0000FFFFL
+//SDMA1_UTCL1_PAGE
+#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA1_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0xa
+#define SDMA1_UTCL1_PAGE__USE_IO__SHIFT 0xb
+#define SDMA1_UTCL1_PAGE__RD_L2_POLICY__SHIFT 0xc
+#define SDMA1_UTCL1_PAGE__WR_L2_POLICY__SHIFT 0xe
+#define SDMA1_UTCL1_PAGE__DMA_PAGE_SIZE__SHIFT 0x10
+#define SDMA1_UTCL1_PAGE__USE_BC__SHIFT 0x16
+#define SDMA1_UTCL1_PAGE__ADDR_IS_PA__SHIFT 0x17
+#define SDMA1_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0x18
+#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA1_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000003C0L
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000400L
+#define SDMA1_UTCL1_PAGE__USE_IO_MASK 0x00000800L
+#define SDMA1_UTCL1_PAGE__RD_L2_POLICY_MASK 0x00003000L
+#define SDMA1_UTCL1_PAGE__WR_L2_POLICY_MASK 0x0000C000L
+#define SDMA1_UTCL1_PAGE__DMA_PAGE_SIZE_MASK 0x003F0000L
+#define SDMA1_UTCL1_PAGE__USE_BC_MASK 0x00400000L
+#define SDMA1_UTCL1_PAGE__ADDR_IS_PA_MASK 0x00800000L
+#define SDMA1_UTCL1_PAGE__LLC_NOALLOC_MASK 0x01000000L
+//SDMA1_UTCL1_RD_STATUS
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_RD_STATUS__RESERVED0__SHIFT 0x5
+#define SDMA1_UTCL1_RD_STATUS__RESERVED1__SHIFT 0x6
+#define SDMA1_UTCL1_RD_STATUS__META_Q_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_RD_STATUS__RESERVED2__SHIFT 0xd
+#define SDMA1_UTCL1_RD_STATUS__RESERVED3__SHIFT 0xe
+#define SDMA1_UTCL1_RD_STATUS__META_Q_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_RD_STATUS__RD_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA1_UTCL1_RD_STATUS__RD_REQRET_IDLE__SHIFT 0x11
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_IDLE__SHIFT 0x12
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_TYPE__SHIFT 0x13
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REG_READY__SHIFT 0x17
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA1_UTCL1_RD_STATUS__RESERVED4__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_IN_RTR__SHIFT 0x1c
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_RD_STATUS__INV_BUSY__SHIFT 0x1e
+#define SDMA1_UTCL1_RD_STATUS__DBIT_REQ_IDLE__SHIFT 0x1f
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED0_MASK 0x00000020L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED1_MASK 0x00000040L
+#define SDMA1_UTCL1_RD_STATUS__META_Q_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED2_MASK 0x00002000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED3_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_STATUS__META_Q_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_STATUS__RD_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQRET_IDLE_MASK 0x00020000L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_IDLE_MASK 0x00040000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_TYPE_MASK 0x00180000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED4_MASK 0x04000000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_IN_RTR_MASK 0x10000000L
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_RD_STATUS__INV_BUSY_MASK 0x40000000L
+#define SDMA1_UTCL1_RD_STATUS__DBIT_REQ_IDLE_MASK 0x80000000L
+//SDMA1_UTCL1_WR_STATUS
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_WR_STATUS__RESERVED0__SHIFT 0x7
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0xf
+#define SDMA1_UTCL1_WR_STATUS__WR_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA1_UTCL1_WR_STATUS__WR_REQRET_IDLE__SHIFT 0x11
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_IDLE__SHIFT 0x12
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_TYPE__SHIFT 0x13
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REG_READY__SHIFT 0x17
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL__SHIFT 0x1a
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_RTR__SHIFT 0x1c
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR__SHIFT 0x1e
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR__SHIFT 0x1f
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_WR_STATUS__RESERVED0_MASK 0x00000080L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_STATUS__WR_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQRET_IDLE_MASK 0x00020000L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_IDLE_MASK 0x00040000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_TYPE_MASK 0x00180000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL_MASK 0x04000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_RTR_MASK 0x10000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR_MASK 0x40000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR_MASK 0x80000000L
+//SDMA1_UTCL1_INV0
+#define SDMA1_UTCL1_INV0__INV_PROC_BUSY__SHIFT 0x0
+#define SDMA1_UTCL1_INV0__GPUVM_FRAG_SIZE__SHIFT 0x1
+#define SDMA1_UTCL1_INV0__GPUVM_VMID__SHIFT 0x7
+#define SDMA1_UTCL1_INV0__GPUVM_MODE__SHIFT 0xb
+#define SDMA1_UTCL1_INV0__GPUVM_HIGH__SHIFT 0xd
+#define SDMA1_UTCL1_INV0__GPUVM_TAG__SHIFT 0xe
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_HIGH__SHIFT 0x12
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_LOW__SHIFT 0x16
+#define SDMA1_UTCL1_INV0__INV_TYPE__SHIFT 0x1a
+#define SDMA1_UTCL1_INV0__INV_PROC_BUSY_MASK 0x00000001L
+#define SDMA1_UTCL1_INV0__GPUVM_FRAG_SIZE_MASK 0x0000007EL
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_MASK 0x00000780L
+#define SDMA1_UTCL1_INV0__GPUVM_MODE_MASK 0x00001800L
+#define SDMA1_UTCL1_INV0__GPUVM_HIGH_MASK 0x00002000L
+#define SDMA1_UTCL1_INV0__GPUVM_TAG_MASK 0x0003C000L
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_HIGH_MASK 0x003C0000L
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_LOW_MASK 0x03C00000L
+#define SDMA1_UTCL1_INV0__INV_TYPE_MASK 0x0C000000L
+//SDMA1_UTCL1_INV1
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_INV2
+#define SDMA1_UTCL1_INV2__CPF_VMID__SHIFT 0x0
+#define SDMA1_UTCL1_INV2__CPF_FLUSH_TYPE__SHIFT 0x10
+#define SDMA1_UTCL1_INV2__CPF_FRAG_SIZE__SHIFT 0x11
+#define SDMA1_UTCL1_INV2__CPF_VMID_MASK 0x0000FFFFL
+#define SDMA1_UTCL1_INV2__CPF_FLUSH_TYPE_MASK 0x00010000L
+#define SDMA1_UTCL1_INV2__CPF_FRAG_SIZE_MASK 0x007E0000L
+//SDMA1_UTCL1_RD_XNACK0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK1
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA1_UTCL1_WR_XNACK0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_WR_XNACK1
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA1_RELAX_ORDERING_LUT
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS_2
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA1_CHICKEN_BITS_2__UCODE_BUF_DS_EN__SHIFT 0x6
+#define SDMA1_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP__SHIFT 0x7
+#define SDMA1_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS_2__RESERVED_14_12__SHIFT 0xc
+#define SDMA1_CHICKEN_BITS_2__RESERVED_15__SHIFT 0xf
+#define SDMA1_CHICKEN_BITS_2__RB_FIFO_WATERMARK__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS_2__IB_FIFO_WATERMARK__SHIFT 0x12
+#define SDMA1_CHICKEN_BITS_2__RESERVED_22_20__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS_2__CH_RD_WATERMARK__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB__SHIFT 0x1e
+#define SDMA1_CHICKEN_BITS_2__PIO_VFID_SOURCE__SHIFT 0x1f
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA1_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+#define SDMA1_CHICKEN_BITS_2__UCODE_BUF_DS_EN_MASK 0x00000040L
+#define SDMA1_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP_MASK 0x00000080L
+#define SDMA1_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING_MASK 0x00000F00L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_14_12_MASK 0x00007000L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_15_MASK 0x00008000L
+#define SDMA1_CHICKEN_BITS_2__RB_FIFO_WATERMARK_MASK 0x00030000L
+#define SDMA1_CHICKEN_BITS_2__IB_FIFO_WATERMARK_MASK 0x000C0000L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_22_20_MASK 0x00700000L
+#define SDMA1_CHICKEN_BITS_2__CH_RD_WATERMARK_MASK 0x01800000L
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_MASK 0x3E000000L
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB_MASK 0x40000000L
+#define SDMA1_CHICKEN_BITS_2__PIO_VFID_SOURCE_MASK 0x80000000L
+//SDMA1_STATUS3_REG
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA1_STATUS3_REG__AQL_PREV_CMD_IDLE__SHIFT 0x15
+#define SDMA1_STATUS3_REG__TLBI_IDLE__SHIFT 0x16
+#define SDMA1_STATUS3_REG__GCR_IDLE__SHIFT 0x17
+#define SDMA1_STATUS3_REG__INVREQ_IDLE__SHIFT 0x18
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x19
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x1a
+#define SDMA1_STATUS3_REG__TMZ_MTYPE_STATUS__SHIFT 0x1e
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA1_STATUS3_REG__AQL_PREV_CMD_IDLE_MASK 0x00200000L
+#define SDMA1_STATUS3_REG__TLBI_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS3_REG__GCR_IDLE_MASK 0x00800000L
+#define SDMA1_STATUS3_REG__INVREQ_IDLE_MASK 0x01000000L
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x02000000L
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x3C000000L
+#define SDMA1_STATUS3_REG__TMZ_MTYPE_STATUS_MASK 0xC0000000L
+//SDMA1_PHYSICAL_ADDR_LO
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA1_PHYSICAL_ADDR_HI
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA1_GLOBAL_QUANTUM
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM__SHIFT 0x0
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM__SHIFT 0x8
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM_MASK 0x000000FFL
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM_MASK 0x0000FF00L
+//SDMA1_ERROR_LOG
+#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA1_PUB_DUMMY_REG0
+#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG1
+#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG2
+#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG3
+#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_F32_COUNTER
+#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_CRD_CNTL
+#define SDMA1_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA1_CRD_CNTL__CH_WRREQ_CREDIT__SHIFT 0x13
+#define SDMA1_CRD_CNTL__CH_RDREQ_CREDIT__SHIFT 0x19
+#define SDMA1_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+#define SDMA1_CRD_CNTL__CH_WRREQ_CREDIT_MASK 0x01F80000L
+#define SDMA1_CRD_CNTL__CH_RDREQ_CREDIT_MASK 0x7E000000L
+//SDMA1_RLC_CGCG_CTRL
+#define SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE__SHIFT 0x1
+#define SDMA1_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS__SHIFT 0x10
+#define SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS_MASK 0xFFFF0000L
+//SDMA1_GPU_IOV_VIOLATION_LOG
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA1_AQL_STATUS
+#define SDMA1_AQL_STATUS__COMPLETE_SIGNAL_EMPTY__SHIFT 0x0
+#define SDMA1_AQL_STATUS__INVALID_CMD_EMPTY__SHIFT 0x1
+#define SDMA1_AQL_STATUS__COMPLETE_SIGNAL_EMPTY_MASK 0x00000001L
+#define SDMA1_AQL_STATUS__INVALID_CMD_EMPTY_MASK 0x00000002L
+//SDMA1_EA_DBIT_ADDR_DATA
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_EA_DBIT_ADDR_INDEX
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA1_TLBI_GCR_CNTL
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CMD_DW__SHIFT 0x0
+#define SDMA1_TLBI_GCR_CNTL__GCR_CMD_DW__SHIFT 0x4
+#define SDMA1_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE__SHIFT 0x8
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CREDIT__SHIFT 0x10
+#define SDMA1_TLBI_GCR_CNTL__GCR_CREDIT__SHIFT 0x18
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CMD_DW_MASK 0x0000000FL
+#define SDMA1_TLBI_GCR_CNTL__GCR_CMD_DW_MASK 0x000000F0L
+#define SDMA1_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE_MASK 0x00000F00L
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CREDIT_MASK 0x00FF0000L
+#define SDMA1_TLBI_GCR_CNTL__GCR_CREDIT_MASK 0xFF000000L
+//SDMA1_TILING_CONFIG
+#define SDMA1_TILING_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define SDMA1_TILING_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//SDMA1_HASH
+#define SDMA1_HASH__CHANNEL_BITS__SHIFT 0x0
+#define SDMA1_HASH__BANK_BITS__SHIFT 0x4
+#define SDMA1_HASH__CHANNEL_XOR_COUNT__SHIFT 0x8
+#define SDMA1_HASH__BANK_XOR_COUNT__SHIFT 0xc
+#define SDMA1_HASH__CHANNEL_BITS_MASK 0x00000007L
+#define SDMA1_HASH__BANK_BITS_MASK 0x00000070L
+#define SDMA1_HASH__CHANNEL_XOR_COUNT_MASK 0x00000700L
+#define SDMA1_HASH__BANK_XOR_COUNT_MASK 0x00007000L
+//SDMA1_INT_STATUS
+#define SDMA1_INT_STATUS__DATA__SHIFT 0x0
+#define SDMA1_INT_STATUS__DATA_MASK 0xFFFFFFFFL
+//SDMA1_GPU_IOV_VIOLATION_LOG2
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA1_HOLE_ADDR_LO
+#define SDMA1_HOLE_ADDR_LO__VALUE__SHIFT 0x0
+#define SDMA1_HOLE_ADDR_LO__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_HOLE_ADDR_HI
+#define SDMA1_HOLE_ADDR_HI__VALUE__SHIFT 0x0
+#define SDMA1_HOLE_ADDR_HI__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_CLOCK_GATING_STATUS
+#define SDMA1_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS__SHIFT 0x0
+#define SDMA1_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS__SHIFT 0x2
+#define SDMA1_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS__SHIFT 0x3
+#define SDMA1_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS__SHIFT 0x4
+#define SDMA1_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS__SHIFT 0x5
+#define SDMA1_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS__SHIFT 0x6
+#define SDMA1_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS_MASK 0x00000001L
+#define SDMA1_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS_MASK 0x00000004L
+#define SDMA1_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS_MASK 0x00000008L
+#define SDMA1_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS_MASK 0x00000010L
+#define SDMA1_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS_MASK 0x00000020L
+#define SDMA1_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS_MASK 0x00000040L
+//SDMA1_STATUS4_REG
+#define SDMA1_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA1_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA1_STATUS4_REG__CH_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA1_STATUS4_REG__CH_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA1_STATUS4_REG__GCR_OUTSTANDING__SHIFT 0x6
+#define SDMA1_STATUS4_REG__TLBI_OUTSTANDING__SHIFT 0x7
+#define SDMA1_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x8
+#define SDMA1_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x9
+#define SDMA1_STATUS4_REG__REG_POLLING__SHIFT 0xa
+#define SDMA1_STATUS4_REG__MEM_POLLING__SHIFT 0xb
+#define SDMA1_STATUS4_REG__RESERVED_13_12__SHIFT 0xc
+#define SDMA1_STATUS4_REG__RESERVED_15_14__SHIFT 0xe
+#define SDMA1_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA1_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x14
+#define SDMA1_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x15
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_FAULT__SHIFT 0x16
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_NULL__SHIFT 0x17
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT__SHIFT 0x18
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_FAULT__SHIFT 0x19
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_NULL__SHIFT 0x1a
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT__SHIFT 0x1b
+#define SDMA1_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA1_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA1_STATUS4_REG__CH_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA1_STATUS4_REG__CH_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA1_STATUS4_REG__GCR_OUTSTANDING_MASK 0x00000040L
+#define SDMA1_STATUS4_REG__TLBI_OUTSTANDING_MASK 0x00000080L
+#define SDMA1_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000100L
+#define SDMA1_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000200L
+#define SDMA1_STATUS4_REG__REG_POLLING_MASK 0x00000400L
+#define SDMA1_STATUS4_REG__MEM_POLLING_MASK 0x00000800L
+#define SDMA1_STATUS4_REG__RESERVED_13_12_MASK 0x00003000L
+#define SDMA1_STATUS4_REG__RESERVED_15_14_MASK 0x0000C000L
+#define SDMA1_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA1_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00100000L
+#define SDMA1_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00200000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_FAULT_MASK 0x00400000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_NULL_MASK 0x00800000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT_MASK 0x01000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_FAULT_MASK 0x02000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_NULL_MASK 0x04000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT_MASK 0x08000000L
+//SDMA1_SCRATCH_RAM_DATA
+#define SDMA1_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA1_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA1_SCRATCH_RAM_ADDR
+#define SDMA1_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA1_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA1_TIMESTAMP_CNTL
+#define SDMA1_TIMESTAMP_CNTL__CAPTURE__SHIFT 0x0
+#define SDMA1_TIMESTAMP_CNTL__CAPTURE_MASK 0x00000001L
+//SDMA1_STATUS5_REG
+#define SDMA1_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS__SHIFT 0x0
+#define SDMA1_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS__SHIFT 0x1
+#define SDMA1_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS__SHIFT 0x2
+#define SDMA1_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS__SHIFT 0x3
+#define SDMA1_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS__SHIFT 0x4
+#define SDMA1_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS__SHIFT 0x5
+#define SDMA1_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS__SHIFT 0x6
+#define SDMA1_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS__SHIFT 0x7
+#define SDMA1_STATUS5_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA1_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x14
+#define SDMA1_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x15
+#define SDMA1_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x16
+#define SDMA1_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x17
+#define SDMA1_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x18
+#define SDMA1_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x19
+#define SDMA1_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1a
+#define SDMA1_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1b
+#define SDMA1_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS_MASK 0x00000001L
+#define SDMA1_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS_MASK 0x00000002L
+#define SDMA1_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS_MASK 0x00000004L
+#define SDMA1_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS_MASK 0x00000008L
+#define SDMA1_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS_MASK 0x00000010L
+#define SDMA1_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS_MASK 0x00000020L
+#define SDMA1_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS_MASK 0x00000040L
+#define SDMA1_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS_MASK 0x00000080L
+#define SDMA1_STATUS5_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA1_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00100000L
+#define SDMA1_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00200000L
+#define SDMA1_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00400000L
+#define SDMA1_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00800000L
+#define SDMA1_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION_MASK 0x01000000L
+#define SDMA1_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION_MASK 0x02000000L
+#define SDMA1_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION_MASK 0x04000000L
+#define SDMA1_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION_MASK 0x08000000L
+//SDMA1_QUEUE_RESET_REQ
+#define SDMA1_QUEUE_RESET_REQ__QUEUE0_RESET__SHIFT 0x0
+#define SDMA1_QUEUE_RESET_REQ__QUEUE1_RESET__SHIFT 0x1
+#define SDMA1_QUEUE_RESET_REQ__QUEUE2_RESET__SHIFT 0x2
+#define SDMA1_QUEUE_RESET_REQ__QUEUE3_RESET__SHIFT 0x3
+#define SDMA1_QUEUE_RESET_REQ__QUEUE4_RESET__SHIFT 0x4
+#define SDMA1_QUEUE_RESET_REQ__QUEUE5_RESET__SHIFT 0x5
+#define SDMA1_QUEUE_RESET_REQ__QUEUE6_RESET__SHIFT 0x6
+#define SDMA1_QUEUE_RESET_REQ__QUEUE7_RESET__SHIFT 0x7
+#define SDMA1_QUEUE_RESET_REQ__RESERVED__SHIFT 0x8
+#define SDMA1_QUEUE_RESET_REQ__QUEUE0_RESET_MASK 0x00000001L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE1_RESET_MASK 0x00000002L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE2_RESET_MASK 0x00000004L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE3_RESET_MASK 0x00000008L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE4_RESET_MASK 0x00000010L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE5_RESET_MASK 0x00000020L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE6_RESET_MASK 0x00000040L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE7_RESET_MASK 0x00000080L
+#define SDMA1_QUEUE_RESET_REQ__RESERVED_MASK 0xFFFFFF00L
+//SDMA1_STATUS6_REG
+#define SDMA1_STATUS6_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS6_REG__TH1F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS6_REG__TH1_EXCEPTION__SHIFT 0x10
+#define SDMA1_STATUS6_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS6_REG__TH1F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA1_STATUS6_REG__TH1_EXCEPTION_MASK 0xFFFF0000L
+//SDMA1_UCODE1_CHECKSUM
+#define SDMA1_UCODE1_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE1_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_CE_CTRL
+#define SDMA1_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA1_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA1_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA1_CE_CTRL__CE_DCC_READ_128B_ENABLE__SHIFT 0x8
+#define SDMA1_CE_CTRL__RESERVED__SHIFT 0x9
+#define SDMA1_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA1_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA1_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA1_CE_CTRL__CE_DCC_READ_128B_ENABLE_MASK 0x00000100L
+#define SDMA1_CE_CTRL__RESERVED_MASK 0xFFFFFE00L
+//SDMA1_FED_STATUS
+#define SDMA1_FED_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA1_FED_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA1_FED_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA1_FED_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA1_FED_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA1_FED_STATUS__COPY_METADATA_ECC__SHIFT 0x5
+#define SDMA1_FED_STATUS__SELFLOAD_UCODE_ECC__SHIFT 0x6
+#define SDMA1_FED_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA1_FED_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA1_FED_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA1_FED_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA1_FED_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA1_FED_STATUS__COPY_METADATA_ECC_MASK 0x00000020L
+#define SDMA1_FED_STATUS__SELFLOAD_UCODE_ECC_MASK 0x00000040L
+//SDMA1_QUEUE0_RB_CNTL
+#define SDMA1_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE0_RB_BASE
+#define SDMA1_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_BASE_HI
+#define SDMA1_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE0_RB_RPTR
+#define SDMA1_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_HI
+#define SDMA1_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR
+#define SDMA1_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_HI
+#define SDMA1_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_IB_CNTL
+#define SDMA1_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE0_IB_RPTR
+#define SDMA1_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE0_IB_OFFSET
+#define SDMA1_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE0_IB_BASE_LO
+#define SDMA1_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE0_IB_BASE_HI
+#define SDMA1_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_IB_SIZE
+#define SDMA1_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE0_SKIP_CNTL
+#define SDMA1_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE0_CONTEXT_STATUS
+#define SDMA1_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE0_CONTEXT_STATUS__USE_IB__SHIFT 0x1
+#define SDMA1_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__USE_IB_MASK 0x00000002L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE0_DOORBELL
+#define SDMA1_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE0_DOORBELL_LOG
+#define SDMA1_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_DOORBELL_OFFSET
+#define SDMA1_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE0_CSA_ADDR_LO
+#define SDMA1_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_CSA_ADDR_HI
+#define SDMA1_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_SCHEDULE_CNTL
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE0_IB_SUB_REMAIN
+#define SDMA1_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE0_PREEMPT
+#define SDMA1_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE0_DUMMY_REG
+#define SDMA1_QUEUE0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_RB_AQL_CNTL
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE0_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE0_RB_PREEMPT
+#define SDMA1_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE0_MIDCMD_DATA0
+#define SDMA1_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA1
+#define SDMA1_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA2
+#define SDMA1_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA3
+#define SDMA1_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA4
+#define SDMA1_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA5
+#define SDMA1_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA6
+#define SDMA1_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA7
+#define SDMA1_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA8
+#define SDMA1_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA9
+#define SDMA1_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA10
+#define SDMA1_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_CNTL
+#define SDMA1_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE1_RB_CNTL
+#define SDMA1_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE1_RB_BASE
+#define SDMA1_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_BASE_HI
+#define SDMA1_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE1_RB_RPTR
+#define SDMA1_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_HI
+#define SDMA1_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR
+#define SDMA1_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_HI
+#define SDMA1_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_IB_CNTL
+#define SDMA1_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE1_IB_RPTR
+#define SDMA1_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE1_IB_OFFSET
+#define SDMA1_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE1_IB_BASE_LO
+#define SDMA1_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE1_IB_BASE_HI
+#define SDMA1_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_IB_SIZE
+#define SDMA1_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE1_SKIP_CNTL
+#define SDMA1_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE1_CONTEXT_STATUS
+#define SDMA1_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE1_DOORBELL
+#define SDMA1_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE1_DOORBELL_LOG
+#define SDMA1_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_DOORBELL_OFFSET
+#define SDMA1_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE1_CSA_ADDR_LO
+#define SDMA1_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_CSA_ADDR_HI
+#define SDMA1_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_SCHEDULE_CNTL
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE1_IB_SUB_REMAIN
+#define SDMA1_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE1_PREEMPT
+#define SDMA1_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE1_DUMMY_REG
+#define SDMA1_QUEUE1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_RB_AQL_CNTL
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE1_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE1_RB_PREEMPT
+#define SDMA1_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE1_MIDCMD_DATA0
+#define SDMA1_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA1
+#define SDMA1_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA2
+#define SDMA1_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA3
+#define SDMA1_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA4
+#define SDMA1_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA5
+#define SDMA1_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA6
+#define SDMA1_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA7
+#define SDMA1_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA8
+#define SDMA1_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA9
+#define SDMA1_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA10
+#define SDMA1_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_CNTL
+#define SDMA1_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE2_RB_CNTL
+#define SDMA1_QUEUE2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE2_RB_BASE
+#define SDMA1_QUEUE2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_BASE_HI
+#define SDMA1_QUEUE2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE2_RB_RPTR
+#define SDMA1_QUEUE2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_HI
+#define SDMA1_QUEUE2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR
+#define SDMA1_QUEUE2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_HI
+#define SDMA1_QUEUE2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_IB_CNTL
+#define SDMA1_QUEUE2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE2_IB_RPTR
+#define SDMA1_QUEUE2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE2_IB_OFFSET
+#define SDMA1_QUEUE2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE2_IB_BASE_LO
+#define SDMA1_QUEUE2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE2_IB_BASE_HI
+#define SDMA1_QUEUE2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_IB_SIZE
+#define SDMA1_QUEUE2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE2_SKIP_CNTL
+#define SDMA1_QUEUE2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE2_CONTEXT_STATUS
+#define SDMA1_QUEUE2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE2_DOORBELL
+#define SDMA1_QUEUE2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE2_DOORBELL_LOG
+#define SDMA1_QUEUE2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_DOORBELL_OFFSET
+#define SDMA1_QUEUE2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE2_CSA_ADDR_LO
+#define SDMA1_QUEUE2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_CSA_ADDR_HI
+#define SDMA1_QUEUE2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_SCHEDULE_CNTL
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE2_IB_SUB_REMAIN
+#define SDMA1_QUEUE2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE2_PREEMPT
+#define SDMA1_QUEUE2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE2_DUMMY_REG
+#define SDMA1_QUEUE2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_RB_AQL_CNTL
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE2_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE2_RB_PREEMPT
+#define SDMA1_QUEUE2_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE2_MIDCMD_DATA0
+#define SDMA1_QUEUE2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA1
+#define SDMA1_QUEUE2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA2
+#define SDMA1_QUEUE2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA3
+#define SDMA1_QUEUE2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA4
+#define SDMA1_QUEUE2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA5
+#define SDMA1_QUEUE2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA6
+#define SDMA1_QUEUE2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA7
+#define SDMA1_QUEUE2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA8
+#define SDMA1_QUEUE2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA9
+#define SDMA1_QUEUE2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA10
+#define SDMA1_QUEUE2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_CNTL
+#define SDMA1_QUEUE2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE3_RB_CNTL
+#define SDMA1_QUEUE3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE3_RB_BASE
+#define SDMA1_QUEUE3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_BASE_HI
+#define SDMA1_QUEUE3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE3_RB_RPTR
+#define SDMA1_QUEUE3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_HI
+#define SDMA1_QUEUE3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR
+#define SDMA1_QUEUE3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_HI
+#define SDMA1_QUEUE3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_IB_CNTL
+#define SDMA1_QUEUE3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE3_IB_RPTR
+#define SDMA1_QUEUE3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE3_IB_OFFSET
+#define SDMA1_QUEUE3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE3_IB_BASE_LO
+#define SDMA1_QUEUE3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE3_IB_BASE_HI
+#define SDMA1_QUEUE3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_IB_SIZE
+#define SDMA1_QUEUE3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE3_SKIP_CNTL
+#define SDMA1_QUEUE3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE3_CONTEXT_STATUS
+#define SDMA1_QUEUE3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE3_DOORBELL
+#define SDMA1_QUEUE3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE3_DOORBELL_LOG
+#define SDMA1_QUEUE3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_DOORBELL_OFFSET
+#define SDMA1_QUEUE3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE3_CSA_ADDR_LO
+#define SDMA1_QUEUE3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_CSA_ADDR_HI
+#define SDMA1_QUEUE3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_SCHEDULE_CNTL
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE3_IB_SUB_REMAIN
+#define SDMA1_QUEUE3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE3_PREEMPT
+#define SDMA1_QUEUE3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE3_DUMMY_REG
+#define SDMA1_QUEUE3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_RB_AQL_CNTL
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE3_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE3_RB_PREEMPT
+#define SDMA1_QUEUE3_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE3_MIDCMD_DATA0
+#define SDMA1_QUEUE3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA1
+#define SDMA1_QUEUE3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA2
+#define SDMA1_QUEUE3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA3
+#define SDMA1_QUEUE3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA4
+#define SDMA1_QUEUE3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA5
+#define SDMA1_QUEUE3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA6
+#define SDMA1_QUEUE3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA7
+#define SDMA1_QUEUE3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA8
+#define SDMA1_QUEUE3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA9
+#define SDMA1_QUEUE3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA10
+#define SDMA1_QUEUE3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_CNTL
+#define SDMA1_QUEUE3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE4_RB_CNTL
+#define SDMA1_QUEUE4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE4_RB_BASE
+#define SDMA1_QUEUE4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_BASE_HI
+#define SDMA1_QUEUE4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE4_RB_RPTR
+#define SDMA1_QUEUE4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_HI
+#define SDMA1_QUEUE4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR
+#define SDMA1_QUEUE4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_HI
+#define SDMA1_QUEUE4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_IB_CNTL
+#define SDMA1_QUEUE4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE4_IB_RPTR
+#define SDMA1_QUEUE4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE4_IB_OFFSET
+#define SDMA1_QUEUE4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE4_IB_BASE_LO
+#define SDMA1_QUEUE4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE4_IB_BASE_HI
+#define SDMA1_QUEUE4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_IB_SIZE
+#define SDMA1_QUEUE4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE4_SKIP_CNTL
+#define SDMA1_QUEUE4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE4_CONTEXT_STATUS
+#define SDMA1_QUEUE4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE4_DOORBELL
+#define SDMA1_QUEUE4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE4_DOORBELL_LOG
+#define SDMA1_QUEUE4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_DOORBELL_OFFSET
+#define SDMA1_QUEUE4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE4_CSA_ADDR_LO
+#define SDMA1_QUEUE4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_CSA_ADDR_HI
+#define SDMA1_QUEUE4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_SCHEDULE_CNTL
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE4_IB_SUB_REMAIN
+#define SDMA1_QUEUE4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE4_PREEMPT
+#define SDMA1_QUEUE4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE4_DUMMY_REG
+#define SDMA1_QUEUE4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_RB_AQL_CNTL
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE4_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE4_RB_PREEMPT
+#define SDMA1_QUEUE4_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE4_MIDCMD_DATA0
+#define SDMA1_QUEUE4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA1
+#define SDMA1_QUEUE4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA2
+#define SDMA1_QUEUE4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA3
+#define SDMA1_QUEUE4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA4
+#define SDMA1_QUEUE4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA5
+#define SDMA1_QUEUE4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA6
+#define SDMA1_QUEUE4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA7
+#define SDMA1_QUEUE4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA8
+#define SDMA1_QUEUE4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA9
+#define SDMA1_QUEUE4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA10
+#define SDMA1_QUEUE4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_CNTL
+#define SDMA1_QUEUE4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE5_RB_CNTL
+#define SDMA1_QUEUE5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE5_RB_BASE
+#define SDMA1_QUEUE5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_BASE_HI
+#define SDMA1_QUEUE5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE5_RB_RPTR
+#define SDMA1_QUEUE5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_HI
+#define SDMA1_QUEUE5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR
+#define SDMA1_QUEUE5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_HI
+#define SDMA1_QUEUE5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_IB_CNTL
+#define SDMA1_QUEUE5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE5_IB_RPTR
+#define SDMA1_QUEUE5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE5_IB_OFFSET
+#define SDMA1_QUEUE5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE5_IB_BASE_LO
+#define SDMA1_QUEUE5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE5_IB_BASE_HI
+#define SDMA1_QUEUE5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_IB_SIZE
+#define SDMA1_QUEUE5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE5_SKIP_CNTL
+#define SDMA1_QUEUE5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE5_CONTEXT_STATUS
+#define SDMA1_QUEUE5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE5_DOORBELL
+#define SDMA1_QUEUE5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE5_DOORBELL_LOG
+#define SDMA1_QUEUE5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_DOORBELL_OFFSET
+#define SDMA1_QUEUE5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE5_CSA_ADDR_LO
+#define SDMA1_QUEUE5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_CSA_ADDR_HI
+#define SDMA1_QUEUE5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_SCHEDULE_CNTL
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE5_IB_SUB_REMAIN
+#define SDMA1_QUEUE5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE5_PREEMPT
+#define SDMA1_QUEUE5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE5_DUMMY_REG
+#define SDMA1_QUEUE5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_RB_AQL_CNTL
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE5_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE5_RB_PREEMPT
+#define SDMA1_QUEUE5_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE5_MIDCMD_DATA0
+#define SDMA1_QUEUE5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA1
+#define SDMA1_QUEUE5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA2
+#define SDMA1_QUEUE5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA3
+#define SDMA1_QUEUE5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA4
+#define SDMA1_QUEUE5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA5
+#define SDMA1_QUEUE5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA6
+#define SDMA1_QUEUE5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA7
+#define SDMA1_QUEUE5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA8
+#define SDMA1_QUEUE5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA9
+#define SDMA1_QUEUE5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA10
+#define SDMA1_QUEUE5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_CNTL
+#define SDMA1_QUEUE5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE6_RB_CNTL
+#define SDMA1_QUEUE6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE6_RB_BASE
+#define SDMA1_QUEUE6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_BASE_HI
+#define SDMA1_QUEUE6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE6_RB_RPTR
+#define SDMA1_QUEUE6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_HI
+#define SDMA1_QUEUE6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR
+#define SDMA1_QUEUE6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_HI
+#define SDMA1_QUEUE6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_IB_CNTL
+#define SDMA1_QUEUE6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE6_IB_RPTR
+#define SDMA1_QUEUE6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE6_IB_OFFSET
+#define SDMA1_QUEUE6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE6_IB_BASE_LO
+#define SDMA1_QUEUE6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE6_IB_BASE_HI
+#define SDMA1_QUEUE6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_IB_SIZE
+#define SDMA1_QUEUE6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE6_SKIP_CNTL
+#define SDMA1_QUEUE6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE6_CONTEXT_STATUS
+#define SDMA1_QUEUE6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE6_DOORBELL
+#define SDMA1_QUEUE6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE6_DOORBELL_LOG
+#define SDMA1_QUEUE6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_DOORBELL_OFFSET
+#define SDMA1_QUEUE6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE6_CSA_ADDR_LO
+#define SDMA1_QUEUE6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_CSA_ADDR_HI
+#define SDMA1_QUEUE6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_SCHEDULE_CNTL
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE6_IB_SUB_REMAIN
+#define SDMA1_QUEUE6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE6_PREEMPT
+#define SDMA1_QUEUE6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE6_DUMMY_REG
+#define SDMA1_QUEUE6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_RB_AQL_CNTL
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE6_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE6_RB_PREEMPT
+#define SDMA1_QUEUE6_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE6_MIDCMD_DATA0
+#define SDMA1_QUEUE6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA1
+#define SDMA1_QUEUE6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA2
+#define SDMA1_QUEUE6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA3
+#define SDMA1_QUEUE6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA4
+#define SDMA1_QUEUE6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA5
+#define SDMA1_QUEUE6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA6
+#define SDMA1_QUEUE6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA7
+#define SDMA1_QUEUE6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA8
+#define SDMA1_QUEUE6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA9
+#define SDMA1_QUEUE6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA10
+#define SDMA1_QUEUE6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_CNTL
+#define SDMA1_QUEUE6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE7_RB_CNTL
+#define SDMA1_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE7_RB_BASE
+#define SDMA1_QUEUE7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_BASE_HI
+#define SDMA1_QUEUE7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE7_RB_RPTR
+#define SDMA1_QUEUE7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_HI
+#define SDMA1_QUEUE7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR
+#define SDMA1_QUEUE7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_HI
+#define SDMA1_QUEUE7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_IB_CNTL
+#define SDMA1_QUEUE7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE7_IB_RPTR
+#define SDMA1_QUEUE7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE7_IB_OFFSET
+#define SDMA1_QUEUE7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE7_IB_BASE_LO
+#define SDMA1_QUEUE7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE7_IB_BASE_HI
+#define SDMA1_QUEUE7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_IB_SIZE
+#define SDMA1_QUEUE7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE7_SKIP_CNTL
+#define SDMA1_QUEUE7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE7_CONTEXT_STATUS
+#define SDMA1_QUEUE7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE7_DOORBELL
+#define SDMA1_QUEUE7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE7_DOORBELL_LOG
+#define SDMA1_QUEUE7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_DOORBELL_OFFSET
+#define SDMA1_QUEUE7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE7_CSA_ADDR_LO
+#define SDMA1_QUEUE7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_CSA_ADDR_HI
+#define SDMA1_QUEUE7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_SCHEDULE_CNTL
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE7_IB_SUB_REMAIN
+#define SDMA1_QUEUE7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE7_PREEMPT
+#define SDMA1_QUEUE7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE7_DUMMY_REG
+#define SDMA1_QUEUE7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_RB_AQL_CNTL
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE7_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE7_RB_PREEMPT
+#define SDMA1_QUEUE7_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE7_MIDCMD_DATA0
+#define SDMA1_QUEUE7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA1
+#define SDMA1_QUEUE7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA2
+#define SDMA1_QUEUE7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA3
+#define SDMA1_QUEUE7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA4
+#define SDMA1_QUEUE7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA5
+#define SDMA1_QUEUE7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA6
+#define SDMA1_QUEUE7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA7
+#define SDMA1_QUEUE7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA8
+#define SDMA1_QUEUE7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA9
+#define SDMA1_QUEUE7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA10
+#define SDMA1_QUEUE7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_CNTL
+#define SDMA1_QUEUE7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+
+// addressBlock: gc_sdma0_sdma0hypdec
+//SDMA0_UCODE_ADDR
+#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA0_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA0_UCODE_DATA
+#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_UCODE_SELFLOAD_CONTROL
+#define SDMA0_UCODE_SELFLOAD_CONTROL__GPA__SHIFT 0x0
+#define SDMA0_UCODE_SELFLOAD_CONTROL__SYS__SHIFT 0x1
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CID__SHIFT 0x4
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CACHE_POLICY__SHIFT 0x8
+#define SDMA0_UCODE_SELFLOAD_CONTROL__GPA_MASK 0x00000001L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__SYS_MASK 0x00000002L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CID_MASK 0x000000F0L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CACHE_POLICY_MASK 0x00000300L
+//SDMA0_BROADCAST_UCODE_ADDR
+#define SDMA0_BROADCAST_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_BROADCAST_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA0_BROADCAST_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA0_BROADCAST_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA0_BROADCAST_UCODE_DATA
+#define SDMA0_BROADCAST_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_BROADCAST_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_VM_CTX_LO
+#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_VM_CTX_HI
+#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_ACTIVE_FCN_ID
+#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA0_VM_CTX_CNTL
+#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA0_VM_CTX_CNTL__MEM_PHY__SHIFT 0x8
+#define SDMA0_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE__SHIFT 0x10
+#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+#define SDMA0_VM_CTX_CNTL__MEM_PHY_MASK 0x00000300L
+#define SDMA0_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE_MASK 0x00010000L
+//SDMA0_VIRT_RESET_REQ
+#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA0_CONTEXT_REG_TYPE0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_CNTL__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_HI__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_HI__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_HI__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_CNTL__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_RPTR__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_OFFSET__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_LO__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_HI__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_SIZE__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_SKIP_CNTL__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_DOORBELL__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED31_19__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_CNTL_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_HI_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_CNTL_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_RPTR_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_OFFSET_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_LO_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_HI_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_SIZE_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_SKIP_CNTL_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_DOORBELL_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED31_19_MASK 0xFFF80000L
+//SDMA0_CONTEXT_REG_TYPE1
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED8_0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_LOG__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_SCHEDULE_CNTL__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_PREEMPT__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DUMMY_REG__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_PREEMPT__SHIFT 0x16
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x17
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED8_0_MASK 0x000001FFL
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_SCHEDULE_CNTL_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_PREEMPT_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DUMMY_REG_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_PREEMPT_MASK 0x00400000L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFF800000L
+//SDMA0_CONTEXT_REG_TYPE2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA0_PUB_REG_TYPE0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_DEC_START__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE0__RESERVED_10_1__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE0__SDMA0_F32_MISC_CNTL__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_LO__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_HI__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE0__RESERVED22__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE0__RESERVED23__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE0__RESERVED24__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE0__RESERVED25__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE0__RESERVED27__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE0__SDMA0_DEC_START_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE0__RESERVED_10_1_MASK 0x000007FEL
+#define SDMA0_PUB_REG_TYPE0__SDMA0_F32_MISC_CNTL_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_LO_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_HI_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED22_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED23_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED24_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED25_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED27_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE1__SDMA0_CNTL1__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM0__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM1__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE1__SDMA0_WATCHDOG_CNTL__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE1__RESERVED15__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE1__RESERVED16__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE1__RESERVED17__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_TIMEOUT__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_PAGE__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_CNTL1_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM0_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM1_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_WATCHDOG_CNTL_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED15_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED16_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED17_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_TIMEOUT_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_PAGE_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_STATUS__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GLOBAL_QUANTUM__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE2__RESERVE_22_22__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE2__RESERVED23__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE2__RESERVED24__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE2__RESERVED25__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE2__RESERVED26__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RLC_CGCG_CTRL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE2__SDMA0_AQL_STATUS__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_STATUS_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_STATUS_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GLOBAL_QUANTUM_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE2__RESERVE_22_22_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED23_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED24_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED25_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED26_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RLC_CGCG_CTRL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_AQL_STATUS_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TLBI_GCR_CNTL__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TILING_CONFIG__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HASH__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE3__RESERVED5__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE3__RESERVED7__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CE_CTRL__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE3__SDMA0_FED_STATUS__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE3__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE3__RESERVED11__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE3__RESERVED12__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE3__RESERVED13__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE3__RESERVED14__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE3__RESERVED15__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE3__SDMA0_INT_STATUS__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_LO__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_HI__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE3__RESERVED20__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CLOCK_GATING_STATUS__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS4_REG__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_DATA__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_ADDR__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TIMESTAMP_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE3__RESERVED26__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE3__RESERVED27__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS5_REG__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE3__SDMA0_QUEUE_RESET_REQ__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS6_REG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE3__SDMA0_UCODE1_CHECKSUM__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TLBI_GCR_CNTL_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TILING_CONFIG_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HASH_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE3__RESERVED5_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE3__RESERVED7_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CE_CTRL_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_FED_STATUS_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE3__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE3__RESERVED11_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE3__RESERVED12_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED13_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED14_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED15_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_INT_STATUS_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_LO_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_HI_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED20_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CLOCK_GATING_STATUS_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS4_REG_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_DATA_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_ADDR_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TIMESTAMP_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED26_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED27_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS5_REG_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_QUEUE_RESET_REQ_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS6_REG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_UCODE1_CHECKSUM_MASK 0x80000000L
+//SDMA0_VM_CNTL
+#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA0_F32_CNTL
+#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA0_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA0_F32_CNTL__TH0_CHECKSUM_CLR__SHIFT 0x8
+#define SDMA0_F32_CNTL__TH0_RESET__SHIFT 0x9
+#define SDMA0_F32_CNTL__TH0_ENABLE__SHIFT 0xa
+#define SDMA0_F32_CNTL__TH1_CHECKSUM_CLR__SHIFT 0xc
+#define SDMA0_F32_CNTL__TH1_RESET__SHIFT 0xd
+#define SDMA0_F32_CNTL__TH1_ENABLE__SHIFT 0xe
+#define SDMA0_F32_CNTL__TH0_PRIORITY__SHIFT 0x10
+#define SDMA0_F32_CNTL__TH1_PRIORITY__SHIFT 0x18
+#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA0_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA0_F32_CNTL__TH0_CHECKSUM_CLR_MASK 0x00000100L
+#define SDMA0_F32_CNTL__TH0_RESET_MASK 0x00000200L
+#define SDMA0_F32_CNTL__TH0_ENABLE_MASK 0x00000400L
+#define SDMA0_F32_CNTL__TH1_CHECKSUM_CLR_MASK 0x00001000L
+#define SDMA0_F32_CNTL__TH1_RESET_MASK 0x00002000L
+#define SDMA0_F32_CNTL__TH1_ENABLE_MASK 0x00004000L
+#define SDMA0_F32_CNTL__TH0_PRIORITY_MASK 0x00FF0000L
+#define SDMA0_F32_CNTL__TH1_PRIORITY_MASK 0xFF000000L
+
+
+// addressBlock: gc_sdma0_sdma1hypdec
+//SDMA1_UCODE_ADDR
+#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA1_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA1_UCODE_DATA
+#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_UCODE_SELFLOAD_CONTROL
+#define SDMA1_UCODE_SELFLOAD_CONTROL__GPA__SHIFT 0x0
+#define SDMA1_UCODE_SELFLOAD_CONTROL__SYS__SHIFT 0x1
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CID__SHIFT 0x4
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CACHE_POLICY__SHIFT 0x8
+#define SDMA1_UCODE_SELFLOAD_CONTROL__GPA_MASK 0x00000001L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__SYS_MASK 0x00000002L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CID_MASK 0x000000F0L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CACHE_POLICY_MASK 0x00000300L
+//SDMA1_BROADCAST_UCODE_ADDR
+#define SDMA1_BROADCAST_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_BROADCAST_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA1_BROADCAST_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA1_BROADCAST_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA1_BROADCAST_UCODE_DATA
+#define SDMA1_BROADCAST_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_BROADCAST_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_VM_CTX_LO
+#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_VM_CTX_HI
+#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_ACTIVE_FCN_ID
+#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA1_VM_CTX_CNTL
+#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA1_VM_CTX_CNTL__MEM_PHY__SHIFT 0x8
+#define SDMA1_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE__SHIFT 0x10
+#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+#define SDMA1_VM_CTX_CNTL__MEM_PHY_MASK 0x00000300L
+#define SDMA1_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE_MASK 0x00010000L
+//SDMA1_VIRT_RESET_REQ
+#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA1_CONTEXT_REG_TYPE0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_CNTL__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_HI__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_HI__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_HI__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_CNTL__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_RPTR__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_OFFSET__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_LO__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_HI__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_SIZE__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_SKIP_CNTL__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_DOORBELL__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED31_19__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_CNTL_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_HI_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_CNTL_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_RPTR_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_OFFSET_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_LO_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_HI_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_SIZE_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_SKIP_CNTL_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_DOORBELL_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED31_19_MASK 0xFFF80000L
+//SDMA1_CONTEXT_REG_TYPE1
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED8_0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_LOG__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_SCHEDULE_CNTL__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_PREEMPT__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DUMMY_REG__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_PREEMPT__SHIFT 0x16
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x17
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED8_0_MASK 0x000001FFL
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_SCHEDULE_CNTL_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_PREEMPT_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DUMMY_REG_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_PREEMPT_MASK 0x00400000L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFF800000L
+//SDMA1_CONTEXT_REG_TYPE2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA1_PUB_REG_TYPE0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_DEC_START__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE0__RESERVED_10_1__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE0__SDMA1_F32_MISC_CNTL__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_LO__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_HI__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE0__RESERVED22__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE0__RESERVED23__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE0__RESERVED24__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE0__RESERVED25__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE0__RESERVED27__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE0__SDMA1_DEC_START_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE0__RESERVED_10_1_MASK 0x000007FEL
+#define SDMA1_PUB_REG_TYPE0__SDMA1_F32_MISC_CNTL_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_LO_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_HI_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED22_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED23_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED24_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED25_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED27_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE1__SDMA1_CNTL1__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM0__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM1__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE1__SDMA1_WATCHDOG_CNTL__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE1__RESERVED15__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE1__RESERVED16__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE1__RESERVED17__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_TIMEOUT__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_PAGE__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_CNTL1_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM0_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM1_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_WATCHDOG_CNTL_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED15_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED16_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED17_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_TIMEOUT_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_PAGE_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_STATUS__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GLOBAL_QUANTUM__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE2__RESERVE_22_22__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE2__RESERVED23__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE2__RESERVED24__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE2__RESERVED25__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE2__RESERVED26__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RLC_CGCG_CTRL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE2__SDMA1_AQL_STATUS__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_STATUS_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_STATUS_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GLOBAL_QUANTUM_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE2__RESERVE_22_22_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED23_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED24_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED25_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED26_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RLC_CGCG_CTRL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_AQL_STATUS_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TLBI_GCR_CNTL__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TILING_CONFIG__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HASH__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE3__RESERVED5__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE3__RESERVED7__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CE_CTRL__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE3__SDMA1_FED_STATUS__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE3__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE3__RESERVED11__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE3__RESERVED12__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE3__RESERVED13__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE3__RESERVED14__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE3__RESERVED15__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE3__SDMA1_INT_STATUS__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_LO__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_HI__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE3__RESERVED20__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CLOCK_GATING_STATUS__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS4_REG__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_DATA__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_ADDR__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TIMESTAMP_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE3__RESERVED26__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE3__RESERVED27__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS5_REG__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE3__SDMA1_QUEUE_RESET_REQ__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS6_REG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE3__SDMA1_UCODE1_CHECKSUM__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TLBI_GCR_CNTL_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TILING_CONFIG_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HASH_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE3__RESERVED5_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE3__RESERVED7_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CE_CTRL_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_FED_STATUS_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE3__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE3__RESERVED11_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE3__RESERVED12_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED13_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED14_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED15_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_INT_STATUS_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_LO_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_HI_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED20_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CLOCK_GATING_STATUS_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS4_REG_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_DATA_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_ADDR_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TIMESTAMP_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED26_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED27_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS5_REG_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_QUEUE_RESET_REQ_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS6_REG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_UCODE1_CHECKSUM_MASK 0x80000000L
+//SDMA1_VM_CNTL
+#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA1_F32_CNTL
+#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA1_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA1_F32_CNTL__TH0_CHECKSUM_CLR__SHIFT 0x8
+#define SDMA1_F32_CNTL__TH0_RESET__SHIFT 0x9
+#define SDMA1_F32_CNTL__TH0_ENABLE__SHIFT 0xa
+#define SDMA1_F32_CNTL__TH1_CHECKSUM_CLR__SHIFT 0xc
+#define SDMA1_F32_CNTL__TH1_RESET__SHIFT 0xd
+#define SDMA1_F32_CNTL__TH1_ENABLE__SHIFT 0xe
+#define SDMA1_F32_CNTL__TH0_PRIORITY__SHIFT 0x10
+#define SDMA1_F32_CNTL__TH1_PRIORITY__SHIFT 0x18
+#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA1_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA1_F32_CNTL__TH0_CHECKSUM_CLR_MASK 0x00000100L
+#define SDMA1_F32_CNTL__TH0_RESET_MASK 0x00000200L
+#define SDMA1_F32_CNTL__TH0_ENABLE_MASK 0x00000400L
+#define SDMA1_F32_CNTL__TH1_CHECKSUM_CLR_MASK 0x00001000L
+#define SDMA1_F32_CNTL__TH1_RESET_MASK 0x00002000L
+#define SDMA1_F32_CNTL__TH1_ENABLE_MASK 0x00004000L
+#define SDMA1_F32_CNTL__TH0_PRIORITY_MASK 0x00FF0000L
+#define SDMA1_F32_CNTL__TH1_PRIORITY_MASK 0xFF000000L
+
+
+// addressBlock: gc_sdma0_sdma0perfsdec
+//SDMA0_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA0_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA0_PERFCNT_MISC_CNTL
+#define SDMA0_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA0_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+//SDMA0_PERFCOUNTER0_SELECT
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA0_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER0_SELECT1
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER1_SELECT
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA0_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER1_SELECT1
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+
+
+// addressBlock: gc_sdma0_sdma1perfsdec
+//SDMA1_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA1_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA1_PERFCNT_MISC_CNTL
+#define SDMA1_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA1_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+//SDMA1_PERFCOUNTER0_SELECT
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA1_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER0_SELECT1
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER1_SELECT
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA1_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER1_SELECT1
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+
+
+// addressBlock: gc_sdma0_sdma0perfddec
+//SDMA0_PERFCNT_PERFCOUNTER_LO
+#define SDMA0_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCNT_PERFCOUNTER_HI
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA0_PERFCOUNTER0_LO
+#define SDMA0_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER0_HI
+#define SDMA0_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_LO
+#define SDMA0_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_HI
+#define SDMA0_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_sdma0_sdma1perfddec
+//SDMA1_PERFCNT_PERFCOUNTER_LO
+#define SDMA1_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCNT_PERFCOUNTER_HI
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA1_PERFCOUNTER0_LO
+#define SDMA1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER0_HI
+#define SDMA1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_LO
+#define SDMA1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_HI
+#define SDMA1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_grbmdec
+//GRBM_CNTL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
+#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
+#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
+//GRBM_SKEW_CNTL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
+//GRBM_STATUS2
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
+#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
+#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
+#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
+#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
+#define GRBM_STATUS2__SDMA_SCH_RQ_PENDING__SHIFT 0x13
+#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
+#define GRBM_STATUS2__SDMA_BUSY__SHIFT 0x15
+#define GRBM_STATUS2__SDMA0_RQ_PENDING__SHIFT 0x16
+#define GRBM_STATUS2__SDMA1_RQ_PENDING__SHIFT 0x17
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x1a
+#define GRBM_STATUS2__TCP_BUSY__SHIFT 0x1b
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
+#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
+#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
+#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
+#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
+#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
+#define GRBM_STATUS2__SDMA_SCH_RQ_PENDING_MASK 0x00080000L
+#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
+#define GRBM_STATUS2__SDMA_BUSY_MASK 0x00200000L
+#define GRBM_STATUS2__SDMA0_RQ_PENDING_MASK 0x00400000L
+#define GRBM_STATUS2__SDMA1_RQ_PENDING_MASK 0x00800000L
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x04000000L
+#define GRBM_STATUS2__TCP_BUSY_MASK 0x08000000L
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
+//GRBM_PWR_CNTL
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
+#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
+#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
+#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
+#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
+//GRBM_STATUS
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS__SDMA_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
+#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
+#define GRBM_STATUS__GE_BUSY_NO_DMA__SHIFT 0x10
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
+#define GRBM_STATUS__GE_BUSY__SHIFT 0x15
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
+#define GRBM_STATUS__ANY_ACTIVE__SHIFT 0x1b
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__SDMA_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__GE_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__GE_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__ANY_ACTIVE_MASK 0x08000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+//GRBM_STATUS_SE0
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE0__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE0__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE0__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE0__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE0__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE0__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE0__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE0__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE0__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE0__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE0__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE1
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE1__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE1__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE1__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE1__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE1__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE1__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE1__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE1__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE1__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE1__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE1__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS3
+#define GRBM_STATUS3__GRBM_RLC_INTR_CREDIT_PENDING__SHIFT 0x5
+#define GRBM_STATUS3__GRBM_CPF_INTR_CREDIT_PENDING__SHIFT 0x7
+#define GRBM_STATUS3__MESPIPE0_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS3__MESPIPE1_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS3__PH_BUSY__SHIFT 0xd
+#define GRBM_STATUS3__CH_BUSY__SHIFT 0xe
+#define GRBM_STATUS3__GL2CC_BUSY__SHIFT 0xf
+#define GRBM_STATUS3__GL1CC_BUSY__SHIFT 0x10
+#define GRBM_STATUS3__SEDC_BUSY__SHIFT 0x19
+#define GRBM_STATUS3__PC_BUSY__SHIFT 0x1a
+#define GRBM_STATUS3__GL1H_BUSY__SHIFT 0x1b
+#define GRBM_STATUS3__GUS_LINK_BUSY__SHIFT 0x1c
+#define GRBM_STATUS3__GUS_BUSY__SHIFT 0x1d
+#define GRBM_STATUS3__UTCL1_BUSY__SHIFT 0x1e
+#define GRBM_STATUS3__PMM_BUSY__SHIFT 0x1f
+#define GRBM_STATUS3__GRBM_RLC_INTR_CREDIT_PENDING_MASK 0x00000020L
+#define GRBM_STATUS3__GRBM_CPF_INTR_CREDIT_PENDING_MASK 0x00000080L
+#define GRBM_STATUS3__MESPIPE0_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS3__MESPIPE1_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS3__PH_BUSY_MASK 0x00002000L
+#define GRBM_STATUS3__CH_BUSY_MASK 0x00004000L
+#define GRBM_STATUS3__GL2CC_BUSY_MASK 0x00008000L
+#define GRBM_STATUS3__GL1CC_BUSY_MASK 0x00010000L
+#define GRBM_STATUS3__SEDC_BUSY_MASK 0x02000000L
+#define GRBM_STATUS3__PC_BUSY_MASK 0x04000000L
+#define GRBM_STATUS3__GL1H_BUSY_MASK 0x08000000L
+#define GRBM_STATUS3__GUS_LINK_BUSY_MASK 0x10000000L
+#define GRBM_STATUS3__GUS_BUSY_MASK 0x20000000L
+#define GRBM_STATUS3__UTCL1_BUSY_MASK 0x40000000L
+#define GRBM_STATUS3__PMM_BUSY_MASK 0x80000000L
+//GRBM_SOFT_RESET
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2__SHIFT 0xf
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI__SHIFT 0x15
+#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT 0x17
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA1__SHIFT 0x18
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2_MASK 0x00008000L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI_MASK 0x00200000L
+#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK 0x00800000L
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK 0x01000000L
+//GRBM_GFX_CLKEN_CNTL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
+//GRBM_WAIT_IDLE_CLOCKS
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
+//GRBM_STATUS_SE2
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE2__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE2__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE2__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE2__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE2__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE2__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE2__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE2__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE2__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE2__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE2__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+//GRBM_READ_ERROR
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+//GRBM_READ_ERROR2
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE0__SHIFT 0x9
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE1__SHIFT 0xa
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE2__SHIFT 0xb
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE3__SHIFT 0xc
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA0__SHIFT 0xd
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA1__SHIFT 0xe
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE0_MASK 0x00000200L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE1_MASK 0x00000400L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE2_MASK 0x00000800L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE3_MASK 0x00001000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA0_MASK 0x00002000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA1_MASK 0x00004000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+//GRBM_INT_CNTL
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+//GRBM_TRAP_OP
+#define GRBM_TRAP_OP__RW__SHIFT 0x0
+#define GRBM_TRAP_OP__RW_MASK 0x00000001L
+//GRBM_TRAP_ADDR
+#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_ADDR_MSK
+#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_WD
+#define GRBM_TRAP_WD__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
+//GRBM_TRAP_WD_MSK
+#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
+//GRBM_DSM_BYPASS
+#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
+#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
+#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
+#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
+//GRBM_WRITE_ERROR
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
+#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
+#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x8
+#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
+#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
+#define GRBM_WRITE_ERROR__TMZ__SHIFT 0x11
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL__SHIFT 0x12
+#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
+#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
+#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
+#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000003CL
+#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x00000F00L
+#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
+#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
+#define GRBM_WRITE_ERROR__TMZ_MASK 0x00020000L
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL_MASK 0x00040000L
+#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
+#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
+#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
+//GRBM_CHIP_REVISION
+#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
+#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
+//GRBM_RSMU_CFG
+#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
+#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
+#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
+#define GRBM_RSMU_CFG__DEBUG_MASK__SHIFT 0x11
+#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
+#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
+#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
+#define GRBM_RSMU_CFG__DEBUG_MASK_MASK 0x00020000L
+//GRBM_IH_CREDIT
+#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//GRBM_PWR_CNTL2
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
+//GRBM_UTCL2_INVAL_RANGE_START
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
+//GRBM_UTCL2_INVAL_RANGE_END
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
+//GRBM_RSMU_READ_ERROR
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
+//GRBM_INVALID_PIPE
+#define GRBM_INVALID_PIPE__ADDR__SHIFT 0x2
+#define GRBM_INVALID_PIPE__PIPEID__SHIFT 0x14
+#define GRBM_INVALID_PIPE__MEID__SHIFT 0x16
+#define GRBM_INVALID_PIPE__QUEUEID__SHIFT 0x18
+#define GRBM_INVALID_PIPE__SSRCID__SHIFT 0x1b
+#define GRBM_INVALID_PIPE__INVALID_PIPE__SHIFT 0x1f
+#define GRBM_INVALID_PIPE__ADDR_MASK 0x000FFFFCL
+#define GRBM_INVALID_PIPE__PIPEID_MASK 0x00300000L
+#define GRBM_INVALID_PIPE__MEID_MASK 0x00C00000L
+#define GRBM_INVALID_PIPE__QUEUEID_MASK 0x07000000L
+#define GRBM_INVALID_PIPE__SSRCID_MASK 0x78000000L
+#define GRBM_INVALID_PIPE__INVALID_PIPE_MASK 0x80000000L
+//GRBM_FENCE_RANGE0
+#define GRBM_FENCE_RANGE0__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE0__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE0__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE0__END_MASK 0xFFFF0000L
+//GRBM_FENCE_RANGE1
+#define GRBM_FENCE_RANGE1__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE1__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE1__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE1__END_MASK 0xFFFF0000L
+//GRBM_SCRATCH_REG0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG1
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG2
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG3
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG4
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG5
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG6
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG7
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//VIOLATION_DATA_ASYNC_VF_PROG
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID__SHIFT 0x0
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID__SHIFT 0x4
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR__SHIFT 0x1f
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID_MASK 0x0000000FL
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID_MASK 0x000003F0L
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR_MASK 0x80000000L
+
+
+// addressBlock: gc_cpdec
+//CP_CPC_DEBUG_CNTL
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_CPF_DEBUG_CNTL
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_CPC_STATUS
+#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
+#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
+#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
+#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
+#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
+#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
+#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
+#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
+#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
+#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
+#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
+#define CP_CPC_STATUS__GCRIU_BUSY__SHIFT 0xf
+#define CP_CPC_STATUS__MES_BUSY__SHIFT 0x10
+#define CP_CPC_STATUS__MES_SCRATCH_RAM_BUSY__SHIFT 0x11
+#define CP_CPC_STATUS__RCIU3_BUSY__SHIFT 0x12
+#define CP_CPC_STATUS__MES_INSTRUCTION_CACHE_BUSY__SHIFT 0x13
+#define CP_CPC_STATUS__MES_DATA_CACHE_BUSY__SHIFT 0x14
+#define CP_CPC_STATUS__MEC_DATA_CACHE_BUSY__SHIFT 0x15
+#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
+#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
+#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
+#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
+#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
+#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
+#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
+#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
+#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
+#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
+#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
+#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
+#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
+#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
+#define CP_CPC_STATUS__GCRIU_BUSY_MASK 0x00008000L
+#define CP_CPC_STATUS__MES_BUSY_MASK 0x00010000L
+#define CP_CPC_STATUS__MES_SCRATCH_RAM_BUSY_MASK 0x00020000L
+#define CP_CPC_STATUS__RCIU3_BUSY_MASK 0x00040000L
+#define CP_CPC_STATUS__MES_INSTRUCTION_CACHE_BUSY_MASK 0x00080000L
+#define CP_CPC_STATUS__MES_DATA_CACHE_BUSY_MASK 0x00100000L
+#define CP_CPC_STATUS__MEC_DATA_CACHE_BUSY_MASK 0x00200000L
+#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
+#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
+#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
+//CP_CPC_BUSY_STAT
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPHORE_BUSY__SHIFT 0x1
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPHORE_BUSY__SHIFT 0x11
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPHORE_BUSY_MASK 0x00000002L
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPHORE_BUSY_MASK 0x00020000L
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
+//CP_CPC_STALLED_STAT1
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
+#define CP_CPC_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x7
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
+#define CP_CPC_STALLED_STAT1__GCRIU_WAITING_ON_FREE__SHIFT 0x19
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
+#define CP_CPC_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000080L
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
+#define CP_CPC_STALLED_STAT1__GCRIU_WAITING_ON_FREE_MASK 0x02000000L
+//CP_CPF_STATUS
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
+#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
+#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
+#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
+#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
+#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
+#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
+#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
+#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
+#define CP_CPF_STATUS__RCIU_BUSY__SHIFT 0x12
+#define CP_CPF_STATUS__RCIU_GFX_BUSY__SHIFT 0x13
+#define CP_CPF_STATUS__RCIU_CMP_BUSY__SHIFT 0x14
+#define CP_CPF_STATUS__ROQ_DATA_BUSY__SHIFT 0x15
+#define CP_CPF_STATUS__ROQ_CE_DATA_BUSY__SHIFT 0x16
+#define CP_CPF_STATUS__GCRIU_BUSY__SHIFT 0x17
+#define CP_CPF_STATUS__MES_HQD_BUSY__SHIFT 0x18
+#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
+#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
+#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
+#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
+#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
+#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
+#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
+#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
+#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
+#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
+#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
+#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
+#define CP_CPF_STATUS__RCIU_BUSY_MASK 0x00040000L
+#define CP_CPF_STATUS__RCIU_GFX_BUSY_MASK 0x00080000L
+#define CP_CPF_STATUS__RCIU_CMP_BUSY_MASK 0x00100000L
+#define CP_CPF_STATUS__ROQ_DATA_BUSY_MASK 0x00200000L
+#define CP_CPF_STATUS__ROQ_CE_DATA_BUSY_MASK 0x00400000L
+#define CP_CPF_STATUS__GCRIU_BUSY_MASK 0x00800000L
+#define CP_CPF_STATUS__MES_HQD_BUSY_MASK 0x01000000L
+#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
+#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
+#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
+#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
+//CP_CPF_BUSY_STAT
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_STAT__CSF_DATA_BUSY__SHIFT 0x9
+#define CP_CPF_BUSY_STAT__CSF_CE_DATA_BUSY__SHIFT 0xa
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
+#define CP_CPF_BUSY_STAT__CSF_DATA_BUSY_MASK 0x00000200L
+#define CP_CPF_BUSY_STAT__CSF_CE_DATA_BUSY_MASK 0x00000400L
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
+//CP_CPF_STALLED_STAT1
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
+#define CP_CPF_STALLED_STAT1__DATA_FETCHING_DATA__SHIFT 0xc
+#define CP_CPF_STALLED_STAT1__GCRIU_WAIT_ON_FREE__SHIFT 0xd
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
+#define CP_CPF_STALLED_STAT1__DATA_FETCHING_DATA_MASK 0x00001000L
+#define CP_CPF_STALLED_STAT1__GCRIU_WAIT_ON_FREE_MASK 0x00002000L
+//CP_CPC_BUSY_STAT2
+#define CP_CPC_BUSY_STAT2__MES_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT2__MES_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT2__MES_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT2__MES_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT2__MES_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT2__MES_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT2__MES_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT2__MES_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT2__MES_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT2__MES_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT2__MES_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT2__MES_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT2__MES_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT2__MES_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT2__MES_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT2__MES_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT2__MES_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT2__MES_PIPE3_BUSY_MASK 0x00002000L
+//CP_CPC_GRBM_FREE_COUNT
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+//CP_CPC_PRIV_VIOLATION_ADDR
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0003FFFFL
+//CP_MEC_ME1_HEADER_DUMP
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_HEADER_DUMP
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_CPC_SCRATCH_INDEX
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_CPC_SCRATCH_DATA
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_CPF_GRBM_FREE_COUNT
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
+//CP_CPF_BUSY_STAT2
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPG_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPC_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT2__MES_HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT2__MES_HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT2__MES_HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT2__MES_HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT2__MES_HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPG_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPC_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT2__MES_HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_BUSY_MASK 0x40000000L
+//CP_CPC_HALT_HYST_COUNT
+#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
+#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
+//CP_STALLED_STAT3
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
+#define CP_STALLED_STAT3__GCRIU_WAITING_ON_FREE__SHIFT 0x15
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
+#define CP_STALLED_STAT3__GCRIU_WAITING_ON_FREE_MASK 0x00200000L
+//CP_STALLED_STAT1
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R0__SHIFT 0x2
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R1__SHIFT 0x3
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R0__SHIFT 0x4
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R1__SHIFT 0x5
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R0_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R1_MASK 0x00000008L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R0_MASK 0x00000010L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R1_MASK 0x00000020L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
+//CP_STALLED_STAT2
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
+#define CP_STALLED_STAT2__PFP_TO_MEQ_DDID_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_PULSE__SHIFT 0x15
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_WR_CONFIRM__SHIFT 0x16
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_DDID_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_PULSE_MASK 0x00200000L
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+//CP_BUSY_STAT
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+//CP_STAT
+#define CP_STAT__ROQ_DB_BUSY__SHIFT 0x5
+#define CP_STAT__ROQ_CE_DB_BUSY__SHIFT 0x6
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
+#define CP_STAT__DC_BUSY__SHIFT 0xd
+#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
+#define CP_STAT__PFP_BUSY__SHIFT 0xf
+#define CP_STAT__MEQ_BUSY__SHIFT 0x10
+#define CP_STAT__ME_BUSY__SHIFT 0x11
+#define CP_STAT__QUERY_BUSY__SHIFT 0x12
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
+#define CP_STAT__DMA_BUSY__SHIFT 0x16
+#define CP_STAT__RCIU_BUSY__SHIFT 0x17
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
+#define CP_STAT__GCRIU_BUSY__SHIFT 0x19
+#define CP_STAT__CE_BUSY__SHIFT 0x1a
+#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
+#define CP_STAT__CP_BUSY__SHIFT 0x1f
+#define CP_STAT__ROQ_DB_BUSY_MASK 0x00000020L
+#define CP_STAT__ROQ_CE_DB_BUSY_MASK 0x00000040L
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__GCRIU_BUSY_MASK 0x02000000L
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+//CP_ME_HEADER_DUMP
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_HEADER_DUMP
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_GRBM_FREE_COUNT
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
+//CP_PFP_INSTR_PNTR
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_ME_INSTR_PNTR
+#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC1_INSTR_PNTR
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC2_INSTR_PNTR
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CSF_STAT
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
+//CP_CNTX_STAT
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+//CP_ME_PREEMPTION
+#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
+#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
+//CP_RB1_RPTR
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB0_RPTR
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_RPTR
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_WPTR_DELAY
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
+//CP_RB_WPTR_POLL_CNTL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//CP_ROQ1_THRESHOLDS
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0xa
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x14
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000003FFL
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x000FFC00L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0x3FF00000L
+//CP_ROQ2_THRESHOLDS
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x0
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0xa
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x000003FFL
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x000FFC00L
+//CP_STQ_THRESHOLDS
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
+//CP_MEQ_THRESHOLDS
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
+//CP_ROQ_AVAIL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x00000FFFL
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x0FFF0000L
+//CP_STQ_AVAIL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
+//CP_ROQ2_AVAIL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
+#define CP_ROQ2_AVAIL__ROQ_CNT_DB__SHIFT 0x10
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x00000FFFL
+#define CP_ROQ2_AVAIL__ROQ_CNT_DB_MASK 0x0FFF0000L
+//CP_MEQ_AVAIL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
+//CP_CMD_INDEX
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
+//CP_CMD_DATA
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
+#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
+//CP_ROQ_RB_STAT
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x00000FFFL
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x0FFF0000L
+//CP_ROQ_IB1_STAT
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x00000FFFL
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x0FFF0000L
+//CP_ROQ_IB2_STAT
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x00000FFFL
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x0FFF0000L
+//CP_STQ_STAT
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
+//CP_STQ_WR_STAT
+#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
+#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
+//CP_MEQ_STAT
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
+//CP_ROQ3_THRESHOLDS
+#define CP_ROQ3_THRESHOLDS__R0_DB_START__SHIFT 0x0
+#define CP_ROQ3_THRESHOLDS__R1_DB_START__SHIFT 0xa
+#define CP_ROQ3_THRESHOLDS__R0_DB_START_MASK 0x000003FFL
+#define CP_ROQ3_THRESHOLDS__R1_DB_START_MASK 0x000FFC00L
+//CP_ROQ_DB_STAT
+#define CP_ROQ_DB_STAT__ROQ_RPTR_DB__SHIFT 0x0
+#define CP_ROQ_DB_STAT__ROQ_WPTR_DB__SHIFT 0x10
+#define CP_ROQ_DB_STAT__ROQ_RPTR_DB_MASK 0x00000FFFL
+#define CP_ROQ_DB_STAT__ROQ_WPTR_DB_MASK 0x0FFF0000L
+//CP_INT_STAT_DEBUG
+#define CP_INT_STAT_DEBUG__RESUME_INT_ASSERTED__SHIFT 0x8
+#define CP_INT_STAT_DEBUG__SUSPEND_INT_ASSERTED__SHIFT 0x9
+#define CP_INT_STAT_DEBUG__DMA_WATCH_INT_ASSERTED__SHIFT 0xa
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED__SHIFT 0xb
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_INT_STAT_DEBUG__FUE_INT_STATUS_DEBUG__SHIFT 0xf
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED__SHIFT 0x12
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x13
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x14
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED__SHIFT 0x15
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_INT_STAT_DEBUG__RESUME_INT_ASSERTED_MASK 0x00000100L
+#define CP_INT_STAT_DEBUG__SUSPEND_INT_ASSERTED_MASK 0x00000200L
+#define CP_INT_STAT_DEBUG__DMA_WATCH_INT_ASSERTED_MASK 0x00000400L
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED_MASK 0x00000800L
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_INT_STAT_DEBUG__FUE_INT_STATUS_DEBUG_MASK 0x00008000L
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED_MASK 0x00040000L
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x00080000L
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x00100000L
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED_MASK 0x00200000L
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_DEBUG_CNTL
+#define CP_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_PRIV_VIOLATION_ADDR
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0003FFFFL
+
+
+// addressBlock: gc_padec
+//VGT_DMA_DATA_FIFO_DEPTH
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000003FFL
+//VGT_DMA_REQ_FIFO_DEPTH
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_DRAW_INIT_FIFO_DEPTH
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_MC_LAT_CNTL
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
+//IA_UTCL1_STATUS_2
+#define IA_UTCL1_STATUS_2__IA_BUSY__SHIFT 0x0
+#define IA_UTCL1_STATUS_2__IA_DMA_BUSY__SHIFT 0x1
+#define IA_UTCL1_STATUS_2__IA_DMA_REQ_BUSY__SHIFT 0x2
+#define IA_UTCL1_STATUS_2__IA_GRP_BUSY__SHIFT 0x3
+#define IA_UTCL1_STATUS_2__IA_ADC_BUSY__SHIFT 0x4
+#define IA_UTCL1_STATUS_2__FAULT_DETECTED__SHIFT 0x5
+#define IA_UTCL1_STATUS_2__RETRY_DETECTED__SHIFT 0x6
+#define IA_UTCL1_STATUS_2__PRT_DETECTED__SHIFT 0x7
+#define IA_UTCL1_STATUS_2__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS_2__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS_2__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS_2__IA_BUSY_MASK 0x00000001L
+#define IA_UTCL1_STATUS_2__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_UTCL1_STATUS_2__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_UTCL1_STATUS_2__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_UTCL1_STATUS_2__IA_ADC_BUSY_MASK 0x00000010L
+#define IA_UTCL1_STATUS_2__FAULT_DETECTED_MASK 0x00000020L
+#define IA_UTCL1_STATUS_2__RETRY_DETECTED_MASK 0x00000040L
+#define IA_UTCL1_STATUS_2__PRT_DETECTED_MASK 0x00000080L
+#define IA_UTCL1_STATUS_2__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS_2__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS_2__PRT_UTCL1ID_MASK 0x3F000000L
+//WD_CNTL_STATUS
+#define WD_CNTL_STATUS__DIST_BUSY__SHIFT 0x0
+#define WD_CNTL_STATUS__DIST_BE_BUSY__SHIFT 0x1
+#define WD_CNTL_STATUS__GE_UTCL1_BUSY__SHIFT 0x2
+#define WD_CNTL_STATUS__WD_TE11_BUSY__SHIFT 0x3
+#define WD_CNTL_STATUS__PC_MANAGER_BUSY__SHIFT 0x4
+#define WD_CNTL_STATUS__WLC_BUSY__SHIFT 0x5
+#define WD_CNTL_STATUS__DIST_BUSY_MASK 0x00000001L
+#define WD_CNTL_STATUS__DIST_BE_BUSY_MASK 0x00000002L
+#define WD_CNTL_STATUS__GE_UTCL1_BUSY_MASK 0x00000004L
+#define WD_CNTL_STATUS__WD_TE11_BUSY_MASK 0x00000008L
+#define WD_CNTL_STATUS__PC_MANAGER_BUSY_MASK 0x00000010L
+#define WD_CNTL_STATUS__WLC_BUSY_MASK 0x00000020L
+//CC_GC_PRIM_CONFIG
+#define CC_GC_PRIM_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_PRIM_CONFIG__INACTIVE_PA__SHIFT 0x4
+#define CC_GC_PRIM_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_PRIM_CONFIG__INACTIVE_PA_MASK 0x000FFFF0L
+//WD_QOS
+#define WD_QOS__DRAW_STALL__SHIFT 0x0
+#define WD_QOS__DRAW_STALL_MASK 0x00000001L
+//WD_UTCL1_CNTL
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define WD_UTCL1_CNTL__MTYPE_OVERRIDE__SHIFT 0x1d
+#define WD_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE__SHIFT 0x1e
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define WD_UTCL1_CNTL__MTYPE_OVERRIDE_MASK 0x20000000L
+#define WD_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE_MASK 0x40000000L
+//WD_UTCL1_STATUS
+#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//IA_UTCL1_CNTL
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define IA_UTCL1_CNTL__MTYPE_OVERRIDE__SHIFT 0x1d
+#define IA_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE__SHIFT 0x1e
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define IA_UTCL1_CNTL__MTYPE_OVERRIDE_MASK 0x20000000L
+#define IA_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE_MASK 0x40000000L
+//IA_UTCL1_STATUS
+#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CC_GC_SA_UNIT_DISABLE
+#define CC_GC_SA_UNIT_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define CC_GC_SA_UNIT_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x00FFFF00L
+//GE_RATE_CNTL_1
+#define GE_RATE_CNTL_1__ADD_X_CLKS_LS_VERT__SHIFT 0x0
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_LS_VERT__SHIFT 0x4
+#define GE_RATE_CNTL_1__ADD_X_CLKS_HS_VERT__SHIFT 0x8
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_HS_VERT__SHIFT 0xc
+#define GE_RATE_CNTL_1__ADD_X_CLKS_ES_VERT__SHIFT 0x10
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_ES_VERT__SHIFT 0x14
+#define GE_RATE_CNTL_1__ADD_X_CLKS_GS_PRIM__SHIFT 0x18
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_GS_PRIM__SHIFT 0x1c
+#define GE_RATE_CNTL_1__ADD_X_CLKS_LS_VERT_MASK 0x0000000FL
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_LS_VERT_MASK 0x000000F0L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_HS_VERT_MASK 0x00000F00L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_HS_VERT_MASK 0x0000F000L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_ES_VERT_MASK 0x000F0000L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_ES_VERT_MASK 0x00F00000L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_GS_PRIM_MASK 0x0F000000L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_GS_PRIM_MASK 0xF0000000L
+//GE_RATE_CNTL_2
+#define GE_RATE_CNTL_2__ADD_X_CLKS_VS_VERT__SHIFT 0x0
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_VS_VERT__SHIFT 0x4
+#define GE_RATE_CNTL_2__ADD_X_CLKS_PA_PRIM__SHIFT 0x8
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_PA_PRIM__SHIFT 0xc
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_HS_GS__SHIFT 0x10
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_LS_ES__SHIFT 0x14
+#define GE_RATE_CNTL_2__MERGED_HS_GS_MODE__SHIFT 0x18
+#define GE_RATE_CNTL_2__MERGED_LS_ES_MODE__SHIFT 0x19
+#define GE_RATE_CNTL_2__ENABLE_RATE_CNTL__SHIFT 0x1a
+#define GE_RATE_CNTL_2__SWAP_PRIORITY__SHIFT 0x1b
+#define GE_RATE_CNTL_2__ADD_X_CLKS_VS_VERT_MASK 0x0000000FL
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_VS_VERT_MASK 0x000000F0L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_PA_PRIM_MASK 0x00000F00L
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_PA_PRIM_MASK 0x0000F000L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_HS_GS_MASK 0x000F0000L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_LS_ES_MASK 0x00F00000L
+#define GE_RATE_CNTL_2__MERGED_HS_GS_MODE_MASK 0x01000000L
+#define GE_RATE_CNTL_2__MERGED_LS_ES_MODE_MASK 0x02000000L
+#define GE_RATE_CNTL_2__ENABLE_RATE_CNTL_MASK 0x04000000L
+#define GE_RATE_CNTL_2__SWAP_PRIORITY_MASK 0x08000000L
+//VGT_SYS_CONFIG
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
+#define VGT_SYS_CONFIG__NUM_SUBGROUPS_IN_FLIGHT__SHIFT 0x8
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+#define VGT_SYS_CONFIG__NUM_SUBGROUPS_IN_FLIGHT_MASK 0x0007FF00L
+//GE_PRIV_CONTROL
+#define GE_PRIV_CONTROL__RESERVED__SHIFT 0x0
+#define GE_PRIV_CONTROL__CLAMP_PRIMGRP_SIZE__SHIFT 0x1
+#define GE_PRIV_CONTROL__RESET_ON_PIPELINE_CHANGE__SHIFT 0xa
+#define GE_PRIV_CONTROL__FGCG_OVERRIDE__SHIFT 0xf
+#define GE_PRIV_CONTROL__CLAMP_HS_OFFCHIP_PER_SE_OVERRIDE__SHIFT 0x10
+#define GE_PRIV_CONTROL__DISABLE_ACCUM_AGM__SHIFT 0x11
+#define GE_PRIV_CONTROL__RESERVED_MASK 0x00000001L
+#define GE_PRIV_CONTROL__CLAMP_PRIMGRP_SIZE_MASK 0x000003FEL
+#define GE_PRIV_CONTROL__RESET_ON_PIPELINE_CHANGE_MASK 0x00000400L
+#define GE_PRIV_CONTROL__FGCG_OVERRIDE_MASK 0x00008000L
+#define GE_PRIV_CONTROL__CLAMP_HS_OFFCHIP_PER_SE_OVERRIDE_MASK 0x00010000L
+#define GE_PRIV_CONTROL__DISABLE_ACCUM_AGM_MASK 0x00020000L
+//GE_STATUS
+#define GE_STATUS__PERFCOUNTER_STATUS__SHIFT 0x0
+#define GE_STATUS__THREAD_TRACE_STATUS__SHIFT 0x1
+#define GE_STATUS__PERFCOUNTER_STATUS_MASK 0x00000001L
+#define GE_STATUS__THREAD_TRACE_STATUS_MASK 0x00000002L
+//VGT_GS_MAX_WAVE_ID
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GFX_PIPE_CONTROL
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
+#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_STALL_EN__SHIFT 0x11
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
+#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_STALL_EN_MASK 0x00020000L
+//CC_GC_SHADER_ARRAY_CONFIG
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT 0x10
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK 0xFFFF0000L
+//GE2_SE_CNTL_STATUS
+#define GE2_SE_CNTL_STATUS__TE_BUSY__SHIFT 0x0
+#define GE2_SE_CNTL_STATUS__NGG_BUSY__SHIFT 0x1
+#define GE2_SE_CNTL_STATUS__HS_BUSY__SHIFT 0x2
+#define GE2_SE_CNTL_STATUS__TE_BUSY_MASK 0x00000001L
+#define GE2_SE_CNTL_STATUS__NGG_BUSY_MASK 0x00000002L
+#define GE2_SE_CNTL_STATUS__HS_BUSY_MASK 0x00000004L
+//VGT_RESET_DEBUG
+#define VGT_RESET_DEBUG__GS_DISABLE__SHIFT 0x0
+#define VGT_RESET_DEBUG__TESS_DISABLE__SHIFT 0x1
+#define VGT_RESET_DEBUG__WD_DISABLE__SHIFT 0x2
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE0__SHIFT 0x3
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE1__SHIFT 0x4
+#define VGT_RESET_DEBUG__ENABLE_VMID_RESET_UTCL1__SHIFT 0x5
+#define VGT_RESET_DEBUG__DISABLE_PREFETCH__SHIFT 0x6
+#define VGT_RESET_DEBUG__DISABLE_SWITCH_MODE_STALL_FIX__SHIFT 0x7
+#define VGT_RESET_DEBUG__DISABLE_SENDING_MULTIPLE_SE_IN_PD__SHIFT 0x8
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_ON_OFF__SHIFT 0x9
+#define VGT_RESET_DEBUG__DISABLE_PATCH_OPTIMIZATION__SHIFT 0xa
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_OFF_ON__SHIFT 0xb
+#define VGT_RESET_DEBUG__DISABLE_MERGE_GRP_PERF_FIX__SHIFT 0xc
+#define VGT_RESET_DEBUG__DISABLE_MESH_SHADER_ATTR_PACKING__SHIFT 0xd
+#define VGT_RESET_DEBUG__ENABLE_SMALL_INST_PACK_ADJ_GS_OFF__SHIFT 0xe
+#define VGT_RESET_DEBUG__DISABLE_PATCH_DIST_LAST_DONUT_SE_SWITCH_LOGIC__SHIFT 0xf
+#define VGT_RESET_DEBUG__SPARE__SHIFT 0x10
+#define VGT_RESET_DEBUG__GS_DISABLE_MASK 0x00000001L
+#define VGT_RESET_DEBUG__TESS_DISABLE_MASK 0x00000002L
+#define VGT_RESET_DEBUG__WD_DISABLE_MASK 0x00000004L
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE0_MASK 0x00000008L
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE1_MASK 0x00000010L
+#define VGT_RESET_DEBUG__ENABLE_VMID_RESET_UTCL1_MASK 0x00000020L
+#define VGT_RESET_DEBUG__DISABLE_PREFETCH_MASK 0x00000040L
+#define VGT_RESET_DEBUG__DISABLE_SWITCH_MODE_STALL_FIX_MASK 0x00000080L
+#define VGT_RESET_DEBUG__DISABLE_SENDING_MULTIPLE_SE_IN_PD_MASK 0x00000100L
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_ON_OFF_MASK 0x00000200L
+#define VGT_RESET_DEBUG__DISABLE_PATCH_OPTIMIZATION_MASK 0x00000400L
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_OFF_ON_MASK 0x00000800L
+#define VGT_RESET_DEBUG__DISABLE_MERGE_GRP_PERF_FIX_MASK 0x00001000L
+#define VGT_RESET_DEBUG__DISABLE_MESH_SHADER_ATTR_PACKING_MASK 0x00002000L
+#define VGT_RESET_DEBUG__ENABLE_SMALL_INST_PACK_ADJ_GS_OFF_MASK 0x00004000L
+#define VGT_RESET_DEBUG__DISABLE_PATCH_DIST_LAST_DONUT_SE_SWITCH_LOGIC_MASK 0x00008000L
+#define VGT_RESET_DEBUG__SPARE_MASK 0xFFFF0000L
+//GE_SPI_IF_SAFE_REG
+#define GE_SPI_IF_SAFE_REG__GE_SPI_LS_ES_DATA__SHIFT 0x0
+#define GE_SPI_IF_SAFE_REG__GE_SPI_HS_GS_DATA__SHIFT 0x6
+#define GE_SPI_IF_SAFE_REG__GE_SPI_GRP__SHIFT 0xc
+#define GE_SPI_IF_SAFE_REG__GE_SPI_LS_ES_DATA_MASK 0x0000003FL
+#define GE_SPI_IF_SAFE_REG__GE_SPI_HS_GS_DATA_MASK 0x00000FC0L
+#define GE_SPI_IF_SAFE_REG__GE_SPI_GRP_MASK 0x0003F000L
+//GE_PA_IF_SAFE_REG
+#define GE_PA_IF_SAFE_REG__GE_PA_CSB__SHIFT 0x0
+#define GE_PA_IF_SAFE_REG__GE_PA_PAYLOAD__SHIFT 0xa
+#define GE_PA_IF_SAFE_REG__GE_PA_CSB_MASK 0x000003FFL
+#define GE_PA_IF_SAFE_REG__GE_PA_PAYLOAD_MASK 0x000FFC00L
+//PA_CL_CNTL_STATUS
+#define PA_CL_CNTL_STATUS__CL_BUSY__SHIFT 0x1f
+#define PA_CL_CNTL_STATUS__CL_BUSY_MASK 0x80000000L
+//PA_CL_ENHANCE
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x5
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE__SHIFT 0x11
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE__SHIFT 0x12
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE__SHIFT 0x13
+#define PA_CL_ENHANCE__DISABLE_PA_PH_INTF_FINE_CLOCK_GATE__SHIFT 0x14
+#define PA_CL_ENHANCE__DISABLE_PA_SX_REQ_INTF_FINE_CLOCK_GATE__SHIFT 0x15
+#define PA_CL_ENHANCE__ENABLE_PA_RATE_CNTL__SHIFT 0x16
+#define PA_CL_ENHANCE__CLAMP_NEGATIVE_BB_TO_ZERO__SHIFT 0x17
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE_MASK 0x00020000L
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE_MASK 0x00040000L
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE_MASK 0x00080000L
+#define PA_CL_ENHANCE__DISABLE_PA_PH_INTF_FINE_CLOCK_GATE_MASK 0x00100000L
+#define PA_CL_ENHANCE__DISABLE_PA_SX_REQ_INTF_FINE_CLOCK_GATE_MASK 0x00200000L
+#define PA_CL_ENHANCE__ENABLE_PA_RATE_CNTL_MASK 0x00400000L
+#define PA_CL_ENHANCE__CLAMP_NEGATIVE_BB_TO_ZERO_MASK 0x00800000L
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+//PA_CL_RESET_DEBUG
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE__SHIFT 0x0
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE_MASK 0x00000001L
+//PA_SU_CNTL_STATUS
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+//PA_SC_FIFO_DEPTH_CNTL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
+
+
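The *__SHIFT/*_MASK pairs in these addressBlock sections follow the usual one-shift-plus-one-mask convention per bitfield. Below is a minimal sketch (not part of the patch) of how such a pair can be applied to a 32-bit register value; the helper names are invented for illustration, and only macros defined in the gc_padec block above are assumed.

#include <stdint.h>

/* Extract a bitfield from a register value using its generated MASK/SHIFT pair. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a bitfield in a register value, leaving the other bits untouched. */
static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Illustration only: decode PA_SC_FIFO_DEPTH_CNTL.DEPTH and set
 * WD_UTCL1_CNTL.INVALIDATE in cached register values. Both macro
 * pairs come from the gc_padec block above; register offsets and
 * MMIO accessors are deliberately left out of this sketch.
 */
static uint32_t sc_fifo_depth(uint32_t pa_sc_fifo_depth_cntl)
{
	return field_get(pa_sc_fifo_depth_cntl,
			 PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK,
			 PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT);
}

static uint32_t invalidate_wd_utcl1(uint32_t wd_utcl1_cntl)
{
	return field_set(wd_utcl1_cntl,
			 WD_UTCL1_CNTL__INVALIDATE_MASK,
			 WD_UTCL1_CNTL__INVALIDATE__SHIFT, 1);
}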
+// addressBlock: gc_sqdec
+//SQ_CONFIG
+#define SQ_CONFIG__ECO_SPARE__SHIFT 0x0
+#define SQ_CONFIG__NEW_TRANS_ARB_SCHEME__SHIFT 0x8
+#define SQ_CONFIG__DISABLE_VMEM_EXEC_ZERO_SKIP__SHIFT 0x9
+#define SQ_CONFIG__DISABLE_SGPR_RD_KILL__SHIFT 0xa
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_GS__SHIFT 0x12
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_GS__SHIFT 0x13
+#define SQ_CONFIG__WCLK_HYSTERESIS_CNT__SHIFT 0x15
+#define SQ_CONFIG__DISABLE_END_CLAUSE_TX__SHIFT 0x1b
+#define SQ_CONFIG__ECO_SPARE_MASK 0x000000FFL
+#define SQ_CONFIG__NEW_TRANS_ARB_SCHEME_MASK 0x00000100L
+#define SQ_CONFIG__DISABLE_VMEM_EXEC_ZERO_SKIP_MASK 0x00000200L
+#define SQ_CONFIG__DISABLE_SGPR_RD_KILL_MASK 0x00000400L
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_GS_MASK 0x00040000L
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_GS_MASK 0x00180000L
+#define SQ_CONFIG__WCLK_HYSTERESIS_CNT_MASK 0x00600000L
+#define SQ_CONFIG__DISABLE_END_CLAUSE_TX_MASK 0x08000000L
+//SQC_CONFIG
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0x9
+#define SQC_CONFIG__EVICT_LRU__SHIFT 0xa
+#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xc
+#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xd
+#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0xe
+#define SQC_CONFIG__CACHE_CTRL_GCR_FIX_DISABLE__SHIFT 0x16
+#define SQC_CONFIG__CACHE_CTRL_ALMOST_MAX_INFLIGHT_CONFIG__SHIFT 0x17
+#define SQC_CONFIG__SPARE__SHIFT 0x1a
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000200L
+#define SQC_CONFIG__EVICT_LRU_MASK 0x00000C00L
+#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00001000L
+#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00002000L
+#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x003FC000L
+#define SQC_CONFIG__CACHE_CTRL_GCR_FIX_DISABLE_MASK 0x00400000L
+#define SQC_CONFIG__CACHE_CTRL_ALMOST_MAX_INFLIGHT_CONFIG_MASK 0x03800000L
+#define SQC_CONFIG__SPARE_MASK 0xFC000000L
+//LDS_CONFIG
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
+#define LDS_CONFIG__CONF_BIT_1__SHIFT 0x1
+#define LDS_CONFIG__WAVE32_INTERP_DUAL_ISSUE_DISABLE__SHIFT 0x2
+#define LDS_CONFIG__SP_TDDATA_FGCG_OVERRIDE__SHIFT 0x3
+#define LDS_CONFIG__SQC_PERF_FGCG_OVERRIDE__SHIFT 0x4
+#define LDS_CONFIG__CONF_BIT_5__SHIFT 0x5
+#define LDS_CONFIG__CONF_BIT_6__SHIFT 0x6
+#define LDS_CONFIG__CONF_BIT_7__SHIFT 0x7
+#define LDS_CONFIG__CONF_BIT_8__SHIFT 0x8
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
+#define LDS_CONFIG__CONF_BIT_1_MASK 0x00000002L
+#define LDS_CONFIG__WAVE32_INTERP_DUAL_ISSUE_DISABLE_MASK 0x00000004L
+#define LDS_CONFIG__SP_TDDATA_FGCG_OVERRIDE_MASK 0x00000008L
+#define LDS_CONFIG__SQC_PERF_FGCG_OVERRIDE_MASK 0x00000010L
+#define LDS_CONFIG__CONF_BIT_5_MASK 0x00000020L
+#define LDS_CONFIG__CONF_BIT_6_MASK 0x00000040L
+#define LDS_CONFIG__CONF_BIT_7_MASK 0x00000080L
+#define LDS_CONFIG__CONF_BIT_8_MASK 0x00000100L
+//SQ_RANDOM_WAVE_PRI
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
+#define SQ_RANDOM_WAVE_PRI__FORCE_IB_ARB_PRIO_MSK_VALID__SHIFT 0x1f
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x00FFFC00L
+#define SQ_RANDOM_WAVE_PRI__FORCE_IB_ARB_PRIO_MSK_VALID_MASK 0x80000000L
+//SQG_STATUS
+#define SQG_STATUS__REG_BUSY__SHIFT 0x0
+#define SQG_STATUS__REG_BUSY_MASK 0x00000001L
+//SQ_FIFO_SIZES
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
+#define SQ_FIFO_SIZES__EXPORT_BUF_GS_RESERVED__SHIFT 0xc
+#define SQ_FIFO_SIZES__EXPORT_BUF_PS_RESERVED__SHIFT 0xe
+#define SQ_FIFO_SIZES__EXPORT_BUF_REDUCE__SHIFT 0x10
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
+#define SQ_FIFO_SIZES__EXPORT_BUF_PRIMPOS_LIMIT__SHIFT 0x14
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000300L
+#define SQ_FIFO_SIZES__EXPORT_BUF_GS_RESERVED_MASK 0x00003000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_PS_RESERVED_MASK 0x0000C000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_REDUCE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_PRIMPOS_LIMIT_MASK 0x00300000L
+//SQ_DSM_CNTL
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQ_DSM_CNTL2
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
+//SP_CONFIG
+#define SP_CONFIG__DEST_CACHE_EVICT_COUNTER__SHIFT 0x0
+#define SP_CONFIG__ALU_BUSY_MGCG_OVERRIDE__SHIFT 0x2
+#define SP_CONFIG__DISABLE_TRANS_COEXEC__SHIFT 0x3
+#define SP_CONFIG__CAC_COUNTER_OVERRIDE__SHIFT 0x4
+#define SP_CONFIG__SP_SX_EXPVDATA_FGCG_OVERRIDE__SHIFT 0x5
+#define SP_CONFIG__DEST_CACHE_EVICT_COUNTER_MASK 0x00000003L
+#define SP_CONFIG__ALU_BUSY_MGCG_OVERRIDE_MASK 0x00000004L
+#define SP_CONFIG__DISABLE_TRANS_COEXEC_MASK 0x00000008L
+#define SP_CONFIG__CAC_COUNTER_OVERRIDE_MASK 0x00000010L
+#define SP_CONFIG__SP_SX_EXPVDATA_FGCG_OVERRIDE_MASK 0x00000020L
+//SQ_ARB_CONFIG
+#define SQ_ARB_CONFIG__WG_RR_INTERVAL__SHIFT 0x0
+#define SQ_ARB_CONFIG__FWD_PROG_INTERVAL__SHIFT 0x4
+#define SQ_ARB_CONFIG__WG_RR_INTERVAL_MASK 0x00000003L
+#define SQ_ARB_CONFIG__FWD_PROG_INTERVAL_MASK 0x00000030L
+//SQ_DEBUG_HOST_TRAP_STATUS
+#define SQ_DEBUG_HOST_TRAP_STATUS__PENDING_COUNT__SHIFT 0x0
+#define SQ_DEBUG_HOST_TRAP_STATUS__PENDING_COUNT_MASK 0x0000007FL
+//SQG_GL1H_STATUS
+#define SQG_GL1H_STATUS__R0_ACK_ERR_DETECTED__SHIFT 0x0
+#define SQG_GL1H_STATUS__R0_XNACK_ERR_DETECTED__SHIFT 0x1
+#define SQG_GL1H_STATUS__R1_ACK_ERR_DETECTED__SHIFT 0x2
+#define SQG_GL1H_STATUS__R1_XNACK_ERR_DETECTED__SHIFT 0x3
+#define SQG_GL1H_STATUS__R0_ACK_ERR_DETECTED_MASK 0x00000001L
+#define SQG_GL1H_STATUS__R0_XNACK_ERR_DETECTED_MASK 0x00000002L
+#define SQG_GL1H_STATUS__R1_ACK_ERR_DETECTED_MASK 0x00000004L
+#define SQG_GL1H_STATUS__R1_XNACK_ERR_DETECTED_MASK 0x00000008L
+//SQG_CONFIG
+#define SQG_CONFIG__GL1H_PREFETCH_PAGE__SHIFT 0x0
+#define SQG_CONFIG__SQG_ICPFT_EN__SHIFT 0xd
+#define SQG_CONFIG__SQG_ICPFT_CLR__SHIFT 0xe
+#define SQG_CONFIG__XNACK_INTR_MASK__SHIFT 0x10
+#define SQG_CONFIG__GL1H_PREFETCH_PAGE_MASK 0x0000000FL
+#define SQG_CONFIG__SQG_ICPFT_EN_MASK 0x00002000L
+#define SQG_CONFIG__SQG_ICPFT_CLR_MASK 0x00004000L
+#define SQG_CONFIG__XNACK_INTR_MASK_MASK 0xFFFF0000L
+//SQ_PERF_SNAPSHOT_CTRL
+#define SQ_PERF_SNAPSHOT_CTRL__TIMER_ON_OFF__SHIFT 0x0
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK__SHIFT 0x1
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL__SHIFT 0x11
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTERVAL__SHIFT 0x12
+#define SQ_PERF_SNAPSHOT_CTRL__TIMER_ON_OFF_MASK 0x00000001L
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK_MASK 0x0001FFFEL
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL_MASK 0x00020000L
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTERVAL_MASK 0x003C0000L
+//CC_GC_SHADER_RATE_CONFIG
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+//SQ_INTERRUPT_AUTO_MASK
+#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
+#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
+//SQ_INTERRUPT_MSG_CTRL
+#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
+#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
+//SQ_WATCH0_ADDR_H
+#define SQ_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH0_ADDR_L
+#define SQ_WATCH0_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH0_CNTL
+#define SQ_WATCH0_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH0_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH0_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH1_ADDR_H
+#define SQ_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH1_ADDR_L
+#define SQ_WATCH1_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH1_CNTL
+#define SQ_WATCH1_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH1_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH1_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH2_ADDR_H
+#define SQ_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH2_ADDR_L
+#define SQ_WATCH2_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH2_CNTL
+#define SQ_WATCH2_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH2_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH2_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH3_ADDR_H
+#define SQ_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH3_ADDR_L
+#define SQ_WATCH3_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH3_CNTL
+#define SQ_WATCH3_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH3_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH3_CNTL__VALID_MASK 0x80000000L
+//SQ_IND_INDEX
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
+#define SQ_IND_INDEX__WORKITEM_ID__SHIFT 0x5
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xb
+#define SQ_IND_INDEX__INDEX__SHIFT 0x10
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000001FL
+#define SQ_IND_INDEX__WORKITEM_ID_MASK 0x000007E0L
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00000800L
+#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
+//SQ_IND_DATA
+#define SQ_IND_DATA__DATA__SHIFT 0x0
+#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//SQ_CMD
+#define SQ_CMD__CMD__SHIFT 0x0
+#define SQ_CMD__MODE__SHIFT 0x4
+#define SQ_CMD__CHECK_VMID__SHIFT 0x7
+#define SQ_CMD__DATA__SHIFT 0x8
+#define SQ_CMD__WAVE_ID__SHIFT 0x10
+#define SQ_CMD__QUEUE_ID__SHIFT 0x18
+#define SQ_CMD__VM_ID__SHIFT 0x1c
+#define SQ_CMD__CMD_MASK 0x0000000FL
+#define SQ_CMD__MODE_MASK 0x00000070L
+#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
+#define SQ_CMD__DATA_MASK 0x00000F00L
+#define SQ_CMD__WAVE_ID_MASK 0x001F0000L
+#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
+#define SQ_CMD__VM_ID_MASK 0xF0000000L
+//SQC_MISC_CONFIG
+#define SQC_MISC_CONFIG__UNUSED__SHIFT 0x0
+#define SQC_MISC_CONFIG__SQC_SPI_TTRACE_FGCG_OVERRIDE__SHIFT 0x5
+#define SQC_MISC_CONFIG__SQ_SPI_MSG_FGCG_OVERRIDE__SHIFT 0x6
+#define SQC_MISC_CONFIG__SPI_SQ_EXPALLOC_FGCG_OVERRIDE__SHIFT 0x7
+#define SQC_MISC_CONFIG__SQC_SQ_DATA_RET_FGCG_OVERRIDE__SHIFT 0x8
+#define SQC_MISC_CONFIG__SQC_SQ_INST_RET_FGCG_OVERRIDE__SHIFT 0x9
+#define SQC_MISC_CONFIG__SQC_GCR_RSP_FGCG_OVERRIDE__SHIFT 0xa
+#define SQC_MISC_CONFIG__ICLK_MGCG_DISABLE__SHIFT 0xb
+#define SQC_MISC_CONFIG__ICLK_BANK_MGCG_DISABLE__SHIFT 0xc
+#define SQC_MISC_CONFIG__DCLK_MGCG_DISABLE__SHIFT 0xd
+#define SQC_MISC_CONFIG__GCLK_MGCG_DISABLE__SHIFT 0xe
+#define SQC_MISC_CONFIG__MCLK_MGCG_DISABLE__SHIFT 0xf
+#define SQC_MISC_CONFIG__PCLK_MGCG_DISABLE__SHIFT 0x10
+#define SQC_MISC_CONFIG__BCLK_MGCG_DISABLE__SHIFT 0x11
+#define SQC_MISC_CONFIG__SQC_TA_RESET_FGCG_OVERRIDE__SHIFT 0x12
+#define SQC_MISC_CONFIG__SQC_LDS_CONFIG_FGCG_OVERRIDE__SHIFT 0x13
+#define SQC_MISC_CONFIG__DCLK_BANK_MGCG_DISABLE__SHIFT 0x14
+#define SQC_MISC_CONFIG__SQC_SQ_BARRIER_DONE_FGCG_OVERRIDE__SHIFT 0x15
+#define SQC_MISC_CONFIG__SQC_SQ_MSGDONE_FGCG_OVERRIDE__SHIFT 0x16
+#define SQC_MISC_CONFIG__CMCLK_MGCG_DISABLE__SHIFT 0x17
+#define SQC_MISC_CONFIG__SQC_GL1_CLKEN_OVERRIDE__SHIFT 0x18
+#define SQC_MISC_CONFIG__SQC_CORE_OVERRIDE__SHIFT 0x19
+#define SQC_MISC_CONFIG__ICLK_HMF_BS_MGCG_DISABLE__SHIFT 0x1a
+#define SQC_MISC_CONFIG__ICLK_CC_MGCG_DISABLE__SHIFT 0x1b
+#define SQC_MISC_CONFIG__DCLK_HMF_BS_MGCG_DISABLE__SHIFT 0x1c
+#define SQC_MISC_CONFIG__DCLK_CC_MGCG_DISABLE__SHIFT 0x1d
+#define SQC_MISC_CONFIG__UNUSED_MASK 0x0000001FL
+#define SQC_MISC_CONFIG__SQC_SPI_TTRACE_FGCG_OVERRIDE_MASK 0x00000020L
+#define SQC_MISC_CONFIG__SQ_SPI_MSG_FGCG_OVERRIDE_MASK 0x00000040L
+#define SQC_MISC_CONFIG__SPI_SQ_EXPALLOC_FGCG_OVERRIDE_MASK 0x00000080L
+#define SQC_MISC_CONFIG__SQC_SQ_DATA_RET_FGCG_OVERRIDE_MASK 0x00000100L
+#define SQC_MISC_CONFIG__SQC_SQ_INST_RET_FGCG_OVERRIDE_MASK 0x00000200L
+#define SQC_MISC_CONFIG__SQC_GCR_RSP_FGCG_OVERRIDE_MASK 0x00000400L
+#define SQC_MISC_CONFIG__ICLK_MGCG_DISABLE_MASK 0x00000800L
+#define SQC_MISC_CONFIG__ICLK_BANK_MGCG_DISABLE_MASK 0x00001000L
+#define SQC_MISC_CONFIG__DCLK_MGCG_DISABLE_MASK 0x00002000L
+#define SQC_MISC_CONFIG__GCLK_MGCG_DISABLE_MASK 0x00004000L
+#define SQC_MISC_CONFIG__MCLK_MGCG_DISABLE_MASK 0x00008000L
+#define SQC_MISC_CONFIG__PCLK_MGCG_DISABLE_MASK 0x00010000L
+#define SQC_MISC_CONFIG__BCLK_MGCG_DISABLE_MASK 0x00020000L
+#define SQC_MISC_CONFIG__SQC_TA_RESET_FGCG_OVERRIDE_MASK 0x00040000L
+#define SQC_MISC_CONFIG__SQC_LDS_CONFIG_FGCG_OVERRIDE_MASK 0x00080000L
+#define SQC_MISC_CONFIG__DCLK_BANK_MGCG_DISABLE_MASK 0x00100000L
+#define SQC_MISC_CONFIG__SQC_SQ_BARRIER_DONE_FGCG_OVERRIDE_MASK 0x00200000L
+#define SQC_MISC_CONFIG__SQC_SQ_MSGDONE_FGCG_OVERRIDE_MASK 0x00400000L
+#define SQC_MISC_CONFIG__CMCLK_MGCG_DISABLE_MASK 0x00800000L
+#define SQC_MISC_CONFIG__SQC_GL1_CLKEN_OVERRIDE_MASK 0x01000000L
+#define SQC_MISC_CONFIG__SQC_CORE_OVERRIDE_MASK 0x02000000L
+#define SQC_MISC_CONFIG__ICLK_HMF_BS_MGCG_DISABLE_MASK 0x04000000L
+#define SQC_MISC_CONFIG__ICLK_CC_MGCG_DISABLE_MASK 0x08000000L
+#define SQC_MISC_CONFIG__DCLK_HMF_BS_MGCG_DISABLE_MASK 0x10000000L
+#define SQC_MISC_CONFIG__DCLK_CC_MGCG_DISABLE_MASK 0x20000000L
+
+
+// addressBlock: gc_shsdec
+//SX_DEBUG_BUSY
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ3__SHIFT 0x0
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ2__SHIFT 0x1
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ1__SHIFT 0x2
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ3__SHIFT 0x3
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ2__SHIFT 0x4
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ1__SHIFT 0x5
+#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x6
+#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x7
+#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x8
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x9
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL__SHIFT 0xa
+#define SX_DEBUG_BUSY__SX_SX_IN_VALID__SHIFT 0xb
+#define SX_DEBUG_BUSY__SX_SX_OUT_VALID__SHIFT 0xc
+#define SX_DEBUG_BUSY__RESERVED__SHIFT 0xd
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ3_MASK 0x00000001L
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ2_MASK 0x00000002L
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ1_MASK 0x00000004L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ3_MASK 0x00000008L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ2_MASK 0x00000010L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ1_MASK 0x00000020L
+#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x00000040L
+#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x00000080L
+#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x00000100L
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x00000200L
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL_MASK 0x00000400L
+#define SX_DEBUG_BUSY__SX_SX_IN_VALID_MASK 0x00000800L
+#define SX_DEBUG_BUSY__SX_SX_OUT_VALID_MASK 0x00001000L
+#define SX_DEBUG_BUSY__RESERVED_MASK 0xFFFFE000L
+//SX_DEBUG_BUSY_2
+#define SX_DEBUG_BUSY_2__COL_SCBD0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0__SHIFT 0x1
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE__SHIFT 0x2
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0__SHIFT 0x4
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE__SHIFT 0x5
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0__SHIFT 0x7
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE__SHIFT 0x8
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0__SHIFT 0xa
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE__SHIFT 0xb
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_2__COL_DBIF3_QUAD_FREE__SHIFT 0xf
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_2__COL_DBIF2_QUAD_FREE__SHIFT 0x12
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_2__COL_DBIF1_QUAD_FREE__SHIFT 0x15
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_2__COL_DBIF0_QUAD_FREE__SHIFT 0x18
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_2__COL_SCBD0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0_MASK 0x00000002L
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE_MASK 0x00000004L
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0_MASK 0x00000010L
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE_MASK 0x00000020L
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0_MASK 0x00000080L
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE_MASK 0x00000100L
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0_MASK 0x00000400L
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE_MASK 0x00000800L
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_QUAD_FREE_MASK 0x00008000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_QUAD_FREE_MASK 0x00040000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_QUAD_FREE_MASK 0x00200000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_QUAD_FREE_MASK 0x01000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_3
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_4
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_1
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
+#define SX_DEBUG_1__ENABLE_FIFO_DEBUG_WRITE__SHIFT 0x7
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
+#define SX_DEBUG_1__DISABLE_REP_FGCG__SHIFT 0xd
+#define SX_DEBUG_1__ENABLE_SAME_PC_GDS_CGTS__SHIFT 0xe
+#define SX_DEBUG_1__DISABLE_RAM_FGCG__SHIFT 0xf
+#define SX_DEBUG_1__PC_DISABLE_SAME_ADDR_OPT__SHIFT 0x10
+#define SX_DEBUG_1__DISABLE_COL_VAL_READ_OPT__SHIFT 0x11
+#define SX_DEBUG_1__DISABLE_BC_RB_PLUS__SHIFT 0x12
+#define SX_DEBUG_1__DISABLE_NATIVE_DOWNCVT_FMT_MAPPING__SHIFT 0x13
+#define SX_DEBUG_1__DISABLE_SCBD_READ_PWR_OPT__SHIFT 0x14
+#define SX_DEBUG_1__DISABLE_GDS_CGTS_OPT__SHIFT 0x15
+#define SX_DEBUG_1__DISABLE_DOWNCVT_PWR_OPT__SHIFT 0x16
+#define SX_DEBUG_1__DISABLE_POS_BUFF_REUSE_OPT__SHIFT 0x17
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007FL
+#define SX_DEBUG_1__ENABLE_FIFO_DEBUG_WRITE_MASK 0x00000080L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x00000100L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x00000200L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x00000400L
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x00000800L
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x00001000L
+#define SX_DEBUG_1__DISABLE_REP_FGCG_MASK 0x00002000L
+#define SX_DEBUG_1__ENABLE_SAME_PC_GDS_CGTS_MASK 0x00004000L
+#define SX_DEBUG_1__DISABLE_RAM_FGCG_MASK 0x00008000L
+#define SX_DEBUG_1__PC_DISABLE_SAME_ADDR_OPT_MASK 0x00010000L
+#define SX_DEBUG_1__DISABLE_COL_VAL_READ_OPT_MASK 0x00020000L
+#define SX_DEBUG_1__DISABLE_BC_RB_PLUS_MASK 0x00040000L
+#define SX_DEBUG_1__DISABLE_NATIVE_DOWNCVT_FMT_MAPPING_MASK 0x00080000L
+#define SX_DEBUG_1__DISABLE_SCBD_READ_PWR_OPT_MASK 0x00100000L
+#define SX_DEBUG_1__DISABLE_GDS_CGTS_OPT_MASK 0x00200000L
+#define SX_DEBUG_1__DISABLE_DOWNCVT_PWR_OPT_MASK 0x00400000L
+#define SX_DEBUG_1__DISABLE_POS_BUFF_REUSE_OPT_MASK 0x00800000L
+//SX_DEBUG_BUSY_5
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK6_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK6_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_6
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK6_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_6__COL_REQ3_CREDIT_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_6__COL_REQ3_FLOP_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_6__COL_REQ2_CREDIT_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_6__COL_REQ2_FLOP_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_6__COL_REQ1_CREDIT_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_6__COL_REQ1_FLOP_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_6__COL_REQ0_CREDIT_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK6_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_6__COL_REQ3_CREDIT_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_6__COL_REQ3_FLOP_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_6__COL_REQ2_CREDIT_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_6__COL_REQ2_FLOP_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_6__COL_REQ1_CREDIT_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_6__COL_REQ1_FLOP_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_6__COL_REQ0_CREDIT_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_7
+#define SX_DEBUG_BUSY_7__COL_REQ0_FLOP_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_7__COL_SCBD1_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1__SHIFT 0x2
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_ADJ__SHIFT 0x3
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ2__SHIFT 0x4
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ3__SHIFT 0x5
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ4__SHIFT 0x6
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ5__SHIFT 0x7
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALID_OUT__SHIFT 0x8
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1__SHIFT 0x9
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_ADJ__SHIFT 0xa
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ2__SHIFT 0xb
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ3__SHIFT 0xc
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ4__SHIFT 0xd
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ5__SHIFT 0xe
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALID_OUT__SHIFT 0xf
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1__SHIFT 0x10
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_ADJ__SHIFT 0x11
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ2__SHIFT 0x12
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ3__SHIFT 0x13
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ4__SHIFT 0x14
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ5__SHIFT 0x15
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALID_OUT__SHIFT 0x16
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1__SHIFT 0x17
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_ADJ__SHIFT 0x18
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ2__SHIFT 0x19
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ3__SHIFT 0x1a
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ4__SHIFT 0x1b
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ5__SHIFT 0x1c
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALID_OUT__SHIFT 0x1d
+#define SX_DEBUG_BUSY_7__RESERVED__SHIFT 0x1e
+#define SX_DEBUG_BUSY_7__COL_REQ0_FLOP_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_7__COL_SCBD1_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_MASK 0x00000004L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_ADJ_MASK 0x00000008L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ2_MASK 0x00000010L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ3_MASK 0x00000020L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ4_MASK 0x00000040L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ5_MASK 0x00000080L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALID_OUT_MASK 0x00000100L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_MASK 0x00000200L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_ADJ_MASK 0x00000400L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ2_MASK 0x00000800L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ3_MASK 0x00001000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ4_MASK 0x00002000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ5_MASK 0x00004000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALID_OUT_MASK 0x00008000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_MASK 0x00010000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_ADJ_MASK 0x00020000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ2_MASK 0x00040000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ3_MASK 0x00080000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ4_MASK 0x00100000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ5_MASK 0x00200000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALID_OUT_MASK 0x00400000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_MASK 0x00800000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_ADJ_MASK 0x01000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ2_MASK 0x02000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ3_MASK 0x04000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ4_MASK 0x08000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ5_MASK 0x10000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALID_OUT_MASK 0x20000000L
+#define SX_DEBUG_BUSY_7__RESERVED_MASK 0xC0000000L
+//SX_DEBUG_BUSY_8
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL3_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL2_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL1_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL0_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL3_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL2_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL1_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL0_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL3_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL2_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL1_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL0_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL3_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL2_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL1_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL0_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL3_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL2_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL1_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL0_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL3_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL2_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL1_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL0_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL3_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL2_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL1_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL0_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL3_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL2_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL1_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL0_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL3_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL2_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL1_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL0_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL3_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL2_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL1_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL0_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL3_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL2_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL1_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL0_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL3_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL2_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL1_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL0_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_9
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL3_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL2_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL1_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL0_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL3_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL2_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL1_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL0_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL3_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL2_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL1_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL0_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL3_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL2_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL1_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL0_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL3_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL2_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL1_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL0_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL3_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL2_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL1_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL0_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL3_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL2_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL1_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL0_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL3_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL2_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL1_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL0_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL3_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL2_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL1_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL0_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL3_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL2_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL1_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL0_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL3_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL2_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL1_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL0_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL3_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL2_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL1_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL0_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_10
+#define SX_DEBUG_BUSY_10__POS_SCBD_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_10__POS_FREE_OR_VALIDS__SHIFT 0x1
+#define SX_DEBUG_BUSY_10__POS_REQUESTER_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_10__PA_SX_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ3__SHIFT 0x4
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ2__SHIFT 0x5
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ1__SHIFT 0x6
+#define SX_DEBUG_BUSY_10__IDX_SCBD_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_10__IDX_FREE_OR_VALIDS__SHIFT 0x8
+#define SX_DEBUG_BUSY_10__IDX_REQUESTER_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_10__PA_SX_IDX_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ3__SHIFT 0xb
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ2__SHIFT 0xc
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ1__SHIFT 0xd
+#define SX_DEBUG_BUSY_10__RESERVED__SHIFT 0xe
+#define SX_DEBUG_BUSY_10__POS_SCBD_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_10__POS_FREE_OR_VALIDS_MASK 0x00000002L
+#define SX_DEBUG_BUSY_10__POS_REQUESTER_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_10__PA_SX_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ3_MASK 0x00000010L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ2_MASK 0x00000020L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ1_MASK 0x00000040L
+#define SX_DEBUG_BUSY_10__IDX_SCBD_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_10__IDX_FREE_OR_VALIDS_MASK 0x00000100L
+#define SX_DEBUG_BUSY_10__IDX_REQUESTER_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_10__PA_SX_IDX_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ3_MASK 0x00000800L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ2_MASK 0x00001000L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ1_MASK 0x00002000L
+#define SX_DEBUG_BUSY_10__RESERVED_MASK 0xFFFFC000L
+//SPI_PS_MAX_WAVE_ID
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
+//SPI_GFX_CNTL
+#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
+#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
+//SPI_DEBUG_READ
+#define SPI_DEBUG_READ__DATA__SHIFT 0x0
+#define SPI_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//SPI_DSM_CNTL
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+//SPI_DSM_CNTL2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x3
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000001F8L
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT__SHIFT 0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT_MASK 0x00000003L
+//SPI_DEBUG_BUSY
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x2
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x3
+#define SPI_DEBUG_BUSY__PS2_BUSY__SHIFT 0x4
+#define SPI_DEBUG_BUSY__PS3_BUSY__SHIFT 0x5
+#define SPI_DEBUG_BUSY__CSG0_BUSY__SHIFT 0x6
+#define SPI_DEBUG_BUSY__CSG1_BUSY__SHIFT 0x7
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x8
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x9
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0xa
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0xb
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0xc
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0xd
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0xe
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0xf
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0x10
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0x11
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x12
+#define SPI_DEBUG_BUSY__OFC_LDS_BUSY__SHIFT 0x13
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x14
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x15
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x16
+#define SPI_DEBUG_BUSY__RSRC_ALLOC_BUSY__SHIFT 0x17
+#define SPI_DEBUG_BUSY__PWS_BUSY__SHIFT 0x18
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__PS2_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__PS3_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__CSG0_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__CSG1_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00040000L
+#define SPI_DEBUG_BUSY__OFC_LDS_BUSY_MASK 0x00080000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00200000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00400000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC_BUSY_MASK 0x00800000L
+#define SPI_DEBUG_BUSY__PWS_BUSY_MASK 0x01000000L
+//SPI_CONFIG_PS_CU_EN
+#define SPI_CONFIG_PS_CU_EN__PKR_OFFSET__SHIFT 0x0
+#define SPI_CONFIG_PS_CU_EN__PKR2_OFFSET__SHIFT 0x4
+#define SPI_CONFIG_PS_CU_EN__PKR3_OFFSET__SHIFT 0x8
+#define SPI_CONFIG_PS_CU_EN__PKR_OFFSET_MASK 0x0000000FL
+#define SPI_CONFIG_PS_CU_EN__PKR2_OFFSET_MASK 0x000000F0L
+#define SPI_CONFIG_PS_CU_EN__PKR3_OFFSET_MASK 0x00000F00L
+//SPI_WF_LIFETIME_CNTL
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
+#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
+#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
+//SPI_WF_LIFETIME_LIMIT_0
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_1
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_2
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_3
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_4
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_5
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_0
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_2
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_4
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_6
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_7
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_9
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_11
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_13
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_14
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_15
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_16
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_17
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_18
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_19
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_20
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_DEBUG
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE__SHIFT 0x0
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_21
+#define SPI_WF_LIFETIME_STATUS_21__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_21__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_21__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_21__INT_SENT_MASK 0x80000000L
+//SPI_LB_CTR_CTRL
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
+#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
+#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
+#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
+//SPI_LB_WGP_MASK
+#define SPI_LB_WGP_MASK__WGP_MASK__SHIFT 0x0
+#define SPI_LB_WGP_MASK__WGP_MASK_MASK 0xFFFFL
+//SPI_LB_DATA_REG
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
+//SPI_PG_ENABLE_STATIC_WGP_MASK
+#define SPI_PG_ENABLE_STATIC_WGP_MASK__WGP_MASK__SHIFT 0x0
+#define SPI_PG_ENABLE_STATIC_WGP_MASK__WGP_MASK_MASK 0xFFFFL
+//SPI_GDS_CREDITS
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
+//SPI_SX_EXPORT_BUFFER_SIZES
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
+//SPI_SX_SCOREBOARD_BUFFER_SIZES
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
+//SPI_CSQ_WF_ACTIVE_STATUS
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
+//SPI_CSQ_WF_ACTIVE_COUNT_0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_1
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_2
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_3
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x07FF0000L
+//SPI_LB_DATA_WAVES
+#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
+#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
+#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
+#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERWGP_WAVE_HSGS
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_HS__SHIFT 0x0
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_GS__SHIFT 0x10
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_HS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_GS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERWGP_WAVE_CS
+#define SPI_LB_DATA_PERWGP_WAVE_CS__ACTIVE__SHIFT 0x0
+#define SPI_LB_DATA_PERWGP_WAVE_CS__ACTIVE_MASK 0xFFFFL
+//SPIS_DEBUG_READ
+#define SPIS_DEBUG_READ__DATA__SHIFT 0x0
+#define SPIS_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//BCI_DEBUG_READ
+#define BCI_DEBUG_READ__DATA__SHIFT 0x0
+#define BCI_DEBUG_READ__DATA_MASK 0xFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_LO
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_HI
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_PSMA_LO
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSMA_HI
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_GPR_MIN
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+//SPI_P1_TRAP_SCREEN_PSBA_LO
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSBA_HI
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_PSMA_LO
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSMA_HI
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_GPR_MIN
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+
+
+// addressBlock: gc_tpdec
+//TD_CNTL
+#define TD_CNTL__DISABLE_MEDIAN_CALC_FOR_CUBECORNER_PHANTOM_TEXELS__SHIFT 0x0
+#define TD_CNTL__FORCE_RESIDENCY_MAP_TO_BE_MAX_FILTER__SHIFT 0x2
+#define TD_CNTL__FORCE_RESIDENCY_MAP_CC_MAX_OF_ALL_SAMPLES__SHIFT 0x7
+#define TD_CNTL__PRESERVE_VGPR_ON_UTC_ERROR__SHIFT 0xd
+#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
+#define TD_CNTL__FORCE_RT_BVH4_ARBITER_TO_PING_PONG__SHIFT 0x11
+#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
+#define TD_CNTL__DISABLE_ROUND_TO_ZERO_FOR_LARGE_FLOAT_TO_SMALL_FLOAT__SHIFT 0x16
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
+#define TD_CNTL__ARBITER_ROUND_ROBIN__SHIFT 0x18
+#define TD_CNTL__ARBITER_OLDEST_PRIORITY__SHIFT 0x19
+#define TD_CNTL__DONE_SCOREBOARD_DEPTH__SHIFT 0x1a
+#define TD_CNTL__DISABLE_MEDIAN_CALC_FOR_CUBECORNER_PHANTOM_TEXELS_MASK 0x00000001L
+#define TD_CNTL__FORCE_RESIDENCY_MAP_TO_BE_MAX_FILTER_MASK 0x00000004L
+#define TD_CNTL__FORCE_RESIDENCY_MAP_CC_MAX_OF_ALL_SAMPLES_MASK 0x00000080L
+#define TD_CNTL__PRESERVE_VGPR_ON_UTC_ERROR_MASK 0x00002000L
+#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
+#define TD_CNTL__FORCE_RT_BVH4_ARBITER_TO_PING_PONG_MASK 0x00020000L
+#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
+#define TD_CNTL__DISABLE_ROUND_TO_ZERO_FOR_LARGE_FLOAT_TO_SMALL_FLOAT_MASK 0x00400000L
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
+#define TD_CNTL__ARBITER_ROUND_ROBIN_MASK 0x01000000L
+#define TD_CNTL__ARBITER_OLDEST_PRIORITY_MASK 0x02000000L
+#define TD_CNTL__DONE_SCOREBOARD_DEPTH_MASK 0xFC000000L
+//TD_STATUS
+#define TD_STATUS__BUSY__SHIFT 0x1f
+#define TD_STATUS__BUSY_MASK 0x80000000L
+//TD_POWER_CNTL
+#define TD_POWER_CNTL__DISABLE_NOFILTER_FORMATTER_POWER_OPT__SHIFT 0x6
+#define TD_POWER_CNTL__FORCE_NOFILTER_D16_FORMATTERS_ON__SHIFT 0x7
+#define TD_POWER_CNTL__ENABLE_DEBUG_REG__SHIFT 0x8
+#define TD_POWER_CNTL__DISABLE_NOFILTER_FORMATTER_POWER_OPT_MASK 0x00000040L
+#define TD_POWER_CNTL__FORCE_NOFILTER_D16_FORMATTERS_ON_MASK 0x00000080L
+#define TD_POWER_CNTL__ENABLE_DEBUG_REG_MASK 0x00000100L
+//TD_CNTL2
+#define TD_CNTL2__LDS_RETURN_FIFO_CREDIT__SHIFT 0x0
+#define TD_CNTL2__MULTI_CYCLE_16FP__SHIFT 0x3
+#define TD_CNTL2__LDS_RETURN_FIFO_CREDIT_MASK 0x00000007L
+#define TD_CNTL2__MULTI_CYCLE_16FP_MASK 0x00000008L
+//TD_DSM_CNTL
+//TD_DSM_CNTL2
+//TD_SCRATCH
+#define TD_SCRATCH__SCRATCH__SHIFT 0x0
+#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+//TA_CNTL
+#define TA_CNTL__TA_SQ_XNACK_FGCG_DISABLE__SHIFT 0x0
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
+#define TA_CNTL__TA_SQ_XNACK_FGCG_DISABLE_MASK 0x00000001L
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
+//TA_CNTL_AUX
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
+#define TA_CNTL_AUX__DEPTH_AS_PITCH_DIS__SHIFT 0x1
+#define TA_CNTL_AUX__CORNER_SAMPLES_MIN_DIM__SHIFT 0x2
+#define TA_CNTL_AUX__OVERRIDE_QUAD_MODE_DIS__SHIFT 0x3
+#define TA_CNTL_AUX__DERIV_ADJUST_DIS__SHIFT 0x4
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
+#define TA_CNTL_AUX__GATHERH_DST_SEL__SHIFT 0x6
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
+#define TA_CNTL_AUX__ANISO_MAG_STEP_CLAMP__SHIFT 0x8
+#define TA_CNTL_AUX__AUTO_ALIGN_FORMAT__SHIFT 0x9
+#define TA_CNTL_AUX__ANISO_HALF_THRESH__SHIFT 0xa
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS__SHIFT 0xc
+#define TA_CNTL_AUX__ANISO_STEP_ORDER__SHIFT 0xd
+#define TA_CNTL_AUX__ANISO_STEP__SHIFT 0xe
+#define TA_CNTL_AUX__MINMAG_UNNORM__SHIFT 0xf
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
+#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
+#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP__SHIFT 0x1c
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG__SHIFT 0x1d
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE__SHIFT 0x1e
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
+#define TA_CNTL_AUX__DEPTH_AS_PITCH_DIS_MASK 0x00000002L
+#define TA_CNTL_AUX__CORNER_SAMPLES_MIN_DIM_MASK 0x00000004L
+#define TA_CNTL_AUX__OVERRIDE_QUAD_MODE_DIS_MASK 0x00000008L
+#define TA_CNTL_AUX__DERIV_ADJUST_DIS_MASK 0x00000010L
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
+#define TA_CNTL_AUX__GATHERH_DST_SEL_MASK 0x00000040L
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
+#define TA_CNTL_AUX__ANISO_MAG_STEP_CLAMP_MASK 0x00000100L
+#define TA_CNTL_AUX__AUTO_ALIGN_FORMAT_MASK 0x00000200L
+#define TA_CNTL_AUX__ANISO_HALF_THRESH_MASK 0x00000C00L
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS_MASK 0x00001000L
+#define TA_CNTL_AUX__ANISO_STEP_ORDER_MASK 0x00002000L
+#define TA_CNTL_AUX__ANISO_STEP_MASK 0x00004000L
+#define TA_CNTL_AUX__MINMAG_UNNORM_MASK 0x00008000L
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
+#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x00020000L
+#define TA_CNTL_AUX__ANISO_TAP_MASK 0x00040000L
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP_MASK 0x10000000L
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG_MASK 0x20000000L
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE_MASK 0xC0000000L
+//TA_CNTL2
+#define TA_CNTL2__POINT_SAMPLE_ACCEL_DIS__SHIFT 0x10
+#define TA_CNTL2__ELEMSIZE_HASH_DIS__SHIFT 0x11
+#define TA_CNTL2__TRUNCATE_COORD_MODE__SHIFT 0x12
+#define TA_CNTL2__ELIMINATE_UNLIT_QUAD_DIS__SHIFT 0x13
+#define TA_CNTL2__POINT_SAMPLE_ACCEL_DIS_MASK 0x00010000L
+#define TA_CNTL2__ELEMSIZE_HASH_DIS_MASK 0x00020000L
+#define TA_CNTL2__TRUNCATE_COORD_MODE_MASK 0x00040000L
+#define TA_CNTL2__ELIMINATE_UNLIT_QUAD_DIS_MASK 0x00080000L
+//TA_STATUS
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
+#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
+#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
+#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
+#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
+#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
+#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
+#define TA_STATUS__IN_BUSY__SHIFT 0x18
+#define TA_STATUS__FG_BUSY__SHIFT 0x19
+#define TA_STATUS__LA_BUSY__SHIFT 0x1a
+#define TA_STATUS__FL_BUSY__SHIFT 0x1b
+#define TA_STATUS__TA_BUSY__SHIFT 0x1c
+#define TA_STATUS__FA_BUSY__SHIFT 0x1d
+#define TA_STATUS__AL_BUSY__SHIFT 0x1e
+#define TA_STATUS__BUSY__SHIFT 0x1f
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
+#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
+#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
+#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__LA_BUSY_MASK 0x04000000L
+#define TA_STATUS__FL_BUSY_MASK 0x08000000L
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__BUSY_MASK 0x80000000L
+//TA_SCRATCH
+#define TA_SCRATCH__SCRATCH__SHIFT 0x0
+#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gdsdec
+//GDS_CONFIG
+#define GDS_CONFIG__WRITE_DIS__SHIFT 0x0
+#define GDS_CONFIG__UNUSED__SHIFT 0x1
+#define GDS_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define GDS_CONFIG__UNUSED_MASK 0xFFFFFFFEL
+//GDS_CNTL_STATUS
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x3
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x4
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x5
+#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x6
+#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x7
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0x8
+#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0x9
+#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xa
+#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xb
+#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xc
+#define GDS_CNTL_STATUS__CREDIT_BUSY4__SHIFT 0xd
+#define GDS_CNTL_STATUS__CREDIT_BUSY5__SHIFT 0xe
+#define GDS_CNTL_STATUS__CREDIT_BUSY6__SHIFT 0xf
+#define GDS_CNTL_STATUS__CREDIT_BUSY7__SHIFT 0x10
+#define GDS_CNTL_STATUS__UNUSED__SHIFT 0x11
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000010L
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000040L
+#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000080L
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000100L
+#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000200L
+#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00000400L
+#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00000800L
+#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00001000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY4_MASK 0x00002000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY5_MASK 0x00004000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY6_MASK 0x00008000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY7_MASK 0x00010000L
+#define GDS_CNTL_STATUS__UNUSED_MASK 0xFFFE0000L
+//GDS_ENHANCE
+#define GDS_ENHANCE__MISC__SHIFT 0x0
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
+#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
+#define GDS_ENHANCE__UNUSED__SHIFT 0x12
+#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
+#define GDS_ENHANCE__UNUSED_MASK 0xFFFC0000L
+//GDS_PROTECTION_FAULT
+#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
+#define GDS_PROTECTION_FAULT__SE_ID__SHIFT 0x3
+#define GDS_PROTECTION_FAULT__SA_ID__SHIFT 0x6
+#define GDS_PROTECTION_FAULT__WGP_ID__SHIFT 0x7
+#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xb
+#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xd
+#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x12
+#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
+#define GDS_PROTECTION_FAULT__SE_ID_MASK 0x00000038L
+#define GDS_PROTECTION_FAULT__SA_ID_MASK 0x00000040L
+#define GDS_PROTECTION_FAULT__WGP_ID_MASK 0x00000780L
+#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00001800L
+#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0003E000L
+#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFC0000L
+//GDS_VM_PROTECTION_FAULT
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
+#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
+#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
+#define GDS_VM_PROTECTION_FAULT__TMZ__SHIFT 0x5
+#define GDS_VM_PROTECTION_FAULT__UNUSED1__SHIFT 0x6
+#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
+#define GDS_VM_PROTECTION_FAULT__UNUSED2__SHIFT 0xc
+#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
+#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
+#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
+#define GDS_VM_PROTECTION_FAULT__TMZ_MASK 0x00000020L
+#define GDS_VM_PROTECTION_FAULT__UNUSED1_MASK 0x000000C0L
+#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
+#define GDS_VM_PROTECTION_FAULT__UNUSED2_MASK 0x0000F000L
+#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED__SHIFT 0x2
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED_MASK 0x0000000CL
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
+#define GDS_EDC_OA_DED__ME0_PIPE1_CS_DED__SHIFT 0xc
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xd
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_EDC_OA_DED__ME0_PIPE1_CS_DED_MASK 0x00001000L
+#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFE000L
+//GDS_DSM_CNTL
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED__SHIFT 0x8
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xa
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED_MASK 0x00000300L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFFC00L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_DSM_CNTL2
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
+
+
+// addressBlock: gc_rbdec
+//DB_DEBUG
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+//DB_DEBUG2
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
+#define DB_DEBUG2__FORCE_PERF_COUNTERS_ON__SHIFT 0xe
+#define DB_DEBUG2__FULL_TILE_CACHE_EVICT_ON_HALF_FULL__SHIFT 0xf
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES__SHIFT 0x10
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
+#define DB_DEBUG2__DISABLE_FULL_TILE_WAVE_BREAK__SHIFT 0x14
+#define DB_DEBUG2__ENABLE_FULL_TILE_WAVE_BREAK_FOR_ALL_TILES__SHIFT 0x15
+#define DB_DEBUG2__FORCE_ITERATE_256__SHIFT 0x18
+#define DB_DEBUG2__RESERVED1__SHIFT 0x1a
+#define DB_DEBUG2__DEBUG_BUS_FLOP_EN__SHIFT 0x1b
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
+#define DB_DEBUG2__FORCE_PERF_COUNTERS_ON_MASK 0x00004000L
+#define DB_DEBUG2__FULL_TILE_CACHE_EVICT_ON_HALF_FULL_MASK 0x00008000L
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_FULL_TILE_WAVE_BREAK_MASK 0x00100000L
+#define DB_DEBUG2__ENABLE_FULL_TILE_WAVE_BREAK_FOR_ALL_TILES_MASK 0x00200000L
+#define DB_DEBUG2__FORCE_ITERATE_256_MASK 0x03000000L
+#define DB_DEBUG2__RESERVED1_MASK 0x04000000L
+#define DB_DEBUG2__DEBUG_BUS_FLOP_EN_MASK 0x08000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+//DB_DEBUG3
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
+#define DB_DEBUG3__DISABLE_RELOAD_CONTEXT_DRAW_DATA__SHIFT 0x1
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
+#define DB_DEBUG3__DISABLE_SLOCS_PER_CTXT_MATCH__SHIFT 0x10
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
+#define DB_DEBUG3__DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
+#define DB_DEBUG3__DISABLE_TS_WRITE_L0__SHIFT 0x1e
+#define DB_DEBUG3__DISABLE_MULTIDTAG_FL_PANIC_REQUIREMENT__SHIFT 0x1f
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
+#define DB_DEBUG3__DISABLE_RELOAD_CONTEXT_DRAW_DATA_MASK 0x00000002L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
+#define DB_DEBUG3__DISABLE_SLOCS_PER_CTXT_MATCH_MASK 0x00010000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DISABLE_TS_WRITE_L0_MASK 0x40000000L
+#define DB_DEBUG3__DISABLE_MULTIDTAG_FL_PANIC_REQUIREMENT_MASK 0x80000000L
+//DB_DEBUG4
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
+#define DB_DEBUG4__DISABLE_SEPARATE_OP_PIPE_CLK__SHIFT 0x4
+#define DB_DEBUG4__DISABLE_SEPARATE_SX_CLK__SHIFT 0x5
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x6
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x7
+#define DB_DEBUG4__DISABLE_SEPARATE_DBG_CLK__SHIFT 0x8
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0xc
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0xf
+#define DB_DEBUG4__DISABLE_HIZ_TS_COLLISION_DETECT__SHIFT 0x10
+#define DB_DEBUG4__DISABLE_LAST_OF_BURST_ON_FLUSH_CHUNK0_ALL_DONE__SHIFT 0x12
+#define DB_DEBUG4__ENABLE_CZ_OVERFLOW_TESTMODE__SHIFT 0x13
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO__SHIFT 0x15
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_CONFLICT__SHIFT 0x16
+#define DB_DEBUG4__WR_MEM_BURST_CTL__SHIFT 0x18
+#define DB_DEBUG4__DISABLE_WR_MEM_BURST_POOLING__SHIFT 0x1b
+#define DB_DEBUG4__DISABLE_RD_MEM_BURST__SHIFT 0x1c
+#define DB_DEBUG4__LATE_ACK_SCOREBOARD_MULTIPLE_SLOT__SHIFT 0x1e
+#define DB_DEBUG4__LATE_ACK_PSD_EOP_OLD_METHOD__SHIFT 0x1f
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
+#define DB_DEBUG4__DISABLE_SEPARATE_OP_PIPE_CLK_MASK 0x00000010L
+#define DB_DEBUG4__DISABLE_SEPARATE_SX_CLK_MASK 0x00000020L
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000040L
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000080L
+#define DB_DEBUG4__DISABLE_SEPARATE_DBG_CLK_MASK 0x00000100L
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00001000L
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00008000L
+#define DB_DEBUG4__DISABLE_HIZ_TS_COLLISION_DETECT_MASK 0x00010000L
+#define DB_DEBUG4__DISABLE_LAST_OF_BURST_ON_FLUSH_CHUNK0_ALL_DONE_MASK 0x00040000L
+#define DB_DEBUG4__ENABLE_CZ_OVERFLOW_TESTMODE_MASK 0x00080000L
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_MASK 0x00200000L
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_CONFLICT_MASK 0x00400000L
+#define DB_DEBUG4__WR_MEM_BURST_CTL_MASK 0x07000000L
+#define DB_DEBUG4__DISABLE_WR_MEM_BURST_POOLING_MASK 0x08000000L
+#define DB_DEBUG4__DISABLE_RD_MEM_BURST_MASK 0x10000000L
+#define DB_DEBUG4__LATE_ACK_SCOREBOARD_MULTIPLE_SLOT_MASK 0x40000000L
+#define DB_DEBUG4__LATE_ACK_PSD_EOP_OLD_METHOD_MASK 0x80000000L
+//DB_ETILE_STUTTER_CONTROL
+#define DB_ETILE_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_ETILE_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_ETILE_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_ETILE_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_LTILE_STUTTER_CONTROL
+#define DB_LTILE_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_LTILE_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_LTILE_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_LTILE_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_EQUAD_STUTTER_CONTROL
+#define DB_EQUAD_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_EQUAD_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_EQUAD_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_EQUAD_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_LQUAD_STUTTER_CONTROL
+#define DB_LQUAD_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_LQUAD_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_LQUAD_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_LQUAD_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_CREDIT_LIMIT
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
+#define DB_CREDIT_LIMIT__DB_SC_WAVE_CREDITS__SHIFT 0xd
+#define DB_CREDIT_LIMIT__DB_SC_FREE_WAVE_CREDITS__SHIFT 0x12
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
+#define DB_CREDIT_LIMIT__DB_SC_WAVE_CREDITS_MASK 0x0003E000L
+#define DB_CREDIT_LIMIT__DB_SC_FREE_WAVE_CREDITS_MASK 0x007C0000L
+//DB_WATERMARKS
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x8
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0x10
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x18
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x000000FFL
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x0000FF00L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x00FF0000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0xFF000000L
+//DB_SUBTILE_CONTROL
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
+//DB_FREE_CACHELINES
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x8
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0x10
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x18
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x000000FFL
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x0000FF00L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x00FF0000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0xFF000000L
+//DB_FIFO_DEPTH1
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x18
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0xFF000000L
+//DB_FIFO_DEPTH2
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF0000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
+//DB_LAST_OF_BURST_CONFIG
+#define DB_LAST_OF_BURST_CONFIG__MAXBURST__SHIFT 0x0
+#define DB_LAST_OF_BURST_CONFIG__TIMEOUT__SHIFT 0x8
+#define DB_LAST_OF_BURST_CONFIG__DBCB_LOB_SWITCH_TIMEOUT__SHIFT 0xb
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_FG_DEFAULT_TIMEOUT__SHIFT 0x11
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_COUNT_RESET_ON_LOB__SHIFT 0x12
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_FLQ_LOB_EVERY_256B__SHIFT 0x13
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_ZCACHE_FL_OP_EVEN_ARB__SHIFT 0x14
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_FORCE_FLUSH_BEFORE_FIFO__SHIFT 0x15
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_DKG_LOB_GEN__SHIFT 0x16
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_LPF_LOB_GEN__SHIFT 0x17
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FL_BURST__SHIFT 0x19
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FG_LOB_FWDR__SHIFT 0x1a
+#define DB_LAST_OF_BURST_CONFIG__BYPASS_SORT_RD_BA__SHIFT 0x1c
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_256B_COALESCE__SHIFT 0x1d
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_RD_BURST__SHIFT 0x1e
+#define DB_LAST_OF_BURST_CONFIG__LEGACY_LOB_INSERT_EN__SHIFT 0x1f
+#define DB_LAST_OF_BURST_CONFIG__MAXBURST_MASK 0x000000FFL
+#define DB_LAST_OF_BURST_CONFIG__TIMEOUT_MASK 0x00000700L
+#define DB_LAST_OF_BURST_CONFIG__DBCB_LOB_SWITCH_TIMEOUT_MASK 0x0000F800L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_FG_DEFAULT_TIMEOUT_MASK 0x00020000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_COUNT_RESET_ON_LOB_MASK 0x00040000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_FLQ_LOB_EVERY_256B_MASK 0x00080000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_ZCACHE_FL_OP_EVEN_ARB_MASK 0x00100000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_FORCE_FLUSH_BEFORE_FIFO_MASK 0x00200000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_DKG_LOB_GEN_MASK 0x00400000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_LPF_LOB_GEN_MASK 0x00800000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FL_BURST_MASK 0x02000000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FG_LOB_FWDR_MASK 0x04000000L
+#define DB_LAST_OF_BURST_CONFIG__BYPASS_SORT_RD_BA_MASK 0x10000000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_256B_COALESCE_MASK 0x20000000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_RD_BURST_MASK 0x40000000L
+#define DB_LAST_OF_BURST_CONFIG__LEGACY_LOB_INSERT_EN_MASK 0x80000000L
+//DB_RING_CONTROL
+#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
+#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
+//DB_MEM_ARB_WATERMARKS
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
+//DB_FIFO_DEPTH3
+#define DB_FIFO_DEPTH3__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH3__OSB_WAVE_TABLE_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH3__OREO_WAVE_HIDE_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH3__QUAD_READ_REQS__SHIFT 0x18
+#define DB_FIFO_DEPTH3__LTILE_PROBE_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH3__OSB_WAVE_TABLE_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH3__OREO_WAVE_HIDE_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH3__QUAD_READ_REQS_MASK 0xFF000000L
+//DB_DEBUG6
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_CONFLICT__SHIFT 0x0
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_HARD_CONFLICT__SHIFT 0x1
+#define DB_DEBUG6__FORCE_DB_SC_QUAD_CONFLICT__SHIFT 0x2
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ALL__SHIFT 0x3
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ID__SHIFT 0x4
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_EN__SHIFT 0xa
+#define DB_DEBUG6__DISABLE_PWS_PLUS_TCP_CM_LIVENESS_STALL__SHIFT 0xb
+#define DB_DEBUG6__DISABLE_PWS_PLUS_DTT_TAG_LIVENESS_STALL__SHIFT 0xc
+#define DB_DEBUG6__SET_DB_PERFMON_PWS_PIPE_ID__SHIFT 0xd
+#define DB_DEBUG6__FTWB_MAX_TIMEOUT_VAL__SHIFT 0x10
+#define DB_DEBUG6__DISABLE_LQO_SMT_RAM_OPT__SHIFT 0x18
+#define DB_DEBUG6__FORCE_MAX_TILES_IN_WAVE_CHECK__SHIFT 0x19
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_FIX__SHIFT 0x1a
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_WAIT_PANIC__SHIFT 0x1b
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_CONFLICT_MASK 0x00000001L
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_HARD_CONFLICT_MASK 0x00000002L
+#define DB_DEBUG6__FORCE_DB_SC_QUAD_CONFLICT_MASK 0x00000004L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ALL_MASK 0x00000008L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ID_MASK 0x000003F0L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_EN_MASK 0x00000400L
+#define DB_DEBUG6__DISABLE_PWS_PLUS_TCP_CM_LIVENESS_STALL_MASK 0x00000800L
+#define DB_DEBUG6__DISABLE_PWS_PLUS_DTT_TAG_LIVENESS_STALL_MASK 0x00001000L
+#define DB_DEBUG6__SET_DB_PERFMON_PWS_PIPE_ID_MASK 0x00006000L
+#define DB_DEBUG6__FTWB_MAX_TIMEOUT_VAL_MASK 0x00FF0000L
+#define DB_DEBUG6__DISABLE_LQO_SMT_RAM_OPT_MASK 0x01000000L
+#define DB_DEBUG6__FORCE_MAX_TILES_IN_WAVE_CHECK_MASK 0x02000000L
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_FIX_MASK 0x04000000L
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_WAIT_PANIC_MASK 0x08000000L
+//DB_EXCEPTION_CONTROL
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT 0x3
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT 0x4
+#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT 0x8
+#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT 0x18
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK 0x00000008L
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK 0x00000010L
+#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK 0x00000F00L
+#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK 0x7F000000L
+//DB_DEBUG7
+#define DB_DEBUG7__SPARE_BITS__SHIFT 0x0
+#define DB_DEBUG7__SPARE_BITS_MASK 0xFFFFFFFFL
+//DB_DEBUG5
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PRELOAD__SHIFT 0x0
+#define DB_DEBUG5__ENABLE_SECONDARY_MIPS_TAILS_COMPRESSION__SHIFT 0x1
+#define DB_DEBUG5__DISABLE_CLEAR_VALUE_UPDATE_ON_TILE_CACHE_HIT__SHIFT 0x2
+#define DB_DEBUG5__DISABLE_2SRC_VRS_HARD_CONFLICT__SHIFT 0x3
+#define DB_DEBUG5__DISABLE_FLQ_MCC_DTILEID_CHECK__SHIFT 0x4
+#define DB_DEBUG5__DISABLE_NOZ_POWER_SAVINGS__SHIFT 0x5
+#define DB_DEBUG5__DISABLE_TILE_INFLIGHT_DEC_POSTZ_FIX__SHIFT 0x6
+#define DB_DEBUG5__DISABLE_MGCG_GATING_ON_SHADER_WAIT__SHIFT 0x7
+#define DB_DEBUG5__DISABLE_VRS_1X2_2XAA__SHIFT 0x8
+#define DB_DEBUG5__ENABLE_FULL_TILE_WAVE_BREAK_ON_COARSE__SHIFT 0x9
+#define DB_DEBUG5__DISABLE_HTILE_HARVESTING__SHIFT 0xa
+#define DB_DEBUG5__DISABLE_SEPARATE_TILE_CLK__SHIFT 0xb
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PREFETCH__SHIFT 0xc
+#define DB_DEBUG5__DISABLE_PSL_AUTO_MODE_FIX__SHIFT 0xd
+#define DB_DEBUG5__DISABLE_FORCE_ZMASK_EXPANDED__SHIFT 0xe
+#define DB_DEBUG5__DISABLE_SEPARATE_LQO_CLK__SHIFT 0xf
+#define DB_DEBUG5__DISABLE_Z_WITHOUT_PLANES_FLQ__SHIFT 0x10
+#define DB_DEBUG5__PRESERVE_QMASK_FOR_POSTZ_OP_PIPE__SHIFT 0x11
+#define DB_DEBUG5__Z_NACK_BEHAVIOR_ONLY_WHEN_Z_IS_PRT__SHIFT 0x12
+#define DB_DEBUG5__S_NACK_BEHAVIOR_ONLY_WHEN_S_IS_PRT__SHIFT 0x13
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_Z__SHIFT 0x14
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_STENCIL__SHIFT 0x15
+#define DB_DEBUG5__DISABLE_LQO_FTCQ_DUAL_QUAD_REGION_CHECK__SHIFT 0x16
+#define DB_DEBUG5__DISABLE_EVENT_INSERTION_AFTER_ZPC_BEFORE_CONTEXT_DONE__SHIFT 0x17
+#define DB_DEBUG5__SPARE_BITS__SHIFT 0x18
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PRELOAD_MASK 0x00000001L
+#define DB_DEBUG5__ENABLE_SECONDARY_MIPS_TAILS_COMPRESSION_MASK 0x00000002L
+#define DB_DEBUG5__DISABLE_CLEAR_VALUE_UPDATE_ON_TILE_CACHE_HIT_MASK 0x00000004L
+#define DB_DEBUG5__DISABLE_2SRC_VRS_HARD_CONFLICT_MASK 0x00000008L
+#define DB_DEBUG5__DISABLE_FLQ_MCC_DTILEID_CHECK_MASK 0x00000010L
+#define DB_DEBUG5__DISABLE_NOZ_POWER_SAVINGS_MASK 0x00000020L
+#define DB_DEBUG5__DISABLE_TILE_INFLIGHT_DEC_POSTZ_FIX_MASK 0x00000040L
+#define DB_DEBUG5__DISABLE_MGCG_GATING_ON_SHADER_WAIT_MASK 0x00000080L
+#define DB_DEBUG5__DISABLE_VRS_1X2_2XAA_MASK 0x00000100L
+#define DB_DEBUG5__ENABLE_FULL_TILE_WAVE_BREAK_ON_COARSE_MASK 0x00000200L
+#define DB_DEBUG5__DISABLE_HTILE_HARVESTING_MASK 0x00000400L
+#define DB_DEBUG5__DISABLE_SEPARATE_TILE_CLK_MASK 0x00000800L
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PREFETCH_MASK 0x00001000L
+#define DB_DEBUG5__DISABLE_PSL_AUTO_MODE_FIX_MASK 0x00002000L
+#define DB_DEBUG5__DISABLE_FORCE_ZMASK_EXPANDED_MASK 0x00004000L
+#define DB_DEBUG5__DISABLE_SEPARATE_LQO_CLK_MASK 0x00008000L
+#define DB_DEBUG5__DISABLE_Z_WITHOUT_PLANES_FLQ_MASK 0x00010000L
+#define DB_DEBUG5__PRESERVE_QMASK_FOR_POSTZ_OP_PIPE_MASK 0x00020000L
+#define DB_DEBUG5__Z_NACK_BEHAVIOR_ONLY_WHEN_Z_IS_PRT_MASK 0x00040000L
+#define DB_DEBUG5__S_NACK_BEHAVIOR_ONLY_WHEN_S_IS_PRT_MASK 0x00080000L
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_Z_MASK 0x00100000L
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_STENCIL_MASK 0x00200000L
+#define DB_DEBUG5__DISABLE_LQO_FTCQ_DUAL_QUAD_REGION_CHECK_MASK 0x00400000L
+#define DB_DEBUG5__DISABLE_EVENT_INSERTION_AFTER_ZPC_BEFORE_CONTEXT_DONE_MASK 0x00800000L
+#define DB_DEBUG5__SPARE_BITS_MASK 0xFF000000L
+//DB_FGCG_SRAMS_CLK_CTRL
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE0__SHIFT 0x0
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE1__SHIFT 0x1
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE2__SHIFT 0x2
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE3__SHIFT 0x3
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE4__SHIFT 0x4
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE5__SHIFT 0x5
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE6__SHIFT 0x6
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE7__SHIFT 0x7
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE8__SHIFT 0x8
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE9__SHIFT 0x9
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE10__SHIFT 0xa
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE11__SHIFT 0xb
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE12__SHIFT 0xc
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE13__SHIFT 0xd
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE14__SHIFT 0xe
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE15__SHIFT 0xf
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE16__SHIFT 0x10
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE17__SHIFT 0x11
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE18__SHIFT 0x12
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE19__SHIFT 0x13
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE20__SHIFT 0x14
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE21__SHIFT 0x15
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE22__SHIFT 0x16
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE23__SHIFT 0x17
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE24__SHIFT 0x18
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE25__SHIFT 0x19
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE26__SHIFT 0x1a
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE27__SHIFT 0x1b
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE28__SHIFT 0x1c
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE29__SHIFT 0x1d
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE30__SHIFT 0x1e
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE31__SHIFT 0x1f
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE0_MASK 0x00000001L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE1_MASK 0x00000002L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE2_MASK 0x00000004L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE3_MASK 0x00000008L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE4_MASK 0x00000010L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE5_MASK 0x00000020L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE6_MASK 0x00000040L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE7_MASK 0x00000080L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE8_MASK 0x00000100L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE9_MASK 0x00000200L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE10_MASK 0x00000400L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE11_MASK 0x00000800L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE12_MASK 0x00001000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE13_MASK 0x00002000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE14_MASK 0x00004000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE15_MASK 0x00008000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE16_MASK 0x00010000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE17_MASK 0x00020000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE18_MASK 0x00040000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE19_MASK 0x00080000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE20_MASK 0x00100000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE21_MASK 0x00200000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE22_MASK 0x00400000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE23_MASK 0x00800000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE24_MASK 0x01000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE25_MASK 0x02000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE26_MASK 0x04000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE27_MASK 0x08000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE28_MASK 0x10000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE29_MASK 0x20000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE30_MASK 0x40000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE31_MASK 0x80000000L
+//DB_FGCG_INTERFACES_CLK_CTRL
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_QUAD_OVERRIDE__SHIFT 0x0
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_EXPORT_OVERRIDE__SHIFT 0x2
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_RDREQ_OVERRIDE__SHIFT 0x3
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_WRREQ_OVERRIDE__SHIFT 0x4
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_TILE_OVERRIDE__SHIFT 0x5
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_RMIRET_OVERRIDE__SHIFT 0x6
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_WAVE_OVERRIDE__SHIFT 0x7
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_FREE_WAVE_OVERRIDE__SHIFT 0x8
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_QUAD_OVERRIDE_MASK 0x00000001L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_EXPORT_OVERRIDE_MASK 0x00000004L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_RDREQ_OVERRIDE_MASK 0x00000008L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_WRREQ_OVERRIDE_MASK 0x00000010L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_TILE_OVERRIDE_MASK 0x00000020L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_RMIRET_OVERRIDE_MASK 0x00000040L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_WAVE_OVERRIDE_MASK 0x00000080L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_FREE_WAVE_OVERRIDE_MASK 0x00000100L
+//DB_FIFO_DEPTH4
+#define DB_FIFO_DEPTH4__OSB_SQUAD_TABLE_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH4__OSB_TILE_TABLE_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH4__OSB_SCORE_BOARD_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH4__OSB_EVENT_FIFO_DEPTH__SHIFT 0x18
+#define DB_FIFO_DEPTH4__OSB_SQUAD_TABLE_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH4__OSB_TILE_TABLE_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH4__OSB_SCORE_BOARD_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH4__OSB_EVENT_FIFO_DEPTH_MASK 0xFF000000L
+//CC_RB_REDUNDANCY
+#define CC_RB_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define CC_RB_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//CC_RB_BACKEND_DISABLE
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_RB_BACKEND_DISABLE__RESERVED__SHIFT 0x2
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x4
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_BACKEND_DISABLE__RESERVED_MASK 0x0000000CL
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xFFFFFFF0L
+//GB_ADDR_CONFIG
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//GB_BACKEND_MAP
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
+//GB_GPU_ID
+#define GB_GPU_ID__GPU_ID__SHIFT 0x0
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
+//CC_RB_DAISY_CHAIN
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
+//GB_ADDR_CONFIG_READ
+#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//CB_HW_CONTROL_4
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_NUM_QB_LOG2__SHIFT 0x0
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_ALGORITHM__SHIFT 0x3
+#define CB_HW_CONTROL_4__DISABLE_USE_OF_SMT_SCORE__SHIFT 0x5
+#define CB_HW_CONTROL_4__SPARE_10__SHIFT 0x6
+#define CB_HW_CONTROL_4__SPARE_11__SHIFT 0x7
+#define CB_HW_CONTROL_4__SPARE_12__SHIFT 0x8
+#define CB_HW_CONTROL_4__DISABLE_MA_WAIT_FOR_LAST__SHIFT 0x9
+#define CB_HW_CONTROL_4__SMT_TIMEOUT_THRESHOLD__SHIFT 0xa
+#define CB_HW_CONTROL_4__SMT_QPFIFO_THRESHOLD__SHIFT 0xd
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_RAW_HAZARD__SHIFT 0x10
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_COARSE_RAW_HAZARD__SHIFT 0x11
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_DS_RAW_HAZARD__SHIFT 0x12
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_NUM_QB_LOG2_MASK 0x00000007L
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_ALGORITHM_MASK 0x00000018L
+#define CB_HW_CONTROL_4__DISABLE_USE_OF_SMT_SCORE_MASK 0x00000020L
+#define CB_HW_CONTROL_4__SPARE_10_MASK 0x00000040L
+#define CB_HW_CONTROL_4__SPARE_11_MASK 0x00000080L
+#define CB_HW_CONTROL_4__SPARE_12_MASK 0x00000100L
+#define CB_HW_CONTROL_4__DISABLE_MA_WAIT_FOR_LAST_MASK 0x00000200L
+#define CB_HW_CONTROL_4__SMT_TIMEOUT_THRESHOLD_MASK 0x00001C00L
+#define CB_HW_CONTROL_4__SMT_QPFIFO_THRESHOLD_MASK 0x0000E000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_RAW_HAZARD_MASK 0x00010000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_COARSE_RAW_HAZARD_MASK 0x00020000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_DS_RAW_HAZARD_MASK 0x00040000L
+//CB_HW_CONTROL_3
+#define CB_HW_CONTROL_3__SPARE_5__SHIFT 0x0
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
+#define CB_HW_CONTROL_3__SPARE_6__SHIFT 0x2
+#define CB_HW_CONTROL_3__SPARE_7__SHIFT 0x3
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x4
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x5
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x6
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0x7
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xb
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0xc
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0xd
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0xe
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0xf
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x10
+#define CB_HW_CONTROL_3__SPARE_8__SHIFT 0x11
+#define CB_HW_CONTROL_3__SPARE_9__SHIFT 0x12
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT 0x14
+#define CB_HW_CONTROL_3__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x15
+#define CB_HW_CONTROL_3__SPARE_5_MASK 0x00000001L
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
+#define CB_HW_CONTROL_3__SPARE_6_MASK 0x00000004L
+#define CB_HW_CONTROL_3__SPARE_7_MASK 0x00000008L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000010L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000020L
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000040L
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000080L
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00000800L
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00001000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00002000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00004000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00008000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00010000L
+#define CB_HW_CONTROL_3__SPARE_8_MASK 0x00020000L
+#define CB_HW_CONTROL_3__SPARE_9_MASK 0x00040000L
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK 0x00100000L
+#define CB_HW_CONTROL_3__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00200000L
+//CB_HW_CONTROL
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x0
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT 0x1
+#define CB_HW_CONTROL__DISABLE_SMT_WHEN_NO_FDCC_FIX__SHIFT 0x2
+#define CB_HW_CONTROL__RMI_CREDITS__SHIFT 0x6
+#define CB_HW_CONTROL__NUM_CCC_SKID_FIFO_ENTRIES__SHIFT 0xc
+#define CB_HW_CONTROL__FORCE_FEA_HIGH__SHIFT 0xf
+#define CB_HW_CONTROL__FORCE_EVICT_ALL_VALID__SHIFT 0x10
+#define CB_HW_CONTROL__DISABLE_DCC_CACHE_BYTEMASKING__SHIFT 0x11
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
+#define CB_HW_CONTROL__DISABLE_USE_OF_SET_HASH__SHIFT 0x14
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
+#define CB_HW_CONTROL__SPARE_2__SHIFT 0x16
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
+#define CB_HW_CONTROL__SPARE_3__SHIFT 0x1d
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00000001L
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK 0x00000002L
+#define CB_HW_CONTROL__DISABLE_SMT_WHEN_NO_FDCC_FIX_MASK 0x00000004L
+#define CB_HW_CONTROL__RMI_CREDITS_MASK 0x00000FC0L
+#define CB_HW_CONTROL__NUM_CCC_SKID_FIFO_ENTRIES_MASK 0x00007000L
+#define CB_HW_CONTROL__FORCE_FEA_HIGH_MASK 0x00008000L
+#define CB_HW_CONTROL__FORCE_EVICT_ALL_VALID_MASK 0x00010000L
+#define CB_HW_CONTROL__DISABLE_DCC_CACHE_BYTEMASKING_MASK 0x00020000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__DISABLE_USE_OF_SET_HASH_MASK 0x00100000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__SPARE_2_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__SPARE_3_MASK 0x20000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+//CB_HW_CONTROL_1
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0x0
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0000003FL
+//CB_HW_CONTROL_2
+#define CB_HW_CONTROL_2__SPARE_4__SHIFT 0x0
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x8
+#define CB_HW_CONTROL_2__SPARE__SHIFT 0xe
+#define CB_HW_CONTROL_2__SPARE_4_MASK 0x000000FFL
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x00003F00L
+#define CB_HW_CONTROL_2__SPARE_MASK 0xFFFFC000L
+//CB_DCC_CONFIG
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DEPTH__SHIFT 0x0
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x5
+#define CB_DCC_CONFIG__SPARE_13__SHIFT 0x6
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE__SHIFT 0x7
+#define CB_DCC_CONFIG__SPARE_14__SHIFT 0x8
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x19
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DEPTH_MASK 0x0000001FL
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000020L
+#define CB_DCC_CONFIG__SPARE_13_MASK 0x00000040L
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE_MASK 0x00000080L
+#define CB_DCC_CONFIG__SPARE_14_MASK 0x0000FF00L
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x01FF0000L
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xFE000000L
+//CB_HW_MEM_ARBITER_RD
+#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x13
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x19
+#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00040000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x00380000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x01C00000L
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x02000000L
+//CB_HW_MEM_ARBITER_WR
+#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x13
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x19
+#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00040000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x00380000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x01C00000L
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x02000000L
+//CB_FGCG_SRAM_OVERRIDE
+#define CB_FGCG_SRAM_OVERRIDE__DISABLE_FGCG__SHIFT 0x0
+#define CB_FGCG_SRAM_OVERRIDE__DISABLE_FGCG_MASK 0x000FFFFFL
+//CB_DCC_CONFIG2
+#define CB_DCC_CONFIG2__INVALID_KEY_ERROR_CODE__SHIFT 0x0
+#define CB_DCC_CONFIG2__CLEAR_FRAG2DCC_KEY_ERROR_CODE__SHIFT 0x8
+#define CB_DCC_CONFIG2__ENABLE_COMP_KEY_ERROR_DETECTION__SHIFT 0x9
+#define CB_DCC_CONFIG2__INVALID_KEY_ERROR_CODE_MASK 0x000000FFL
+#define CB_DCC_CONFIG2__CLEAR_FRAG2DCC_KEY_ERROR_CODE_MASK 0x00000100L
+#define CB_DCC_CONFIG2__ENABLE_COMP_KEY_ERROR_DETECTION_MASK 0x00000200L
+//CHICKEN_BITS
+#define CHICKEN_BITS__SPARE__SHIFT 0x0
+#define CHICKEN_BITS__SPARE_MASK 0xFFFFFFFFL
+//CB_CACHE_EVICT_POINTS
+#define CB_CACHE_EVICT_POINTS__CC_COLOR_EVICT_POINT__SHIFT 0x0
+#define CB_CACHE_EVICT_POINTS__CC_FMASK_EVICT_POINT__SHIFT 0x8
+#define CB_CACHE_EVICT_POINTS__DCC_CACHE_EVICT_POINT__SHIFT 0x10
+#define CB_CACHE_EVICT_POINTS__CC_CACHE_EVICT_POINT__SHIFT 0x18
+#define CB_CACHE_EVICT_POINTS__CC_COLOR_EVICT_POINT_MASK 0x000000FFL
+#define CB_CACHE_EVICT_POINTS__CC_FMASK_EVICT_POINT_MASK 0x0000FF00L
+#define CB_CACHE_EVICT_POINTS__DCC_CACHE_EVICT_POINT_MASK 0x00FF0000L
+#define CB_CACHE_EVICT_POINTS__CC_CACHE_EVICT_POINT_MASK 0xFF000000L
+
+
+// addressBlock: gc_gceadec
+//GCEA_DRAM_RD_CLI2GRP_MAP0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_CLI2GRP_MAP1
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP1
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_GRP2VC_MAP
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_WR_GRP2VC_MAP
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_RD_LAZY
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_WR_LAZY
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_RD_CAM_CNTL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_WR_CAM_CNTL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_PAGE_BURST
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_AGE
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_WR_PRI_AGE
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_RD_PRI_QUEUING
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_QUEUING
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_FIXED
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_FIXED
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_URGENCY
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_WR_PRI_URGENCY
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI1
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI2
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI3
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI1
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI2
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI3
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_CLI2GRP_MAP0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_CLI2GRP_MAP1
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP1
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_COMBINE_FLUSH
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//GCEA_IO_WR_COMBINE_FLUSH
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING__SHIFT 0x12
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING_MASK 0x00040000L
+//GCEA_IO_GROUP_BURST
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_AGE
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_WR_PRI_AGE
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_RD_PRI_QUEUING
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_QUEUING
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_FIXED
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_FIXED
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_URGENCY
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_WR_PRI_URGENCY
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_RD_PRI_URGENCY_MASKING
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_WR_PRI_URGENCY_MASKING
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_RD_PRI_QUANT_PRI1
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI2
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI3
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI1
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI2
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI3
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_SDP_ARB_DRAM
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//GCEA_SDP_ARB_FINAL
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define GCEA_SDP_ARB_FINAL__DRAM_RD_THROTTLE__SHIFT 0x1c
+#define GCEA_SDP_ARB_FINAL__DRAM_WR_THROTTLE__SHIFT 0x1d
+#define GCEA_SDP_ARB_FINAL__GMI_RD_THROTTLE__SHIFT 0x1e
+#define GCEA_SDP_ARB_FINAL__GMI_WR_THROTTLE__SHIFT 0x1f
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_RD_THROTTLE_MASK 0x10000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_WR_THROTTLE_MASK 0x20000000L
+#define GCEA_SDP_ARB_FINAL__GMI_RD_THROTTLE_MASK 0x40000000L
+#define GCEA_SDP_ARB_FINAL__GMI_WR_THROTTLE_MASK 0x80000000L
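[Editorial note, not part of the patch] Each _MASK in these generated GCEA tables is the field's all-ones pattern shifted left by the matching __SHIFT. A minimal, hypothetical build-time check of that invariant is sketched below; the chosen field, GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT, is a 5-bit field at bit 5 per the definitions above.

/* Illustrative sketch only -- not part of the diff above.
 * Verifies at compile time that a generated mask/shift pair stays
 * consistent: GMI_BURST_LIMIT is 5 bits wide starting at bit 5, so
 * its mask must equal 0x1F << 5 == 0x000003E0.
 */
#include <assert.h>

static_assert(GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK ==
              (0x1FU << GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT),
              "GCEA_SDP_ARB_FINAL GMI_BURST_LIMIT mask/shift mismatch");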
+//GCEA_SDP_DRAM_PRIORITY
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_IO_PRIORITY
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_CREDITS
+#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
+#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_TAG_RESERVE0
+#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GCEA_SDP_TAG_RESERVE1
+#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GCEA_SDP_VCC_RESERVE0
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCC_RESERVE1
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_VCD_RESERVE0
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+
+
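[Editorial note, not part of the patch] As a hedged illustration of how the paired __SHIFT/_MASK macros above are consumed, the sketch below extracts and rewrites one field of a cached GCEA_DRAM_RD_GRP2VC_MAP register value. The helpers gcea_get_field() and gcea_set_field() are hypothetical stand-ins for this example; the amdgpu driver has its own field-access macros built on the same shift/mask convention.

/* Illustrative sketch only -- not part of the diff above.
 * A field is read by masking then shifting down, and updated by
 * clearing the masked bits and OR-ing in the shifted new value.
 */
#include <stdint.h>

static inline uint32_t gcea_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t gcea_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				      uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: bump GROUP1_VC in a (hypothetical) cached copy of
 * GCEA_DRAM_RD_GRP2VC_MAP without disturbing the other groups. */
static uint32_t bump_group1_vc(uint32_t grp2vc_map)
{
	uint32_t vc = gcea_get_field(grp2vc_map,
				     GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK,
				     GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT);

	return gcea_set_field(grp2vc_map,
			      GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK,
			      GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT,
			      vc + 1);
}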
+// addressBlock: gc_gceadec2
+//GCEA_SDP_VCD_RESERVE1
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_REQ_CNTL
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//GCEA_MISC
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//GCEA_LATENCY_SAMPLING
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//GCEA_MAM_CTRL2
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_DISABLE__SHIFT 0x0
+#define GCEA_MAM_CTRL2__DBIT_PF_CLR_ONLY__SHIFT 0x1
+#define GCEA_MAM_CTRL2__DBIT_PF_RD_ONLY__SHIFT 0x2
+#define GCEA_MAM_CTRL2__DBIT_TRACK_SEGMENT__SHIFT 0x3
+#define GCEA_MAM_CTRL2__ARAM_TRACK_SEGMENT__SHIFT 0x6
+#define GCEA_MAM_CTRL2__ARAM_FB_TRACK_SIZE__SHIFT 0x9
+#define GCEA_MAM_CTRL2__ARAM_RB_ENTRY_SIZE__SHIFT 0xf
+#define GCEA_MAM_CTRL2__ARAM_OVERRIDE_EA_STRAP__SHIFT 0x12
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_ENABLE__SHIFT 0x13
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_VALUE__SHIFT 0x14
+#define GCEA_MAM_CTRL2__ARAM_REMOVE_TRACKER__SHIFT 0x15
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_ENABLE__SHIFT 0x16
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_VALUE__SHIFT 0x17
+#define GCEA_MAM_CTRL2__RESERVED_FIELD__SHIFT 0x18
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_DISABLE_MASK 0x00000001L
+#define GCEA_MAM_CTRL2__DBIT_PF_CLR_ONLY_MASK 0x00000002L
+#define GCEA_MAM_CTRL2__DBIT_PF_RD_ONLY_MASK 0x00000004L
+#define GCEA_MAM_CTRL2__DBIT_TRACK_SEGMENT_MASK 0x00000038L
+#define GCEA_MAM_CTRL2__ARAM_TRACK_SEGMENT_MASK 0x000001C0L
+#define GCEA_MAM_CTRL2__ARAM_FB_TRACK_SIZE_MASK 0x00007E00L
+#define GCEA_MAM_CTRL2__ARAM_RB_ENTRY_SIZE_MASK 0x00038000L
+#define GCEA_MAM_CTRL2__ARAM_OVERRIDE_EA_STRAP_MASK 0x00040000L
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_ENABLE_MASK 0x00080000L
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_VALUE_MASK 0x00100000L
+#define GCEA_MAM_CTRL2__ARAM_REMOVE_TRACKER_MASK 0x00200000L
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_ENABLE_MASK 0x00400000L
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_VALUE_MASK 0x00800000L
+#define GCEA_MAM_CTRL2__RESERVED_FIELD_MASK 0xFF000000L
+//GCEA_MAM_CTRL
+#define GCEA_MAM_CTRL__MAM_DISABLE__SHIFT 0x0
+#define GCEA_MAM_CTRL__DBIT_COALESCE_DISABLE__SHIFT 0x1
+#define GCEA_MAM_CTRL__ARAM_COALESCE_DISABLE__SHIFT 0x2
+#define GCEA_MAM_CTRL__ARAM_FLUSH_SNOOP_EN__SHIFT 0x3
+#define GCEA_MAM_CTRL__SDMA_UPDT_ARAM__SHIFT 0x4
+#define GCEA_MAM_CTRL__ARAM_FLUSH_NOALLOC__SHIFT 0x5
+#define GCEA_MAM_CTRL__FLUSH_TRACKER__SHIFT 0x6
+#define GCEA_MAM_CTRL__CLEAR_TRACKER__SHIFT 0x7
+#define GCEA_MAM_CTRL__SDP_PRIORITY__SHIFT 0x8
+#define GCEA_MAM_CTRL__FORCE_FLUSH_UPDT_TRACKER__SHIFT 0xc
+#define GCEA_MAM_CTRL__FORCE_FLUSH_GEN_INTERRUPT__SHIFT 0xd
+#define GCEA_MAM_CTRL__TIMER_FLUSH_UPDT_TRACKER__SHIFT 0xe
+#define GCEA_MAM_CTRL__TIMER_FLUSH_GEN_INTERRUPT__SHIFT 0xf
+#define GCEA_MAM_CTRL__RESERVED_FIELD__SHIFT 0x10
+#define GCEA_MAM_CTRL__ARAM_NUM_RB_ENTRIES__SHIFT 0x17
+#define GCEA_MAM_CTRL__ARAM_RB_ADDR_HI__SHIFT 0x1c
+#define GCEA_MAM_CTRL__MAM_DISABLE_MASK 0x00000001L
+#define GCEA_MAM_CTRL__DBIT_COALESCE_DISABLE_MASK 0x00000002L
+#define GCEA_MAM_CTRL__ARAM_COALESCE_DISABLE_MASK 0x00000004L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_SNOOP_EN_MASK 0x00000008L
+#define GCEA_MAM_CTRL__SDMA_UPDT_ARAM_MASK 0x00000010L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_NOALLOC_MASK 0x00000020L
+#define GCEA_MAM_CTRL__FLUSH_TRACKER_MASK 0x00000040L
+#define GCEA_MAM_CTRL__CLEAR_TRACKER_MASK 0x00000080L
+#define GCEA_MAM_CTRL__SDP_PRIORITY_MASK 0x00000F00L
+#define GCEA_MAM_CTRL__FORCE_FLUSH_UPDT_TRACKER_MASK 0x00001000L
+#define GCEA_MAM_CTRL__FORCE_FLUSH_GEN_INTERRUPT_MASK 0x00002000L
+#define GCEA_MAM_CTRL__TIMER_FLUSH_UPDT_TRACKER_MASK 0x00004000L
+#define GCEA_MAM_CTRL__TIMER_FLUSH_GEN_INTERRUPT_MASK 0x00008000L
+#define GCEA_MAM_CTRL__RESERVED_FIELD_MASK 0x007F0000L
+#define GCEA_MAM_CTRL__ARAM_NUM_RB_ENTRIES_MASK 0x0F800000L
+#define GCEA_MAM_CTRL__ARAM_RB_ADDR_HI_MASK 0xF0000000L
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT__IOWR_DATAMEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_DSM_CNTL
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//GCEA_DSM_CNTLA
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//GCEA_DSM_CNTLB
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//GCEA_DSM_CNTL2
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//GCEA_DSM_CNTL2A
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//GCEA_DSM_CNTL2B
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//GCEA_GL2C_XBR_CREDITS
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
+//GCEA_GL2C_XBR_MAXBURST
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
+#define GCEA_GL2C_XBR_MAXBURST__IO_RD__SHIFT 0x4
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
+#define GCEA_GL2C_XBR_MAXBURST__IO_WR__SHIFT 0xc
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_FLUSH_TIMER__SHIFT 0x10
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_SAME64B_ONLY__SHIFT 0x13
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_FLUSH_TIMER__SHIFT 0x14
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_SAME64B_ONLY__SHIFT 0x17
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
+#define GCEA_GL2C_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
+#define GCEA_GL2C_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_FLUSH_TIMER_MASK 0x00070000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_SAME64B_ONLY_MASK 0x00080000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_FLUSH_TIMER_MASK 0x00700000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_SAME64B_ONLY_MASK 0x00800000L
+//GCEA_PROBE_CNTL
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
+//GCEA_PROBE_MAP
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTGL2C__SHIFT 0x0
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTGL2C__SHIFT 0x1
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTGL2C__SHIFT 0x2
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTGL2C__SHIFT 0x3
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTGL2C__SHIFT 0x4
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTGL2C__SHIFT 0x5
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTGL2C__SHIFT 0x6
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTGL2C__SHIFT 0x7
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTGL2C__SHIFT 0x8
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTGL2C__SHIFT 0x9
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTGL2C__SHIFT 0xa
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTGL2C__SHIFT 0xb
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTGL2C__SHIFT 0xc
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTGL2C__SHIFT 0xd
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTGL2C__SHIFT 0xe
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTGL2C__SHIFT 0xf
+#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTGL2C_MASK 0x00000001L
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTGL2C_MASK 0x00000002L
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTGL2C_MASK 0x00000004L
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTGL2C_MASK 0x00000008L
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTGL2C_MASK 0x00000010L
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTGL2C_MASK 0x00000020L
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTGL2C_MASK 0x00000040L
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTGL2C_MASK 0x00000080L
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTGL2C_MASK 0x00000100L
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTGL2C_MASK 0x00000200L
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTGL2C_MASK 0x00000400L
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTGL2C_MASK 0x00000800L
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTGL2C_MASK 0x00001000L
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTGL2C_MASK 0x00002000L
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTGL2C_MASK 0x00004000L
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTGL2C_MASK 0x00008000L
+#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+//GCEA_MISC2
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define GCEA_MISC2__BLOCK_REQUESTS__SHIFT 0xd
+#define GCEA_MISC2__REQUESTS_BLOCKED__SHIFT 0xe
+#define GCEA_MISC2__FGCLKEN_OVERRIDE__SHIFT 0xf
+#define GCEA_MISC2__LINKMGR_CRBUSY_MASK__SHIFT 0x10
+#define GCEA_MISC2__RDRET_FED_MASK__SHIFT 0x11
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define GCEA_MISC2__BLOCK_REQUESTS_MASK 0x00002000L
+#define GCEA_MISC2__REQUESTS_BLOCKED_MASK 0x00004000L
+#define GCEA_MISC2__FGCLKEN_OVERRIDE_MASK 0x00008000L
+#define GCEA_MISC2__LINKMGR_CRBUSY_MASK_MASK 0x00010000L
+#define GCEA_MISC2__RDRET_FED_MASK_MASK 0x00020000L
+
+
+// addressBlock: gc_gceadec3
+//GCEA_SDP_BACKDOOR_CMDCREDITS0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS1
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS1
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_MISCCREDITS
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x0000007FL
+//GCEA_RRET_MEM_RESERVE
+#define GCEA_RRET_MEM_RESERVE__VC0__SHIFT 0x0
+#define GCEA_RRET_MEM_RESERVE__VC1__SHIFT 0x4
+#define GCEA_RRET_MEM_RESERVE__VC2__SHIFT 0x8
+#define GCEA_RRET_MEM_RESERVE__VC3__SHIFT 0xc
+#define GCEA_RRET_MEM_RESERVE__VC4__SHIFT 0x10
+#define GCEA_RRET_MEM_RESERVE__VC5__SHIFT 0x14
+#define GCEA_RRET_MEM_RESERVE__VC6__SHIFT 0x18
+#define GCEA_RRET_MEM_RESERVE__VC7__SHIFT 0x1c
+#define GCEA_RRET_MEM_RESERVE__VC0_MASK 0x0000000FL
+#define GCEA_RRET_MEM_RESERVE__VC1_MASK 0x000000F0L
+#define GCEA_RRET_MEM_RESERVE__VC2_MASK 0x00000F00L
+#define GCEA_RRET_MEM_RESERVE__VC3_MASK 0x0000F000L
+#define GCEA_RRET_MEM_RESERVE__VC4_MASK 0x000F0000L
+#define GCEA_RRET_MEM_RESERVE__VC5_MASK 0x00F00000L
+#define GCEA_RRET_MEM_RESERVE__VC6_MASK 0x0F000000L
+#define GCEA_RRET_MEM_RESERVE__VC7_MASK 0xF0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT3__MAM_AFMEM_SEC_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_SEC_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_SDP_ENABLE
+#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GCEA_SDP_ENABLE__EARLY_CREDIT_REQUEST__SHIFT 0x1
+#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
+#define GCEA_SDP_ENABLE__EARLY_CREDIT_REQUEST_MASK 0x00000002L
+
+
+// addressBlock: gc_spipdec2
+//SPI_PQEV_CTRL
+#define SPI_PQEV_CTRL__SCAN_PERIOD__SHIFT 0x0
+#define SPI_PQEV_CTRL__QUEUE_DURATION__SHIFT 0xa
+#define SPI_PQEV_CTRL__COMPUTE_PIPE_EN__SHIFT 0x10
+#define SPI_PQEV_CTRL__SCAN_PERIOD_MASK 0x000003FFL
+#define SPI_PQEV_CTRL__QUEUE_DURATION_MASK 0x0000FC00L
+#define SPI_PQEV_CTRL__COMPUTE_PIPE_EN_MASK 0x00FF0000L
+//SPI_EXP_THROTTLE_CTRL
+#define SPI_EXP_THROTTLE_CTRL__ENABLE__SHIFT 0x0
+#define SPI_EXP_THROTTLE_CTRL__PERIOD__SHIFT 0x1
+#define SPI_EXP_THROTTLE_CTRL__UPSTEP__SHIFT 0x5
+#define SPI_EXP_THROTTLE_CTRL__DOWNSTEP__SHIFT 0x9
+#define SPI_EXP_THROTTLE_CTRL__LOW_STALL_MON_HIST_COUNT__SHIFT 0xd
+#define SPI_EXP_THROTTLE_CTRL__HIGH_STALL_MON_HIST_COUNT__SHIFT 0x10
+#define SPI_EXP_THROTTLE_CTRL__EXP_STALL_THRESHOLD__SHIFT 0x13
+#define SPI_EXP_THROTTLE_CTRL__SKEW_COUNT__SHIFT 0x1a
+#define SPI_EXP_THROTTLE_CTRL__THROTTLE_RESET__SHIFT 0x1d
+#define SPI_EXP_THROTTLE_CTRL__ENABLE_MASK 0x00000001L
+#define SPI_EXP_THROTTLE_CTRL__PERIOD_MASK 0x0000001EL
+#define SPI_EXP_THROTTLE_CTRL__UPSTEP_MASK 0x000001E0L
+#define SPI_EXP_THROTTLE_CTRL__DOWNSTEP_MASK 0x00001E00L
+#define SPI_EXP_THROTTLE_CTRL__LOW_STALL_MON_HIST_COUNT_MASK 0x0000E000L
+#define SPI_EXP_THROTTLE_CTRL__HIGH_STALL_MON_HIST_COUNT_MASK 0x00070000L
+#define SPI_EXP_THROTTLE_CTRL__EXP_STALL_THRESHOLD_MASK 0x03F80000L
+#define SPI_EXP_THROTTLE_CTRL__SKEW_COUNT_MASK 0x1C000000L
+#define SPI_EXP_THROTTLE_CTRL__THROTTLE_RESET_MASK 0x20000000L
+
+
+// addressBlock: gc_rmi_rmidec
+//RMI_GENERAL_CNTL
+#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
+#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
+//RMI_GENERAL_CNTL1
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xb
+#define RMI_GENERAL_CNTL1__ARBITER_ADDRESS_CHANGE_ENABLE__SHIFT 0xe
+#define RMI_GENERAL_CNTL1__LAST_OF_BURST_INSERTION_DISABLE__SHIFT 0xf
+#define RMI_GENERAL_CNTL1__TCIW0_PRODUCER_CREDITS__SHIFT 0x10
+#define RMI_GENERAL_CNTL1__TCIW1_PRODUCER_CREDITS__SHIFT 0x16
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000600L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000800L
+#define RMI_GENERAL_CNTL1__ARBITER_ADDRESS_CHANGE_ENABLE_MASK 0x00004000L
+#define RMI_GENERAL_CNTL1__LAST_OF_BURST_INSERTION_DISABLE_MASK 0x00008000L
+#define RMI_GENERAL_CNTL1__TCIW0_PRODUCER_CREDITS_MASK 0x003F0000L
+#define RMI_GENERAL_CNTL1__TCIW1_PRODUCER_CREDITS_MASK 0x0FC00000L
+//RMI_GENERAL_STATUS
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
+#define RMI_GENERAL_STATUS__RESERVED_BIT_6__SHIFT 0x6
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
+#define RMI_GENERAL_STATUS__RESERVED_BIT_18__SHIFT 0x12
+#define RMI_GENERAL_STATUS__RESERVED_BIT_19__SHIFT 0x13
+#define RMI_GENERAL_STATUS__RESERVED_BIT_20__SHIFT 0x14
+#define RMI_GENERAL_STATUS__RESERVED_BITS_28_21__SHIFT 0x15
+#define RMI_GENERAL_STATUS__RESERVED_BIT_29__SHIFT 0x1d
+#define RMI_GENERAL_STATUS__RESERVED_BIT_30__SHIFT 0x1e
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_6_MASK 0x00000040L
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_18_MASK 0x00040000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_19_MASK 0x00080000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_20_MASK 0x00100000L
+#define RMI_GENERAL_STATUS__RESERVED_BITS_28_21_MASK 0x1FE00000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_29_MASK 0x20000000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_30_MASK 0x40000000L
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
+//RMI_SUBBLOCK_STATUS0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
+//RMI_SUBBLOCK_STATUS1
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
+//RMI_SUBBLOCK_STATUS2
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
+//RMI_SUBBLOCK_STATUS3
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
+//RMI_XBAR_CONFIG
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
+#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
+//RMI_PROBE_POP_LOGIC_CNTL
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
+//RMI_UTC_XNACK_N_MISC_CNTL
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
+//RMI_DEMUX_CNTL
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_OVERRIDE_EN__SHIFT 0x2
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_OVERRIDE_EN__SHIFT 0x12
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_OVERRIDE_EN_MASK 0x00000004L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_OVERRIDE_EN_MASK 0x00040000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
+//RMI_UTCL1_CNTL1
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//RMI_UTCL1_CNTL2
+#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define RMI_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define RMI_UTCL1_CNTL2__PERM_MODE_OVRD__SHIFT 0x1b
+#define RMI_UTCL1_CNTL2__LINE_INVALIDATE_OPT__SHIFT 0x1c
+#define RMI_UTCL1_CNTL2__GPUVM_16K_DEFAULT__SHIFT 0x1d
+#define RMI_UTCL1_CNTL2__FGCG_DISABLE__SHIFT 0x1e
+#define RMI_UTCL1_CNTL2__RESERVED__SHIFT 0x1f
+#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define RMI_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define RMI_UTCL1_CNTL2__PERM_MODE_OVRD_MASK 0x08000000L
+#define RMI_UTCL1_CNTL2__LINE_INVALIDATE_OPT_MASK 0x10000000L
+#define RMI_UTCL1_CNTL2__GPUVM_16K_DEFAULT_MASK 0x20000000L
+#define RMI_UTCL1_CNTL2__FGCG_DISABLE_MASK 0x40000000L
+#define RMI_UTCL1_CNTL2__RESERVED_MASK 0x80000000L
+//RMI_UTC_UNIT_CONFIG
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN__SHIFT 0x0
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN_MASK 0x0000FFFFL
+//RMI_TCIW_FORMATTER0_CNTL
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
+//RMI_TCIW_FORMATTER1_CNTL
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
+//RMI_SCOREBOARD_CNTL
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
+//RMI_SCOREBOARD_STATUS0
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
+#define RMI_SCOREBOARD_STATUS0__COUNTER_SELECT__SHIFT 0x16
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
+#define RMI_SCOREBOARD_STATUS0__COUNTER_SELECT_MASK 0x07C00000L
+//RMI_SCOREBOARD_STATUS1
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
+//RMI_SCOREBOARD_STATUS2
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
+//RMI_XBAR_ARBITER_CONFIG
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_OVERRIDE_EN__SHIFT 0x15
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_OVERRIDE_EN_MASK 0x00200000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
+//RMI_XBAR_ARBITER_CONFIG_1
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
+//RMI_CLOCK_CNTRL
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
+//RMI_UTCL1_STATUS
+#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//RMI_RB_GLX_CID_MAP
+#define RMI_RB_GLX_CID_MAP__CB_COLOR_MAP__SHIFT 0x0
+#define RMI_RB_GLX_CID_MAP__CB_FMASK_MAP__SHIFT 0x4
+#define RMI_RB_GLX_CID_MAP__CB_CMASK_MAP__SHIFT 0x8
+#define RMI_RB_GLX_CID_MAP__CB_DCC_MAP__SHIFT 0xc
+#define RMI_RB_GLX_CID_MAP__DB_Z_MAP__SHIFT 0x10
+#define RMI_RB_GLX_CID_MAP__DB_S_MAP__SHIFT 0x14
+#define RMI_RB_GLX_CID_MAP__DB_TILE_MAP__SHIFT 0x18
+#define RMI_RB_GLX_CID_MAP__DB_ZPCPSD_MAP__SHIFT 0x1c
+#define RMI_RB_GLX_CID_MAP__CB_COLOR_MAP_MASK 0x0000000FL
+#define RMI_RB_GLX_CID_MAP__CB_FMASK_MAP_MASK 0x000000F0L
+#define RMI_RB_GLX_CID_MAP__CB_CMASK_MAP_MASK 0x00000F00L
+#define RMI_RB_GLX_CID_MAP__CB_DCC_MAP_MASK 0x0000F000L
+#define RMI_RB_GLX_CID_MAP__DB_Z_MAP_MASK 0x000F0000L
+#define RMI_RB_GLX_CID_MAP__DB_S_MAP_MASK 0x00F00000L
+#define RMI_RB_GLX_CID_MAP__DB_TILE_MAP_MASK 0x0F000000L
+#define RMI_RB_GLX_CID_MAP__DB_ZPCPSD_MAP_MASK 0xF0000000L
+//RMI_XNACK_DEBUG
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID__SHIFT 0x0
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID_MASK 0x0000FFFFL
+//RMI_SPARE
+#define RMI_SPARE__RMI_2_GL1_128B_READ_DISABLE__SHIFT 0x1
+#define RMI_SPARE__RMI_2_GL1_REPEATER_FGCG_DISABLE__SHIFT 0x2
+#define RMI_SPARE__RMI_2_RB_REPEATER_FGCG_DISABLE__SHIFT 0x3
+#define RMI_SPARE__EARLY_WRITE_ACK_ENABLE_C_RW_NOA_RESOLVE_DIS__SHIFT 0x4
+#define RMI_SPARE__RMI_REORDER_BYPASS_CHANNEL_DIS__SHIFT 0x5
+#define RMI_SPARE__XNACK_RETURN_DATA_OVERRIDE__SHIFT 0x6
+#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define RMI_SPARE__NOFILL_RMI_CID_CC__SHIFT 0x8
+#define RMI_SPARE__NOFILL_RMI_CID_FC__SHIFT 0x9
+#define RMI_SPARE__NOFILL_RMI_CID_CM__SHIFT 0xa
+#define RMI_SPARE__NOFILL_RMI_CID_DC__SHIFT 0xb
+#define RMI_SPARE__NOFILL_RMI_CID_Z__SHIFT 0xc
+#define RMI_SPARE__NOFILL_RMI_CID_S__SHIFT 0xd
+#define RMI_SPARE__NOFILL_RMI_CID_TILE__SHIFT 0xe
+#define RMI_SPARE__SPARE_BIT_15_0__SHIFT 0xf
+#define RMI_SPARE__ARBITER_ADDRESS_MASK__SHIFT 0x10
+#define RMI_SPARE__RMI_2_GL1_128B_READ_DISABLE_MASK 0x00000002L
+#define RMI_SPARE__RMI_2_GL1_REPEATER_FGCG_DISABLE_MASK 0x00000004L
+#define RMI_SPARE__RMI_2_RB_REPEATER_FGCG_DISABLE_MASK 0x00000008L
+#define RMI_SPARE__EARLY_WRITE_ACK_ENABLE_C_RW_NOA_RESOLVE_DIS_MASK 0x00000010L
+#define RMI_SPARE__RMI_REORDER_BYPASS_CHANNEL_DIS_MASK 0x00000020L
+#define RMI_SPARE__XNACK_RETURN_DATA_OVERRIDE_MASK 0x00000040L
+#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define RMI_SPARE__NOFILL_RMI_CID_CC_MASK 0x00000100L
+#define RMI_SPARE__NOFILL_RMI_CID_FC_MASK 0x00000200L
+#define RMI_SPARE__NOFILL_RMI_CID_CM_MASK 0x00000400L
+#define RMI_SPARE__NOFILL_RMI_CID_DC_MASK 0x00000800L
+#define RMI_SPARE__NOFILL_RMI_CID_Z_MASK 0x00001000L
+#define RMI_SPARE__NOFILL_RMI_CID_S_MASK 0x00002000L
+#define RMI_SPARE__NOFILL_RMI_CID_TILE_MASK 0x00004000L
+#define RMI_SPARE__SPARE_BIT_15_0_MASK 0x00008000L
+#define RMI_SPARE__ARBITER_ADDRESS_MASK_MASK 0xFFFF0000L
+//RMI_SPARE_1
+#define RMI_SPARE_1__EARLY_WRACK_FIFO_DISABLE__SHIFT 0x0
+#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
+#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
+#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
+#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
+#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
+#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
+#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
+#define RMI_SPARE_1__RMI_REORDER_DIS_BY_CID__SHIFT 0x8
+#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
+#define RMI_SPARE_1__EARLY_WRACK_FIFO_DISABLE_MASK 0x00000001L
+#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
+#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
+#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
+#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
+#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
+#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
+#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
+#define RMI_SPARE_1__RMI_REORDER_DIS_BY_CID_MASK 0x0000FF00L
+#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
+//RMI_SPARE_2
+#define RMI_SPARE_2__ERROR_ZERO_BYTE_MASK_CID__SHIFT 0x0
+#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
+#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
+#define RMI_SPARE_2__ERROR_ZERO_BYTE_MASK_CID_MASK 0x0000FFFFL
+#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
+#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
+//CC_RMI_REDUNDANCY
+#define CC_RMI_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_0__SHIFT 0x1
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_1__SHIFT 0x2
+#define CC_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE__SHIFT 0x3
+#define CC_RMI_REDUNDANCY__REPAIR_ID_SWAP__SHIFT 0x4
+#define CC_RMI_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_0_MASK 0x00000002L
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_1_MASK 0x00000004L
+#define CC_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE_MASK 0x00000008L
+#define CC_RMI_REDUNDANCY__REPAIR_ID_SWAP_MASK 0x00000010L
+
+
+// addressBlock: gc_pmmdec
+//GCR_PIO_CNTL
+#define GCR_PIO_CNTL__GCR_DATA_INDEX__SHIFT 0x0
+#define GCR_PIO_CNTL__GCR_REG_DONE__SHIFT 0x2
+#define GCR_PIO_CNTL__GCR_REG_RESET__SHIFT 0x3
+#define GCR_PIO_CNTL__GCR_PIO_RSP_TAG__SHIFT 0x10
+#define GCR_PIO_CNTL__GCR_PIO_RSP_DONE__SHIFT 0x1e
+#define GCR_PIO_CNTL__GCR_READY__SHIFT 0x1f
+#define GCR_PIO_CNTL__GCR_DATA_INDEX_MASK 0x00000003L
+#define GCR_PIO_CNTL__GCR_REG_DONE_MASK 0x00000004L
+#define GCR_PIO_CNTL__GCR_REG_RESET_MASK 0x00000008L
+#define GCR_PIO_CNTL__GCR_PIO_RSP_TAG_MASK 0x00FF0000L
+#define GCR_PIO_CNTL__GCR_PIO_RSP_DONE_MASK 0x40000000L
+#define GCR_PIO_CNTL__GCR_READY_MASK 0x80000000L
+//GCR_PIO_DATA
+#define GCR_PIO_DATA__GCR_DATA__SHIFT 0x0
+#define GCR_PIO_DATA__GCR_DATA_MASK 0xFFFFFFFFL
+//PMM_CNTL
+#define PMM_CNTL__PMM_DISABLE__SHIFT 0x0
+#define PMM_CNTL__ABIT_FORCE_FLUSH__SHIFT 0x1
+#define PMM_CNTL__ABIT_TIMER_THRESHOLD__SHIFT 0x2
+#define PMM_CNTL__ABIT_TIMER_DISABLE__SHIFT 0x6
+#define PMM_CNTL__ABIT_TIMER_RESET__SHIFT 0x7
+#define PMM_CNTL__INTERRUPT_PRIORITY__SHIFT 0x8
+#define PMM_CNTL__PMM_INTERRUPTS_DISABLE__SHIFT 0xa
+#define PMM_CNTL__RESERVED__SHIFT 0xb
+#define PMM_CNTL__PMM_DISABLE_MASK 0x00000001L
+#define PMM_CNTL__ABIT_FORCE_FLUSH_MASK 0x00000002L
+#define PMM_CNTL__ABIT_TIMER_THRESHOLD_MASK 0x0000003CL
+#define PMM_CNTL__ABIT_TIMER_DISABLE_MASK 0x00000040L
+#define PMM_CNTL__ABIT_TIMER_RESET_MASK 0x00000080L
+#define PMM_CNTL__INTERRUPT_PRIORITY_MASK 0x00000300L
+#define PMM_CNTL__PMM_INTERRUPTS_DISABLE_MASK 0x00000400L
+#define PMM_CNTL__RESERVED_MASK 0xFFFFF800L
+//PMM_STATUS
+#define PMM_STATUS__PMM_IDLE__SHIFT 0x0
+#define PMM_STATUS__ABIT_FORCE_FLUSH_IN_PROGRESS__SHIFT 0x1
+#define PMM_STATUS__ABIT_FORCE_FLUSH_DONE__SHIFT 0x2
+#define PMM_STATUS__ABIT_TIMER_FLUSH_IN_PROGRESS__SHIFT 0x3
+#define PMM_STATUS__ABIT_TIMER_FLUSH_DONE__SHIFT 0x4
+#define PMM_STATUS__ABIT_TIMER_RUNNING__SHIFT 0x5
+#define PMM_STATUS__PMM_INTERRUPTS_PENDING__SHIFT 0x6
+#define PMM_STATUS__ABIT_FLUSH_ERROR__SHIFT 0x7
+#define PMM_STATUS__ABIT_TIMER_RESET_CDC_IN_PROGRESS__SHIFT 0x8
+#define PMM_STATUS__ABIT_TIMER_ENABLE_CDC_IN_PROGRESS__SHIFT 0x9
+#define PMM_STATUS__ABIT_TIMER_THRESHOLD_CDC_IN_PROGRESS__SHIFT 0xa
+#define PMM_STATUS__RESERVED__SHIFT 0xb
+#define PMM_STATUS__PMM_IDLE_MASK 0x00000001L
+#define PMM_STATUS__ABIT_FORCE_FLUSH_IN_PROGRESS_MASK 0x00000002L
+#define PMM_STATUS__ABIT_FORCE_FLUSH_DONE_MASK 0x00000004L
+#define PMM_STATUS__ABIT_TIMER_FLUSH_IN_PROGRESS_MASK 0x00000008L
+#define PMM_STATUS__ABIT_TIMER_FLUSH_DONE_MASK 0x00000010L
+#define PMM_STATUS__ABIT_TIMER_RUNNING_MASK 0x00000020L
+#define PMM_STATUS__PMM_INTERRUPTS_PENDING_MASK 0x00000040L
+#define PMM_STATUS__ABIT_FLUSH_ERROR_MASK 0x00000080L
+#define PMM_STATUS__ABIT_TIMER_RESET_CDC_IN_PROGRESS_MASK 0x00000100L
+#define PMM_STATUS__ABIT_TIMER_ENABLE_CDC_IN_PROGRESS_MASK 0x00000200L
+#define PMM_STATUS__ABIT_TIMER_THRESHOLD_CDC_IN_PROGRESS_MASK 0x00000400L
+#define PMM_STATUS__RESERVED_MASK 0xFFFFF800L
+
+
+// addressBlock: gc_utcl1dec
+//UTCL1_CTRL_1
+#define UTCL1_CTRL_1__UTCL1_CACHE_CORE_BYPASS__SHIFT 0x0
+#define UTCL1_CTRL_1__UTCL1_TCP_BYPASS__SHIFT 0x1
+#define UTCL1_CTRL_1__UTCL1_SQCI_BYPASS__SHIFT 0x2
+#define UTCL1_CTRL_1__UTCL1_SQCD_BYPASS__SHIFT 0x3
+#define UTCL1_CTRL_1__UTCL1_RMI_BYPASS__SHIFT 0x4
+#define UTCL1_CTRL_1__UTCL1_SQG_BYPASS__SHIFT 0x5
+#define UTCL1_CTRL_1__UTCL1_FORCE_RANGE_INV_TO_VMID__SHIFT 0x6
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL__SHIFT 0x7
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_DONE__SHIFT 0x8
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_1__SHIFT 0x9
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_2__SHIFT 0xb
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_3__SHIFT 0xd
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_4__SHIFT 0xf
+#define UTCL1_CTRL_1__RESERVED__SHIFT 0x11
+#define UTCL1_CTRL_1__UTCL1_CACHE_CORE_BYPASS_MASK 0x00000001L
+#define UTCL1_CTRL_1__UTCL1_TCP_BYPASS_MASK 0x00000002L
+#define UTCL1_CTRL_1__UTCL1_SQCI_BYPASS_MASK 0x00000004L
+#define UTCL1_CTRL_1__UTCL1_SQCD_BYPASS_MASK 0x00000008L
+#define UTCL1_CTRL_1__UTCL1_RMI_BYPASS_MASK 0x00000010L
+#define UTCL1_CTRL_1__UTCL1_SQG_BYPASS_MASK 0x00000020L
+#define UTCL1_CTRL_1__UTCL1_FORCE_RANGE_INV_TO_VMID_MASK 0x00000040L
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_MASK 0x00000080L
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_DONE_MASK 0x00000100L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_1_MASK 0x00000600L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_2_MASK 0x00001800L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_3_MASK 0x00006000L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_4_MASK 0x00018000L
+#define UTCL1_CTRL_1__RESERVED_MASK 0xFFFE0000L
+//UTCL1_ALOG
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_THRESHOLD__SHIFT 0x0
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER2_BYPASS__SHIFT 0x3
+#define UTCL1_ALOG__UTCL1_ALOG_ACTIVE__SHIFT 0x4
+#define UTCL1_ALOG__UTCL1_ALOG_MODE__SHIFT 0x5
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_LOCK_WINDOW__SHIFT 0x6
+#define UTCL1_ALOG__UTCL1_ALOG_ONLY_MISS__SHIFT 0x9
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_INTR_THRESHOLD__SHIFT 0xa
+#define UTCL1_ALOG__UTCL1_ALOG_SPACE_EN__SHIFT 0xc
+#define UTCL1_ALOG__UTCL1_ALOG_CLEAN__SHIFT 0xf
+#define UTCL1_ALOG__UTCL1_ALOG_IDLE__SHIFT 0x10
+#define UTCL1_ALOG__UTCL1_ALOG_TRACK_SEGMENT_SIZE__SHIFT 0x11
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_BYPASS__SHIFT 0x17
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_INTR_ON_ALLOC__SHIFT 0x18
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_THRESHOLD_MASK 0x00000007L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER2_BYPASS_MASK 0x00000008L
+#define UTCL1_ALOG__UTCL1_ALOG_ACTIVE_MASK 0x00000010L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE_MASK 0x00000020L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_LOCK_WINDOW_MASK 0x000001C0L
+#define UTCL1_ALOG__UTCL1_ALOG_ONLY_MISS_MASK 0x00000200L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_INTR_THRESHOLD_MASK 0x00000C00L
+#define UTCL1_ALOG__UTCL1_ALOG_SPACE_EN_MASK 0x00007000L
+#define UTCL1_ALOG__UTCL1_ALOG_CLEAN_MASK 0x00008000L
+#define UTCL1_ALOG__UTCL1_ALOG_IDLE_MASK 0x00010000L
+#define UTCL1_ALOG__UTCL1_ALOG_TRACK_SEGMENT_SIZE_MASK 0x007E0000L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_BYPASS_MASK 0x00800000L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_INTR_ON_ALLOC_MASK 0x01000000L
+//UTCL1_STATUS
+#define UTCL1_STATUS__UTCL1_HIT_PATH_BUSY__SHIFT 0x0
+#define UTCL1_STATUS__UTCL1_MH_BUSY__SHIFT 0x1
+#define UTCL1_STATUS__UTCL1_INV_BUSY__SHIFT 0x2
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_REQ__SHIFT 0x3
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_RET__SHIFT 0x4
+#define UTCL1_STATUS__UTCL1_LAST_UTCL2_RET_XNACK__SHIFT 0x5
+#define UTCL1_STATUS__UTCL1_RANGE_INV_IN_PROGRESS__SHIFT 0x7
+#define UTCL1_STATUS__RESERVED__SHIFT 0x8
+#define UTCL1_STATUS__UTCL1_HIT_PATH_BUSY_MASK 0x00000001L
+#define UTCL1_STATUS__UTCL1_MH_BUSY_MASK 0x00000002L
+#define UTCL1_STATUS__UTCL1_INV_BUSY_MASK 0x00000004L
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_REQ_MASK 0x00000008L
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_RET_MASK 0x00000010L
+#define UTCL1_STATUS__UTCL1_LAST_UTCL2_RET_XNACK_MASK 0x00000060L
+#define UTCL1_STATUS__UTCL1_RANGE_INV_IN_PROGRESS_MASK 0x00000080L
+#define UTCL1_STATUS__RESERVED_MASK 0x00000100L
+
+
+// addressBlock: gc_gcvmsharedpfdec
+//GCMC_VM_NB_MMIOBASE
+#define GCMC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define GCMC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//GCMC_VM_NB_MMIOLIMIT
+#define GCMC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define GCMC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//GCMC_VM_NB_PCI_CTRL
+#define GCMC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define GCMC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//GCMC_VM_NB_PCI_ARB
+#define GCMC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define GCMC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//GCMC_VM_NB_TOP_OF_DRAM_SLOT1
+#define GCMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define GCMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//GCMC_VM_NB_LOWER_TOP_OF_DRAM2
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//GCMC_VM_NB_UPPER_TOP_OF_DRAM2
+#define GCMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define GCMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//GCMC_VM_FB_OFFSET
+#define GCMC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define GCMC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//GCMC_VM_STEERING
+#define GCMC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define GCMC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//GCMC_SHARED_VIRT_RESET_REQ
+#define GCMC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define GCMC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define GCMC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define GCMC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//GCMC_MEM_POWER_LS
+#define GCMC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define GCMC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define GCMC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define GCMC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//GCMC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_SYSMEM_ADDRESS_START
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_SYSMEM_ADDRESS_END
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_APT_CNTL
+#define GCMC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define GCMC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define GCMC_VM_APT_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x2
+#define GCMC_VM_APT_CNTL__CHECK_IS_LOCAL__SHIFT 0x4
+#define GCMC_VM_APT_CNTL__CAP_FRAG_SIZE_2M__SHIFT 0x5
+#define GCMC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL__SHIFT 0x6
+#define GCMC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define GCMC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+#define GCMC_VM_APT_CNTL__FRAG_APT_INTXN_MODE_MASK 0x0000000CL
+#define GCMC_VM_APT_CNTL__CHECK_IS_LOCAL_MASK 0x00000010L
+#define GCMC_VM_APT_CNTL__CAP_FRAG_SIZE_2M_MASK 0x00000020L
+#define GCMC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL_MASK 0x000000C0L
+//GCMC_VM_LOCAL_FB_ADDRESS_START
+#define GCMC_VM_LOCAL_FB_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_FB_ADDRESS_END
+#define GCMC_VM_LOCAL_FB_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL
+#define GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//GCUTCL2_ICG_CTRL
+#define GCUTCL2_ICG_CTRL__OFF_HYSTERESIS__SHIFT 0x0
+#define GCUTCL2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE__SHIFT 0x4
+#define GCUTCL2_ICG_CTRL__STATIC_CLOCK_OVERRIDE__SHIFT 0x5
+#define GCUTCL2_ICG_CTRL__AON_CLOCK_OVERRIDE__SHIFT 0x6
+#define GCUTCL2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE__SHIFT 0x7
+#define GCUTCL2_ICG_CTRL__OFF_HYSTERESIS_MASK 0x0000000FL
+#define GCUTCL2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE_MASK 0x00000010L
+#define GCUTCL2_ICG_CTRL__STATIC_CLOCK_OVERRIDE_MASK 0x00000020L
+#define GCUTCL2_ICG_CTRL__AON_CLOCK_OVERRIDE_MASK 0x00000040L
+#define GCUTCL2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE_MASK 0x00000080L
+//GCMC_SHARED_ACTIVE_FCN_ID
+#define GCMC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define GCMC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1e
+#define GCMC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define GCMC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x40000000L
+//GCUTCL2_CGTT_BUSY_CTRL
+#define GCUTCL2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define GCUTCL2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x5
+#define GCUTCL2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000001FL
+#define GCUTCL2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000020L
+//GCMC_VM_FB_NOALLOC_CNTL
+#define GCMC_VM_FB_NOALLOC_CNTL__LOCAL_FB_NOALLOC_NOPTE__SHIFT 0x0
+#define GCMC_VM_FB_NOALLOC_CNTL__REMOTE_FB_NOALLOC_NOPTE__SHIFT 0x1
+#define GCMC_VM_FB_NOALLOC_CNTL__FB_NOALLOC_WALKER_FETCH__SHIFT 0x2
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_ATCL2_NOALLOC__SHIFT 0x3
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE2_NOALLOC__SHIFT 0x4
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE3_NOALLOC__SHIFT 0x5
+#define GCMC_VM_FB_NOALLOC_CNTL__LOCAL_FB_NOALLOC_NOPTE_MASK 0x00000001L
+#define GCMC_VM_FB_NOALLOC_CNTL__REMOTE_FB_NOALLOC_NOPTE_MASK 0x00000002L
+#define GCMC_VM_FB_NOALLOC_CNTL__FB_NOALLOC_WALKER_FETCH_MASK 0x00000004L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_ATCL2_NOALLOC_MASK 0x00000008L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE2_NOALLOC_MASK 0x00000010L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE3_NOALLOC_MASK 0x00000020L
+//GCUTCL2_HARVEST_BYPASS_GROUPS
+#define GCUTCL2_HARVEST_BYPASS_GROUPS__BYPASS_GROUPS__SHIFT 0x0
+#define GCUTCL2_HARVEST_BYPASS_GROUPS__BYPASS_GROUPS_MASK 0xFFFFFFFFL
+//GCUTCL2_GROUP_RET_FAULT_STATUS
+#define GCUTCL2_GROUP_RET_FAULT_STATUS__FAULT_GROUPS__SHIFT 0x0
+#define GCUTCL2_GROUP_RET_FAULT_STATUS__FAULT_GROUPS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcvml2pfdec
+//GCVM_L2_CNTL
+#define GCVM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define GCVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define GCVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define GCVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define GCVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define GCVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define GCVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define GCVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define GCVM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define GCVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define GCVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define GCVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define GCVM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define GCVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define GCVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define GCVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define GCVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define GCVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define GCVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define GCVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define GCVM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define GCVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define GCVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define GCVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//GCVM_L2_CNTL2
+#define GCVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define GCVM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define GCVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define GCVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define GCVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define GCVM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define GCVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define GCVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define GCVM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define GCVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define GCVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define GCVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define GCVM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define GCVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//GCVM_L2_CNTL3
+#define GCVM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define GCVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define GCVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define GCVM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define GCVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define GCVM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define GCVM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//GCVM_L2_STATUS
+#define GCVM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define GCVM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define GCVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define GCVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define GCVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define GCVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define GCVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define GCVM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define GCVM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define GCVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define GCVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define GCVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define GCVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define GCVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//GCVM_DUMMY_PAGE_FAULT_CNTL
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//GCVM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_INVALIDATE_CNTL
+#define GCVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING__SHIFT 0x0
+#define GCVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING__SHIFT 0x8
+#define GCVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING_MASK 0x000000FFL
+#define GCVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING_MASK 0x0000FF00L
+//GCVM_L2_PROTECTION_FAULT_CNTL
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define GCVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define GCVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define GCVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define GCVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define GCVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define GCVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define GCVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define GCVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//GCVM_L2_PROTECTION_FAULT_CNTL2
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//GCVM_L2_PROTECTION_FAULT_MM_CNTL3
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_MM_CNTL4
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_STATUS
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define GCVM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define GCVM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define GCVM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PRT__SHIFT 0x1d
+#define GCVM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PRT_MASK 0x20000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
+//GCVM_L2_PROTECTION_FAULT_ADDR_LO32
+#define GCVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_ADDR_HI32
+#define GCVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//GCVM_L2_CNTL4
+#define GCVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define GCVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define GCVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define GCVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define GCVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define GCVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define GCVM_L2_CNTL4__GC_CH_FGCG_OFF__SHIFT 0x1d
+#define GCVM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE__SHIFT 0x1e
+#define GCVM_L2_CNTL4__VFIFO_VISIBLE_BANK_SILOS__SHIFT 0x1f
+#define GCVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define GCVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define GCVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define GCVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define GCVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define GCVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+#define GCVM_L2_CNTL4__GC_CH_FGCG_OFF_MASK 0x20000000L
+#define GCVM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE_MASK 0x40000000L
+#define GCVM_L2_CNTL4__VFIFO_VISIBLE_BANK_SILOS_MASK 0x80000000L
+//GCVM_L2_MM_GROUP_RT_CLASSES
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//GCVM_L2_BANK_SELECT_RESERVED_CID
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
+//GCVM_L2_BANK_SELECT_RESERVED_CID2
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
+//GCVM_L2_CACHE_PARITY_CNTL
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//GCVM_L2_ICG_CTRL
+#define GCVM_L2_ICG_CTRL__OFF_HYSTERESIS__SHIFT 0x0
+#define GCVM_L2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE__SHIFT 0x4
+#define GCVM_L2_ICG_CTRL__STATIC_CLOCK_OVERRIDE__SHIFT 0x5
+#define GCVM_L2_ICG_CTRL__AON_CLOCK_OVERRIDE__SHIFT 0x6
+#define GCVM_L2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE__SHIFT 0x7
+#define GCVM_L2_ICG_CTRL__OFF_HYSTERESIS_MASK 0x0000000FL
+#define GCVM_L2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE_MASK 0x00000010L
+#define GCVM_L2_ICG_CTRL__STATIC_CLOCK_OVERRIDE_MASK 0x00000020L
+#define GCVM_L2_ICG_CTRL__AON_CLOCK_OVERRIDE_MASK 0x00000040L
+#define GCVM_L2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE_MASK 0x00000080L
+//GCVM_L2_CNTL5
+#define GCVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID__SHIFT 0x5
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE__SHIFT 0xe
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE__SHIFT 0xf
+#define GCVM_L2_CNTL5__UTCL2_ATC_REQ_FGCG_OFF__SHIFT 0x10
+#define GCVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID_MASK 0x00003FE0L
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE_MASK 0x00004000L
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE_MASK 0x00008000L
+#define GCVM_L2_CNTL5__UTCL2_ATC_REQ_FGCG_OFF_MASK 0x00010000L
+//GCVM_L2_GCR_CNTL
+#define GCVM_L2_GCR_CNTL__GCR_ENABLE__SHIFT 0x0
+#define GCVM_L2_GCR_CNTL__GCR_CLIENT_ID__SHIFT 0x1
+#define GCVM_L2_GCR_CNTL__GCR_ENABLE_MASK 0x00000001L
+#define GCVM_L2_GCR_CNTL__GCR_CLIENT_ID_MASK 0x000003FEL
+//GCVML2_WALKER_MACRO_THROTTLE_TIME
+#define GCVML2_WALKER_MACRO_THROTTLE_TIME__TIME__SHIFT 0x0
+#define GCVML2_WALKER_MACRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
+//GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT
+#define GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
+#define GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
+//GCVML2_WALKER_MICRO_THROTTLE_TIME
+#define GCVML2_WALKER_MICRO_THROTTLE_TIME__TIME__SHIFT 0x0
+#define GCVML2_WALKER_MICRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
+//GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT
+#define GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
+#define GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
+//GCVM_L2_CGTT_BUSY_CTRL
+#define GCVM_L2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define GCVM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x5
+#define GCVM_L2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000001FL
+#define GCVM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000020L
+//GCVM_L2_PTE_CACHE_DUMP_CNTL
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ENABLE__SHIFT 0x0
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__READY__SHIFT 0x1
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__BANK__SHIFT 0x4
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__CACHE__SHIFT 0x8
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ASSOC__SHIFT 0xc
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__INDEX__SHIFT 0x10
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ENABLE_MASK 0x00000001L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__READY_MASK 0x00000002L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__BANK_MASK 0x000000F0L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__CACHE_MASK 0x00000F00L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ASSOC_MASK 0x0000F000L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__INDEX_MASK 0xFFFF0000L
+//GCVM_L2_PTE_CACHE_DUMP_READ
+#define GCVM_L2_PTE_CACHE_DUMP_READ__DATA__SHIFT 0x0
+#define GCVM_L2_PTE_CACHE_DUMP_READ__DATA_MASK 0xFFFFFFFFL
+//GCVM_L2_BANK_SELECT_MASKS
+#define GCVM_L2_BANK_SELECT_MASKS__MASK0__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_MASKS__MASK1__SHIFT 0x4
+#define GCVM_L2_BANK_SELECT_MASKS__MASK2__SHIFT 0x8
+#define GCVM_L2_BANK_SELECT_MASKS__MASK3__SHIFT 0xc
+#define GCVM_L2_BANK_SELECT_MASKS__MASK0_MASK 0x0000000FL
+#define GCVM_L2_BANK_SELECT_MASKS__MASK1_MASK 0x000000F0L
+#define GCVM_L2_BANK_SELECT_MASKS__MASK2_MASK 0x00000F00L
+#define GCVM_L2_BANK_SELECT_MASKS__MASK3_MASK 0x0000F000L
+//GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__UPDATE_MASK 0x00000400L
+//GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__UPDATE_MASK 0x00000400L
+//GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__UPDATE_MASK 0x00000400L
+//GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__CREDITS__SHIFT 0x0
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__UPDATE__SHIFT 0xa
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__CREDITS_MASK 0x000003FFL
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__UPDATE_MASK 0x00000400L
+//GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__CREDITS__SHIFT 0x0
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__UPDATE__SHIFT 0xa
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__CREDITS_MASK 0x000003FFL
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__UPDATE_MASK 0x00000400L
+
+
+// addressBlock: gc_gcatcl2dec
+//GC_ATC_L2_CNTL
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define GC_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define GC_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define GC_ATC_L2_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x14
+#define GC_ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE__SHIFT 0x16
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define GC_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define GC_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+#define GC_ATC_L2_CNTL__FRAG_APT_INTXN_MODE_MASK 0x00300000L
+#define GC_ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE_MASK 0x0FC00000L
+//GC_ATC_L2_CNTL2
+#define GC_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define GC_ATC_L2_CNTL2__NUM_BANKS_LOG2__SHIFT 0x6
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x9
+#define GC_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xb
+#define GC_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0xc
+#define GC_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xf
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x12
+#define GC_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define GC_ATC_L2_CNTL2__NUM_BANKS_LOG2_MASK 0x000001C0L
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x00000600L
+#define GC_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000800L
+#define GC_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00007000L
+#define GC_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00038000L
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00FC0000L
+//GC_ATC_L2_CACHE_DATA0
+#define GC_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define GC_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define GC_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x18
+#define GC_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define GC_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define GC_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x00FFFFFCL
+#define GC_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x0F000000L
+//GC_ATC_L2_CACHE_DATA1
+#define GC_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//GC_ATC_L2_CACHE_DATA2
+#define GC_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//GC_ATC_L2_CNTL3
+#define GC_ATC_L2_CNTL3__L2_SMALLK_CACHE_FRAGMENT_SIZE__SHIFT 0x0
+#define GC_ATC_L2_CNTL3__L2_MIDK_CACHE_FRAGMENT_SIZE__SHIFT 0x6
+#define GC_ATC_L2_CNTL3__L2_BIGK_CACHE_FRAGMENT_SIZE__SHIFT 0xc
+#define GC_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x12
+#define GC_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x15
+#define GC_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x1b
+#define GC_ATC_L2_CNTL3__REPEATER_FGCG_OFF__SHIFT 0x1e
+#define GC_ATC_L2_CNTL3__L2_SMALLK_CACHE_FRAGMENT_SIZE_MASK 0x0000003FL
+#define GC_ATC_L2_CNTL3__L2_MIDK_CACHE_FRAGMENT_SIZE_MASK 0x00000FC0L
+#define GC_ATC_L2_CNTL3__L2_BIGK_CACHE_FRAGMENT_SIZE_MASK 0x0003F000L
+#define GC_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x001C0000L
+#define GC_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x07E00000L
+#define GC_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x38000000L
+#define GC_ATC_L2_CNTL3__REPEATER_FGCG_OFF_MASK 0x40000000L
+//GC_ATC_L2_STATUS
+#define GC_ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define GC_ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS__SHIFT 0x1
+#define GC_ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define GC_ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS_MASK 0x00000002L
+//GC_ATC_L2_STATUS2
+#define GC_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define GC_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define GC_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define GC_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//GC_ATC_L2_MISC_CG
+#define GC_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define GC_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define GC_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define GC_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define GC_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define GC_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//GC_ATC_L2_MEM_POWER_LS
+#define GC_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define GC_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define GC_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define GC_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//GC_ATC_L2_SDPPORT_CTRL
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN__SHIFT 0x0
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV__SHIFT 0x1
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN__SHIFT 0x2
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x3
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN__SHIFT 0x4
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV__SHIFT 0x5
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN__SHIFT 0x6
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV__SHIFT 0x7
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN__SHIFT 0x8
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV__SHIFT 0x9
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN_MASK 0x00000001L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV_MASK 0x00000002L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN_MASK 0x00000004L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV_MASK 0x00000008L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN_MASK 0x00000010L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV_MASK 0x00000020L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN_MASK 0x00000040L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV_MASK 0x00000080L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN_MASK 0x00000100L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV_MASK 0x00000200L
+
+
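Editor's note: the __SHIFT/_MASK pairs in this header follow the usual convention of placing a field with (val << FIELD__SHIFT) & FIELD_MASK and reading it back with (reg & FIELD_MASK) >> FIELD__SHIFT. A minimal, self-contained sketch of that pattern is below; the FIELD_SET/FIELD_GET helpers are hypothetical illustrations, not part of this header (the amdgpu driver provides its own field-access macros).

/* Illustrative only: generic helpers built from the __SHIFT/_MASK pairs above. */
#include <stdint.h>

#define FIELD_SET(reg_val, reg, field, val)                                    \
	(((reg_val) & ~reg##__##field##_MASK) |                                \
	 (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
#define FIELD_GET(reg_val, reg, field)                                         \
	(((reg_val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Example: update two GC_ATC_L2_CNTL2 fields in a read-modify-write value.
 * The field values here are placeholders, not recommended settings. */
static uint32_t gc_atc_l2_cntl2_example(uint32_t cur)
{
	cur = FIELD_SET(cur, GC_ATC_L2_CNTL2, NUM_BANKS_LOG2, 2);       /* bits 8:6  */
	cur = FIELD_SET(cur, GC_ATC_L2_CNTL2, L2_CACHE_UPDATE_MODE, 1); /* bits 10:9 */
	return cur;
}
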
+// addressBlock: gc_gcl2tlbpfdec
+//GCL2TLB_TLB0_STATUS
+#define GCL2TLB_TLB0_STATUS__BUSY__SHIFT 0x0
+#define GCL2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define GCL2TLB_TLB0_STATUS__FOUND_APERTURE_FAULTS__SHIFT 0x2
+#define GCL2TLB_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define GCL2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+#define GCL2TLB_TLB0_STATUS__FOUND_APERTURE_FAULTS_MASK 0x00000004L
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR_MASK 0xFFFFFFFFL
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID__SHIFT 0x4
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID__SHIFT 0x8
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF__SHIFT 0xc
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA__SHIFT 0xd
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM__SHIFT 0xf
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM__SHIFT 0x10
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM__SHIFT 0x11
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID__SHIFT 0x12
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ__SHIFT 0x1e
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR_MASK 0x0000000FL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID_MASK 0x000000F0L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID_MASK 0x00000F00L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF_MASK 0x00001000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA_MASK 0x00006000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM_MASK 0x00008000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM_MASK 0x00010000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM_MASK 0x00020000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID_MASK 0x07FC0000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ_MASK 0x40000000L
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR_MASK 0xFFFFFFFFL
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS__SHIFT 0x4
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE__SHIFT 0x7
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP__SHIFT 0xd
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA__SHIFT 0xe
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO__SHIFT 0xf
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ__SHIFT 0x10
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE__SHIFT 0x11
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE__SHIFT 0x12
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG__SHIFT 0x15
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK__SHIFT 0x16
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC__SHIFT 0x18
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK__SHIFT 0x1f
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR_MASK 0x0000000FL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS_MASK 0x00000070L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE_MASK 0x00001F80L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP_MASK 0x00002000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA_MASK 0x00004000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO_MASK 0x00008000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ_MASK 0x00010000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE_MASK 0x00020000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE_MASK 0x001C0000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG_MASK 0x00200000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK_MASK 0x00C00000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC_MASK 0x01000000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK_MASK 0x80000000L
+
+
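Editor's note: the translation-assist interface above is split across _LO/_HI request and response registers. A hedged sketch of composing the HI request word from its fields follows; the function name and argument values are hypothetical, and the meaning of REQ is inferred from the field name only.

/* Illustrative only: pack GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI from its
 * documented fields using the masks/shifts defined above. */
#include <stdint.h>

static uint32_t gcutc_assist_request_hi(uint32_t addr_hi, uint32_t vmid,
					uint32_t vfid, int rd, int wr)
{
	uint32_t v = 0;

	v |= (addr_hi << GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR__SHIFT) &
	     GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR_MASK;
	v |= (vmid << GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID__SHIFT) &
	     GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID_MASK;
	v |= (vfid << GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID__SHIFT) &
	     GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID_MASK;
	if (rd)
		v |= GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM_MASK;
	if (wr)
		v |= GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM_MASK;
	/* REQ presumably arms the translation request once both address words
	 * are programmed (assumption based on the field name). */
	v |= GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ_MASK;
	return v;
}
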
+// addressBlock: gc_gcvmsharedvcdec
+//GCMC_VM_FB_LOCATION_BASE
+#define GCMC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//GCMC_VM_FB_LOCATION_TOP
+#define GCMC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define GCMC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_TOP
+#define GCMC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define GCMC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_BOT
+#define GCMC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define GCMC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_BASE
+#define GCMC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define GCMC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define GCMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//GCMC_VM_MX_L1_TLB_CNTL
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define GCMC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define GCMC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define GCMC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define GCMC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00003800L
+
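Editor's note: the GCMC_VM_FB_LOCATION_BASE/TOP fields above are 24 bits wide. The sketch below extracts them with the same mask/shift pattern; interpreting the values as the upper bits of a byte address (16 MiB granularity) is an assumption based on common usage of similar FB_LOCATION registers, not something stated in this header.

/* Illustrative only: decode the framebuffer aperture window from raw register values. */
#include <stdint.h>

static void gcmc_vm_fb_window_example(uint32_t base_reg, uint32_t top_reg,
				      uint64_t *start, uint64_t *end)
{
	uint64_t fb_base = (base_reg & GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK) >>
			   GCMC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT;
	uint64_t fb_top  = (top_reg & GCMC_VM_FB_LOCATION_TOP__FB_TOP_MASK) >>
			   GCMC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT;

	*start = fb_base << 24;              /* assumed 16 MiB units */
	*end   = ((fb_top + 1) << 24) - 1;   /* inclusive end of the aperture */
}
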
+
+// addressBlock: gc_gcvml2vcdec
+//GCVM_CONTEXT0_CNTL
+#define GCVM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT1_CNTL
+#define GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT2_CNTL
+#define GCVM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT3_CNTL
+#define GCVM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT4_CNTL
+#define GCVM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT5_CNTL
+#define GCVM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT6_CNTL
+#define GCVM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT7_CNTL
+#define GCVM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT8_CNTL
+#define GCVM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT9_CNTL
+#define GCVM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT10_CNTL
+#define GCVM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT11_CNTL
+#define GCVM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT12_CNTL
+#define GCVM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT13_CNTL
+#define GCVM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT14_CNTL
+#define GCVM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT15_CNTL
+#define GCVM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXTS_DISABLE
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//GCVM_INVALIDATE_ENG0_SEM
+#define GCVM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG1_SEM
+#define GCVM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG2_SEM
+#define GCVM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG3_SEM
+#define GCVM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG4_SEM
+#define GCVM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG5_SEM
+#define GCVM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG6_SEM
+#define GCVM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG7_SEM
+#define GCVM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG8_SEM
+#define GCVM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG9_SEM
+#define GCVM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG10_SEM
+#define GCVM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG11_SEM
+#define GCVM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG12_SEM
+#define GCVM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG13_SEM
+#define GCVM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG14_SEM
+#define GCVM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG15_SEM
+#define GCVM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG16_SEM
+#define GCVM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG17_SEM
+#define GCVM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG0_REQ
+#define GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG0_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG0_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG1_REQ
+#define GCVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG1_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG1_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG2_REQ
+#define GCVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG2_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG2_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG3_REQ
+#define GCVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG3_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG3_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG4_REQ
+#define GCVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG4_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG4_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG5_REQ
+#define GCVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG5_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG5_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG6_REQ
+#define GCVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG6_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG6_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG7_REQ
+#define GCVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG7_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG7_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG8_REQ
+#define GCVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG8_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG8_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG9_REQ
+#define GCVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG9_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG9_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG10_REQ
+#define GCVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG10_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG10_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG11_REQ
+#define GCVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG11_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG11_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG12_REQ
+#define GCVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG12_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG12_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG13_REQ
+#define GCVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG13_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG13_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG14_REQ
+#define GCVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG14_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG14_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG15_REQ
+#define GCVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG15_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG15_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG16_REQ
+#define GCVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG16_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG16_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG17_REQ
+#define GCVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG17_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG17_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG0_ACK
+#define GCVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG1_ACK
+#define GCVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG2_ACK
+#define GCVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG3_ACK
+#define GCVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG4_ACK
+#define GCVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG5_ACK
+#define GCVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG6_ACK
+#define GCVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG7_ACK
+#define GCVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG8_ACK
+#define GCVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG9_ACK
+#define GCVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG10_ACK
+#define GCVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG11_ACK
+#define GCVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG12_ACK
+#define GCVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG13_ACK
+#define GCVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG14_ACK
+#define GCVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG15_ACK
+#define GCVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG16_ACK
+#define GCVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG17_ACK
+#define GCVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+
+
+// addressBlock: gc_gcvml2perfddec
+//GCVML2_PERFCOUNTER2_0_LO
+#define GCVML2_PERFCOUNTER2_0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_1_LO
+#define GCVML2_PERFCOUNTER2_1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_0_HI
+#define GCVML2_PERFCOUNTER2_0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_1_HI
+#define GCVML2_PERFCOUNTER2_1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcvml2prdec
+//GCMC_VM_L2_PERFCOUNTER_LO
+#define GCMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCMC_VM_L2_PERFCOUNTER_HI
+#define GCMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GCUTCL2_PERFCOUNTER_LO
+#define GCUTCL2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCUTCL2_PERFCOUNTER_HI
+#define GCUTCL2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCUTCL2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCUTCL2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcatcl2perfddec
+//GC_ATC_L2_PERFCOUNTER2_LO
+#define GC_ATC_L2_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GC_ATC_L2_PERFCOUNTER2_HI
+#define GC_ATC_L2_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcatcl2pfcntrdec
+//GC_ATC_L2_PERFCOUNTER_LO
+#define GC_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GC_ATC_L2_PERFCOUNTER_HI
+#define GC_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GC_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcl2tlbprdec
+//GCL2TLB_PERFCOUNTER_LO
+#define GCL2TLB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCL2TLB_PERFCOUNTER_HI
+#define GCL2TLB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCL2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCL2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcvml2perfsdec
+//GCVML2_PERFCOUNTER2_0_SELECT
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_1_SELECT
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_0_SELECT1
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_1_SELECT1
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_0_MODE
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GCVML2_PERFCOUNTER2_1_MODE
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+
+
+// addressBlock: gc_gcvml2pldec
+//GCMC_VM_L2_PERFCOUNTER0_CFG
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER1_CFG
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER2_CFG
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER3_CFG
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER4_CFG
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER5_CFG
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER6_CFG
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER7_CFG
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//GCUTCL2_PERFCOUNTER0_CFG
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER1_CFG
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER2_CFG
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER3_CFG
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER_RSLT_CNTL
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcatcl2perfsdec
+//GC_ATC_L2_PERFCOUNTER2_SELECT
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GC_ATC_L2_PERFCOUNTER2_SELECT1
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GC_ATC_L2_PERFCOUNTER2_MODE
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+
+
+// addressBlock: gc_gcatcl2pfcntldec
+//GC_ATC_L2_PERFCOUNTER0_CFG
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GC_ATC_L2_PERFCOUNTER1_CFG
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GC_ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcl2tlbpldec
+//GCL2TLB_PERFCOUNTER0_CFG
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER1_CFG
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER2_CFG
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER3_CFG
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER_RSLT_CNTL
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcvml2pspdec
+//GCUTCL2_TRANSLATION_BYPASS_BY_VMID
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__TRANS_BYPASS_VMIDS__SHIFT 0x0
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__GPA_MODE_VMIDS__SHIFT 0x10
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__TRANS_BYPASS_VMIDS_MASK 0x0000FFFFL
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__GPA_MODE_VMIDS_MASK 0xFFFF0000L
+//GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE
+#define GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE__GPU_HOST_TRANSLATION_ENABLE__SHIFT 0x0
+#define GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE__GPU_HOST_TRANSLATION_ENABLE_MASK 0x00000001L
+//GCVM_IOMMU_CONTROL_REGISTER
+#define GCVM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define GCVM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//GCVM_IOMMU_MMIO_CNTRL_1
+#define GCVM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define GCVM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//GCMC_VM_MARC_BASE_LO_0
+#define GCMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_1
+#define GCMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_2
+#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_3
+#define GCMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_4
+#define GCMC_VM_MARC_BASE_LO_4__MARC_BASE_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_4__MARC_BASE_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_5
+#define GCMC_VM_MARC_BASE_LO_5__MARC_BASE_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_5__MARC_BASE_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_6
+#define GCMC_VM_MARC_BASE_LO_6__MARC_BASE_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_6__MARC_BASE_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_7
+#define GCMC_VM_MARC_BASE_LO_7__MARC_BASE_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_7__MARC_BASE_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_8
+#define GCMC_VM_MARC_BASE_LO_8__MARC_BASE_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_8__MARC_BASE_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_9
+#define GCMC_VM_MARC_BASE_LO_9__MARC_BASE_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_9__MARC_BASE_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_10
+#define GCMC_VM_MARC_BASE_LO_10__MARC_BASE_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_10__MARC_BASE_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_11
+#define GCMC_VM_MARC_BASE_LO_11__MARC_BASE_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_11__MARC_BASE_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_12
+#define GCMC_VM_MARC_BASE_LO_12__MARC_BASE_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_12__MARC_BASE_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_13
+#define GCMC_VM_MARC_BASE_LO_13__MARC_BASE_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_13__MARC_BASE_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_14
+#define GCMC_VM_MARC_BASE_LO_14__MARC_BASE_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_14__MARC_BASE_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_15
+#define GCMC_VM_MARC_BASE_LO_15__MARC_BASE_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_15__MARC_BASE_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_HI_0
+#define GCMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_1
+#define GCMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_2
+#define GCMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_3
+#define GCMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_4
+#define GCMC_VM_MARC_BASE_HI_4__MARC_BASE_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_4__MARC_BASE_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_5
+#define GCMC_VM_MARC_BASE_HI_5__MARC_BASE_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_5__MARC_BASE_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_6
+#define GCMC_VM_MARC_BASE_HI_6__MARC_BASE_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_6__MARC_BASE_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_7
+#define GCMC_VM_MARC_BASE_HI_7__MARC_BASE_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_7__MARC_BASE_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_8
+#define GCMC_VM_MARC_BASE_HI_8__MARC_BASE_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_8__MARC_BASE_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_9
+#define GCMC_VM_MARC_BASE_HI_9__MARC_BASE_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_9__MARC_BASE_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_10
+#define GCMC_VM_MARC_BASE_HI_10__MARC_BASE_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_10__MARC_BASE_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_11
+#define GCMC_VM_MARC_BASE_HI_11__MARC_BASE_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_11__MARC_BASE_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_12
+#define GCMC_VM_MARC_BASE_HI_12__MARC_BASE_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_12__MARC_BASE_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_13
+#define GCMC_VM_MARC_BASE_HI_13__MARC_BASE_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_13__MARC_BASE_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_14
+#define GCMC_VM_MARC_BASE_HI_14__MARC_BASE_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_14__MARC_BASE_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_15
+#define GCMC_VM_MARC_BASE_HI_15__MARC_BASE_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_15__MARC_BASE_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_LO_0
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_1
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_2
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_3
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_4
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_ENABLE_4__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_READONLY_4__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_RELOC_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_ENABLE_4_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_READONLY_4_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_RELOC_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_5
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_ENABLE_5__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_READONLY_5__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_RELOC_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_ENABLE_5_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_READONLY_5_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_RELOC_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_6
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_ENABLE_6__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_READONLY_6__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_RELOC_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_ENABLE_6_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_READONLY_6_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_RELOC_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_7
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_ENABLE_7__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_READONLY_7__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_RELOC_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_ENABLE_7_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_READONLY_7_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_RELOC_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_8
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_ENABLE_8__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_READONLY_8__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_RELOC_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_ENABLE_8_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_READONLY_8_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_RELOC_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_9
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_ENABLE_9__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_READONLY_9__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_RELOC_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_ENABLE_9_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_READONLY_9_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_RELOC_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_10
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_ENABLE_10__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_READONLY_10__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_RELOC_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_ENABLE_10_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_READONLY_10_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_RELOC_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_11
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_ENABLE_11__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_READONLY_11__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_RELOC_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_ENABLE_11_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_READONLY_11_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_RELOC_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_12
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_ENABLE_12__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_READONLY_12__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_RELOC_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_ENABLE_12_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_READONLY_12_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_RELOC_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_13
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_ENABLE_13__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_READONLY_13__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_RELOC_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_ENABLE_13_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_READONLY_13_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_RELOC_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_14
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_ENABLE_14__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_READONLY_14__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_RELOC_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_ENABLE_14_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_READONLY_14_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_RELOC_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_15
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_ENABLE_15__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_READONLY_15__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_RELOC_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_ENABLE_15_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_READONLY_15_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_RELOC_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_HI_0
+#define GCMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_1
+#define GCMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_2
+#define GCMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_3
+#define GCMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_4
+#define GCMC_VM_MARC_RELOC_HI_4__MARC_RELOC_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_4__MARC_RELOC_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_5
+#define GCMC_VM_MARC_RELOC_HI_5__MARC_RELOC_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_5__MARC_RELOC_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_6
+#define GCMC_VM_MARC_RELOC_HI_6__MARC_RELOC_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_6__MARC_RELOC_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_7
+#define GCMC_VM_MARC_RELOC_HI_7__MARC_RELOC_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_7__MARC_RELOC_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_8
+#define GCMC_VM_MARC_RELOC_HI_8__MARC_RELOC_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_8__MARC_RELOC_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_9
+#define GCMC_VM_MARC_RELOC_HI_9__MARC_RELOC_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_9__MARC_RELOC_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_10
+#define GCMC_VM_MARC_RELOC_HI_10__MARC_RELOC_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_10__MARC_RELOC_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_11
+#define GCMC_VM_MARC_RELOC_HI_11__MARC_RELOC_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_11__MARC_RELOC_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_12
+#define GCMC_VM_MARC_RELOC_HI_12__MARC_RELOC_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_12__MARC_RELOC_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_13
+#define GCMC_VM_MARC_RELOC_HI_13__MARC_RELOC_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_13__MARC_RELOC_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_14
+#define GCMC_VM_MARC_RELOC_HI_14__MARC_RELOC_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_14__MARC_RELOC_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_15
+#define GCMC_VM_MARC_RELOC_HI_15__MARC_RELOC_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_15__MARC_RELOC_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_LO_0
+#define GCMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_1
+#define GCMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_2
+#define GCMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_3
+#define GCMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_4
+#define GCMC_VM_MARC_LEN_LO_4__MARC_LEN_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_4__MARC_LEN_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_5
+#define GCMC_VM_MARC_LEN_LO_5__MARC_LEN_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_5__MARC_LEN_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_6
+#define GCMC_VM_MARC_LEN_LO_6__MARC_LEN_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_6__MARC_LEN_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_7
+#define GCMC_VM_MARC_LEN_LO_7__MARC_LEN_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_7__MARC_LEN_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_8
+#define GCMC_VM_MARC_LEN_LO_8__MARC_LEN_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_8__MARC_LEN_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_9
+#define GCMC_VM_MARC_LEN_LO_9__MARC_LEN_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_9__MARC_LEN_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_10
+#define GCMC_VM_MARC_LEN_LO_10__MARC_LEN_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_10__MARC_LEN_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_11
+#define GCMC_VM_MARC_LEN_LO_11__MARC_LEN_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_11__MARC_LEN_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_12
+#define GCMC_VM_MARC_LEN_LO_12__MARC_LEN_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_12__MARC_LEN_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_13
+#define GCMC_VM_MARC_LEN_LO_13__MARC_LEN_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_13__MARC_LEN_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_14
+#define GCMC_VM_MARC_LEN_LO_14__MARC_LEN_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_14__MARC_LEN_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_15
+#define GCMC_VM_MARC_LEN_LO_15__MARC_LEN_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_15__MARC_LEN_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_HI_0
+#define GCMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_1
+#define GCMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_2
+#define GCMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_3
+#define GCMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_4
+#define GCMC_VM_MARC_LEN_HI_4__MARC_LEN_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_4__MARC_LEN_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_5
+#define GCMC_VM_MARC_LEN_HI_5__MARC_LEN_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_5__MARC_LEN_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_6
+#define GCMC_VM_MARC_LEN_HI_6__MARC_LEN_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_6__MARC_LEN_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_7
+#define GCMC_VM_MARC_LEN_HI_7__MARC_LEN_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_7__MARC_LEN_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_8
+#define GCMC_VM_MARC_LEN_HI_8__MARC_LEN_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_8__MARC_LEN_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_9
+#define GCMC_VM_MARC_LEN_HI_9__MARC_LEN_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_9__MARC_LEN_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_10
+#define GCMC_VM_MARC_LEN_HI_10__MARC_LEN_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_10__MARC_LEN_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_11
+#define GCMC_VM_MARC_LEN_HI_11__MARC_LEN_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_11__MARC_LEN_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_12
+#define GCMC_VM_MARC_LEN_HI_12__MARC_LEN_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_12__MARC_LEN_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_13
+#define GCMC_VM_MARC_LEN_HI_13__MARC_LEN_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_13__MARC_LEN_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_14
+#define GCMC_VM_MARC_LEN_HI_14__MARC_LEN_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_14__MARC_LEN_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_15
+#define GCMC_VM_MARC_LEN_HI_15__MARC_LEN_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_15__MARC_LEN_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_PFVF_MAPPING_0
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_1
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_2
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_3
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_4
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_5
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_6
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_7
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_8
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_9
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_10
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_11
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_12
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_13
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_14
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_15
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_PF_MASK 0x00010000L
+//GCUTC_TRANSLATION_FAULT_CNTL0
+#define GCUTC_TRANSLATION_FAULT_CNTL0__DEFAULT_PHYSICAL_PAGE_ADDRESS_LSB__SHIFT 0x0
+#define GCUTC_TRANSLATION_FAULT_CNTL0__DEFAULT_PHYSICAL_PAGE_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//GCUTC_TRANSLATION_FAULT_CNTL1
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_PHYSICAL_PAGE_ADDRESS_MSB__SHIFT 0x0
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_IO__SHIFT 0x4
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SPA__SHIFT 0x5
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SNOOP__SHIFT 0x6
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_PHYSICAL_PAGE_ADDRESS_MSB_MASK 0x0000000FL
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_IO_MASK 0x00000010L
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SPA_MASK 0x00000020L
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SNOOP_MASK 0x00000040L
+
+
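Editor's note, not part of the patch: every register in this generated block is described only by a paired field shift and field mask, and the driver code that consumes them packs a field by shifting the value into place and masking, and unpacks it with the inverse. The short standalone C sketch below illustrates that pattern under stated assumptions; the FIELD_SET()/FIELD_GET() helpers are hypothetical stand-ins (the driver's own generic field macros are assumed to behave the same way), and the MARC_BASE_LO_2 values are copied from the defines above so the sketch compiles on its own.

/* Minimal sketch: using a generated __SHIFT/_MASK pair as a bit-field accessor. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers; token-paste the register and field names into the
 * generated macro names, e.g. GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK. */
#define FIELD_SET(val, reg, fld, x) \
	(((val) & ~(uint32_t)reg##__##fld##_MASK) | \
	 (((uint32_t)(x) << reg##__##fld##__SHIFT) & (uint32_t)reg##__##fld##_MASK))
#define FIELD_GET(val, reg, fld) \
	(((val) & (uint32_t)reg##__##fld##_MASK) >> reg##__##fld##__SHIFT)

/* Values copied from the defines above so this sketch is self-contained. */
#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L

int main(void)
{
	uint32_t reg = 0;

	/* Pack a 20-bit base value into bits 31:12 of the register image. */
	reg = FIELD_SET(reg, GCMC_VM_MARC_BASE_LO_2, MARC_BASE_LO_2, 0x12345);

	/* Unpack it again; prints "base = 0x12345". */
	printf("base = 0x%x\n", FIELD_GET(reg, GCMC_VM_MARC_BASE_LO_2, MARC_BASE_LO_2));
	return 0;
}
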
+// addressBlock: gc_gcl2tlbpspdec
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL__ENABLE__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL__ENABLE_MASK 0x00000001L
+
+
+// addressBlock: gc_shdec
+//SPI_SHADER_PGM_RSRC4_PS
+#define SPI_SHADER_PGM_RSRC4_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_PS__INST_PREF_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_PS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC4_PS__INST_PREF_SIZE_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_PS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_PGM_CHKSUM_PS
+#define SPI_SHADER_PGM_CHKSUM_PS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_PS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC3_PS
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_PS__LDS_GROUP_SIZE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_PS__LDS_GROUP_SIZE_MASK 0x00C00000L
+//SPI_SHADER_PGM_LO_PS
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_PS
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_PS
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_PS__MEM_ORDERED__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_PS__FWD_PROGRESS__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_PS__LOAD_PROVOKING_VTX__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__MEM_ORDERED_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FWD_PROGRESS_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_PS__LOAD_PROVOKING_VTX_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
+//SPI_SHADER_PGM_RSRC2_PS
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_PS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_PS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_PS_0
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_1
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_2
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_3
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_4
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_5
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_6
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_7
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_8
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_9
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_10
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_11
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_12
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_13
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_14
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_15
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_16
+#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_17
+#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_18
+#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_19
+#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_20
+#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_21
+#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_22
+#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_23
+#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_24
+#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_25
+#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_26
+#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_27
+#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_28
+#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_29
+#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_30
+#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_31
+#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_REQ_CTRL_PS
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_PS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_PS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_PS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_PS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_PS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_PS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_PS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_PS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_PS_0
+#define SPI_SHADER_USER_ACCUM_PS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_1
+#define SPI_SHADER_USER_ACCUM_PS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_2
+#define SPI_SHADER_USER_ACCUM_PS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_3
+#define SPI_SHADER_USER_ACCUM_PS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_CHKSUM_GS
+#define SPI_SHADER_PGM_CHKSUM_GS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_GS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_GS
+#define SPI_SHADER_PGM_RSRC4_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_GS__RESERVED__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC4_GS__PH_THROTTLE_EN__SHIFT 0xe
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_THROTTLE_EN__SHIFT 0xf
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_GS__INST_PREF_SIZE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_GS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_GS__CU_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC4_GS__RESERVED_MASK 0x00003FFEL
+#define SPI_SHADER_PGM_RSRC4_GS__PH_THROTTLE_EN_MASK 0x00004000L
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_THROTTLE_EN_MASK 0x00008000L
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x007F0000L
+#define SPI_SHADER_PGM_RSRC4_GS__INST_PREF_SIZE_MASK 0x1F800000L
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_GS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_USER_DATA_ADDR_LO_GS
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_GS
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_ES_GS
+#define SPI_SHADER_PGM_LO_ES_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES_GS
+#define SPI_SHADER_PGM_HI_ES_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES_GS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_GS
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH_MASK 0xFC000000L
+//SPI_SHADER_PGM_LO_GS
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_GS
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC1_GS
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_GS__MEM_ORDERED__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_GS__FWD_PROGRESS__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_GS__WGP_MODE__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__MEM_ORDERED_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FWD_PROGRESS_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_GS__WGP_MODE_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_GS
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_GS_0
+#define SPI_SHADER_USER_DATA_GS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_1
+#define SPI_SHADER_USER_DATA_GS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_2
+#define SPI_SHADER_USER_DATA_GS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_3
+#define SPI_SHADER_USER_DATA_GS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_4
+#define SPI_SHADER_USER_DATA_GS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_5
+#define SPI_SHADER_USER_DATA_GS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_6
+#define SPI_SHADER_USER_DATA_GS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_7
+#define SPI_SHADER_USER_DATA_GS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_8
+#define SPI_SHADER_USER_DATA_GS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_9
+#define SPI_SHADER_USER_DATA_GS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_10
+#define SPI_SHADER_USER_DATA_GS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_11
+#define SPI_SHADER_USER_DATA_GS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_12
+#define SPI_SHADER_USER_DATA_GS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_13
+#define SPI_SHADER_USER_DATA_GS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_14
+#define SPI_SHADER_USER_DATA_GS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_15
+#define SPI_SHADER_USER_DATA_GS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_16
+#define SPI_SHADER_USER_DATA_GS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_17
+#define SPI_SHADER_USER_DATA_GS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_18
+#define SPI_SHADER_USER_DATA_GS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_19
+#define SPI_SHADER_USER_DATA_GS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_20
+#define SPI_SHADER_USER_DATA_GS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_21
+#define SPI_SHADER_USER_DATA_GS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_22
+#define SPI_SHADER_USER_DATA_GS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_23
+#define SPI_SHADER_USER_DATA_GS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_24
+#define SPI_SHADER_USER_DATA_GS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_25
+#define SPI_SHADER_USER_DATA_GS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_26
+#define SPI_SHADER_USER_DATA_GS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_27
+#define SPI_SHADER_USER_DATA_GS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_28
+#define SPI_SHADER_USER_DATA_GS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_29
+#define SPI_SHADER_USER_DATA_GS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_30
+#define SPI_SHADER_USER_DATA_GS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_31
+#define SPI_SHADER_USER_DATA_GS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_GS_MESHLET_DIM
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_X__SHIFT 0x0
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Y__SHIFT 0x8
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Z__SHIFT 0x10
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_THREADGROUP_SIZE__SHIFT 0x18
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_X_MASK 0x000000FFL
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Y_MASK 0x0000FF00L
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Z_MASK 0x00FF0000L
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_THREADGROUP_SIZE_MASK 0xFF000000L
+//SPI_SHADER_GS_MESHLET_EXP_ALLOC
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_VERTS__SHIFT 0x0
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_PRIMS__SHIFT 0x9
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_VERTS_MASK 0x000001FFL
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_PRIMS_MASK 0x0003FE00L
+//SPI_SHADER_REQ_CTRL_ESGS
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_ESGS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_ESGS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_ESGS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_ESGS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_ESGS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_ESGS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_ESGS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_ESGS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_ESGS_0
+#define SPI_SHADER_USER_ACCUM_ESGS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_1
+#define SPI_SHADER_USER_ACCUM_ESGS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_2
+#define SPI_SHADER_USER_ACCUM_ESGS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_3
+#define SPI_SHADER_USER_ACCUM_ESGS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_LO_ES
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_CHKSUM_HS
+#define SPI_SHADER_PGM_CHKSUM_HS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_HS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_HS
+#define SPI_SHADER_PGM_RSRC4_HS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_HS__INST_PREF_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_HS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_HS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC4_HS__INST_PREF_SIZE_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_HS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_USER_DATA_ADDR_LO_HS
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_HS
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_LS_HS
+#define SPI_SHADER_PGM_LO_LS_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS_HS
+#define SPI_SHADER_PGM_HI_LS_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS_HS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_HS
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH_MASK 0x0000FC00L
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
+//SPI_SHADER_PGM_LO_HS
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_HS
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC1_HS
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_HS__MEM_ORDERED__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_HS__FWD_PROGRESS__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_HS__WGP_MODE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__MEM_ORDERED_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FWD_PROGRESS_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_HS__WGP_MODE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
+//SPI_SHADER_PGM_RSRC2_HS
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x9
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_HS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0003FE00L
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x07FC0000L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_HS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_HS_0
+#define SPI_SHADER_USER_DATA_HS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_1
+#define SPI_SHADER_USER_DATA_HS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_2
+#define SPI_SHADER_USER_DATA_HS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_3
+#define SPI_SHADER_USER_DATA_HS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_4
+#define SPI_SHADER_USER_DATA_HS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_5
+#define SPI_SHADER_USER_DATA_HS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_6
+#define SPI_SHADER_USER_DATA_HS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_7
+#define SPI_SHADER_USER_DATA_HS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_8
+#define SPI_SHADER_USER_DATA_HS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_9
+#define SPI_SHADER_USER_DATA_HS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_10
+#define SPI_SHADER_USER_DATA_HS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_11
+#define SPI_SHADER_USER_DATA_HS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_12
+#define SPI_SHADER_USER_DATA_HS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_13
+#define SPI_SHADER_USER_DATA_HS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_14
+#define SPI_SHADER_USER_DATA_HS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_15
+#define SPI_SHADER_USER_DATA_HS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_16
+#define SPI_SHADER_USER_DATA_HS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_17
+#define SPI_SHADER_USER_DATA_HS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_18
+#define SPI_SHADER_USER_DATA_HS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_19
+#define SPI_SHADER_USER_DATA_HS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_20
+#define SPI_SHADER_USER_DATA_HS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_21
+#define SPI_SHADER_USER_DATA_HS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_22
+#define SPI_SHADER_USER_DATA_HS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_23
+#define SPI_SHADER_USER_DATA_HS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_24
+#define SPI_SHADER_USER_DATA_HS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_25
+#define SPI_SHADER_USER_DATA_HS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_26
+#define SPI_SHADER_USER_DATA_HS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_27
+#define SPI_SHADER_USER_DATA_HS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_28
+#define SPI_SHADER_USER_DATA_HS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_29
+#define SPI_SHADER_USER_DATA_HS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_30
+#define SPI_SHADER_USER_DATA_HS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_31
+#define SPI_SHADER_USER_DATA_HS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_REQ_CTRL_LSHS
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_LSHS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_LSHS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_LSHS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_LSHS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_LSHS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_LSHS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_LSHS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_LSHS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_LSHS_0
+#define SPI_SHADER_USER_ACCUM_LSHS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_1
+#define SPI_SHADER_USER_ACCUM_LSHS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_2
+#define SPI_SHADER_USER_ACCUM_LSHS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_3
+#define SPI_SHADER_USER_ACCUM_LSHS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_LO_LS
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
+//COMPUTE_DISPATCH_INITIATOR
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
+#define COMPUTE_DISPATCH_INITIATOR__TUNNEL_ENABLE__SHIFT 0xd
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
+#define COMPUTE_DISPATCH_INITIATOR__CS_W32_EN__SHIFT 0xf
+#define COMPUTE_DISPATCH_INITIATOR__AMP_SHADER_EN__SHIFT 0x10
+#define COMPUTE_DISPATCH_INITIATOR__DISABLE_DISP_PREMPT_EN__SHIFT 0x11
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__TUNNEL_ENABLE_MASK 0x00002000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+#define COMPUTE_DISPATCH_INITIATOR__CS_W32_EN_MASK 0x00008000L
+#define COMPUTE_DISPATCH_INITIATOR__AMP_SHADER_EN_MASK 0x00010000L
+#define COMPUTE_DISPATCH_INITIATOR__DISABLE_DISP_PREMPT_EN_MASK 0x00020000L
+//COMPUTE_DIM_X
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Y
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Z
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_START_X
+#define COMPUTE_START_X__START__SHIFT 0x0
+#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Y
+#define COMPUTE_START_Y__START__SHIFT 0x0
+#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Z
+#define COMPUTE_START_Z__START__SHIFT 0x0
+#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
+//COMPUTE_NUM_THREAD_X
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Y
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Z
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_PIPELINESTAT_ENABLE
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
+//COMPUTE_PERFCOUNT_ENABLE
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
+//COMPUTE_PGM_LO
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
+#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_PGM_HI
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_PKT_ADDR_LO
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_PKT_ADDR_HI
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_LO
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_HI
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//COMPUTE_PGM_RSRC1
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x16
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x19
+#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
+#define COMPUTE_PGM_RSRC1__WGP_MODE__SHIFT 0x1d
+#define COMPUTE_PGM_RSRC1__MEM_ORDERED__SHIFT 0x1e
+#define COMPUTE_PGM_RSRC1__FWD_PROGRESS__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
+#define COMPUTE_PGM_RSRC1__WGP_MODE_MASK 0x20000000L
+#define COMPUTE_PGM_RSRC1__MEM_ORDERED_MASK 0x40000000L
+#define COMPUTE_PGM_RSRC1__FWD_PROGRESS_MASK 0x80000000L
+//COMPUTE_PGM_RSRC2
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
+//COMPUTE_VMID
+#define COMPUTE_VMID__DATA__SHIFT 0x0
+#define COMPUTE_VMID__DATA_MASK 0x0000000FL
+//COMPUTE_RESOURCE_LIMITS
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+//COMPUTE_DESTINATION_EN_SE0
+#define COMPUTE_DESTINATION_EN_SE0__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE0__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DESTINATION_EN_SE1
+#define COMPUTE_DESTINATION_EN_SE1__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE1__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE1
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_TMPRING_SIZE
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x07FFF000L
+//COMPUTE_DESTINATION_EN_SE2
+#define COMPUTE_DESTINATION_EN_SE2__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE2__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE2
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DESTINATION_EN_SE3
+#define COMPUTE_DESTINATION_EN_SE3__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE3__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE3
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_RESTART_X
+#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Y
+#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Z
+#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_THREAD_TRACE_ENABLE
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
+//COMPUTE_MISC_RESERVED
+#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
+#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
+#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
+#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000007L
+#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x00000008L
+#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x00000010L
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
+//COMPUTE_DISPATCH_ID
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
+//COMPUTE_THREADGROUP_ID
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
+//COMPUTE_REQ_CTRL
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_EN__SHIFT 0x0
+#define COMPUTE_REQ_CTRL__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define COMPUTE_REQ_CTRL__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define COMPUTE_REQ_CTRL__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define COMPUTE_REQ_CTRL__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define COMPUTE_REQ_CTRL__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define COMPUTE_REQ_CTRL__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define COMPUTE_REQ_CTRL__DEDICATED_PREALLOCATION_BUFFER_LIMIT__SHIFT 0x14
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_EN_MASK 0x00000001L
+#define COMPUTE_REQ_CTRL__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define COMPUTE_REQ_CTRL__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define COMPUTE_REQ_CTRL__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define COMPUTE_REQ_CTRL__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define COMPUTE_REQ_CTRL__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define COMPUTE_REQ_CTRL__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+#define COMPUTE_REQ_CTRL__DEDICATED_PREALLOCATION_BUFFER_LIMIT_MASK 0x07F00000L
+//COMPUTE_USER_ACCUM_0
+#define COMPUTE_USER_ACCUM_0__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_0__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_1
+#define COMPUTE_USER_ACCUM_1__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_1__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_2
+#define COMPUTE_USER_ACCUM_2__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_2__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_3
+#define COMPUTE_USER_ACCUM_3__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_3__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_PGM_RSRC3
+#define COMPUTE_PGM_RSRC3__SHARED_VGPR_CNT__SHIFT 0x0
+#define COMPUTE_PGM_RSRC3__INST_PREF_SIZE__SHIFT 0x4
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START__SHIFT 0xa
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END__SHIFT 0xb
+#define COMPUTE_PGM_RSRC3__IMAGE_OP__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC3__SHARED_VGPR_CNT_MASK 0x0000000FL
+#define COMPUTE_PGM_RSRC3__INST_PREF_SIZE_MASK 0x000003F0L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END_MASK 0x00000800L
+#define COMPUTE_PGM_RSRC3__IMAGE_OP_MASK 0x80000000L
+//COMPUTE_DDID_INDEX
+#define COMPUTE_DDID_INDEX__INDEX__SHIFT 0x0
+#define COMPUTE_DDID_INDEX__INDEX_MASK 0x000007FFL
+//COMPUTE_SHADER_CHKSUM
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM__SHIFT 0x0
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE4
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE5
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE6
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE7
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DISPATCH_INTERLEAVE
+#define COMPUTE_DISPATCH_INTERLEAVE__INTERLEAVE__SHIFT 0x0
+#define COMPUTE_DISPATCH_INTERLEAVE__INTERLEAVE_MASK 0x000003FFL
+//COMPUTE_RELAUNCH
+#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
+//COMPUTE_WAVE_RESTORE_ADDR_LO
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
+//COMPUTE_WAVE_RESTORE_ADDR_HI
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
+//COMPUTE_RELAUNCH2
+#define COMPUTE_RELAUNCH2__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH2__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH2__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH2__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH2__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH2__IS_STATE_MASK 0x80000000L
+//COMPUTE_USER_DATA_0
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_1
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_2
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_3
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_4
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_5
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_6
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_7
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_8
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_9
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_10
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_11
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_12
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_13
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_14
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_15
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_TUNNEL
+#define COMPUTE_DISPATCH_TUNNEL__OFF_DELAY__SHIFT 0x0
+#define COMPUTE_DISPATCH_TUNNEL__IMMEDIATE__SHIFT 0xa
+#define COMPUTE_DISPATCH_TUNNEL__OFF_DELAY_MASK 0x000003FFL
+#define COMPUTE_DISPATCH_TUNNEL__IMMEDIATE_MASK 0x00000400L
+//COMPUTE_DISPATCH_END
+#define COMPUTE_DISPATCH_END__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_END__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_NOWHERE
+#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
+#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
+//SH_RESERVED_REG0
+#define SH_RESERVED_REG0__DATA__SHIFT 0x0
+#define SH_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//SH_RESERVED_REG1
+#define SH_RESERVED_REG1__DATA__SHIFT 0x0
+#define SH_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cppdec
+//CP_CU_MASK_ADDR_LO
+#define CP_CU_MASK_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_CU_MASK_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_CU_MASK_ADDR_HI
+#define CP_CU_MASK_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_CU_MASK_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_CU_MASK_CNTL
+#define CP_CU_MASK_CNTL__POLICY__SHIFT 0x0
+#define CP_CU_MASK_CNTL__POLICY_MASK 0x00000001L
+//CP_EOPQ_WAIT_TIME
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
+//CP_CPC_MGCG_SYNC_CNTL
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
+//CPC_INT_INFO
+#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
+#define CPC_INT_INFO__TYPE__SHIFT 0x10
+#define CPC_INT_INFO__VMID__SHIFT 0x14
+#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
+#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
+#define CPC_INT_INFO__TYPE_MASK 0x00010000L
+#define CPC_INT_INFO__VMID_MASK 0x00F00000L
+#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
+//CP_VIRT_STATUS
+#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
+#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
+//CPC_INT_ADDR
+#define CPC_INT_ADDR__ADDR__SHIFT 0x0
+#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CPC_INT_PASID
+#define CPC_INT_PASID__PASID__SHIFT 0x0
+#define CPC_INT_PASID__BYPASS_PASID__SHIFT 0x10
+#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
+#define CPC_INT_PASID__BYPASS_PASID_MASK 0x00010000L
+//CP_GFX_ERROR
+#define CP_GFX_ERROR__ME_INSTR_CACHE_UTCL1_ERROR__SHIFT 0x0
+#define CP_GFX_ERROR__PFP_INSTR_CACHE_UTCL1_ERROR__SHIFT 0x1
+#define CP_GFX_ERROR__DDID_DRAW_UTCL1_ERROR__SHIFT 0x2
+#define CP_GFX_ERROR__DDID_DISPATCH_UTCL1_ERROR__SHIFT 0x3
+#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_GFX_ERROR__DATA_FETCHER_UTCL1_ERROR__SHIFT 0x6
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
+#define CP_GFX_ERROR__RESERVED__SHIFT 0x1f
+#define CP_GFX_ERROR__ME_INSTR_CACHE_UTCL1_ERROR_MASK 0x00000001L
+#define CP_GFX_ERROR__PFP_INSTR_CACHE_UTCL1_ERROR_MASK 0x00000002L
+#define CP_GFX_ERROR__DDID_DRAW_UTCL1_ERROR_MASK 0x00000004L
+#define CP_GFX_ERROR__DDID_DISPATCH_UTCL1_ERROR_MASK 0x00000008L
+#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_GFX_ERROR__DATA_FETCHER_UTCL1_ERROR_MASK 0x00000040L
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
+#define CP_GFX_ERROR__RESERVED_MASK 0x80000000L
+//CPG_UTCL1_CNTL
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPC_UTCL1_CNTL
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPF_UTCL1_CNTL
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
+//CP_AQL_SMM_STATUS
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
+//CP_RB0_BASE
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB_BASE
+#define CP_RB_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB0_CNTL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB0_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB0_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB0_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB0_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB0_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB0_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB0_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB0_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB0_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB0_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB0_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_CNTL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_RPTR_WR
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
+//CP_RB0_RPTR_ADDR
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB_RPTR_ADDR
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB0_RPTR_ADDR_HI
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_RPTR_ADDR_HI
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_BUFSZ_MASK
+#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_BUFSZ_MASK
+#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//GC_PRIV_MODE
+#define GC_PRIV_MODE__MC_PRIV_MODE__SHIFT 0x0
+#define GC_PRIV_MODE__MC_PRIV_MODE_MASK 0x00000001L
+//CP_INT_CNTL
+#define CP_INT_CNTL__RESUME_INT_ENABLE__SHIFT 0x8
+#define CP_INT_CNTL__SUSPEND_INT_ENABLE__SHIFT 0x9
+#define CP_INT_CNTL__DMA_WATCH_INT_ENABLE__SHIFT 0xa
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL__RESUME_INT_ENABLE_MASK 0x00000100L
+#define CP_INT_CNTL__SUSPEND_INT_ENABLE_MASK 0x00000200L
+#define CP_INT_CNTL__DMA_WATCH_INT_ENABLE_MASK 0x00000400L
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS
+#define CP_INT_STATUS__RESUME_INT_STAT__SHIFT 0x8
+#define CP_INT_STATUS__SUSPEND_INT_STAT__SHIFT 0x9
+#define CP_INT_STATUS__DMA_WATCH_INT_STAT__SHIFT 0xa
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS__RESUME_INT_STAT_MASK 0x00000100L
+#define CP_INT_STATUS__SUSPEND_INT_STAT_MASK 0x00000200L
+#define CP_INT_STATUS__DMA_WATCH_INT_STAT_MASK 0x00000400L
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_DEVICE_ID
+#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
+//CP_ME0_PIPE_PRIORITY_CNTS
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_RING_PRIORITY_CNTS
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME0_PIPE0_PRIORITY
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING0_PRIORITY
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE1_PRIORITY
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING1_PRIORITY
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_FATAL_ERROR
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
+#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
+#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
+//CP_RB_VMID
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
+#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
+//CP_ME0_PIPE0_VMID
+#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
+//CP_ME0_PIPE1_VMID
+#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
+//CP_RB0_WPTR
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB0_WPTR_HI
+#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR_HI
+#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR_HI
+#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_PROCESS_QUANTUM
+#define CP_PROCESS_QUANTUM__QUANTUM_DURATION__SHIFT 0x0
+#define CP_PROCESS_QUANTUM__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_PROCESS_QUANTUM__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_PROCESS_QUANTUM__QUANTUM_EN__SHIFT 0x1f
+#define CP_PROCESS_QUANTUM__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_PROCESS_QUANTUM__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_PROCESS_QUANTUM__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_PROCESS_QUANTUM__QUANTUM_EN_MASK 0x80000000L
+//CP_RB_DOORBELL_RANGE_LOWER
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x00000FFCL
+//CP_RB_DOORBELL_RANGE_UPPER
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x00000FFCL
+//CP_MEC_DOORBELL_RANGE_LOWER
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x00000FFCL
+//CP_MEC_DOORBELL_RANGE_UPPER
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x00000FFCL
+//CPG_UTCL1_ERROR
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CPC_UTCL1_ERROR
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CP_RB1_BASE
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB1_CNTL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB1_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB1_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB1_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB1_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB1_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB1_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB1_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB1_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB1_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB1_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB1_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB1_RPTR_ADDR
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB1_RPTR_ADDR_HI
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB1_BUFSZ_MASK
+#define CP_RB1_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB1_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_INT_CNTL_RING0
+#define CP_INT_CNTL_RING0__RESUME_INT_ENABLE__SHIFT 0x8
+#define CP_INT_CNTL_RING0__SUSPEND_INT_ENABLE__SHIFT 0x9
+#define CP_INT_CNTL_RING0__DMA_WATCH_INT_ENABLE__SHIFT 0xa
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING0__RESUME_INT_ENABLE_MASK 0x00000100L
+#define CP_INT_CNTL_RING0__SUSPEND_INT_ENABLE_MASK 0x00000200L
+#define CP_INT_CNTL_RING0__DMA_WATCH_INT_ENABLE_MASK 0x00000400L
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING1
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS_RING0
+#define CP_INT_STATUS_RING0__RESUME_INT_STAT__SHIFT 0x8
+#define CP_INT_STATUS_RING0__SUSPEND_INT_STAT__SHIFT 0x9
+#define CP_INT_STATUS_RING0__DMA_WATCH_INT_STAT__SHIFT 0xa
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING0__RESUME_INT_STAT_MASK 0x00000100L
+#define CP_INT_STATUS_RING0__SUSPEND_INT_STAT_MASK 0x00000200L
+#define CP_INT_STATUS_RING0__DMA_WATCH_INT_STAT_MASK 0x00000400L
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING1
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_ME_F32_INTERRUPT
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT__SHIFT 0x1
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2__SHIFT 0x2
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3__SHIFT 0x3
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT_MASK 0x00000002L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2_MASK 0x00000004L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3_MASK 0x00000008L
+//CP_PFP_F32_INTERRUPT
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3__SHIFT 0x3
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3_MASK 0x00000008L
+//CP_MEC1_F32_INTERRUPT
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INTERRUPT
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_PWR_CNTL
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE0__SHIFT 0x14
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE1__SHIFT 0x15
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE2__SHIFT 0x16
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE3__SHIFT 0x17
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE0_MASK 0x00100000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE1_MASK 0x00200000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE2_MASK 0x00400000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE3_MASK 0x00800000L
+//CP_ECC_FIRSTOCCURRENCE
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
+#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
+#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
+#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
+#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
+//CP_ECC_FIRSTOCCURRENCE_RING0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING1
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
+//GB_EDC_MODE
+#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
+#define GB_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define GB_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
+#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
+#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
+#define GB_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define GB_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
+//CP_DEBUG
+#define CP_DEBUG__PERFMON_RING_SEL__SHIFT 0x0
+#define CP_DEBUG__DEBUG_BUS_SELECT_BITS__SHIFT 0x2
+#define CP_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0x8
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CP_DEBUG__PACKET_FILTER_DISABLE__SHIFT 0xa
+#define CP_DEBUG__NOT_EOP_PREEMPT_DISABLE__SHIFT 0xb
+#define CP_DEBUG__CPG_CHIU_RO_DISABLE__SHIFT 0xc
+#define CP_DEBUG__CPG_GCR_CNTL_BYPASS__SHIFT 0xd
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE__SHIFT 0xe
+#define CP_DEBUG__CPG_UTCL1_ERROR_HALT_DISABLE__SHIFT 0xf
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR__SHIFT 0x10
+#define CP_DEBUG__CPG_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_DEBUG__PRIV_VIOLATION_WRITE_DISABLE__SHIFT 0x14
+#define CP_DEBUG__CPG_CHIU_GUS_DISABLE__SHIFT 0x15
+#define CP_DEBUG__INTERRUPT_DISABLE__SHIFT 0x16
+#define CP_DEBUG__PREDICATE_DISABLE__SHIFT 0x17
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_DEBUG__CPG_CHIU_MTYPE_OVERRIDE__SHIFT 0x1b
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE__SHIFT 0x1e
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE__SHIFT 0x1f
+#define CP_DEBUG__PERFMON_RING_SEL_MASK 0x00000003L
+#define CP_DEBUG__DEBUG_BUS_SELECT_BITS_MASK 0x000000FCL
+#define CP_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00000100L
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define CP_DEBUG__PACKET_FILTER_DISABLE_MASK 0x00000400L
+#define CP_DEBUG__NOT_EOP_PREEMPT_DISABLE_MASK 0x00000800L
+#define CP_DEBUG__CPG_CHIU_RO_DISABLE_MASK 0x00001000L
+#define CP_DEBUG__CPG_GCR_CNTL_BYPASS_MASK 0x00002000L
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE_MASK 0x00004000L
+#define CP_DEBUG__CPG_UTCL1_ERROR_HALT_DISABLE_MASK 0x00008000L
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR_MASK 0x00070000L
+#define CP_DEBUG__CPG_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_DEBUG__PRIV_VIOLATION_WRITE_DISABLE_MASK 0x00100000L
+#define CP_DEBUG__CPG_CHIU_GUS_DISABLE_MASK 0x00200000L
+#define CP_DEBUG__INTERRUPT_DISABLE_MASK 0x00400000L
+#define CP_DEBUG__PREDICATE_DISABLE_MASK 0x00800000L
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_DEBUG__CPG_CHIU_MTYPE_OVERRIDE_MASK 0x08000000L
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE_MASK 0x40000000L
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE_MASK 0x80000000L
+//CP_CPF_DEBUG
+#define CP_CPF_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0xe
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE__SHIFT 0x10
+#define CP_CPF_DEBUG__CPF_GCR_CNTL_BYPASS__SHIFT 0x11
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPF_DEBUG__CPF_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_DELAY_OVERRIDE__SHIFT 0x16
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_OVERRIDE__SHIFT 0x17
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPF_DEBUG__CPF_CHIU_NOALLOC_OVERRIDE__SHIFT 0x1a
+#define CP_CPF_DEBUG__CE_FETCHER_DISABLE__SHIFT 0x1b
+#define CP_CPF_DEBUG__CPF_CHIU_GUS_DISABLE__SHIFT 0x1c
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS__SHIFT 0x1d
+#define CP_CPF_DEBUG__CPF_CHIU_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPF_DEBUG__DBGU_TRIGGER__SHIFT 0x1f
+#define CP_CPF_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00004000L
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE_MASK 0x00010000L
+#define CP_CPF_DEBUG__CPF_GCR_CNTL_BYPASS_MASK 0x00020000L
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPF_DEBUG__CPF_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_DELAY_OVERRIDE_MASK 0x00400000L
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_OVERRIDE_MASK 0x00800000L
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPF_DEBUG__CPF_CHIU_NOALLOC_OVERRIDE_MASK 0x04000000L
+#define CP_CPF_DEBUG__CE_FETCHER_DISABLE_MASK 0x08000000L
+#define CP_CPF_DEBUG__CPF_CHIU_GUS_DISABLE_MASK 0x10000000L
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS_MASK 0x20000000L
+#define CP_CPF_DEBUG__CPF_CHIU_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPF_DEBUG__DBGU_TRIGGER_MASK 0x80000000L
+//CP_CPC_DEBUG
+#define CP_CPC_DEBUG__PIPE_SELECT__SHIFT 0x0
+#define CP_CPC_DEBUG__ME_SELECT__SHIFT 0x2
+#define CP_CPC_DEBUG__ADC_INTERLEAVE_DISABLE__SHIFT 0x4
+#define CP_CPC_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0xe
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE__SHIFT 0xf
+#define CP_CPC_DEBUG__CPC_CHIU_NOALLOC_OVERRIDE__SHIFT 0x10
+#define CP_CPC_DEBUG__CPC_GCR_CNTL_BYPASS__SHIFT 0x11
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPC_DEBUG__CPC_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_CPC_DEBUG__PRIV_VIOLATION_WRITE_DISABLE__SHIFT 0x14
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE__SHIFT 0x15
+#define CP_CPC_DEBUG__INTERRUPT_DISABLE__SHIFT 0x16
+#define CP_CPC_DEBUG__CPC_CHIU_RO_DISABLE__SHIFT 0x17
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_CPC_DEBUG__CPC_CHIU_GUS_DISABLE__SHIFT 0x1b
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_CPC_DEBUG__CPC_CHIU_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE__SHIFT 0x1f
+#define CP_CPC_DEBUG__PIPE_SELECT_MASK 0x00000003L
+#define CP_CPC_DEBUG__ME_SELECT_MASK 0x00000004L
+#define CP_CPC_DEBUG__ADC_INTERLEAVE_DISABLE_MASK 0x00000010L
+#define CP_CPC_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00004000L
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE_MASK 0x00008000L
+#define CP_CPC_DEBUG__CPC_CHIU_NOALLOC_OVERRIDE_MASK 0x00010000L
+#define CP_CPC_DEBUG__CPC_GCR_CNTL_BYPASS_MASK 0x00020000L
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPC_DEBUG__CPC_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_CPC_DEBUG__PRIV_VIOLATION_WRITE_DISABLE_MASK 0x00100000L
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE_MASK 0x00200000L
+#define CP_CPC_DEBUG__INTERRUPT_DISABLE_MASK 0x00400000L
+#define CP_CPC_DEBUG__CPC_CHIU_RO_DISABLE_MASK 0x00800000L
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_CPC_DEBUG__CPC_CHIU_GUS_DISABLE_MASK 0x08000000L
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_CPC_DEBUG__CPC_CHIU_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
+#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
+#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL1
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
+//CP_ME1_PIPE0_INT_CNTL
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_CNTL
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_CNTL
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_CNTL
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_CNTL
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_CNTL
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_CNTL
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_CNTL
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE0_INT_STATUS
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_STATUS
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_STATUS
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_STATUS
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_STATUS
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_STATUS
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_STATUS
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_STATUS
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_GFX_QUEUE_INDEX
+#define CP_GFX_QUEUE_INDEX__QUEUE_ACCESS__SHIFT 0x0
+#define CP_GFX_QUEUE_INDEX__PIPE_ID__SHIFT 0x4
+#define CP_GFX_QUEUE_INDEX__QUEUE_ID__SHIFT 0x8
+#define CP_GFX_QUEUE_INDEX__QUEUE_ACCESS_MASK 0x00000001L
+#define CP_GFX_QUEUE_INDEX__PIPE_ID_MASK 0x00000030L
+#define CP_GFX_QUEUE_INDEX__QUEUE_ID_MASK 0x00000700L
+//CC_GC_EDC_CONFIG
+#define CC_GC_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_GC_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_PIPE_PRIORITY_CNTS
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME1_PIPE0_PRIORITY
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE1_PRIORITY
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE2_PRIORITY
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE3_PRIORITY
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE_PRIORITY_CNTS
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME2_PIPE0_PRIORITY
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE1_PRIORITY
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE2_PRIORITY
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE3_PRIORITY
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_PFP_PRGRM_CNTR_START
+#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_ME_PRGRM_CNTR_START
+#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MEC1_PRGRM_CNTR_START
+#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x000FFFFFL
+//CP_MEC2_PRGRM_CNTR_START
+#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x000FFFFFL
+//CP_PFP_INTR_ROUTINE_START
+#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_ME_INTR_ROUTINE_START
+#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_MEC1_INTR_ROUTINE_START
+#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x000FFFFFL
+//CP_MEC2_INTR_ROUTINE_START
+#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x000FFFFFL
+//CP_CONTEXT_CNTL
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_GE_CNTX__SHIFT 0x0
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_GE_CNTX__SHIFT 0x10
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_GE_CNTX_MASK 0x00000007L
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_GE_CNTX_MASK 0x00070000L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
+//CP_MAX_CONTEXT
+#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
+#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
+//CP_IQ_WAIT_TIME1
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
+#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
+//CP_IQ_WAIT_TIME2
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
+#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
+#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
+//CP_RB0_BASE_HI
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_RB1_BASE_HI
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_VMID_RESET
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
+#define CP_VMID_RESET__PIPE0_QUEUES__SHIFT 0x10
+#define CP_VMID_RESET__PIPE1_QUEUES__SHIFT 0x18
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_RESET__PIPE0_QUEUES_MASK 0x00FF0000L
+#define CP_VMID_RESET__PIPE1_QUEUES_MASK 0xFF000000L
+//CPC_INT_CNTL
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CPC_INT_STATUS
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_VMID_PREEMPT
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
+#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
+//CPC_INT_CNTX_ID
+#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PQ_STATUS
+#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN__SHIFT 0x2
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MODE__SHIFT 0x3
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN_MASK 0x00000004L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MODE_MASK 0x00000008L
+//CP_PFP_PRGRM_CNTR_START_HI
+#define CP_PFP_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MAX_DRAW_COUNT
+#define CP_MAX_DRAW_COUNT__MAX_DRAW_COUNT__SHIFT 0x0
+#define CP_MAX_DRAW_COUNT__MAX_DRAW_COUNT_MASK 0xFFFFFFFFL
+//CP_MEC1_F32_INT_DIS
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INT_DIS
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_VMID_STATUS
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
+//CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CPC_SUSPEND_CTX_SAVE_CONTROL
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__POLICY_MASK 0x00000018L
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CPC_SUSPEND_CNTL_STACK_OFFSET
+#define CPC_SUSPEND_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CPC_SUSPEND_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CPC_SUSPEND_CNTL_STACK_SIZE
+#define CPC_SUSPEND_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CPC_SUSPEND_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CPC_SUSPEND_WG_STATE_OFFSET
+#define CPC_SUSPEND_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CPC_SUSPEND_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CPC_SUSPEND_CTX_SAVE_SIZE
+#define CPC_SUSPEND_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CPC_SUSPEND_CTX_SAVE_SIZE__SIZE_MASK 0x03FFF000L
+//CPC_OS_PIPES
+#define CPC_OS_PIPES__OS_PIPES__SHIFT 0x0
+#define CPC_OS_PIPES__OS_PIPES_MASK 0x000000FFL
+//CP_SUSPEND_RESUME_REQ
+#define CP_SUSPEND_RESUME_REQ__SUSPEND_REQ__SHIFT 0x0
+#define CP_SUSPEND_RESUME_REQ__RESUME_REQ__SHIFT 0x1
+#define CP_SUSPEND_RESUME_REQ__SUSPEND_REQ_MASK 0x00000001L
+#define CP_SUSPEND_RESUME_REQ__RESUME_REQ_MASK 0x00000002L
+//CP_SUSPEND_CNTL
+#define CP_SUSPEND_CNTL__SUSPEND_MODE__SHIFT 0x0
+#define CP_SUSPEND_CNTL__SUSPEND_ENABLE__SHIFT 0x1
+#define CP_SUSPEND_CNTL__RESUME_LOCK__SHIFT 0x2
+#define CP_SUSPEND_CNTL__ACE_SUSPEND_ACTIVE__SHIFT 0x3
+#define CP_SUSPEND_CNTL__SUSPEND_MODE_MASK 0x00000001L
+#define CP_SUSPEND_CNTL__SUSPEND_ENABLE_MASK 0x00000002L
+#define CP_SUSPEND_CNTL__RESUME_LOCK_MASK 0x00000004L
+#define CP_SUSPEND_CNTL__ACE_SUSPEND_ACTIVE_MASK 0x00000008L
+//CP_IQ_WAIT_TIME3
+#define CP_IQ_WAIT_TIME3__SUSPEND_QUE__SHIFT 0x0
+#define CP_IQ_WAIT_TIME3__SUSPEND_QUE_MASK 0x000000FFL
+//CPC_DDID_BASE_ADDR_LO
+#define CPC_DDID_BASE_ADDR_LO__BASE_ADDR_LO__SHIFT 0x6
+#define CPC_DDID_BASE_ADDR_LO__BASE_ADDR_LO_MASK 0xFFFFFFC0L
+//CP_DDID_BASE_ADDR_LO
+#define CP_DDID_BASE_ADDR_LO__BASE_ADDR_LO__SHIFT 0x6
+#define CP_DDID_BASE_ADDR_LO__BASE_ADDR_LO_MASK 0xFFFFFFC0L
+//CPC_DDID_BASE_ADDR_HI
+#define CPC_DDID_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CPC_DDID_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_DDID_BASE_ADDR_HI
+#define CP_DDID_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_DDID_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CPC_DDID_CNTL
+#define CPC_DDID_CNTL__THRESHOLD__SHIFT 0x0
+#define CPC_DDID_CNTL__SIZE__SHIFT 0x10
+#define CPC_DDID_CNTL__NO_RING_MEMORY__SHIFT 0x13
+#define CPC_DDID_CNTL__POLICY__SHIFT 0x1c
+#define CPC_DDID_CNTL__MODE__SHIFT 0x1e
+#define CPC_DDID_CNTL__ENABLE__SHIFT 0x1f
+#define CPC_DDID_CNTL__THRESHOLD_MASK 0x000000FFL
+#define CPC_DDID_CNTL__SIZE_MASK 0x00010000L
+#define CPC_DDID_CNTL__NO_RING_MEMORY_MASK 0x00080000L
+#define CPC_DDID_CNTL__POLICY_MASK 0x30000000L
+#define CPC_DDID_CNTL__MODE_MASK 0x40000000L
+#define CPC_DDID_CNTL__ENABLE_MASK 0x80000000L
+//CP_DDID_CNTL
+#define CP_DDID_CNTL__THRESHOLD__SHIFT 0x0
+#define CP_DDID_CNTL__SIZE__SHIFT 0x10
+#define CP_DDID_CNTL__NO_RING_MEMORY__SHIFT 0x13
+#define CP_DDID_CNTL__VMID__SHIFT 0x14
+#define CP_DDID_CNTL__VMID_SEL__SHIFT 0x18
+#define CP_DDID_CNTL__POLICY__SHIFT 0x1c
+#define CP_DDID_CNTL__MODE__SHIFT 0x1e
+#define CP_DDID_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DDID_CNTL__THRESHOLD_MASK 0x000000FFL
+#define CP_DDID_CNTL__SIZE_MASK 0x00010000L
+#define CP_DDID_CNTL__NO_RING_MEMORY_MASK 0x00080000L
+#define CP_DDID_CNTL__VMID_MASK 0x00F00000L
+#define CP_DDID_CNTL__VMID_SEL_MASK 0x01000000L
+#define CP_DDID_CNTL__POLICY_MASK 0x30000000L
+#define CP_DDID_CNTL__MODE_MASK 0x40000000L
+#define CP_DDID_CNTL__ENABLE_MASK 0x80000000L
+//CP_GFX_DDID_INFLIGHT_COUNT
+#define CP_GFX_DDID_INFLIGHT_COUNT__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_INFLIGHT_COUNT__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_WPTR
+#define CP_GFX_DDID_WPTR__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_WPTR__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_RPTR
+#define CP_GFX_DDID_RPTR__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_RPTR__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_DELTA_RPT_COUNT
+#define CP_GFX_DDID_DELTA_RPT_COUNT__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_DELTA_RPT_COUNT__COUNT_MASK 0x000000FFL
+//CP_GFX_HPD_STATUS0
+#define CP_GFX_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_GFX_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_GFX_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_GFX_HPD_STATUS0__FORCE_MAPPED_QUEUE__SHIFT 0x10
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_GFX_HPD_STATUS0__SUSPEND_REQ__SHIFT 0x1c
+#define CP_GFX_HPD_STATUS0__ENABLE_OVERIDE_QUEUEID__SHIFT 0x1d
+#define CP_GFX_HPD_STATUS0__OVERIDE_QUEUEID__SHIFT 0x1e
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_GFX_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_GFX_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_GFX_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_GFX_HPD_STATUS0__FORCE_MAPPED_QUEUE_MASK 0x00070000L
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_GFX_HPD_STATUS0__SUSPEND_REQ_MASK 0x10000000L
+#define CP_GFX_HPD_STATUS0__ENABLE_OVERIDE_QUEUEID_MASK 0x20000000L
+#define CP_GFX_HPD_STATUS0__OVERIDE_QUEUEID_MASK 0x40000000L
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+//CP_GFX_HPD_CONTROL0
+#define CP_GFX_HPD_CONTROL0__SUSPEND_ENABLE__SHIFT 0x0
+#define CP_GFX_HPD_CONTROL0__PIPE_HOLDING__SHIFT 0x4
+#define CP_GFX_HPD_CONTROL0__RB_CE_ROQ_CNTL__SHIFT 0x8
+#define CP_GFX_HPD_CONTROL0__SUSPEND_ENABLE_MASK 0x00000001L
+#define CP_GFX_HPD_CONTROL0__PIPE_HOLDING_MASK 0x00000010L
+#define CP_GFX_HPD_CONTROL0__RB_CE_ROQ_CNTL_MASK 0x00000100L
+//CP_GFX_HPD_OSPRE_FENCE_ADDR_LO
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_GFX_HPD_OSPRE_FENCE_ADDR_HI
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_GFX_HPD_OSPRE_FENCE_DATA_LO
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_GFX_HPD_OSPRE_FENCE_DATA_HI
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_GFX_INDEX_MUTEX
+#define CP_GFX_INDEX_MUTEX__REQUEST__SHIFT 0x0
+#define CP_GFX_INDEX_MUTEX__CLIENTID__SHIFT 0x1
+#define CP_GFX_INDEX_MUTEX__REQUEST_MASK 0x00000001L
+#define CP_GFX_INDEX_MUTEX__CLIENTID_MASK 0x0000000EL
+//CP_ME_PRGRM_CNTR_START_HI
+#define CP_ME_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_PFP_INTR_ROUTINE_START_HI
+#define CP_PFP_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START_HI__IR_START_MASK 0x3FFFFFFFL
+//CP_ME_INTR_ROUTINE_START_HI
+#define CP_ME_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START_HI__IR_START_MASK 0x3FFFFFFFL
+//CP_GFX_MQD_BASE_ADDR
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_MQD_BASE_ADDR_HI
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_GFX_MQD_BASE_ADDR_HI__APP_VMID__SHIFT 0x1c
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+#define CP_GFX_MQD_BASE_ADDR_HI__APP_VMID_MASK 0xF0000000L
+//CP_GFX_HQD_ACTIVE
+#define CP_GFX_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_GFX_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_GFX_HQD_VMID
+#define CP_GFX_HQD_VMID__VMID__SHIFT 0x0
+#define CP_GFX_HQD_VMID__VMID_MASK 0x0000000FL
+//CP_GFX_HQD_QUEUE_PRIORITY
+#define CP_GFX_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_GFX_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_GFX_HQD_QUANTUM
+#define CP_GFX_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_GFX_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x3
+#define CP_GFX_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_GFX_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_GFX_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000018L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x0000FF00L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_GFX_HQD_BASE
+#define CP_GFX_HQD_BASE__RB_BASE__SHIFT 0x0
+#define CP_GFX_HQD_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_BASE_HI
+#define CP_GFX_HQD_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_GFX_HQD_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_GFX_HQD_RPTR
+#define CP_GFX_HQD_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_GFX_HQD_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_GFX_HQD_RPTR_ADDR
+#define CP_GFX_HQD_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_GFX_HQD_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_HQD_RPTR_ADDR_HI
+#define CP_GFX_HQD_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_GFX_HQD_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_WPTR_POLL_ADDR_LO
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_RB_WPTR_POLL_ADDR_HI
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_DOORBELL_CONTROL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_GFX_HQD_OFFSET
+#define CP_GFX_HQD_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_GFX_HQD_OFFSET__DISABLE_RB_OFFSET__SHIFT 0x1f
+#define CP_GFX_HQD_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+#define CP_GFX_HQD_OFFSET__DISABLE_RB_OFFSET_MASK 0x80000000L
+//CP_GFX_HQD_CNTL
+#define CP_GFX_HQD_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_GFX_HQD_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_GFX_HQD_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_GFX_HQD_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_GFX_HQD_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_GFX_HQD_CNTL__BUF_SWAP__SHIFT 0x10
+#define CP_GFX_HQD_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_GFX_HQD_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_GFX_HQD_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_HQD_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_GFX_HQD_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_GFX_HQD_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_GFX_HQD_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_GFX_HQD_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_GFX_HQD_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_GFX_HQD_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_GFX_HQD_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_GFX_HQD_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_GFX_HQD_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_GFX_HQD_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_GFX_HQD_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_GFX_HQD_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_GFX_HQD_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_GFX_HQD_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_GFX_HQD_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_GFX_HQD_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_GFX_HQD_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_GFX_HQD_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_GFX_HQD_CSMD_RPTR
+#define CP_GFX_HQD_CSMD_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_GFX_HQD_CSMD_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_GFX_HQD_WPTR
+#define CP_GFX_HQD_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_GFX_HQD_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_WPTR_HI
+#define CP_GFX_HQD_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_GFX_HQD_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_DEQUEUE_REQUEST
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000001L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_GFX_HQD_MAPPED
+#define CP_GFX_HQD_MAPPED__MAPPED__SHIFT 0x0
+#define CP_GFX_HQD_MAPPED__MAPPED_MASK 0x00000001L
+//CP_GFX_HQD_QUE_MGR_CONTROL
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_IDLE_QUEUE_DISCONNECT__SHIFT 0x0
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_CONNECT_HANDSHAKE__SHIFT 0x4
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_FETCHER_DISCONNECT__SHIFT 0x5
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_ACTIVE_EN__SHIFT 0x6
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_ALLOW_DB_UPDATE_EN__SHIFT 0x7
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE__SHIFT 0x8
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_OFFSET_UPDATE__SHIFT 0xb
+#define CP_GFX_HQD_QUE_MGR_CONTROL__PRIORITY_PREEMPT_DISABLE__SHIFT 0xd
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_QUEUE_MGR__SHIFT 0xf
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_IDLE_MESSAGE__SHIFT 0x10
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_SWITCH_MESSAGE_IDLE__SHIFT 0x11
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_SWITCH_MSG_PREEMPT__SHIFT 0x12
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_MAPPED_QUEUE_IDLE_MSG__SHIFT 0x17
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_IDLE_QUEUE_DISCONNECT_MASK 0x00000001L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_CONNECT_HANDSHAKE_MASK 0x00000010L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_FETCHER_DISCONNECT_MASK 0x00000020L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_ACTIVE_EN_MASK 0x00000040L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_ALLOW_DB_UPDATE_EN_MASK 0x00000080L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_MASK 0x00000700L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_OFFSET_UPDATE_MASK 0x00000800L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__PRIORITY_PREEMPT_DISABLE_MASK 0x00002000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_QUEUE_MGR_MASK 0x00008000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_IDLE_MESSAGE_MASK 0x00010000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_SWITCH_MESSAGE_IDLE_MASK 0x00020000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_SWITCH_MSG_PREEMPT_MASK 0x00040000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_MAPPED_QUEUE_IDLE_MSG_MASK 0x00800000L
+//CP_GFX_HQD_IQ_TIMER
+#define CP_GFX_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_GFX_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_GFX_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_GFX_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_GFX_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_GFX_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_GFX_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x1b
+#define CP_GFX_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_GFX_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_GFX_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_GFX_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_GFX_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_GFX_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_GFX_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_GFX_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_GFX_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x08000000L
+#define CP_GFX_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_GFX_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_GFX_HQD_HQ_STATUS0
+#define CP_GFX_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
+#define CP_GFX_HQD_HQ_STATUS0__OS_PREEMPT_STATUS__SHIFT 0x4
+#define CP_GFX_HQD_HQ_STATUS0__PREEMPT_ACK__SHIFT 0x6
+#define CP_GFX_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_GFX_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000001L
+#define CP_GFX_HQD_HQ_STATUS0__OS_PREEMPT_STATUS_MASK 0x00000030L
+#define CP_GFX_HQD_HQ_STATUS0__PREEMPT_ACK_MASK 0x00000040L
+#define CP_GFX_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+//CP_GFX_HQD_HQ_CONTROL0
+#define CP_GFX_HQD_HQ_CONTROL0__COMMAND__SHIFT 0x0
+#define CP_GFX_HQD_HQ_CONTROL0__SPARES__SHIFT 0x4
+#define CP_GFX_HQD_HQ_CONTROL0__COMMAND_MASK 0x0000000FL
+#define CP_GFX_HQD_HQ_CONTROL0__SPARES_MASK 0x000000F0L
+//CP_GFX_MQD_CONTROL
+#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_GFX_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_GFX_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x03000000L
+//CP_HQD_GFX_CONTROL
+#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
+#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
+#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
+#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
+//CP_HQD_GFX_STATUS
+#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
+#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
+//CP_DMA_WATCH0_ADDR_LO
+#define CP_DMA_WATCH0_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH0_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH0_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH0_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH0_ADDR_HI
+#define CP_DMA_WATCH0_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH0_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH0_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH0_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH0_MASK
+#define CP_DMA_WATCH0_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH0_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH0_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH0_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH0_CNTL
+#define CP_DMA_WATCH0_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH0_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH0_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH0_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH0_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH0_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH0_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH0_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH0_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH0_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH0_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH0_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH1_ADDR_LO
+#define CP_DMA_WATCH1_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH1_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH1_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH1_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH1_ADDR_HI
+#define CP_DMA_WATCH1_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH1_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH1_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH1_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH1_MASK
+#define CP_DMA_WATCH1_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH1_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH1_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH1_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH1_CNTL
+#define CP_DMA_WATCH1_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH1_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH1_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH1_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH1_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH1_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH1_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH1_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH1_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH1_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH1_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH1_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH2_ADDR_LO
+#define CP_DMA_WATCH2_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH2_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH2_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH2_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH2_ADDR_HI
+#define CP_DMA_WATCH2_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH2_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH2_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH2_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH2_MASK
+#define CP_DMA_WATCH2_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH2_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH2_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH2_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH2_CNTL
+#define CP_DMA_WATCH2_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH2_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH2_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH2_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH2_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH2_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH2_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH2_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH2_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH2_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH2_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH2_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH3_ADDR_LO
+#define CP_DMA_WATCH3_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH3_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH3_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH3_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH3_ADDR_HI
+#define CP_DMA_WATCH3_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH3_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH3_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH3_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH3_MASK
+#define CP_DMA_WATCH3_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH3_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH3_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH3_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH3_CNTL
+#define CP_DMA_WATCH3_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH3_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH3_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH3_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH3_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH3_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH3_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH3_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH3_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH3_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH3_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH3_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH_STAT_ADDR_LO
+#define CP_DMA_WATCH_STAT_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_WATCH_STAT_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_WATCH_STAT_ADDR_HI
+#define CP_DMA_WATCH_STAT_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH_STAT_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_WATCH_STAT
+#define CP_DMA_WATCH_STAT__VMID__SHIFT 0x0
+#define CP_DMA_WATCH_STAT__QUEUE_ID__SHIFT 0x4
+#define CP_DMA_WATCH_STAT__CLIENT_ID__SHIFT 0x8
+#define CP_DMA_WATCH_STAT__PIPE__SHIFT 0xc
+#define CP_DMA_WATCH_STAT__WATCH_ID__SHIFT 0x10
+#define CP_DMA_WATCH_STAT__RD_WR__SHIFT 0x14
+#define CP_DMA_WATCH_STAT__TRAP_FLAG__SHIFT 0x1f
+#define CP_DMA_WATCH_STAT__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH_STAT__QUEUE_ID_MASK 0x00000070L
+#define CP_DMA_WATCH_STAT__CLIENT_ID_MASK 0x00000700L
+#define CP_DMA_WATCH_STAT__PIPE_MASK 0x00003000L
+#define CP_DMA_WATCH_STAT__WATCH_ID_MASK 0x00030000L
+#define CP_DMA_WATCH_STAT__RD_WR_MASK 0x00100000L
+#define CP_DMA_WATCH_STAT__TRAP_FLAG_MASK 0x80000000L
+//CP_PFP_JT_STAT
+#define CP_PFP_JT_STAT__JT_LOADED__SHIFT 0x0
+#define CP_PFP_JT_STAT__WR_MASK__SHIFT 0x10
+#define CP_PFP_JT_STAT__JT_LOADED_MASK 0x00000003L
+#define CP_PFP_JT_STAT__WR_MASK_MASK 0x00030000L
+//CP_MEC_JT_STAT
+#define CP_MEC_JT_STAT__JT_LOADED__SHIFT 0x0
+#define CP_MEC_JT_STAT__WR_MASK__SHIFT 0x10
+#define CP_MEC_JT_STAT__JT_LOADED_MASK 0x000000FFL
+#define CP_MEC_JT_STAT__WR_MASK_MASK 0x00FF0000L
+//CP_CPC_BUSY_HYSTERESIS
+#define CP_CPC_BUSY_HYSTERESIS__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPC_BUSY_HYSTERESIS__CPC_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_HYSTERESIS__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPC_BUSY_HYSTERESIS__CPC_BUSY_MASK 0x0000FF00L
+//CP_CPF_BUSY_HYSTERESIS1
+#define CP_CPF_BUSY_HYSTERESIS1__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPF_BUSY_HYSTERESIS1__CPF_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_HYSTERESIS1__CORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_HYSTERESIS1__GFX_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_HYSTERESIS1__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPF_BUSY_HYSTERESIS1__CPF_BUSY_MASK 0x0000FF00L
+#define CP_CPF_BUSY_HYSTERESIS1__CORE_BUSY_MASK 0x00FF0000L
+#define CP_CPF_BUSY_HYSTERESIS1__GFX_BUSY_MASK 0xFF000000L
+//CP_CPF_BUSY_HYSTERESIS2
+#define CP_CPF_BUSY_HYSTERESIS2__CMP_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_HYSTERESIS2__CMP_BUSY_MASK 0x000000FFL
+//CP_CPG_BUSY_HYSTERESIS1
+#define CP_CPG_BUSY_HYSTERESIS1__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPG_BUSY_HYSTERESIS1__CP_BUSY__SHIFT 0x8
+#define CP_CPG_BUSY_HYSTERESIS1__DMA_BUSY__SHIFT 0x10
+#define CP_CPG_BUSY_HYSTERESIS1__GFX_BUSY__SHIFT 0x18
+#define CP_CPG_BUSY_HYSTERESIS1__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPG_BUSY_HYSTERESIS1__CP_BUSY_MASK 0x0000FF00L
+#define CP_CPG_BUSY_HYSTERESIS1__DMA_BUSY_MASK 0x00FF0000L
+#define CP_CPG_BUSY_HYSTERESIS1__GFX_BUSY_MASK 0xFF000000L
+//CP_CPG_BUSY_HYSTERESIS2
+#define CP_CPG_BUSY_HYSTERESIS2__CMP_BUSY__SHIFT 0x0
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_0__SHIFT 0x8
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_1__SHIFT 0x10
+#define CP_CPG_BUSY_HYSTERESIS2__CMP_BUSY_MASK 0x000000FFL
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_0_MASK 0x0000FF00L
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_1_MASK 0x00FF0000L
+//CP_RB_DOORBELL_CLEAR
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
+//CP_RB0_ACTIVE
+#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_ACTIVE
+#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB1_ACTIVE
+#define CP_RB1_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB1_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_STATUS
+#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+//CPG_RCIU_CAM_INDEX
+#define CPG_RCIU_CAM_INDEX__INDEX__SHIFT 0x0
+#define CPG_RCIU_CAM_INDEX__INDEX_MASK 0x0000001FL
+//CPG_RCIU_CAM_DATA
+#define CPG_RCIU_CAM_DATA__DATA__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_RCIU_CAM_DATA_PHASE0
+#define CPG_RCIU_CAM_DATA_PHASE0__ADDR__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE0_EN__SHIFT 0x18
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE1_EN__SHIFT 0x19
+#define CPG_RCIU_CAM_DATA_PHASE0__SKIP_WR__SHIFT 0x1f
+#define CPG_RCIU_CAM_DATA_PHASE0__ADDR_MASK 0x0003FFFFL
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE0_EN_MASK 0x01000000L
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE1_EN_MASK 0x02000000L
+#define CPG_RCIU_CAM_DATA_PHASE0__SKIP_WR_MASK 0x80000000L
+//CPG_RCIU_CAM_DATA_PHASE1
+#define CPG_RCIU_CAM_DATA_PHASE1__MASK__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE1__MASK_MASK 0xFFFFFFFFL
+//CPG_RCIU_CAM_DATA_PHASE2
+#define CPG_RCIU_CAM_DATA_PHASE2__VALUE__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE2__VALUE_MASK 0xFFFFFFFFL
+//CP_GPU_TIMESTAMP_OFFSET_LO
+#define CP_GPU_TIMESTAMP_OFFSET_LO__OFFSET_LO__SHIFT 0x0
+#define CP_GPU_TIMESTAMP_OFFSET_LO__OFFSET_LO_MASK 0xFFFFFFFFL
+//CP_GPU_TIMESTAMP_OFFSET_HI
+#define CP_GPU_TIMESTAMP_OFFSET_HI__OFFSET_HI__SHIFT 0x0
+#define CP_GPU_TIMESTAMP_OFFSET_HI__OFFSET_HI_MASK 0xFFFFFFFFL
+//CP_SDMA_DMA_DONE
+#define CP_SDMA_DMA_DONE__SDMA_ID__SHIFT 0x0
+#define CP_SDMA_DMA_DONE__SDMA_ID_MASK 0x0000000FL
+//CP_PFP_SDMA_CS
+#define CP_PFP_SDMA_CS__REQUEST_GRANT__SHIFT 0x0
+#define CP_PFP_SDMA_CS__SDMA_ID__SHIFT 0x4
+#define CP_PFP_SDMA_CS__REQUEST_POSITION__SHIFT 0x8
+#define CP_PFP_SDMA_CS__SDMA_COUNT__SHIFT 0xc
+#define CP_PFP_SDMA_CS__REQUEST_GRANT_MASK 0x00000001L
+#define CP_PFP_SDMA_CS__SDMA_ID_MASK 0x000000F0L
+#define CP_PFP_SDMA_CS__REQUEST_POSITION_MASK 0x00000F00L
+#define CP_PFP_SDMA_CS__SDMA_COUNT_MASK 0x00003000L
+//CP_ME_SDMA_CS
+#define CP_ME_SDMA_CS__REQUEST_GRANT__SHIFT 0x0
+#define CP_ME_SDMA_CS__SDMA_ID__SHIFT 0x4
+#define CP_ME_SDMA_CS__REQUEST_POSITION__SHIFT 0x8
+#define CP_ME_SDMA_CS__SDMA_COUNT__SHIFT 0xc
+#define CP_ME_SDMA_CS__REQUEST_GRANT_MASK 0x00000001L
+#define CP_ME_SDMA_CS__SDMA_ID_MASK 0x000000F0L
+#define CP_ME_SDMA_CS__REQUEST_POSITION_MASK 0x00000F00L
+#define CP_ME_SDMA_CS__SDMA_COUNT_MASK 0x00003000L
+//CPF_GCR_CNTL
+#define CPF_GCR_CNTL__GCR_GL_CMD__SHIFT 0x0
+#define CPF_GCR_CNTL__GCR_GL_CMD_MASK 0x0007FFFFL
+//CPG_UTCL1_STATUS
+#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPC_UTCL1_STATUS
+#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPF_UTCL1_STATUS
+#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CP_SD_CNTL
+#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
+#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
+#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
+#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
+#define CP_SD_CNTL__GE_EN__SHIFT 0x5
+#define CP_SD_CNTL__UTCL1_EN__SHIFT 0x6
+#define CP_SD_CNTL__EA_EN__SHIFT 0x9
+#define CP_SD_CNTL__SDMA_EN__SHIFT 0xa
+#define CP_SD_CNTL__SD_VMIDVEC_OVERRIDE__SHIFT 0x1f
+#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
+#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
+#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
+#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
+#define CP_SD_CNTL__GE_EN_MASK 0x00000020L
+#define CP_SD_CNTL__UTCL1_EN_MASK 0x00000040L
+#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
+#define CP_SD_CNTL__SDMA_EN_MASK 0x00000400L
+#define CP_SD_CNTL__SD_VMIDVEC_OVERRIDE_MASK 0x80000000L
+//CP_SOFT_RESET_CNTL
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
+#define CP_SOFT_RESET_CNTL__GFX_HQD_REG_RESET__SHIFT 0x7
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
+#define CP_SOFT_RESET_CNTL__GFX_HQD_REG_RESET_MASK 0x00000080L
+//CP_CPC_GFX_CNTL
+#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
+#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
+#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
+#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
+#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
+#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
+#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
+#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
+
+
+// addressBlock: gc_spipdec
+//SPI_ARB_PRIORITY
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
+//SPI_ARB_CYCLES_0
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
+//SPI_ARB_CYCLES_1
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
+//SPI_WCL_PIPE_PERCENT_GFX
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_HP3D
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_CS0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS1
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS2
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS3
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS4
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS5
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS6
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS7
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
+//SPI_USER_ACCUM_VMID_CNTL
+#define SPI_USER_ACCUM_VMID_CNTL__EN_USER_ACCUM__SHIFT 0x0
+#define SPI_USER_ACCUM_VMID_CNTL__EN_USER_ACCUM_MASK 0x0000000FL
+//SPI_GDBG_PER_VMID_CNTL
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID__SHIFT 0x0
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE__SHIFT 0x1
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT 0x3
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT 0x4
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT 0xd
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK 0x00000001L
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK 0x00000006L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK 0x00000008L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK 0x00001FF0L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK 0x00002000L
+//SPI_COMPUTE_QUEUE_RESET
+#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
+#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
+//SPI_COMPUTE_WF_CTX_SAVE
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
+
+
+// addressBlock: gc_cpphqddec
+//CP_HPD_UTCL1_CNTL
+#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
+#define CP_HPD_UTCL1_CNTL__DISABLE_ERROR_REPORT__SHIFT 0xa
+#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
+#define CP_HPD_UTCL1_CNTL__DISABLE_ERROR_REPORT_MASK 0x00000400L
+//CP_HPD_UTCL1_ERROR
+#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
+#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
+#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
+#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
+#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
+#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
+//CP_HPD_UTCL1_ERROR_ADDR
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
+//CP_MQD_BASE_ADDR
+#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_MQD_BASE_ADDR_HI
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_ACTIVE
+#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
+#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
+//CP_HQD_VMID
+#define CP_HQD_VMID__VMID__SHIFT 0x0
+#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
+#define CP_HQD_VMID__VQID__SHIFT 0x10
+#define CP_HQD_VMID__VMID_MASK 0x0000000FL
+#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
+#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
+//CP_HQD_PERSISTENT_STATE
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
+#define CP_HQD_PERSISTENT_STATE__TMZ_CONNECT_OVERRIDE__SHIFT 0x1
+#define CP_HQD_PERSISTENT_STATE__SUSPEND_STATUS__SHIFT 0x7
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
+#define CP_HQD_PERSISTENT_STATE__TMZ_SWITCH_EXEMPT__SHIFT 0x12
+#define CP_HQD_PERSISTENT_STATE__TMZ_MATCH_DIS__SHIFT 0x13
+#define CP_HQD_PERSISTENT_STATE__WPP_CLAMP_EN__SHIFT 0x14
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
+#define CP_HQD_PERSISTENT_STATE__TMZ_CONNECT_OVERRIDE_MASK 0x00000002L
+#define CP_HQD_PERSISTENT_STATE__SUSPEND_STATUS_MASK 0x00000080L
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
+#define CP_HQD_PERSISTENT_STATE__TMZ_SWITCH_EXEMPT_MASK 0x00040000L
+#define CP_HQD_PERSISTENT_STATE__TMZ_MATCH_DIS_MASK 0x00080000L
+#define CP_HQD_PERSISTENT_STATE__WPP_CLAMP_EN_MASK 0x00100000L
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
+//CP_HQD_PIPE_PRIORITY
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
+//CP_HQD_QUEUE_PRIORITY
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_HQD_QUANTUM
+#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
+#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
+#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_HQD_PQ_BASE
+#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
+#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_BASE_HI
+#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
+//CP_HQD_PQ_RPTR
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_RPTR_REPORT_ADDR
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_WPTR_POLL_ADDR
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
+//CP_HQD_PQ_WPTR_POLL_ADDR_HI
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_DOORBELL_CONTROL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_HQD_PQ_CONTROL
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x12
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_PQ_CONTROL__TMZ__SHIFT 0x16
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_PQ_CONTROL__PQ_VOLATILE__SHIFT 0x1a
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
+#define CP_HQD_PQ_CONTROL__TUNNEL_DISPATCH__SHIFT 0x1d
+#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x000C0000L
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_PQ_CONTROL__TMZ_MASK 0x00400000L
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK 0x04000000L
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
+#define CP_HQD_PQ_CONTROL__TUNNEL_DISPATCH_MASK 0x20000000L
+#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
+//CP_HQD_IB_BASE_ADDR
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_IB_BASE_ADDR_HI
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_IB_RPTR
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
+//CP_HQD_IB_CONTROL
+#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IB_CONTROL__IB_VOLATILE__SHIFT 0x1a
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
+#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_IB_CONTROL__IB_VOLATILE_MASK 0x04000000L
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
+//CP_HQD_IQ_TIMER
+#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IQ_TIMER__IQ_VOLATILE__SHIFT 0x1a
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x1b
+#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
+#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_IQ_TIMER__IQ_VOLATILE_MASK 0x04000000L
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x08000000L
+#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
+#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_HQD_IQ_RPTR
+#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
+#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
+//CP_HQD_DEQUEUE_REQUEST
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x0000000FL
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_HQD_DMA_OFFLOAD
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_OFFLOAD
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_SEMA_CMD
+#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
+#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
+#define CP_HQD_SEMA_CMD__POLLING_DIS__SHIFT 0x8
+#define CP_HQD_SEMA_CMD__MESSAGE_EN__SHIFT 0x9
+#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
+#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
+#define CP_HQD_SEMA_CMD__POLLING_DIS_MASK 0x00000100L
+#define CP_HQD_SEMA_CMD__MESSAGE_EN_MASK 0x00000200L
+//CP_HQD_MSG_TYPE
+#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
+#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
+#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
+#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
+//CP_HQD_ATOMIC0_PREOP_LO
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC0_PREOP_HI
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_LO
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_HI
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER0
+#define CP_HQD_HQ_SCHEDULER0__CWSR__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER0__SAVE_STATUS__SHIFT 0x1
+#define CP_HQD_HQ_SCHEDULER0__RSRV__SHIFT 0x2
+#define CP_HQD_HQ_SCHEDULER0__STATIC_QUEUE__SHIFT 0x3
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_RUN_ONCE__SHIFT 0x6
+#define CP_HQD_HQ_SCHEDULER0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_SCHEDULER0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_SCHEDULER0__C_INHERIT_VMID__SHIFT 0x9
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SCHEDULER_TYPE__SHIFT 0xa
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_USE_GWS__SHIFT 0xd
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_DEBUG_EN__SHIFT 0xe
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SLOT_CONNECTED__SHIFT 0xf
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_ENABLED__SHIFT 0x14
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_PIPE__SHIFT 0x15
+#define CP_HQD_HQ_SCHEDULER0__CONCURRENT_PROCESS_COUNT__SHIFT 0x18
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_SCHEDULER0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_SCHEDULER0__CWSR_MASK 0x00000001L
+#define CP_HQD_HQ_SCHEDULER0__SAVE_STATUS_MASK 0x00000002L
+#define CP_HQD_HQ_SCHEDULER0__RSRV_MASK 0x00000004L
+#define CP_HQD_HQ_SCHEDULER0__STATIC_QUEUE_MASK 0x00000038L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_RUN_ONCE_MASK 0x00000040L
+#define CP_HQD_HQ_SCHEDULER0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_SCHEDULER0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_SCHEDULER0__C_INHERIT_VMID_MASK 0x00000200L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SCHEDULER_TYPE_MASK 0x00001C00L
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_USE_GWS_MASK 0x00002000L
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_DEBUG_EN_MASK 0x00004000L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SLOT_CONNECTED_MASK 0x00008000L
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_ENABLED_MASK 0x00100000L
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_PIPE_MASK 0x00600000L
+#define CP_HQD_HQ_SCHEDULER0__CONCURRENT_PROCESS_COUNT_MASK 0x0F000000L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_SCHEDULER0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_STATUS0
+#define CP_HQD_HQ_STATUS0__CWSR__SHIFT 0x0
+#define CP_HQD_HQ_STATUS0__SAVE_STATUS__SHIFT 0x1
+#define CP_HQD_HQ_STATUS0__RSRV__SHIFT 0x2
+#define CP_HQD_HQ_STATUS0__STATIC_QUEUE__SHIFT 0x3
+#define CP_HQD_HQ_STATUS0__QUEUE_RUN_ONCE__SHIFT 0x6
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_STATUS0__C_INHERIT_VMID__SHIFT 0x9
+#define CP_HQD_HQ_STATUS0__QUEUE_SCHEDULER_TYPE__SHIFT 0xa
+#define CP_HQD_HQ_STATUS0__C_QUEUE_USE_GWS__SHIFT 0xd
+#define CP_HQD_HQ_STATUS0__C_QUEUE_DEBUG_EN__SHIFT 0xe
+#define CP_HQD_HQ_STATUS0__QUEUE_SLOT_CONNECTED__SHIFT 0xf
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_ENABLED__SHIFT 0x14
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_PIPE__SHIFT 0x15
+#define CP_HQD_HQ_STATUS0__CONCURRENT_PROCESS_COUNT__SHIFT 0x18
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_STATUS0__CWSR_MASK 0x00000001L
+#define CP_HQD_HQ_STATUS0__SAVE_STATUS_MASK 0x00000002L
+#define CP_HQD_HQ_STATUS0__RSRV_MASK 0x00000004L
+#define CP_HQD_HQ_STATUS0__STATIC_QUEUE_MASK 0x00000038L
+#define CP_HQD_HQ_STATUS0__QUEUE_RUN_ONCE_MASK 0x00000040L
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_STATUS0__C_INHERIT_VMID_MASK 0x00000200L
+#define CP_HQD_HQ_STATUS0__QUEUE_SCHEDULER_TYPE_MASK 0x00001C00L
+#define CP_HQD_HQ_STATUS0__C_QUEUE_USE_GWS_MASK 0x00002000L
+#define CP_HQD_HQ_STATUS0__C_QUEUE_DEBUG_EN_MASK 0x00004000L
+#define CP_HQD_HQ_STATUS0__QUEUE_SLOT_CONNECTED_MASK 0x00008000L
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_ENABLED_MASK 0x00100000L
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_PIPE_MASK 0x00600000L
+#define CP_HQD_HQ_STATUS0__CONCURRENT_PROCESS_COUNT_MASK 0x0F000000L
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_CONTROL0
+#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER1
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_MQD_CONTROL
+#define CP_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_MQD_CONTROL__MQD_VOLATILE__SHIFT 0x1a
+#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_MQD_CONTROL__MQD_VOLATILE_MASK 0x04000000L
+//CP_HQD_HQ_STATUS1
+#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_CONTROL1
+#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR_HI
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
+//CP_HQD_EOP_CONTROL
+#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_EOP_CONTROL__EOP_VOLATILE__SHIFT 0x1a
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
+#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_EOP_CONTROL__EOP_VOLATILE_MASK 0x04000000L
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
+//CP_HQD_EOP_RPTR
+#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
+#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
+#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
+#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
+//CP_HQD_EOP_WPTR
+#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
+#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
+#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
+#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
+//CP_HQD_EOP_EVENTS
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_LO
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_HI
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_CTX_SAVE_CONTROL
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000018L
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CP_HQD_CNTL_STACK_OFFSET
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_CNTL_STACK_SIZE
+#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CP_HQD_WG_STATE_OFFSET
+#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CP_HQD_CTX_SAVE_SIZE
+#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x03FFF000L
+//CP_HQD_GDS_RESOURCE_STATE
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
+//CP_HQD_ERROR
+#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
+#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
+#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
+#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
+#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
+#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
+#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
+#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
+#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
+#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
+#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
+#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
+//CP_HQD_EOP_WPTR_MEM
+#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
+//CP_HQD_AQL_CONTROL
+#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
+#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
+#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
+//CP_HQD_PQ_WPTR_LO
+#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_WPTR_HI
+#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
+//CP_HQD_SUSPEND_CNTL_STACK_OFFSET
+#define CP_HQD_SUSPEND_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_SUSPEND_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_SUSPEND_CNTL_STACK_DW_CNT
+#define CP_HQD_SUSPEND_CNTL_STACK_DW_CNT__CNT__SHIFT 0x0
+#define CP_HQD_SUSPEND_CNTL_STACK_DW_CNT__CNT_MASK 0x00003FFFL
+//CP_HQD_SUSPEND_WG_STATE_OFFSET
+#define CP_HQD_SUSPEND_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_SUSPEND_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CP_HQD_DDID_RPTR
+#define CP_HQD_DDID_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_DDID_RPTR__RPTR_MASK 0x000007FFL
+//CP_HQD_DDID_WPTR
+#define CP_HQD_DDID_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_DDID_WPTR__WPTR_MASK 0x000007FFL
+//CP_HQD_DDID_INFLIGHT_COUNT
+#define CP_HQD_DDID_INFLIGHT_COUNT__COUNT__SHIFT 0x0
+#define CP_HQD_DDID_INFLIGHT_COUNT__COUNT_MASK 0x0000FFFFL
+//CP_HQD_DDID_DELTA_RPT_COUNT
+#define CP_HQD_DDID_DELTA_RPT_COUNT__COUNT__SHIFT 0x0
+#define CP_HQD_DDID_DELTA_RPT_COUNT__COUNT_MASK 0x000000FFL
+//CP_HQD_DEQUEUE_STATUS
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT__SHIFT 0x0
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_MASK 0x0000000FL
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_EN_MASK 0x00000400L
+
+
+// addressBlock: gc_tcpdec
+//TCP_WATCH0_ADDR_H
+#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH0_ADDR_L
+#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH0_CNTL
+#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH0_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH1_ADDR_H
+#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH1_ADDR_L
+#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH1_CNTL
+#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH1_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH2_ADDR_H
+#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH2_ADDR_L
+#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH2_CNTL
+#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH2_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH3_ADDR_H
+#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH3_ADDR_L
+#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH3_CNTL
+#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH3_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
+
+
+// addressBlock: gc_gdspdec
+//GDS_VMID0_BASE
+#define GDS_VMID0_BASE__BASE__SHIFT 0x0
+#define GDS_VMID0_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID0_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID0_SIZE
+#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID0_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID0_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID1_BASE
+#define GDS_VMID1_BASE__BASE__SHIFT 0x0
+#define GDS_VMID1_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID1_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID1_SIZE
+#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID1_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID1_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID2_BASE
+#define GDS_VMID2_BASE__BASE__SHIFT 0x0
+#define GDS_VMID2_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID2_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID2_SIZE
+#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID2_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID2_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID3_BASE
+#define GDS_VMID3_BASE__BASE__SHIFT 0x0
+#define GDS_VMID3_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID3_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID3_SIZE
+#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID3_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID3_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID4_BASE
+#define GDS_VMID4_BASE__BASE__SHIFT 0x0
+#define GDS_VMID4_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID4_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID4_SIZE
+#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID4_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID4_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID5_BASE
+#define GDS_VMID5_BASE__BASE__SHIFT 0x0
+#define GDS_VMID5_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID5_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID5_SIZE
+#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID5_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID5_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID6_BASE
+#define GDS_VMID6_BASE__BASE__SHIFT 0x0
+#define GDS_VMID6_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID6_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID6_SIZE
+#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID6_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID6_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID7_BASE
+#define GDS_VMID7_BASE__BASE__SHIFT 0x0
+#define GDS_VMID7_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID7_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID7_SIZE
+#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID7_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID7_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID8_BASE
+#define GDS_VMID8_BASE__BASE__SHIFT 0x0
+#define GDS_VMID8_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID8_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID8_SIZE
+#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID8_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID8_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID9_BASE
+#define GDS_VMID9_BASE__BASE__SHIFT 0x0
+#define GDS_VMID9_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID9_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID9_SIZE
+#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID9_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID9_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID10_BASE
+#define GDS_VMID10_BASE__BASE__SHIFT 0x0
+#define GDS_VMID10_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID10_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID10_SIZE
+#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID10_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID10_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID11_BASE
+#define GDS_VMID11_BASE__BASE__SHIFT 0x0
+#define GDS_VMID11_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID11_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID11_SIZE
+#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID11_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID11_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID12_BASE
+#define GDS_VMID12_BASE__BASE__SHIFT 0x0
+#define GDS_VMID12_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID12_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID12_SIZE
+#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID12_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID12_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID13_BASE
+#define GDS_VMID13_BASE__BASE__SHIFT 0x0
+#define GDS_VMID13_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID13_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID13_SIZE
+#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID13_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID13_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID14_BASE
+#define GDS_VMID14_BASE__BASE__SHIFT 0x0
+#define GDS_VMID14_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID14_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID14_SIZE
+#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID14_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID14_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID15_BASE
+#define GDS_VMID15_BASE__BASE__SHIFT 0x0
+#define GDS_VMID15_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID15_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID15_SIZE
+#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID15_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID15_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_GWS_VMID0
+#define GDS_GWS_VMID0__BASE__SHIFT 0x0
+#define GDS_GWS_VMID0__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID0__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID0__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID0__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID1
+#define GDS_GWS_VMID1__BASE__SHIFT 0x0
+#define GDS_GWS_VMID1__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID1__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID1__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID1__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID2
+#define GDS_GWS_VMID2__BASE__SHIFT 0x0
+#define GDS_GWS_VMID2__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID2__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID2__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID2__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID3
+#define GDS_GWS_VMID3__BASE__SHIFT 0x0
+#define GDS_GWS_VMID3__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID3__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID3__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID3__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID4
+#define GDS_GWS_VMID4__BASE__SHIFT 0x0
+#define GDS_GWS_VMID4__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID4__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID4__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID4__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID5
+#define GDS_GWS_VMID5__BASE__SHIFT 0x0
+#define GDS_GWS_VMID5__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID5__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID5__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID5__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID6
+#define GDS_GWS_VMID6__BASE__SHIFT 0x0
+#define GDS_GWS_VMID6__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID6__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID6__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID6__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID7
+#define GDS_GWS_VMID7__BASE__SHIFT 0x0
+#define GDS_GWS_VMID7__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID7__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID7__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID7__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID8
+#define GDS_GWS_VMID8__BASE__SHIFT 0x0
+#define GDS_GWS_VMID8__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID8__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID8__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID8__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID9
+#define GDS_GWS_VMID9__BASE__SHIFT 0x0
+#define GDS_GWS_VMID9__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID9__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID9__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID9__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID10
+#define GDS_GWS_VMID10__BASE__SHIFT 0x0
+#define GDS_GWS_VMID10__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID10__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID10__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID10__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID11
+#define GDS_GWS_VMID11__BASE__SHIFT 0x0
+#define GDS_GWS_VMID11__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID11__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID11__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID11__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID12
+#define GDS_GWS_VMID12__BASE__SHIFT 0x0
+#define GDS_GWS_VMID12__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID12__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID12__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID12__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID13
+#define GDS_GWS_VMID13__BASE__SHIFT 0x0
+#define GDS_GWS_VMID13__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID13__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID13__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID13__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID14
+#define GDS_GWS_VMID14__BASE__SHIFT 0x0
+#define GDS_GWS_VMID14__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID14__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID14__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID14__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID15
+#define GDS_GWS_VMID15__BASE__SHIFT 0x0
+#define GDS_GWS_VMID15__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID15__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID15__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID15__UNUSED2_MASK 0xFF800000L
+//GDS_OA_VMID0
+#define GDS_OA_VMID0__MASK__SHIFT 0x0
+#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID1
+#define GDS_OA_VMID1__MASK__SHIFT 0x0
+#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID2
+#define GDS_OA_VMID2__MASK__SHIFT 0x0
+#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID3
+#define GDS_OA_VMID3__MASK__SHIFT 0x0
+#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID4
+#define GDS_OA_VMID4__MASK__SHIFT 0x0
+#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID5
+#define GDS_OA_VMID5__MASK__SHIFT 0x0
+#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID6
+#define GDS_OA_VMID6__MASK__SHIFT 0x0
+#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID7
+#define GDS_OA_VMID7__MASK__SHIFT 0x0
+#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID8
+#define GDS_OA_VMID8__MASK__SHIFT 0x0
+#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID9
+#define GDS_OA_VMID9__MASK__SHIFT 0x0
+#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID10
+#define GDS_OA_VMID10__MASK__SHIFT 0x0
+#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID11
+#define GDS_OA_VMID11__MASK__SHIFT 0x0
+#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID12
+#define GDS_OA_VMID12__MASK__SHIFT 0x0
+#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID13
+#define GDS_OA_VMID13__MASK__SHIFT 0x0
+#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID14
+#define GDS_OA_VMID14__MASK__SHIFT 0x0
+#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID15
+#define GDS_OA_VMID15__MASK__SHIFT 0x0
+#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
+//GDS_GWS_RESET0
+#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
+#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
+#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
+#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
+#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
+#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
+#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
+#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
+#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
+#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
+#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
+#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
+#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
+#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
+#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
+#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
+#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
+#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
+#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
+#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
+#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
+#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
+#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
+#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
+#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
+#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
+#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
+//GDS_GWS_RESET1
+#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
+#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
+#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
+#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
+#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
+#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
+#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
+#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
+#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
+#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
+#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
+#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
+#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
+#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
+#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
+#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
+#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
+#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
+#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
+#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
+#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
+#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
+#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
+#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
+#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
+#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
+#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
+//GDS_GWS_RESOURCE_RESET
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
+#define GDS_GWS_RESOURCE_RESET__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
+#define GDS_GWS_RESOURCE_RESET__UNUSED_MASK 0xFFFF0000L
+//GDS_COMPUTE_MAX_WAVE_ID
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define GDS_COMPUTE_MAX_WAVE_ID__UNUSED__SHIFT 0xc
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define GDS_COMPUTE_MAX_WAVE_ID__UNUSED_MASK 0xFFFFF000L
+//GDS_OA_RESET_MASK
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
+#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
+#define GDS_OA_RESET_MASK__ME0_PIPE1_CS_RESET__SHIFT 0xc
+#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xd
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
+#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
+#define GDS_OA_RESET_MASK__ME0_PIPE1_CS_RESET_MASK 0x00001000L
+#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFE000L
+//GDS_OA_RESET
+#define GDS_OA_RESET__RESET__SHIFT 0x0
+#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
+#define GDS_OA_RESET__UNUSED__SHIFT 0x10
+#define GDS_OA_RESET__RESET_MASK 0x00000001L
+#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
+#define GDS_OA_RESET__UNUSED_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_STATUS
+#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_CS_CTXSW_CNT0
+#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT1
+#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT2
+#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT3
+#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GFX_CTXSW_STATUS
+#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_PS_CTXSW_CNT0
+#define GDS_PS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT1
+#define GDS_PS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT2
+#define GDS_PS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT3
+#define GDS_PS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_IDX
+#define GDS_PS_CTXSW_IDX__PACKER_ID__SHIFT 0x0
+#define GDS_PS_CTXSW_IDX__UNUSED__SHIFT 0x6
+#define GDS_PS_CTXSW_IDX__PACKER_ID_MASK 0x0000003FL
+#define GDS_PS_CTXSW_IDX__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GS_CTXSW_CNT0
+#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT1
+#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT2
+#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT3
+#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_MEMORY_CLEAN
+#define GDS_MEMORY_CLEAN__START__SHIFT 0x0
+#define GDS_MEMORY_CLEAN__FINISH__SHIFT 0x1
+#define GDS_MEMORY_CLEAN__UNUSED__SHIFT 0x2
+#define GDS_MEMORY_CLEAN__START_MASK 0x00000001L
+#define GDS_MEMORY_CLEAN__FINISH_MASK 0x00000002L
+#define GDS_MEMORY_CLEAN__UNUSED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_rasdec
+//RAS_SIGNATURE_CONTROL
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+//RAS_SIGNATURE_MASK
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE0
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE1
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE2
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE3
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_DB_SIGNATURE0
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_PA_SIGNATURE0
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE0
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE1
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE2
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE3
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE4
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE5
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE6
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE7
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE0
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE1
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_CB_SIGNATURE0
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE0
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE1
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gusdec
+//GUS_IO_RD_COMBINE_FLUSH
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x18
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+#define GUS_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x03000000L
+//GUS_IO_WR_COMBINE_FLUSH
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x18
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+#define GUS_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x03000000L
+//GUS_IO_RD_PRI_AGE_RATE
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_IO_WR_PRI_AGE_RATE
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_IO_RD_PRI_AGE_COEFF
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_AGE_COEFF
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_QUEUING
+#define GUS_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_QUEUING
+#define GUS_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_FIXED
+#define GUS_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_FIXED
+#define GUS_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_URGENCY_COEFF
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_URGENCY_COEFF
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_URGENCY_MODE
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_IO_WR_PRI_URGENCY_MODE
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_IO_RD_PRI_QUANT_PRI1
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI2
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI3
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI4
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI1
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI2
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI3
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI4
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT1_PRI1
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI2
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI3
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI4
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI1
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI2
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI3
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI4
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_COMBINE_FLUSH
+#define GUS_DRAM_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_DRAM_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_DRAM_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_DRAM_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_DRAM_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_DRAM_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_DRAM_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_DRAM_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+//GUS_DRAM_COMBINE_RD_WR_EN
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP0_TIMER__SHIFT 0x0
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP1_TIMER__SHIFT 0x2
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP2_TIMER__SHIFT 0x4
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP3_TIMER__SHIFT 0x6
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP4_TIMER__SHIFT 0x8
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP5_TIMER__SHIFT 0xa
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP0_TIMER_MASK 0x00000003L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP1_TIMER_MASK 0x0000000CL
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP2_TIMER_MASK 0x00000030L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP3_TIMER_MASK 0x000000C0L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP4_TIMER_MASK 0x00000300L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP5_TIMER_MASK 0x00000C00L
+//GUS_DRAM_PRI_AGE_RATE
+#define GUS_DRAM_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_DRAM_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_DRAM_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_DRAM_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_DRAM_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_DRAM_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_DRAM_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_DRAM_PRI_AGE_COEFF
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_QUEUING
+#define GUS_DRAM_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_FIXED
+#define GUS_DRAM_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_URGENCY_COEFF
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_URGENCY_MODE
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_DRAM_PRI_QUANT_PRI1
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI2
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI3
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI4
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI5
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT1_PRI1
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI2
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI3
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI4
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI5
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_GROUP_BURST
+#define GUS_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GUS_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GUS_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GUS_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GUS_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GUS_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GUS_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GUS_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GUS_DRAM_GROUP_BURST
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_LO__SHIFT 0x0
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_HI__SHIFT 0x8
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_LO_MASK 0x000000FFL
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_HI_MASK 0x0000FF00L
+//GUS_SDP_ARB_FINAL
+#define GUS_SDP_ARB_FINAL__HI_DRAM_BURST_LIMIT__SHIFT 0x0
+#define GUS_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x5
+#define GUS_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GUS_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GUS_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x11
+#define GUS_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x12
+#define GUS_SDP_ARB_FINAL__HI_DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GUS_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x000003E0L
+#define GUS_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GUS_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GUS_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x00020000L
+#define GUS_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x00040000L
+//GUS_SDP_QOS_VC_PRIORITY
+#define GUS_SDP_QOS_VC_PRIORITY__VC2_IORD__SHIFT 0x0
+#define GUS_SDP_QOS_VC_PRIORITY__VC3_IOWR__SHIFT 0x4
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_DRAM__SHIFT 0x8
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_HI_DRAM__SHIFT 0xc
+#define GUS_SDP_QOS_VC_PRIORITY__VC2_IORD_MASK 0x0000000FL
+#define GUS_SDP_QOS_VC_PRIORITY__VC3_IOWR_MASK 0x000000F0L
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_DRAM_MASK 0x00000F00L
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_HI_DRAM_MASK 0x0000F000L
+//GUS_SDP_CREDITS
+#define GUS_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GUS_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GUS_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GUS_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GUS_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GUS_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//GUS_SDP_TAG_RESERVE0
+#define GUS_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GUS_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GUS_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GUS_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GUS_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GUS_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GUS_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GUS_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GUS_SDP_TAG_RESERVE1
+#define GUS_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GUS_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GUS_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GUS_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GUS_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GUS_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GUS_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GUS_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GUS_SDP_VCC_RESERVE0
+#define GUS_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GUS_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GUS_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GUS_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GUS_SDP_VCC_RESERVE1
+#define GUS_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GUS_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GUS_SDP_VCD_RESERVE0
+#define GUS_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GUS_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GUS_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GUS_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GUS_SDP_VCD_RESERVE1
+#define GUS_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GUS_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GUS_SDP_REQ_CNTL
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GUS_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GUS_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GUS_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GUS_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
+//GUS_MISC
+#define GUS_MISC__RELATIVE_PRI_IN_DRAM_ARB__SHIFT 0x0
+#define GUS_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x1
+#define GUS_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x2
+#define GUS_MISC__EARLY_SDP_ORIGDATA__SHIFT 0x3
+#define GUS_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0x4
+#define GUS_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x6
+#define GUS_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x8
+#define GUS_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0xa
+#define GUS_MISC__SEND0_IOWR_ONLY__SHIFT 0xf
+#define GUS_MISC__RELATIVE_PRI_IN_DRAM_ARB_MASK 0x00000001L
+#define GUS_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000002L
+#define GUS_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000004L
+#define GUS_MISC__EARLY_SDP_ORIGDATA_MASK 0x00000008L
+#define GUS_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00000030L
+#define GUS_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x000000C0L
+#define GUS_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00000300L
+#define GUS_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x00007C00L
+#define GUS_MISC__SEND0_IOWR_ONLY_MASK 0x00008000L
+//GUS_LATENCY_SAMPLING
+#define GUS_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GUS_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GUS_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x2
+#define GUS_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x3
+#define GUS_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x4
+#define GUS_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x5
+#define GUS_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x6
+#define GUS_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x7
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0x8
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0x9
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xa
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xb
+#define GUS_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xc
+#define GUS_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x14
+#define GUS_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000004L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000008L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000010L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000020L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000040L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000080L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000100L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000200L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00000400L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00000800L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x000FF000L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x0FF00000L
+//GUS_ERR_STATUS
+#define GUS_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GUS_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GUS_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GUS_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GUS_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GUS_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GUS_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GUS_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GUS_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GUS_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GUS_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GUS_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GUS_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GUS_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//GUS_MISC2
+#define GUS_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0x0
+#define GUS_MISC2__CH_L1_RO_MASK__SHIFT 0x1
+#define GUS_MISC2__SA0_L1_RO_MASK__SHIFT 0x2
+#define GUS_MISC2__SA1_L1_RO_MASK__SHIFT 0x3
+#define GUS_MISC2__SA2_L1_RO_MASK__SHIFT 0x4
+#define GUS_MISC2__SA3_L1_RO_MASK__SHIFT 0x5
+#define GUS_MISC2__CH_L1_PERF_MASK__SHIFT 0x6
+#define GUS_MISC2__SA0_L1_PERF_MASK__SHIFT 0x7
+#define GUS_MISC2__SA1_L1_PERF_MASK__SHIFT 0x8
+#define GUS_MISC2__SA2_L1_PERF_MASK__SHIFT 0x9
+#define GUS_MISC2__SA3_L1_PERF_MASK__SHIFT 0xa
+#define GUS_MISC2__FP_ATOMICS_ENABLE__SHIFT 0xb
+#define GUS_MISC2__L1_RET_CLKEN__SHIFT 0xc
+#define GUS_MISC2__FGCLKEN_HIGH__SHIFT 0xd
+#define GUS_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define GUS_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define GUS_MISC2__RIO_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x10
+#define GUS_MISC2__WIO_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x11
+#define GUS_MISC2__DRAM_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x12
+#define GUS_MISC2__RDRET_FED_MASK__SHIFT 0x13
+#define GUS_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00000001L
+#define GUS_MISC2__CH_L1_RO_MASK_MASK 0x00000002L
+#define GUS_MISC2__SA0_L1_RO_MASK_MASK 0x00000004L
+#define GUS_MISC2__SA1_L1_RO_MASK_MASK 0x00000008L
+#define GUS_MISC2__SA2_L1_RO_MASK_MASK 0x00000010L
+#define GUS_MISC2__SA3_L1_RO_MASK_MASK 0x00000020L
+#define GUS_MISC2__CH_L1_PERF_MASK_MASK 0x00000040L
+#define GUS_MISC2__SA0_L1_PERF_MASK_MASK 0x00000080L
+#define GUS_MISC2__SA1_L1_PERF_MASK_MASK 0x00000100L
+#define GUS_MISC2__SA2_L1_PERF_MASK_MASK 0x00000200L
+#define GUS_MISC2__SA3_L1_PERF_MASK_MASK 0x00000400L
+#define GUS_MISC2__FP_ATOMICS_ENABLE_MASK 0x00000800L
+#define GUS_MISC2__L1_RET_CLKEN_MASK 0x00001000L
+#define GUS_MISC2__FGCLKEN_HIGH_MASK 0x00002000L
+#define GUS_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define GUS_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define GUS_MISC2__RIO_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00010000L
+#define GUS_MISC2__WIO_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00020000L
+#define GUS_MISC2__DRAM_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00040000L
+#define GUS_MISC2__RDRET_FED_MASK_MASK 0x00080000L
+//GUS_SDP_BACKDOOR_CMDCREDITS0
+#define GUS_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
+//GUS_SDP_BACKDOOR_CMDCREDITS1
+#define GUS_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
+//GUS_SDP_BACKDOOR_DATACREDITS0
+#define GUS_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
+//GUS_SDP_BACKDOOR_DATACREDITS1
+#define GUS_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
+//GUS_SDP_BACKDOOR_MISCCREDITS
+#define GUS_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
+#define GUS_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
+#define GUS_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
+//GUS_SDP_ENABLE
+#define GUS_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GUS_SDP_ENABLE__ENABLE_MASK 0x00000001L
+//GUS_L1_CH0_CMD_IN
+#define GUS_L1_CH0_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_CMD_OUT
+#define GUS_L1_CH0_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_IN
+#define GUS_L1_CH0_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_OUT
+#define GUS_L1_CH0_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_U_IN
+#define GUS_L1_CH0_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_U_OUT
+#define GUS_L1_CH0_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_CMD_IN
+#define GUS_L1_CH1_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_CMD_OUT
+#define GUS_L1_CH1_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_IN
+#define GUS_L1_CH1_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_OUT
+#define GUS_L1_CH1_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_U_IN
+#define GUS_L1_CH1_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_U_OUT
+#define GUS_L1_CH1_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_CMD_IN
+#define GUS_L1_SA0_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_CMD_OUT
+#define GUS_L1_SA0_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_IN
+#define GUS_L1_SA0_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_OUT
+#define GUS_L1_SA0_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_U_IN
+#define GUS_L1_SA0_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_U_OUT
+#define GUS_L1_SA0_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_CMD_IN
+#define GUS_L1_SA1_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_CMD_OUT
+#define GUS_L1_SA1_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_IN
+#define GUS_L1_SA1_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_OUT
+#define GUS_L1_SA1_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_U_IN
+#define GUS_L1_SA1_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_U_OUT
+#define GUS_L1_SA1_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_CMD_IN
+#define GUS_L1_SA2_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_CMD_OUT
+#define GUS_L1_SA2_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_IN
+#define GUS_L1_SA2_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_OUT
+#define GUS_L1_SA2_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_U_IN
+#define GUS_L1_SA2_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_U_OUT
+#define GUS_L1_SA2_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_CMD_IN
+#define GUS_L1_SA3_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_CMD_OUT
+#define GUS_L1_SA3_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_IN
+#define GUS_L1_SA3_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_OUT
+#define GUS_L1_SA3_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_U_IN
+#define GUS_L1_SA3_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_U_OUT
+#define GUS_L1_SA3_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_MISC3
+#define GUS_MISC3__FP_ATOMICS_LOG__SHIFT 0x0
+#define GUS_MISC3__CLEAR_LOG__SHIFT 0x1
+#define GUS_MISC3__FP_ATOMICS_LOG_MASK 0x00000001L
+#define GUS_MISC3__CLEAR_LOG_MASK 0x00000002L
+//GUS_WRRSP_FIFO_CNTL
+#define GUS_WRRSP_FIFO_CNTL__THRESHOLD__SHIFT 0x0
+#define GUS_WRRSP_FIFO_CNTL__THRESHOLD_MASK 0x0000003FL
+
+
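[Editor's note, not part of the patch: the generated __SHIFT/_MASK pairs above are meant to be used together to pack and unpack individual register fields. The sketch below is a minimal, standalone illustration of that convention; the helper names set_reg_field()/get_reg_field() and the local copies of the GUS_SDP_CREDITS__TAG_LIMIT values (taken verbatim from the definitions above) are illustrative only and do not come from this patch or from the kernel's own register accessors.]

#include <stdint.h>
#include <stdio.h>

/* Values copied from the generated definitions above (illustration only). */
#define GUS_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
#define GUS_SDP_CREDITS__TAG_LIMIT_MASK   0x000000FFL

/* Insert a field value into a register word using its mask/shift pair. */
static inline uint32_t set_reg_field(uint32_t reg, uint32_t mask,
                                     uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Extract a field value from a register word using its mask/shift pair. */
static inline uint32_t get_reg_field(uint32_t reg, uint32_t mask,
                                     uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t credits = 0;

	/* Program an 8-bit tag limit of 0x40 into the TAG_LIMIT field. */
	credits = set_reg_field(credits,
	                        GUS_SDP_CREDITS__TAG_LIMIT_MASK,
	                        GUS_SDP_CREDITS__TAG_LIMIT__SHIFT, 0x40);

	printf("TAG_LIMIT = 0x%x\n",
	       get_reg_field(credits,
	                     GUS_SDP_CREDITS__TAG_LIMIT_MASK,
	                     GUS_SDP_CREDITS__TAG_LIMIT__SHIFT));
	return 0;
}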
+// addressBlock: gc_gfxdec0
+//DB_RENDER_CONTROL
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
+#define DB_RENDER_CONTROL__PS_INVOKE_DISABLE__SHIFT 0xe
+#define DB_RENDER_CONTROL__OREO_MODE__SHIFT 0x10
+#define DB_RENDER_CONTROL__FORCE_OREO_MODE__SHIFT 0x12
+#define DB_RENDER_CONTROL__FORCE_EXPORT_ORDER__SHIFT 0x13
+#define DB_RENDER_CONTROL__MAX_ALLOWED_TILES_IN_WAVE__SHIFT 0x14
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
+#define DB_RENDER_CONTROL__PS_INVOKE_DISABLE_MASK 0x00004000L
+#define DB_RENDER_CONTROL__OREO_MODE_MASK 0x00030000L
+#define DB_RENDER_CONTROL__FORCE_OREO_MODE_MASK 0x00040000L
+#define DB_RENDER_CONTROL__FORCE_EXPORT_ORDER_MASK 0x00080000L
+#define DB_RENDER_CONTROL__MAX_ALLOWED_TILES_IN_WAVE_MASK 0x00F00000L
+//DB_COUNT_CONTROL
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
+#define DB_COUNT_CONTROL__DISABLE_CONSERVATIVE_ZPASS_COUNTS__SHIFT 0x2
+#define DB_COUNT_CONTROL__ENHANCED_CONSERVATIVE_ZPASS_COUNTS__SHIFT 0x3
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__DISABLE_CONSERVATIVE_ZPASS_COUNTS_MASK 0x00000004L
+#define DB_COUNT_CONTROL__ENHANCED_CONSERVATIVE_ZPASS_COUNTS_MASK 0x00000008L
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
+//DB_DEPTH_VIEW
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
+#define DB_DEPTH_VIEW__SLICE_START_HI__SHIFT 0xb
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
+#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
+#define DB_DEPTH_VIEW__SLICE_MAX_HI__SHIFT 0x1e
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
+#define DB_DEPTH_VIEW__SLICE_START_HI_MASK 0x00001800L
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
+#define DB_DEPTH_VIEW__SLICE_MAX_HI_MASK 0xC0000000L
+//DB_RENDER_OVERRIDE
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+//DB_RENDER_OVERRIDE2
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
+#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE2__DISABLE_NOZ__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
+#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK 0x18000000L
+#define DB_RENDER_OVERRIDE2__DISABLE_NOZ_MASK 0x20000000L
+//DB_HTILE_DATA_BASE
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_DEPTH_SIZE_XY
+#define DB_DEPTH_SIZE_XY__X_MAX__SHIFT 0x0
+#define DB_DEPTH_SIZE_XY__Y_MAX__SHIFT 0x10
+#define DB_DEPTH_SIZE_XY__X_MAX_MASK 0x00003FFFL
+#define DB_DEPTH_SIZE_XY__Y_MAX_MASK 0x3FFF0000L
+//DB_DEPTH_BOUNDS_MIN
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
+//DB_DEPTH_BOUNDS_MAX
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
+//DB_STENCIL_CLEAR
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
+//DB_DEPTH_CLEAR
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
+//PA_SC_SCREEN_SCISSOR_TL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_SCISSOR_BR
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
+//DB_RESERVED_REG_2
+#define DB_RESERVED_REG_2__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_2__FIELD_2__SHIFT 0x4
+#define DB_RESERVED_REG_2__FIELD_3__SHIFT 0x8
+#define DB_RESERVED_REG_2__FIELD_4__SHIFT 0xd
+#define DB_RESERVED_REG_2__FIELD_5__SHIFT 0xf
+#define DB_RESERVED_REG_2__FIELD_6__SHIFT 0x11
+#define DB_RESERVED_REG_2__FIELD_7__SHIFT 0x13
+#define DB_RESERVED_REG_2__FIELD_8__SHIFT 0x1c
+#define DB_RESERVED_REG_2__FIELD_1_MASK 0x0000000FL
+#define DB_RESERVED_REG_2__FIELD_2_MASK 0x000000F0L
+#define DB_RESERVED_REG_2__FIELD_3_MASK 0x00001F00L
+#define DB_RESERVED_REG_2__FIELD_4_MASK 0x00006000L
+#define DB_RESERVED_REG_2__FIELD_5_MASK 0x00018000L
+#define DB_RESERVED_REG_2__FIELD_6_MASK 0x00060000L
+#define DB_RESERVED_REG_2__FIELD_7_MASK 0x00180000L
+#define DB_RESERVED_REG_2__FIELD_8_MASK 0xF0000000L
+//DB_Z_INFO
+#define DB_Z_INFO__FORMAT__SHIFT 0x0
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
+#define DB_Z_INFO__SW_MODE__SHIFT 0x4
+#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0x9
+#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xb
+#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_Z_INFO__RESERVED_FIELD_1__SHIFT 0xd
+#define DB_Z_INFO__MAXMIP__SHIFT 0x10
+#define DB_Z_INFO__ITERATE_256__SHIFT 0x14
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
+#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00000600L
+#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00000800L
+#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_Z_INFO__RESERVED_FIELD_1_MASK 0x0000E000L
+#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
+#define DB_Z_INFO__ITERATE_256_MASK 0x00100000L
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+//DB_STENCIL_INFO
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
+#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0x9
+#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xb
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_STENCIL_INFO__RESERVED_FIELD_1__SHIFT 0xd
+#define DB_STENCIL_INFO__ITERATE_256__SHIFT 0x14
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00000600L
+#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00000800L
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_STENCIL_INFO__RESERVED_FIELD_1_MASK 0x0000E000L
+#define DB_STENCIL_INFO__ITERATE_256_MASK 0x00100000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+//DB_Z_READ_BASE
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_READ_BASE
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_WRITE_BASE
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_WRITE_BASE
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_RESERVED_REG_1
+#define DB_RESERVED_REG_1__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_1__FIELD_2__SHIFT 0xb
+#define DB_RESERVED_REG_1__FIELD_1_MASK 0x000007FFL
+#define DB_RESERVED_REG_1__FIELD_2_MASK 0x003FF800L
+//DB_RESERVED_REG_3
+#define DB_RESERVED_REG_3__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_3__FIELD_1_MASK 0x003FFFFFL
+//DB_Z_READ_BASE_HI
+#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_READ_BASE_HI
+#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_Z_WRITE_BASE_HI
+#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_WRITE_BASE_HI
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_HTILE_DATA_BASE_HI
+#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_RMI_L2_CACHE_CONTROL
+#define DB_RMI_L2_CACHE_CONTROL__Z_WR_POLICY__SHIFT 0x0
+#define DB_RMI_L2_CACHE_CONTROL__S_WR_POLICY__SHIFT 0x2
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_WR_POLICY__SHIFT 0x4
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_WR_POLICY__SHIFT 0x6
+#define DB_RMI_L2_CACHE_CONTROL__Z_RD_POLICY__SHIFT 0x10
+#define DB_RMI_L2_CACHE_CONTROL__S_RD_POLICY__SHIFT 0x12
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_RD_POLICY__SHIFT 0x14
+#define DB_RMI_L2_CACHE_CONTROL__Z_BIG_PAGE__SHIFT 0x18
+#define DB_RMI_L2_CACHE_CONTROL__S_BIG_PAGE__SHIFT 0x19
+#define DB_RMI_L2_CACHE_CONTROL__Z_NOALLOC__SHIFT 0x1a
+#define DB_RMI_L2_CACHE_CONTROL__S_NOALLOC__SHIFT 0x1b
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_NOALLOC__SHIFT 0x1c
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_NOALLOC__SHIFT 0x1d
+#define DB_RMI_L2_CACHE_CONTROL__Z_WR_POLICY_MASK 0x00000003L
+#define DB_RMI_L2_CACHE_CONTROL__S_WR_POLICY_MASK 0x0000000CL
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_WR_POLICY_MASK 0x00000030L
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_WR_POLICY_MASK 0x000000C0L
+#define DB_RMI_L2_CACHE_CONTROL__Z_RD_POLICY_MASK 0x00030000L
+#define DB_RMI_L2_CACHE_CONTROL__S_RD_POLICY_MASK 0x000C0000L
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_RD_POLICY_MASK 0x00300000L
+#define DB_RMI_L2_CACHE_CONTROL__Z_BIG_PAGE_MASK 0x01000000L
+#define DB_RMI_L2_CACHE_CONTROL__S_BIG_PAGE_MASK 0x02000000L
+#define DB_RMI_L2_CACHE_CONTROL__Z_NOALLOC_MASK 0x04000000L
+#define DB_RMI_L2_CACHE_CONTROL__S_NOALLOC_MASK 0x08000000L
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_NOALLOC_MASK 0x10000000L
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_NOALLOC_MASK 0x20000000L
+//TA_BC_BASE_ADDR
+#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_BC_BASE_ADDR_HI
+#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_1
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_2
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_3
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_2
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_3
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_WINDOW_OFFSET
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
+//PA_SC_WINDOW_SCISSOR_TL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_WINDOW_SCISSOR_BR
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_RULE
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
+//PA_SC_CLIPRECT_0_TL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_0_BR
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_TL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_BR
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_TL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_BR
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_TL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_BR
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_EDGERULE
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
+//PA_SU_HARDWARE_SCREEN_OFFSET
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
+//CB_TARGET_MASK
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
+//CB_SHADER_MASK
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
+//PA_SC_GENERIC_SCISSOR_TL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_GENERIC_SCISSOR_BR
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//COHER_DEST_BASE_0
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_1
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_SCISSOR_0_TL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_0_BR
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_1_TL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_1_BR
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_2_TL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_2_BR
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_3_TL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_3_BR
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_4_TL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_4_BR
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_5_TL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_5_BR
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_6_TL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_6_BR
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_7_TL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_7_BR
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_8_TL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_8_BR
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_9_TL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_9_BR
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_10_TL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_10_BR
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_11_TL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_11_BR
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_12_TL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_12_BR
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_13_TL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_13_BR
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_14_TL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_14_BR
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_15_TL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_15_BR
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_ZMIN_0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_1
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_1
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_2
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_2
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_3
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_3
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_4
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_4
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_5
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_5
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_6
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_6
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_7
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_7
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_8
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_8
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_9
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_9
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_10
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_10
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_11
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_11
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_12
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_12
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_13
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_13
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_14
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_14
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_15
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_15
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_RASTER_CONFIG
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1c
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x0C000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0x30000000L
+//PA_SC_RASTER_CONFIG_1
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x00000030L
+//PA_SC_SCREEN_EXTENT_CONTROL
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
+//PA_SC_TILE_STEERING_OVERRIDE
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT 0xc
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT 0x10
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT 0x14
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK 0x00003000L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK 0x00030000L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK 0x00300000L
+//CP_PERFMON_CNTX_CNTL
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+//CP_PIPEID
+#define CP_PIPEID__PIPE_ID__SHIFT 0x0
+#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
+//CP_RINGID
+#define CP_RINGID__RINGID__SHIFT 0x0
+#define CP_RINGID__RINGID_MASK 0x00000003L
+//CP_VMID
+#define CP_VMID__VMID__SHIFT 0x0
+#define CP_VMID__VMID_MASK 0x0000000FL
+//CONTEXT_RESERVED_REG0
+#define CONTEXT_RESERVED_REG0__DATA__SHIFT 0x0
+#define CONTEXT_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//CONTEXT_RESERVED_REG1
+#define CONTEXT_RESERVED_REG1__DATA__SHIFT 0x0
+#define CONTEXT_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//PA_SC_VRS_OVERRIDE_CNTL
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_RATE__SHIFT 0x4
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_SURFACE_ENABLE__SHIFT 0xc
+#define PA_SC_VRS_OVERRIDE_CNTL__RATE_HINT_WRITE_BACK_ENABLE__SHIFT 0xd
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE__SHIFT 0xe
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_RATE_MASK 0x000000F0L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_SURFACE_ENABLE_MASK 0x00001000L
+#define PA_SC_VRS_OVERRIDE_CNTL__RATE_HINT_WRITE_BACK_ENABLE_MASK 0x00002000L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE_MASK 0x00004000L
+//PA_SC_VRS_RATE_FEEDBACK_BASE
+#define PA_SC_VRS_RATE_FEEDBACK_BASE__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VRS_RATE_FEEDBACK_BASE_EXT
+#define PA_SC_VRS_RATE_FEEDBACK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//PA_SC_VRS_RATE_FEEDBACK_SIZE_XY
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__X_MAX__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__Y_MAX__SHIFT 0x10
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__X_MAX_MASK 0x000007FFL
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__Y_MAX_MASK 0x07FF0000L
+//PA_SC_VRS_RATE_CACHE_CNTL
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_RD__SHIFT 0x0
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_WR__SHIFT 0x1
+#define PA_SC_VRS_RATE_CACHE_CNTL__L1_RD_POLICY__SHIFT 0x2
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_RD_POLICY__SHIFT 0x4
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_WR_POLICY__SHIFT 0x6
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_RD_NOALLOC__SHIFT 0x8
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_WR_NOALLOC__SHIFT 0x9
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_RD__SHIFT 0xa
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_WR__SHIFT 0xb
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_RD__SHIFT 0xc
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_WR__SHIFT 0xd
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_RD_MASK 0x00000001L
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_WR_MASK 0x00000002L
+#define PA_SC_VRS_RATE_CACHE_CNTL__L1_RD_POLICY_MASK 0x0000000CL
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_RD_POLICY_MASK 0x00000030L
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_WR_POLICY_MASK 0x000000C0L
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_RD_NOALLOC_MASK 0x00000100L
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_WR_NOALLOC_MASK 0x00000200L
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_RD_MASK 0x00000400L
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_WR_MASK 0x00000800L
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_RD_MASK 0x00001000L
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_WR_MASK 0x00002000L
+//PA_SC_VRS_RATE_BASE
+#define PA_SC_VRS_RATE_BASE__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VRS_RATE_BASE_EXT
+#define PA_SC_VRS_RATE_BASE_EXT__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_BASE_EXT__TB_SYNC_SIM_ID__SHIFT 0x1c
+#define PA_SC_VRS_RATE_BASE_EXT__BASE_256B_MASK 0x000000FFL
+#define PA_SC_VRS_RATE_BASE_EXT__TB_SYNC_SIM_ID_MASK 0xF0000000L
+//PA_SC_VRS_RATE_SIZE_XY
+#define PA_SC_VRS_RATE_SIZE_XY__X_MAX__SHIFT 0x0
+#define PA_SC_VRS_RATE_SIZE_XY__Y_MAX__SHIFT 0x10
+#define PA_SC_VRS_RATE_SIZE_XY__X_MAX_MASK 0x000007FFL
+#define PA_SC_VRS_RATE_SIZE_XY__Y_MAX_MASK 0x07FF0000L
+//VGT_MULTI_PRIM_IB_RESET_INDX
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
+//CB_RMI_GL2_CACHE_CONTROL
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_WR_POLICY__SHIFT 0x0
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_WR_POLICY__SHIFT 0x2
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_RD_POLICY__SHIFT 0x14
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_RD_POLICY__SHIFT 0x16
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_L3_BYPASS__SHIFT 0x1a
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_L3_BYPASS__SHIFT 0x1b
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_BIG_PAGE__SHIFT 0x1f
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_WR_POLICY_MASK 0x00000003L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_WR_POLICY_MASK 0x0000000CL
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_RD_POLICY_MASK 0x00300000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_RD_POLICY_MASK 0x00C00000L
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_L3_BYPASS_MASK 0x04000000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_L3_BYPASS_MASK 0x08000000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_BIG_PAGE_MASK 0x80000000L
+//CB_BLEND_RED
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
+#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
+//CB_BLEND_GREEN
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
+//CB_BLEND_BLUE
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
+//CB_BLEND_ALPHA
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
+//CB_FDCC_CONTROL
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_WATERMARK__SHIFT 0x2
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01__SHIFT 0x8
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE__SHIFT 0x9
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0xa
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01__SHIFT 0xc
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE__SHIFT 0xd
+#define CB_FDCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG__SHIFT 0xe
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_WATERMARK_MASK 0x0000007CL
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01_MASK 0x00000100L
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE_MASK 0x00000200L
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00000400L
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01_MASK 0x00001000L
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE_MASK 0x00002000L
+#define CB_FDCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG_MASK 0x00004000L
+//CB_COVERAGE_OUT_CONTROL
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_ENABLE__SHIFT 0x0
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_MRT__SHIFT 0x1
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_CHANNEL__SHIFT 0x4
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_SAMPLES__SHIFT 0x8
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_ENABLE_MASK 0x00000001L
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_MRT_MASK 0x0000000EL
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_CHANNEL_MASK 0x00000030L
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_SAMPLES_MASK 0x00000F00L
+//DB_STENCIL_CONTROL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
+//DB_STENCILREFMASK
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
+//DB_STENCILREFMASK_BF
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
+//PA_CL_VPORT_XSCALE
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_1
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_1
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_1
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_1
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_1
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_1
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_2
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_2
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_2
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_2
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_2
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_2
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_3
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_3
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_3
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_3
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_3
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_3
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_4
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_4
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_4
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_4
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_4
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_4
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_5
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_5
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_5
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_5
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_5
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_5
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_6
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_6
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_6
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_6
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_6
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_6
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_7
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_7
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_7
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_7
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_7
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_7
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_8
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_8
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_8
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_8
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_8
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_8
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_9
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_9
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_9
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_9
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_9
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_9
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_10
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_10
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_10
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_10
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_10
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_10
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_11
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_11
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_11
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_11
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_11
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_11
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_12
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_12
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_12
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_12
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_12
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_12
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_13
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_13
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_13
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_13
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_13
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_13
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_14
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_14
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_14
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_14
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_14
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_14
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_15
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_15
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_15
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_15
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_15
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_15
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_X
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Y
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Z
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_W
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_X
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Y
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Z
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_W
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_X
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Y
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Z
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_W
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_X
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Y
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Z
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_W
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_X
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Y
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Z
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_W
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_X
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Y
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Z
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_W
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_PROG_NEAR_CLIP_Z
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_RATE_CNTL
+#define PA_RATE_CNTL__VERTEX_RATE__SHIFT 0x0
+#define PA_RATE_CNTL__PRIM_RATE__SHIFT 0x4
+#define PA_RATE_CNTL__VERTEX_RATE_MASK 0x0000000FL
+#define PA_RATE_CNTL__PRIM_RATE_MASK 0x000000F0L
+//SPI_PS_INPUT_CNTL_0
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_0__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_0__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_0__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_1
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_1__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_1__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_1__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_2
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_2__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_2__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_2__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_3
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_3__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_3__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_3__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_4
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_4__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_4__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_4__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_5
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_5__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_5__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_5__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_6
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_6__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_6__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_6__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_7
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_7__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_7__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_7__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_8
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_8__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_8__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_8__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_9
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_9__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_9__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_9__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_10
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_10__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_10__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_10__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_11
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_11__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_11__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_11__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_12
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_12__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_12__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_12__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_13
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_13__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_13__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_13__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_14
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_14__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_14__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_14__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_15
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_15__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_15__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_15__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_16
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_16__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_16__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_16__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_17
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_17__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_17__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_17__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_18
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_18__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_18__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_18__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_19
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_19__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_19__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_19__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_20
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_20__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_20__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_20__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_21
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_21__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_21__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_21__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_22
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_22__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_22__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_22__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_23
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_23__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_23__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_23__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_24
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_24__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_24__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_24__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_25
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_25__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_25__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_25__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_26
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_26__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_26__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_26__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_27
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_27__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_27__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_27__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_28
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_28__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_28__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_28__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_29
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_29__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_29__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_29__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_30
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_30__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_30__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_30__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_31
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_31__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_31__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_31__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
+//SPI_VS_OUT_CONFIG
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
+#define SPI_VS_OUT_CONFIG__NO_PC_EXPORT__SHIFT 0x7
+#define SPI_VS_OUT_CONFIG__PRIM_EXPORT_COUNT__SHIFT 0x8
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
+#define SPI_VS_OUT_CONFIG__NO_PC_EXPORT_MASK 0x00000080L
+#define SPI_VS_OUT_CONFIG__PRIM_EXPORT_COUNT_MASK 0x00001F00L
+//SPI_PS_INPUT_ENA
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_PS_INPUT_ADDR
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_INTERP_CONTROL_0
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+//SPI_PS_IN_CONTROL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
+#define SPI_PS_IN_CONTROL__NUM_PRIM_INTERP__SHIFT 0x9
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
+#define SPI_PS_IN_CONTROL__PS_W32_EN__SHIFT 0xf
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
+#define SPI_PS_IN_CONTROL__NUM_PRIM_INTERP_MASK 0x00003E00L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+#define SPI_PS_IN_CONTROL__PS_W32_EN_MASK 0x00008000L
+//SPI_BARYC_CNTL
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+//SPI_TMPRING_SIZE
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x07FFF000L
+//SPI_GFX_SCRATCH_BASE_LO
+#define SPI_GFX_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define SPI_GFX_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//SPI_GFX_SCRATCH_BASE_HI
+#define SPI_GFX_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define SPI_GFX_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//SPI_SHADER_IDX_FORMAT
+#define SPI_SHADER_IDX_FORMAT__IDX0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_IDX_FORMAT__IDX0_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_POS_FORMAT
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_POS_FORMAT__POS4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_POS_FORMAT__POS4_EXPORT_FORMAT_MASK 0x000F0000L
+//SPI_SHADER_Z_FORMAT
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_COL_FORMAT
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
+//SX_PS_DOWNCONVERT_CONTROL
+#define SX_PS_DOWNCONVERT_CONTROL__MRT0_FMT_MAPPING_DISABLE__SHIFT 0x0
+#define SX_PS_DOWNCONVERT_CONTROL__MRT1_FMT_MAPPING_DISABLE__SHIFT 0x1
+#define SX_PS_DOWNCONVERT_CONTROL__MRT2_FMT_MAPPING_DISABLE__SHIFT 0x2
+#define SX_PS_DOWNCONVERT_CONTROL__MRT3_FMT_MAPPING_DISABLE__SHIFT 0x3
+#define SX_PS_DOWNCONVERT_CONTROL__MRT4_FMT_MAPPING_DISABLE__SHIFT 0x4
+#define SX_PS_DOWNCONVERT_CONTROL__MRT5_FMT_MAPPING_DISABLE__SHIFT 0x5
+#define SX_PS_DOWNCONVERT_CONTROL__MRT6_FMT_MAPPING_DISABLE__SHIFT 0x6
+#define SX_PS_DOWNCONVERT_CONTROL__MRT7_FMT_MAPPING_DISABLE__SHIFT 0x7
+#define SX_PS_DOWNCONVERT_CONTROL__MRT0_FMT_MAPPING_DISABLE_MASK 0x00000001L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT1_FMT_MAPPING_DISABLE_MASK 0x00000002L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT2_FMT_MAPPING_DISABLE_MASK 0x00000004L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT3_FMT_MAPPING_DISABLE_MASK 0x00000008L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT4_FMT_MAPPING_DISABLE_MASK 0x00000010L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT5_FMT_MAPPING_DISABLE_MASK 0x00000020L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT6_FMT_MAPPING_DISABLE_MASK 0x00000040L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT7_FMT_MAPPING_DISABLE_MASK 0x00000080L
+//SX_PS_DOWNCONVERT
+#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
+#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
+#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
+#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
+#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
+#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
+#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
+#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
+#define SX_PS_DOWNCONVERT__MRT0_MASK 0x0000000FL
+#define SX_PS_DOWNCONVERT__MRT1_MASK 0x000000F0L
+#define SX_PS_DOWNCONVERT__MRT2_MASK 0x00000F00L
+#define SX_PS_DOWNCONVERT__MRT3_MASK 0x0000F000L
+#define SX_PS_DOWNCONVERT__MRT4_MASK 0x000F0000L
+#define SX_PS_DOWNCONVERT__MRT5_MASK 0x00F00000L
+#define SX_PS_DOWNCONVERT__MRT6_MASK 0x0F000000L
+#define SX_PS_DOWNCONVERT__MRT7_MASK 0xF0000000L
+//SX_BLEND_OPT_EPSILON
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0x0000000FL
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0x000000F0L
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0x00000F00L
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0x0000F000L
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0x000F0000L
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0x00F00000L
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0x0F000000L
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xF0000000L
+//SX_BLEND_OPT_CONTROL
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x00000001L
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x00000002L
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x00000010L
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x00000020L
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x00000100L
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x00000200L
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x00001000L
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x00002000L
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x00010000L
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x00020000L
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x00100000L
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x00200000L
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x01000000L
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x02000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000L
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000L
+//SX_MRT0_BLEND_OPT
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT1_BLEND_OPT
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT2_BLEND_OPT
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT3_BLEND_OPT
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT4_BLEND_OPT
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT5_BLEND_OPT
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT6_BLEND_OPT
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT7_BLEND_OPT
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//CB_BLEND0_CONTROL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND1_CONTROL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND2_CONTROL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND3_CONTROL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND4_CONTROL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND5_CONTROL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND6_CONTROL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND7_CONTROL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//GFX_COPY_STATE
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//PA_CL_POINT_X_RAD
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_Y_RAD
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_SIZE
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_CULL_RAD
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//VGT_DMA_BASE_HI
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
+//VGT_DMA_BASE
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
+//VGT_DRAW_INITIATOR
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
+//VGT_EVENT_ADDRESS_REG
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
+//GE_MAX_OUTPUT_PER_SUBGROUP
+#define GE_MAX_OUTPUT_PER_SUBGROUP__MAX_VERTS_PER_SUBGROUP__SHIFT 0x0
+#define GE_MAX_OUTPUT_PER_SUBGROUP__MAX_VERTS_PER_SUBGROUP_MASK 0x000003FFL
+//DB_DEPTH_CONTROL
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
+//DB_EQAA
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+//CB_COLOR_CONTROL
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
+#define CB_COLOR_CONTROL__ENABLE_1FRAG_PS_INVOKE__SHIFT 0x1
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
+#define CB_COLOR_CONTROL__ENABLE_1FRAG_PS_INVOKE_MASK 0x00000002L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
+//DB_SHADER_CONTROL
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
+#define DB_SHADER_CONTROL__PRE_SHADER_DEPTH_COVERAGE_ENABLE__SHIFT 0x17
+#define DB_SHADER_CONTROL__OREO_BLEND_ENABLE__SHIFT 0x18
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_ENABLE__SHIFT 0x19
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE__SHIFT 0x1a
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
+#define DB_SHADER_CONTROL__PRE_SHADER_DEPTH_COVERAGE_ENABLE_MASK 0x00800000L
+#define DB_SHADER_CONTROL__OREO_BLEND_ENABLE_MASK 0x01000000L
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_ENABLE_MASK 0x02000000L
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_MASK 0x1C000000L
+//PA_CL_CLIP_CNTL
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA__SHIFT 0x1c
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA_MASK 0x10000000L
+//PA_SU_SC_MODE_CNTL
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
+#define PA_SU_SC_MODE_CNTL__KEEP_TOGETHER_ENABLE__SHIFT 0x18
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
+#define PA_SU_SC_MODE_CNTL__KEEP_TOGETHER_ENABLE_MASK 0x01000000L
+//PA_CL_VTE_CNTL
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+//PA_CL_VS_OUT_CNTL
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1b
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT 0x1c
+#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT 0x1d
+#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT 0x1e
+#define PA_CL_VS_OUT_CNTL__USE_VTX_FSR_SELECT__SHIFT 0x1f
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x08000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK 0x10000000L
+#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK 0x20000000L
+#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK 0x40000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_FSR_SELECT_MASK 0x80000000L
+//PA_CL_NANINF_CNTL
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+//PA_SU_LINE_STIPPLE_CNTL
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+//PA_SU_LINE_STIPPLE_SCALE
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
+//PA_SU_PRIM_FILTER_CNTL
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+//PA_SU_SMALL_PRIM_FILTER_CNTL
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE__SHIFT 0x6
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE_MASK 0x00000040L
+//PA_CL_NGG_CNTL
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_DEPTH__SHIFT 0x2
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_DEPTH_MASK 0x000003FCL
+//PA_SU_OVER_RASTERIZATION_CNTL
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
+//PA_STEREO_CNTL
+#define PA_STEREO_CNTL__STEREO_MODE__SHIFT 0x1
+#define PA_STEREO_CNTL__RT_SLICE_MODE__SHIFT 0x5
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET__SHIFT 0x8
+#define PA_STEREO_CNTL__VP_ID_MODE__SHIFT 0x10
+#define PA_STEREO_CNTL__VP_ID_OFFSET__SHIFT 0x13
+#define PA_STEREO_CNTL__FSR_MODE__SHIFT 0x18
+#define PA_STEREO_CNTL__FSR_OFFSET__SHIFT 0x1a
+#define PA_STEREO_CNTL__STEREO_MODE_MASK 0x0000001EL
+#define PA_STEREO_CNTL__RT_SLICE_MODE_MASK 0x000000E0L
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET_MASK 0x00000F00L
+#define PA_STEREO_CNTL__VP_ID_MODE_MASK 0x00070000L
+#define PA_STEREO_CNTL__VP_ID_OFFSET_MASK 0x00780000L
+#define PA_STEREO_CNTL__FSR_MODE_MASK 0x03000000L
+#define PA_STEREO_CNTL__FSR_OFFSET_MASK 0x0C000000L
+//PA_STATE_STEREO_X
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VRS_CNTL
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT 0x3
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT 0x6
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT 0x9
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT 0xd
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT 0xe
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK 0x00000038L
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK 0x000001C0L
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK 0x00000E00L
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK 0x00002000L
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK 0x00004000L
+//PA_SU_POINT_SIZE
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
+//PA_SU_POINT_MINMAX
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
+//PA_SU_LINE_CNTL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
+//PA_SC_LINE_STIPPLE
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+//VGT_HOS_MAX_TESS_LEVEL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_MIN_TESS_LEVEL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
+//PA_SC_MODE_CNTL_0
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
+//PA_SC_MODE_CNTL_1
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+//VGT_ENHANCE
+#define VGT_ENHANCE__MISC__SHIFT 0x0
+#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//IA_ENHANCE
+#define IA_ENHANCE__MISC__SHIFT 0x0
+#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_DMA_SIZE
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_DMA_MAX_SIZE
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
+//VGT_DMA_INDEX_TYPE
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
+#define VGT_DMA_INDEX_TYPE__ATC__SHIFT 0x8
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
+#define VGT_DMA_INDEX_TYPE__MTYPE__SHIFT 0xb
+#define VGT_DMA_INDEX_TYPE__DISABLE_INSTANCE_PACKING__SHIFT 0xe
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x000000C0L
+#define VGT_DMA_INDEX_TYPE__ATC_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+#define VGT_DMA_INDEX_TYPE__MTYPE_MASK 0x00003800L
+#define VGT_DMA_INDEX_TYPE__DISABLE_INSTANCE_PACKING_MASK 0x00004000L
+//WD_ENHANCE
+#define WD_ENHANCE__MISC__SHIFT 0x0
+#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_EN
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
+//VGT_DMA_NUM_INSTANCES
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_RESET
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
+//VGT_EVENT_INITIATOR
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//VGT_DRAW_PAYLOAD_CNTL
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PRIM_PAYLOAD__SHIFT 0x3
+#define VGT_DRAW_PAYLOAD_CNTL__EN_DRAW_VP__SHIFT 0x4
+#define VGT_DRAW_PAYLOAD_CNTL__EN_FSR__SHIFT 0x5
+#define VGT_DRAW_PAYLOAD_CNTL__EN_VRS_RATE__SHIFT 0x6
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PRIM_PAYLOAD_MASK 0x00000008L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_DRAW_VP_MASK 0x00000010L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_FSR_MASK 0x00000020L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_VRS_RATE_MASK 0x00000040L
+//VGT_ESGS_RING_ITEMSIZE
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_REUSE_OFF
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+//DB_HTILE_SURFACE
+#define DB_HTILE_SURFACE__RESERVED_FIELD_1__SHIFT 0x0
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
+#define DB_HTILE_SURFACE__RESERVED_FIELD_2__SHIFT 0x2
+#define DB_HTILE_SURFACE__RESERVED_FIELD_3__SHIFT 0x3
+#define DB_HTILE_SURFACE__RESERVED_FIELD_4__SHIFT 0x4
+#define DB_HTILE_SURFACE__RESERVED_FIELD_5__SHIFT 0xa
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
+#define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT 0x11
+#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
+#define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK 0x00000001L
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK 0x00000004L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_3_MASK 0x00000008L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_4_MASK 0x000003F0L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_5_MASK 0x0000FC00L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK 0x00020000L
+#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
+//DB_SRESULTS_COMPARE_STATE0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+//DB_SRESULTS_COMPARE_STATE1
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+//DB_PRELOAD_CONTROL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
+//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
+//VGT_GS_MAX_VERT_OUT
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
+//GE_NGG_SUBGRP_CNTL
+#define GE_NGG_SUBGRP_CNTL__PRIM_AMP_FACTOR__SHIFT 0x0
+#define GE_NGG_SUBGRP_CNTL__THDS_PER_SUBGRP__SHIFT 0x9
+#define GE_NGG_SUBGRP_CNTL__PRIM_AMP_FACTOR_MASK 0x000001FFL
+#define GE_NGG_SUBGRP_CNTL__THDS_PER_SUBGRP_MASK 0x0003FE00L
+//VGT_TESS_DISTRIBUTION
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
+//VGT_SHADER_STAGES_EN
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS__SHIFT 0x8
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
+#define VGT_SHADER_STAGES_EN__HS_W32_EN__SHIFT 0x15
+#define VGT_SHADER_STAGES_EN__GS_W32_EN__SHIFT 0x16
+#define VGT_SHADER_STAGES_EN__VS_W32_EN__SHIFT 0x17
+#define VGT_SHADER_STAGES_EN__NGG_WAVE_ID_EN__SHIFT 0x18
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_EN__SHIFT 0x19
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_NO_MSG__SHIFT 0x1a
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS_MASK 0x00000100L
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00180000L
+#define VGT_SHADER_STAGES_EN__HS_W32_EN_MASK 0x00200000L
+#define VGT_SHADER_STAGES_EN__GS_W32_EN_MASK 0x00400000L
+#define VGT_SHADER_STAGES_EN__VS_W32_EN_MASK 0x00800000L
+#define VGT_SHADER_STAGES_EN__NGG_WAVE_ID_EN_MASK 0x01000000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_EN_MASK 0x02000000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_NO_MSG_MASK 0x04000000L
+//VGT_LS_HS_CONFIG
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
+//VGT_TF_PARAM
+#define VGT_TF_PARAM__TYPE__SHIFT 0x0
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
+#define VGT_TF_PARAM__NOT_USED__SHIFT 0x9
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD__SHIFT 0xa
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
+#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
+#define VGT_TF_PARAM__DETECT_ONE__SHIFT 0x13
+#define VGT_TF_PARAM__DETECT_ZERO__SHIFT 0x14
+#define VGT_TF_PARAM__MTYPE__SHIFT 0x17
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__NOT_USED_MASK 0x00000200L
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD_MASK 0x00003C00L
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00018000L
+#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
+#define VGT_TF_PARAM__DETECT_ONE_MASK 0x00080000L
+#define VGT_TF_PARAM__DETECT_ZERO_MASK 0x00100000L
+#define VGT_TF_PARAM__MTYPE_MASK 0x03800000L
+//DB_ALPHA_TO_MASK
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+//PA_SU_POLY_OFFSET_DB_FMT_CNTL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+//PA_SU_POLY_OFFSET_CLAMP
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_SCALE
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_OFFSET
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_SCALE
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_OFFSET
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_GS_INSTANCE_CNT
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
+#define VGT_GS_INSTANCE_CNT__EN_MAX_VERT_OUT_PER_GS_INSTANCE__SHIFT 0x1f
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
+#define VGT_GS_INSTANCE_CNT__EN_MAX_VERT_OUT_PER_GS_INSTANCE_MASK 0x80000000L
+//PA_SC_CENTROID_PRIORITY_0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
+//PA_SC_CENTROID_PRIORITY_1
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
+//PA_SC_LINE_CNTL
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION__SHIFT 0xd
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION_MASK 0x00002000L
+//PA_SC_AA_CONFIG
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
+#define PA_SC_AA_CONFIG__SAMPLE_COVERAGE_ENCODING__SHIFT 0x1c
+#define PA_SC_AA_CONFIG__COVERED_CENTROID_IS_CENTER__SHIFT 0x1d
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
+#define PA_SC_AA_CONFIG__SAMPLE_COVERAGE_ENCODING_MASK 0x10000000L
+#define PA_SC_AA_CONFIG__COVERED_CENTROID_IS_CENTER_MASK 0x20000000L
+//PA_SU_VTX_CNTL
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+//PA_CL_GB_VERT_CLIP_ADJ
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_VERT_DISC_ADJ
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_CLIP_ADJ
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_DISC_ADJ
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_MASK_X0Y0_X1Y0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
+//PA_SC_AA_MASK_X0Y1_X1Y1
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
+//PA_SC_SHADER_CONTROL
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
+#define PA_SC_SHADER_CONTROL__WAVE_BREAK_REGION_SIZE__SHIFT 0x5
+#define PA_SC_SHADER_CONTROL__DISABLE_OREO_CONFLICT_QUAD__SHIFT 0x7
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
+#define PA_SC_SHADER_CONTROL__WAVE_BREAK_REGION_SIZE_MASK 0x00000060L
+#define PA_SC_SHADER_CONTROL__DISABLE_OREO_CONFLICT_QUAD_MASK 0x00000080L
+//PA_SC_BINNER_CNTL_0
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_0__BIN_MAPPING_MODE__SHIFT 0x1d
+#define PA_SC_BINNER_CNTL_0__FSR_EXPANSION_ENABLE__SHIFT 0x1f
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION_MASK 0x10000000L
+#define PA_SC_BINNER_CNTL_0__BIN_MAPPING_MODE_MASK 0x60000000L
+#define PA_SC_BINNER_CNTL_0__FSR_EXPANSION_ENABLE_MASK 0x80000000L
+//PA_SC_BINNER_CNTL_1
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
+//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MULT__SHIFT 0x19
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_PBB_MULT__SHIFT 0x1b
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MULT_MASK 0x06000000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_PBB_MULT_MASK 0x18000000L
+//PA_SC_NGG_MODE_CNTL
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
+#define PA_SC_NGG_MODE_CNTL__DISABLE_FPOG_AND_DEALLOC_CONFLICT__SHIFT 0xc
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_DEALLOC__SHIFT 0xd
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_ATTRIBUTES__SHIFT 0xe
+#define PA_SC_NGG_MODE_CNTL__MAX_FPOVS_IN_WAVE__SHIFT 0x10
+#define PA_SC_NGG_MODE_CNTL__MAX_ATTRIBUTES_IN_WAVE__SHIFT 0x18
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
+#define PA_SC_NGG_MODE_CNTL__DISABLE_FPOG_AND_DEALLOC_CONFLICT_MASK 0x00001000L
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_DEALLOC_MASK 0x00002000L
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_ATTRIBUTES_MASK 0x00004000L
+#define PA_SC_NGG_MODE_CNTL__MAX_FPOVS_IN_WAVE_MASK 0x00FF0000L
+#define PA_SC_NGG_MODE_CNTL__MAX_ATTRIBUTES_IN_WAVE_MASK 0xFF000000L
+//PA_SC_BINNER_CNTL_2
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_X_MULT_BY_1P5X__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_Y_MULT_BY_1P5X__SHIFT 0x1
+#define PA_SC_BINNER_CNTL_2__ENABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_2__DUAL_LIGHT_SHAFT_IN_DRAW__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_2__LIGHT_SHAFT_DRAW_CALL_LIMIT__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_2__CONTEXT_DONE_EVENTS_PER_BIN__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_2__ZPP_ENABLED__SHIFT 0xb
+#define PA_SC_BINNER_CNTL_2__ZPP_OPTIMIZATION_ENABLED__SHIFT 0xc
+#define PA_SC_BINNER_CNTL_2__ZPP_AREA_THRESHOLD__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_2__DISABLE_NOPCEXPORT_BREAKBATCH_CONDITION__SHIFT 0x15
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_X_MULT_BY_1P5X_MASK 0x00000001L
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_Y_MULT_BY_1P5X_MASK 0x00000002L
+#define PA_SC_BINNER_CNTL_2__ENABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_2__DUAL_LIGHT_SHAFT_IN_DRAW_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_2__LIGHT_SHAFT_DRAW_CALL_LIMIT_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_2__CONTEXT_DONE_EVENTS_PER_BIN_MASK 0x00000780L
+#define PA_SC_BINNER_CNTL_2__ZPP_ENABLED_MASK 0x00000800L
+#define PA_SC_BINNER_CNTL_2__ZPP_OPTIMIZATION_ENABLED_MASK 0x00001000L
+#define PA_SC_BINNER_CNTL_2__ZPP_AREA_THRESHOLD_MASK 0x001FE000L
+#define PA_SC_BINNER_CNTL_2__DISABLE_NOPCEXPORT_BREAKBATCH_CONDITION_MASK 0x00200000L
+//CB_COLOR0_BASE
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_VIEW
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR0_INFO
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR0_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR0_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR0_ATTRIB
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR0_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR0_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR0_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR0_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR0_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR0_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR0_FDCC_CONTROL
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR0_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR0_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR0_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR0_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR0_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR0_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR0_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR0_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR0_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR0_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR0_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR0_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR0_DCC_BASE
+#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_BASE
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_VIEW
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR1_INFO
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR1_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR1_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR1_ATTRIB
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR1_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR1_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR1_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR1_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR1_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR1_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR1_FDCC_CONTROL
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR1_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR1_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR1_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR1_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR1_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR1_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR1_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR1_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR1_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR1_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR1_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR1_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR1_DCC_BASE
+#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_BASE
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_VIEW
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR2_INFO
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR2_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR2_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR2_ATTRIB
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR2_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR2_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR2_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR2_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR2_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR2_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR2_FDCC_CONTROL
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR2_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR2_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR2_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR2_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR2_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR2_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR2_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR2_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR2_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR2_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR2_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR2_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR2_DCC_BASE
+#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_BASE
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_VIEW
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR3_INFO
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR3_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR3_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR3_ATTRIB
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR3_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR3_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR3_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR3_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR3_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR3_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR3_FDCC_CONTROL
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR3_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR3_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR3_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR3_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR3_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR3_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR3_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR3_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR3_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR3_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR3_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR3_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR3_DCC_BASE
+#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_BASE
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_VIEW
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR4_INFO
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR4_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR4_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR4_ATTRIB
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR4_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR4_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR4_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR4_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR4_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR4_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR4_FDCC_CONTROL
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR4_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR4_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR4_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR4_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR4_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR4_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR4_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR4_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR4_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR4_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR4_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR4_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR4_DCC_BASE
+#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_BASE
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_VIEW
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR5_INFO
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR5_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR5_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR5_ATTRIB
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR5_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR5_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR5_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR5_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR5_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR5_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR5_FDCC_CONTROL
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR5_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR5_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR5_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR5_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR5_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR5_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR5_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR5_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR5_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR5_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR5_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR5_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR5_DCC_BASE
+#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_BASE
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_VIEW
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR6_INFO
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR6_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR6_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR6_ATTRIB
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR6_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR6_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR6_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR6_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR6_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR6_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR6_FDCC_CONTROL
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR6_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR6_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR6_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR6_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR6_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR6_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR6_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR6_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR6_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR6_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR6_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR6_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR6_DCC_BASE
+#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_BASE
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_VIEW
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR7_INFO
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR7_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR7_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR7_ATTRIB
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR7_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR7_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR7_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR7_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR7_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR7_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR7_FDCC_CONTROL
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR7_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR7_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR7_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR7_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR7_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR7_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR7_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR7_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR7_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR7_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR7_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR7_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR7_DCC_BASE
+#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_BASE_EXT
+#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_BASE_EXT
+#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_BASE_EXT
+#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_BASE_EXT
+#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_BASE_EXT
+#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_BASE_EXT
+#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_BASE_EXT
+#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_BASE_EXT
+#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_DCC_BASE_EXT
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_DCC_BASE_EXT
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_DCC_BASE_EXT
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_DCC_BASE_EXT
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_DCC_BASE_EXT
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_DCC_BASE_EXT
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_DCC_BASE_EXT
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_DCC_BASE_EXT
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_ATTRIB2
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR1_ATTRIB2
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR2_ATTRIB2
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR3_ATTRIB2
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR4_ATTRIB2
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR5_ATTRIB2
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR6_ATTRIB2
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR7_ATTRIB2
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR0_ATTRIB3
+#define CB_COLOR0_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR0_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR0_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR0_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR0_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR1_ATTRIB3
+#define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR1_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR1_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR1_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR2_ATTRIB3
+#define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR2_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR2_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR2_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR3_ATTRIB3
+#define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR3_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR3_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR3_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR4_ATTRIB3
+#define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR4_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR4_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR4_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR5_ATTRIB3
+#define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR5_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR5_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR5_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR6_ATTRIB3
+#define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR6_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR6_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR6_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR7_ATTRIB3
+#define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR7_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR7_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR7_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+
+
+// addressBlock: gc_pfvf_cpdec
+//CONFIG_RESERVED_REG0
+#define CONFIG_RESERVED_REG0__DATA__SHIFT 0x0
+#define CONFIG_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//CONFIG_RESERVED_REG1
+#define CONFIG_RESERVED_REG1__DATA__SHIFT 0x0
+#define CONFIG_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_CNTL
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
+#define CP_MEC_CNTL__MEC_ME2_PIPE2_RESET__SHIFT 0x16
+#define CP_MEC_CNTL__MEC_ME2_PIPE3_RESET__SHIFT 0x17
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x1b
+#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
+#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
+#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
+#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE2_RESET_MASK 0x00400000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE3_RESET_MASK 0x00800000L
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x08000000L
+#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
+#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
+#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
+#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
+//CP_ME_CNTL
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
+#define CP_ME_CNTL__PFP_PIPE0_DISABLE__SHIFT 0xc
+#define CP_ME_CNTL__PFP_PIPE1_DISABLE__SHIFT 0xd
+#define CP_ME_CNTL__ME_PIPE0_DISABLE__SHIFT 0xe
+#define CP_ME_CNTL__ME_PIPE1_DISABLE__SHIFT 0xf
+#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
+#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
+#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
+#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
+#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
+#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__PFP_PIPE0_DISABLE_MASK 0x00001000L
+#define CP_ME_CNTL__PFP_PIPE1_DISABLE_MASK 0x00002000L
+#define CP_ME_CNTL__ME_PIPE0_DISABLE_MASK 0x00004000L
+#define CP_ME_CNTL__ME_PIPE1_DISABLE_MASK 0x00008000L
+#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
+#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
+#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
+#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
+#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
+#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+
+
+// addressBlock: gc_pfvf_grbmdec
+//GRBM_GFX_CNTL
+#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL__CTXID__SHIFT 0xb
+#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
+#define GRBM_GFX_CNTL__CTXID_MASK 0x00003800L
+//GRBM_NOWHERE
+#define GRBM_NOWHERE__DATA__SHIFT 0x0
+#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfvf_padec
+//PA_SC_VRS_SURFACE_CNTL
+#define PA_SC_VRS_SURFACE_CNTL__VRC_CONTEXT_DONE_SYNC_DISABLE__SHIFT 0x6
+#define PA_SC_VRS_SURFACE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE__SHIFT 0x7
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_EVENT_MASK_DISABLE__SHIFT 0x8
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PREFETCH_DISABLE__SHIFT 0xd
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_NO_INV_DISABLE__SHIFT 0xe
+#define PA_SC_VRS_SURFACE_CNTL__VRC_NONSTALLING_FLUSH_DISABLE__SHIFT 0xf
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PARTIAL_FLUSH_DISABLE__SHIFT 0x10
+#define PA_SC_VRS_SURFACE_CNTL__VRC_AUTO_FLUSH__SHIFT 0x11
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EOP_SYNC_DISABLE__SHIFT 0x12
+#define PA_SC_VRS_SURFACE_CNTL__VRC_MAX_TAGS__SHIFT 0x13
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EVICT_POINT__SHIFT 0x1a
+#define PA_SC_VRS_SURFACE_CNTL__VRC_CONTEXT_DONE_SYNC_DISABLE_MASK 0x00000040L
+#define PA_SC_VRS_SURFACE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE_MASK 0x00000080L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_EVENT_MASK_DISABLE_MASK 0x00001F00L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PREFETCH_DISABLE_MASK 0x00002000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_NO_INV_DISABLE_MASK 0x00004000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_NONSTALLING_FLUSH_DISABLE_MASK 0x00008000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PARTIAL_FLUSH_DISABLE_MASK 0x00010000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_AUTO_FLUSH_MASK 0x00020000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EOP_SYNC_DISABLE_MASK 0x00040000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_MAX_TAGS_MASK 0x03F80000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EVICT_POINT_MASK 0xFC000000L
+//PA_SC_ENHANCE
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
+//PA_SC_ENHANCE_1
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
+#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
+#define PA_SC_ENHANCE_1__DISABLE_NONBINNED_LIVE_PRIM_DG1_LS0_CL0_EOPKT_POKE__SHIFT 0x5
+#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
+#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
+#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_COUNT_PIXELS__SHIFT 0xd
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG__SHIFT 0x17
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER__SHIFT 0x19
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1a
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE__SHIFT 0x1b
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX__SHIFT 0x1c
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1__SHIFT 0x1d
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI__SHIFT 0x1e
+#define PA_SC_ENHANCE_1__DISABLE_FSR_NEAR_AXIS_LINE_VERT_ORDER_SORT_FIX__SHIFT 0x1f
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
+#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
+#define PA_SC_ENHANCE_1__DISABLE_NONBINNED_LIVE_PRIM_DG1_LS0_CL0_EOPKT_POKE_MASK 0x00000020L
+#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
+#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
+#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_COUNT_PIXELS_MASK 0x00002000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG_MASK 0x00800000L
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER_MASK 0x02000000L
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION_MASK 0x04000000L
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE_MASK 0x08000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_MASK 0x10000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1_MASK 0x20000000L
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI_MASK 0x40000000L
+#define PA_SC_ENHANCE_1__DISABLE_FSR_NEAR_AXIS_LINE_VERT_ORDER_SORT_FIX_MASK 0x80000000L
+//PA_SC_ENHANCE_2
+#define PA_SC_ENHANCE_2__DISABLE_SC_MEM_MACRO_FINE_CLOCK_GATE__SHIFT 0x0
+#define PA_SC_ENHANCE_2__DISABLE_SC_DB_QUAD_INTF_FINE_CLOCK_GATE__SHIFT 0x1
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_QUAD_INTF_FINE_CLOCK_GATE__SHIFT 0x2
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_PRIM_INTF_FINE_CLOCK_GATE__SHIFT 0x3
+#define PA_SC_ENHANCE_2__ENABLE_LPOV_WAVE_BREAK__SHIFT 0x4
+#define PA_SC_ENHANCE_2__ENABLE_FPOV_WAVE_BREAK__SHIFT 0x5
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PRIM_PAYLOAD__SHIFT 0x7
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPE_SWITCH__SHIFT 0x8
+#define PA_SC_ENHANCE_2__DISABLE_FULL_TILE_WAVE_BREAK__SHIFT 0x9
+#define PA_SC_ENHANCE_2__ENABLE_VPZ_INJECTION_BEFORE_NULL_PRIMS__SHIFT 0xa
+#define PA_SC_ENHANCE_2__PBB_TIMEOUT_THRESHOLD_MODE__SHIFT 0xb
+#define PA_SC_ENHANCE_2__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xc
+#define PA_SC_ENHANCE_2__DISABLE_SC_SPI_INTF_EARLY_WAKEUP__SHIFT 0xd
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_INTF_EARLY_WAKEUP__SHIFT 0xe
+#define PA_SC_ENHANCE_2__DISABLE_EXPOSED_GT_DETAIL_RATE_TILE_COV_ADJ__SHIFT 0xf
+#define PA_SC_ENHANCE_2__PBB_WARP_CLK_MAIN_CLK_WAKEUP__SHIFT 0x10
+#define PA_SC_ENHANCE_2__PBB_MAIN_CLK_REG_BUSY_WAKEUP__SHIFT 0x11
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPELINE_RESET__SHIFT 0x12
+#define PA_SC_ENHANCE_2__DISABLE_SC_DBR_DATAPATH_FGCG__SHIFT 0x15
+#define PA_SC_ENHANCE_2__FSR_BB_OPTIMIZATION_DISABLE_OVERRIDE__SHIFT 0x16
+#define PA_SC_ENHANCE_2__PROCESS_RESET_FORCE_STILE_MASK_TO_ZERO__SHIFT 0x17
+#define PA_SC_ENHANCE_2__BREAK_WHEN_ONE_NULL_PRIM_BATCH__SHIFT 0x1a
+#define PA_SC_ENHANCE_2__NULL_PRIM_BREAK_BATCH_LIMIT__SHIFT 0x1b
+#define PA_SC_ENHANCE_2__DISABLE_MAX_DEALLOC_FORCE_EOV_RESET_N_WAVES_COUNT__SHIFT 0x1e
+#define PA_SC_ENHANCE_2__RSVD__SHIFT 0x1f
+#define PA_SC_ENHANCE_2__DISABLE_SC_MEM_MACRO_FINE_CLOCK_GATE_MASK 0x00000001L
+#define PA_SC_ENHANCE_2__DISABLE_SC_DB_QUAD_INTF_FINE_CLOCK_GATE_MASK 0x00000002L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_QUAD_INTF_FINE_CLOCK_GATE_MASK 0x00000004L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_PRIM_INTF_FINE_CLOCK_GATE_MASK 0x00000008L
+#define PA_SC_ENHANCE_2__ENABLE_LPOV_WAVE_BREAK_MASK 0x00000010L
+#define PA_SC_ENHANCE_2__ENABLE_FPOV_WAVE_BREAK_MASK 0x00000020L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PRIM_PAYLOAD_MASK 0x00000080L
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPE_SWITCH_MASK 0x00000100L
+#define PA_SC_ENHANCE_2__DISABLE_FULL_TILE_WAVE_BREAK_MASK 0x00000200L
+#define PA_SC_ENHANCE_2__ENABLE_VPZ_INJECTION_BEFORE_NULL_PRIMS_MASK 0x00000400L
+#define PA_SC_ENHANCE_2__PBB_TIMEOUT_THRESHOLD_MODE_MASK 0x00000800L
+#define PA_SC_ENHANCE_2__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00001000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_SPI_INTF_EARLY_WAKEUP_MASK 0x00002000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_INTF_EARLY_WAKEUP_MASK 0x00004000L
+#define PA_SC_ENHANCE_2__DISABLE_EXPOSED_GT_DETAIL_RATE_TILE_COV_ADJ_MASK 0x00008000L
+#define PA_SC_ENHANCE_2__PBB_WARP_CLK_MAIN_CLK_WAKEUP_MASK 0x00010000L
+#define PA_SC_ENHANCE_2__PBB_MAIN_CLK_REG_BUSY_WAKEUP_MASK 0x00020000L
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPELINE_RESET_MASK 0x00040000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_DBR_DATAPATH_FGCG_MASK 0x00200000L
+#define PA_SC_ENHANCE_2__FSR_BB_OPTIMIZATION_DISABLE_OVERRIDE_MASK 0x00400000L
+#define PA_SC_ENHANCE_2__PROCESS_RESET_FORCE_STILE_MASK_TO_ZERO_MASK 0x00800000L
+#define PA_SC_ENHANCE_2__BREAK_WHEN_ONE_NULL_PRIM_BATCH_MASK 0x04000000L
+#define PA_SC_ENHANCE_2__NULL_PRIM_BREAK_BATCH_LIMIT_MASK 0x38000000L
+#define PA_SC_ENHANCE_2__DISABLE_MAX_DEALLOC_FORCE_EOV_RESET_N_WAVES_COUNT_MASK 0x40000000L
+#define PA_SC_ENHANCE_2__RSVD_MASK 0x80000000L
+//PA_SC_ENHANCE_3
+#define PA_SC_ENHANCE_3__FORCE_USE_OF_SC_CENTROID_DATA__SHIFT 0x0
+#define PA_SC_ENHANCE_3__DISABLE_RB_MASK_COPY_FOR_NONP2_SA_PAIR_HARVEST__SHIFT 0x2
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
+#define PA_SC_ENHANCE_3__DISABLE_PKR_BCI_QUAD_NEW_PRIM_DATA_LOAD_OPTIMIZATION__SHIFT 0x4
+#define PA_SC_ENHANCE_3__DISABLE_CP_CONTEXT_DONE_PERFCOUNT_SAMPLE_EN__SHIFT 0x5
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_FIRST_PHASE_FILTER__SHIFT 0x6
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER__SHIFT 0x7
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_FOR_PBB_BINNED_PRIMS__SHIFT 0x8
+#define PA_SC_ENHANCE_3__DISABLE_SET_VPZ_DIRTY_EOPKT_LAST_PHASE_ONLY__SHIFT 0x9
+#define PA_SC_ENHANCE_3__DISABLE_PBB_EOP_OPTIMIZATION_WITH_SAME_CONTEXT_BATCHES__SHIFT 0xa
+#define PA_SC_ENHANCE_3__DISABLE_FAST_NULL_PRIM_OPTIMIZATION__SHIFT 0xb
+#define PA_SC_ENHANCE_3__USE_PBB_PRIM_STORAGE_WHEN_STALLED__SHIFT 0xc
+#define PA_SC_ENHANCE_3__DISABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION__SHIFT 0xd
+#define PA_SC_ENHANCE_3__DISABLE_ZPRE_PASS_OPTIMIZATION__SHIFT 0xe
+#define PA_SC_ENHANCE_3__DISABLE_EVENT_INCLUSION_IN_CONTEXT_STATES_PER_BIN__SHIFT 0xf
+#define PA_SC_ENHANCE_3__DISABLE_PIXEL_WAIT_SYNC_COUNTERS__SHIFT 0x10
+#define PA_SC_ENHANCE_3__DISABLE_SC_CPG_PSINVOC_SEDC_ISOLATION_ACCUM__SHIFT 0x11
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_FB_FINE_CLOCK_GATE__SHIFT 0x12
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_CACHE_RD_FINE_CLOCK_GATE__SHIFT 0x13
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_REZ_CNT_FOR_SPI_BACKPRESSURE_ONLY__SHIFT 0x14
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_CLK_CNT_FOR_SPI_BACKPRESSURE_ONLY__SHIFT 0x15
+#define PA_SC_ENHANCE_3__DO_NOT_INCLUDE_OREO_WAVEID_IN_FORCE_EOV_MAX_CNT_DISABLE__SHIFT 0x16
+#define PA_SC_ENHANCE_3__DISABLE_PWS_PRE_DEPTH_WAIT_SYNC_VPZ_INSERTION__SHIFT 0x17
+#define PA_SC_ENHANCE_3__PKR_CNT_FORCE_EOV_AT_QS_EMPTY_ONLY__SHIFT 0x18
+#define PA_SC_ENHANCE_3__PKR_S0_FORCE_EOV_STALL__SHIFT 0x19
+#define PA_SC_ENHANCE_3__PKR_S1_FORCE_EOV_STALL__SHIFT 0x1a
+#define PA_SC_ENHANCE_3__PKR_S2_FORCE_EOV_STALL__SHIFT 0x1b
+#define PA_SC_ENHANCE_3__ECO_SPARE0__SHIFT 0x1c
+#define PA_SC_ENHANCE_3__ECO_SPARE1__SHIFT 0x1d
+#define PA_SC_ENHANCE_3__ECO_SPARE2__SHIFT 0x1e
+#define PA_SC_ENHANCE_3__ECO_SPARE3__SHIFT 0x1f
+#define PA_SC_ENHANCE_3__FORCE_USE_OF_SC_CENTROID_DATA_MASK 0x00000001L
+#define PA_SC_ENHANCE_3__DISABLE_RB_MASK_COPY_FOR_NONP2_SA_PAIR_HARVEST_MASK 0x00000004L
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_BCI_QUAD_NEW_PRIM_DATA_LOAD_OPTIMIZATION_MASK 0x00000010L
+#define PA_SC_ENHANCE_3__DISABLE_CP_CONTEXT_DONE_PERFCOUNT_SAMPLE_EN_MASK 0x00000020L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_FIRST_PHASE_FILTER_MASK 0x00000040L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_MASK 0x00000080L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_FOR_PBB_BINNED_PRIMS_MASK 0x00000100L
+#define PA_SC_ENHANCE_3__DISABLE_SET_VPZ_DIRTY_EOPKT_LAST_PHASE_ONLY_MASK 0x00000200L
+#define PA_SC_ENHANCE_3__DISABLE_PBB_EOP_OPTIMIZATION_WITH_SAME_CONTEXT_BATCHES_MASK 0x00000400L
+#define PA_SC_ENHANCE_3__DISABLE_FAST_NULL_PRIM_OPTIMIZATION_MASK 0x00000800L
+#define PA_SC_ENHANCE_3__USE_PBB_PRIM_STORAGE_WHEN_STALLED_MASK 0x00001000L
+#define PA_SC_ENHANCE_3__DISABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION_MASK 0x00002000L
+#define PA_SC_ENHANCE_3__DISABLE_ZPRE_PASS_OPTIMIZATION_MASK 0x00004000L
+#define PA_SC_ENHANCE_3__DISABLE_EVENT_INCLUSION_IN_CONTEXT_STATES_PER_BIN_MASK 0x00008000L
+#define PA_SC_ENHANCE_3__DISABLE_PIXEL_WAIT_SYNC_COUNTERS_MASK 0x00010000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_CPG_PSINVOC_SEDC_ISOLATION_ACCUM_MASK 0x00020000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_FB_FINE_CLOCK_GATE_MASK 0x00040000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_CACHE_RD_FINE_CLOCK_GATE_MASK 0x00080000L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_REZ_CNT_FOR_SPI_BACKPRESSURE_ONLY_MASK 0x00100000L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_CLK_CNT_FOR_SPI_BACKPRESSURE_ONLY_MASK 0x00200000L
+#define PA_SC_ENHANCE_3__DO_NOT_INCLUDE_OREO_WAVEID_IN_FORCE_EOV_MAX_CNT_DISABLE_MASK 0x00400000L
+#define PA_SC_ENHANCE_3__DISABLE_PWS_PRE_DEPTH_WAIT_SYNC_VPZ_INSERTION_MASK 0x00800000L
+#define PA_SC_ENHANCE_3__PKR_CNT_FORCE_EOV_AT_QS_EMPTY_ONLY_MASK 0x01000000L
+#define PA_SC_ENHANCE_3__PKR_S0_FORCE_EOV_STALL_MASK 0x02000000L
+#define PA_SC_ENHANCE_3__PKR_S1_FORCE_EOV_STALL_MASK 0x04000000L
+#define PA_SC_ENHANCE_3__PKR_S2_FORCE_EOV_STALL_MASK 0x08000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE0_MASK 0x10000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE1_MASK 0x20000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE2_MASK 0x40000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE3_MASK 0x80000000L
+//PA_SC_BINNER_CNTL_OVERRIDE
+#define PA_SC_BINNER_CNTL_OVERRIDE__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_OVERRIDE__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_OVERRIDE__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_OVERRIDE__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_OVERRIDE__DIRECT_OVERRIDE_MODE__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_OVERRIDE__OVERRIDE__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_OVERRIDE__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_OVERRIDE__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_OVERRIDE__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__DIRECT_OVERRIDE_MODE_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__OVERRIDE_MASK 0xF0000000L
+//PA_SC_PBB_OVERRIDE_FLAG
+#define PA_SC_PBB_OVERRIDE_FLAG__OVERRIDE__SHIFT 0x0
+#define PA_SC_PBB_OVERRIDE_FLAG__PIPE_ID__SHIFT 0x1
+#define PA_SC_PBB_OVERRIDE_FLAG__OVERRIDE_MASK 0x00000001L
+#define PA_SC_PBB_OVERRIDE_FLAG__PIPE_ID_MASK 0x00000002L
+//PA_SC_DSM_CNTL
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
+//PA_SC_TILE_STEERING_CREST_OVERRIDE
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SA_SELECT__SHIFT 0x8
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__FORCE_TILE_STEERING_OVERRIDE_USE__SHIFT 0x1f
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SA_SELECT_MASK 0x00000700L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__FORCE_TILE_STEERING_OVERRIDE_USE_MASK 0x80000000L
+//PA_SC_FIFO_SIZE
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
+//PA_SC_IF_FIFO_SIZE
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
+//PA_SC_PACKER_WAVE_ID_CNTL
+#define PA_SC_PACKER_WAVE_ID_CNTL__WAVE_TABLE_SIZE__SHIFT 0x0
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_DB_WAVE_IF_FIFO_SIZE__SHIFT 0xa
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_DB_WAVE_IF_FGCG_EN__SHIFT 0x10
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_SPI_WAVE_IF_FIFO_SIZE__SHIFT 0x11
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_SPI_WAVE_IF_FGCG_EN__SHIFT 0x17
+#define PA_SC_PACKER_WAVE_ID_CNTL__DEBUG_CONFLICT_QUAD__SHIFT 0x18
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_OREO_CONFLICT_QUAD__SHIFT 0x1f
+#define PA_SC_PACKER_WAVE_ID_CNTL__WAVE_TABLE_SIZE_MASK 0x000003FFL
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_DB_WAVE_IF_FIFO_SIZE_MASK 0x0000FC00L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_DB_WAVE_IF_FGCG_EN_MASK 0x00010000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_SPI_WAVE_IF_FIFO_SIZE_MASK 0x007E0000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_SPI_WAVE_IF_FGCG_EN_MASK 0x00800000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DEBUG_CONFLICT_QUAD_MASK 0x0F000000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_OREO_CONFLICT_QUAD_MASK 0x80000000L
+//PA_SC_ATM_CNTL
+#define PA_SC_ATM_CNTL__SC_PC_IF_SIZE__SHIFT 0x0
+#define PA_SC_ATM_CNTL__DISABLE_SC_PC_IF_FGCG_EN__SHIFT 0x7
+#define PA_SC_ATM_CNTL__MAX_ATTRIBUTES_IN_WAVE__SHIFT 0x8
+#define PA_SC_ATM_CNTL__DISABLE_MAX_ATTRIBUTES__SHIFT 0x10
+#define PA_SC_ATM_CNTL__SELECT_MAX_ATTRIBUTES__SHIFT 0x11
+#define PA_SC_ATM_CNTL__SC_PC_IF_SIZE_MASK 0x0000003FL
+#define PA_SC_ATM_CNTL__DISABLE_SC_PC_IF_FGCG_EN_MASK 0x00000080L
+#define PA_SC_ATM_CNTL__MAX_ATTRIBUTES_IN_WAVE_MASK 0x0000FF00L
+#define PA_SC_ATM_CNTL__DISABLE_MAX_ATTRIBUTES_MASK 0x00010000L
+#define PA_SC_ATM_CNTL__SELECT_MAX_ATTRIBUTES_MASK 0x00020000L
+//PA_SC_PKR_WAVE_TABLE_CNTL
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
+//PA_SC_FORCE_EOV_MAX_CNTS
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
+//PA_SC_BINNER_EVENT_CNTL_0
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_1
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_1__WAIT_SYNC__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_1__BIN_CONF_OVERRIDE_CHECK__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_1__WAIT_SYNC_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__BIN_CONF_OVERRIDE_CHECK_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_2
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_35__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_41__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_35_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_41_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_3
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_50__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_DRAW__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_PIPELINE_NOT_USED__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_3__DRAW_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_50_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_DRAW_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_PIPELINE_NOT_USED_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__DRAW_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_TIMEOUT_COUNTER
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_BINNER_PERF_CNTL_0
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
+//PA_SC_BINNER_PERF_CNTL_1
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
+//PA_SC_BINNER_PERF_CNTL_2
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
+//PA_SC_BINNER_PERF_CNTL_3
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_TRAP_SCREEN_HV_LOCK
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_PH_INTERFACE_FIFO_SIZE
+#define PA_PH_INTERFACE_FIFO_SIZE__PA_PH_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_PH_INTERFACE_FIFO_SIZE__PH_SC_IF_FIFO_SIZE__SHIFT 0x10
+#define PA_PH_INTERFACE_FIFO_SIZE__PA_PH_IF_FIFO_SIZE_MASK 0x000003FFL
+#define PA_PH_INTERFACE_FIFO_SIZE__PH_SC_IF_FIFO_SIZE_MASK 0x003F0000L
+//PA_PH_ENHANCE
+#define PA_PH_ENHANCE__ECO_SPARE0__SHIFT 0x0
+#define PA_PH_ENHANCE__ECO_SPARE1__SHIFT 0x1
+#define PA_PH_ENHANCE__ECO_SPARE2__SHIFT 0x2
+#define PA_PH_ENHANCE__ECO_SPARE3__SHIFT 0x3
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_FINE_CLOCK_GATE__SHIFT 0x4
+#define PA_PH_ENHANCE__DISABLE_FOPKT__SHIFT 0x5
+#define PA_PH_ENHANCE__DISABLE_FOPKT_SCAN_POST_RESET__SHIFT 0x6
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_CLKEN_CLOCK_GATE__SHIFT 0x7
+#define PA_PH_ENHANCE__DISABLE_PH_DEBUG_REG_FGCG__SHIFT 0x8
+#define PA_PH_ENHANCE__DISABLE_PH_PERF_REG_FGCG__SHIFT 0x9
+#define PA_PH_ENHANCE__ENABLE_PH_INTF_CLKEN_STRETCH__SHIFT 0xa
+#define PA_PH_ENHANCE__DISABLE_USE_LAST_PH_ARBITER_PERFCOUNTER_SAMPLE_EVENT__SHIFT 0xd
+#define PA_PH_ENHANCE__USE_PERFCOUNTER_START_STOP_EVENTS__SHIFT 0xe
+#define PA_PH_ENHANCE__FORCE_PH_PERFCOUNTER_SAMPLE_ENABLE_ON__SHIFT 0xf
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE__SHIFT 0x10
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_DISABLE__SHIFT 0x11
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_PERFCOUNTER_COUNT_MODE__SHIFT 0x12
+#define PA_PH_ENHANCE__ECO_SPARE0_MASK 0x00000001L
+#define PA_PH_ENHANCE__ECO_SPARE1_MASK 0x00000002L
+#define PA_PH_ENHANCE__ECO_SPARE2_MASK 0x00000004L
+#define PA_PH_ENHANCE__ECO_SPARE3_MASK 0x00000008L
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_FINE_CLOCK_GATE_MASK 0x00000010L
+#define PA_PH_ENHANCE__DISABLE_FOPKT_MASK 0x00000020L
+#define PA_PH_ENHANCE__DISABLE_FOPKT_SCAN_POST_RESET_MASK 0x00000040L
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_CLKEN_CLOCK_GATE_MASK 0x00000080L
+#define PA_PH_ENHANCE__DISABLE_PH_DEBUG_REG_FGCG_MASK 0x00000100L
+#define PA_PH_ENHANCE__DISABLE_PH_PERF_REG_FGCG_MASK 0x00000200L
+#define PA_PH_ENHANCE__ENABLE_PH_INTF_CLKEN_STRETCH_MASK 0x00001C00L
+#define PA_PH_ENHANCE__DISABLE_USE_LAST_PH_ARBITER_PERFCOUNTER_SAMPLE_EVENT_MASK 0x00002000L
+#define PA_PH_ENHANCE__USE_PERFCOUNTER_START_STOP_EVENTS_MASK 0x00004000L
+#define PA_PH_ENHANCE__FORCE_PH_PERFCOUNTER_SAMPLE_ENABLE_ON_MASK 0x00008000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_MASK 0x00010000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_DISABLE_MASK 0x00020000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_PERFCOUNTER_COUNT_MODE_MASK 0x00040000L
+//PA_SC_VRS_SURFACE_CNTL_1
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE__SHIFT 0x0
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_SHADER_KILL_ENABLE__SHIFT 0x1
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK_OPS_ENABLE__SHIFT 0x2
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_RATE_16XAA__SHIFT 0x3
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_Z_OR_STENCIL__SHIFT 0x4
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_PRE_SHADER_DEPTH_COVERAGE_ENABLED__SHIFT 0x5
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POST_DEPTH_IMPORT__SHIFT 0x6
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POPS__SHIFT 0x7
+#define PA_SC_VRS_SURFACE_CNTL_1__USE_ONLY_VRS_RATE_FINE_CFG__SHIFT 0x8
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_VRS_RATE_NORMALIZATION__SHIFT 0xc
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_PS_ITER_RATE_COMBINER_PASSTHRU_OVERRIDE__SHIFT 0xf
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_CMASK_RATE_HINT_FORCE_ZERO_OVERRIDE__SHIFT 0x13
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_DETAIL_TO_EXPOSED_RATE_CLAMPING__SHIFT 0x14
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_0__SHIFT 0x15
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_1__SHIFT 0x16
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_2__SHIFT 0x17
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_3__SHIFT 0x18
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_4__SHIFT 0x19
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_5__SHIFT 0x1a
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_6__SHIFT 0x1b
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_7__SHIFT 0x1c
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_8__SHIFT 0x1d
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_9__SHIFT 0x1e
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_10__SHIFT 0x1f
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK 0x00000001L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_SHADER_KILL_ENABLE_MASK 0x00000002L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK_OPS_ENABLE_MASK 0x00000004L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_RATE_16XAA_MASK 0x00000008L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_Z_OR_STENCIL_MASK 0x00000010L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_PRE_SHADER_DEPTH_COVERAGE_ENABLED_MASK 0x00000020L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POST_DEPTH_IMPORT_MASK 0x00000040L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POPS_MASK 0x00000080L
+#define PA_SC_VRS_SURFACE_CNTL_1__USE_ONLY_VRS_RATE_FINE_CFG_MASK 0x00000100L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_VRS_RATE_NORMALIZATION_MASK 0x00001000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_PS_ITER_RATE_COMBINER_PASSTHRU_OVERRIDE_MASK 0x00008000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_CMASK_RATE_HINT_FORCE_ZERO_OVERRIDE_MASK 0x00080000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_DETAIL_TO_EXPOSED_RATE_CLAMPING_MASK 0x00100000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_0_MASK 0x00200000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_1_MASK 0x00400000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_2_MASK 0x00800000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_3_MASK 0x01000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_4_MASK 0x02000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_5_MASK 0x04000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_6_MASK 0x08000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_7_MASK 0x10000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_8_MASK 0x20000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_9_MASK 0x40000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_10_MASK 0x80000000L
+
+
+// addressBlock: gc_pfvf_sqdec
+//SQ_RUNTIME_CONFIG
+#define SQ_RUNTIME_CONFIG__UNUSED_REGISTER__SHIFT 0x0
+#define SQ_RUNTIME_CONFIG__UNUSED_REGISTER_MASK 0x00000001L
+//SQ_DEBUG_STS_GLOBAL
+#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_BUSY__SHIFT 0x1
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA0__SHIFT 0x4
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA1__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_BUSY_MASK 0x00000002L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA0_MASK 0x0000FFF0L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA1_MASK 0x0FFF0000L
+//SQ_DEBUG_STS_GLOBAL2
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX0__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX1__SHIFT 0x8
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_COMPUTE__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX0_MASK 0x000000FFL
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX1_MASK 0x0000FF00L
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_COMPUTE_MASK 0x00FF0000L
+//SH_MEM_BASES
+#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
+#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
+#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
+#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
+//SH_MEM_CONFIG
+#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
+#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x2
+#define SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT 0xe
+#define SH_MEM_CONFIG__ICACHE_USE_GL1__SHIFT 0x12
+#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
+#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x0000000CL
+#define SH_MEM_CONFIG__INITIAL_INST_PREFETCH_MASK 0x0000C000L
+#define SH_MEM_CONFIG__ICACHE_USE_GL1_MASK 0x00040000L
+//SQ_DEBUG
+#define SQ_DEBUG__SINGLE_MEMOP__SHIFT 0x0
+#define SQ_DEBUG__SINGLE_ALU_OP__SHIFT 0x1
+#define SQ_DEBUG__WAIT_DEP_CTR_ZERO__SHIFT 0x2
+#define SQ_DEBUG__SINGLE_MEMOP_MASK 0x00000001L
+#define SQ_DEBUG__SINGLE_ALU_OP_MASK 0x00000002L
+#define SQ_DEBUG__WAIT_DEP_CTR_ZERO_MASK 0x00000004L
+//SQ_SHADER_TBA_LO
+#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TBA_HI
+#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TBA_HI__TRAP_EN__SHIFT 0x1f
+#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
+#define SQ_SHADER_TBA_HI__TRAP_EN_MASK 0x80000000L
+//SQ_SHADER_TMA_LO
+#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TMA_HI
+#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
+
+
+// addressBlock: gc_pfonly_cpdec
+//CP_DEBUG_2
+#define CP_DEBUG_2__CHIU_NOALLOC_OVERRIDE__SHIFT 0xc
+#define CP_DEBUG_2__RCIU_SECURE_CHECK_DISABLE__SHIFT 0xd
+#define CP_DEBUG_2__RB_PACKET_INJECTOR_DISABLE__SHIFT 0xe
+#define CP_DEBUG_2__CNTX_DONE_COPY_STATE_DISABLE__SHIFT 0xf
+#define CP_DEBUG_2__NOP_DISCARD_DISABLE__SHIFT 0x10
+#define CP_DEBUG_2__DC_INTERLEAVE_DISABLE__SHIFT 0x11
+#define CP_DEBUG_2__BC_LOOKUP_CB_DB_FLUSH_DISABLE__SHIFT 0x1b
+#define CP_DEBUG_2__DC_FORCE_CLK_EN__SHIFT 0x1c
+#define CP_DEBUG_2__DC_DISABLE_BROADCAST__SHIFT 0x1d
+#define CP_DEBUG_2__NOT_EOP_HW_DETECT_DISABLE__SHIFT 0x1e
+#define CP_DEBUG_2__PFP_DDID_HW_DETECT_DISABLE__SHIFT 0x1f
+#define CP_DEBUG_2__CHIU_NOALLOC_OVERRIDE_MASK 0x00001000L
+#define CP_DEBUG_2__RCIU_SECURE_CHECK_DISABLE_MASK 0x00002000L
+#define CP_DEBUG_2__RB_PACKET_INJECTOR_DISABLE_MASK 0x00004000L
+#define CP_DEBUG_2__CNTX_DONE_COPY_STATE_DISABLE_MASK 0x00008000L
+#define CP_DEBUG_2__NOP_DISCARD_DISABLE_MASK 0x00010000L
+#define CP_DEBUG_2__DC_INTERLEAVE_DISABLE_MASK 0x00020000L
+#define CP_DEBUG_2__BC_LOOKUP_CB_DB_FLUSH_DISABLE_MASK 0x08000000L
+#define CP_DEBUG_2__DC_FORCE_CLK_EN_MASK 0x10000000L
+#define CP_DEBUG_2__DC_DISABLE_BROADCAST_MASK 0x20000000L
+#define CP_DEBUG_2__NOT_EOP_HW_DETECT_DISABLE_MASK 0x40000000L
+#define CP_DEBUG_2__PFP_DDID_HW_DETECT_DISABLE_MASK 0x80000000L
+//CP_FETCHER_SOURCE
+#define CP_FETCHER_SOURCE__ME_SRC__SHIFT 0x0
+#define CP_FETCHER_SOURCE__ME_SRC_MASK 0x00000001L
+//CP_DFY_CNTL
+#define CP_DFY_CNTL__POLICY__SHIFT 0x8
+#define CP_DFY_CNTL__VOL__SHIFT 0xa
+#define CP_DFY_CNTL__MTYPE__SHIFT 0xc
+#define CP_DFY_CNTL__REPEATER_FGCG_DISABLE__SHIFT 0x19
+#define CP_DFY_CNTL__TPI_SDP_SEL__SHIFT 0x1a
+#define CP_DFY_CNTL__WRITE_DIS__SHIFT 0x1b
+#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
+#define CP_DFY_CNTL__MODE__SHIFT 0x1d
+#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DFY_CNTL__POLICY_MASK 0x00000300L
+#define CP_DFY_CNTL__VOL_MASK 0x00000400L
+#define CP_DFY_CNTL__MTYPE_MASK 0x00007000L
+#define CP_DFY_CNTL__REPEATER_FGCG_DISABLE_MASK 0x02000000L
+#define CP_DFY_CNTL__TPI_SDP_SEL_MASK 0x04000000L
+#define CP_DFY_CNTL__WRITE_DIS_MASK 0x08000000L
+#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
+#define CP_DFY_CNTL__MODE_MASK 0x60000000L
+#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
+//CP_DFY_STAT
+#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
+#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
+#define CP_DFY_STAT__BUSY__SHIFT 0x1f
+#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
+#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
+#define CP_DFY_STAT__BUSY_MASK 0x80000000L
+//CP_DFY_ADDR_HI
+#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DFY_ADDR_LO
+#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
+#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
+//CP_DFY_DATA_0
+#define CP_DFY_DATA_0__DATA__SHIFT 0x0
+#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_1
+#define CP_DFY_DATA_1__DATA__SHIFT 0x0
+#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_2
+#define CP_DFY_DATA_2__DATA__SHIFT 0x0
+#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_3
+#define CP_DFY_DATA_3__DATA__SHIFT 0x0
+#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_4
+#define CP_DFY_DATA_4__DATA__SHIFT 0x0
+#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_5
+#define CP_DFY_DATA_5__DATA__SHIFT 0x0
+#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_6
+#define CP_DFY_DATA_6__DATA__SHIFT 0x0
+#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_7
+#define CP_DFY_DATA_7__DATA__SHIFT 0x0
+#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_8
+#define CP_DFY_DATA_8__DATA__SHIFT 0x0
+#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_9
+#define CP_DFY_DATA_9__DATA__SHIFT 0x0
+#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_10
+#define CP_DFY_DATA_10__DATA__SHIFT 0x0
+#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_11
+#define CP_DFY_DATA_11__DATA__SHIFT 0x0
+#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_12
+#define CP_DFY_DATA_12__DATA__SHIFT 0x0
+#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_13
+#define CP_DFY_DATA_13__DATA__SHIFT 0x0
+#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_14
+#define CP_DFY_DATA_14__DATA__SHIFT 0x0
+#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_15
+#define CP_DFY_DATA_15__DATA__SHIFT 0x0
+#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_CMD
+#define CP_DFY_CMD__SIZE__SHIFT 0x10
+#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
+
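Status registers follow the same convention, mixing single-bit flags with multi-bit counters in one word. A hedged sketch of decoding a CP_DFY_STAT readback, not part of the header itself; the raw register read is assumed to have happened elsewhere and is passed in as `val`.

#include <stdbool.h>
#include <stdint.h>

/* Decode the fields of a CP_DFY_STAT readback into plain numbers. */
struct cp_dfy_stat {
	bool     busy;          /* BUSY, bit 31 */
	uint32_t tags_pending;  /* TAGS_PENDING, bits 26:16 */
	uint32_t burst_count;   /* BURST_COUNT, bits 15:0 */
};

static inline struct cp_dfy_stat cp_dfy_stat_decode(uint32_t val)
{
	struct cp_dfy_stat s;

	s.busy = (val & CP_DFY_STAT__BUSY_MASK) != 0;
	s.tags_pending = (val & CP_DFY_STAT__TAGS_PENDING_MASK) >>
			 CP_DFY_STAT__TAGS_PENDING__SHIFT;
	s.burst_count = (val & CP_DFY_STAT__BURST_COUNT_MASK) >>
			CP_DFY_STAT__BURST_COUNT__SHIFT;
	return s;
}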
+
+// addressBlock: gc_pfonly_cpphqddec
+//CP_HPD_MES_ROQ_OFFSETS
+#define CP_HPD_MES_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_MES_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_MES_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_MES_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_MES_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_MES_ROQ_OFFSETS__IB_OFFSET_MASK 0x007F0000L
+//CP_HPD_ROQ_OFFSETS
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x007F0000L
+//CP_HPD_STATUS0
+#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_HPD_STATUS0__MASTER_QUEUE_IDLE_DIS__SHIFT 0x1b
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK__SHIFT 0x1c
+#define CP_HPD_STATUS0__FREEZE_QUEUE_STATE__SHIFT 0x1e
+#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_HPD_STATUS0__MASTER_QUEUE_IDLE_DIS_MASK 0x08000000L
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK_MASK 0x30000000L
+#define CP_HPD_STATUS0__FREEZE_QUEUE_STATE_MASK 0x40000000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_didtdec
+//DIDT_INDEX_AUTO_INCR_EN
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN__SHIFT 0x0
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN_MASK 0x00000001L
+//DIDT_EDC_CTRL
+#define DIDT_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0xa
+#define DIDT_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0xe
+#define DIDT_EDC_CTRL__EDC_ALGORITHM_MODE__SHIFT 0xf
+#define DIDT_EDC_CTRL__EDC_AVGDIV__SHIFT 0x10
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_SEL__SHIFT 0x14
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_BIT_NUMS__SHIFT 0x15
+#define DIDT_EDC_CTRL__RLC_FORCE_STALL_EN__SHIFT 0x18
+#define DIDT_EDC_CTRL__RLC_STALL_LEVEL_SEL__SHIFT 0x19
+#define DIDT_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000003F0L
+#define DIDT_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x00003C00L
+#define DIDT_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00004000L
+#define DIDT_EDC_CTRL__EDC_ALGORITHM_MODE_MASK 0x00008000L
+#define DIDT_EDC_CTRL__EDC_AVGDIV_MASK 0x000F0000L
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_SEL_MASK 0x00100000L
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_BIT_NUMS_MASK 0x00E00000L
+#define DIDT_EDC_CTRL__RLC_FORCE_STALL_EN_MASK 0x01000000L
+#define DIDT_EDC_CTRL__RLC_STALL_LEVEL_SEL_MASK 0x02000000L
+//DIDT_EDC_THROTTLE_CTRL
+#define DIDT_EDC_THROTTLE_CTRL__SQ_STALL_EN__SHIFT 0x0
+#define DIDT_EDC_THROTTLE_CTRL__DB_STALL_EN__SHIFT 0x1
+#define DIDT_EDC_THROTTLE_CTRL__TCP_STALL_EN__SHIFT 0x2
+#define DIDT_EDC_THROTTLE_CTRL__TD_STALL_EN__SHIFT 0x3
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_EN__SHIFT 0x4
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_MODE__SHIFT 0x5
+#define DIDT_EDC_THROTTLE_CTRL__SQ_STALL_EN_MASK 0x00000001L
+#define DIDT_EDC_THROTTLE_CTRL__DB_STALL_EN_MASK 0x00000002L
+#define DIDT_EDC_THROTTLE_CTRL__TCP_STALL_EN_MASK 0x00000004L
+#define DIDT_EDC_THROTTLE_CTRL__TD_STALL_EN_MASK 0x00000008L
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_EN_MASK 0x00000010L
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_MODE_MASK 0x000000E0L
+//DIDT_EDC_THRESHOLD
+#define DIDT_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//DIDT_EDC_STALL_PATTERN_1_2
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_3_4
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_5_6
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_7
+#define DIDT_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_EDC_STATUS
+#define DIDT_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
+#define DIDT_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
+#define DIDT_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
+#define DIDT_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
+//DIDT_EDC_DYNAMIC_THRESHOLD_RO
+#define DIDT_EDC_DYNAMIC_THRESHOLD_RO__EDC_DYNAMIC_THRESHOLD_RO__SHIFT 0x0
+#define DIDT_EDC_DYNAMIC_THRESHOLD_RO__EDC_DYNAMIC_THRESHOLD_RO_MASK 0x00000001L
+//DIDT_EDC_OVERFLOW
+#define DIDT_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
+#define DIDT_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
+#define DIDT_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
+#define DIDT_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
+//DIDT_EDC_ROLLING_POWER_DELTA
+#define DIDT_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
+#define DIDT_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
+//DIDT_IND_INDEX
+#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
+#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xFFFFFFFFL
+//DIDT_IND_DATA
+#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
+#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfonly_spidec
+//SPI_CDBG_SYS_GFX
+#define SPI_CDBG_SYS_GFX__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_GFX__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_GFX__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_GFX__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_GFX__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_GFX__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_GFX__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_GFX__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_HP3D
+#define SPI_CDBG_SYS_HP3D__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_HP3D__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_HP3D__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_HP3D__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_HP3D__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_HP3D__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_HP3D__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_HP3D__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_CS0
+#define SPI_CDBG_SYS_CS0__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS0__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS0__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS0__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS0__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS0__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS0__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS0__PIPE3_MASK 0xFF000000L
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_LAUNCH__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL__STALL_LAUNCH_MASK 0x00000002L
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN__SHIFT 0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN__SHIFT 0x8
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN__SHIFT 0x10
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN__SHIFT 0x18
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN_MASK 0x000000FFL
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN_MASK 0x0000FF00L
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN_MASK 0x00FF0000L
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN_MASK 0xFF000000L
+//SPI_GDBG_WAVE_CNTL3
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L
+//SPI_RESET_DEBUG
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET__SHIFT 0x0
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID__SHIFT 0x1
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID__SHIFT 0x2
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE__SHIFT 0x3
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY__SHIFT 0x4
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_MASK 0x01L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID_MASK 0x02L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID_MASK 0x04L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE_MASK 0x08L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY_MASK 0x10L
+//SPI_ARB_CNTL_0
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
+//SPI_FEATURE_CTRL
+#define SPI_FEATURE_CTRL__TUNNELING_WAVE_LIMIT__SHIFT 0x0
+#define SPI_FEATURE_CTRL__RA_PROBE_IGNORE__SHIFT 0x4
+#define SPI_FEATURE_CTRL__PS_THROTTLE_MAX_WAVE_LIMIT__SHIFT 0x5
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_WIF_CTRL__SHIFT 0xb
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_OOO_CTRL__SHIFT 0xd
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_DISABLE__SHIFT 0xe
+#define SPI_FEATURE_CTRL__TUNNELING_WAVE_LIMIT_MASK 0x0000000FL
+#define SPI_FEATURE_CTRL__RA_PROBE_IGNORE_MASK 0x00000010L
+#define SPI_FEATURE_CTRL__PS_THROTTLE_MAX_WAVE_LIMIT_MASK 0x000007E0L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_WIF_CTRL_MASK 0x00001800L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_OOO_CTRL_MASK 0x00002000L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_DISABLE_MASK 0x00004000L
+//SPI_SHADER_RSRC_LIMIT_CTRL
+#define SPI_SHADER_RSRC_LIMIT_CTRL__WAVES_PER_SIMD32__SHIFT 0x0
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_PER_SIMD32__SHIFT 0x5
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_WRAP_DISABLE__SHIFT 0xc
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT__SHIFT 0xd
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_HIERARCHY_LEVEL__SHIFT 0x13
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT__SHIFT 0x14
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_HIERARCHY_LEVEL__SHIFT 0x1c
+#define SPI_SHADER_RSRC_LIMIT_CTRL__PERFORMANCE_LIMIT_ENABLE__SHIFT 0x1f
+#define SPI_SHADER_RSRC_LIMIT_CTRL__WAVES_PER_SIMD32_MASK 0x0000001FL
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_PER_SIMD32_MASK 0x00000FE0L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_WRAP_DISABLE_MASK 0x00001000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_MASK 0x0007E000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_HIERARCHY_LEVEL_MASK 0x00080000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_MASK 0x0FF00000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_HIERARCHY_LEVEL_MASK 0x10000000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__PERFORMANCE_LIMIT_ENABLE_MASK 0x80000000L
+//SPI_COMPUTE_WF_CTX_SAVE_STATUS
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE0_SAVE_BUSY__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE1_SAVE_BUSY__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE2_SAVE_BUSY__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE3_SAVE_BUSY__SHIFT 0x3
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE4_SAVE_BUSY__SHIFT 0x4
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE5_SAVE_BUSY__SHIFT 0x5
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE6_SAVE_BUSY__SHIFT 0x6
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE7_SAVE_BUSY__SHIFT 0x7
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE0_SAVE_BUSY__SHIFT 0x8
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE1_SAVE_BUSY__SHIFT 0x9
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE2_SAVE_BUSY__SHIFT 0xa
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE3_SAVE_BUSY__SHIFT 0xb
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE4_SAVE_BUSY__SHIFT 0xc
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE5_SAVE_BUSY__SHIFT 0xd
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE6_SAVE_BUSY__SHIFT 0xe
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE7_SAVE_BUSY__SHIFT 0xf
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE0_SAVE_BUSY__SHIFT 0x10
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE1_SAVE_BUSY__SHIFT 0x11
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE2_SAVE_BUSY__SHIFT 0x12
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE3_SAVE_BUSY__SHIFT 0x13
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE4_SAVE_BUSY__SHIFT 0x14
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE5_SAVE_BUSY__SHIFT 0x15
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE6_SAVE_BUSY__SHIFT 0x16
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE7_SAVE_BUSY__SHIFT 0x17
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE0_SAVE_BUSY__SHIFT 0x18
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE1_SAVE_BUSY__SHIFT 0x19
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE2_SAVE_BUSY__SHIFT 0x1a
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE3_SAVE_BUSY__SHIFT 0x1b
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE4_SAVE_BUSY__SHIFT 0x1c
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE5_SAVE_BUSY__SHIFT 0x1d
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE6_SAVE_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE7_SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE0_SAVE_BUSY_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE1_SAVE_BUSY_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE2_SAVE_BUSY_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE3_SAVE_BUSY_MASK 0x00000008L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE4_SAVE_BUSY_MASK 0x00000010L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE5_SAVE_BUSY_MASK 0x00000020L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE6_SAVE_BUSY_MASK 0x00000040L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE7_SAVE_BUSY_MASK 0x00000080L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE0_SAVE_BUSY_MASK 0x00000100L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE1_SAVE_BUSY_MASK 0x00000200L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE2_SAVE_BUSY_MASK 0x00000400L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE3_SAVE_BUSY_MASK 0x00000800L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE4_SAVE_BUSY_MASK 0x00001000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE5_SAVE_BUSY_MASK 0x00002000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE6_SAVE_BUSY_MASK 0x00004000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE7_SAVE_BUSY_MASK 0x00008000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE0_SAVE_BUSY_MASK 0x00010000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE1_SAVE_BUSY_MASK 0x00020000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE2_SAVE_BUSY_MASK 0x00040000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE3_SAVE_BUSY_MASK 0x00080000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE4_SAVE_BUSY_MASK 0x00100000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE5_SAVE_BUSY_MASK 0x00200000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE6_SAVE_BUSY_MASK 0x00400000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE7_SAVE_BUSY_MASK 0x00800000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE0_SAVE_BUSY_MASK 0x01000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE1_SAVE_BUSY_MASK 0x02000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE2_SAVE_BUSY_MASK 0x04000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE3_SAVE_BUSY_MASK 0x08000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE4_SAVE_BUSY_MASK 0x10000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE5_SAVE_BUSY_MASK 0x20000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE6_SAVE_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE7_SAVE_BUSY_MASK 0x80000000L
+
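The save-status word above is a regular 4-pipe by 8-queue bit matrix (PIPE0_QUEUE0 at bit 0 through PIPE3_QUEUE7 at bit 31), so the per-instance bit can also be derived arithmetically instead of naming each of the 32 masks. A small sketch under that assumption; the helper is hypothetical, not part of the header.

#include <stdbool.h>
#include <stdint.h>

/*
 * SPI_COMPUTE_WF_CTX_SAVE_STATUS packs one SAVE_BUSY flag per (pipe, queue),
 * with pipe N occupying bits [8*N + 7 : 8*N].
 */
static inline bool spi_ctx_save_busy(uint32_t status, unsigned int pipe, unsigned int queue)
{
	return status & (1u << (pipe * 8 + queue));
}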
+
+// addressBlock: gc_pfonly_tcpdec
+//TCP_INVALIDATE
+#define TCP_INVALIDATE__START__SHIFT 0x0
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+//TCP_STATUS
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
+#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
+#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
+#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
+#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
+#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
+#define TCP_STATUS__READ_BUSY__SHIFT 0x6
+#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
+#define TCP_STATUS__VM_BUSY__SHIFT 0x8
+#define TCP_STATUS__MEMIF_BUSY__SHIFT 0x9
+#define TCP_STATUS__GCR_BUSY__SHIFT 0xa
+#define TCP_STATUS__OFIFO_BUSY__SHIFT 0xb
+#define TCP_STATUS__OFIFO_QUEUE_BUSY__SHIFT 0xc
+#define TCP_STATUS__XNACK_PRT__SHIFT 0xf
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
+#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
+#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
+#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
+#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
+#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
+#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
+#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
+#define TCP_STATUS__MEMIF_BUSY_MASK 0x00000200L
+#define TCP_STATUS__GCR_BUSY_MASK 0x00000400L
+#define TCP_STATUS__OFIFO_BUSY_MASK 0x00000800L
+#define TCP_STATUS__OFIFO_QUEUE_BUSY_MASK 0x00003000L
+#define TCP_STATUS__XNACK_PRT_MASK 0x00008000L
+//TCP_CNTL
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
+#define TCP_CNTL__TD_DATA_EN_OVERRIDE__SHIFT 0x6
+#define TCP_CNTL__ENABLE_128B_DCC_COMP_READ_FOR_INDEP64__SHIFT 0x7
+#define TCP_CNTL__DISABLE_WRITE_COMBINING__SHIFT 0x9
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
+#define TCP_CNTL__FORCE_EOW_SET_CNT__SHIFT 0x16
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
+#define TCP_CNTL__FORCE_ORDER_BETWEEN_READ_WRITE_TO_SAME_ADDRESS__SHIFT 0x1d
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT__SHIFT 0x1f
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__TD_DATA_EN_OVERRIDE_MASK 0x00000040L
+#define TCP_CNTL__ENABLE_128B_DCC_COMP_READ_FOR_INDEP64_MASK 0x00000080L
+#define TCP_CNTL__DISABLE_WRITE_COMBINING_MASK 0x00000200L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
+#define TCP_CNTL__FORCE_EOW_SET_CNT_MASK 0x07C00000L
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+#define TCP_CNTL__FORCE_ORDER_BETWEEN_READ_WRITE_TO_SAME_ADDRESS_MASK 0x20000000L
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT_MASK 0x80000000L
+//TCP_CNTL2
+#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
+#define TCP_CNTL2__TCP_FMT_MGCG_DISABLE__SHIFT 0x8
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE__SHIFT 0x9
+#define TCP_CNTL2__TCP_WRITE_DATA_MGCG_DISABLE__SHIFT 0xa
+#define TCP_CNTL2__TCP_INNER_BLOCK_MGCG_DISABLE__SHIFT 0xb
+#define TCP_CNTL2__TCP_ADRS_IMG_CALC_MGCG_DISABLE__SHIFT 0xc
+#define TCP_CNTL2__V64_COMBINE_ENABLE__SHIFT 0xd
+#define TCP_CNTL2__TAGRAM_ADDR_SWIZZLE_DISABLE__SHIFT 0xe
+#define TCP_CNTL2__RETURN_ORDER_OVERRIDE__SHIFT 0xf
+#define TCP_CNTL2__POWER_OPT_DISABLE__SHIFT 0x10
+#define TCP_CNTL2__GCR_RSP_FGCG_DISABLE__SHIFT 0x11
+#define TCP_CNTL2__PERF_EN_OVERRIDE__SHIFT 0x12
+#define TCP_CNTL2__TC_TD_RAM_CLKEN_DISABLE__SHIFT 0x14
+#define TCP_CNTL2__TC_TD_DATA_CLKEN_DISABLE__SHIFT 0x15
+#define TCP_CNTL2__TCP_GL1_REQ_CLKEN_DISABLE__SHIFT 0x16
+#define TCP_CNTL2__TCP_GL1R_SRC_CLKEN_DISABLE__SHIFT 0x17
+#define TCP_CNTL2__SPARE_BIT__SHIFT 0x1a
+#define TCP_CNTL2__TAGRAM_XY_BIAS_OVERRIDE__SHIFT 0x1b
+#define TCP_CNTL2__TCP_REQ_MGCG_DISABLE__SHIFT 0x1d
+#define TCP_CNTL2__TCP_MISS_MGCG_DISABLE__SHIFT 0x1e
+#define TCP_CNTL2__DISABLE_MIPMAP_PARAM_CALC_SELF_GATING__SHIFT 0x1f
+#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
+#define TCP_CNTL2__TCP_FMT_MGCG_DISABLE_MASK 0x00000100L
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE_MASK 0x00000200L
+#define TCP_CNTL2__TCP_WRITE_DATA_MGCG_DISABLE_MASK 0x00000400L
+#define TCP_CNTL2__TCP_INNER_BLOCK_MGCG_DISABLE_MASK 0x00000800L
+#define TCP_CNTL2__TCP_ADRS_IMG_CALC_MGCG_DISABLE_MASK 0x00001000L
+#define TCP_CNTL2__V64_COMBINE_ENABLE_MASK 0x00002000L
+#define TCP_CNTL2__TAGRAM_ADDR_SWIZZLE_DISABLE_MASK 0x00004000L
+#define TCP_CNTL2__RETURN_ORDER_OVERRIDE_MASK 0x00008000L
+#define TCP_CNTL2__POWER_OPT_DISABLE_MASK 0x00010000L
+#define TCP_CNTL2__GCR_RSP_FGCG_DISABLE_MASK 0x00020000L
+#define TCP_CNTL2__PERF_EN_OVERRIDE_MASK 0x000C0000L
+#define TCP_CNTL2__TC_TD_RAM_CLKEN_DISABLE_MASK 0x00100000L
+#define TCP_CNTL2__TC_TD_DATA_CLKEN_DISABLE_MASK 0x00200000L
+#define TCP_CNTL2__TCP_GL1_REQ_CLKEN_DISABLE_MASK 0x00400000L
+#define TCP_CNTL2__TCP_GL1R_SRC_CLKEN_DISABLE_MASK 0x00800000L
+#define TCP_CNTL2__SPARE_BIT_MASK 0x04000000L
+#define TCP_CNTL2__TAGRAM_XY_BIAS_OVERRIDE_MASK 0x18000000L
+#define TCP_CNTL2__TCP_REQ_MGCG_DISABLE_MASK 0x20000000L
+#define TCP_CNTL2__TCP_MISS_MGCG_DISABLE_MASK 0x40000000L
+#define TCP_CNTL2__DISABLE_MIPMAP_PARAM_CALC_SELF_GATING_MASK 0x80000000L
+//TCP_CREDIT
+#define TCP_CREDIT__LFIFO_RAM_DEPTH__SHIFT 0x0
+#define TCP_CREDIT__GL1_REQ_CREDIT__SHIFT 0xa
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
+#define TCP_CREDIT__TD_RAM_CREDIT__SHIFT 0x17
+#define TCP_CREDIT__TD_DATA_CREDIT__SHIFT 0x1d
+#define TCP_CREDIT__LFIFO_RAM_DEPTH_MASK 0x000003FFL
+#define TCP_CREDIT__GL1_REQ_CREDIT_MASK 0x0000FC00L
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
+#define TCP_CREDIT__TD_RAM_CREDIT_MASK 0x0F800000L
+#define TCP_CREDIT__TD_DATA_CREDIT_MASK 0xE0000000L
+
+
+// addressBlock: gc_pfonly_gdsdec
+//GDS_ENHANCE2
+#define GDS_ENHANCE2__DISABLE_MEMORY_VIOLATION_REPORT__SHIFT 0x0
+#define GDS_ENHANCE2__GDS_INTERFACES_FGCG_OVERRIDE__SHIFT 0x1
+#define GDS_ENHANCE2__DISABLE_PIPE_MEMORY_RD_OPT__SHIFT 0x2
+#define GDS_ENHANCE2__UNUSED__SHIFT 0x3
+#define GDS_ENHANCE2__DISABLE_MEMORY_VIOLATION_REPORT_MASK 0x00000001L
+#define GDS_ENHANCE2__GDS_INTERFACES_FGCG_OVERRIDE_MASK 0x00000002L
+#define GDS_ENHANCE2__DISABLE_PIPE_MEMORY_RD_OPT_MASK 0x00000004L
+#define GDS_ENHANCE2__UNUSED_MASK 0xFFFFFFF8L
+//GDS_OA_CGPG_RESTORE
+#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
+#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
+#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
+#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
+#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
+#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
+#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
+#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
+#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
+#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
+
+
+// addressBlock: gc_pfonly_utcl1dec
+//UTCL1_CTRL_0
+#define UTCL1_CTRL_0__UTCL1_L0_REQ_VFIFO_DISABLE__SHIFT 0x0
+#define UTCL1_CTRL_0__UTCL1_UTCL2_INVACK_CDC_FIFO_DISABLE__SHIFT 0x1
+#define UTCL1_CTRL_0__RESERVED_0__SHIFT 0x2
+#define UTCL1_CTRL_0__UTCL1_UTCL2_REQ_CREDITS__SHIFT 0x3
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_CREDITS__SHIFT 0x9
+#define UTCL1_CTRL_0__UTCL1_LIMIT_INV_TO_ONE__SHIFT 0xd
+#define UTCL1_CTRL_0__UTCL1_LIMIT_XLAT_TO_ONE__SHIFT 0xe
+#define UTCL1_CTRL_0__UTCL1_UTCL2_FGCG_REPEATERS_OVERRIDE__SHIFT 0xf
+#define UTCL1_CTRL_0__UTCL1_INV_FILTER_VMID__SHIFT 0x10
+#define UTCL1_CTRL_0__UTCL1_RANGE_INV_FORCE_CHK_ALL__SHIFT 0x11
+#define UTCL1_CTRL_0__UTCL1_UTCL0_RET_FGCG_REPEATERS_OVERRIDE__SHIFT 0x12
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_FGCG_REPEATERS_OVERRIDE__SHIFT 0x13
+#define UTCL1_CTRL_0__GCRD_FGCG_DISABLE__SHIFT 0x14
+#define UTCL1_CTRL_0__UTCL1_MH_RANGE_INV_TO_VMID_OVERRIDE__SHIFT 0x15
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_DUPLICATES__SHIFT 0x16
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_REQUEST_SQUASHING__SHIFT 0x17
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_RECENT_BUFFER__SHIFT 0x18
+#define UTCL1_CTRL_0__UTCL1_XLAT_FAULT_LOCK_CTRL__SHIFT 0x19
+#define UTCL1_CTRL_0__UTCL1_REDUCE_CC_SIZE__SHIFT 0x1b
+#define UTCL1_CTRL_0__RESERVED_1__SHIFT 0x1d
+#define UTCL1_CTRL_0__MH_SPARE0__SHIFT 0x1e
+#define UTCL1_CTRL_0__RESERVED_2__SHIFT 0x1f
+#define UTCL1_CTRL_0__UTCL1_L0_REQ_VFIFO_DISABLE_MASK 0x00000001L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_INVACK_CDC_FIFO_DISABLE_MASK 0x00000002L
+#define UTCL1_CTRL_0__RESERVED_0_MASK 0x00000004L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_REQ_CREDITS_MASK 0x000001F8L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_CREDITS_MASK 0x00001E00L
+#define UTCL1_CTRL_0__UTCL1_LIMIT_INV_TO_ONE_MASK 0x00002000L
+#define UTCL1_CTRL_0__UTCL1_LIMIT_XLAT_TO_ONE_MASK 0x00004000L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_FGCG_REPEATERS_OVERRIDE_MASK 0x00008000L
+#define UTCL1_CTRL_0__UTCL1_INV_FILTER_VMID_MASK 0x00010000L
+#define UTCL1_CTRL_0__UTCL1_RANGE_INV_FORCE_CHK_ALL_MASK 0x00020000L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_RET_FGCG_REPEATERS_OVERRIDE_MASK 0x00040000L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_FGCG_REPEATERS_OVERRIDE_MASK 0x00080000L
+#define UTCL1_CTRL_0__GCRD_FGCG_DISABLE_MASK 0x00100000L
+#define UTCL1_CTRL_0__UTCL1_MH_RANGE_INV_TO_VMID_OVERRIDE_MASK 0x00200000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_DUPLICATES_MASK 0x00400000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_REQUEST_SQUASHING_MASK 0x00800000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_RECENT_BUFFER_MASK 0x01000000L
+#define UTCL1_CTRL_0__UTCL1_XLAT_FAULT_LOCK_CTRL_MASK 0x06000000L
+#define UTCL1_CTRL_0__UTCL1_REDUCE_CC_SIZE_MASK 0x18000000L
+#define UTCL1_CTRL_0__RESERVED_1_MASK 0x20000000L
+#define UTCL1_CTRL_0__MH_SPARE0_MASK 0x40000000L
+#define UTCL1_CTRL_0__RESERVED_2_MASK 0x80000000L
+//UTCL1_UTCL0_INVREQ_DISABLE
+#define UTCL1_UTCL0_INVREQ_DISABLE__UTCL1_UTCL0_INVREQ_DISABLE__SHIFT 0x0
+#define UTCL1_UTCL0_INVREQ_DISABLE__UTCL1_UTCL0_INVREQ_DISABLE_MASK 0xFFFFFFFFL
+//UTCL1_CTRL_2
+#define UTCL1_CTRL_2__UTCL1_RNG_TO_VMID_INV_OVRD__SHIFT 0x0
+#define UTCL1_CTRL_2__UTCL1_PMM_INTERRUPT_CREDITS_OVERRIDE__SHIFT 0x4
+#define UTCL1_CTRL_2__UTCL1_CACHE_WRITE_PERM__SHIFT 0xa
+#define UTCL1_CTRL_2__UTCL1_PAGE_OVRD_DISABLE__SHIFT 0xb
+#define UTCL1_CTRL_2__UTCL1_SPARE0__SHIFT 0xc
+#define UTCL1_CTRL_2__UTCL1_SPARE1__SHIFT 0xd
+#define UTCL1_CTRL_2__RESERVED__SHIFT 0xe
+#define UTCL1_CTRL_2__UTCL1_RNG_TO_VMID_INV_OVRD_MASK 0x0000000FL
+#define UTCL1_CTRL_2__UTCL1_PMM_INTERRUPT_CREDITS_OVERRIDE_MASK 0x000003F0L
+#define UTCL1_CTRL_2__UTCL1_CACHE_WRITE_PERM_MASK 0x00000400L
+#define UTCL1_CTRL_2__UTCL1_PAGE_OVRD_DISABLE_MASK 0x00000800L
+#define UTCL1_CTRL_2__UTCL1_SPARE0_MASK 0x00001000L
+#define UTCL1_CTRL_2__UTCL1_SPARE1_MASK 0x00002000L
+#define UTCL1_CTRL_2__RESERVED_MASK 0xFFFFC000L
+//UTCL1_FIFO_SIZING
+#define UTCL1_FIFO_SIZING__UTCL1_UTCL2_INVACK_CDC_FIFO_THRESH__SHIFT 0x0
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_LOW__SHIFT 0x3
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_HIGH__SHIFT 0x10
+#define UTCL1_FIFO_SIZING__UTCL1_UTCL2_INVACK_CDC_FIFO_THRESH_MASK 0x00000007L
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_LOW_MASK 0x0000FFF8L
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_HIGH_MASK 0xFFFF0000L
+//GCRD_SA0_TARGETS_DISABLE
+#define GCRD_SA0_TARGETS_DISABLE__GCRD_SA0_TARGETS_DISABLE__SHIFT 0x0
+#define GCRD_SA0_TARGETS_DISABLE__GCRD_SA0_TARGETS_DISABLE_MASK 0x0007FFFFL
+//GCRD_SA1_TARGETS_DISABLE
+#define GCRD_SA1_TARGETS_DISABLE__GCRD_SA1_TARGETS_DISABLE__SHIFT 0x0
+#define GCRD_SA1_TARGETS_DISABLE__GCRD_SA1_TARGETS_DISABLE_MASK 0x0007FFFFL
+//GCRD_CREDIT_SAFE
+#define GCRD_CREDIT_SAFE__GCRD_CHAIN_CREDIT_SAFE_REG__SHIFT 0x0
+#define GCRD_CREDIT_SAFE__GCRD_TARGET_CREDIT_SAFE_REG__SHIFT 0x4
+#define GCRD_CREDIT_SAFE__GCRD_CHAIN_CREDIT_SAFE_REG_MASK 0x00000007L
+#define GCRD_CREDIT_SAFE__GCRD_TARGET_CREDIT_SAFE_REG_MASK 0x00000070L
+
+
+// addressBlock: gc_pfonly_pmmdec
+//GCR_GENERAL_CNTL
+#define GCR_GENERAL_CNTL__FORCE_4K_L2_RESP__SHIFT 0x0
+#define GCR_GENERAL_CNTL__REDUCE_HALF_MAIN_WQ__SHIFT 0x1
+#define GCR_GENERAL_CNTL__REDUCE_HALF_PHY_WQ__SHIFT 0x2
+#define GCR_GENERAL_CNTL__FORCE_INV_ALL__SHIFT 0x3
+#define GCR_GENERAL_CNTL__HI_PRIORITY_CNTL__SHIFT 0x4
+#define GCR_GENERAL_CNTL__HI_PRIORITY_DISABLE__SHIFT 0x6
+#define GCR_GENERAL_CNTL__BIG_PAGE_FILTER_DISABLE__SHIFT 0x7
+#define GCR_GENERAL_CNTL__PERF_CNTR_ENABLE__SHIFT 0x8
+#define GCR_GENERAL_CNTL__FORCE_SINGLE_WQ__SHIFT 0x9
+#define GCR_GENERAL_CNTL__UTCL2_REQ_PERM__SHIFT 0xa
+#define GCR_GENERAL_CNTL__TARGET_MGCG_CLKEN_DIS__SHIFT 0xd
+#define GCR_GENERAL_CNTL__MIXED_RANGE_MODE_DIS__SHIFT 0xe
+#define GCR_GENERAL_CNTL__ENABLE_16K_UTCL2_REQ__SHIFT 0xf
+#define GCR_GENERAL_CNTL__DISABLE_FGCG__SHIFT 0x10
+#define GCR_GENERAL_CNTL__CLIENT_ID__SHIFT 0x14
+#define GCR_GENERAL_CNTL__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define GCR_GENERAL_CNTL__REDUCE_HALF_MAIN_WQ_MASK 0x00000002L
+#define GCR_GENERAL_CNTL__REDUCE_HALF_PHY_WQ_MASK 0x00000004L
+#define GCR_GENERAL_CNTL__FORCE_INV_ALL_MASK 0x00000008L
+#define GCR_GENERAL_CNTL__HI_PRIORITY_CNTL_MASK 0x00000030L
+#define GCR_GENERAL_CNTL__HI_PRIORITY_DISABLE_MASK 0x00000040L
+#define GCR_GENERAL_CNTL__BIG_PAGE_FILTER_DISABLE_MASK 0x00000080L
+#define GCR_GENERAL_CNTL__PERF_CNTR_ENABLE_MASK 0x00000100L
+#define GCR_GENERAL_CNTL__FORCE_SINGLE_WQ_MASK 0x00000200L
+#define GCR_GENERAL_CNTL__UTCL2_REQ_PERM_MASK 0x00001C00L
+#define GCR_GENERAL_CNTL__TARGET_MGCG_CLKEN_DIS_MASK 0x00002000L
+#define GCR_GENERAL_CNTL__MIXED_RANGE_MODE_DIS_MASK 0x00004000L
+#define GCR_GENERAL_CNTL__ENABLE_16K_UTCL2_REQ_MASK 0x00008000L
+#define GCR_GENERAL_CNTL__DISABLE_FGCG_MASK 0x00010000L
+#define GCR_GENERAL_CNTL__CLIENT_ID_MASK 0x1FF00000L
+//GCR_TARGET_DISABLE
+#define GCR_TARGET_DISABLE__DISABLE_SE0_PHY__SHIFT 0x0
+#define GCR_TARGET_DISABLE__DISABLE_SE0_VIRT__SHIFT 0x1
+#define GCR_TARGET_DISABLE__DISABLE_SE1_PHY__SHIFT 0x2
+#define GCR_TARGET_DISABLE__DISABLE_SE1_VIRT__SHIFT 0x3
+#define GCR_TARGET_DISABLE__DISABLE_SE2_PHY__SHIFT 0x4
+#define GCR_TARGET_DISABLE__DISABLE_SE2_VIRT__SHIFT 0x5
+#define GCR_TARGET_DISABLE__DISABLE_GL2A0_PHY__SHIFT 0x6
+#define GCR_TARGET_DISABLE__DISABLE_GL2A1_PHY__SHIFT 0x7
+#define GCR_TARGET_DISABLE__DISABLE_GL2A2_PHY__SHIFT 0x8
+#define GCR_TARGET_DISABLE__DISABLE_GL2A3_PHY__SHIFT 0x9
+#define GCR_TARGET_DISABLE__DISABLE_SE3_PHY__SHIFT 0xa
+#define GCR_TARGET_DISABLE__DISABLE_SE3_VIRT__SHIFT 0xb
+#define GCR_TARGET_DISABLE__DISABLE_SE4_PHY__SHIFT 0xc
+#define GCR_TARGET_DISABLE__DISABLE_SE4_VIRT__SHIFT 0xd
+#define GCR_TARGET_DISABLE__DISABLE_SE5_PHY__SHIFT 0xe
+#define GCR_TARGET_DISABLE__DISABLE_SE5_VIRT__SHIFT 0xf
+#define GCR_TARGET_DISABLE__SE0_INACTIVE_STATUS__SHIFT 0x10
+#define GCR_TARGET_DISABLE__SE1_INACTIVE_STATUS__SHIFT 0x11
+#define GCR_TARGET_DISABLE__SE2_INACTIVE_STATUS__SHIFT 0x12
+#define GCR_TARGET_DISABLE__SE3_INACTIVE_STATUS__SHIFT 0x13
+#define GCR_TARGET_DISABLE__SE4_INACTIVE_STATUS__SHIFT 0x14
+#define GCR_TARGET_DISABLE__SE5_INACTIVE_STATUS__SHIFT 0x15
+#define GCR_TARGET_DISABLE__DISABLE_SE0_PHY_MASK 0x00000001L
+#define GCR_TARGET_DISABLE__DISABLE_SE0_VIRT_MASK 0x00000002L
+#define GCR_TARGET_DISABLE__DISABLE_SE1_PHY_MASK 0x00000004L
+#define GCR_TARGET_DISABLE__DISABLE_SE1_VIRT_MASK 0x00000008L
+#define GCR_TARGET_DISABLE__DISABLE_SE2_PHY_MASK 0x00000010L
+#define GCR_TARGET_DISABLE__DISABLE_SE2_VIRT_MASK 0x00000020L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A0_PHY_MASK 0x00000040L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A1_PHY_MASK 0x00000080L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A2_PHY_MASK 0x00000100L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A3_PHY_MASK 0x00000200L
+#define GCR_TARGET_DISABLE__DISABLE_SE3_PHY_MASK 0x00000400L
+#define GCR_TARGET_DISABLE__DISABLE_SE3_VIRT_MASK 0x00000800L
+#define GCR_TARGET_DISABLE__DISABLE_SE4_PHY_MASK 0x00001000L
+#define GCR_TARGET_DISABLE__DISABLE_SE4_VIRT_MASK 0x00002000L
+#define GCR_TARGET_DISABLE__DISABLE_SE5_PHY_MASK 0x00004000L
+#define GCR_TARGET_DISABLE__DISABLE_SE5_VIRT_MASK 0x00008000L
+#define GCR_TARGET_DISABLE__SE0_INACTIVE_STATUS_MASK 0x00010000L
+#define GCR_TARGET_DISABLE__SE1_INACTIVE_STATUS_MASK 0x00020000L
+#define GCR_TARGET_DISABLE__SE2_INACTIVE_STATUS_MASK 0x00040000L
+#define GCR_TARGET_DISABLE__SE3_INACTIVE_STATUS_MASK 0x00080000L
+#define GCR_TARGET_DISABLE__SE4_INACTIVE_STATUS_MASK 0x00100000L
+#define GCR_TARGET_DISABLE__SE5_INACTIVE_STATUS_MASK 0x00200000L
+//GCR_CMD_STATUS
+#define GCR_CMD_STATUS__GCR_CONTROL__SHIFT 0x0
+#define GCR_CMD_STATUS__GCR_SRC__SHIFT 0x13
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN__SHIFT 0x17
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_VMID__SHIFT 0x18
+#define GCR_CMD_STATUS__UTCL2_NACK_STATUS__SHIFT 0x1c
+#define GCR_CMD_STATUS__GCR_SEQ_OP_ERROR__SHIFT 0x1e
+#define GCR_CMD_STATUS__UTCL2_NACK_ERROR__SHIFT 0x1f
+#define GCR_CMD_STATUS__GCR_CONTROL_MASK 0x0007FFFFL
+#define GCR_CMD_STATUS__GCR_SRC_MASK 0x00380000L
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_MASK 0x00800000L
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_VMID_MASK 0x0F000000L
+#define GCR_CMD_STATUS__UTCL2_NACK_STATUS_MASK 0x30000000L
+#define GCR_CMD_STATUS__GCR_SEQ_OP_ERROR_MASK 0x40000000L
+#define GCR_CMD_STATUS__UTCL2_NACK_ERROR_MASK 0x80000000L
+//GCR_SPARE
+#define GCR_SPARE__SPARE_BIT_1__SHIFT 0x1
+#define GCR_SPARE__SPARE_BIT_2__SHIFT 0x2
+#define GCR_SPARE__SPARE_BIT_3__SHIFT 0x3
+#define GCR_SPARE__SPARE_BIT_4__SHIFT 0x4
+#define GCR_SPARE__SPARE_BIT_5__SHIFT 0x5
+#define GCR_SPARE__SPARE_BIT_6__SHIFT 0x6
+#define GCR_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define GCR_SPARE__UTCL2_REQ_CREDIT__SHIFT 0x8
+#define GCR_SPARE__GCRD_GL2A_REQ_CREDIT__SHIFT 0x10
+#define GCR_SPARE__GCRD_SE_REQ_CREDIT__SHIFT 0x14
+#define GCR_SPARE__SPARE_BIT_31_24__SHIFT 0x18
+#define GCR_SPARE__SPARE_BIT_1_MASK 0x00000002L
+#define GCR_SPARE__SPARE_BIT_2_MASK 0x00000004L
+#define GCR_SPARE__SPARE_BIT_3_MASK 0x00000008L
+#define GCR_SPARE__SPARE_BIT_4_MASK 0x00000010L
+#define GCR_SPARE__SPARE_BIT_5_MASK 0x00000020L
+#define GCR_SPARE__SPARE_BIT_6_MASK 0x00000040L
+#define GCR_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define GCR_SPARE__UTCL2_REQ_CREDIT_MASK 0x0000FF00L
+#define GCR_SPARE__GCRD_GL2A_REQ_CREDIT_MASK 0x000F0000L
+#define GCR_SPARE__GCRD_SE_REQ_CREDIT_MASK 0x00F00000L
+#define GCR_SPARE__SPARE_BIT_31_24_MASK 0xFF000000L
+//PMM_CNTL2
+#define PMM_CNTL2__GCEA_MAM_DISABLE__SHIFT 0x0
+#define PMM_CNTL2__ABIT_FORCE_FLUSH_OVERRIDE__SHIFT 0x18
+#define PMM_CNTL2__ABIT_TIMER_FLUSH_OVERRIDE__SHIFT 0x19
+#define PMM_CNTL2__PMM_IH_INTERRUPT_CREDITS_OVERRIDE__SHIFT 0x1a
+#define PMM_CNTL2__ABIT_INTR_ON_FLUSH_DONE__SHIFT 0x1e
+#define PMM_CNTL2__RESERVED__SHIFT 0x1f
+#define PMM_CNTL2__GCEA_MAM_DISABLE_MASK 0x00FFFFFFL
+#define PMM_CNTL2__ABIT_FORCE_FLUSH_OVERRIDE_MASK 0x01000000L
+#define PMM_CNTL2__ABIT_TIMER_FLUSH_OVERRIDE_MASK 0x02000000L
+#define PMM_CNTL2__PMM_IH_INTERRUPT_CREDITS_OVERRIDE_MASK 0x3C000000L
+#define PMM_CNTL2__ABIT_INTR_ON_FLUSH_DONE_MASK 0x40000000L
+#define PMM_CNTL2__RESERVED_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_gccacdec
+//GC_CAC_CTRL_1
+#define GC_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define GC_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
+#define GC_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
+#define GC_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L
+//GC_CAC_CTRL_2
+#define GC_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE__SHIFT 0x1
+#define GC_CAC_CTRL_2__GC_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x2
+#define GC_CAC_CTRL_2__TOGGLE_EN__SHIFT 0x3
+#define GC_CAC_CTRL_2__INTR_EN__SHIFT 0x4
+#define GC_CAC_CTRL_2__CAC_COUNTER_SNAP_SEL__SHIFT 0x5
+#define GC_CAC_CTRL_2__SE_AGGR_ACC_EN__SHIFT 0x6
+#define GC_CAC_CTRL_2__GC_AGGR_ACC_EN__SHIFT 0xe
+#define GC_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE_MASK 0x00000002L
+#define GC_CAC_CTRL_2__GC_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000004L
+#define GC_CAC_CTRL_2__TOGGLE_EN_MASK 0x00000008L
+#define GC_CAC_CTRL_2__INTR_EN_MASK 0x00000010L
+#define GC_CAC_CTRL_2__CAC_COUNTER_SNAP_SEL_MASK 0x00000020L
+#define GC_CAC_CTRL_2__SE_AGGR_ACC_EN_MASK 0x00003FC0L
+#define GC_CAC_CTRL_2__GC_AGGR_ACC_EN_MASK 0x00004000L
+//GC_CAC_AGGR_LOWER
+#define GC_CAC_AGGR_LOWER__GC_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_AGGR_LOWER__GC_AGGR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_UPPER
+#define GC_CAC_AGGR_UPPER__GC_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_AGGR_UPPER__GC_AGGR_63_32_MASK 0xFFFFFFFFL
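The aggregate CAC counters are split across LOWER/UPPER register pairs (GC_AGGR_31_0 and GC_AGGR_63_32), so readers reassemble a 64-bit value from two 32-bit halves. A minimal sketch, not part of the header, assuming both halves have already been read into `lo` and `hi`:

#include <stdint.h>

/* Reassemble a 64-bit aggregate from the GC_CAC_AGGR_LOWER/UPPER halves. */
static inline uint64_t gc_cac_aggr(uint32_t lo, uint32_t hi)
{
	uint64_t low  = (lo & GC_CAC_AGGR_LOWER__GC_AGGR_31_0_MASK) >>
			GC_CAC_AGGR_LOWER__GC_AGGR_31_0__SHIFT;
	uint64_t high = (hi & GC_CAC_AGGR_UPPER__GC_AGGR_63_32_MASK) >>
			GC_CAC_AGGR_UPPER__GC_AGGR_63_32__SHIFT;

	return (high << 32) | low;
}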
+//SE0_CAC_AGGR_LOWER
+#define SE0_CAC_AGGR_LOWER__SE0_AGGR_31_0__SHIFT 0x0
+#define SE0_CAC_AGGR_LOWER__SE0_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_UPPER
+#define SE0_CAC_AGGR_UPPER__SE0_AGGR_63_32__SHIFT 0x0
+#define SE0_CAC_AGGR_UPPER__SE0_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_LOWER
+#define SE1_CAC_AGGR_LOWER__SE1_AGGR_31_0__SHIFT 0x0
+#define SE1_CAC_AGGR_LOWER__SE1_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_UPPER
+#define SE1_CAC_AGGR_UPPER__SE1_AGGR_63_32__SHIFT 0x0
+#define SE1_CAC_AGGR_UPPER__SE1_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_LOWER
+#define SE2_CAC_AGGR_LOWER__SE2_AGGR_31_0__SHIFT 0x0
+#define SE2_CAC_AGGR_LOWER__SE2_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_UPPER
+#define SE2_CAC_AGGR_UPPER__SE2_AGGR_63_32__SHIFT 0x0
+#define SE2_CAC_AGGR_UPPER__SE2_AGGR_63_32_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_GFXCLK_CYCLE
+#define GC_CAC_AGGR_GFXCLK_CYCLE__GC_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define GC_CAC_AGGR_GFXCLK_CYCLE__GC_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_GFXCLK_CYCLE
+#define SE0_CAC_AGGR_GFXCLK_CYCLE__SE0_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE0_CAC_AGGR_GFXCLK_CYCLE__SE0_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_GFXCLK_CYCLE
+#define SE1_CAC_AGGR_GFXCLK_CYCLE__SE1_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE1_CAC_AGGR_GFXCLK_CYCLE__SE1_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_GFXCLK_CYCLE
+#define SE2_CAC_AGGR_GFXCLK_CYCLE__SE2_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE2_CAC_AGGR_GFXCLK_CYCLE__SE2_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//GC_EDC_CTRL
+#define GC_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define GC_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0xa
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0xb
+#define GC_EDC_CTRL__EDC_LEVEL_SEL__SHIFT 0xf
+#define GC_EDC_CTRL__EDC_ALGORITHM_MODE__SHIFT 0x10
+#define GC_EDC_CTRL__EDC_AVGDIV__SHIFT 0x11
+#define GC_EDC_CTRL__PSM_THROTTLE_SRC_SEL__SHIFT 0x15
+#define GC_EDC_CTRL__THROTTLE_SRC0_MASK__SHIFT 0x18
+#define GC_EDC_CTRL__THROTTLE_SRC1_MASK__SHIFT 0x19
+#define GC_EDC_CTRL__THROTTLE_SRC2_MASK__SHIFT 0x1a
+#define GC_EDC_CTRL__THROTTLE_SRC3_MASK__SHIFT 0x1b
+#define GC_EDC_CTRL__EDC_CREDIT_SHIFT_BIT_NUMS__SHIFT 0x1c
+#define GC_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define GC_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define GC_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000003F0L
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00000400L
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00007800L
+#define GC_EDC_CTRL__EDC_LEVEL_SEL_MASK 0x00008000L
+#define GC_EDC_CTRL__EDC_ALGORITHM_MODE_MASK 0x00010000L
+#define GC_EDC_CTRL__EDC_AVGDIV_MASK 0x001E0000L
+#define GC_EDC_CTRL__PSM_THROTTLE_SRC_SEL_MASK 0x00E00000L
+#define GC_EDC_CTRL__THROTTLE_SRC0_MASK_MASK 0x01000000L
+#define GC_EDC_CTRL__THROTTLE_SRC1_MASK_MASK 0x02000000L
+#define GC_EDC_CTRL__THROTTLE_SRC2_MASK_MASK 0x04000000L
+#define GC_EDC_CTRL__THROTTLE_SRC3_MASK_MASK 0x08000000L
+#define GC_EDC_CTRL__EDC_CREDIT_SHIFT_BIT_NUMS_MASK 0xF0000000L
+//GC_EDC_THRESHOLD
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//GC_EDC_STRETCH_CTRL
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_EN__SHIFT 0x0
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_DELAY__SHIFT 0x1
+#define GC_EDC_STRETCH_CTRL__EDC_UNSTRETCH_DELAY__SHIFT 0xa
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_EN_MASK 0x00000001L
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_DELAY_MASK 0x000003FEL
+#define GC_EDC_STRETCH_CTRL__EDC_UNSTRETCH_DELAY_MASK 0x0007FC00L
+//GC_EDC_STRETCH_THRESHOLD
+#define GC_EDC_STRETCH_THRESHOLD__EDC_STRETCH_THRESHOLD__SHIFT 0x0
+#define GC_EDC_STRETCH_THRESHOLD__EDC_STRETCH_THRESHOLD_MASK 0xFFFFFFFFL
+//EDC_HYSTERESIS_CNTL
+#define EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_TIMER__SHIFT 0x8
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_EN__SHIFT 0x10
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_MODE__SHIFT 0x11
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_MODE__SHIFT 0x14
+#define EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_TIMER_MASK 0x0000FF00L
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_EN_MASK 0x00010000L
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_MODE_MASK 0x000E0000L
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_MODE_MASK 0x00100000L
+//GC_THROTTLE_CTRL
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST__SHIFT 0x0
+#define GC_THROTTLE_CTRL__GC_EDC_STALL_EN__SHIFT 0x1
+#define GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT 0x2
+#define GC_THROTTLE_CTRL__PWRBRK_POLARITY_CNTL__SHIFT 0x3
+#define GC_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x4
+#define GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT 0x5
+#define GC_THROTTLE_CTRL__GC_EDC_ONLY_MODE__SHIFT 0x6
+#define GC_THROTTLE_CTRL__GC_EDC_OVERRIDE__SHIFT 0x7
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE__SHIFT 0x8
+#define GC_THROTTLE_CTRL__PWRBRK_OVERRIDE__SHIFT 0x9
+#define GC_THROTTLE_CTRL__GC_EDC_PERF_COUNTER_EN__SHIFT 0xa
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN__SHIFT 0xb
+#define GC_THROTTLE_CTRL__PWRBRK_PERF_COUNTER_EN__SHIFT 0xc
+#define GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT 0xd
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_PERF_COUNTER_EN__SHIFT 0x17
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_LOG_INDEX__SHIFT 0x18
+#define GC_THROTTLE_CTRL__LUT_HW_UPDATE__SHIFT 0x1d
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_CLK_EN_OVERRIDE__SHIFT 0x1e
+#define GC_THROTTLE_CTRL__PCC_POLARITY_CNTL__SHIFT 0x1f
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST_MASK 0x00000001L
+#define GC_THROTTLE_CTRL__GC_EDC_STALL_EN_MASK 0x00000002L
+#define GC_THROTTLE_CTRL__PWRBRK_STALL_EN_MASK 0x00000004L
+#define GC_THROTTLE_CTRL__PWRBRK_POLARITY_CNTL_MASK 0x00000008L
+#define GC_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000010L
+#define GC_THROTTLE_CTRL__PATTERN_MODE_MASK 0x00000020L
+#define GC_THROTTLE_CTRL__GC_EDC_ONLY_MODE_MASK 0x00000040L
+#define GC_THROTTLE_CTRL__GC_EDC_OVERRIDE_MASK 0x00000080L
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE_MASK 0x00000100L
+#define GC_THROTTLE_CTRL__PWRBRK_OVERRIDE_MASK 0x00000200L
+#define GC_THROTTLE_CTRL__GC_EDC_PERF_COUNTER_EN_MASK 0x00000400L
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN_MASK 0x00000800L
+#define GC_THROTTLE_CTRL__PWRBRK_PERF_COUNTER_EN_MASK 0x00001000L
+#define GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL_MASK 0x007FE000L
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_PERF_COUNTER_EN_MASK 0x00800000L
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_LOG_INDEX_MASK 0x1F000000L
+#define GC_THROTTLE_CTRL__LUT_HW_UPDATE_MASK 0x20000000L
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_CLK_EN_OVERRIDE_MASK 0x40000000L
+#define GC_THROTTLE_CTRL__PCC_POLARITY_CNTL_MASK 0x80000000L
+//GC_THROTTLE_CTRL1
+#define GC_THROTTLE_CTRL1__PCC_FP_PROGRAM_STEP_EN__SHIFT 0x0
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MIN_STEP__SHIFT 0x1
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MAX_STEP__SHIFT 0x5
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_UPWARDS_STEP_SIZE__SHIFT 0xa
+#define GC_THROTTLE_CTRL1__PWRBRK_FP_PROGRAM_STEP_EN__SHIFT 0xd
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MIN_STEP__SHIFT 0xe
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MAX_STEP__SHIFT 0x12
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_UPWARDS_STEP_SIZE__SHIFT 0x17
+#define GC_THROTTLE_CTRL1__FIXED_PATTERN_SELECT__SHIFT 0x1a
+#define GC_THROTTLE_CTRL1__GC_EDC_STRETCH_PERF_COUNTER_EN__SHIFT 0x1e
+#define GC_THROTTLE_CTRL1__GC_EDC_UNSTRETCH_PERF_COUNTER_EN__SHIFT 0x1f
+#define GC_THROTTLE_CTRL1__PCC_FP_PROGRAM_STEP_EN_MASK 0x00000001L
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MIN_STEP_MASK 0x0000001EL
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MAX_STEP_MASK 0x000003E0L
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_UPWARDS_STEP_SIZE_MASK 0x00001C00L
+#define GC_THROTTLE_CTRL1__PWRBRK_FP_PROGRAM_STEP_EN_MASK 0x00002000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MIN_STEP_MASK 0x0003C000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MAX_STEP_MASK 0x007C0000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_UPWARDS_STEP_SIZE_MASK 0x03800000L
+#define GC_THROTTLE_CTRL1__FIXED_PATTERN_SELECT_MASK 0x0C000000L
+#define GC_THROTTLE_CTRL1__GC_EDC_STRETCH_PERF_COUNTER_EN_MASK 0x40000000L
+#define GC_THROTTLE_CTRL1__GC_EDC_UNSTRETCH_PERF_COUNTER_EN_MASK 0x80000000L
+//PCC_STALL_PATTERN_CTRL
+#define PCC_STALL_PATTERN_CTRL__PCC_STEP_INTERVAL__SHIFT 0x0
+#define PCC_STALL_PATTERN_CTRL__PCC_BEGIN_STEP__SHIFT 0xa
+#define PCC_STALL_PATTERN_CTRL__PCC_END_STEP__SHIFT 0xf
+#define PCC_STALL_PATTERN_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x14
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_INCR__SHIFT 0x18
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_DECR__SHIFT 0x19
+#define PCC_STALL_PATTERN_CTRL__PCC_DITHER_MODE__SHIFT 0x1a
+#define PCC_STALL_PATTERN_CTRL__PCC_STEP_INTERVAL_MASK 0x000003FFL
+#define PCC_STALL_PATTERN_CTRL__PCC_BEGIN_STEP_MASK 0x00007C00L
+#define PCC_STALL_PATTERN_CTRL__PCC_END_STEP_MASK 0x000F8000L
+#define PCC_STALL_PATTERN_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00F00000L
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_INCR_MASK 0x01000000L
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_DECR_MASK 0x02000000L
+#define PCC_STALL_PATTERN_CTRL__PCC_DITHER_MODE_MASK 0x04000000L
+//PWRBRK_STALL_PATTERN_CTRL
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT 0xa
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT 0xf
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x14
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL_MASK 0x000003FFL
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP_MASK 0x00007C00L
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP_MASK 0x000F8000L
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00F00000L
+//PCC_STALL_PATTERN_1_2
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1__SHIFT 0x0
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2__SHIFT 0x10
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_3_4
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3__SHIFT 0x0
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4__SHIFT 0x10
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_5_6
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5__SHIFT 0x0
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6__SHIFT 0x10
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_7
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7__SHIFT 0x0
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7_MASK 0x00007FFFL
+//PWRBRK_STALL_PATTERN_1_2
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_1__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_2__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_3_4
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_3__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_4__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_5_6
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_5__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_6__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_7
+#define PWRBRK_STALL_PATTERN_7__PWRBRK_STALL_PATTERN_7__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_7__PWRBRK_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_STALL_PATTERN_CTRL
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CTRL_EN__SHIFT 0x0
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_SW_RST__SHIFT 0x1
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_STALL_PATTERN_CTRL__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0x3
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_EN__SHIFT 0x7
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_MODE__SHIFT 0x8
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CTRL_EN_MASK 0x00000001L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_SW_RST_MASK 0x00000002L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x00000078L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_EN_MASK 0x00000080L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_MODE_MASK 0x00000700L
+//DIDT_STALL_PATTERN_1_2
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_3_4
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_5_6
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_7
+#define DIDT_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//PCC_PWRBRK_HYSTERESIS_CTRL
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PCC_MAX_HYSTERESIS__SHIFT 0x0
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PWRBRK_MAX_HYSTERESIS__SHIFT 0x8
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PCC_MAX_HYSTERESIS_MASK 0x000000FFL
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PWRBRK_MAX_HYSTERESIS_MASK 0x0000FF00L
+//EDC_STRETCH_PERF_COUNTER
+#define EDC_STRETCH_PERF_COUNTER__STRETCH_PERF_COUNTER__SHIFT 0x0
+#define EDC_STRETCH_PERF_COUNTER__STRETCH_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_UNSTRETCH_PERF_COUNTER
+#define EDC_UNSTRETCH_PERF_COUNTER__UNSTRETCH_PERF_COUNTER__SHIFT 0x0
+#define EDC_UNSTRETCH_PERF_COUNTER__UNSTRETCH_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_STRETCH_NUM_PERF_COUNTER
+#define EDC_STRETCH_NUM_PERF_COUNTER__STRETCH_NUM_PERF_COUNTER__SHIFT 0x0
+#define EDC_STRETCH_NUM_PERF_COUNTER__STRETCH_NUM_PERF_COUNTER_MASK 0xFFFFFFFFL
+//GC_EDC_STATUS
+#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x0
+#define GC_EDC_STATUS__GPIO_IN_0__SHIFT 0x3
+#define GC_EDC_STATUS__GPIO_IN_1__SHIFT 0x4
+#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x00000007L
+#define GC_EDC_STATUS__GPIO_IN_0_MASK 0x00000008L
+#define GC_EDC_STATUS__GPIO_IN_1_MASK 0x00000010L
+//GC_EDC_OVERFLOW
+#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
+#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
+#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
+#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
+//GC_EDC_ROLLING_POWER_DELTA
+#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
+#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
+//GC_THROTTLE_STATUS
+#define GC_THROTTLE_STATUS__FSM_STATE__SHIFT 0x0
+#define GC_THROTTLE_STATUS__PATTERN_INDEX__SHIFT 0x4
+#define GC_THROTTLE_STATUS__FSM_STATE_MASK 0x0000000FL
+#define GC_THROTTLE_STATUS__PATTERN_INDEX_MASK 0x000001F0L
+//EDC_PERF_COUNTER
+#define EDC_PERF_COUNTER__EDC_PERF_COUNTER__SHIFT 0x0
+#define EDC_PERF_COUNTER__EDC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//PCC_PERF_COUNTER
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER__SHIFT 0x0
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//PWRBRK_PERF_COUNTER
+#define PWRBRK_PERF_COUNTER__PWRBRK_PERF_COUNTER__SHIFT 0x0
+#define PWRBRK_PERF_COUNTER__PWRBRK_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_HYSTERESIS_STAT
+#define EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define EDC_HYSTERESIS_STAT__EDC_STATUS__SHIFT 0x8
+#define EDC_HYSTERESIS_STAT__EDC_CREDIT_INCR_OVERFLOW__SHIFT 0x9
+#define EDC_HYSTERESIS_STAT__EDC_THRESHOLD_SEL__SHIFT 0xa
+#define EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define EDC_HYSTERESIS_STAT__EDC_STATUS_MASK 0x00000100L
+#define EDC_HYSTERESIS_STAT__EDC_CREDIT_INCR_OVERFLOW_MASK 0x00000200L
+#define EDC_HYSTERESIS_STAT__EDC_THRESHOLD_SEL_MASK 0x00000400L
+//GC_CAC_WEIGHT_CP_0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CP_1
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_EA_0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_1
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_2
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_1
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_2
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_3
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_4
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_1
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_2
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_UTCL2_WALKER_0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_1
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_2
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GDS_0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_1
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_2
+#define GC_CAC_WEIGHT_GDS_2__WEIGHT_GDS_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_2__WEIGHT_GDS_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GE_0
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_1
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_2
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_3
+#define GC_CAC_WEIGHT_GE_3__WEIGHT_GE_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_3__WEIGHT_GE_SIG6_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_PMM_0
+#define GC_CAC_WEIGHT_PMM_0__WEIGHT_PMM_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PMM_0__WEIGHT_PMM_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GL2C_0
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GL2C_1
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GL2C_2
+#define GC_CAC_WEIGHT_GL2C_2__WEIGHT_GL2C_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_2__WEIGHT_GL2C_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_PH_0
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_1
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_2
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_3
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_0
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_1
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_2
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_3
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_4
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_5
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG10_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG11_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CHC_0
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CHC_1
+#define GC_CAC_WEIGHT_CHC_1__WEIGHT_CHC_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CHC_1__WEIGHT_CHC_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GUS_0
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GUS_1
+#define GC_CAC_WEIGHT_GUS_1__WEIGHT_GUS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GUS_1__WEIGHT_GUS_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_RLC_0
+#define GC_CAC_WEIGHT_RLC_0__WEIGHT_RLC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_RLC_0__WEIGHT_RLC_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GRBM_0
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG1_MASK 0xFFFF0000L
+//GC_EDC_CLK_MONITOR_CTRL
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN__SHIFT 0x0
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL__SHIFT 0x1
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD__SHIFT 0x5
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN_MASK 0x00000001L
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL_MASK 0x0000001EL
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD_MASK 0x0001FFE0L
+//GC_CAC_IND_INDEX
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR__SHIFT 0x0
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//GC_CAC_IND_DATA
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA__SHIFT 0x0
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA_MASK 0xFFFFFFFFL
+//SE_CAC_CTRL_1
+#define SE_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define SE_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
+#define SE_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
+#define SE_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L
+//SE_CAC_CTRL_2
+#define SE_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define SE_CAC_CTRL_2__SE_LCAC_ENABLE__SHIFT 0x1
+#define SE_CAC_CTRL_2__WGP_CAC_CLK_OVERRIDE__SHIFT 0x2
+#define SE_CAC_CTRL_2__SE_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x3
+#define SE_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define SE_CAC_CTRL_2__SE_LCAC_ENABLE_MASK 0x00000002L
+#define SE_CAC_CTRL_2__WGP_CAC_CLK_OVERRIDE_MASK 0x00000004L
+#define SE_CAC_CTRL_2__SE_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000008L
+//SE_CAC_WEIGHT_TA_0
+#define SE_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_TD_0
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_1
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_2
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_3
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_4
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_5
+#define SE_CAC_WEIGHT_TD_5__WEIGHT_TD_SIG10__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_5__WEIGHT_TD_SIG10_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_TCP_0
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_1
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_2
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_3
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_0
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_1
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_2
+#define SE_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SP_0
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SP_1
+#define SE_CAC_WEIGHT_SP_1__WEIGHT_SP_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SP_1__WEIGHT_SP_SIG2_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_LDS_0
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_1
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_2
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_3
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQC_0
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQC_1
+#define SE_CAC_WEIGHT_SQC_1__WEIGHT_SQC_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQC_1__WEIGHT_SQC_SIG2_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_CU_0
+#define SE_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_BCI_0
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_0
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_1
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_2
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_3
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_4
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_5
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG10__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG11__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG10_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG11_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_6
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG12__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG13__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG12_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG13_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_7
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG14__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG15__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG14_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG15_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_8
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG16__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG17__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG16_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG17_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_9
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG18__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG19__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG18_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG19_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_10
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG20__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG21__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG20_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG21_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_11
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG22__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG23__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG22_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG23_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_0
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_1
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_2
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_3
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_4
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_RMI_0
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_RMI_1
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SX_0
+#define SE_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SXRB_0
+#define SE_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_UTCL1_0
+#define SE_CAC_WEIGHT_UTCL1_0__WEIGHT_UTCL1_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_UTCL1_0__WEIGHT_UTCL1_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_GL1C_0
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_GL1C_1
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_GL1C_2
+#define SE_CAC_WEIGHT_GL1C_2__WEIGHT_GL1C_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_2__WEIGHT_GL1C_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SPI_0
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SPI_1
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SPI_2
+#define SE_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_PC_0
+#define SE_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_PA_0
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_1
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_2
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_3
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_0
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_1
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_2
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_3
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WINDOW_AGGR_VALUE
+#define SE_CAC_WINDOW_AGGR_VALUE__SE_CAC_WINDOW_AGGR_VALUE__SHIFT 0x0
+#define SE_CAC_WINDOW_AGGR_VALUE__SE_CAC_WINDOW_AGGR_VALUE_MASK 0xFFFFFFFFL
+//SE_CAC_WINDOW_GFXCLK_CYCLE
+#define SE_CAC_WINDOW_GFXCLK_CYCLE__SE_CAC_WINDOW_GFXCLK_CYCLE__SHIFT 0x0
+#define SE_CAC_WINDOW_GFXCLK_CYCLE__SE_CAC_WINDOW_GFXCLK_CYCLE_MASK 0x000003FFL
+//SE_CAC_IND_INDEX
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR__SHIFT 0x0
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//SE_CAC_IND_DATA
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA__SHIFT 0x0
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA_MASK 0xFFFFFFFFL
+
+
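These definitions follow the usual generated-header convention: each field of a register REG gets a REG__FIELD__SHIFT / REG__FIELD_MASK pair, a field is read with (value & MASK) >> SHIFT, and written with the inverse. The self-contained C sketch below (not part of the generated header) shows that pattern; the FIELD_GET/FIELD_SET helpers are illustrative assumptions built on the naming convention, and the SE_CAC_CTRL_1 values are copied from the block above.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the generated definitions used by this example. */
#define SE_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
#define SE_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
#define SE_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
#define SE_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L

/* Illustrative helpers (assumed, not from this header) that paste the
 * register and field names into the *__SHIFT / *_MASK tokens above. */
#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t v = 0;

	/* Pack an 8-bit CAC window and a 24-bit TDP window into one register
	 * value, then read both fields back. */
	v = FIELD_SET(v, SE_CAC_CTRL_1, CAC_WINDOW, 0x20);
	v = FIELD_SET(v, SE_CAC_CTRL_1, TDP_WINDOW, 0x1000);

	printf("reg=0x%08x cac=0x%x tdp=0x%x\n", (unsigned)v,
	       (unsigned)FIELD_GET(v, SE_CAC_CTRL_1, CAC_WINDOW),
	       (unsigned)FIELD_GET(v, SE_CAC_CTRL_1, TDP_WINDOW));
	return 0;
}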
+// addressBlock: gc_pfonly2_spidec
+//SPI_RESOURCE_RESERVE_CU_0
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_1
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_2
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_3
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_4
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_5
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_6
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_7
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_8
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_9
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_10
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_11
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_12
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_13
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_14
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_15
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_2
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_3
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_4
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_5
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_6
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_7
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_8
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_9
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_11
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_12
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_13
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_14
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_15
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
+
+
+// addressBlock: gc_gfxudec
+//CP_EOP_DONE_ADDR_LO
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_EOP_DONE_ADDR_HI
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
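The *_LO/*_HI pairs in this block split wider quantities across two 32-bit registers; for CP_EOP_DONE_ADDR the 0x2 shift on the low word suggests a dword-aligned address whose upper 16 bits live in the HI register. A minimal reassembly sketch under that assumption follows (not part of the generated header); the raw lo/hi values are hypothetical and no particular register read-back API is implied.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the generated masks used by this example. */
#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL

/* Rebuild a dword-aligned 48-bit address from the LO/HI register words,
 * assuming the low word stores address bits [31:2] in place. */
static uint64_t eop_done_addr(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)(hi & CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK) << 32) |
	       (lo & CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK);
}

int main(void)
{
	/* Raw register contents chosen purely for illustration. */
	uint32_t lo = 0x1234ABCCu;
	uint32_t hi = 0x00000042u;

	printf("eop done addr = 0x%012llx\n",
	       (unsigned long long)eop_done_addr(lo, hi));
	return 0;
}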
+//CP_EOP_DONE_DATA_LO
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_EOP_DONE_DATA_HI
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_LO
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_HI
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_ADDR_LO
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_PIPE_STATS_ADDR_HI
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
+//CP_VGT_IAVERT_COUNT_LO
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAVERT_COUNT_HI
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_LO
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_HI
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_LO
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_HI
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_LO
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_HI
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_LO
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_HI
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_LO
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_HI
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_LO
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_HI
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_LO
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_HI
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_LO
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_HI
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_LO
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_HI
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_LO
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_HI
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_LO
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_HI
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_ASINVOC_COUNT_LO
+#define CP_VGT_ASINVOC_COUNT_LO__ASINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_ASINVOC_COUNT_LO__ASINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_ASINVOC_COUNT_HI
+#define CP_VGT_ASINVOC_COUNT_HI__ASINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_ASINVOC_COUNT_HI__ASINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_CONTROL
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x06000000L
+//SCRATCH_REG0
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//SCRATCH_REG1
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//SCRATCH_REG2
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//SCRATCH_REG3
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//SCRATCH_REG4
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//SCRATCH_REG5
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//SCRATCH_REG6
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//SCRATCH_REG7
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//SCRATCH_REG_ATOMIC
+#define SCRATCH_REG_ATOMIC__IMMED__SHIFT 0x0
+#define SCRATCH_REG_ATOMIC__ID__SHIFT 0x18
+#define SCRATCH_REG_ATOMIC__reserved27__SHIFT 0x1b
+#define SCRATCH_REG_ATOMIC__OP__SHIFT 0x1c
+#define SCRATCH_REG_ATOMIC__reserved31__SHIFT 0x1f
+#define SCRATCH_REG_ATOMIC__IMMED_MASK 0x00FFFFFFL
+#define SCRATCH_REG_ATOMIC__ID_MASK 0x07000000L
+#define SCRATCH_REG_ATOMIC__reserved27_MASK 0x08000000L
+#define SCRATCH_REG_ATOMIC__OP_MASK 0x70000000L
+#define SCRATCH_REG_ATOMIC__reserved31_MASK 0x80000000L
+//SCRATCH_REG_CMPSWAP_ATOMIC
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_COMPARE__SHIFT 0x0
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_REPLACE__SHIFT 0xc
+#define SCRATCH_REG_CMPSWAP_ATOMIC__ID__SHIFT 0x18
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved27__SHIFT 0x1b
+#define SCRATCH_REG_CMPSWAP_ATOMIC__OP__SHIFT 0x1c
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved31__SHIFT 0x1f
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_COMPARE_MASK 0x00000FFFL
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_REPLACE_MASK 0x00FFF000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__ID_MASK 0x07000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved27_MASK 0x08000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__OP_MASK 0x70000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved31_MASK 0x80000000L
+//CP_APPEND_DDID_CNT
+#define CP_APPEND_DDID_CNT__DATA__SHIFT 0x0
+#define CP_APPEND_DDID_CNT__DATA_MASK 0x000000FFL
+//CP_APPEND_DATA_HI
+#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_HI
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_HI
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_LO
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_HI
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_LO
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_HI
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_LO
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_HI
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_APPEND_ADDR_LO
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_ADDR_HI
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
+#define CP_APPEND_ADDR_HI__FENCE_SIZE__SHIFT 0x12
+#define CP_APPEND_ADDR_HI__PWS_ENABLE__SHIFT 0x13
+#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00030000L
+#define CP_APPEND_ADDR_HI__FENCE_SIZE_MASK 0x00040000L
+#define CP_APPEND_ADDR_HI__PWS_ENABLE_MASK 0x00080000L
+#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x06000000L
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
+//CP_APPEND_DATA
+#define CP_APPEND_DATA__DATA__SHIFT 0x0
+#define CP_APPEND_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_DATA_LO
+#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_LO
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_LO
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_LO
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_LO
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_HI
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_HI
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_LO
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_LO
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_HI
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_HI
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_LO
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_LO
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_HI
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_HI
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_WADDR_LO
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_WADDR_HI
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
+#define CP_ME_MC_WADDR_HI__WRITE_CONFIRM__SHIFT 0x11
+#define CP_ME_MC_WADDR_HI__WRITE64__SHIFT 0x12
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_WADDR_HI__VMID__SHIFT 0x18
+#define CP_ME_MC_WADDR_HI__RINGID__SHIFT 0x1c
+#define CP_ME_MC_WADDR_HI__PRIVILEGE__SHIFT 0x1f
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_WADDR_HI__WRITE_CONFIRM_MASK 0x00020000L
+#define CP_ME_MC_WADDR_HI__WRITE64_MASK 0x00040000L
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00C00000L
+#define CP_ME_MC_WADDR_HI__VMID_MASK 0x0F000000L
+#define CP_ME_MC_WADDR_HI__RINGID_MASK 0x30000000L
+#define CP_ME_MC_WADDR_HI__PRIVILEGE_MASK 0x80000000L
+//CP_ME_MC_WDATA_LO
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
+//CP_ME_MC_WDATA_HI
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_RADDR_LO
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_RADDR_HI
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
+#define CP_ME_MC_RADDR_HI__SIZE__SHIFT 0x10
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_RADDR_HI__VMID__SHIFT 0x18
+#define CP_ME_MC_RADDR_HI__PRIVILEGE__SHIFT 0x1f
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_RADDR_HI__SIZE_MASK 0x000F0000L
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00C00000L
+#define CP_ME_MC_RADDR_HI__VMID_MASK 0x0F000000L
+#define CP_ME_MC_RADDR_HI__PRIVILEGE_MASK 0x80000000L
+//CP_SEM_WAIT_TIMER
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
+//CP_SIG_SEM_ADDR_LO
+#define CP_SIG_SEM_ADDR_LO__SEM_PRIV__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_SIG_SEM_ADDR_LO__SEM_PRIV_MASK 0x00000001L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_SIG_SEM_ADDR_HI
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_WAIT_REG_MEM_TIMEOUT
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
+//CP_WAIT_SEM_ADDR_LO
+#define CP_WAIT_SEM_ADDR_LO__SEM_PRIV__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_WAIT_SEM_ADDR_LO__SEM_PRIV_MASK 0x00000001L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_WAIT_SEM_ADDR_HI
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_DMA_PFP_CONTROL
+#define CP_DMA_PFP_CONTROL__VMID__SHIFT 0x0
+#define CP_DMA_PFP_CONTROL__TMZ__SHIFT 0x4
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_PFP_CONTROL__SRC_VOLATLE__SHIFT 0xf
+#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_PFP_CONTROL__DST_VOLATLE__SHIFT 0x1b
+#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_PFP_CONTROL__VMID_MASK 0x0000000FL
+#define CP_DMA_PFP_CONTROL__TMZ_MASK 0x00000010L
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00006000L
+#define CP_DMA_PFP_CONTROL__SRC_VOLATLE_MASK 0x00008000L
+#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x06000000L
+#define CP_DMA_PFP_CONTROL__DST_VOLATLE_MASK 0x08000000L
+#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_CONTROL
+#define CP_DMA_ME_CONTROL__VMID__SHIFT 0x0
+#define CP_DMA_ME_CONTROL__TMZ__SHIFT 0x4
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_ME_CONTROL__SRC_VOLATLE__SHIFT 0xf
+#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_ME_CONTROL__DST_VOLATLE__SHIFT 0x1b
+#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_ME_CONTROL__VMID_MASK 0x0000000FL
+#define CP_DMA_ME_CONTROL__TMZ_MASK 0x00000010L
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00006000L
+#define CP_DMA_ME_CONTROL__SRC_VOLATLE_MASK 0x00008000L
+#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x06000000L
+#define CP_DMA_ME_CONTROL__DST_VOLATLE_MASK 0x08000000L
+#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_SRC_ADDR
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_SRC_ADDR_HI
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_DST_ADDR
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_DST_ADDR_HI
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_COMMAND
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_PFP_SRC_ADDR
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_SRC_ADDR_HI
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_DST_ADDR
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_DST_ADDR_HI
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_COMMAND
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_CNTL
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
+#define CP_DMA_CNTL__WATCH_CONTROL__SHIFT 0x1
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
+#define CP_DMA_CNTL__WATCH_CONTROL_MASK 0x00000002L
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x01FF0000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
+//CP_DMA_READ_TAGS
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+//CP_PFP_IB_CONTROL
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
+//CP_PFP_LOAD_CONTROL
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN__SHIFT 0xf
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
+#define CP_PFP_LOAD_CONTROL__LOAD_ORDINAL__SHIFT 0x1f
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN_MASK 0x00008000L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+#define CP_PFP_LOAD_CONTROL__LOAD_ORDINAL_MASK 0x80000000L
+//CP_SCRATCH_INDEX
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_SCRATCH_DATA
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_RB_OFFSET
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_OFFSET
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_IB2_OFFSET
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_BEGIN
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_END
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_BEGIN
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_END
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_DMA_ME_CMD_ADDR_LO
+#define CP_DMA_ME_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_ME_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_ME_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_DMA_ME_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_ME_CMD_ADDR_HI
+#define CP_DMA_ME_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_ME_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_ME_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_PFP_CMD_ADDR_LO
+#define CP_DMA_PFP_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_PFP_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_PFP_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_DMA_PFP_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_PFP_CMD_ADDR_HI
+#define CP_DMA_PFP_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_PFP_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_PFP_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_APPEND_CMD_ADDR_LO
+#define CP_APPEND_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_APPEND_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_APPEND_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_APPEND_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_CMD_ADDR_HI
+#define CP_APPEND_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_APPEND_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_APPEND_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//UCONFIG_RESERVED_REG0
+#define UCONFIG_RESERVED_REG0__DATA__SHIFT 0x0
+#define UCONFIG_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//UCONFIG_RESERVED_REG1
+#define UCONFIG_RESERVED_REG1__DATA__SHIFT 0x0
+#define UCONFIG_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//CP_PA_MSPRIM_COUNT_LO
+#define CP_PA_MSPRIM_COUNT_LO__MSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_MSPRIM_COUNT_LO__MSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_MSPRIM_COUNT_HI
+#define CP_PA_MSPRIM_COUNT_HI__MSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_MSPRIM_COUNT_HI__MSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_GE_MSINVOC_COUNT_LO
+#define CP_GE_MSINVOC_COUNT_LO__MSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_GE_MSINVOC_COUNT_LO__MSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_GE_MSINVOC_COUNT_HI
+#define CP_GE_MSINVOC_COUNT_HI__MSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_GE_MSINVOC_COUNT_HI__MSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_IB1_CMD_BUFSZ
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB2_CMD_BUFSZ
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_ST_CMD_BUFSZ
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_IB2_BASE_LO
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB2_BASE_HI
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_IB2_BUFSZ
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_ST_BASE_LO
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
+//CP_ST_BASE_HI
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
+//CP_ST_BUFSZ
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
+//CP_EOP_DONE_EVENT_CNTL
+#define CP_EOP_DONE_EVENT_CNTL__GCR_CNTL__SHIFT 0xc
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
+#define CP_EOP_DONE_EVENT_CNTL__EOP_VOLATILE__SHIFT 0x1b
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
+#define CP_EOP_DONE_EVENT_CNTL__GLK_INV__SHIFT 0x1e
+#define CP_EOP_DONE_EVENT_CNTL__PWS_ENABLE__SHIFT 0x1f
+#define CP_EOP_DONE_EVENT_CNTL__GCR_CNTL_MASK 0x01FFF000L
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x06000000L
+#define CP_EOP_DONE_EVENT_CNTL__EOP_VOLATILE_MASK 0x08000000L
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
+#define CP_EOP_DONE_EVENT_CNTL__GLK_INV_MASK 0x40000000L
+#define CP_EOP_DONE_EVENT_CNTL__PWS_ENABLE_MASK 0x80000000L
+//CP_EOP_DONE_DATA_CNTL
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
+#define CP_EOP_DONE_DATA_CNTL__SEMAPHORE_SIGNAL_TYPE__SHIFT 0x13
+#define CP_EOP_DONE_DATA_CNTL__ACTION_PIPE_ID__SHIFT 0x14
+#define CP_EOP_DONE_DATA_CNTL__ACTION_ID__SHIFT 0x16
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__SEMAPHORE_SIGNAL_TYPE_MASK 0x00080000L
+#define CP_EOP_DONE_DATA_CNTL__ACTION_PIPE_ID_MASK 0x00300000L
+#define CP_EOP_DONE_DATA_CNTL__ACTION_ID_MASK 0x00C00000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
+//CP_EOP_DONE_CNTX_ID
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_DB_BASE_LO
+#define CP_DB_BASE_LO__DB_BASE_LO__SHIFT 0x2
+#define CP_DB_BASE_LO__DB_BASE_LO_MASK 0xFFFFFFFCL
+//CP_DB_BASE_HI
+#define CP_DB_BASE_HI__DB_BASE_HI__SHIFT 0x0
+#define CP_DB_BASE_HI__DB_BASE_HI_MASK 0x0000FFFFL
+//CP_DB_BUFSZ
+#define CP_DB_BUFSZ__DB_BUFSZ__SHIFT 0x0
+#define CP_DB_BUFSZ__DB_BUFSZ_MASK 0x000FFFFFL
+//CP_DB_CMD_BUFSZ
+#define CP_DB_CMD_BUFSZ__DB_CMD_REQSZ__SHIFT 0x0
+#define CP_DB_CMD_BUFSZ__DB_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_PFP_COMPLETION_STATUS
+#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_PRED_NOT_VISIBLE
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
+//CP_PFP_METADATA_BASE_ADDR
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_PFP_METADATA_BASE_ADDR_HI
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DRAW_INDX_INDR_ADDR
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DRAW_INDX_INDR_ADDR_HI
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DISPATCH_INDR_ADDR
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DISPATCH_INDR_ADDR_HI
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_BASE_ADDR
+#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_INDEX_BASE_ADDR_HI
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_TYPE
+#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+//CP_GDS_BKUP_ADDR
+#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_GDS_BKUP_ADDR_HI
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_SAMPLE_STATUS
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
+//CP_ME_COHER_CNTL
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+//CP_ME_COHER_SIZE
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_SIZE_HI
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_BASE
+#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_BASE_HI
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_STATUS
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
+#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
+#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
+//RLC_GPM_PERF_COUNT_0
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_0__SA_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_0__WGP_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_0__SA_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_0__WGP_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_PERF_COUNT_1
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_1__SA_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_1__WGP_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_1__SA_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_1__WGP_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SA_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SA_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SA_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+//VGT_PRIMITIVE_TYPE
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_INDEX_TYPE
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_INDEX_TYPE__DISABLE_INSTANCE_PACKING__SHIFT 0xe
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__DISABLE_INSTANCE_PACKING_MASK 0x00004000L
+//GE_MIN_VTX_INDX
+#define GE_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
+#define GE_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
+//GE_INDX_OFFSET
+#define GE_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
+#define GE_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
+//GE_MULTI_PRIM_IB_RESET_EN
+#define GE_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
+#define GE_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
+#define GE_MULTI_PRIM_IB_RESET_EN__DISABLE_FOR_AUTO_INDEX__SHIFT 0x2
+#define GE_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define GE_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
+#define GE_MULTI_PRIM_IB_RESET_EN__DISABLE_FOR_AUTO_INDEX_MASK 0x00000004L
+//VGT_NUM_INDICES
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_NUM_INSTANCES
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_TF_RING_SIZE
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0001FFFFL
+//VGT_HS_OFFCHIP_PARAM
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0xa
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000003FFL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000C00L
+//VGT_TF_MEMORY_BASE
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
+//GE_MAX_VTX_INDX
+#define GE_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
+#define GE_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
+//VGT_INSTANCE_BASE_ID
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
+//GE_CNTL
+#define GE_CNTL__PRIMS_PER_SUBGRP__SHIFT 0x0
+#define GE_CNTL__VERTS_PER_SUBGRP__SHIFT 0x9
+#define GE_CNTL__BREAK_SUBGRP_AT_EOI__SHIFT 0x12
+#define GE_CNTL__PACKET_TO_ONE_PA__SHIFT 0x13
+#define GE_CNTL__BREAK_PRIMGRP_AT_EOI__SHIFT 0x14
+#define GE_CNTL__PRIM_GRP_SIZE__SHIFT 0x15
+#define GE_CNTL__GCR_DISABLE__SHIFT 0x1e
+#define GE_CNTL__DIS_PG_SIZE_ADJUST_FOR_STRIP__SHIFT 0x1f
+#define GE_CNTL__PRIMS_PER_SUBGRP_MASK 0x000001FFL
+#define GE_CNTL__VERTS_PER_SUBGRP_MASK 0x0003FE00L
+#define GE_CNTL__BREAK_SUBGRP_AT_EOI_MASK 0x00040000L
+#define GE_CNTL__PACKET_TO_ONE_PA_MASK 0x00080000L
+#define GE_CNTL__BREAK_PRIMGRP_AT_EOI_MASK 0x00100000L
+#define GE_CNTL__PRIM_GRP_SIZE_MASK 0x3FE00000L
+#define GE_CNTL__GCR_DISABLE_MASK 0x40000000L
+#define GE_CNTL__DIS_PG_SIZE_ADJUST_FOR_STRIP_MASK 0x80000000L
+//GE_USER_VGPR1
+#define GE_USER_VGPR1__DATA__SHIFT 0x0
+#define GE_USER_VGPR1__DATA_MASK 0xFFFFFFFFL
+//GE_USER_VGPR2
+#define GE_USER_VGPR2__DATA__SHIFT 0x0
+#define GE_USER_VGPR2__DATA_MASK 0xFFFFFFFFL
+//GE_USER_VGPR3
+#define GE_USER_VGPR3__DATA__SHIFT 0x0
+#define GE_USER_VGPR3__DATA_MASK 0xFFFFFFFFL
+//GE_STEREO_CNTL
+#define GE_STEREO_CNTL__RT_SLICE__SHIFT 0x0
+#define GE_STEREO_CNTL__VIEWPORT__SHIFT 0x3
+#define GE_STEREO_CNTL__FSR_SELECT__SHIFT 0x7
+#define GE_STEREO_CNTL__EN_STEREO__SHIFT 0x8
+#define GE_STEREO_CNTL__RT_SLICE_MASK 0x00000007L
+#define GE_STEREO_CNTL__VIEWPORT_MASK 0x00000078L
+#define GE_STEREO_CNTL__FSR_SELECT_MASK 0x00000080L
+#define GE_STEREO_CNTL__EN_STEREO_MASK 0x00000100L
+//GE_PC_ALLOC
+#define GE_PC_ALLOC__OVERSUB_EN__SHIFT 0x0
+#define GE_PC_ALLOC__NUM_PC_LINES__SHIFT 0x1
+#define GE_PC_ALLOC__OVERSUB_EN_MASK 0x00000001L
+#define GE_PC_ALLOC__NUM_PC_LINES_MASK 0x000007FEL
+//VGT_TF_MEMORY_BASE_HI
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
+//GE_USER_VGPR_EN
+#define GE_USER_VGPR_EN__EN_USER_VGPR1__SHIFT 0x0
+#define GE_USER_VGPR_EN__EN_USER_VGPR2__SHIFT 0x1
+#define GE_USER_VGPR_EN__EN_USER_VGPR3__SHIFT 0x2
+#define GE_USER_VGPR_EN__EN_USER_VGPR1_MASK 0x00000001L
+#define GE_USER_VGPR_EN__EN_USER_VGPR2_MASK 0x00000002L
+#define GE_USER_VGPR_EN__EN_USER_VGPR3_MASK 0x00000004L
+//GE_GS_FAST_LAUNCH_WG_DIM
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_X__SHIFT 0x0
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_Y__SHIFT 0x10
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_X_MASK 0x0000FFFFL
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_Y_MASK 0xFFFF0000L
+//GE_GS_FAST_LAUNCH_WG_DIM_1
+#define GE_GS_FAST_LAUNCH_WG_DIM_1__GS_FL_DIM_Z__SHIFT 0x0
+#define GE_GS_FAST_LAUNCH_WG_DIM_1__GS_FL_DIM_Z_MASK 0x0000FFFFL
+//VGT_GS_OUT_PRIM_TYPE
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
+//PA_SU_LINE_STIPPLE_VALUE
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
+//PA_SC_LINE_STIPPLE_STATE
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
+//PA_SC_SCREEN_EXTENT_MIN_0
+#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_0
+#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MIN_1
+#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_1
+#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
+//PA_SC_P3D_TRAP_SCREEN_HV_EN
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_P3D_TRAP_SCREEN_H
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_V
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_P3D_TRAP_SCREEN_COUNT
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_HV_EN
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_HP3D_TRAP_SCREEN_H
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_V
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_COUNT
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_HV_EN
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_TRAP_SCREEN_H
+#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_V
+#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_COUNT
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//SQ_THREAD_TRACE_USERDATA_0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_1
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_2
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_3
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_4
+#define SQ_THREAD_TRACE_USERDATA_4__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_4__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_5
+#define SQ_THREAD_TRACE_USERDATA_5__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_5__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_6
+#define SQ_THREAD_TRACE_USERDATA_6__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_6__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_7
+#define SQ_THREAD_TRACE_USERDATA_7__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_7__DATA_MASK 0xFFFFFFFFL
+//SQC_CACHES
+#define SQC_CACHES__TARGET_INST__SHIFT 0x0
+#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
+#define SQC_CACHES__INVALIDATE__SHIFT 0x2
+#define SQC_CACHES__COMPLETE__SHIFT 0x10
+#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
+#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
+#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
+#define SQC_CACHES__COMPLETE_MASK 0x00010000L
+//TA_CS_BC_BASE_ADDR
+#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_CS_BC_BASE_ADDR_HI
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//DB_OCCLUSION_COUNT0_LOW
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT0_HI
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT1_LOW
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT1_HI
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT2_LOW
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT2_HI
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT3_LOW
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT3_HI
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//GDS_RD_ADDR
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_DATA
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
+#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_ADDR
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_COUNT
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_DATA
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_ADDR
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_DATA
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_ADDR
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_DATA
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WRITE_COMPLETE
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
+//GDS_ATOM_CNTL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
+//GDS_ATOM_COMPLETE
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
+//GDS_ATOM_BASE
+#define GDS_ATOM_BASE__BASE__SHIFT 0x0
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0xc
+#define GDS_ATOM_BASE__BASE_MASK 0x00000FFFL
+#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFFF000L
+//GDS_ATOM_SIZE
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0xd
+#define GDS_ATOM_SIZE__SIZE_MASK 0x00001FFFL
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFFE000L
+//GDS_ATOM_OFFSET0
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_OFFSET1
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_DST
+#define GDS_ATOM_DST__DST__SHIFT 0x0
+#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
+//GDS_ATOM_OP
+#define GDS_ATOM_OP__OP__SHIFT 0x0
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OP__OP_MASK 0x000000FFL
+#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_SRC0
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC0_U
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1_U
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0
+#define GDS_ATOM_READ0__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0_U
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1
+#define GDS_ATOM_READ1__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1_U
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_GWS_RESOURCE_CNTL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GWS_RESOURCE
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
+#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1d
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1e
+#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1f
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001FFEL
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
+#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x1FFF0000L
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x20000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x40000000L
+#define GDS_GWS_RESOURCE__HALTED_MASK 0x80000000L
+//GDS_GWS_RESOURCE_CNT
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_CNTL
+#define GDS_OA_CNTL__INDEX__SHIFT 0x0
+#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
+#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
+#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
+//GDS_OA_COUNTER
+#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
+#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
+//GDS_OA_ADDRESS
+#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
+#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x10
+#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x14
+#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x18
+#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
+#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
+#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
+#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x000F0000L
+#define GDS_OA_ADDRESS__CRAWLER_MASK 0x00F00000L
+#define GDS_OA_ADDRESS__UNUSED_MASK 0x3F000000L
+#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
+#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
+//GDS_OA_INCDEC
+#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
+#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
+#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
+#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
+//GDS_OA_RING_SIZE
+#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
+#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_0
+#define GDS_STRMOUT_DWORDS_WRITTEN_0__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_0__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_1
+#define GDS_STRMOUT_DWORDS_WRITTEN_1__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_1__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_2
+#define GDS_STRMOUT_DWORDS_WRITTEN_2__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_2__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_3
+#define GDS_STRMOUT_DWORDS_WRITTEN_3__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_3__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_0
+#define GDS_GS_0__DATA__SHIFT 0x0
+#define GDS_GS_0__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_1
+#define GDS_GS_1__DATA__SHIFT 0x0
+#define GDS_GS_1__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_2
+#define GDS_GS_2__DATA__SHIFT 0x0
+#define GDS_GS_2__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_3
+#define GDS_GS_3__DATA__SHIFT 0x0
+#define GDS_GS_3__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_0_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_0_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_0_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_0_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_0_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_0_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_0_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_0_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_1_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_1_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_1_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_1_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_1_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_1_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_1_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_1_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_2_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_2_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_2_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_2_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_2_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_2_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_2_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_2_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_3_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_3_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_3_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_3_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_3_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_3_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_3_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_3_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_HI__DATA_MASK 0xFFFFFFFFL
+//SPI_CONFIG_CNTL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
+//SPI_CONFIG_CNTL_1
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x5
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MODE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_1__OREO_EXPALLOC_STALL__SHIFT 0x9
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
+#define SPI_CONFIG_CNTL_1__MAX_VTX_SYNC_CNT__SHIFT 0x10
+#define SPI_CONFIG_CNTL_1__EN_USER_ACCUM__SHIFT 0x15
+#define SPI_CONFIG_CNTL_1__SA_SCREEN_MAP__SHIFT 0x16
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT__SHIFT 0x17
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000060L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MODE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__OREO_EXPALLOC_STALL_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
+#define SPI_CONFIG_CNTL_1__MAX_VTX_SYNC_CNT_MASK 0x001F0000L
+#define SPI_CONFIG_CNTL_1__EN_USER_ACCUM_MASK 0x00200000L
+#define SPI_CONFIG_CNTL_1__SA_SCREEN_MAP_MASK 0x00400000L
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MASK 0xFF800000L
+//SPI_CONFIG_CNTL_2
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
+#define SPI_CONFIG_CNTL_2__PWS_CSG_WAIT_DISABLE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_2__PWS_HS_WAIT_DISABLE__SHIFT 0x9
+#define SPI_CONFIG_CNTL_2__PWS_GS_WAIT_DISABLE__SHIFT 0xa
+#define SPI_CONFIG_CNTL_2__PWS_PS_WAIT_DISABLE__SHIFT 0xb
+#define SPI_CONFIG_CNTL_2__CSC_HALT_ACK_DELAY__SHIFT 0xc
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L
+#define SPI_CONFIG_CNTL_2__PWS_CSG_WAIT_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_2__PWS_HS_WAIT_DISABLE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_2__PWS_GS_WAIT_DISABLE_MASK 0x00000400L
+#define SPI_CONFIG_CNTL_2__PWS_PS_WAIT_DISABLE_MASK 0x00000800L
+#define SPI_CONFIG_CNTL_2__CSC_HALT_ACK_DELAY_MASK 0x0001F000L
+//SPI_WAVE_LIMIT_CNTL
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN__SHIFT 0x0
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN__SHIFT 0x4
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN__SHIFT 0x6
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN_MASK 0x00000003L
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L
+//SPI_GS_THROTTLE_CNTL1
+#define SPI_GS_THROTTLE_CNTL1__PH_POLL_INTERVAL__SHIFT 0x0
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_BASE__SHIFT 0x4
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_STEP_SIZE__SHIFT 0x8
+#define SPI_GS_THROTTLE_CNTL1__SPI_VGPR_THRESHOLD__SHIFT 0xc
+#define SPI_GS_THROTTLE_CNTL1__SPI_LDS_THRESHOLD__SHIFT 0x10
+#define SPI_GS_THROTTLE_CNTL1__SPI_POLL_INTERVAL__SHIFT 0x14
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_BASE__SHIFT 0x18
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_STEP_SIZE__SHIFT 0x1c
+#define SPI_GS_THROTTLE_CNTL1__PH_POLL_INTERVAL_MASK 0x0000000FL
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_BASE_MASK 0x000000F0L
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_STEP_SIZE_MASK 0x00000F00L
+#define SPI_GS_THROTTLE_CNTL1__SPI_VGPR_THRESHOLD_MASK 0x0000F000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_LDS_THRESHOLD_MASK 0x000F0000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_POLL_INTERVAL_MASK 0x00F00000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_BASE_MASK 0x0F000000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_STEP_SIZE_MASK 0xF0000000L
+//SPI_GS_THROTTLE_CNTL2
+#define SPI_GS_THROTTLE_CNTL2__SPI_THROTTLE_MODE__SHIFT 0x0
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD__SHIFT 0x2
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_FACTOR__SHIFT 0x6
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY1__SHIFT 0x8
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY2__SHIFT 0xb
+#define SPI_GS_THROTTLE_CNTL2__PS_STALL_THRESHOLD__SHIFT 0xe
+#define SPI_GS_THROTTLE_CNTL2__PH_MODE__SHIFT 0x10
+#define SPI_GS_THROTTLE_CNTL2__RESERVED__SHIFT 0x11
+#define SPI_GS_THROTTLE_CNTL2__SPI_THROTTLE_MODE_MASK 0x00000003L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_MASK 0x0000003CL
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_FACTOR_MASK 0x000000C0L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY1_MASK 0x00000700L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY2_MASK 0x00003800L
+#define SPI_GS_THROTTLE_CNTL2__PS_STALL_THRESHOLD_MASK 0x0000C000L
+#define SPI_GS_THROTTLE_CNTL2__PH_MODE_MASK 0x00010000L
+#define SPI_GS_THROTTLE_CNTL2__RESERVED_MASK 0xFFFE0000L
+//SPI_ATTRIBUTE_RING_BASE
+#define SPI_ATTRIBUTE_RING_BASE__BASE__SHIFT 0x0
+#define SPI_ATTRIBUTE_RING_BASE__BASE_MASK 0xFFFFFFFFL
+//SPI_ATTRIBUTE_RING_SIZE
+#define SPI_ATTRIBUTE_RING_SIZE__MEM_SIZE__SHIFT 0x0
+#define SPI_ATTRIBUTE_RING_SIZE__BIG_PAGE__SHIFT 0x10
+#define SPI_ATTRIBUTE_RING_SIZE__L1_POLICY__SHIFT 0x11
+#define SPI_ATTRIBUTE_RING_SIZE__L2_POLICY__SHIFT 0x13
+#define SPI_ATTRIBUTE_RING_SIZE__LLC_NOALLOC__SHIFT 0x15
+#define SPI_ATTRIBUTE_RING_SIZE__GL1_PERF_COUNTER_DISABLE__SHIFT 0x16
+#define SPI_ATTRIBUTE_RING_SIZE__MEM_SIZE_MASK 0x000000FFL
+#define SPI_ATTRIBUTE_RING_SIZE__BIG_PAGE_MASK 0x00010000L
+#define SPI_ATTRIBUTE_RING_SIZE__L1_POLICY_MASK 0x00060000L
+#define SPI_ATTRIBUTE_RING_SIZE__L2_POLICY_MASK 0x00180000L
+#define SPI_ATTRIBUTE_RING_SIZE__LLC_NOALLOC_MASK 0x00200000L
+#define SPI_ATTRIBUTE_RING_SIZE__GL1_PERF_COUNTER_DISABLE_MASK 0x00400000L
+
+
+// addressBlock: gc_cprs64dec
+//CP_MES_PRGRM_CNTR_START
+#define CP_MES_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MES_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MES_INTR_ROUTINE_START
+#define CP_MES_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MES_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_MES_MTVEC_LO
+#define CP_MES_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define CP_MES_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_INTR_ROUTINE_START_HI
+#define CP_MES_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_MES_INTR_ROUTINE_START_HI__IR_START_MASK 0xFFFFFFFFL
+//CP_MES_MTVEC_HI
+#define CP_MES_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define CP_MES_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_CNTL
+#define CP_MES_CNTL__MES_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MES_CNTL__MES_PIPE0_RESET__SHIFT 0x10
+#define CP_MES_CNTL__MES_PIPE1_RESET__SHIFT 0x11
+#define CP_MES_CNTL__MES_PIPE2_RESET__SHIFT 0x12
+#define CP_MES_CNTL__MES_PIPE3_RESET__SHIFT 0x13
+#define CP_MES_CNTL__MES_PIPE0_ACTIVE__SHIFT 0x1a
+#define CP_MES_CNTL__MES_PIPE1_ACTIVE__SHIFT 0x1b
+#define CP_MES_CNTL__MES_PIPE2_ACTIVE__SHIFT 0x1c
+#define CP_MES_CNTL__MES_PIPE3_ACTIVE__SHIFT 0x1d
+#define CP_MES_CNTL__MES_HALT__SHIFT 0x1e
+#define CP_MES_CNTL__MES_STEP__SHIFT 0x1f
+#define CP_MES_CNTL__MES_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MES_CNTL__MES_PIPE0_RESET_MASK 0x00010000L
+#define CP_MES_CNTL__MES_PIPE1_RESET_MASK 0x00020000L
+#define CP_MES_CNTL__MES_PIPE2_RESET_MASK 0x00040000L
+#define CP_MES_CNTL__MES_PIPE3_RESET_MASK 0x00080000L
+#define CP_MES_CNTL__MES_PIPE0_ACTIVE_MASK 0x04000000L
+#define CP_MES_CNTL__MES_PIPE1_ACTIVE_MASK 0x08000000L
+#define CP_MES_CNTL__MES_PIPE2_ACTIVE_MASK 0x10000000L
+#define CP_MES_CNTL__MES_PIPE3_ACTIVE_MASK 0x20000000L
+#define CP_MES_CNTL__MES_HALT_MASK 0x40000000L
+#define CP_MES_CNTL__MES_STEP_MASK 0x80000000L
+//CP_MES_PIPE_PRIORITY_CNTS
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_MES_PIPE0_PRIORITY
+#define CP_MES_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE1_PRIORITY
+#define CP_MES_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE2_PRIORITY
+#define CP_MES_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE3_PRIORITY
+#define CP_MES_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_HEADER_DUMP
+#define CP_MES_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MES_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MES_MIE_LO
+#define CP_MES_MIE_LO__MES_INT__SHIFT 0x0
+#define CP_MES_MIE_LO__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_MIE_HI
+#define CP_MES_MIE_HI__MES_INT__SHIFT 0x0
+#define CP_MES_MIE_HI__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT
+#define CP_MES_INTERRUPT__MES_INT__SHIFT 0x0
+#define CP_MES_INTERRUPT__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_SCRATCH_INDEX
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_MES_SCRATCH_DATA
+#define CP_MES_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_MES_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_MES_INSTR_PNTR
+#define CP_MES_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MES_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_MES_MSCRATCH_HI
+#define CP_MES_MSCRATCH_HI__DATA__SHIFT 0x0
+#define CP_MES_MSCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_MSCRATCH_LO
+#define CP_MES_MSCRATCH_LO__DATA__SHIFT 0x0
+#define CP_MES_MSCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_MSTATUS_LO
+#define CP_MES_MSTATUS_LO__STATUS_LO__SHIFT 0x0
+#define CP_MES_MSTATUS_LO__STATUS_LO_MASK 0xFFFFFFFFL
+//CP_MES_MSTATUS_HI
+#define CP_MES_MSTATUS_HI__STATUS_HI__SHIFT 0x0
+#define CP_MES_MSTATUS_HI__STATUS_HI_MASK 0xFFFFFFFFL
+//CP_MES_MEPC_LO
+#define CP_MES_MEPC_LO__MEPC_LO__SHIFT 0x0
+#define CP_MES_MEPC_LO__MEPC_LO_MASK 0xFFFFFFFFL
+//CP_MES_MEPC_HI
+#define CP_MES_MEPC_HI__MEPC_HI__SHIFT 0x0
+#define CP_MES_MEPC_HI__MEPC_HI_MASK 0xFFFFFFFFL
+//CP_MES_MCAUSE_LO
+#define CP_MES_MCAUSE_LO__CAUSE_LO__SHIFT 0x0
+#define CP_MES_MCAUSE_LO__CAUSE_LO_MASK 0xFFFFFFFFL
+//CP_MES_MCAUSE_HI
+#define CP_MES_MCAUSE_HI__CAUSE_HI__SHIFT 0x0
+#define CP_MES_MCAUSE_HI__CAUSE_HI_MASK 0xFFFFFFFFL
+//CP_MES_MBADADDR_LO
+#define CP_MES_MBADADDR_LO__ADDR_LO__SHIFT 0x0
+#define CP_MES_MBADADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_MBADADDR_HI
+#define CP_MES_MBADADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_MES_MBADADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_MES_MIP_LO
+#define CP_MES_MIP_LO__MIP_LO__SHIFT 0x0
+#define CP_MES_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIP_HI
+#define CP_MES_MIP_HI__MIP_HI__SHIFT 0x0
+#define CP_MES_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//CP_MES_IC_OP_CNTL
+#define CP_MES_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_MES_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_MES_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_MES_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_MES_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_MES_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_MES_MCYCLE_LO
+#define CP_MES_MCYCLE_LO__CYCLE_LO__SHIFT 0x0
+#define CP_MES_MCYCLE_LO__CYCLE_LO_MASK 0xFFFFFFFFL
+//CP_MES_MCYCLE_HI
+#define CP_MES_MCYCLE_HI__CYCLE_HI__SHIFT 0x0
+#define CP_MES_MCYCLE_HI__CYCLE_HI_MASK 0xFFFFFFFFL
+//CP_MES_MTIME_LO
+#define CP_MES_MTIME_LO__TIME_LO__SHIFT 0x0
+#define CP_MES_MTIME_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MES_MTIME_HI
+#define CP_MES_MTIME_HI__TIME_HI__SHIFT 0x0
+#define CP_MES_MTIME_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MES_MINSTRET_LO
+#define CP_MES_MINSTRET_LO__INSTRET_LO__SHIFT 0x0
+#define CP_MES_MINSTRET_LO__INSTRET_LO_MASK 0xFFFFFFFFL
+//CP_MES_MINSTRET_HI
+#define CP_MES_MINSTRET_HI__INSTRET_HI__SHIFT 0x0
+#define CP_MES_MINSTRET_HI__INSTRET_HI_MASK 0xFFFFFFFFL
+//CP_MES_MISA_LO
+#define CP_MES_MISA_LO__MISA_LO__SHIFT 0x0
+#define CP_MES_MISA_LO__MISA_LO_MASK 0xFFFFFFFFL
+//CP_MES_MISA_HI
+#define CP_MES_MISA_HI__MISA_HI__SHIFT 0x0
+#define CP_MES_MISA_HI__MISA_HI_MASK 0xFFFFFFFFL
+//CP_MES_MVENDORID_LO
+#define CP_MES_MVENDORID_LO__MVENDORID_LO__SHIFT 0x0
+#define CP_MES_MVENDORID_LO__MVENDORID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MVENDORID_HI
+#define CP_MES_MVENDORID_HI__MVENDORID_HI__SHIFT 0x0
+#define CP_MES_MVENDORID_HI__MVENDORID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MARCHID_LO
+#define CP_MES_MARCHID_LO__MARCHID_LO__SHIFT 0x0
+#define CP_MES_MARCHID_LO__MARCHID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MARCHID_HI
+#define CP_MES_MARCHID_HI__MARCHID_HI__SHIFT 0x0
+#define CP_MES_MARCHID_HI__MARCHID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MIMPID_LO
+#define CP_MES_MIMPID_LO__MIMPID_LO__SHIFT 0x0
+#define CP_MES_MIMPID_LO__MIMPID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIMPID_HI
+#define CP_MES_MIMPID_HI__MIMPID_HI__SHIFT 0x0
+#define CP_MES_MIMPID_HI__MIMPID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MHARTID_LO
+#define CP_MES_MHARTID_LO__MHARTID_LO__SHIFT 0x0
+#define CP_MES_MHARTID_LO__MHARTID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MHARTID_HI
+#define CP_MES_MHARTID_HI__MHARTID_HI__SHIFT 0x0
+#define CP_MES_MHARTID_HI__MHARTID_HI_MASK 0xFFFFFFFFL
+//CP_MES_DC_BASE_CNTL
+#define CP_MES_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MES_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_DC_OP_CNTL
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_MES_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_MES_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+//CP_MES_MTIMECMP_LO
+#define CP_MES_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define CP_MES_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MES_MTIMECMP_HI
+#define CP_MES_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define CP_MES_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MES_PROCESS_QUANTUM_PIPE0
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_DURATION__SHIFT 0x0
+#define CP_MES_PROCESS_QUANTUM_PIPE0__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_EN__SHIFT 0x1f
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_MES_PROCESS_QUANTUM_PIPE0__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_EN_MASK 0x80000000L
+//CP_MES_PROCESS_QUANTUM_PIPE1
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_DURATION__SHIFT 0x0
+#define CP_MES_PROCESS_QUANTUM_PIPE1__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_EN__SHIFT 0x1f
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_MES_PROCESS_QUANTUM_PIPE1__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_EN_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL1
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL2
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL3
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL4
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL5
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL6
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_GP0_LO
+#define CP_MES_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MES_GP0_LO__DATA__SHIFT 0x1
+#define CP_MES_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MES_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MES_GP0_HI
+#define CP_MES_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MES_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MES_GP1_LO
+#define CP_MES_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MES_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP1_HI
+#define CP_MES_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MES_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP2_LO
+#define CP_MES_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MES_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP2_HI
+#define CP_MES_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MES_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP3_LO
+#define CP_MES_GP3_LO__DATA__SHIFT 0x0
+#define CP_MES_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP3_HI
+#define CP_MES_GP3_HI__DATA__SHIFT 0x0
+#define CP_MES_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP4_LO
+#define CP_MES_GP4_LO__DATA__SHIFT 0x0
+#define CP_MES_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP4_HI
+#define CP_MES_GP4_HI__DATA__SHIFT 0x0
+#define CP_MES_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP5_LO
+#define CP_MES_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MES_GP5_LO__DATA__SHIFT 0x1
+#define CP_MES_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MES_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MES_GP5_HI
+#define CP_MES_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MES_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MES_GP6_LO
+#define CP_MES_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MES_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP6_HI
+#define CP_MES_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MES_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP7_LO
+#define CP_MES_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MES_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP7_HI
+#define CP_MES_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MES_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP8_LO
+#define CP_MES_GP8_LO__DATA__SHIFT 0x0
+#define CP_MES_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP8_HI
+#define CP_MES_GP8_HI__DATA__SHIFT 0x0
+#define CP_MES_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP9_LO
+#define CP_MES_GP9_LO__DATA__SHIFT 0x0
+#define CP_MES_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP9_HI
+#define CP_MES_GP9_HI__DATA__SHIFT 0x0
+#define CP_MES_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_LOCAL_BASE0_LO
+#define CP_MES_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_MES_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_BASE0_HI
+#define CP_MES_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_MES_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_MASK0_LO
+#define CP_MES_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_MES_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_MASK0_HI
+#define CP_MES_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_MES_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_APERTURE
+#define CP_MES_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_INSTR_BASE_LO
+#define CP_MES_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_INSTR_BASE_HI
+#define CP_MES_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_INSTR_MASK_LO
+#define CP_MES_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_MES_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_INSTR_MASK_HI
+#define CP_MES_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_INSTR_APERTURE
+#define CP_MES_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_SCRATCH_APERTURE
+#define CP_MES_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_SCRATCH_BASE_LO
+#define CP_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_SCRATCH_BASE_HI
+#define CP_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_PERFCOUNT_CNTL
+#define CP_MES_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define CP_MES_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//CP_MES_PENDING_INTERRUPT
+#define CP_MES_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_MES_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_MES_PRGRM_CNTR_START_HI
+#define CP_MES_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_MES_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MES_INTERRUPT_DATA_16
+#define CP_MES_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_17
+#define CP_MES_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_18
+#define CP_MES_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_19
+#define CP_MES_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_20
+#define CP_MES_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_21
+#define CP_MES_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_22
+#define CP_MES_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_23
+#define CP_MES_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_24
+#define CP_MES_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_25
+#define CP_MES_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_26
+#define CP_MES_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_27
+#define CP_MES_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_28
+#define CP_MES_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_29
+#define CP_MES_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_30
+#define CP_MES_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_31
+#define CP_MES_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_BASE
+#define CP_MES_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_MASK
+#define CP_MES_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_CNTL
+#define CP_MES_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE1_BASE
+#define CP_MES_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE1_MASK
+#define CP_MES_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE1_CNTL
+#define CP_MES_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE2_BASE
+#define CP_MES_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE2_MASK
+#define CP_MES_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE2_CNTL
+#define CP_MES_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE3_BASE
+#define CP_MES_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE3_MASK
+#define CP_MES_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE3_CNTL
+#define CP_MES_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE4_BASE
+#define CP_MES_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE4_MASK
+#define CP_MES_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE4_CNTL
+#define CP_MES_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE5_BASE
+#define CP_MES_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE5_MASK
+#define CP_MES_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE5_CNTL
+#define CP_MES_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE6_BASE
+#define CP_MES_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE6_MASK
+#define CP_MES_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE6_CNTL
+#define CP_MES_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE7_BASE
+#define CP_MES_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE7_MASK
+#define CP_MES_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE7_CNTL
+#define CP_MES_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE8_BASE
+#define CP_MES_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE8_MASK
+#define CP_MES_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE8_CNTL
+#define CP_MES_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE9_BASE
+#define CP_MES_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE9_MASK
+#define CP_MES_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE9_CNTL
+#define CP_MES_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE10_BASE
+#define CP_MES_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE10_MASK
+#define CP_MES_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE10_CNTL
+#define CP_MES_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE11_BASE
+#define CP_MES_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE11_MASK
+#define CP_MES_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE11_CNTL
+#define CP_MES_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE12_BASE
+#define CP_MES_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE12_MASK
+#define CP_MES_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE12_CNTL
+#define CP_MES_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE13_BASE
+#define CP_MES_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE13_MASK
+#define CP_MES_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE13_CNTL
+#define CP_MES_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE14_BASE
+#define CP_MES_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE14_MASK
+#define CP_MES_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE14_CNTL
+#define CP_MES_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE15_BASE
+#define CP_MES_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE15_MASK
+#define CP_MES_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE15_CNTL
+#define CP_MES_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_RS64_PRGRM_CNTR_START
+#define CP_MEC_RS64_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC_RS64_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MEC_MTVEC_LO
+#define CP_MEC_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define CP_MEC_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MTVEC_HI
+#define CP_MEC_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define CP_MEC_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_ISA_CNTL
+#define CP_MEC_ISA_CNTL__ISA_MODE__SHIFT 0x0
+#define CP_MEC_ISA_CNTL__ISA_MODE_MASK 0x00000001L
+//CP_MEC_RS64_CNTL
+#define CP_MEC_RS64_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_ACTIVE__SHIFT 0x1a
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_ACTIVE__SHIFT 0x1b
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_ACTIVE__SHIFT 0x1c
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_ACTIVE__SHIFT 0x1d
+#define CP_MEC_RS64_CNTL__MEC_HALT__SHIFT 0x1e
+#define CP_MEC_RS64_CNTL__MEC_STEP__SHIFT 0x1f
+#define CP_MEC_RS64_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_ACTIVE_MASK 0x04000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_ACTIVE_MASK 0x08000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_ACTIVE_MASK 0x10000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_ACTIVE_MASK 0x20000000L
+#define CP_MEC_RS64_CNTL__MEC_HALT_MASK 0x40000000L
+#define CP_MEC_RS64_CNTL__MEC_STEP_MASK 0x80000000L
+//CP_MEC_MIE_LO
+#define CP_MEC_MIE_LO__MEC_INT__SHIFT 0x0
+#define CP_MEC_MIE_LO__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_MIE_HI
+#define CP_MEC_MIE_HI__MEC_INT__SHIFT 0x0
+#define CP_MEC_MIE_HI__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT
+#define CP_MEC_RS64_INTERRUPT__MEC_INT__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INSTR_PNTR
+#define CP_MEC_RS64_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC_RS64_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_MEC_MIP_LO
+#define CP_MEC_MIP_LO__MIP_LO__SHIFT 0x0
+#define CP_MEC_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MIP_HI
+#define CP_MEC_MIP_HI__MIP_HI__SHIFT 0x0
+#define CP_MEC_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//CP_MEC_DC_BASE_CNTL
+#define CP_MEC_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MEC_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MEC_DC_OP_CNTL
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_MEC_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_MEC_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+//CP_MEC_MTIMECMP_LO
+#define CP_MEC_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define CP_MEC_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MTIMECMP_HI
+#define CP_MEC_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define CP_MEC_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP0_LO
+#define CP_MEC_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MEC_GP0_LO__DATA__SHIFT 0x1
+#define CP_MEC_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MEC_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MEC_GP0_HI
+#define CP_MEC_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MEC_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_GP1_LO
+#define CP_MEC_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MEC_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP1_HI
+#define CP_MEC_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MEC_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP2_LO
+#define CP_MEC_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MEC_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP2_HI
+#define CP_MEC_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MEC_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP3_LO
+#define CP_MEC_GP3_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP3_HI
+#define CP_MEC_GP3_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP4_LO
+#define CP_MEC_GP4_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP4_HI
+#define CP_MEC_GP4_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP5_LO
+#define CP_MEC_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MEC_GP5_LO__DATA__SHIFT 0x1
+#define CP_MEC_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MEC_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MEC_GP5_HI
+#define CP_MEC_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MEC_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_GP6_LO
+#define CP_MEC_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MEC_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP6_HI
+#define CP_MEC_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MEC_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP7_LO
+#define CP_MEC_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MEC_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP7_HI
+#define CP_MEC_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MEC_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP8_LO
+#define CP_MEC_GP8_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP8_HI
+#define CP_MEC_GP8_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP9_LO
+#define CP_MEC_GP9_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP9_HI
+#define CP_MEC_GP9_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_LOCAL_BASE0_LO
+#define CP_MEC_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_BASE0_HI
+#define CP_MEC_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_MASK0_LO
+#define CP_MEC_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_MASK0_HI
+#define CP_MEC_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_APERTURE
+#define CP_MEC_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_INSTR_BASE_LO
+#define CP_MEC_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_INSTR_BASE_HI
+#define CP_MEC_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_INSTR_MASK_LO
+#define CP_MEC_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_INSTR_MASK_HI
+#define CP_MEC_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_INSTR_APERTURE
+#define CP_MEC_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_SCRATCH_APERTURE
+#define CP_MEC_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_SCRATCH_BASE_LO
+#define CP_MEC_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_SCRATCH_BASE_HI
+#define CP_MEC_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_RS64_PERFCOUNT_CNTL
+#define CP_MEC_RS64_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define CP_MEC_RS64_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//CP_MEC_RS64_PENDING_INTERRUPT
+#define CP_MEC_RS64_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_MEC_RS64_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_PRGRM_CNTR_START_HI
+#define CP_MEC_RS64_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_MEC_RS64_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_16
+#define CP_MEC_RS64_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_17
+#define CP_MEC_RS64_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_18
+#define CP_MEC_RS64_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_19
+#define CP_MEC_RS64_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_20
+#define CP_MEC_RS64_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_21
+#define CP_MEC_RS64_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_22
+#define CP_MEC_RS64_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_23
+#define CP_MEC_RS64_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_24
+#define CP_MEC_RS64_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_25
+#define CP_MEC_RS64_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_26
+#define CP_MEC_RS64_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_27
+#define CP_MEC_RS64_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_28
+#define CP_MEC_RS64_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_29
+#define CP_MEC_RS64_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_30
+#define CP_MEC_RS64_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_31
+#define CP_MEC_RS64_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_BASE
+#define CP_MEC_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_MASK
+#define CP_MEC_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_CNTL
+#define CP_MEC_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE1_BASE
+#define CP_MEC_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE1_MASK
+#define CP_MEC_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE1_CNTL
+#define CP_MEC_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE2_BASE
+#define CP_MEC_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE2_MASK
+#define CP_MEC_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE2_CNTL
+#define CP_MEC_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE3_BASE
+#define CP_MEC_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE3_MASK
+#define CP_MEC_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE3_CNTL
+#define CP_MEC_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE4_BASE
+#define CP_MEC_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE4_MASK
+#define CP_MEC_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE4_CNTL
+#define CP_MEC_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE5_BASE
+#define CP_MEC_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE5_MASK
+#define CP_MEC_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE5_CNTL
+#define CP_MEC_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE6_BASE
+#define CP_MEC_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE6_MASK
+#define CP_MEC_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE6_CNTL
+#define CP_MEC_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE7_BASE
+#define CP_MEC_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE7_MASK
+#define CP_MEC_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE7_CNTL
+#define CP_MEC_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE8_BASE
+#define CP_MEC_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE8_MASK
+#define CP_MEC_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE8_CNTL
+#define CP_MEC_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE9_BASE
+#define CP_MEC_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE9_MASK
+#define CP_MEC_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE9_CNTL
+#define CP_MEC_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE10_BASE
+#define CP_MEC_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE10_MASK
+#define CP_MEC_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE10_CNTL
+#define CP_MEC_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE11_BASE
+#define CP_MEC_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE11_MASK
+#define CP_MEC_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE11_CNTL
+#define CP_MEC_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE12_BASE
+#define CP_MEC_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE12_MASK
+#define CP_MEC_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE12_CNTL
+#define CP_MEC_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE13_BASE
+#define CP_MEC_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE13_MASK
+#define CP_MEC_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE13_CNTL
+#define CP_MEC_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE14_BASE
+#define CP_MEC_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE14_MASK
+#define CP_MEC_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE14_CNTL
+#define CP_MEC_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE15_BASE
+#define CP_MEC_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE15_MASK
+#define CP_MEC_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE15_CNTL
+#define CP_MEC_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_CPC_IC_OP_CNTL
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_GFX_CNTL
+#define CP_GFX_CNTL__ENGINE_SEL__SHIFT 0x0
+#define CP_GFX_CNTL__CONFIG__SHIFT 0x1
+#define CP_GFX_CNTL__ENGINE_SEL_MASK 0x00000001L
+#define CP_GFX_CNTL__CONFIG_MASK 0x00000006L
+//CP_GFX_RS64_INTERRUPT0
+#define CP_GFX_RS64_INTERRUPT0__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTERRUPT0__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INTR_EN0
+#define CP_GFX_RS64_INTR_EN0__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTR_EN0__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INTR_EN1
+#define CP_GFX_RS64_INTR_EN1__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTR_EN1__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_BASE_CNTL
+#define CP_GFX_RS64_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_RS64_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_GFX_RS64_DC_OP_CNTL
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_GFX_RS64_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_GFX_RS64_DC_OP_CNTL__RESERVED__SHIFT 0x3
+#define CP_GFX_RS64_DC_OP_CNTL__PRIME_DCACHE__SHIFT 0x4
+#define CP_GFX_RS64_DC_OP_CNTL__DCACHE_PRIMED__SHIFT 0x5
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_GFX_RS64_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+#define CP_GFX_RS64_DC_OP_CNTL__RESERVED_MASK 0x00000008L
+#define CP_GFX_RS64_DC_OP_CNTL__PRIME_DCACHE_MASK 0x00000010L
+#define CP_GFX_RS64_DC_OP_CNTL__DCACHE_PRIMED_MASK 0x00000020L
+//CP_GFX_RS64_LOCAL_BASE0_LO
+#define CP_GFX_RS64_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_BASE0_HI
+#define CP_GFX_RS64_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_MASK0_LO
+#define CP_GFX_RS64_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_MASK0_HI
+#define CP_GFX_RS64_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_APERTURE
+#define CP_GFX_RS64_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_INSTR_BASE_LO
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_INSTR_BASE_HI
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_INSTR_MASK_LO
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_INSTR_MASK_HI
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_INSTR_APERTURE
+#define CP_GFX_RS64_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_SCRATCH_APERTURE
+#define CP_GFX_RS64_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_PERFCOUNT_CNTL0
+#define CP_GFX_RS64_PERFCOUNT_CNTL0__EVENT_SEL__SHIFT 0x0
+#define CP_GFX_RS64_PERFCOUNT_CNTL0__EVENT_SEL_MASK 0x0000001FL
+//CP_GFX_RS64_PERFCOUNT_CNTL1
+#define CP_GFX_RS64_PERFCOUNT_CNTL1__EVENT_SEL__SHIFT 0x0
+#define CP_GFX_RS64_PERFCOUNT_CNTL1__EVENT_SEL_MASK 0x0000001FL
+//CP_GFX_RS64_MIP_LO0
+#define CP_GFX_RS64_MIP_LO0__MIP_LO__SHIFT 0x0
+#define CP_GFX_RS64_MIP_LO0__MIP_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_LO1
+#define CP_GFX_RS64_MIP_LO1__MIP_LO__SHIFT 0x0
+#define CP_GFX_RS64_MIP_LO1__MIP_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_HI0
+#define CP_GFX_RS64_MIP_HI0__MIP_HI__SHIFT 0x0
+#define CP_GFX_RS64_MIP_HI0__MIP_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_HI1
+#define CP_GFX_RS64_MIP_HI1__MIP_HI__SHIFT 0x0
+#define CP_GFX_RS64_MIP_HI1__MIP_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_LO0
+#define CP_GFX_RS64_MTIMECMP_LO0__TIME_LO__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_LO0__TIME_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_LO1
+#define CP_GFX_RS64_MTIMECMP_LO1__TIME_LO__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_LO1__TIME_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_HI0
+#define CP_GFX_RS64_MTIMECMP_HI0__TIME_HI__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_HI0__TIME_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_HI1
+#define CP_GFX_RS64_MTIMECMP_HI1__TIME_HI__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_HI1__TIME_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP0_LO0
+#define CP_GFX_RS64_GP0_LO0__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP0_LO0__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP0_LO0__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP0_LO0__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP0_LO1
+#define CP_GFX_RS64_GP0_LO1__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP0_LO1__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP0_LO1__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP0_LO1__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP0_HI0
+#define CP_GFX_RS64_GP0_HI0__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP0_HI0__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP0_HI1
+#define CP_GFX_RS64_GP0_HI1__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP0_HI1__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_LO0
+#define CP_GFX_RS64_GP1_LO0__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP1_LO0__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_LO1
+#define CP_GFX_RS64_GP1_LO1__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP1_LO1__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_HI0
+#define CP_GFX_RS64_GP1_HI0__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP1_HI0__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_HI1
+#define CP_GFX_RS64_GP1_HI1__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP1_HI1__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_LO0
+#define CP_GFX_RS64_GP2_LO0__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP2_LO0__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_LO1
+#define CP_GFX_RS64_GP2_LO1__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP2_LO1__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_HI0
+#define CP_GFX_RS64_GP2_HI0__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP2_HI0__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_HI1
+#define CP_GFX_RS64_GP2_HI1__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP2_HI1__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_LO0
+#define CP_GFX_RS64_GP3_LO0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_LO0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_LO1
+#define CP_GFX_RS64_GP3_LO1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_LO1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_HI0
+#define CP_GFX_RS64_GP3_HI0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_HI0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_HI1
+#define CP_GFX_RS64_GP3_HI1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_HI1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_LO0
+#define CP_GFX_RS64_GP4_LO0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_LO0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_LO1
+#define CP_GFX_RS64_GP4_LO1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_LO1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_HI0
+#define CP_GFX_RS64_GP4_HI0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_HI0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_HI1
+#define CP_GFX_RS64_GP4_HI1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_HI1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP5_LO0
+#define CP_GFX_RS64_GP5_LO0__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP5_LO0__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP5_LO0__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP5_LO0__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP5_LO1
+#define CP_GFX_RS64_GP5_LO1__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP5_LO1__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP5_LO1__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP5_LO1__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP5_HI0
+#define CP_GFX_RS64_GP5_HI0__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP5_HI0__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP5_HI1
+#define CP_GFX_RS64_GP5_HI1__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP5_HI1__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP6_LO
+#define CP_GFX_RS64_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP6_HI
+#define CP_GFX_RS64_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP7_LO
+#define CP_GFX_RS64_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP7_HI
+#define CP_GFX_RS64_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP8_LO
+#define CP_GFX_RS64_GP8_LO__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP8_HI
+#define CP_GFX_RS64_GP8_HI__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP9_LO
+#define CP_GFX_RS64_GP9_LO__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP9_HI
+#define CP_GFX_RS64_GP9_HI__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INSTR_PNTR0
+#define CP_GFX_RS64_INSTR_PNTR0__INSTR_PNTR__SHIFT 0x0
+#define CP_GFX_RS64_INSTR_PNTR0__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_GFX_RS64_INSTR_PNTR1
+#define CP_GFX_RS64_INSTR_PNTR1__INSTR_PNTR__SHIFT 0x0
+#define CP_GFX_RS64_INSTR_PNTR1__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_GFX_RS64_PENDING_INTERRUPT0
+#define CP_GFX_RS64_PENDING_INTERRUPT0__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_GFX_RS64_PENDING_INTERRUPT0__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_PENDING_INTERRUPT1
+#define CP_GFX_RS64_PENDING_INTERRUPT1__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_GFX_RS64_PENDING_INTERRUPT1__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_BASE0
+#define CP_GFX_RS64_DC_APERTURE0_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_MASK0
+#define CP_GFX_RS64_DC_APERTURE0_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_CNTL0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE1_BASE0
+#define CP_GFX_RS64_DC_APERTURE1_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_MASK0
+#define CP_GFX_RS64_DC_APERTURE1_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_CNTL0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE2_BASE0
+#define CP_GFX_RS64_DC_APERTURE2_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_MASK0
+#define CP_GFX_RS64_DC_APERTURE2_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_CNTL0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE3_BASE0
+#define CP_GFX_RS64_DC_APERTURE3_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_MASK0
+#define CP_GFX_RS64_DC_APERTURE3_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_CNTL0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE4_BASE0
+#define CP_GFX_RS64_DC_APERTURE4_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_MASK0
+#define CP_GFX_RS64_DC_APERTURE4_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_CNTL0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE5_BASE0
+#define CP_GFX_RS64_DC_APERTURE5_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_MASK0
+#define CP_GFX_RS64_DC_APERTURE5_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_CNTL0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE6_BASE0
+#define CP_GFX_RS64_DC_APERTURE6_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_MASK0
+#define CP_GFX_RS64_DC_APERTURE6_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_CNTL0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE7_BASE0
+#define CP_GFX_RS64_DC_APERTURE7_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_MASK0
+#define CP_GFX_RS64_DC_APERTURE7_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_CNTL0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE8_BASE0
+#define CP_GFX_RS64_DC_APERTURE8_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_MASK0
+#define CP_GFX_RS64_DC_APERTURE8_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_CNTL0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE9_BASE0
+#define CP_GFX_RS64_DC_APERTURE9_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_MASK0
+#define CP_GFX_RS64_DC_APERTURE9_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_CNTL0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE10_BASE0
+#define CP_GFX_RS64_DC_APERTURE10_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_MASK0
+#define CP_GFX_RS64_DC_APERTURE10_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_CNTL0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE11_BASE0
+#define CP_GFX_RS64_DC_APERTURE11_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_MASK0
+#define CP_GFX_RS64_DC_APERTURE11_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_CNTL0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE12_BASE0
+#define CP_GFX_RS64_DC_APERTURE12_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_MASK0
+#define CP_GFX_RS64_DC_APERTURE12_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_CNTL0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE13_BASE0
+#define CP_GFX_RS64_DC_APERTURE13_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_MASK0
+#define CP_GFX_RS64_DC_APERTURE13_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_CNTL0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE14_BASE0
+#define CP_GFX_RS64_DC_APERTURE14_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_MASK0
+#define CP_GFX_RS64_DC_APERTURE14_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_CNTL0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE15_BASE0
+#define CP_GFX_RS64_DC_APERTURE15_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_MASK0
+#define CP_GFX_RS64_DC_APERTURE15_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_CNTL0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE0_BASE1
+#define CP_GFX_RS64_DC_APERTURE0_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_MASK1
+#define CP_GFX_RS64_DC_APERTURE0_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_CNTL1
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE1_BASE1
+#define CP_GFX_RS64_DC_APERTURE1_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_MASK1
+#define CP_GFX_RS64_DC_APERTURE1_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_CNTL1
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE2_BASE1
+#define CP_GFX_RS64_DC_APERTURE2_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_MASK1
+#define CP_GFX_RS64_DC_APERTURE2_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_CNTL1
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE3_BASE1
+#define CP_GFX_RS64_DC_APERTURE3_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_MASK1
+#define CP_GFX_RS64_DC_APERTURE3_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_CNTL1
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE4_BASE1
+#define CP_GFX_RS64_DC_APERTURE4_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_MASK1
+#define CP_GFX_RS64_DC_APERTURE4_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_CNTL1
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE5_BASE1
+#define CP_GFX_RS64_DC_APERTURE5_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_MASK1
+#define CP_GFX_RS64_DC_APERTURE5_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_CNTL1
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE6_BASE1
+#define CP_GFX_RS64_DC_APERTURE6_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_MASK1
+#define CP_GFX_RS64_DC_APERTURE6_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_CNTL1
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE7_BASE1
+#define CP_GFX_RS64_DC_APERTURE7_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_MASK1
+#define CP_GFX_RS64_DC_APERTURE7_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_CNTL1
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE8_BASE1
+#define CP_GFX_RS64_DC_APERTURE8_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_MASK1
+#define CP_GFX_RS64_DC_APERTURE8_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_CNTL1
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE9_BASE1
+#define CP_GFX_RS64_DC_APERTURE9_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_MASK1
+#define CP_GFX_RS64_DC_APERTURE9_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_CNTL1
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE10_BASE1
+#define CP_GFX_RS64_DC_APERTURE10_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_MASK1
+#define CP_GFX_RS64_DC_APERTURE10_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_CNTL1
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE11_BASE1
+#define CP_GFX_RS64_DC_APERTURE11_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_MASK1
+#define CP_GFX_RS64_DC_APERTURE11_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_CNTL1
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE12_BASE1
+#define CP_GFX_RS64_DC_APERTURE12_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_MASK1
+#define CP_GFX_RS64_DC_APERTURE12_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_CNTL1
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE13_BASE1
+#define CP_GFX_RS64_DC_APERTURE13_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_MASK1
+#define CP_GFX_RS64_DC_APERTURE13_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_CNTL1
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE14_BASE1
+#define CP_GFX_RS64_DC_APERTURE14_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_MASK1
+#define CP_GFX_RS64_DC_APERTURE14_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_CNTL1
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE15_BASE1
+#define CP_GFX_RS64_DC_APERTURE15_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_MASK1
+#define CP_GFX_RS64_DC_APERTURE15_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_CNTL1
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_INTERRUPT1
+#define CP_GFX_RS64_INTERRUPT1__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTERRUPT1__ME_INT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gl1dec
+//GL1_ARB_CTRL
+#define GL1_ARB_CTRL__NUM_MEM_PIPES__SHIFT 0x0
+#define GL1_ARB_CTRL__FGCG_DISABLE__SHIFT 0x2
+#define GL1_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x3
+#define GL1_ARB_CTRL__CHICKEN_BITS__SHIFT 0x4
+#define GL1_ARB_CTRL__NUM_MEM_PIPES_MASK 0x00000003L
+#define GL1_ARB_CTRL__FGCG_DISABLE_MASK 0x00000004L
+#define GL1_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000008L
+#define GL1_ARB_CTRL__CHICKEN_BITS_MASK 0x00000FF0L
+//GL1_DRAM_BURST_MASK
+#define GL1_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK__SHIFT 0x0
+#define GL1_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK_MASK 0x000000FFL
+//GL1_ARB_STATUS
+#define GL1_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define GL1_ARB_STATUS__RET_ARB_BUSY__SHIFT 0x1
+#define GL1_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define GL1_ARB_STATUS__RET_ARB_BUSY_MASK 0x00000002L
+//GL1_DRAM_BURST_CTRL
+#define GL1_DRAM_BURST_CTRL__MAX_DRAM_BURST__SHIFT 0x0
+#define GL1_DRAM_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define GL1_DRAM_BURST_CTRL__GATHER_64B_BURST_DISABLE__SHIFT 0x4
+#define GL1_DRAM_BURST_CTRL__GATHER_32B_BURST_DISABLE__SHIFT 0x5
+#define GL1_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE__SHIFT 0x8
+#define GL1_DRAM_BURST_CTRL__MAX_DRAM_BURST_MASK 0x00000007L
+#define GL1_DRAM_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define GL1_DRAM_BURST_CTRL__GATHER_64B_BURST_DISABLE_MASK 0x00000010L
+#define GL1_DRAM_BURST_CTRL__GATHER_32B_BURST_DISABLE_MASK 0x00000020L
+#define GL1_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE_MASK 0x00000100L
+//GL1I_GL1R_REP_FGCG_OVERRIDE
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IR_REP_FGCG_OVERRIDE__SHIFT 0x0
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IW_REP_FGCG_OVERRIDE__SHIFT 0x1
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_SRC_REP_FGCG_OVERRIDE__SHIFT 0x2
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_RET_REP_FGCG_OVERRIDE__SHIFT 0x3
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IR_REP_FGCG_OVERRIDE_MASK 0x00000001L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IW_REP_FGCG_OVERRIDE_MASK 0x00000002L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_SRC_REP_FGCG_OVERRIDE_MASK 0x00000004L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_RET_REP_FGCG_OVERRIDE_MASK 0x00000008L
+//GL1C_CTRL
+#define GL1C_CTRL__FORCE_MISS__SHIFT 0x0
+#define GL1C_CTRL__FORCE_HIT__SHIFT 0x1
+#define GL1C_CTRL__NOFILL_32B__SHIFT 0x2
+#define GL1C_CTRL__NOFILL_64B__SHIFT 0x3
+#define GL1C_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x4
+#define GL1C_CTRL__ACK_QUEUE_DISABLE__SHIFT 0x8
+#define GL1C_CTRL__RMI_META_READ_MISS_QUEUE_DISABLE__SHIFT 0x9
+#define GL1C_CTRL__HIT_QUEUE_DISABLE__SHIFT 0xa
+#define GL1C_CTRL__GL2_REQ_CREDITS__SHIFT 0xb
+#define GL1C_CTRL__GL2_DATA_CREDITS__SHIFT 0x12
+#define GL1C_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x19
+#define GL1C_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x1a
+#define GL1C_CTRL__GCR_RSP_FGCG_DISABLE__SHIFT 0x1b
+#define GL1C_CTRL__DISABLE_HASH_TO_UPPER_16_SETS__SHIFT 0x1c
+#define GL1C_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT__SHIFT 0x1d
+#define GL1C_CTRL__DISABLE_PERF_SPLIT_EVICT_WRITE__SHIFT 0x1e
+#define GL1C_CTRL__FORCE_MISS_MASK 0x00000001L
+#define GL1C_CTRL__FORCE_HIT_MASK 0x00000002L
+#define GL1C_CTRL__NOFILL_32B_MASK 0x00000004L
+#define GL1C_CTRL__NOFILL_64B_MASK 0x00000008L
+#define GL1C_CTRL__LATENCY_FIFO_SIZE_MASK 0x000000F0L
+#define GL1C_CTRL__ACK_QUEUE_DISABLE_MASK 0x00000100L
+#define GL1C_CTRL__RMI_META_READ_MISS_QUEUE_DISABLE_MASK 0x00000200L
+#define GL1C_CTRL__HIT_QUEUE_DISABLE_MASK 0x00000400L
+#define GL1C_CTRL__GL2_REQ_CREDITS_MASK 0x0003F800L
+#define GL1C_CTRL__GL2_DATA_CREDITS_MASK 0x01FC0000L
+#define GL1C_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x02000000L
+#define GL1C_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x04000000L
+#define GL1C_CTRL__GCR_RSP_FGCG_DISABLE_MASK 0x08000000L
+#define GL1C_CTRL__DISABLE_HASH_TO_UPPER_16_SETS_MASK 0x10000000L
+#define GL1C_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT_MASK 0x20000000L
+#define GL1C_CTRL__DISABLE_PERF_SPLIT_EVICT_WRITE_MASK 0x40000000L
+//GL1C_STATUS
+#define GL1C_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define GL1C_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define GL1C_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define GL1C_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define GL1C_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define GL1C_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define GL1C_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define GL1C_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define GL1C_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define GL1C_STATUS__LATENCY_FIFO_FULL_STALL__SHIFT 0x14
+#define GL1C_STATUS__TAG_STALL__SHIFT 0x15
+#define GL1C_STATUS__TAG_BUSY__SHIFT 0x16
+#define GL1C_STATUS__TAG_ACK_STALL__SHIFT 0x17
+#define GL1C_STATUS__TAG_GCR_INV_STALL__SHIFT 0x18
+#define GL1C_STATUS__TAG_NO_AVAILABLE_LINE_TO_EVICT_STALL__SHIFT 0x19
+#define GL1C_STATUS__TAG_EVICT__SHIFT 0x1a
+#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION__SHIFT 0x1b
+#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET__SHIFT 0x1f
+#define GL1C_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define GL1C_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define GL1C_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define GL1C_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define GL1C_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define GL1C_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define GL1C_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define GL1C_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define GL1C_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define GL1C_STATUS__LATENCY_FIFO_FULL_STALL_MASK 0x00100000L
+#define GL1C_STATUS__TAG_STALL_MASK 0x00200000L
+#define GL1C_STATUS__TAG_BUSY_MASK 0x00400000L
+#define GL1C_STATUS__TAG_ACK_STALL_MASK 0x00800000L
+#define GL1C_STATUS__TAG_GCR_INV_STALL_MASK 0x01000000L
+#define GL1C_STATUS__TAG_NO_AVAILABLE_LINE_TO_EVICT_STALL_MASK 0x02000000L
+#define GL1C_STATUS__TAG_EVICT_MASK 0x04000000L
+#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION_MASK 0x78000000L
+#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET_MASK 0x80000000L
+//GL1C_UTCL0_CNTL2
+#define GL1C_UTCL0_CNTL2__SPARE__SHIFT 0x0
+#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE__SHIFT 0x8
+#define GL1C_UTCL0_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define GL1C_UTCL0_CNTL2__ANY_LINE_VALID__SHIFT 0xa
+#define GL1C_UTCL0_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define GL1C_UTCL0_CNTL2__DISABLE_BURST__SHIFT 0x11
+#define GL1C_UTCL0_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define GL1C_UTCL0_CNTL2__FGCG_DISABLE__SHIFT 0x1e
+#define GL1C_UTCL0_CNTL2__BIG_PAGE_DISABLE__SHIFT 0x1f
+#define GL1C_UTCL0_CNTL2__SPARE_MASK 0x000000FFL
+#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE_MASK 0x00000100L
+#define GL1C_UTCL0_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define GL1C_UTCL0_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
+#define GL1C_UTCL0_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define GL1C_UTCL0_CNTL2__DISABLE_BURST_MASK 0x00020000L
+#define GL1C_UTCL0_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define GL1C_UTCL0_CNTL2__FGCG_DISABLE_MASK 0x40000000L
+#define GL1C_UTCL0_CNTL2__BIG_PAGE_DISABLE_MASK 0x80000000L
+//GL1C_UTCL0_STATUS
+#define GL1C_UTCL0_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define GL1C_UTCL0_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define GL1C_UTCL0_STATUS__PRT_DETECTED__SHIFT 0x2
+#define GL1C_UTCL0_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define GL1C_UTCL0_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define GL1C_UTCL0_STATUS__PRT_DETECTED_MASK 0x00000004L
+//GL1C_UTCL0_RETRY
+#define GL1C_UTCL0_RETRY__INCR__SHIFT 0x0
+#define GL1C_UTCL0_RETRY__COUNT__SHIFT 0x8
+#define GL1C_UTCL0_RETRY__INCR_MASK 0x000000FFL
+#define GL1C_UTCL0_RETRY__COUNT_MASK 0x00000F00L
+//GL1C_CTRL2
+#define GL1C_CTRL2__UTCL0_INFLIGHT_MAX__SHIFT 0x0
+#define GL1C_CTRL2__UTCL0_SD_SIDEBAND_IF_DISABLE__SHIFT 0x8
+#define GL1C_CTRL2__REDUCE_REQ_PROTECTION_LINE_LEVEL__SHIFT 0x9
+#define GL1C_CTRL2__UTCL0_INFLIGHT_MAX_MASK 0x000000FFL
+#define GL1C_CTRL2__UTCL0_SD_SIDEBAND_IF_DISABLE_MASK 0x00000100L
+#define GL1C_CTRL2__REDUCE_REQ_PROTECTION_LINE_LEVEL_MASK 0x00003E00L
+
+
+// addressBlock: gc_chdec
+//CH_ARB_CTRL
+#define CH_ARB_CTRL__NUM_MEM_PIPES__SHIFT 0x0
+#define CH_ARB_CTRL__UC_IO_WR_PATH__SHIFT 0x2
+#define CH_ARB_CTRL__FGCG_DISABLE__SHIFT 0x3
+#define CH_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x4
+#define CH_ARB_CTRL__CHICKEN_BITS__SHIFT 0x5
+#define CH_ARB_CTRL__NUM_MEM_PIPES_MASK 0x00000003L
+#define CH_ARB_CTRL__UC_IO_WR_PATH_MASK 0x00000004L
+#define CH_ARB_CTRL__FGCG_DISABLE_MASK 0x00000008L
+#define CH_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000010L
+#define CH_ARB_CTRL__CHICKEN_BITS_MASK 0x00001FE0L
+//CH_DRAM_BURST_MASK
+#define CH_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK__SHIFT 0x0
+#define CH_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK_MASK 0x000000FFL
+//CH_ARB_STATUS
+#define CH_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define CH_ARB_STATUS__RET_ARB_BUSY__SHIFT 0x1
+#define CH_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define CH_ARB_STATUS__RET_ARB_BUSY_MASK 0x00000002L
+//CH_DRAM_BURST_CTRL
+#define CH_DRAM_BURST_CTRL__MAX_DRAM_BURST__SHIFT 0x0
+#define CH_DRAM_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define CH_DRAM_BURST_CTRL__GATHER_64B_MEMORY_BURST_DISABLE__SHIFT 0x4
+#define CH_DRAM_BURST_CTRL__GATHER_64B_IO_BURST_DISABLE__SHIFT 0x5
+#define CH_DRAM_BURST_CTRL__GATHER_32B_MEMORY_BURST_DISABLE__SHIFT 0x6
+#define CH_DRAM_BURST_CTRL__GATHER_32B_IO_BURST_DISABLE__SHIFT 0x7
+#define CH_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE__SHIFT 0x8
+#define CH_DRAM_BURST_CTRL__MAX_DRAM_BURST_MASK 0x00000007L
+#define CH_DRAM_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define CH_DRAM_BURST_CTRL__GATHER_64B_MEMORY_BURST_DISABLE_MASK 0x00000010L
+#define CH_DRAM_BURST_CTRL__GATHER_64B_IO_BURST_DISABLE_MASK 0x00000020L
+#define CH_DRAM_BURST_CTRL__GATHER_32B_MEMORY_BURST_DISABLE_MASK 0x00000040L
+#define CH_DRAM_BURST_CTRL__GATHER_32B_IO_BURST_DISABLE_MASK 0x00000080L
+#define CH_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE_MASK 0x00000100L
+//CHA_CHC_CREDITS
+#define CHA_CHC_CREDITS__CHC_REQ_CREDITS__SHIFT 0x0
+#define CHA_CHC_CREDITS__CHCG_REQ_CREDITS__SHIFT 0x8
+#define CHA_CHC_CREDITS__CHC_REQ_CREDITS_MASK 0x000000FFL
+#define CHA_CHC_CREDITS__CHCG_REQ_CREDITS_MASK 0x0000FF00L
+//CHA_CLIENT_FREE_DELAY
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_0_FREE_DELAY__SHIFT 0x0
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_1_FREE_DELAY__SHIFT 0x3
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_2_FREE_DELAY__SHIFT 0x6
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_3_FREE_DELAY__SHIFT 0x9
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_4_FREE_DELAY__SHIFT 0xc
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_0_FREE_DELAY_MASK 0x00000007L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_1_FREE_DELAY_MASK 0x00000038L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_2_FREE_DELAY_MASK 0x000001C0L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_3_FREE_DELAY_MASK 0x00000E00L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_4_FREE_DELAY_MASK 0x00007000L
+//CHI_CHR_REP_FGCG_OVERRIDE
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIW_REP_FGCG_OVERRIDE__SHIFT 0x0
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIR_REP_FGCG_OVERRIDE__SHIFT 0x1
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_SRC_REP_FGCG_OVERRIDE__SHIFT 0x2
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_RET_REP_FGCG_OVERRIDE__SHIFT 0x3
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIW_REP_FGCG_OVERRIDE_MASK 0x00000001L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIR_REP_FGCG_OVERRIDE_MASK 0x00000002L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_SRC_REP_FGCG_OVERRIDE_MASK 0x00000004L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_RET_REP_FGCG_OVERRIDE_MASK 0x00000008L
+//CH_VC5_ENABLE
+#define CH_VC5_ENABLE__UTCL2_VC5_ENABLE__SHIFT 0x1
+#define CH_VC5_ENABLE__UTCL2_VC5_ENABLE_MASK 0x00000002L
+//CHC_CTRL
+#define CHC_CTRL__BUFFER_DEPTH_MAX__SHIFT 0x0
+#define CHC_CTRL__GL2_REQ_CREDITS__SHIFT 0x4
+#define CHC_CTRL__GL2_DATA_CREDITS__SHIFT 0xb
+#define CHC_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x12
+#define CHC_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x13
+#define CHC_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT__SHIFT 0x1d
+#define CHC_CTRL__BUFFER_DEPTH_MAX_MASK 0x0000000FL
+#define CHC_CTRL__GL2_REQ_CREDITS_MASK 0x000007F0L
+#define CHC_CTRL__GL2_DATA_CREDITS_MASK 0x0003F800L
+#define CHC_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x00040000L
+#define CHC_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x00080000L
+#define CHC_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT_MASK 0x20000000L
+//CHC_STATUS
+#define CHC_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define CHC_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define CHC_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define CHC_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define CHC_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define CHC_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define CHC_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define CHC_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define CHC_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define CHC_STATUS__VIRTUAL_FIFO_FULL_STALL__SHIFT 0x14
+#define CHC_STATUS__REQUEST_TRACKER_BUFFER_STALL__SHIFT 0x15
+#define CHC_STATUS__REQUEST_TRACKER_BUSY__SHIFT 0x16
+#define CHC_STATUS__BUFFER_FULL__SHIFT 0x17
+#define CHC_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define CHC_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define CHC_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define CHC_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define CHC_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define CHC_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define CHC_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define CHC_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define CHC_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define CHC_STATUS__VIRTUAL_FIFO_FULL_STALL_MASK 0x00100000L
+#define CHC_STATUS__REQUEST_TRACKER_BUFFER_STALL_MASK 0x00200000L
+#define CHC_STATUS__REQUEST_TRACKER_BUSY_MASK 0x00400000L
+#define CHC_STATUS__BUFFER_FULL_MASK 0x00800000L
+//CHCG_CTRL
+#define CHCG_CTRL__BUFFER_DEPTH_MAX__SHIFT 0x0
+#define CHCG_CTRL__VC0_BUFFER_DEPTH_MAX__SHIFT 0x4
+#define CHCG_CTRL__GL2_REQ_CREDITS__SHIFT 0x8
+#define CHCG_CTRL__GL2_DATA_CREDITS__SHIFT 0xf
+#define CHCG_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x16
+#define CHCG_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x17
+#define CHCG_CTRL__BUFFER_DEPTH_MAX_MASK 0x0000000FL
+#define CHCG_CTRL__VC0_BUFFER_DEPTH_MAX_MASK 0x000000F0L
+#define CHCG_CTRL__GL2_REQ_CREDITS_MASK 0x00007F00L
+#define CHCG_CTRL__GL2_DATA_CREDITS_MASK 0x003F8000L
+#define CHCG_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x00400000L
+#define CHCG_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x00800000L
+//CHCG_STATUS
+#define CHCG_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define CHCG_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define CHCG_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define CHCG_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define CHCG_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define CHCG_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define CHCG_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define CHCG_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define CHCG_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define CHCG_STATUS__VIRTUAL_FIFO_FULL_STALL__SHIFT 0x14
+#define CHCG_STATUS__REQUEST_TRACKER_BUFFER_STALL__SHIFT 0x15
+#define CHCG_STATUS__REQUEST_TRACKER_BUSY__SHIFT 0x16
+#define CHCG_STATUS__BUFFER_FULL__SHIFT 0x17
+#define CHCG_STATUS__INPUT_BUFFER_VC1_BUSY__SHIFT 0x18
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_BUSY__SHIFT 0x19
+#define CHCG_STATUS__INPUT_BUFFER_VC1_FIFO_FULL__SHIFT 0x1a
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_FULL__SHIFT 0x1b
+#define CHCG_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define CHCG_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define CHCG_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define CHCG_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define CHCG_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define CHCG_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define CHCG_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define CHCG_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define CHCG_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define CHCG_STATUS__VIRTUAL_FIFO_FULL_STALL_MASK 0x00100000L
+#define CHCG_STATUS__REQUEST_TRACKER_BUFFER_STALL_MASK 0x00200000L
+#define CHCG_STATUS__REQUEST_TRACKER_BUSY_MASK 0x00400000L
+#define CHCG_STATUS__BUFFER_FULL_MASK 0x00800000L
+#define CHCG_STATUS__INPUT_BUFFER_VC1_BUSY_MASK 0x01000000L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_BUSY_MASK 0x02000000L
+#define CHCG_STATUS__INPUT_BUFFER_VC1_FIFO_FULL_MASK 0x04000000L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_FULL_MASK 0x08000000L
+
+
+// addressBlock: gc_gl2dec
+//GL2C_CTRL
+#define GL2C_CTRL__CACHE_SIZE__SHIFT 0x0
+#define GL2C_CTRL__RATE__SHIFT 0x2
+#define GL2C_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
+#define GL2C_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
+#define GL2C_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
+#define GL2C_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
+#define GL2C_CTRL__METADATA_TO_HI_PRIORITY__SHIFT 0x14
+#define GL2C_CTRL__LINEAR_SET_HASH__SHIFT 0x15
+#define GL2C_CTRL__FORCE_HIT_QUEUE_POP__SHIFT 0x16
+#define GL2C_CTRL__MDC_SIZE__SHIFT 0x18
+#define GL2C_CTRL__METADATA_TO_HIT_QUEUE__SHIFT 0x1a
+#define GL2C_CTRL__IGNORE_FULLY_WRITTEN__SHIFT 0x1b
+#define GL2C_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
+#define GL2C_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define GL2C_CTRL__RATE_MASK 0x0000000CL
+#define GL2C_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
+#define GL2C_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0x00000F00L
+#define GL2C_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
+#define GL2C_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
+#define GL2C_CTRL__METADATA_TO_HI_PRIORITY_MASK 0x00100000L
+#define GL2C_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
+#define GL2C_CTRL__FORCE_HIT_QUEUE_POP_MASK 0x00C00000L
+#define GL2C_CTRL__MDC_SIZE_MASK 0x03000000L
+#define GL2C_CTRL__METADATA_TO_HIT_QUEUE_MASK 0x04000000L
+#define GL2C_CTRL__IGNORE_FULLY_WRITTEN_MASK 0x08000000L
+#define GL2C_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xF0000000L
+//GL2C_CTRL2
+#define GL2C_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
+#define GL2C_CTRL2__ADDR_MATCH_DISABLE__SHIFT 0x4
+#define GL2C_CTRL2__FILL_SIZE_32__SHIFT 0x5
+#define GL2C_CTRL2__RB_TO_HI_PRIORITY__SHIFT 0x6
+#define GL2C_CTRL2__HIT_UNDER_MISS_DISABLE__SHIFT 0x7
+#define GL2C_CTRL2__RO_DISABLE__SHIFT 0x8
+#define GL2C_CTRL2__FORCE_MDC_INV__SHIFT 0x9
+#define GL2C_CTRL2__GCR_ARB_CTRL__SHIFT 0xa
+#define GL2C_CTRL2__GCR_ALL_SET__SHIFT 0xd
+#define GL2C_CTRL2__FILL_SIZE_64__SHIFT 0x11
+#define GL2C_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK__SHIFT 0x12
+#define GL2C_CTRL2__WRITEBACK_ALL_WAIT_FOR_ALL_EA_WRITE_COMPLETE__SHIFT 0x13
+#define GL2C_CTRL2__METADATA_VOLATILE_EN__SHIFT 0x14
+#define GL2C_CTRL2__RB_VOLATILE_EN__SHIFT 0x15
+#define GL2C_CTRL2__PROBE_UNSHARED_EN__SHIFT 0x16
+#define GL2C_CTRL2__MAX_MIN_CTRL__SHIFT 0x17
+#define GL2C_CTRL2__MDC_UC_TO_C_RO_EN__SHIFT 0x1a
+#define GL2C_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
+#define GL2C_CTRL2__ADDR_MATCH_DISABLE_MASK 0x00000010L
+#define GL2C_CTRL2__FILL_SIZE_32_MASK 0x00000020L
+#define GL2C_CTRL2__RB_TO_HI_PRIORITY_MASK 0x00000040L
+#define GL2C_CTRL2__HIT_UNDER_MISS_DISABLE_MASK 0x00000080L
+#define GL2C_CTRL2__RO_DISABLE_MASK 0x00000100L
+#define GL2C_CTRL2__FORCE_MDC_INV_MASK 0x00000200L
+#define GL2C_CTRL2__GCR_ARB_CTRL_MASK 0x00001C00L
+#define GL2C_CTRL2__GCR_ALL_SET_MASK 0x00002000L
+#define GL2C_CTRL2__FILL_SIZE_64_MASK 0x00020000L
+#define GL2C_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK_MASK 0x00040000L
+#define GL2C_CTRL2__WRITEBACK_ALL_WAIT_FOR_ALL_EA_WRITE_COMPLETE_MASK 0x00080000L
+#define GL2C_CTRL2__METADATA_VOLATILE_EN_MASK 0x00100000L
+#define GL2C_CTRL2__RB_VOLATILE_EN_MASK 0x00200000L
+#define GL2C_CTRL2__PROBE_UNSHARED_EN_MASK 0x00400000L
+#define GL2C_CTRL2__MAX_MIN_CTRL_MASK 0x01800000L
+#define GL2C_CTRL2__MDC_UC_TO_C_RO_EN_MASK 0x04000000L
+//GL2C_STATUS
+#define GL2C_STATUS__NONCACHEABLE_FLOAT_ATOMIC__SHIFT 0x0
+#define GL2C_STATUS__NONCACHEABLE_U8_ATOMIC__SHIFT 0x4
+#define GL2C_STATUS__NONCACHEABLE_CLAMP_SUB_ATOMIC__SHIFT 0x5
+#define GL2C_STATUS__WRRET_NACK_FAULT__SHIFT 0x6
+#define GL2C_STATUS__RDRET_NACK_FAULT__SHIFT 0x7
+#define GL2C_STATUS__METADATA_FED__SHIFT 0x8
+#define GL2C_STATUS__FED_FSM_STATE__SHIFT 0x9
+#define GL2C_STATUS__SAFE_MODE_FED__SHIFT 0xb
+#define GL2C_STATUS__DCC_OUT_INVALID_KEY_ERROR_CODE__SHIFT 0x12
+#define GL2C_STATUS__NONCACHEABLE_FLOAT_ATOMIC_MASK 0x00000001L
+#define GL2C_STATUS__NONCACHEABLE_U8_ATOMIC_MASK 0x00000010L
+#define GL2C_STATUS__NONCACHEABLE_CLAMP_SUB_ATOMIC_MASK 0x00000020L
+#define GL2C_STATUS__WRRET_NACK_FAULT_MASK 0x00000040L
+#define GL2C_STATUS__RDRET_NACK_FAULT_MASK 0x00000080L
+#define GL2C_STATUS__METADATA_FED_MASK 0x00000100L
+#define GL2C_STATUS__FED_FSM_STATE_MASK 0x00000600L
+#define GL2C_STATUS__SAFE_MODE_FED_MASK 0x00000800L
+#define GL2C_STATUS__DCC_OUT_INVALID_KEY_ERROR_CODE_MASK 0x007C0000L
+//GL2C_ADDR_MATCH_MASK
+#define GL2C_ADDR_MATCH_MASK__ADDR_MASK__SHIFT 0x0
+#define GL2C_ADDR_MATCH_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//GL2C_ADDR_MATCH_SIZE
+#define GL2C_ADDR_MATCH_SIZE__MAX_COUNT__SHIFT 0x0
+#define GL2C_ADDR_MATCH_SIZE__MAX_COUNT_MASK 0x00000007L
+//GL2C_WBINVL2
+#define GL2C_WBINVL2__DONE__SHIFT 0x4
+#define GL2C_WBINVL2__DONE_MASK 0x00000010L
+//GL2C_SOFT_RESET
+#define GL2C_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
+#define GL2C_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
+//GL2C_CM_CTRL0
+#define GL2C_CM_CTRL0__HASH_MASK__SHIFT 0x0
+#define GL2C_CM_CTRL0__HASH_MASK_MASK 0xFFFFFFFFL
+//GL2C_CM_CTRL1
+#define GL2C_CM_CTRL1__HASH_MASK__SHIFT 0x0
+#define GL2C_CM_CTRL1__BURST_TIMER__SHIFT 0x8
+#define GL2C_CM_CTRL1__RVF_SIZE__SHIFT 0x10
+#define GL2C_CM_CTRL1__WRITE_COH_MODE__SHIFT 0x17
+#define GL2C_CM_CTRL1__MDC_ARB_MODE__SHIFT 0x19
+#define GL2C_CM_CTRL1__READ_REQ_ONLY__SHIFT 0x1a
+#define GL2C_CM_CTRL1__COMP_TO_CONSTANT_EN__SHIFT 0x1b
+#define GL2C_CM_CTRL1__COMP_TO_SINGLE_EN__SHIFT 0x1c
+#define GL2C_CM_CTRL1__BURST_MODE__SHIFT 0x1d
+#define GL2C_CM_CTRL1__UNCOMP_READBACK_FILTER__SHIFT 0x1e
+#define GL2C_CM_CTRL1__WAIT_ATOMIC_RECOMP_WRITE__SHIFT 0x1f
+#define GL2C_CM_CTRL1__HASH_MASK_MASK 0x0000000FL
+#define GL2C_CM_CTRL1__BURST_TIMER_MASK 0x0000FF00L
+#define GL2C_CM_CTRL1__RVF_SIZE_MASK 0x000F0000L
+#define GL2C_CM_CTRL1__WRITE_COH_MODE_MASK 0x01800000L
+#define GL2C_CM_CTRL1__MDC_ARB_MODE_MASK 0x02000000L
+#define GL2C_CM_CTRL1__READ_REQ_ONLY_MASK 0x04000000L
+#define GL2C_CM_CTRL1__COMP_TO_CONSTANT_EN_MASK 0x08000000L
+#define GL2C_CM_CTRL1__COMP_TO_SINGLE_EN_MASK 0x10000000L
+#define GL2C_CM_CTRL1__BURST_MODE_MASK 0x20000000L
+#define GL2C_CM_CTRL1__UNCOMP_READBACK_FILTER_MASK 0x40000000L
+#define GL2C_CM_CTRL1__WAIT_ATOMIC_RECOMP_WRITE_MASK 0x80000000L
+//GL2C_CM_STALL
+#define GL2C_CM_STALL__QUEUE__SHIFT 0x0
+#define GL2C_CM_STALL__QUEUE_MASK 0xFFFFFFFFL
+//GL2C_CM_CTRL2
+#define GL2C_CM_CTRL2__READ_BURST_TIMER__SHIFT 0x0
+#define GL2C_CM_CTRL2__VRS_DISABLE__SHIFT 0x8
+#define GL2C_CM_CTRL2__SKIP_LOW_COMP_RATIO__SHIFT 0x9
+#define GL2C_CM_CTRL2__CM_NBC_IND64_DISABLE__SHIFT 0xa
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MODE__SHIFT 0xb
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_METADATA_WR_MODE__SHIFT 0xc
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MAX_UNCOMP_BLK_SZ_MODE__SHIFT 0xd
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_SECTOR_READBACK_MODE__SHIFT 0xf
+#define GL2C_CM_CTRL2__RECOMP_DISABLE__SHIFT 0x10
+#define GL2C_CM_CTRL2__DCC_COMP_KEY_ERROR_DETECTION_EN__SHIFT 0x11
+#define GL2C_CM_CTRL2__DCC_CLEAR_FRAG2DCC_KEY_ERROR_CODE__SHIFT 0x12
+#define GL2C_CM_CTRL2__READ_BURST_TIMER_MASK 0x000000FFL
+#define GL2C_CM_CTRL2__VRS_DISABLE_MASK 0x00000100L
+#define GL2C_CM_CTRL2__SKIP_LOW_COMP_RATIO_MASK 0x00000200L
+#define GL2C_CM_CTRL2__CM_NBC_IND64_DISABLE_MASK 0x00000400L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MODE_MASK 0x00000800L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_METADATA_WR_MODE_MASK 0x00001000L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MAX_UNCOMP_BLK_SZ_MODE_MASK 0x00006000L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_SECTOR_READBACK_MODE_MASK 0x00008000L
+#define GL2C_CM_CTRL2__RECOMP_DISABLE_MASK 0x00010000L
+#define GL2C_CM_CTRL2__DCC_COMP_KEY_ERROR_DETECTION_EN_MASK 0x00020000L
+#define GL2C_CM_CTRL2__DCC_CLEAR_FRAG2DCC_KEY_ERROR_CODE_MASK 0x00040000L
+//GL2C_CTRL3
+#define GL2C_CTRL3__METADATA_MTYPE_COHERENCY__SHIFT 0x0
+#define GL2C_CTRL3__METADATA_NOFILL__SHIFT 0x3
+#define GL2C_CTRL3__METADATA_NEXT_CL_PREFETCH__SHIFT 0x4
+#define GL2C_CTRL3__BANK_LINEAR_HASH_MODE__SHIFT 0x5
+#define GL2C_CTRL3__HTILE_TO_HI_PRIORITY__SHIFT 0x6
+#define GL2C_CTRL3__UNCACHED_WRITE_ATOMIC_TO_UC_WRITE__SHIFT 0x7
+#define GL2C_CTRL3__IO_CHANNEL_ENABLE__SHIFT 0x8
+#define GL2C_CTRL3__FMASK_TO_HI_PRIORITY__SHIFT 0x9
+#define GL2C_CTRL3__DCC_CMASK_TO_HI_PRIORITY__SHIFT 0xa
+#define GL2C_CTRL3__BANK_LINEAR_HASH_ENABLE__SHIFT 0xb
+#define GL2C_CTRL3__HASH_256B_ENABLE__SHIFT 0xc
+#define GL2C_CTRL3__DECOMP_NBC_IND64_DISABLE__SHIFT 0xd
+#define GL2C_CTRL3__FORCE_READ_ON_WRITE_OP__SHIFT 0xe
+#define GL2C_CTRL3__FGCG_OVERRIDE__SHIFT 0xf
+#define GL2C_CTRL3__FORCE_MTYPE_UC__SHIFT 0x10
+#define GL2C_CTRL3__DGPU_SHARED_MODE__SHIFT 0x11
+#define GL2C_CTRL3__WRITE_SET_SECTOR_FULLY_WRITTEN__SHIFT 0x12
+#define GL2C_CTRL3__EA_READ_SIZE_LIMIT__SHIFT 0x13
+#define GL2C_CTRL3__READ_BYPASS_AS_UC__SHIFT 0x14
+#define GL2C_CTRL3__WB_OPT_ENABLE__SHIFT 0x15
+#define GL2C_CTRL3__WB_OPT_BURST_MAX_COUNT__SHIFT 0x16
+#define GL2C_CTRL3__SET_GROUP_LINEAR_HASH_ENABLE__SHIFT 0x18
+#define GL2C_CTRL3__EA_GMI_DISABLE__SHIFT 0x19
+#define GL2C_CTRL3__SQC_TO_HI_PRIORITY__SHIFT 0x1a
+#define GL2C_CTRL3__INF_NAN_CLAMP__SHIFT 0x1b
+#define GL2C_CTRL3__SCRATCH__SHIFT 0x1c
+#define GL2C_CTRL3__METADATA_MTYPE_COHERENCY_MASK 0x00000003L
+#define GL2C_CTRL3__METADATA_NOFILL_MASK 0x00000008L
+#define GL2C_CTRL3__METADATA_NEXT_CL_PREFETCH_MASK 0x00000010L
+#define GL2C_CTRL3__BANK_LINEAR_HASH_MODE_MASK 0x00000020L
+#define GL2C_CTRL3__HTILE_TO_HI_PRIORITY_MASK 0x00000040L
+#define GL2C_CTRL3__UNCACHED_WRITE_ATOMIC_TO_UC_WRITE_MASK 0x00000080L
+#define GL2C_CTRL3__IO_CHANNEL_ENABLE_MASK 0x00000100L
+#define GL2C_CTRL3__FMASK_TO_HI_PRIORITY_MASK 0x00000200L
+#define GL2C_CTRL3__DCC_CMASK_TO_HI_PRIORITY_MASK 0x00000400L
+#define GL2C_CTRL3__BANK_LINEAR_HASH_ENABLE_MASK 0x00000800L
+#define GL2C_CTRL3__HASH_256B_ENABLE_MASK 0x00001000L
+#define GL2C_CTRL3__DECOMP_NBC_IND64_DISABLE_MASK 0x00002000L
+#define GL2C_CTRL3__FORCE_READ_ON_WRITE_OP_MASK 0x00004000L
+#define GL2C_CTRL3__FGCG_OVERRIDE_MASK 0x00008000L
+#define GL2C_CTRL3__FORCE_MTYPE_UC_MASK 0x00010000L
+#define GL2C_CTRL3__DGPU_SHARED_MODE_MASK 0x00020000L
+#define GL2C_CTRL3__WRITE_SET_SECTOR_FULLY_WRITTEN_MASK 0x00040000L
+#define GL2C_CTRL3__EA_READ_SIZE_LIMIT_MASK 0x00080000L
+#define GL2C_CTRL3__READ_BYPASS_AS_UC_MASK 0x00100000L
+#define GL2C_CTRL3__WB_OPT_ENABLE_MASK 0x00200000L
+#define GL2C_CTRL3__WB_OPT_BURST_MAX_COUNT_MASK 0x00C00000L
+#define GL2C_CTRL3__SET_GROUP_LINEAR_HASH_ENABLE_MASK 0x01000000L
+#define GL2C_CTRL3__EA_GMI_DISABLE_MASK 0x02000000L
+#define GL2C_CTRL3__SQC_TO_HI_PRIORITY_MASK 0x04000000L
+#define GL2C_CTRL3__INF_NAN_CLAMP_MASK 0x08000000L
+#define GL2C_CTRL3__SCRATCH_MASK 0xF0000000L
+//GL2C_LB_CTR_CTRL
+#define GL2C_LB_CTR_CTRL__START__SHIFT 0x0
+#define GL2C_LB_CTR_CTRL__LOAD__SHIFT 0x1
+#define GL2C_LB_CTR_CTRL__CLEAR__SHIFT 0x2
+#define GL2C_LB_CTR_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x1f
+#define GL2C_LB_CTR_CTRL__START_MASK 0x00000001L
+#define GL2C_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define GL2C_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+#define GL2C_LB_CTR_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x80000000L
+//GL2C_LB_DATA0
+#define GL2C_LB_DATA0__DATA__SHIFT 0x0
+#define GL2C_LB_DATA0__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA1
+#define GL2C_LB_DATA1__DATA__SHIFT 0x0
+#define GL2C_LB_DATA1__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA2
+#define GL2C_LB_DATA2__DATA__SHIFT 0x0
+#define GL2C_LB_DATA2__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA3
+#define GL2C_LB_DATA3__DATA__SHIFT 0x0
+#define GL2C_LB_DATA3__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_CTR_SEL0
+#define GL2C_LB_CTR_SEL0__SEL0__SHIFT 0x0
+#define GL2C_LB_CTR_SEL0__DIV0__SHIFT 0xf
+#define GL2C_LB_CTR_SEL0__SEL1__SHIFT 0x10
+#define GL2C_LB_CTR_SEL0__DIV1__SHIFT 0x1f
+#define GL2C_LB_CTR_SEL0__SEL0_MASK 0x000000FFL
+#define GL2C_LB_CTR_SEL0__DIV0_MASK 0x00008000L
+#define GL2C_LB_CTR_SEL0__SEL1_MASK 0x00FF0000L
+#define GL2C_LB_CTR_SEL0__DIV1_MASK 0x80000000L
+//GL2C_LB_CTR_SEL1
+#define GL2C_LB_CTR_SEL1__SEL2__SHIFT 0x0
+#define GL2C_LB_CTR_SEL1__DIV2__SHIFT 0xf
+#define GL2C_LB_CTR_SEL1__SEL3__SHIFT 0x10
+#define GL2C_LB_CTR_SEL1__DIV3__SHIFT 0x1f
+#define GL2C_LB_CTR_SEL1__SEL2_MASK 0x000000FFL
+#define GL2C_LB_CTR_SEL1__DIV2_MASK 0x00008000L
+#define GL2C_LB_CTR_SEL1__SEL3_MASK 0x00FF0000L
+#define GL2C_LB_CTR_SEL1__DIV3_MASK 0x80000000L
+//GL2C_CTRL4
+#define GL2C_CTRL4__METADATA_WR_OP_CID__SHIFT 0x0
+#define GL2C_CTRL4__SPA_CHANNEL_ENABLE__SHIFT 0x1
+#define GL2C_CTRL4__SRC_FIFO_MDC_LOW_PRIORITY__SHIFT 0x2
+#define GL2C_CTRL4__WRITEBACK_FIFO_STALL_ENABLE__SHIFT 0x3
+#define GL2C_CTRL4__CM_MGCG_MODE__SHIFT 0x4
+#define GL2C_CTRL4__MDC_MGCG_MODE__SHIFT 0x5
+#define GL2C_CTRL4__TAG_MGCG_MODE__SHIFT 0x6
+#define GL2C_CTRL4__CORE_MGCG_MODE__SHIFT 0x7
+#define GL2C_CTRL4__EXECUTE_MGCG_MODE__SHIFT 0x8
+#define GL2C_CTRL4__EA_NACK_DISABLE__SHIFT 0x9
+#define GL2C_CTRL4__FED_SAFE_MODE__SHIFT 0xa
+#define GL2C_CTRL4__FLUSH_SET_COUNTER_MASK_DISABLE__SHIFT 0xb
+#define GL2C_CTRL4__NO_WRITE_ACK_TO_HIT_QUEUE__SHIFT 0x1a
+#define GL2C_CTRL4__METADATA_WR_OP_CID_MASK 0x00000001L
+#define GL2C_CTRL4__SPA_CHANNEL_ENABLE_MASK 0x00000002L
+#define GL2C_CTRL4__SRC_FIFO_MDC_LOW_PRIORITY_MASK 0x00000004L
+#define GL2C_CTRL4__WRITEBACK_FIFO_STALL_ENABLE_MASK 0x00000008L
+#define GL2C_CTRL4__CM_MGCG_MODE_MASK 0x00000010L
+#define GL2C_CTRL4__MDC_MGCG_MODE_MASK 0x00000020L
+#define GL2C_CTRL4__TAG_MGCG_MODE_MASK 0x00000040L
+#define GL2C_CTRL4__CORE_MGCG_MODE_MASK 0x00000080L
+#define GL2C_CTRL4__EXECUTE_MGCG_MODE_MASK 0x00000100L
+#define GL2C_CTRL4__EA_NACK_DISABLE_MASK 0x00000200L
+#define GL2C_CTRL4__FED_SAFE_MODE_MASK 0x00000400L
+#define GL2C_CTRL4__FLUSH_SET_COUNTER_MASK_DISABLE_MASK 0x00000800L
+#define GL2C_CTRL4__NO_WRITE_ACK_TO_HIT_QUEUE_MASK 0x04000000L
+//GL2C_DISCARD_STALL_CTRL
+#define GL2C_DISCARD_STALL_CTRL__LIMIT__SHIFT 0x0
+#define GL2C_DISCARD_STALL_CTRL__WINDOW__SHIFT 0xf
+#define GL2C_DISCARD_STALL_CTRL__DROP_NEXT__SHIFT 0x1e
+#define GL2C_DISCARD_STALL_CTRL__ENABLE__SHIFT 0x1f
+#define GL2C_DISCARD_STALL_CTRL__LIMIT_MASK 0x00007FFFL
+#define GL2C_DISCARD_STALL_CTRL__WINDOW_MASK 0x3FFF8000L
+#define GL2C_DISCARD_STALL_CTRL__DROP_NEXT_MASK 0x40000000L
+#define GL2C_DISCARD_STALL_CTRL__ENABLE_MASK 0x80000000L
+//GL2A_ADDR_MATCH_CTRL
+#define GL2A_ADDR_MATCH_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_ADDR_MATCH_CTRL__DISABLE_MASK 0xFFFFFFFFL
+//GL2A_ADDR_MATCH_MASK
+#define GL2A_ADDR_MATCH_MASK__ADDR_MASK__SHIFT 0x0
+#define GL2A_ADDR_MATCH_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//GL2A_ADDR_MATCH_SIZE
+#define GL2A_ADDR_MATCH_SIZE__MAX_COUNT__SHIFT 0x0
+#define GL2A_ADDR_MATCH_SIZE__MAX_COUNT_MASK 0x00000007L
+//GL2A_PRIORITY_CTRL
+#define GL2A_PRIORITY_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_PRIORITY_CTRL__DISABLE_MASK 0xFFFFFFFFL
+//GL2A_CTRL
+#define GL2A_CTRL__RTN_ARB_TIMER_RESET_VALUE__SHIFT 0x0
+#define GL2A_CTRL__STAY_ON_BURST__SHIFT 0x1
+#define GL2A_CTRL__FGCG_OVERRIDE__SHIFT 0x2
+#define GL2A_CTRL__CLIENT_ARB_PRIO_STAY__SHIFT 0x3
+#define GL2A_CTRL__GCRD_CREDIT_SAFE_REG__SHIFT 0x4
+#define GL2A_CTRL__REQ_CREDIT_SAFE_REG__SHIFT 0x8
+#define GL2A_CTRL__WRITE_COMBINE_TIMEOUT_COUNT__SHIFT 0xc
+#define GL2A_CTRL__INTERNAL_RETURN_BYPASS_ENABLE__SHIFT 0x11
+#define GL2A_CTRL__ADDR_REMOVE_COLBITS__SHIFT 0x12
+#define GL2A_CTRL__RTN_ARB_TIMER_RESET_VALUE_MASK 0x00000001L
+#define GL2A_CTRL__STAY_ON_BURST_MASK 0x00000002L
+#define GL2A_CTRL__FGCG_OVERRIDE_MASK 0x00000004L
+#define GL2A_CTRL__CLIENT_ARB_PRIO_STAY_MASK 0x00000008L
+#define GL2A_CTRL__GCRD_CREDIT_SAFE_REG_MASK 0x000000F0L
+#define GL2A_CTRL__REQ_CREDIT_SAFE_REG_MASK 0x00000F00L
+#define GL2A_CTRL__WRITE_COMBINE_TIMEOUT_COUNT_MASK 0x0001F000L
+#define GL2A_CTRL__INTERNAL_RETURN_BYPASS_ENABLE_MASK 0x00020000L
+#define GL2A_CTRL__ADDR_REMOVE_COLBITS_MASK 0x00040000L
+//GL2A_RESP_THROTTLE_CTRL
+#define GL2A_RESP_THROTTLE_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_GL1__SHIFT 0x10
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_CH__SHIFT 0x18
+#define GL2A_RESP_THROTTLE_CTRL__DISABLE_MASK 0x0000FFFFL
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_GL1_MASK 0x00FF0000L
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_CH_MASK 0xFF000000L
+
+
+// addressBlock: gc_gl1hdec
+//GL1H_ARB_CTRL
+#define GL1H_ARB_CTRL__REQ_FGCG_DISABLE__SHIFT 0x0
+#define GL1H_ARB_CTRL__SRC_FGCG_DISABLE__SHIFT 0x1
+#define GL1H_ARB_CTRL__RET_FGCG_DISABLE__SHIFT 0x2
+#define GL1H_ARB_CTRL__CHICKEN_BITS__SHIFT 0x3
+#define GL1H_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0xb
+#define GL1H_ARB_CTRL__REQ_FGCG_DISABLE_MASK 0x00000001L
+#define GL1H_ARB_CTRL__SRC_FGCG_DISABLE_MASK 0x00000002L
+#define GL1H_ARB_CTRL__RET_FGCG_DISABLE_MASK 0x00000004L
+#define GL1H_ARB_CTRL__CHICKEN_BITS_MASK 0x000007F8L
+#define GL1H_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000800L
+//GL1H_GL1_CREDITS
+#define GL1H_GL1_CREDITS__GL1_REQ_CREDITS__SHIFT 0x0
+#define GL1H_GL1_CREDITS__GL1_REQ_CREDITS_MASK 0x000000FFL
+//GL1H_BURST_MASK
+#define GL1H_BURST_MASK__BURST_ADDR_MASK__SHIFT 0x0
+#define GL1H_BURST_MASK__BURST_ADDR_MASK_MASK 0x000000FFL
+//GL1H_BURST_CTRL
+#define GL1H_BURST_CTRL__MAX_BURST_SIZE__SHIFT 0x0
+#define GL1H_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define GL1H_BURST_CTRL__SPARE_BURST_CTRL_BITS__SHIFT 0x4
+#define GL1H_BURST_CTRL__MAX_BURST_SIZE_MASK 0x00000007L
+#define GL1H_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define GL1H_BURST_CTRL__SPARE_BURST_CTRL_BITS_MASK 0x00000030L
+//GL1H_ARB_STATUS
+#define GL1H_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define GL1H_ARB_STATUS__CLIENT1_ILLEGAL_REQ__SHIFT 0x1
+#define GL1H_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define GL1H_ARB_STATUS__CLIENT1_ILLEGAL_REQ_MASK 0x00000002L
+
+
+// addressBlock: gc_perfddec
+//CPG_PERFCOUNTER1_LO
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER1_HI
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_LO
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_HI
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_LO
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_HI
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_LO
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_HI
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_LO
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_HI
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_LO
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_HI
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_LATENCY_STATS_DATA
+#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_LATENCY_STATS_DATA
+#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPC_LATENCY_STATS_DATA
+#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_LO
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_HI
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_LO
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_HI
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_LO
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_HI
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_LO
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_HI
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_LO
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_HI
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_LO
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_HI
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER0_LO
+#define GE1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER0_HI
+#define GE1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER1_LO
+#define GE1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER1_HI
+#define GE1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER2_LO
+#define GE1_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER2_HI
+#define GE1_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER3_LO
+#define GE1_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER3_HI
+#define GE1_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER0_LO
+#define GE2_DIST_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER0_HI
+#define GE2_DIST_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER1_LO
+#define GE2_DIST_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER1_HI
+#define GE2_DIST_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER2_LO
+#define GE2_DIST_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER2_HI
+#define GE2_DIST_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER3_LO
+#define GE2_DIST_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER3_HI
+#define GE2_DIST_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER0_LO
+#define GE2_SE_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER0_HI
+#define GE2_SE_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER1_LO
+#define GE2_SE_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER1_HI
+#define GE2_SE_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER2_LO
+#define GE2_SE_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER2_HI
+#define GE2_SE_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER3_LO
+#define GE2_SE_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER3_HI
+#define GE2_SE_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_LO
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_HI
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_LO
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_HI
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_LO
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_HI
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_LO
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_HI
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_LO
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_HI
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_LO
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_HI
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_LO
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_HI
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_LO
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_HI
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_LO
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_HI
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_LO
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_HI
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_LO
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_HI
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_LO
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_HI
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_HI
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_LO
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_HI
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_LO
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_HI
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_LO
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_HI
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_LO
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_HI
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_LO
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_HI
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_LO
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER0_HI
+#define PC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER0_LO
+#define PC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER1_HI
+#define PC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER1_LO
+#define PC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER2_HI
+#define PC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER2_LO
+#define PC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER3_HI
+#define PC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER3_LO
+#define PC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_LO
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_LO
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_LO
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_LO
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_LO
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_LO
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_LO
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_LO
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER0_LO
+#define SQG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER0_HI
+#define SQG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER1_LO
+#define SQG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER1_HI
+#define SQG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER2_LO
+#define SQG_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER2_HI
+#define SQG_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER3_LO
+#define SQG_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER3_HI
+#define SQG_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER4_LO
+#define SQG_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER4_HI
+#define SQG_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER5_LO
+#define SQG_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER5_HI
+#define SQG_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER6_LO
+#define SQG_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER6_HI
+#define SQG_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER7_LO
+#define SQG_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER7_HI
+#define SQG_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_LO
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_HI
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_LO
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_HI
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_LO
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_HI
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_LO
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_HI
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_LO
+#define GCEA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_HI
+#define GCEA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_LO
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_HI
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GDS_PERFCOUNTER0_LO
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_HI
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_LO
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_HI
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_LO
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_HI
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_LO
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_HI
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_LO
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_HI
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_LO
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_HI
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_LO
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_HI
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_LO
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_HI
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_LO
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_HI
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_LO
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_HI
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_LO
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_HI
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_LO
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_HI
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER_FILTER
+#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xd
+#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0x11
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x16
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x18
+#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1b
+#define TCP_PERFCOUNTER_FILTER__DLC__SHIFT 0x1c
+#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x1d
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1e
+#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x00000FE0L
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x0001E000L
+#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x003E0000L
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00C00000L
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x07000000L
+#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x08000000L
+#define TCP_PERFCOUNTER_FILTER__DLC_MASK 0x10000000L
+#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x20000000L
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x40000000L
+//TCP_PERFCOUNTER_FILTER2
+#define TCP_PERFCOUNTER_FILTER2__REQ_MODE__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER2__REQ_MODE_MASK 0x00000007L
+//TCP_PERFCOUNTER_FILTER_EN
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
+#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x8
+#define TCP_PERFCOUNTER_FILTER_EN__DLC__SHIFT 0x9
+#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0xa
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER_EN__REQ_MODE__SHIFT 0xc
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
+#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000100L
+#define TCP_PERFCOUNTER_FILTER_EN__DLC_MASK 0x00000200L
+#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000400L
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000800L
+#define TCP_PERFCOUNTER_FILTER_EN__REQ_MODE_MASK 0x00001000L
+//GL2C_PERFCOUNTER0_LO
+#define GL2C_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER0_HI
+#define GL2C_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER1_LO
+#define GL2C_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER1_HI
+#define GL2C_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER2_LO
+#define GL2C_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER2_HI
+#define GL2C_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER3_LO
+#define GL2C_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER3_HI
+#define GL2C_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER0_LO
+#define GL2A_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER0_HI
+#define GL2A_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER1_LO
+#define GL2A_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER1_HI
+#define GL2A_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER2_LO
+#define GL2A_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER2_HI
+#define GL2A_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER3_LO
+#define GL2A_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER3_HI
+#define GL2A_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER0_LO
+#define GL1C_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER0_HI
+#define GL1C_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER1_LO
+#define GL1C_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER1_HI
+#define GL1C_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER2_LO
+#define GL1C_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER2_HI
+#define GL1C_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER3_LO
+#define GL1C_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER3_HI
+#define GL1C_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER0_LO
+#define CHC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER0_HI
+#define CHC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER1_LO
+#define CHC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER1_HI
+#define CHC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER2_LO
+#define CHC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER2_HI
+#define CHC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER3_LO
+#define CHC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER3_HI
+#define CHC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER0_LO
+#define CHCG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER0_HI
+#define CHCG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER1_LO
+#define CHCG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER1_HI
+#define CHCG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER2_LO
+#define CHCG_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER2_HI
+#define CHCG_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER3_LO
+#define CHCG_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER3_HI
+#define CHCG_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_LO
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_HI
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_LO
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_HI
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_LO
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_HI
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_LO
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_HI
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_LO
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_HI
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_LO
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_HI
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_LO
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_HI
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_LO
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_HI
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_LO
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_HI
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_LO
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_HI
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_LO
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_HI
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_LO
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_HI
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_LO
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_HI
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_LO
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_HI
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER0_LO
+#define GCR_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCR_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER0_HI
+#define GCR_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCR_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER1_LO
+#define GCR_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCR_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER1_HI
+#define GCR_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCR_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER0_LO
+#define PA_PH_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER0_HI
+#define PA_PH_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER1_LO
+#define PA_PH_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER1_HI
+#define PA_PH_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER2_LO
+#define PA_PH_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER2_HI
+#define PA_PH_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER3_LO
+#define PA_PH_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER3_HI
+#define PA_PH_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER4_LO
+#define PA_PH_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER4_HI
+#define PA_PH_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER5_LO
+#define PA_PH_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER5_HI
+#define PA_PH_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER6_LO
+#define PA_PH_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER6_HI
+#define PA_PH_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER7_LO
+#define PA_PH_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER7_HI
+#define PA_PH_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER0_LO
+#define UTCL1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER0_HI
+#define UTCL1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER1_LO
+#define UTCL1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER1_HI
+#define UTCL1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER2_LO
+#define UTCL1_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER2_HI
+#define UTCL1_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER3_LO
+#define UTCL1_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER3_HI
+#define UTCL1_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER0_LO
+#define GL1A_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER0_HI
+#define GL1A_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER1_LO
+#define GL1A_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER1_HI
+#define GL1A_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER2_LO
+#define GL1A_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER2_HI
+#define GL1A_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER3_LO
+#define GL1A_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER3_HI
+#define GL1A_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER0_LO
+#define GL1H_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER0_HI
+#define GL1H_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER1_LO
+#define GL1H_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER1_HI
+#define GL1H_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER2_LO
+#define GL1H_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER2_HI
+#define GL1H_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER3_LO
+#define GL1H_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER3_HI
+#define GL1H_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER0_LO
+#define CHA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER0_HI
+#define CHA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER1_LO
+#define CHA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER1_HI
+#define CHA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER2_LO
+#define CHA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER2_HI
+#define CHA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER3_LO
+#define CHA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER3_HI
+#define CHA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER2_LO
+#define GUS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GUS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER2_HI
+#define GUS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GUS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER_LO
+#define GUS_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GUS_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER_HI
+#define GUS_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GUS_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GUS_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GUS_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
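The LO/HI register pairs in the block above hold the two 32-bit halves of each 64-bit performance counter, and the paired __SHIFT/_MASK macros describe where each field sits within its register. A minimal sketch of how such definitions are conventionally consumed is shown below; the helper name read_cpg_perfcounter0 is illustrative only and is not part of this header or of the amdgpu driver.

#include <stdint.h>

/* Illustrative sketch: assemble the 64-bit CPG perf counter 0 value from
 * raw reads of its LO and HI registers, using the masks defined above. */
static inline uint64_t read_cpg_perfcounter0(uint32_t lo, uint32_t hi)
{
	/* Extract each 32-bit half with its field mask and shift. */
	uint64_t lo_val = (lo & CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK) >>
			  CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT;
	uint64_t hi_val = (hi & CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK) >>
			  CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT;

	/* Combine into the full 64-bit counter value. */
	return (hi_val << 32) | lo_val;
}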
+// addressBlock: gc_perfsdec
+//CPG_PERFCOUNTER1_SELECT
+#define CPG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT1
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
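The *_SELECT registers in this address block pack an event selector (PERF_SEL), an SPM streaming mode (SPM_MODE) and per-counter modes (CNTR_MODE*) into a single 32-bit word. A hedged sketch of composing such a value from the fields defined above follows; the helper name and the idea of leaving unused fields at zero are assumptions for illustration, not driver code.

#include <stdint.h>

/* Illustrative sketch: build a CPG_PERFCOUNTER0_SELECT value that programs
 * the primary event selector and counter mode, leaving other fields zero. */
static inline uint32_t cpg_perfcounter0_select(uint32_t perf_sel,
					       uint32_t cntr_mode0)
{
	uint32_t val = 0;

	/* Place each field at its documented offset and clamp to its mask. */
	val |= (perf_sel << CPG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT) &
	       CPG_PERFCOUNTER0_SELECT__PERF_SEL_MASK;
	val |= (cntr_mode0 << CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT) &
	       CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK;

	return val;
}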
+//CPC_PERFCOUNTER1_SELECT
+#define CPC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPC_PERFCOUNTER0_SELECT1
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER1_SELECT
+#define CPF_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPF_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT1
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CP_PERFMON_CNTL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//CPC_PERFCOUNTER0_SELECT
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPG_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPF_LATENCY_STATS_SELECT
+#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
+#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPG_LATENCY_STATS_SELECT
+#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_LATENCY_STATS_SELECT
+#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CP_DRAW_OBJECT
+#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
+#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
+//CP_DRAW_OBJECT_COUNTER
+#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
+#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
+//CP_DRAW_WINDOW_MASK_HI
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_HI
+#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_LO
+#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
+#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
+#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
+#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
+//CP_DRAW_WINDOW_CNTL
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
+#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
+#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
+//GRBM_PERFCOUNTER0_SELECT
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER0_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER0_SELECT__GE_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__GE_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_PERFCOUNTER1_SELECT
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER1_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER1_SELECT__GE_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__GE_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_SE0_PERFCOUNTER_SELECT
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE0_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE0_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE0_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE0_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE1_PERFCOUNTER_SELECT
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE1_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE1_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE1_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE1_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE2_PERFCOUNTER_SELECT
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE2_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE2_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE2_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE2_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE3_PERFCOUNTER_SELECT
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE3_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE3_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE3_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE3_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_PERFCOUNTER0_SELECT_HI
+#define GRBM_PERFCOUNTER0_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x1
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK__SHIFT 0x2
+#define GRBM_PERFCOUNTER0_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK__SHIFT 0x3
+#define GRBM_PERFCOUNTER0_SELECT_HI__CH_BUSY_USER_DEFINED_MASK__SHIFT 0x4
+#define GRBM_PERFCOUNTER0_SELECT_HI__PH_BUSY_USER_DEFINED_MASK__SHIFT 0x5
+#define GRBM_PERFCOUNTER0_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK__SHIFT 0x6
+#define GRBM_PERFCOUNTER0_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK__SHIFT 0x7
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x8
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x9
+#define GRBM_PERFCOUNTER0_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00000002L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK_MASK 0x00000004L
+#define GRBM_PERFCOUNTER0_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK_MASK 0x00000008L
+#define GRBM_PERFCOUNTER0_SELECT_HI__CH_BUSY_USER_DEFINED_MASK_MASK 0x00000010L
+#define GRBM_PERFCOUNTER0_SELECT_HI__PH_BUSY_USER_DEFINED_MASK_MASK 0x00000020L
+#define GRBM_PERFCOUNTER0_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK_MASK 0x00000040L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK_MASK 0x00000080L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x00000100L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x00000200L
+//GRBM_PERFCOUNTER1_SELECT_HI
+#define GRBM_PERFCOUNTER1_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x1
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK__SHIFT 0x2
+#define GRBM_PERFCOUNTER1_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK__SHIFT 0x3
+#define GRBM_PERFCOUNTER1_SELECT_HI__CH_BUSY_USER_DEFINED_MASK__SHIFT 0x4
+#define GRBM_PERFCOUNTER1_SELECT_HI__PH_BUSY_USER_DEFINED_MASK__SHIFT 0x5
+#define GRBM_PERFCOUNTER1_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK__SHIFT 0x6
+#define GRBM_PERFCOUNTER1_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK__SHIFT 0x7
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x8
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x9
+#define GRBM_PERFCOUNTER1_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00000002L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK_MASK 0x00000004L
+#define GRBM_PERFCOUNTER1_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK_MASK 0x00000008L
+#define GRBM_PERFCOUNTER1_SELECT_HI__CH_BUSY_USER_DEFINED_MASK_MASK 0x00000010L
+#define GRBM_PERFCOUNTER1_SELECT_HI__PH_BUSY_USER_DEFINED_MASK_MASK 0x00000020L
+#define GRBM_PERFCOUNTER1_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK_MASK 0x00000040L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK_MASK 0x00000080L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x00000100L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x00000200L
+//GE1_PERFCOUNTER0_SELECT
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER0_SELECT1
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER1_SELECT
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER1_SELECT1
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER2_SELECT
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER2_SELECT1
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER3_SELECT
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER3_SELECT1
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER0_SELECT
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER0_SELECT1
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER1_SELECT
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER1_SELECT1
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER2_SELECT
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER2_SELECT1
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER3_SELECT
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER3_SELECT1
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER0_SELECT
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER0_SELECT1
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER1_SELECT
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER1_SELECT1
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER2_SELECT
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER2_SELECT1
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER3_SELECT
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER3_SELECT1
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT1
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT1
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT1
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT1
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT1
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER1_SELECT
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER2_SELECT
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER3_SELECT
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER4_SELECT
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER5_SELECT
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER6_SELECT
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER7_SELECT
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER0_SELECT
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER0_SELECT1
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT1
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT1
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT1
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER4_SELECT
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER5_SELECT
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER_BINS
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
+//PC_PERFCOUNTER0_SELECT
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER1_SELECT
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER2_SELECT
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER3_SELECT
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER0_SELECT1
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER1_SELECT1
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER2_SELECT1
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER3_SELECT1
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SQ_PERFCOUNTER0_SELECT
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER1_SELECT
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER2_SELECT
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER3_SELECT
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER4_SELECT
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER5_SELECT
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER6_SELECT
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER7_SELECT
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER8_SELECT
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER9_SELECT
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER10_SELECT
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER11_SELECT
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER12_SELECT
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER13_SELECT
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER14_SELECT
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER15_SELECT
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER0_SELECT
+#define SQG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER1_SELECT
+#define SQG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER2_SELECT
+#define SQG_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER3_SELECT
+#define SQG_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER4_SELECT
+#define SQG_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER5_SELECT
+#define SQG_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER6_SELECT
+#define SQG_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER7_SELECT
+#define SQG_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER_CTRL
+#define SQG_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQG_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQG_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQG_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF__SHIFT 0xe
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF__SHIFT 0xf
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF__SHIFT 0x10
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF__SHIFT 0x11
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF__SHIFT 0x12
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF__SHIFT 0x13
+#define SQG_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQG_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQG_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQG_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF_MASK 0x00004000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF_MASK 0x00008000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF_MASK 0x00010000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF_MASK 0x00020000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF_MASK 0x00040000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF_MASK 0x00080000L
+//SQG_PERFCOUNTER_CTRL2
+#define SQG_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQG_PERFCOUNTER_CTRL2__VMID_EN__SHIFT 0x1
+#define SQG_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQG_PERFCOUNTER_CTRL2__VMID_EN_MASK 0x0001FFFEL
+//SQG_PERF_SAMPLE_FINISH
+#define SQG_PERF_SAMPLE_FINISH__STATUS__SHIFT 0x0
+#define SQG_PERF_SAMPLE_FINISH__STATUS_MASK 0x0000007FL
+//SQ_PERFCOUNTER_CTRL
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF__SHIFT 0xe
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF__SHIFT 0xf
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF__SHIFT 0x10
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF__SHIFT 0x11
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF__SHIFT 0x12
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF__SHIFT 0x13
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF_MASK 0x00004000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF_MASK 0x00008000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF_MASK 0x00010000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF_MASK 0x00020000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF_MASK 0x00040000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF_MASK 0x00080000L
+//SQ_PERFCOUNTER_CTRL2
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL2__VMID_EN__SHIFT 0x1
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL2__VMID_EN_MASK 0x0001FFFEL
+//SQ_THREAD_TRACE_BUF0_BASE
+#define SQ_THREAD_TRACE_BUF0_BASE__BASE_LO__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF0_BASE__BASE_LO_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BUF0_SIZE
+#define SQ_THREAD_TRACE_BUF0_SIZE__BASE_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF0_SIZE__SIZE__SHIFT 0x8
+#define SQ_THREAD_TRACE_BUF0_SIZE__BASE_HI_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_BUF0_SIZE__SIZE_MASK 0x3FFFFF00L
+//SQ_THREAD_TRACE_BUF1_BASE
+#define SQ_THREAD_TRACE_BUF1_BASE__BASE_LO__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF1_BASE__BASE_LO_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BUF1_SIZE
+#define SQ_THREAD_TRACE_BUF1_SIZE__BASE_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF1_SIZE__SIZE__SHIFT 0x8
+#define SQ_THREAD_TRACE_BUF1_SIZE__BASE_HI_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_BUF1_SIZE__SIZE_MASK 0x3FFFFF00L
+//SQ_THREAD_TRACE_CTRL
+#define SQ_THREAD_TRACE_CTRL__MODE__SHIFT 0x0
+#define SQ_THREAD_TRACE_CTRL__ALL_VMID__SHIFT 0x2
+#define SQ_THREAD_TRACE_CTRL__GL1_PERF_EN__SHIFT 0x3
+#define SQ_THREAD_TRACE_CTRL__INTERRUPT_EN__SHIFT 0x4
+#define SQ_THREAD_TRACE_CTRL__DOUBLE_BUFFER__SHIFT 0x5
+#define SQ_THREAD_TRACE_CTRL__HIWATER__SHIFT 0x6
+#define SQ_THREAD_TRACE_CTRL__REG_AT_HWM__SHIFT 0x9
+#define SQ_THREAD_TRACE_CTRL__SPI_STALL_EN__SHIFT 0xb
+#define SQ_THREAD_TRACE_CTRL__SQ_STALL_EN__SHIFT 0xc
+#define SQ_THREAD_TRACE_CTRL__UTIL_TIMER__SHIFT 0xd
+#define SQ_THREAD_TRACE_CTRL__WAVESTART_MODE__SHIFT 0xe
+#define SQ_THREAD_TRACE_CTRL__RT_FREQ__SHIFT 0x10
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_MARKERS__SHIFT 0x12
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_DRAWS__SHIFT 0x13
+#define SQ_THREAD_TRACE_CTRL__LOWATER_OFFSET__SHIFT 0x14
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_PADDING_DIS__SHIFT 0x1c
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_MODE__SHIFT 0x1d
+#define SQ_THREAD_TRACE_CTRL__DRAW_EVENT_EN__SHIFT 0x1f
+#define SQ_THREAD_TRACE_CTRL__MODE_MASK 0x00000003L
+#define SQ_THREAD_TRACE_CTRL__ALL_VMID_MASK 0x00000004L
+#define SQ_THREAD_TRACE_CTRL__GL1_PERF_EN_MASK 0x00000008L
+#define SQ_THREAD_TRACE_CTRL__INTERRUPT_EN_MASK 0x00000010L
+#define SQ_THREAD_TRACE_CTRL__DOUBLE_BUFFER_MASK 0x00000020L
+#define SQ_THREAD_TRACE_CTRL__HIWATER_MASK 0x000001C0L
+#define SQ_THREAD_TRACE_CTRL__REG_AT_HWM_MASK 0x00000600L
+#define SQ_THREAD_TRACE_CTRL__SPI_STALL_EN_MASK 0x00000800L
+#define SQ_THREAD_TRACE_CTRL__SQ_STALL_EN_MASK 0x00001000L
+#define SQ_THREAD_TRACE_CTRL__UTIL_TIMER_MASK 0x00002000L
+#define SQ_THREAD_TRACE_CTRL__WAVESTART_MODE_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_CTRL__RT_FREQ_MASK 0x00030000L
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_MARKERS_MASK 0x00040000L
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_DRAWS_MASK 0x00080000L
+#define SQ_THREAD_TRACE_CTRL__LOWATER_OFFSET_MASK 0x00700000L
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_PADDING_DIS_MASK 0x10000000L
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_CTRL__DRAW_EVENT_EN_MASK 0x80000000L
+//SQ_THREAD_TRACE_MASK
+#define SQ_THREAD_TRACE_MASK__SIMD_SEL__SHIFT 0x0
+#define SQ_THREAD_TRACE_MASK__WGP_SEL__SHIFT 0x4
+#define SQ_THREAD_TRACE_MASK__SA_SEL__SHIFT 0x9
+#define SQ_THREAD_TRACE_MASK__WTYPE_INCLUDE__SHIFT 0xa
+#define SQ_THREAD_TRACE_MASK__EXCLUDE_NONDETAIL_SHADERDATA__SHIFT 0x11
+#define SQ_THREAD_TRACE_MASK__SIMD_SEL_MASK 0x00000003L
+#define SQ_THREAD_TRACE_MASK__WGP_SEL_MASK 0x000000F0L
+#define SQ_THREAD_TRACE_MASK__SA_SEL_MASK 0x00000200L
+#define SQ_THREAD_TRACE_MASK__WTYPE_INCLUDE_MASK 0x0001FC00L
+#define SQ_THREAD_TRACE_MASK__EXCLUDE_NONDETAIL_SHADERDATA_MASK 0x00020000L
+//SQ_THREAD_TRACE_TOKEN_MASK
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_EXCLUDE__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK__TTRACE_EXEC__SHIFT 0xb
+#define SQ_THREAD_TRACE_TOKEN_MASK__BOP_EVENTS_TOKEN_INCLUDE__SHIFT 0xc
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_INCLUDE__SHIFT 0x10
+#define SQ_THREAD_TRACE_TOKEN_MASK__INST_EXCLUDE__SHIFT 0x18
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_EXCLUDE__SHIFT 0x1a
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DETAIL_ALL__SHIFT 0x1f
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_EXCLUDE_MASK 0x000007FFL
+#define SQ_THREAD_TRACE_TOKEN_MASK__TTRACE_EXEC_MASK 0x00000800L
+#define SQ_THREAD_TRACE_TOKEN_MASK__BOP_EVENTS_TOKEN_INCLUDE_MASK 0x00001000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_INCLUDE_MASK 0x00FF0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__INST_EXCLUDE_MASK 0x03000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_EXCLUDE_MASK 0x1C000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DETAIL_ALL_MASK 0x80000000L
+//SQ_THREAD_TRACE_WPTR
+#define SQ_THREAD_TRACE_WPTR__OFFSET__SHIFT 0x0
+#define SQ_THREAD_TRACE_WPTR__BUFFER_ID__SHIFT 0x1f
+#define SQ_THREAD_TRACE_WPTR__OFFSET_MASK 0x1FFFFFFFL
+#define SQ_THREAD_TRACE_WPTR__BUFFER_ID_MASK 0x80000000L
+//SQ_THREAD_TRACE_STATUS
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0xc
+#define SQ_THREAD_TRACE_STATUS__WRITE_ERROR__SHIFT 0x18
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x19
+#define SQ_THREAD_TRACE_STATUS__OWNER_VMID__SHIFT 0x1c
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x00000FFFL
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x00FFF000L
+#define SQ_THREAD_TRACE_STATUS__WRITE_ERROR_MASK 0x01000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x02000000L
+#define SQ_THREAD_TRACE_STATUS__OWNER_VMID_MASK 0xF0000000L
+//SQ_THREAD_TRACE_STATUS2
+#define SQ_THREAD_TRACE_STATUS2__BUF0_FULL__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS2__BUF1_FULL__SHIFT 0x1
+#define SQ_THREAD_TRACE_STATUS2__PACKET_LOST_BUF_NO_LOCKDOWN__SHIFT 0x4
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_STATUS__SHIFT 0x8
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE__SHIFT 0xd
+#define SQ_THREAD_TRACE_STATUS2__WRITE_BUF_FULL__SHIFT 0xe
+#define SQ_THREAD_TRACE_STATUS2__BUF0_FULL_MASK 0x00000001L
+#define SQ_THREAD_TRACE_STATUS2__BUF1_FULL_MASK 0x00000002L
+#define SQ_THREAD_TRACE_STATUS2__PACKET_LOST_BUF_NO_LOCKDOWN_MASK 0x00000010L
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_STATUS_MASK 0x00001F00L
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_MASK 0x00002000L
+#define SQ_THREAD_TRACE_STATUS2__WRITE_BUF_FULL_MASK 0x00004000L
+//SQ_THREAD_TRACE_GFX_DRAW_CNTR
+#define SQ_THREAD_TRACE_GFX_DRAW_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_GFX_DRAW_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_GFX_MARKER_CNTR
+#define SQ_THREAD_TRACE_GFX_MARKER_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_GFX_MARKER_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_HP3D_DRAW_CNTR
+#define SQ_THREAD_TRACE_HP3D_DRAW_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_HP3D_DRAW_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_HP3D_MARKER_CNTR
+#define SQ_THREAD_TRACE_HP3D_MARKER_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_HP3D_MARKER_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_DROPPED_CNTR
+#define SQ_THREAD_TRACE_DROPPED_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_DROPPED_CNTR__CNTR_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_SELECT
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCEA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCEA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCEA_PERFCOUNTER2_SELECT1
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCEA_PERFCOUNTER2_MODE
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GCEA_PERFCOUNTER0_CFG
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER1_CFG
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER_RSLT_CNTL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SX_PERFCOUNTER0_SELECT
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER2_SELECT
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER3_SELECT
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER0_SELECT1
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT1
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT1
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT1
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT1
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT1
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT1
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER1_SELECT
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT1
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TD_PERFCOUNTER1_SELECT
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT1
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT1
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER2_SELECT
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER3_SELECT
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER0_SELECT
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2C_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER0_SELECT1
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2C_PERFCOUNTER1_SELECT
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2C_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER1_SELECT1
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2C_PERFCOUNTER2_SELECT
+#define GL2C_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER3_SELECT
+#define GL2C_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER0_SELECT
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2A_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER0_SELECT1
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2A_PERFCOUNTER1_SELECT
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2A_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER1_SELECT1
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2A_PERFCOUNTER2_SELECT
+#define GL2A_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER3_SELECT
+#define GL2A_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER0_SELECT
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1C_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1C_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER0_SELECT1
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1C_PERFCOUNTER1_SELECT
+#define GL1C_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER2_SELECT
+#define GL1C_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER3_SELECT
+#define GL1C_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER0_SELECT
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER0_SELECT1
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHC_PERFCOUNTER1_SELECT
+#define CHC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER2_SELECT
+#define CHC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER3_SELECT
+#define CHC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER0_SELECT
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHCG_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHCG_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER0_SELECT1
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHCG_PERFCOUNTER1_SELECT
+#define CHCG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER2_SELECT
+#define CHCG_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER3_SELECT
+#define CHCG_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER_FILTER
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
+//CB_PERFCOUNTER0_SELECT
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER0_SELECT1
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//CB_PERFCOUNTER1_SELECT
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER2_SELECT
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER3_SELECT
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT1
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT1
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER2_SELECT
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER3_SELECT
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RLC_SPM_PERFMON_CNTL
+#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x0
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
+#define RLC_SPM_PERFMON_CNTL__DISABLE_GFXCLOCK_COUNT__SHIFT 0xe
+#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xf
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFFL
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
+#define RLC_SPM_PERFMON_CNTL__DISABLE_GFXCLOCK_COUNT_MASK 0x00004000L
+#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x00008000L
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_BASE_LO
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_RING_BASE_HI
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_SIZE
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
+//RLC_SPM_RING_WRPTR
+#define RLC_SPM_RING_WRPTR__RESERVED__SHIFT 0x0
+#define RLC_SPM_RING_WRPTR__PERFMON_RING_WRPTR__SHIFT 0x5
+#define RLC_SPM_RING_WRPTR__RESERVED_MASK 0x0000001FL
+#define RLC_SPM_RING_WRPTR__PERFMON_RING_WRPTR_MASK 0xFFFFFFE0L
+//RLC_SPM_RING_RDPTR
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
+//RLC_SPM_SEGMENT_THRESHOLD
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
+#define RLC_SPM_SEGMENT_THRESHOLD__RESERVED__SHIFT 0x8
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0x000000FFL
+#define RLC_SPM_SEGMENT_THRESHOLD__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PERFMON_SEGMENT_SIZE
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__TOTAL_NUM_SEGMENT__SHIFT 0x0
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_SEGMENT__SHIFT 0x10
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE_NUM_SEGMENT__SHIFT 0x18
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__TOTAL_NUM_SEGMENT_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_SEGMENT_MASK 0x00FF0000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE_NUM_SEGMENT_MASK 0xFF000000L
+//RLC_SPM_GLOBAL_MUXSEL_ADDR
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_GLOBAL_MUXSEL_DATA
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL0__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL1__SHIFT 0x10
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL0_MASK 0x0000FFFFL
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL1_MASK 0xFFFF0000L
+//RLC_SPM_SE_MUXSEL_ADDR
+#define RLC_SPM_SE_MUXSEL_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_SE_MUXSEL_DATA
+#define RLC_SPM_SE_MUXSEL_DATA__SEL0__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_DATA__SEL1__SHIFT 0x10
+#define RLC_SPM_SE_MUXSEL_DATA__SEL0_MASK 0x0000FFFFL
+#define RLC_SPM_SE_MUXSEL_DATA__SEL1_MASK 0xFFFF0000L
+//RLC_SPM_ACCUM_DATARAM_ADDR
+#define RLC_SPM_ACCUM_DATARAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_ACCUM_DATARAM_ADDR__addr_MASK 0x0000007FL
+#define RLC_SPM_ACCUM_DATARAM_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_ACCUM_DATARAM_DATA
+#define RLC_SPM_ACCUM_DATARAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_DATA__data_MASK 0xFFFFFFFFL
+//RLC_SPM_ACCUM_SWA_DATARAM_ADDR
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__addr_MASK 0x0000007FL
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_ACCUM_SWA_DATARAM_DATA
+#define RLC_SPM_ACCUM_SWA_DATARAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_SWA_DATARAM_DATA__data_MASK 0xFFFFFFFFL
+//RLC_SPM_ACCUM_CTRLRAM_ADDR
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__RESERVED__SHIFT 0xb
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__addr_MASK 0x000007FFL
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__RESERVED_MASK 0xFFFFF800L
+//RLC_SPM_ACCUM_CTRLRAM_DATA
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__RESERVED__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__data_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__global_offset__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_se_offset__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_global_offset__SHIFT 0x10
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__RESERVED__SHIFT 0x18
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__global_offset_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_se_offset_MASK 0x0000FF00L
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_global_offset_MASK 0x00FF0000L
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__RESERVED_MASK 0xFF000000L
+//RLC_SPM_ACCUM_STATUS
+#define RLC_SPM_ACCUM_STATUS__NumbSamplesCompleted__SHIFT 0x0
+#define RLC_SPM_ACCUM_STATUS__AccumDone__SHIFT 0x8
+#define RLC_SPM_ACCUM_STATUS__SpmDone__SHIFT 0x9
+#define RLC_SPM_ACCUM_STATUS__AccumOverflow__SHIFT 0xa
+#define RLC_SPM_ACCUM_STATUS__AccumArmed__SHIFT 0xb
+#define RLC_SPM_ACCUM_STATUS__SequenceInProgress__SHIFT 0xc
+#define RLC_SPM_ACCUM_STATUS__FinalSequenceInProgress__SHIFT 0xd
+#define RLC_SPM_ACCUM_STATUS__AllFifosEmpty__SHIFT 0xe
+#define RLC_SPM_ACCUM_STATUS__FSMIsIdle__SHIFT 0xf
+#define RLC_SPM_ACCUM_STATUS__SwaAccumDone__SHIFT 0x10
+#define RLC_SPM_ACCUM_STATUS__SwaSpmDone__SHIFT 0x11
+#define RLC_SPM_ACCUM_STATUS__SwaAccumOverflow__SHIFT 0x12
+#define RLC_SPM_ACCUM_STATUS__SwaAccumArmed__SHIFT 0x13
+#define RLC_SPM_ACCUM_STATUS__AllSegsDone__SHIFT 0x14
+#define RLC_SPM_ACCUM_STATUS__RearmSwaPending__SHIFT 0x15
+#define RLC_SPM_ACCUM_STATUS__RearmSppPending__SHIFT 0x16
+#define RLC_SPM_ACCUM_STATUS__MultiSampleAborted__SHIFT 0x17
+#define RLC_SPM_ACCUM_STATUS__RESERVED__SHIFT 0x18
+#define RLC_SPM_ACCUM_STATUS__NumbSamplesCompleted_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_STATUS__AccumDone_MASK 0x00000100L
+#define RLC_SPM_ACCUM_STATUS__SpmDone_MASK 0x00000200L
+#define RLC_SPM_ACCUM_STATUS__AccumOverflow_MASK 0x00000400L
+#define RLC_SPM_ACCUM_STATUS__AccumArmed_MASK 0x00000800L
+#define RLC_SPM_ACCUM_STATUS__SequenceInProgress_MASK 0x00001000L
+#define RLC_SPM_ACCUM_STATUS__FinalSequenceInProgress_MASK 0x00002000L
+#define RLC_SPM_ACCUM_STATUS__AllFifosEmpty_MASK 0x00004000L
+#define RLC_SPM_ACCUM_STATUS__FSMIsIdle_MASK 0x00008000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumDone_MASK 0x00010000L
+#define RLC_SPM_ACCUM_STATUS__SwaSpmDone_MASK 0x00020000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumOverflow_MASK 0x00040000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumArmed_MASK 0x00080000L
+#define RLC_SPM_ACCUM_STATUS__AllSegsDone_MASK 0x00100000L
+#define RLC_SPM_ACCUM_STATUS__RearmSwaPending_MASK 0x00200000L
+#define RLC_SPM_ACCUM_STATUS__RearmSppPending_MASK 0x00400000L
+#define RLC_SPM_ACCUM_STATUS__MultiSampleAborted_MASK 0x00800000L
+#define RLC_SPM_ACCUM_STATUS__RESERVED_MASK 0xFF000000L
+//RLC_SPM_ACCUM_CTRL
+#define RLC_SPM_ACCUM_CTRL__StrobeResetPerfMonitors__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRL__StrobeStartAccumulation__SHIFT 0x1
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmAccum__SHIFT 0x2
+#define RLC_SPM_ACCUM_CTRL__StrobeResetSpmBlock__SHIFT 0x3
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSpm__SHIFT 0x4
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmSwaAccum__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSwa__SHIFT 0x9
+#define RLC_SPM_ACCUM_CTRL__StrobePerfmonSampleWires__SHIFT 0xa
+#define RLC_SPM_ACCUM_CTRL__RESERVED__SHIFT 0xb
+#define RLC_SPM_ACCUM_CTRL__StrobeResetPerfMonitors_MASK 0x00000001L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartAccumulation_MASK 0x00000002L
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmAccum_MASK 0x00000004L
+#define RLC_SPM_ACCUM_CTRL__StrobeResetSpmBlock_MASK 0x00000008L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSpm_MASK 0x000000F0L
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmSwaAccum_MASK 0x00000100L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSwa_MASK 0x00000200L
+#define RLC_SPM_ACCUM_CTRL__StrobePerfmonSampleWires_MASK 0x00000400L
+#define RLC_SPM_ACCUM_CTRL__RESERVED_MASK 0xFFFFF800L
+//RLC_SPM_ACCUM_MODE
+#define RLC_SPM_ACCUM_MODE__EnableAccum__SHIFT 0x0
+#define RLC_SPM_ACCUM_MODE__EnableSpmWithAccumMode__SHIFT 0x1
+#define RLC_SPM_ACCUM_MODE__EnableSPPMode__SHIFT 0x2
+#define RLC_SPM_ACCUM_MODE__AutoResetPerfmonDisable__SHIFT 0x3
+#define RLC_SPM_ACCUM_MODE__AutoAccumEn__SHIFT 0x5
+#define RLC_SPM_ACCUM_MODE__SwaAutoAccumEn__SHIFT 0x6
+#define RLC_SPM_ACCUM_MODE__AutoSpmEn__SHIFT 0x7
+#define RLC_SPM_ACCUM_MODE__SwaAutoSpmEn__SHIFT 0x8
+#define RLC_SPM_ACCUM_MODE__Globals_LoadOverride__SHIFT 0x9
+#define RLC_SPM_ACCUM_MODE__Globals_SwaLoadOverride__SHIFT 0xa
+#define RLC_SPM_ACCUM_MODE__SE0_LoadOverride__SHIFT 0xb
+#define RLC_SPM_ACCUM_MODE__SE0_SwaLoadOverride__SHIFT 0xc
+#define RLC_SPM_ACCUM_MODE__SE1_LoadOverride__SHIFT 0xd
+#define RLC_SPM_ACCUM_MODE__SE1_SwaLoadOverride__SHIFT 0xe
+#define RLC_SPM_ACCUM_MODE__SE2_LoadOverride__SHIFT 0xf
+#define RLC_SPM_ACCUM_MODE__SE2_SwaLoadOverride__SHIFT 0x10
+#define RLC_SPM_ACCUM_MODE__EnableAccum_MASK 0x00000001L
+#define RLC_SPM_ACCUM_MODE__EnableSpmWithAccumMode_MASK 0x00000002L
+#define RLC_SPM_ACCUM_MODE__EnableSPPMode_MASK 0x00000004L
+#define RLC_SPM_ACCUM_MODE__AutoResetPerfmonDisable_MASK 0x00000008L
+#define RLC_SPM_ACCUM_MODE__AutoAccumEn_MASK 0x00000020L
+#define RLC_SPM_ACCUM_MODE__SwaAutoAccumEn_MASK 0x00000040L
+#define RLC_SPM_ACCUM_MODE__AutoSpmEn_MASK 0x00000080L
+#define RLC_SPM_ACCUM_MODE__SwaAutoSpmEn_MASK 0x00000100L
+#define RLC_SPM_ACCUM_MODE__Globals_LoadOverride_MASK 0x00000200L
+#define RLC_SPM_ACCUM_MODE__Globals_SwaLoadOverride_MASK 0x00000400L
+#define RLC_SPM_ACCUM_MODE__SE0_LoadOverride_MASK 0x00000800L
+#define RLC_SPM_ACCUM_MODE__SE0_SwaLoadOverride_MASK 0x00001000L
+#define RLC_SPM_ACCUM_MODE__SE1_LoadOverride_MASK 0x00002000L
+#define RLC_SPM_ACCUM_MODE__SE1_SwaLoadOverride_MASK 0x00004000L
+#define RLC_SPM_ACCUM_MODE__SE2_LoadOverride_MASK 0x00008000L
+#define RLC_SPM_ACCUM_MODE__SE2_SwaLoadOverride_MASK 0x00010000L
+//RLC_SPM_ACCUM_THRESHOLD
+#define RLC_SPM_ACCUM_THRESHOLD__Threshold__SHIFT 0x0
+#define RLC_SPM_ACCUM_THRESHOLD__Threshold_MASK 0x0000FFFFL
+//RLC_SPM_ACCUM_SAMPLES_REQUESTED
+#define RLC_SPM_ACCUM_SAMPLES_REQUESTED__SamplesRequested__SHIFT 0x0
+#define RLC_SPM_ACCUM_SAMPLES_REQUESTED__SamplesRequested_MASK 0x000000FFL
+//RLC_SPM_ACCUM_DATARAM_WRCOUNT
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__DataRamWrCount__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__RESERVED__SHIFT 0x13
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__DataRamWrCount_MASK 0x0007FFFFL
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__RESERVED_MASK 0xFFF80000L
+//RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__spp_addr_region__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__swa_addr_region__SHIFT 0x8
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__RESERVED__SHIFT 0x10
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__spp_addr_region_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__swa_addr_region_MASK 0x0000FF00L
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PAUSE
+#define RLC_SPM_PAUSE__PAUSE__SHIFT 0x0
+#define RLC_SPM_PAUSE__PAUSED__SHIFT 0x1
+#define RLC_SPM_PAUSE__PAUSE_MASK 0x00000001L
+#define RLC_SPM_PAUSE__PAUSED_MASK 0x00000002L
+//RLC_SPM_STATUS
+#define RLC_SPM_STATUS__CTL_BUSY__SHIFT 0x0
+#define RLC_SPM_STATUS__RSPM_REG_BUSY__SHIFT 0x1
+#define RLC_SPM_STATUS__SPM_RSPM_BUSY__SHIFT 0x2
+#define RLC_SPM_STATUS__SPM_RSPM_IO_BUSY__SHIFT 0x3
+#define RLC_SPM_STATUS__SE_RSPM_IO_BUSY__SHIFT 0x4
+#define RLC_SPM_STATUS__ACCUM_BUSY__SHIFT 0xf
+#define RLC_SPM_STATUS__FSM_MASTER_STATE__SHIFT 0x10
+#define RLC_SPM_STATUS__FSM_MEMORY_STATE__SHIFT 0x14
+#define RLC_SPM_STATUS__CTL_REQ_STATE__SHIFT 0x18
+#define RLC_SPM_STATUS__CTL_RET_STATE__SHIFT 0x1a
+#define RLC_SPM_STATUS__CTL_BUSY_MASK 0x00000001L
+#define RLC_SPM_STATUS__RSPM_REG_BUSY_MASK 0x00000002L
+#define RLC_SPM_STATUS__SPM_RSPM_BUSY_MASK 0x00000004L
+#define RLC_SPM_STATUS__SPM_RSPM_IO_BUSY_MASK 0x00000008L
+#define RLC_SPM_STATUS__SE_RSPM_IO_BUSY_MASK 0x00000FF0L
+#define RLC_SPM_STATUS__ACCUM_BUSY_MASK 0x00008000L
+#define RLC_SPM_STATUS__FSM_MASTER_STATE_MASK 0x000F0000L
+#define RLC_SPM_STATUS__FSM_MEMORY_STATE_MASK 0x00F00000L
+#define RLC_SPM_STATUS__CTL_REQ_STATE_MASK 0x03000000L
+#define RLC_SPM_STATUS__CTL_RET_STATE_MASK 0x04000000L
+//RLC_SPM_GFXCLOCK_LOWCOUNT
+#define RLC_SPM_GFXCLOCK_LOWCOUNT__GFXCLOCK_LOWCOUNT__SHIFT 0x0
+#define RLC_SPM_GFXCLOCK_LOWCOUNT__GFXCLOCK_LOWCOUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_GFXCLOCK_HIGHCOUNT
+#define RLC_SPM_GFXCLOCK_HIGHCOUNT__GFXCLOCK_HIGHCOUNT__SHIFT 0x0
+#define RLC_SPM_GFXCLOCK_HIGHCOUNT__GFXCLOCK_HIGHCOUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_MODE
+#define RLC_SPM_MODE__MODE__SHIFT 0x0
+#define RLC_SPM_MODE__MODE_MASK 0x00000001L
+//RLC_SPM_RSPM_REQ_DATA_LO
+#define RLC_SPM_RSPM_REQ_DATA_LO__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RSPM_REQ_DATA_HI
+#define RLC_SPM_RSPM_REQ_DATA_HI__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_DATA_HI__DATA_MASK 0x00000FFFL
+//RLC_SPM_RSPM_REQ_OP
+#define RLC_SPM_RSPM_REQ_OP__OP__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_OP__OP_MASK 0x0000000FL
+//RLC_SPM_RSPM_RET_DATA
+#define RLC_SPM_RSPM_RET_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_RET_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RSPM_RET_OP
+#define RLC_SPM_RSPM_RET_OP__OP__SHIFT 0x0
+#define RLC_SPM_RSPM_RET_OP__VALID__SHIFT 0x8
+#define RLC_SPM_RSPM_RET_OP__OP_MASK 0x0000000FL
+#define RLC_SPM_RSPM_RET_OP__VALID_MASK 0x00000100L
+//RLC_SPM_SE_RSPM_REQ_DATA_LO
+#define RLC_SPM_SE_RSPM_REQ_DATA_LO__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_RSPM_REQ_DATA_HI
+#define RLC_SPM_SE_RSPM_REQ_DATA_HI__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_DATA_HI__DATA_MASK 0x00000FFFL
+//RLC_SPM_SE_RSPM_REQ_OP
+#define RLC_SPM_SE_RSPM_REQ_OP__OP__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_OP__OP_MASK 0x0000000FL
+//RLC_SPM_SE_RSPM_RET_DATA
+#define RLC_SPM_SE_RSPM_RET_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_RET_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_RSPM_RET_OP
+#define RLC_SPM_SE_RSPM_RET_OP__OP__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_RET_OP__VALID__SHIFT 0x8
+#define RLC_SPM_SE_RSPM_RET_OP__OP_MASK 0x0000000FL
+#define RLC_SPM_SE_RSPM_RET_OP__VALID_MASK 0x00000100L
+//RLC_SPM_RSPM_CMD
+#define RLC_SPM_RSPM_CMD__CMD__SHIFT 0x0
+#define RLC_SPM_RSPM_CMD__CMD_MASK 0x0000000FL
+//RLC_SPM_RSPM_CMD_ACK
+#define RLC_SPM_RSPM_CMD_ACK__SE0_ACK__SHIFT 0x0
+#define RLC_SPM_RSPM_CMD_ACK__SE1_ACK__SHIFT 0x1
+#define RLC_SPM_RSPM_CMD_ACK__SE2_ACK__SHIFT 0x2
+#define RLC_SPM_RSPM_CMD_ACK__SE3_ACK__SHIFT 0x3
+#define RLC_SPM_RSPM_CMD_ACK__SE4_ACK__SHIFT 0x4
+#define RLC_SPM_RSPM_CMD_ACK__SE5_ACK__SHIFT 0x5
+#define RLC_SPM_RSPM_CMD_ACK__SE6_ACK__SHIFT 0x6
+#define RLC_SPM_RSPM_CMD_ACK__SE7_ACK__SHIFT 0x7
+#define RLC_SPM_RSPM_CMD_ACK__SPM_ACK__SHIFT 0x8
+#define RLC_SPM_RSPM_CMD_ACK__SE0_ACK_MASK 0x00000001L
+#define RLC_SPM_RSPM_CMD_ACK__SE1_ACK_MASK 0x00000002L
+#define RLC_SPM_RSPM_CMD_ACK__SE2_ACK_MASK 0x00000004L
+#define RLC_SPM_RSPM_CMD_ACK__SE3_ACK_MASK 0x00000008L
+#define RLC_SPM_RSPM_CMD_ACK__SE4_ACK_MASK 0x00000010L
+#define RLC_SPM_RSPM_CMD_ACK__SE5_ACK_MASK 0x00000020L
+#define RLC_SPM_RSPM_CMD_ACK__SE6_ACK_MASK 0x00000040L
+#define RLC_SPM_RSPM_CMD_ACK__SE7_ACK_MASK 0x00000080L
+#define RLC_SPM_RSPM_CMD_ACK__SPM_ACK_MASK 0x00000100L
+//RLC_SPM_SPARE
+#define RLC_SPM_SPARE__SPARE__SHIFT 0x0
+#define RLC_SPM_SPARE__SPARE_MASK 0xFFFFFFFFL
+//RLC_PERFMON_CNTL
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//RLC_PERFCOUNTER0_SELECT
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000FFL
+//RLC_PERFCOUNTER1_SELECT
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000FFL
+//RLC_GPU_IOV_PERF_CNT_CNTL
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
+//RLC_GPU_IOV_PERF_CNT_WR_ADDR
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_WR_DATA
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_PERF_CNT_RD_ADDR
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_RD_DATA
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_SELECT
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER0_SELECT1
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER1_SELECT
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT1
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER3_SELECT
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERF_COUNTER_CNTL
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
+//GCR_PERFCOUNTER0_SELECT
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCR_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCR_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCR_PERFCOUNTER0_SELECT1
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCR_PERFCOUNTER1_SELECT
+#define GCR_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GCR_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCR_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCR_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCR_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCR_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER0_SELECT
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER0_SELECT1
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER1_SELECT
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER2_SELECT
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER3_SELECT
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER4_SELECT
+#define PA_PH_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER5_SELECT
+#define PA_PH_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER6_SELECT
+#define PA_PH_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER7_SELECT
+#define PA_PH_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER1_SELECT1
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER2_SELECT1
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER3_SELECT1
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER0_SELECT
+#define UTCL1_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER0_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER1_SELECT
+#define UTCL1_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER1_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER2_SELECT
+#define UTCL1_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER2_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER3_SELECT
+#define UTCL1_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER3_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER0_SELECT
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1A_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1A_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER0_SELECT1
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1A_PERFCOUNTER1_SELECT
+#define GL1A_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER2_SELECT
+#define GL1A_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER3_SELECT
+#define GL1A_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER0_SELECT
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1H_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1H_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER0_SELECT1
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1H_PERFCOUNTER1_SELECT
+#define GL1H_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER2_SELECT
+#define GL1H_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER3_SELECT
+#define GL1H_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER0_SELECT
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER0_SELECT1
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHA_PERFCOUNTER1_SELECT
+#define CHA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER2_SELECT
+#define CHA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER3_SELECT
+#define CHA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_SELECT
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GUS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GUS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_SELECT1
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_MODE
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GUS_PERFCOUNTER0_CFG
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GUS_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GUS_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GUS_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GUS_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GUS_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GUS_PERFCOUNTER1_CFG
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GUS_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GUS_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GUS_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GUS_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GUS_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GUS_PERFCOUNTER_RSLT_CNTL
+#define GUS_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GUS_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GUS_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GUS_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GUS_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GUS_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gdfll_gdfll_dec
+//GDFLL_EDC_HYSTERESIS_CNTL
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_EDC_HYSTERESIS_STAT
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: gc_gdfll_se_gdfll_dec
+//GDFLL_SE_EDC_HYSTERESIS_CNTL
+#define GDFLL_SE_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_SE_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_SE_EDC_HYSTERESIS_STAT
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: gc_grtavfs_grtavfs_dec
+//GRTAVFS_RTAVFS_REG_ADDR
+#define GRTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//GRTAVFS_RTAVFS_WR_DATA
+#define GRTAVFS_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_GENERAL_0
+#define GRTAVFS_GENERAL_0__DATA__SHIFT 0x0
+#define GRTAVFS_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//GRTAVFS_RTAVFS_RD_DATA
+#define GRTAVFS_RTAVFS_RD_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_RTAVFS_RD_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_RTAVFS_REG_CTRL
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_WR_EN__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_RD_EN__SHIFT 0x1
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_WR_EN_MASK 0x00000001L
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_RD_EN_MASK 0x00000002L
+//GRTAVFS_RTAVFS_REG_STATUS
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_WR_ACK__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID__SHIFT 0x1
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_WR_ACK_MASK 0x00000001L
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID_MASK 0x00000002L
+//GRTAVFS_TARG_FREQ
+#define GRTAVFS_TARG_FREQ__TARGET_FREQUENCY__SHIFT 0x0
+#define GRTAVFS_TARG_FREQ__REQUEST__SHIFT 0x10
+#define GRTAVFS_TARG_FREQ__RESERVED__SHIFT 0x11
+#define GRTAVFS_TARG_FREQ__TARGET_FREQUENCY_MASK 0x0000FFFFL
+#define GRTAVFS_TARG_FREQ__REQUEST_MASK 0x00010000L
+#define GRTAVFS_TARG_FREQ__RESERVED_MASK 0xFFFE0000L
+//GRTAVFS_TARG_VOLT
+#define GRTAVFS_TARG_VOLT__TARGET_VOLTAGE__SHIFT 0x0
+#define GRTAVFS_TARG_VOLT__VALID__SHIFT 0xa
+#define GRTAVFS_TARG_VOLT__RESERVED__SHIFT 0xb
+#define GRTAVFS_TARG_VOLT__TARGET_VOLTAGE_MASK 0x000003FFL
+#define GRTAVFS_TARG_VOLT__VALID_MASK 0x00000400L
+#define GRTAVFS_TARG_VOLT__RESERVED_MASK 0xFFFFF800L
+//GRTAVFS_SOFT_RESET
+#define GRTAVFS_SOFT_RESET__RESETN_OVERRIDE__SHIFT 0x0
+#define GRTAVFS_SOFT_RESET__RESERVED__SHIFT 0x1
+#define GRTAVFS_SOFT_RESET__RESETN_OVERRIDE_MASK 0x00000001L
+#define GRTAVFS_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//GRTAVFS_PSM_CNTL
+#define GRTAVFS_PSM_CNTL__PSM_COUNT__SHIFT 0x0
+#define GRTAVFS_PSM_CNTL__PSM_SAMPLE_EN__SHIFT 0xe
+#define GRTAVFS_PSM_CNTL__RESERVED__SHIFT 0xf
+#define GRTAVFS_PSM_CNTL__PSM_COUNT_MASK 0x00003FFFL
+#define GRTAVFS_PSM_CNTL__PSM_SAMPLE_EN_MASK 0x00004000L
+#define GRTAVFS_PSM_CNTL__RESERVED_MASK 0xFFFF8000L
+//GRTAVFS_CLK_CNTL
+#define GRTAVFS_CLK_CNTL__GRTAVFS_MUX_CLK_SEL__SHIFT 0x0
+#define GRTAVFS_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL__SHIFT 0x1
+#define GRTAVFS_CLK_CNTL__RESERVED__SHIFT 0x2
+#define GRTAVFS_CLK_CNTL__GRTAVFS_MUX_CLK_SEL_MASK 0x00000001L
+#define GRTAVFS_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL_MASK 0x00000002L
+#define GRTAVFS_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_grtavfs_se_grtavfs_dec
+//GRTAVFS_SE_RTAVFS_REG_ADDR
+#define GRTAVFS_SE_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//GRTAVFS_SE_RTAVFS_WR_DATA
+#define GRTAVFS_SE_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_GENERAL_0
+#define GRTAVFS_SE_GENERAL_0__DATA__SHIFT 0x0
+#define GRTAVFS_SE_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_RTAVFS_RD_DATA
+#define GRTAVFS_SE_RTAVFS_RD_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_RD_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_RTAVFS_REG_CTRL
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_WR_EN__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_RD_EN__SHIFT 0x1
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_WR_EN_MASK 0x00000001L
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_RD_EN_MASK 0x00000002L
+//GRTAVFS_SE_RTAVFS_REG_STATUS
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_WR_ACK__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID__SHIFT 0x1
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_WR_ACK_MASK 0x00000001L
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID_MASK 0x00000002L
+//GRTAVFS_SE_TARG_FREQ
+#define GRTAVFS_SE_TARG_FREQ__TARGET_FREQUENCY__SHIFT 0x0
+#define GRTAVFS_SE_TARG_FREQ__REQUEST__SHIFT 0x10
+#define GRTAVFS_SE_TARG_FREQ__RESERVED__SHIFT 0x11
+#define GRTAVFS_SE_TARG_FREQ__TARGET_FREQUENCY_MASK 0x0000FFFFL
+#define GRTAVFS_SE_TARG_FREQ__REQUEST_MASK 0x00010000L
+#define GRTAVFS_SE_TARG_FREQ__RESERVED_MASK 0xFFFE0000L
+//GRTAVFS_SE_TARG_VOLT
+#define GRTAVFS_SE_TARG_VOLT__TARGET_VOLTAGE__SHIFT 0x0
+#define GRTAVFS_SE_TARG_VOLT__VALID__SHIFT 0xa
+#define GRTAVFS_SE_TARG_VOLT__RESERVED__SHIFT 0xb
+#define GRTAVFS_SE_TARG_VOLT__TARGET_VOLTAGE_MASK 0x000003FFL
+#define GRTAVFS_SE_TARG_VOLT__VALID_MASK 0x00000400L
+#define GRTAVFS_SE_TARG_VOLT__RESERVED_MASK 0xFFFFF800L
+//GRTAVFS_SE_SOFT_RESET
+#define GRTAVFS_SE_SOFT_RESET__RESETN_OVERRIDE__SHIFT 0x0
+#define GRTAVFS_SE_SOFT_RESET__RESERVED__SHIFT 0x1
+#define GRTAVFS_SE_SOFT_RESET__RESETN_OVERRIDE_MASK 0x00000001L
+#define GRTAVFS_SE_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//GRTAVFS_SE_PSM_CNTL
+#define GRTAVFS_SE_PSM_CNTL__PSM_COUNT__SHIFT 0x0
+#define GRTAVFS_SE_PSM_CNTL__PSM_SAMPLE_EN__SHIFT 0xe
+#define GRTAVFS_SE_PSM_CNTL__RESERVED__SHIFT 0xf
+#define GRTAVFS_SE_PSM_CNTL__PSM_COUNT_MASK 0x00003FFFL
+#define GRTAVFS_SE_PSM_CNTL__PSM_SAMPLE_EN_MASK 0x00004000L
+#define GRTAVFS_SE_PSM_CNTL__RESERVED_MASK 0xFFFF8000L
+//GRTAVFS_SE_CLK_CNTL
+#define GRTAVFS_SE_CLK_CNTL__GRTAVFS_MUX_CLK_SEL__SHIFT 0x0
+#define GRTAVFS_SE_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL__SHIFT 0x1
+#define GRTAVFS_SE_CLK_CNTL__RESERVED__SHIFT 0x2
+#define GRTAVFS_SE_CLK_CNTL__GRTAVFS_MUX_CLK_SEL_MASK 0x00000001L
+#define GRTAVFS_SE_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL_MASK 0x00000002L
+#define GRTAVFS_SE_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_grtavfsdec
+//RTAVFS_RTAVFS_REG_ADDR
+#define RTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define RTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//RTAVFS_RTAVFS_WR_DATA
+#define RTAVFS_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define RTAVFS_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_hypdec
+//GFX_PIPE_PRIORITY
+#define GFX_PIPE_PRIORITY__HP_PIPE_SELECT__SHIFT 0x0
+#define GFX_PIPE_PRIORITY__HP_PIPE_SELECT_MASK 0x00000001L
+//RLC_GPU_IOV_VF_ENABLE
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG6
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
+#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
+#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//RLC_SDMA0_STATUS
+#define RLC_SDMA0_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA0_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA1_STATUS
+#define RLC_SDMA1_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA1_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA2_STATUS
+#define RLC_SDMA2_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA2_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA3_STATUS
+#define RLC_SDMA3_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA3_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA0_BUSY_STATUS
+#define RLC_SDMA0_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA0_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA1_BUSY_STATUS
+#define RLC_SDMA1_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA1_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA2_BUSY_STATUS
+#define RLC_SDMA2_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA2_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA3_BUSY_STATUS
+#define RLC_SDMA3_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA3_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_CFG_REG8
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_0
+#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_1
+#define RLC_RLCV_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_CTRL
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x2
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x3
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x4
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x5
+#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x6
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000004L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000008L
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00000010L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00000020L
+#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_RLCV_TIMER_STAT
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_RLCV_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0xa
+#define RLC_RLCV_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0xb
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_RLCV_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00000400L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00000800L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
+//RLC_GPU_IOV_VF_MASK
+#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
+#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x7FFFFFFFL
+//RLC_HYP_SEMAPHORE_0
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_1
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_BUSY_CLK_CNTL
+#define RLC_BUSY_CLK_CNTL__BUSY_OFF_LATENCY__SHIFT 0x0
+#define RLC_BUSY_CLK_CNTL__GRBM_BUSY_OFF_LATENCY__SHIFT 0x8
+#define RLC_BUSY_CLK_CNTL__BUSY_OFF_LATENCY_MASK 0x0000003FL
+#define RLC_BUSY_CLK_CNTL__GRBM_BUSY_OFF_LATENCY_MASK 0x00003F00L
+//RLC_CLK_CNTL
+#define RLC_CLK_CNTL__RLC_SRM_ICG_OVERRIDE__SHIFT 0x0
+#define RLC_CLK_CNTL__RLC_IMU_ICG_OVERRIDE__SHIFT 0x1
+#define RLC_CLK_CNTL__RLC_SPM_ICG_OVERRIDE__SHIFT 0x2
+#define RLC_CLK_CNTL__RLC_SPM_RSPM_ICG_OVERRIDE__SHIFT 0x3
+#define RLC_CLK_CNTL__RLC_GPM_ICG_OVERRIDE__SHIFT 0x4
+#define RLC_CLK_CNTL__RLC_CMN_ICG_OVERRIDE__SHIFT 0x5
+#define RLC_CLK_CNTL__RLC_TC_ICG_OVERRIDE__SHIFT 0x6
+#define RLC_CLK_CNTL__RLC_REG_ICG_OVERRIDE__SHIFT 0x7
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE__SHIFT 0x8
+#define RLC_CLK_CNTL__RESERVED_9__SHIFT 0x9
+#define RLC_CLK_CNTL__RLC_SPP_ICG_OVERRIDE__SHIFT 0xa
+#define RLC_CLK_CNTL__RESERVED_11__SHIFT 0xb
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE__SHIFT 0xc
+#define RLC_CLK_CNTL__RLC_DFLL_ICG_OVERRIDE__SHIFT 0xd
+#define RLC_CLK_CNTL__RESERVED_15__SHIFT 0xf
+#define RLC_CLK_CNTL__RLC_LX6_CORE_ICG_OVERRIDE__SHIFT 0x10
+#define RLC_CLK_CNTL__RLC_LX6_ICG_OVERRIDE__SHIFT 0x11
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE__SHIFT 0x12
+#define RLC_CLK_CNTL__RLC_IH_GASKET_ICG_OVERRIDE__SHIFT 0x13
+#define RLC_CLK_CNTL__RESERVED__SHIFT 0x14
+#define RLC_CLK_CNTL__RLC_SRM_ICG_OVERRIDE_MASK 0x00000001L
+#define RLC_CLK_CNTL__RLC_IMU_ICG_OVERRIDE_MASK 0x00000002L
+#define RLC_CLK_CNTL__RLC_SPM_ICG_OVERRIDE_MASK 0x00000004L
+#define RLC_CLK_CNTL__RLC_SPM_RSPM_ICG_OVERRIDE_MASK 0x00000008L
+#define RLC_CLK_CNTL__RLC_GPM_ICG_OVERRIDE_MASK 0x00000010L
+#define RLC_CLK_CNTL__RLC_CMN_ICG_OVERRIDE_MASK 0x00000020L
+#define RLC_CLK_CNTL__RLC_TC_ICG_OVERRIDE_MASK 0x00000040L
+#define RLC_CLK_CNTL__RLC_REG_ICG_OVERRIDE_MASK 0x00000080L
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK 0x00000100L
+#define RLC_CLK_CNTL__RESERVED_9_MASK 0x00000200L
+#define RLC_CLK_CNTL__RLC_SPP_ICG_OVERRIDE_MASK 0x00000400L
+#define RLC_CLK_CNTL__RESERVED_11_MASK 0x00000800L
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE_MASK 0x00001000L
+#define RLC_CLK_CNTL__RLC_DFLL_ICG_OVERRIDE_MASK 0x00002000L
+#define RLC_CLK_CNTL__RESERVED_15_MASK 0x00008000L
+#define RLC_CLK_CNTL__RLC_LX6_CORE_ICG_OVERRIDE_MASK 0x00010000L
+#define RLC_CLK_CNTL__RLC_LX6_ICG_OVERRIDE_MASK 0x00020000L
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE_MASK 0x00040000L
+#define RLC_CLK_CNTL__RLC_IH_GASKET_ICG_OVERRIDE_MASK 0x00080000L
+#define RLC_CLK_CNTL__RESERVED_MASK 0xFFF00000L
+//RLC_PACE_TIMER_STAT
+#define RLC_PACE_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_PACE_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_PACE_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_PACE_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_PACE_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_PACE_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0xa
+#define RLC_PACE_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0xb
+#define RLC_PACE_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_PACE_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_PACE_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_PACE_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_PACE_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_PACE_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00000400L
+#define RLC_PACE_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00000800L
+//RLC_GPU_IOV_SCH_BLOCK
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x0000FF00L
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG1
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
+//RLC_GPU_IOV_CFG_REG2
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPU_IOV_VM_BUSY_STATUS
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_ACTIVE_FCN_ID
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_7_4__SHIFT 0x4
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__ACTIVE_FCN_ID_STATUS__SHIFT 0x8
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_30_12__SHIFT 0xc
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_7_4_MASK 0x000000F0L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__ACTIVE_FCN_ID_STATUS_MASK 0x00000F00L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_30_12_MASK 0x7FFFF000L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//RLC_GPU_IOV_SCH_3
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_1
+#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_2
+#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_FORCE
+#define RLC_PACE_INT_FORCE__FORCE_INT__SHIFT 0x0
+#define RLC_PACE_INT_FORCE__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_CLEAR
+#define RLC_PACE_INT_CLEAR__SMU_STRETCH_PCC_CLEAR__SHIFT 0x0
+#define RLC_PACE_INT_CLEAR__SMU_PCC_CLEAR__SHIFT 0x1
+#define RLC_PACE_INT_CLEAR__SMU_STRETCH_PCC_CLEAR_MASK 0x00000001L
+#define RLC_PACE_INT_CLEAR__SMU_PCC_CLEAR_MASK 0x00000002L
+//RLC_GPU_IOV_INT_STAT
+#define RLC_GPU_IOV_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_IH_COOKIE
+#define RLC_IH_COOKIE__DATA__SHIFT 0x0
+#define RLC_IH_COOKIE__DATA_MASK 0xFFFFFFFFL
+//RLC_IH_COOKIE_CNTL
+#define RLC_IH_COOKIE_CNTL__CREDIT__SHIFT 0x0
+#define RLC_IH_COOKIE_CNTL__RESET_COUNTER__SHIFT 0x2
+#define RLC_IH_COOKIE_CNTL__CREDIT_MASK 0x00000003L
+#define RLC_IH_COOKIE_CNTL__RESET_COUNTER_MASK 0x00000004L
+//RLC_HYP_RLCG_UCODE_CHKSUM
+#define RLC_HYP_RLCG_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCG_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_HYP_RLCP_UCODE_CHKSUM
+#define RLC_HYP_RLCP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_HYP_RLCV_UCODE_CHKSUM
+#define RLC_HYP_RLCV_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCV_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_CNTL
+#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
+//RLC_GPU_IOV_F32_RESET
+#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
+#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
+//RLC_GPU_IOV_UCODE_ADDR
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_GPU_IOV_UCODE_DATA
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SMU_RESPONSE
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_INVALIDATE_CACHE
+#define RLC_GPU_IOV_F32_INVALIDATE_CACHE__INVALIDATE_CACHE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_INVALIDATE_CACHE__INVALIDATE_CACHE_MASK 0x00000001L
+//RLC_GPU_IOV_VIRT_RESET_REQ
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
+//RLC_GPU_IOV_RLC_RESPONSE
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_DISABLE
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_INT__SHIFT 0x0
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_FORCE
+#define RLC_GPU_IOV_INT_FORCE__FORCE_INT__SHIFT 0x0
+#define RLC_GPU_IOV_INT_FORCE__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCRATCH_ADDR
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_GPU_IOV_SCRATCH_DATA
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_HYP_SEMAPHORE_2
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_3
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_LX6_SCRATCH_ADDR
+#define RLC_LX6_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_SCRATCH_ADDR__ADDR_MASK 0x000000FFL
+//RLC_LX6_CORE1_SCRATCH_ADDR
+#define RLC_LX6_CORE1_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_CORE1_SCRATCH_ADDR__ADDR_MASK 0x000000FFL
+//RLC_GPM_UCODE_ADDR
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
+//RLC_GPM_UCODE_DATA
+#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_IRAM_ADDR
+#define RLC_GPM_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_GPM_IRAM_DATA
+#define RLC_GPM_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_IRAM_ADDR
+#define RLC_RLCP_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_RLCP_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_RLCP_IRAM_DATA
+#define RLC_RLCP_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_RLCP_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_IRAM_ADDR
+#define RLC_RLCV_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_RLCV_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_RLCV_IRAM_DATA
+#define RLC_RLCV_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_RLCV_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_LX6_DRAM_ADDR
+#define RLC_LX6_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_DRAM_ADDR__ADDR_MASK 0x000007FFL
+//RLC_LX6_DRAM_DATA
+#define RLC_LX6_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_LX6_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_LX6_IRAM_ADDR
+#define RLC_LX6_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_IRAM_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_LX6_IRAM_DATA
+#define RLC_LX6_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_LX6_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_UCODE_ADDR
+#define RLC_PACE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_PACE_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_PACE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_PACE_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_PACE_UCODE_DATA
+#define RLC_PACE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_PACE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_SCRATCH_ADDR
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_GPM_SCRATCH_DATA
+#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_DRAM_ADDR
+#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xd
+#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x00001FFFL
+#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFE000L
+//RLC_SRM_DRAM_DATA
+#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_ARAM_ADDR
+#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xd
+#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x00001FFFL
+#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFE000L
+//RLC_SRM_ARAM_DATA
+#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_SCRATCH_ADDR
+#define RLC_PACE_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_PACE_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_PACE_SCRATCH_DATA
+#define RLC_PACE_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_PACE_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GTS_OFFSET_LSB
+#define RLC_GTS_OFFSET_LSB__DATA__SHIFT 0x0
+#define RLC_GTS_OFFSET_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_GTS_OFFSET_MSB
+#define RLC_GTS_OFFSET_MSB__DATA__SHIFT 0x0
+#define RLC_GTS_OFFSET_MSB__DATA_MASK 0xFFFFFFFFL
+//GL2_PIPE_STEER_0
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q0__SHIFT 0x0
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q0__SHIFT 0x4
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q0__SHIFT 0x8
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q0__SHIFT 0xc
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q1__SHIFT 0x10
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q1__SHIFT 0x14
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q1__SHIFT 0x18
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q1__SHIFT 0x1c
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q0_MASK 0x00000007L
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q0_MASK 0x00000070L
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q0_MASK 0x00000700L
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q0_MASK 0x00007000L
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q1_MASK 0x00070000L
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q1_MASK 0x00700000L
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q1_MASK 0x07000000L
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q1_MASK 0x70000000L
+//GL2_PIPE_STEER_1
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q2__SHIFT 0x0
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q2__SHIFT 0x4
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q2__SHIFT 0x8
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q2__SHIFT 0xc
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q3__SHIFT 0x10
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q3__SHIFT 0x14
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q3__SHIFT 0x18
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q3__SHIFT 0x1c
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q2_MASK 0x00000007L
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q2_MASK 0x00000070L
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q2_MASK 0x00000700L
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q2_MASK 0x00007000L
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q3_MASK 0x00070000L
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q3_MASK 0x00700000L
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q3_MASK 0x07000000L
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q3_MASK 0x70000000L
+//GL2_PIPE_STEER_2
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q0__SHIFT 0x0
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q0__SHIFT 0x4
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q0__SHIFT 0x8
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q0__SHIFT 0xc
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q1__SHIFT 0x10
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q1__SHIFT 0x14
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q1__SHIFT 0x18
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q1__SHIFT 0x1c
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q0_MASK 0x00000007L
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q0_MASK 0x00000070L
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q0_MASK 0x00000700L
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q0_MASK 0x00007000L
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q1_MASK 0x00070000L
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q1_MASK 0x00700000L
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q1_MASK 0x07000000L
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q1_MASK 0x70000000L
+//GL2_PIPE_STEER_3
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q2__SHIFT 0x0
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q2__SHIFT 0x4
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q2__SHIFT 0x8
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q2__SHIFT 0xc
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q3__SHIFT 0x10
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q3__SHIFT 0x14
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q3__SHIFT 0x18
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q3__SHIFT 0x1c
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q2_MASK 0x00000007L
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q2_MASK 0x00000070L
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q2_MASK 0x00000700L
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q2_MASK 0x00007000L
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q3_MASK 0x00070000L
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q3_MASK 0x00700000L
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q3_MASK 0x07000000L
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q3_MASK 0x70000000L
+//GL1_PIPE_STEER
+#define GL1_PIPE_STEER__PIPE0__SHIFT 0x0
+#define GL1_PIPE_STEER__PIPE1__SHIFT 0x2
+#define GL1_PIPE_STEER__PIPE2__SHIFT 0x4
+#define GL1_PIPE_STEER__PIPE3__SHIFT 0x6
+#define GL1_PIPE_STEER__PIPE0_MASK 0x00000003L
+#define GL1_PIPE_STEER__PIPE1_MASK 0x0000000CL
+#define GL1_PIPE_STEER__PIPE2_MASK 0x00000030L
+#define GL1_PIPE_STEER__PIPE3_MASK 0x000000C0L
+//CH_PIPE_STEER
+#define CH_PIPE_STEER__PIPE0__SHIFT 0x0
+#define CH_PIPE_STEER__PIPE1__SHIFT 0x2
+#define CH_PIPE_STEER__PIPE2__SHIFT 0x4
+#define CH_PIPE_STEER__PIPE3__SHIFT 0x6
+#define CH_PIPE_STEER__PIPE0_MASK 0x00000003L
+#define CH_PIPE_STEER__PIPE1_MASK 0x0000000CL
+#define CH_PIPE_STEER__PIPE2_MASK 0x00000030L
+#define CH_PIPE_STEER__PIPE3_MASK 0x000000C0L
+//GC_USER_SHADER_ARRAY_CONFIG
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT 0x10
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK 0xFFFF0000L
+//GC_USER_PRIM_CONFIG
+#define GC_USER_PRIM_CONFIG__INACTIVE_PA__SHIFT 0x4
+#define GC_USER_PRIM_CONFIG__INACTIVE_PA_MASK 0x000FFFF0L
+//GC_USER_SA_UNIT_DISABLE
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x00FFFF00L
+//GC_USER_RB_REDUNDANCY
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//GC_USER_RB_BACKEND_DISABLE
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x4
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xFFFFFFF0L
+//GC_USER_RMI_REDUNDANCY
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_0__SHIFT 0x1
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_1__SHIFT 0x2
+#define GC_USER_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE__SHIFT 0x3
+#define GC_USER_RMI_REDUNDANCY__REPAIR_ID_SWAP__SHIFT 0x4
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_0_MASK 0x00000002L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_1_MASK 0x00000004L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE_MASK 0x00000008L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_ID_SWAP_MASK 0x00000010L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//GC_USER_SHADER_RATE_CONFIG
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+//RLC_GPU_IOV_SDMA0_STATUS
+#define RLC_GPU_IOV_SDMA0_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_STATUS
+#define RLC_GPU_IOV_SDMA1_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA2_STATUS
+#define RLC_GPU_IOV_SDMA2_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_STATUS
+#define RLC_GPU_IOV_SDMA3_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_STATUS
+#define RLC_GPU_IOV_SDMA4_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_STATUS
+#define RLC_GPU_IOV_SDMA5_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_STATUS
+#define RLC_GPU_IOV_SDMA6_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_STATUS
+#define RLC_GPU_IOV_SDMA7_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA0_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA2_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cphypdec
+//CP_HYP_PFP_UCODE_ADDR
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_PFP_UCODE_ADDR
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_PFP_UCODE_DATA
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_PFP_UCODE_DATA
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_ADDR
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_ME_RAM_RADDR
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x000FFFFFL
+//CP_ME_RAM_WADDR
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x001FFFFFL
+//CP_HYP_ME_UCODE_DATA
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_ME_RAM_DATA
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC1_UCODE_ADDR
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_MEC_ME1_UCODE_ADDR
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_MEC1_UCODE_DATA
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME1_UCODE_DATA
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC2_UCODE_ADDR
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_MEC_ME2_UCODE_ADDR
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_MEC2_UCODE_DATA
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_UCODE_DATA
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_PFP_UCODE_CHKSUM
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_CHKSUM
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME1_UCODE_CHKSUM
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME2_UCODE_CHKSUM
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_PFP_IC_BASE_LO
+#define CP_PFP_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_PFP_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_PFP_IC_BASE_HI
+#define CP_PFP_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_PFP_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_PFP_IC_BASE_CNTL
+#define CP_PFP_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_PFP_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_PFP_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_PFP_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_PFP_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_PFP_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_PFP_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_PFP_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_PFP_IC_OP_CNTL
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_PFP_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_PFP_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_PFP_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_PFP_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_ME_IC_BASE_LO
+#define CP_ME_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_ME_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_ME_IC_BASE_HI
+#define CP_ME_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_ME_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_ME_IC_BASE_CNTL
+#define CP_ME_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_ME_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_ME_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_ME_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_ME_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_ME_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_ME_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_ME_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_ME_IC_OP_CNTL
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_ME_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_ME_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_ME_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_ME_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_CPC_IC_BASE_LO
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_CPC_IC_BASE_HI
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_CPC_IC_BASE_CNTL
+#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_CPC_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_CPC_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_CPC_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_CPC_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_IC_BASE_LO
+#define CP_MES_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_MES_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_MES_MIBASE_LO
+#define CP_MES_MIBASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_MES_MIBASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_MES_IC_BASE_HI
+#define CP_MES_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_MES_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MIBASE_HI
+#define CP_MES_MIBASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_MES_MIBASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_IC_BASE_CNTL
+#define CP_MES_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MES_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_MES_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MES_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MES_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_DC_BASE_LO
+#define CP_MES_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_MES_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_MES_MDBASE_LO
+#define CP_MES_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_DC_BASE_HI
+#define CP_MES_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_MES_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MDBASE_HI
+#define CP_MES_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MIBOUND_LO
+#define CP_MES_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MES_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIBOUND_HI
+#define CP_MES_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MES_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_MES_MDBOUND_LO
+#define CP_MES_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MES_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MES_MDBOUND_HI
+#define CP_MES_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MES_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_BASE0_LO
+#define CP_GFX_RS64_DC_BASE0_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_DC_BASE0_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_DC_BASE1_LO
+#define CP_GFX_RS64_DC_BASE1_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_DC_BASE1_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_DC_BASE0_HI
+#define CP_GFX_RS64_DC_BASE0_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE0_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_DC_BASE1_HI
+#define CP_GFX_RS64_DC_BASE1_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE1_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_MIBOUND_LO
+#define CP_GFX_RS64_MIBOUND_LO__BOUND__SHIFT 0x0
+#define CP_GFX_RS64_MIBOUND_LO__BOUND_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIBOUND_HI
+#define CP_GFX_RS64_MIBOUND_HI__BOUND__SHIFT 0x0
+#define CP_GFX_RS64_MIBOUND_HI__BOUND_MASK 0xFFFFFFFFL
+//CP_MEC_DC_BASE_LO
+#define CP_MEC_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_MEC_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_MDBASE_LO
+#define CP_MEC_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_DC_BASE_HI
+#define CP_MEC_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_MEC_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_MDBASE_HI
+#define CP_MEC_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_MIBOUND_LO
+#define CP_MEC_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MEC_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MIBOUND_HI
+#define CP_MEC_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MEC_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_MEC_MDBOUND_LO
+#define CP_MEC_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MEC_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MDBOUND_HI
+#define CP_MEC_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MEC_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_grbm_hypdec
+//GRBM_GFX_INDEX_SR_SELECT
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_INDEX_SR_DATA
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_DATA__SA_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX_SR_DATA__SA_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX_SR_DATA__SA_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX_SR_DATA__SA_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_SELECT
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_DATA
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
+//GC_IH_COOKIE_0_PTR
+#define GC_IH_COOKIE_0_PTR__ADDR__SHIFT 0x0
+#define GC_IH_COOKIE_0_PTR__ADDR_MASK 0x000FFFFFL
+//GRBM_SE_REMAP_CNTL
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_EN__SHIFT 0x0
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP__SHIFT 0x1
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_EN__SHIFT 0x4
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP__SHIFT 0x5
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_EN__SHIFT 0x8
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP__SHIFT 0x9
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_EN__SHIFT 0xc
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP__SHIFT 0xd
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_EN__SHIFT 0x10
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP__SHIFT 0x11
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_EN__SHIFT 0x14
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP__SHIFT 0x15
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_EN__SHIFT 0x18
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP__SHIFT 0x19
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_EN__SHIFT 0x1c
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP__SHIFT 0x1d
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_EN_MASK 0x00000001L
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_MASK 0x0000000EL
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_EN_MASK 0x00000010L
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_MASK 0x000000E0L
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_EN_MASK 0x00000100L
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_MASK 0x00000E00L
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_EN_MASK 0x00001000L
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_MASK 0x0000E000L
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_EN_MASK 0x00010000L
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_MASK 0x000E0000L
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_EN_MASK 0x00100000L
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_MASK 0x00E00000L
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_EN_MASK 0x01000000L
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_MASK 0x0E000000L
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_EN_MASK 0x10000000L
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_MASK 0xE0000000L
+
+
+// addressBlock: gc_gcvmsharedhvdec
+//GCMC_VM_FB_SIZE_OFFSET_VF0
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF1
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF2
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF3
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF4
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF5
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF6
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF7
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF8
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF9
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF10
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF11
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF12
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF13
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF14
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF15
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_rlcdec
+//RLC_CNTL
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
+#define RLC_CNTL__RESERVED__SHIFT 0x4
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_F32_UCODE_VERSION
+#define RLC_F32_UCODE_VERSION__THREAD0_VERSION__SHIFT 0x0
+#define RLC_F32_UCODE_VERSION__THREAD1_VERSION__SHIFT 0xa
+#define RLC_F32_UCODE_VERSION__THREAD2_VERSION__SHIFT 0x14
+#define RLC_F32_UCODE_VERSION__THREAD0_VERSION_MASK 0x000003FFL
+#define RLC_F32_UCODE_VERSION__THREAD1_VERSION_MASK 0x000FFC00L
+#define RLC_F32_UCODE_VERSION__THREAD2_VERSION_MASK 0x3FF00000L
+//RLC_STAT
+#define RLC_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x1
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x2
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x3
+#define RLC_STAT__MC_BUSY__SHIFT 0x4
+#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
+#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
+#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
+#define RLC_STAT__RESERVED__SHIFT 0x8
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000008L
+#define RLC_STAT__MC_BUSY_MASK 0x00000010L
+#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
+#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
+#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
+#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
+//RLC_REFCLOCK_TIMESTAMP_LSB
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
+//RLC_REFCLOCK_TIMESTAMP_MSB
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_0
+#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_1
+#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_2
+#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_3
+#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_4
+#define RLC_GPM_TIMER_INT_4__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_4__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_CTRL
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
+#define RLC_GPM_TIMER_CTRL__TIMER_4_EN__SHIFT 0x4
+#define RLC_GPM_TIMER_CTRL__RESERVED_1__SHIFT 0x5
+#define RLC_GPM_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x8
+#define RLC_GPM_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x9
+#define RLC_GPM_TIMER_CTRL__TIMER_2_AUTO_REARM__SHIFT 0xa
+#define RLC_GPM_TIMER_CTRL__TIMER_3_AUTO_REARM__SHIFT 0xb
+#define RLC_GPM_TIMER_CTRL__TIMER_4_AUTO_REARM__SHIFT 0xc
+#define RLC_GPM_TIMER_CTRL__RESERVED_2__SHIFT 0xd
+#define RLC_GPM_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x10
+#define RLC_GPM_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x11
+#define RLC_GPM_TIMER_CTRL__TIMER_2_INT_CLEAR__SHIFT 0x12
+#define RLC_GPM_TIMER_CTRL__TIMER_3_INT_CLEAR__SHIFT 0x13
+#define RLC_GPM_TIMER_CTRL__TIMER_4_INT_CLEAR__SHIFT 0x14
+#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x15
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_EN_MASK 0x00000010L
+#define RLC_GPM_TIMER_CTRL__RESERVED_1_MASK 0x000000E0L
+#define RLC_GPM_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000100L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000200L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_AUTO_REARM_MASK 0x00000400L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_AUTO_REARM_MASK 0x00000800L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_AUTO_REARM_MASK 0x00001000L
+#define RLC_GPM_TIMER_CTRL__RESERVED_2_MASK 0x0000E000L
+#define RLC_GPM_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00010000L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00020000L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_INT_CLEAR_MASK 0x00040000L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_INT_CLEAR_MASK 0x00080000L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_INT_CLEAR_MASK 0x00100000L
+#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_TIMER_STAT
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
+#define RLC_GPM_TIMER_STAT__TIMER_4_STAT__SHIFT 0x4
+#define RLC_GPM_TIMER_STAT__RESERVED_1__SHIFT 0x5
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC__SHIFT 0xa
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC__SHIFT 0xb
+#define RLC_GPM_TIMER_STAT__TIMER_4_ENABLE_SYNC__SHIFT 0xc
+#define RLC_GPM_TIMER_STAT__RESERVED_2__SHIFT 0xd
+#define RLC_GPM_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0x10
+#define RLC_GPM_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0x11
+#define RLC_GPM_TIMER_STAT__TIMER_2_AUTO_REARM_SYNC__SHIFT 0x12
+#define RLC_GPM_TIMER_STAT__TIMER_3_AUTO_REARM_SYNC__SHIFT 0x13
+#define RLC_GPM_TIMER_STAT__TIMER_4_AUTO_REARM_SYNC__SHIFT 0x14
+#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0x15
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
+#define RLC_GPM_TIMER_STAT__TIMER_4_STAT_MASK 0x00000010L
+#define RLC_GPM_TIMER_STAT__RESERVED_1_MASK 0x000000E0L
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC_MASK 0x00000400L
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC_MASK 0x00000800L
+#define RLC_GPM_TIMER_STAT__TIMER_4_ENABLE_SYNC_MASK 0x00001000L
+#define RLC_GPM_TIMER_STAT__RESERVED_2_MASK 0x0000E000L
+#define RLC_GPM_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00010000L
+#define RLC_GPM_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00020000L
+#define RLC_GPM_TIMER_STAT__TIMER_2_AUTO_REARM_SYNC_MASK 0x00040000L
+#define RLC_GPM_TIMER_STAT__TIMER_3_AUTO_REARM_SYNC_MASK 0x00080000L
+#define RLC_GPM_TIMER_STAT__TIMER_4_AUTO_REARM_SYNC_MASK 0x00100000L
+#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_LEGACY_INT_STAT
+#define RLC_GPM_LEGACY_INT_STAT__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_STAT__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_STAT__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_STAT__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_STAT__STORE_LOAD_TIMER3_EXPIRED_T0__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_STAT__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_STAT__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_STAT__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_STAT__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_STAT__STORE_LOAD_TIMER3_EXPIRED_T0_MASK 0x00000010L
+//RLC_GPM_LEGACY_INT_CLEAR
+#define RLC_GPM_LEGACY_INT_CLEAR__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_CLEAR__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_CLEAR__RESERVED_4__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_CLEAR__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_CLEAR__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_CLEAR__RESERVED_4_MASK 0x00000010L
+//RLC_INT_STAT
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
+#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
+#define RLC_INT_STAT__RESERVED__SHIFT 0x9
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
+#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
+#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
+//RLC_MGCG_CTRL
+#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
+#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
+#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
+#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
+#define RLC_MGCG_CTRL__SPARE__SHIFT 0xf
+#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
+#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
+#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
+#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
+#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFF8000L
+//RLC_JUMP_TABLE_RESTORE
+#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
+#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_2
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY_2__PERWGP_TIMEOUT_VALUE__SHIFT 0x10
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY_2__PERWGP_TIMEOUT_VALUE_MASK 0xFFFF0000L
+//RLC_GPU_CLOCK_COUNT_LSB
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_UCODE_CNTL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
+//RLC_GPM_THREAD_RESET
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
+#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
+#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPM_CP_DMA_COMPLETE_T0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_CP_DMA_COMPLETE_T1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_THREAD_INVALIDATE_CACHE
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD0_INVALIDATE_CACHE__SHIFT 0x0
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD1_INVALIDATE_CACHE__SHIFT 0x1
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD2_INVALIDATE_CACHE__SHIFT 0x2
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD3_INVALIDATE_CACHE__SHIFT 0x3
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD0_INVALIDATE_CACHE_MASK 0x00000001L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD1_INVALIDATE_CACHE_MASK 0x00000002L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD2_INVALIDATE_CACHE_MASK 0x00000004L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD3_INVALIDATE_CACHE_MASK 0x00000008L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_CLK_COUNT_GFXCLK_LSB
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_MSB
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_LSB
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_MSB
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_CTRL
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN__SHIFT 0x0
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET__SHIFT 0x1
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE__SHIFT 0x2
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN__SHIFT 0x3
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET__SHIFT 0x4
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE__SHIFT 0x5
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN_MASK 0x00000001L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET_MASK 0x00000002L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE_MASK 0x00000004L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN_MASK 0x00000008L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET_MASK 0x00000010L
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE_MASK 0x00000020L
+//RLC_CLK_COUNT_STAT
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID__SHIFT 0x0
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID__SHIFT 0x1
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC__SHIFT 0x2
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC__SHIFT 0x3
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC__SHIFT 0x4
+#define RLC_CLK_COUNT_STAT__RESERVED__SHIFT 0x5
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID_MASK 0x00000001L
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID_MASK 0x00000002L
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC_MASK 0x00000004L
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC_MASK 0x00000008L
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC_MASK 0x00000010L
+#define RLC_CLK_COUNT_STAT__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCG_DOORBELL_CNTL
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCG_DOORBELL_CNTL__RESERVED__SHIFT 0x16
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+#define RLC_RLCG_DOORBELL_CNTL__RESERVED_MASK 0xFFC00000L
+//RLC_RLCG_DOORBELL_STAT
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCG_DOORBELL_0_DATA_LO
+#define RLC_RLCG_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_0_DATA_HI
+#define RLC_RLCG_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_1_DATA_LO
+#define RLC_RLCG_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_1_DATA_HI
+#define RLC_RLCG_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_2_DATA_LO
+#define RLC_RLCG_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_2_DATA_HI
+#define RLC_RLCG_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_3_DATA_LO
+#define RLC_RLCG_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_3_DATA_HI
+#define RLC_RLCG_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_32_RES_SEL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_CLOCK_32
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
+//RLC_PG_CNTL
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
+#define RLC_PG_CNTL__DYN_PER_WGP_PG_ENABLE__SHIFT 0x2
+#define RLC_PG_CNTL__STATIC_PER_WGP_PG_ENABLE__SHIFT 0x3
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
+#define RLC_PG_CNTL__MEM_DS_DISABLE__SHIFT 0xd
+#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
+#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x13
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable__SHIFT 0x15
+#define RLC_PG_CNTL__RESERVED2__SHIFT 0x16
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE__SHIFT 0x17
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__DYN_PER_WGP_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__STATIC_PER_WGP_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
+#define RLC_PG_CNTL__RESERVED_MASK 0x00001FE0L
+#define RLC_PG_CNTL__MEM_DS_DISABLE_MASK 0x00002000L
+#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
+#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00180000L
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable_MASK 0x00200000L
+#define RLC_PG_CNTL__RESERVED2_MASK 0x00400000L
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK 0x00800000L
+//RLC_GPM_THREAD_PRIORITY
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
+//RLC_GPM_THREAD_ENABLE
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
+#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
+#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCG_DOORBELL_RANGE
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_CGTT_MGCG_OVERRIDE
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE__SHIFT 0xa
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_16_11__SHIFT 0xb
+#define RLC_CGTT_MGCG_OVERRIDE__GC_CAC_MGCG_CLK_CNTL__SHIFT 0x11
+#define RLC_CGTT_MGCG_OVERRIDE__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x12
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_19__SHIFT 0x13
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK 0x00000001L
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK 0x00000400L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_16_11_MASK 0x0001F800L
+#define RLC_CGTT_MGCG_OVERRIDE__GC_CAC_MGCG_CLK_CNTL_MASK 0x00020000L
+#define RLC_CGTT_MGCG_OVERRIDE__SE_CAC_MGCG_CLK_CNTL_MASK 0x00040000L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_19_MASK 0xFFF80000L
+//RLC_CGCG_CGLS_CTRL
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_DYN_PG_STATUS
+#define RLC_DYN_PG_STATUS__PG_STATUS_WGP_MASK__SHIFT 0x0
+#define RLC_DYN_PG_STATUS__PG_STATUS_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_DYN_PG_REQUEST
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_WGP_MASK__SHIFT 0x0
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY
+#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
+#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
+#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
+#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
+//RLC_WGP_STATUS
+#define RLC_WGP_STATUS__WORK_PENDING__SHIFT 0x0
+#define RLC_WGP_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
+//RLC_PG_ALWAYS_ON_WGP_MASK
+#define RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK__SHIFT 0x0
+#define RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_MAX_PG_WGP
+#define RLC_MAX_PG_WGP__MAX_POWERED_UP_WGP__SHIFT 0x0
+#define RLC_MAX_PG_WGP__SPARE__SHIFT 0x8
+#define RLC_MAX_PG_WGP__MAX_POWERED_UP_WGP_MASK 0x000000FFL
+#define RLC_MAX_PG_WGP__SPARE_MASK 0xFFFFFF00L
+//RLC_AUTO_PG_CTRL
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
+//RLC_SERDES_RD_INDEX
+#define RLC_SERDES_RD_INDEX__DATA_REG_ID__SHIFT 0x0
+#define RLC_SERDES_RD_INDEX__SPARE__SHIFT 0x2
+#define RLC_SERDES_RD_INDEX__DATA_REG_ID_MASK 0x00000003L
+#define RLC_SERDES_RD_INDEX__SPARE_MASK 0xFFFFFFFCL
+//RLC_SERDES_RD_DATA_0
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_1
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_2
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_3
+#define RLC_SERDES_RD_DATA_3__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_MASK
+#define RLC_SERDES_MASK__GC_CENTER_HUB_0__SHIFT 0x0
+#define RLC_SERDES_MASK__GC_CENTER_HUB_1__SHIFT 0x1
+#define RLC_SERDES_MASK__RESERVED__SHIFT 0x2
+#define RLC_SERDES_MASK__GC_SE_0__SHIFT 0x10
+#define RLC_SERDES_MASK__GC_SE_1__SHIFT 0x11
+#define RLC_SERDES_MASK__GC_SE_2__SHIFT 0x12
+#define RLC_SERDES_MASK__GC_SE_3__SHIFT 0x13
+#define RLC_SERDES_MASK__GC_SE_4__SHIFT 0x14
+#define RLC_SERDES_MASK__GC_SE_5__SHIFT 0x15
+#define RLC_SERDES_MASK__GC_SE_6__SHIFT 0x16
+#define RLC_SERDES_MASK__GC_SE_7__SHIFT 0x17
+#define RLC_SERDES_MASK__RESERVED_31_24__SHIFT 0x18
+#define RLC_SERDES_MASK__GC_CENTER_HUB_0_MASK 0x00000001L
+#define RLC_SERDES_MASK__GC_CENTER_HUB_1_MASK 0x00000002L
+#define RLC_SERDES_MASK__RESERVED_MASK 0x0000FFFCL
+#define RLC_SERDES_MASK__GC_SE_0_MASK 0x00010000L
+#define RLC_SERDES_MASK__GC_SE_1_MASK 0x00020000L
+#define RLC_SERDES_MASK__GC_SE_2_MASK 0x00040000L
+#define RLC_SERDES_MASK__GC_SE_3_MASK 0x00080000L
+#define RLC_SERDES_MASK__GC_SE_4_MASK 0x00100000L
+#define RLC_SERDES_MASK__GC_SE_5_MASK 0x00200000L
+#define RLC_SERDES_MASK__GC_SE_6_MASK 0x00400000L
+#define RLC_SERDES_MASK__GC_SE_7_MASK 0x00800000L
+#define RLC_SERDES_MASK__RESERVED_31_24_MASK 0xFF000000L
+//RLC_SERDES_CTRL
+#define RLC_SERDES_CTRL__BPM_BROADCAST__SHIFT 0x0
+#define RLC_SERDES_CTRL__BPM_REG_WRITE__SHIFT 0x1
+#define RLC_SERDES_CTRL__BPM_LONG_CMD__SHIFT 0x2
+#define RLC_SERDES_CTRL__BPM_ADDR__SHIFT 0x3
+#define RLC_SERDES_CTRL__REG_ADDR__SHIFT 0x10
+#define RLC_SERDES_CTRL__BPM_BROADCAST_MASK 0x00000001L
+#define RLC_SERDES_CTRL__BPM_REG_WRITE_MASK 0x00000002L
+#define RLC_SERDES_CTRL__BPM_LONG_CMD_MASK 0x00000004L
+#define RLC_SERDES_CTRL__BPM_ADDR_MASK 0x0000FFF8L
+#define RLC_SERDES_CTRL__REG_ADDR_MASK 0x00FF0000L
+//RLC_SERDES_DATA
+#define RLC_SERDES_DATA__DATA__SHIFT 0x0
+#define RLC_SERDES_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_BUSY
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_0__SHIFT 0x0
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_1__SHIFT 0x1
+#define RLC_SERDES_BUSY__RESERVED__SHIFT 0x2
+#define RLC_SERDES_BUSY__GC_SE_0__SHIFT 0x10
+#define RLC_SERDES_BUSY__GC_SE_1__SHIFT 0x11
+#define RLC_SERDES_BUSY__GC_SE_2__SHIFT 0x12
+#define RLC_SERDES_BUSY__GC_SE_3__SHIFT 0x13
+#define RLC_SERDES_BUSY__GC_SE_4__SHIFT 0x14
+#define RLC_SERDES_BUSY__GC_SE_5__SHIFT 0x15
+#define RLC_SERDES_BUSY__GC_SE_6__SHIFT 0x16
+#define RLC_SERDES_BUSY__GC_SE_7__SHIFT 0x17
+#define RLC_SERDES_BUSY__RESERVED_29_24__SHIFT 0x18
+#define RLC_SERDES_BUSY__RD_FIFO_NOT_EMPTY__SHIFT 0x1e
+#define RLC_SERDES_BUSY__RD_PENDING__SHIFT 0x1f
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_0_MASK 0x00000001L
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_1_MASK 0x00000002L
+#define RLC_SERDES_BUSY__RESERVED_MASK 0x0000FFFCL
+#define RLC_SERDES_BUSY__GC_SE_0_MASK 0x00010000L
+#define RLC_SERDES_BUSY__GC_SE_1_MASK 0x00020000L
+#define RLC_SERDES_BUSY__GC_SE_2_MASK 0x00040000L
+#define RLC_SERDES_BUSY__GC_SE_3_MASK 0x00080000L
+#define RLC_SERDES_BUSY__GC_SE_4_MASK 0x00100000L
+#define RLC_SERDES_BUSY__GC_SE_5_MASK 0x00200000L
+#define RLC_SERDES_BUSY__GC_SE_6_MASK 0x00400000L
+#define RLC_SERDES_BUSY__GC_SE_7_MASK 0x00800000L
+#define RLC_SERDES_BUSY__RESERVED_29_24_MASK 0x3F000000L
+#define RLC_SERDES_BUSY__RD_FIFO_NOT_EMPTY_MASK 0x40000000L
+#define RLC_SERDES_BUSY__RD_PENDING_MASK 0x80000000L
+//RLC_GPM_GENERAL_0
+#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_1
+#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_2
+#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_3
+#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_4
+#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_5
+#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_6
+#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_7
+#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_STATIC_PG_STATUS
+#define RLC_STATIC_PG_STATUS__PG_STATUS_WGP_MASK__SHIFT 0x0
+#define RLC_STATIC_PG_STATUS__PG_STATUS_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_16
+#define RLC_GPM_GENERAL_16__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_16__DATA_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_3
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
+#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
+#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
+//RLC_GPR_REG1
+#define RLC_GPR_REG1__DATA__SHIFT 0x0
+#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPR_REG2
+#define RLC_GPR_REG2__DATA__SHIFT 0x0
+#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_DISABLE_TH0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_INT__SHIFT 0x0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_GPM_LEGACY_INT_DISABLE
+#define RLC_GPM_LEGACY_INT_DISABLE__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_DISABLE__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_DISABLE__STORE_LOAD_TIMER3_EXPIRED_T0__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_DISABLE__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_DISABLE__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_DISABLE__STORE_LOAD_TIMER3_EXPIRED_T0_MASK 0x00000010L
+//RLC_GPM_INT_FORCE_TH0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_INT__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_SRM_CNTL
+#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
+#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
+#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_GPM_COMMAND_STATUS
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_INDEX_CNTL_ADDR_0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_1
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_2
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_3
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_4
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_5
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_6
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_7
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_DATA_0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_1
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_2
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_3
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_4
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_5
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_6
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_7
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_STAT
+#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
+#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
+#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
+#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
+#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
+#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
+//RLC_GPM_GENERAL_8
+#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_9
+#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_10
+#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_11
+#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_12
+#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_CNTL_0
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_1
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_2
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0xC0000000L
+//RLC_SPM_UTCL1_CNTL
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_SPM_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x1e
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_SPM_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
+//RLC_UTCL1_STATUS_2
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
+#define RLC_UTCL1_STATUS_2__RESERVED_1__SHIFT 0x4
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
+#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0x9
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
+#define RLC_UTCL1_STATUS_2__RESERVED_1_MASK 0x00000010L
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
+#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFE00L
+//RLC_SPM_UTCL1_ERROR_1
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_SPM_UTCL1_ERROR_2
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_1
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH0_ERROR_2
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH1_ERROR_1
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH1_ERROR_2
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH2_ERROR_1
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH2_ERROR_2
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_CGCG_CGLS_CTRL_3D
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL_3D
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_SEMAPHORE_0
+#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_1
+#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_2
+#define RLC_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_3
+#define RLC_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_PACE_INT_STAT
+#define RLC_PACE_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_PACE_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_UTCL1_STATUS
+#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
+#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
+//RLC_R2I_CNTL_0
+#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_1
+#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_2
+#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_3
+#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_STAT_TH0
+#define RLC_GPM_INT_STAT_TH0__STATUS__SHIFT 0x0
+#define RLC_GPM_INT_STAT_TH0__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_13
+#define RLC_GPM_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_14
+#define RLC_GPM_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_15
+#define RLC_GPM_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_CLOCK_COUNT_LSB_2
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_2
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_DISABLE
+#define RLC_PACE_INT_DISABLE__DISABLE_INT__SHIFT 0x0
+#define RLC_PACE_INT_DISABLE__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_2
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_DOORBELL_RANGE
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCV_DOORBELL_CNTL
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_RLCV_DOORBELL_STAT
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCV_DOORBELL_0_DATA_LO
+#define RLC_RLCV_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_0_DATA_HI
+#define RLC_RLCV_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_1_DATA_LO
+#define RLC_RLCV_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_1_DATA_HI
+#define RLC_RLCV_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_2_DATA_LO
+#define RLC_RLCV_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_2_DATA_HI
+#define RLC_RLCV_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_3_DATA_LO
+#define RLC_RLCV_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_3_DATA_HI
+#define RLC_RLCV_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_LSB_1
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_1
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_RLCV_SPARE_INT
+#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_FIREWALL_VIOLATION
+#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
+#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_INT_0
+#define RLC_PACE_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_PACE_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_INT_1
+#define RLC_PACE_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_PACE_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_CTRL
+#define RLC_PACE_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_PACE_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_PACE_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x2
+#define RLC_PACE_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x3
+#define RLC_PACE_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x4
+#define RLC_PACE_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x5
+#define RLC_PACE_TIMER_CTRL__RESERVED__SHIFT 0x6
+#define RLC_PACE_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_PACE_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000004L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000008L
+#define RLC_PACE_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00000010L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00000020L
+#define RLC_PACE_TIMER_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_SMU_CLK_REQ
+#define RLC_SMU_CLK_REQ__VALID__SHIFT 0x0
+#define RLC_SMU_CLK_REQ__VALID_MASK 0x00000001L
+//RLC_CP_STAT_INVAL_STAT
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND__SHIFT 0x0
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND__SHIFT 0x1
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND__SHIFT 0x2
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_CHANGED__SHIFT 0x3
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_CHANGED__SHIFT 0x4
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_CHANGED__SHIFT 0x5
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_MASK 0x00000001L
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_MASK 0x00000002L
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_MASK 0x00000004L
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_CHANGED_MASK 0x00000008L
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_CHANGED_MASK 0x00000010L
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_CHANGED_MASK 0x00000020L
+//RLC_CP_STAT_INVAL_CTRL
+#define RLC_CP_STAT_INVAL_CTRL__CPG_STAT_INVAL_PEND_EN__SHIFT 0x0
+#define RLC_CP_STAT_INVAL_CTRL__CPC_STAT_INVAL_PEND_EN__SHIFT 0x1
+#define RLC_CP_STAT_INVAL_CTRL__CPF_STAT_INVAL_PEND_EN__SHIFT 0x2
+#define RLC_CP_STAT_INVAL_CTRL__CPG_STAT_INVAL_PEND_EN_MASK 0x00000001L
+#define RLC_CP_STAT_INVAL_CTRL__CPC_STAT_INVAL_PEND_EN_MASK 0x00000002L
+#define RLC_CP_STAT_INVAL_CTRL__CPF_STAT_INVAL_PEND_EN_MASK 0x00000004L
+//RLC_SPARE
+#define RLC_SPARE__SPARE__SHIFT 0x0
+#define RLC_SPARE__SPARE_MASK 0xFFFFFFFFL
+//RLC_SPP_CTRL
+#define RLC_SPP_CTRL__ENABLE__SHIFT 0x0
+#define RLC_SPP_CTRL__ENABLE_PPROF__SHIFT 0x1
+#define RLC_SPP_CTRL__ENABLE_PWR_OPT__SHIFT 0x2
+#define RLC_SPP_CTRL__PAUSE__SHIFT 0x3
+#define RLC_SPP_CTRL__ENABLE_MASK 0x00000001L
+#define RLC_SPP_CTRL__ENABLE_PPROF_MASK 0x00000002L
+#define RLC_SPP_CTRL__ENABLE_PWR_OPT_MASK 0x00000004L
+#define RLC_SPP_CTRL__PAUSE_MASK 0x00000008L
+//RLC_SPP_SHADER_PROFILE_EN
+#define RLC_SPP_SHADER_PROFILE_EN__PS_ENABLE__SHIFT 0x0
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_1__SHIFT 0x1
+#define RLC_SPP_SHADER_PROFILE_EN__GS_ENABLE__SHIFT 0x2
+#define RLC_SPP_SHADER_PROFILE_EN__HS_ENABLE__SHIFT 0x3
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_ENABLE__SHIFT 0x4
+#define RLC_SPP_SHADER_PROFILE_EN__CS_ENABLE__SHIFT 0x5
+#define RLC_SPP_SHADER_PROFILE_EN__PS_STOP_CONDITION__SHIFT 0x6
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_7__SHIFT 0x7
+#define RLC_SPP_SHADER_PROFILE_EN__GS_STOP_CONDITION__SHIFT 0x8
+#define RLC_SPP_SHADER_PROFILE_EN__HS_STOP_CONDITION__SHIFT 0x9
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_STOP_CONDITION__SHIFT 0xa
+#define RLC_SPP_SHADER_PROFILE_EN__CS_STOP_CONDITION__SHIFT 0xb
+#define RLC_SPP_SHADER_PROFILE_EN__PS_START_CONDITION__SHIFT 0xc
+#define RLC_SPP_SHADER_PROFILE_EN__CS_START_CONDITION__SHIFT 0xd
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_MISS__SHIFT 0xe
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_UNLOCKED__SHIFT 0xf
+#define RLC_SPP_SHADER_PROFILE_EN__ENABLE_PROF_INFO_LOCK__SHIFT 0x10
+#define RLC_SPP_SHADER_PROFILE_EN__PS_ENABLE_MASK 0x00000001L
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_1_MASK 0x00000002L
+#define RLC_SPP_SHADER_PROFILE_EN__GS_ENABLE_MASK 0x00000004L
+#define RLC_SPP_SHADER_PROFILE_EN__HS_ENABLE_MASK 0x00000008L
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_ENABLE_MASK 0x00000010L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_ENABLE_MASK 0x00000020L
+#define RLC_SPP_SHADER_PROFILE_EN__PS_STOP_CONDITION_MASK 0x00000040L
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_7_MASK 0x00000080L
+#define RLC_SPP_SHADER_PROFILE_EN__GS_STOP_CONDITION_MASK 0x00000100L
+#define RLC_SPP_SHADER_PROFILE_EN__HS_STOP_CONDITION_MASK 0x00000200L
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_STOP_CONDITION_MASK 0x00000400L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_STOP_CONDITION_MASK 0x00000800L
+#define RLC_SPP_SHADER_PROFILE_EN__PS_START_CONDITION_MASK 0x00001000L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_START_CONDITION_MASK 0x00002000L
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_MISS_MASK 0x00004000L
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_UNLOCKED_MASK 0x00008000L
+#define RLC_SPP_SHADER_PROFILE_EN__ENABLE_PROF_INFO_LOCK_MASK 0x00010000L
+//RLC_SPP_SSF_CAPTURE_EN
+#define RLC_SPP_SSF_CAPTURE_EN__PS_ENABLE__SHIFT 0x0
+#define RLC_SPP_SSF_CAPTURE_EN__RESERVED_1__SHIFT 0x1
+#define RLC_SPP_SSF_CAPTURE_EN__GS_ENABLE__SHIFT 0x2
+#define RLC_SPP_SSF_CAPTURE_EN__HS_ENABLE__SHIFT 0x3
+#define RLC_SPP_SSF_CAPTURE_EN__CSG_ENABLE__SHIFT 0x4
+#define RLC_SPP_SSF_CAPTURE_EN__CS_ENABLE__SHIFT 0x5
+#define RLC_SPP_SSF_CAPTURE_EN__PS_ENABLE_MASK 0x00000001L
+#define RLC_SPP_SSF_CAPTURE_EN__RESERVED_1_MASK 0x00000002L
+#define RLC_SPP_SSF_CAPTURE_EN__GS_ENABLE_MASK 0x00000004L
+#define RLC_SPP_SSF_CAPTURE_EN__HS_ENABLE_MASK 0x00000008L
+#define RLC_SPP_SSF_CAPTURE_EN__CSG_ENABLE_MASK 0x00000010L
+#define RLC_SPP_SSF_CAPTURE_EN__CS_ENABLE_MASK 0x00000020L
+//RLC_SPP_SSF_THRESHOLD_0
+#define RLC_SPP_SSF_THRESHOLD_0__PS_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_0__RESERVED__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_0__PS_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_0__RESERVED_MASK 0xFFFF0000L
+//RLC_SPP_SSF_THRESHOLD_1
+#define RLC_SPP_SSF_THRESHOLD_1__GS_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_1__HS_THRESHOLD__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_1__GS_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_1__HS_THRESHOLD_MASK 0xFFFF0000L
+//RLC_SPP_SSF_THRESHOLD_2
+#define RLC_SPP_SSF_THRESHOLD_2__CSG_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_2__CS_THRESHOLD__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_2__CSG_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_2__CS_THRESHOLD_MASK 0xFFFF0000L
+//RLC_SPP_INFLIGHT_RD_ADDR
+#define RLC_SPP_INFLIGHT_RD_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_INFLIGHT_RD_ADDR__ADDR_MASK 0x0000001FL
+//RLC_SPP_INFLIGHT_RD_DATA
+#define RLC_SPP_INFLIGHT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_SPP_INFLIGHT_RD_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPP_PROF_INFO_1
+#define RLC_SPP_PROF_INFO_1__SH_ID__SHIFT 0x0
+#define RLC_SPP_PROF_INFO_1__SH_ID_MASK 0xFFFFFFFFL
+//RLC_SPP_PROF_INFO_2
+#define RLC_SPP_PROF_INFO_2__SH_TYPE__SHIFT 0x0
+#define RLC_SPP_PROF_INFO_2__CAM_HIT__SHIFT 0x4
+#define RLC_SPP_PROF_INFO_2__CAM_LOCK__SHIFT 0x5
+#define RLC_SPP_PROF_INFO_2__CAM_CONFLICT__SHIFT 0x6
+#define RLC_SPP_PROF_INFO_2__SH_TYPE_MASK 0x0000000FL
+#define RLC_SPP_PROF_INFO_2__CAM_HIT_MASK 0x00000010L
+#define RLC_SPP_PROF_INFO_2__CAM_LOCK_MASK 0x00000020L
+#define RLC_SPP_PROF_INFO_2__CAM_CONFLICT_MASK 0x00000040L
+//RLC_SPP_GLOBAL_SH_ID
+#define RLC_SPP_GLOBAL_SH_ID__SH_ID__SHIFT 0x0
+#define RLC_SPP_GLOBAL_SH_ID__SH_ID_MASK 0xFFFFFFFFL
+//RLC_SPP_GLOBAL_SH_ID_VALID
+#define RLC_SPP_GLOBAL_SH_ID_VALID__VALID__SHIFT 0x0
+#define RLC_SPP_GLOBAL_SH_ID_VALID__VALID_MASK 0x00000001L
+//RLC_SPP_STATUS
+#define RLC_SPP_STATUS__RESERVED_0__SHIFT 0x0
+#define RLC_SPP_STATUS__SSF_BUSY__SHIFT 0x1
+#define RLC_SPP_STATUS__EVENT_ARB_BUSY__SHIFT 0x2
+#define RLC_SPP_STATUS__SPP_BUSY__SHIFT 0x1f
+#define RLC_SPP_STATUS__RESERVED_0_MASK 0x00000001L
+#define RLC_SPP_STATUS__SSF_BUSY_MASK 0x00000002L
+#define RLC_SPP_STATUS__EVENT_ARB_BUSY_MASK 0x00000004L
+#define RLC_SPP_STATUS__SPP_BUSY_MASK 0x80000000L
+//RLC_SPP_PVT_STAT_0
+#define RLC_SPP_PVT_STAT_0__LEVEL_0_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_0__LEVEL_1_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_0__LEVEL_2_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_0__LEVEL_3_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_0__LEVEL_0_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_0__LEVEL_1_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_0__LEVEL_2_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_0__LEVEL_3_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_1
+#define RLC_SPP_PVT_STAT_1__LEVEL_4_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_1__LEVEL_5_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_1__LEVEL_6_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_1__LEVEL_7_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_1__LEVEL_4_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_1__LEVEL_5_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_1__LEVEL_6_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_1__LEVEL_7_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_2
+#define RLC_SPP_PVT_STAT_2__LEVEL_8_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_2__LEVEL_9_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_2__LEVEL_10_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_2__LEVEL_11_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_2__LEVEL_8_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_2__LEVEL_9_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_2__LEVEL_10_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_2__LEVEL_11_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_3
+#define RLC_SPP_PVT_STAT_3__LEVEL_12_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_3__LEVEL_13_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_3__LEVEL_14_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_3__LEVEL_15_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_3__LEVEL_12_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_3__LEVEL_13_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_3__LEVEL_14_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_3__LEVEL_15_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_LEVEL_MAX
+#define RLC_SPP_PVT_LEVEL_MAX__LEVEL__SHIFT 0x0
+#define RLC_SPP_PVT_LEVEL_MAX__LEVEL_MASK 0x0000000FL
+//RLC_SPP_STALL_STATE_UPDATE
+#define RLC_SPP_STALL_STATE_UPDATE__STALL__SHIFT 0x0
+#define RLC_SPP_STALL_STATE_UPDATE__ENABLE__SHIFT 0x1
+#define RLC_SPP_STALL_STATE_UPDATE__STALL_MASK 0x00000001L
+#define RLC_SPP_STALL_STATE_UPDATE__ENABLE_MASK 0x00000002L
+//RLC_SPP_PBB_INFO
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE__SHIFT 0x0
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_VALID__SHIFT 0x1
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE__SHIFT 0x2
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_VALID__SHIFT 0x3
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_MASK 0x00000001L
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_VALID_MASK 0x00000002L
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_MASK 0x00000004L
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_VALID_MASK 0x00000008L
+//RLC_SPP_RESET
+#define RLC_SPP_RESET__SSF_RESET__SHIFT 0x0
+#define RLC_SPP_RESET__EVENT_ARB_RESET__SHIFT 0x1
+#define RLC_SPP_RESET__CAM_RESET__SHIFT 0x2
+#define RLC_SPP_RESET__PVT_RESET__SHIFT 0x3
+#define RLC_SPP_RESET__SSF_RESET_MASK 0x00000001L
+#define RLC_SPP_RESET__EVENT_ARB_RESET_MASK 0x00000002L
+#define RLC_SPP_RESET__CAM_RESET_MASK 0x00000004L
+#define RLC_SPP_RESET__PVT_RESET_MASK 0x00000008L
+//RLC_RLCP_DOORBELL_RANGE
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCP_DOORBELL_CNTL
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_RLCP_DOORBELL_STAT
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCP_DOORBELL_0_DATA_LO
+#define RLC_RLCP_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_0_DATA_HI
+#define RLC_RLCP_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_1_DATA_LO
+#define RLC_RLCP_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_1_DATA_HI
+#define RLC_RLCP_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_2_DATA_LO
+#define RLC_RLCP_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_2_DATA_HI
+#define RLC_RLCP_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_3_DATA_LO
+#define RLC_RLCP_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_3_DATA_HI
+#define RLC_RLCP_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_CAC_MASK_CNTL
+#define RLC_CAC_MASK_CNTL__RLC_CAC_MASK__SHIFT 0x0
+#define RLC_CAC_MASK_CNTL__RLC_CAC_MASK_MASK 0xFFFFFFFFL
+//RLC_POWER_RESIDENCY_CNTR_CTRL
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CLK_RESIDENCY_CNTR_CTRL
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_DS_RESIDENCY_CNTR_CTRL
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_DS_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_ULV_RESIDENCY_CNTR_CTRL
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_PCC_RESIDENCY_CNTR_CTRL
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__EVENT_SEL__SHIFT 0x5
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x9
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__EVENT_SEL_MASK 0x000001E0L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFE00L
+//RLC_GENERAL_RESIDENCY_CNTR_CTRL
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_POWER_RESIDENCY_EVENT_CNTR
+#define RLC_POWER_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_CLK_RESIDENCY_EVENT_CNTR
+#define RLC_CLK_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_DS_RESIDENCY_EVENT_CNTR
+#define RLC_DS_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_DS_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_ULV_RESIDENCY_EVENT_CNTR
+#define RLC_ULV_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_PCC_RESIDENCY_EVENT_CNTR
+#define RLC_PCC_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GENERAL_RESIDENCY_EVENT_CNTR
+#define RLC_GENERAL_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_POWER_RESIDENCY_REF_CNTR
+#define RLC_POWER_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_CLK_RESIDENCY_REF_CNTR
+#define RLC_CLK_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_DS_RESIDENCY_REF_CNTR
+#define RLC_DS_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_DS_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_ULV_RESIDENCY_REF_CNTR
+#define RLC_ULV_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_PCC_RESIDENCY_REF_CNTR
+#define RLC_PCC_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GENERAL_RESIDENCY_REF_CNTR
+#define RLC_GENERAL_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GFX_IH_CLIENT_CTRL
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_MASK__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_MASK__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_MASK__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_MASK__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_MASK__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_15__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_ERROR_CLEAR__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_ERROR_CLEAR__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_ERROR_CLEAR__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_ERROR_CLEAR__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_ERROR_CLEAR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_31__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_MASK_MASK 0x000000FFL
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_MASK_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_MASK_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_MASK_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_MASK_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_15_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_ERROR_CLEAR_MASK 0x00FF0000L
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_ERROR_CLEAR_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_ERROR_CLEAR_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_ERROR_CLEAR_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_ERROR_CLEAR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_31_MASK 0x80000000L
+//RLC_GFX_IH_ARBITER_STAT
+#define RLC_GFX_IH_ARBITER_STAT__CLIENT_GRANTED__SHIFT 0x0
+#define RLC_GFX_IH_ARBITER_STAT__RESERVED__SHIFT 0x10
+#define RLC_GFX_IH_ARBITER_STAT__LAST_CLIENT_GRANTED__SHIFT 0x1c
+#define RLC_GFX_IH_ARBITER_STAT__CLIENT_GRANTED_MASK 0x0000FFFFL
+#define RLC_GFX_IH_ARBITER_STAT__RESERVED_MASK 0x0FFF0000L
+#define RLC_GFX_IH_ARBITER_STAT__LAST_CLIENT_GRANTED_MASK 0xF0000000L
+//RLC_GFX_IH_CLIENT_SE_STAT_L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_SE_STAT_H
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_SDMA_STAT
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_OTHER_STAT
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__RESERVED_31_24__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__RESERVED_31_24_MASK 0xFF000000L
+//RLC_SPM_GLOBAL_DELAY_IND_ADDR
+#define RLC_SPM_GLOBAL_DELAY_IND_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_DELAY_IND_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_GLOBAL_DELAY_IND_DATA
+#define RLC_SPM_GLOBAL_DELAY_IND_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_GLOBAL_DELAY_IND_DATA__DATA_MASK 0x0000003FL
+//RLC_SPM_SE_DELAY_IND_ADDR
+#define RLC_SPM_SE_DELAY_IND_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_SE_DELAY_IND_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_SE_DELAY_IND_DATA
+#define RLC_SPM_SE_DELAY_IND_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_SE_DELAY_IND_DATA__DATA_MASK 0x0000003FL
+//RLC_LX6_CNTL
+#define RLC_LX6_CNTL__BRESET__SHIFT 0x0
+#define RLC_LX6_CNTL__RUNSTALL__SHIFT 0x1
+#define RLC_LX6_CNTL__PDEBUG_ENABLE__SHIFT 0x2
+#define RLC_LX6_CNTL__STAT_VECTOR_SEL__SHIFT 0x3
+#define RLC_LX6_CNTL__BRESET_MASK 0x00000001L
+#define RLC_LX6_CNTL__RUNSTALL_MASK 0x00000002L
+#define RLC_LX6_CNTL__PDEBUG_ENABLE_MASK 0x00000004L
+#define RLC_LX6_CNTL__STAT_VECTOR_SEL_MASK 0x00000008L
+//RLC_XT_CORE_STATUS
+#define RLC_XT_CORE_STATUS__P_WAIT_MODE__SHIFT 0x0
+#define RLC_XT_CORE_STATUS__P_FATAL_ERROR__SHIFT 0x1
+#define RLC_XT_CORE_STATUS__DOUBLE_EXCEPTION_ERROR__SHIFT 0x2
+#define RLC_XT_CORE_STATUS__P_WAIT_MODE_MASK 0x00000001L
+#define RLC_XT_CORE_STATUS__P_FATAL_ERROR_MASK 0x00000002L
+#define RLC_XT_CORE_STATUS__DOUBLE_EXCEPTION_ERROR_MASK 0x00000004L
+//RLC_XT_CORE_INTERRUPT
+#define RLC_XT_CORE_INTERRUPT__EXTINT1__SHIFT 0x0
+#define RLC_XT_CORE_INTERRUPT__EXTINT2__SHIFT 0x1a
+#define RLC_XT_CORE_INTERRUPT__NMI__SHIFT 0x1b
+#define RLC_XT_CORE_INTERRUPT__EXTINT1_MASK 0x03FFFFFFL
+#define RLC_XT_CORE_INTERRUPT__EXTINT2_MASK 0x04000000L
+#define RLC_XT_CORE_INTERRUPT__NMI_MASK 0x08000000L
+//RLC_XT_CORE_FAULT_INFO
+#define RLC_XT_CORE_FAULT_INFO__FAULT_INFO__SHIFT 0x0
+#define RLC_XT_CORE_FAULT_INFO__FAULT_INFO_MASK 0xFFFFFFFFL
+//RLC_XT_CORE_ALT_RESET_VEC
+#define RLC_XT_CORE_ALT_RESET_VEC__ALT_RESET_VEC__SHIFT 0x0
+#define RLC_XT_CORE_ALT_RESET_VEC__ALT_RESET_VEC_MASK 0xFFFFFFFFL
+//RLC_XT_CORE_RESERVED
+#define RLC_XT_CORE_RESERVED__RESERVED__SHIFT 0x0
+#define RLC_XT_CORE_RESERVED__RESERVED_MASK 0xFFFFFFFFL
+//RLC_XT_INT_VEC_FORCE
+#define RLC_XT_INT_VEC_FORCE__NUM_0__SHIFT 0x0
+#define RLC_XT_INT_VEC_FORCE__NUM_1__SHIFT 0x1
+#define RLC_XT_INT_VEC_FORCE__NUM_2__SHIFT 0x2
+#define RLC_XT_INT_VEC_FORCE__NUM_3__SHIFT 0x3
+#define RLC_XT_INT_VEC_FORCE__NUM_4__SHIFT 0x4
+#define RLC_XT_INT_VEC_FORCE__NUM_5__SHIFT 0x5
+#define RLC_XT_INT_VEC_FORCE__NUM_6__SHIFT 0x6
+#define RLC_XT_INT_VEC_FORCE__NUM_7__SHIFT 0x7
+#define RLC_XT_INT_VEC_FORCE__NUM_8__SHIFT 0x8
+#define RLC_XT_INT_VEC_FORCE__NUM_9__SHIFT 0x9
+#define RLC_XT_INT_VEC_FORCE__NUM_10__SHIFT 0xa
+#define RLC_XT_INT_VEC_FORCE__NUM_11__SHIFT 0xb
+#define RLC_XT_INT_VEC_FORCE__NUM_12__SHIFT 0xc
+#define RLC_XT_INT_VEC_FORCE__NUM_13__SHIFT 0xd
+#define RLC_XT_INT_VEC_FORCE__NUM_14__SHIFT 0xe
+#define RLC_XT_INT_VEC_FORCE__NUM_15__SHIFT 0xf
+#define RLC_XT_INT_VEC_FORCE__NUM_16__SHIFT 0x10
+#define RLC_XT_INT_VEC_FORCE__NUM_17__SHIFT 0x11
+#define RLC_XT_INT_VEC_FORCE__NUM_18__SHIFT 0x12
+#define RLC_XT_INT_VEC_FORCE__NUM_19__SHIFT 0x13
+#define RLC_XT_INT_VEC_FORCE__NUM_20__SHIFT 0x14
+#define RLC_XT_INT_VEC_FORCE__NUM_21__SHIFT 0x15
+#define RLC_XT_INT_VEC_FORCE__NUM_22__SHIFT 0x16
+#define RLC_XT_INT_VEC_FORCE__NUM_23__SHIFT 0x17
+#define RLC_XT_INT_VEC_FORCE__NUM_24__SHIFT 0x18
+#define RLC_XT_INT_VEC_FORCE__NUM_25__SHIFT 0x19
+#define RLC_XT_INT_VEC_FORCE__NUM_0_MASK 0x00000001L
+#define RLC_XT_INT_VEC_FORCE__NUM_1_MASK 0x00000002L
+#define RLC_XT_INT_VEC_FORCE__NUM_2_MASK 0x00000004L
+#define RLC_XT_INT_VEC_FORCE__NUM_3_MASK 0x00000008L
+#define RLC_XT_INT_VEC_FORCE__NUM_4_MASK 0x00000010L
+#define RLC_XT_INT_VEC_FORCE__NUM_5_MASK 0x00000020L
+#define RLC_XT_INT_VEC_FORCE__NUM_6_MASK 0x00000040L
+#define RLC_XT_INT_VEC_FORCE__NUM_7_MASK 0x00000080L
+#define RLC_XT_INT_VEC_FORCE__NUM_8_MASK 0x00000100L
+#define RLC_XT_INT_VEC_FORCE__NUM_9_MASK 0x00000200L
+#define RLC_XT_INT_VEC_FORCE__NUM_10_MASK 0x00000400L
+#define RLC_XT_INT_VEC_FORCE__NUM_11_MASK 0x00000800L
+#define RLC_XT_INT_VEC_FORCE__NUM_12_MASK 0x00001000L
+#define RLC_XT_INT_VEC_FORCE__NUM_13_MASK 0x00002000L
+#define RLC_XT_INT_VEC_FORCE__NUM_14_MASK 0x00004000L
+#define RLC_XT_INT_VEC_FORCE__NUM_15_MASK 0x00008000L
+#define RLC_XT_INT_VEC_FORCE__NUM_16_MASK 0x00010000L
+#define RLC_XT_INT_VEC_FORCE__NUM_17_MASK 0x00020000L
+#define RLC_XT_INT_VEC_FORCE__NUM_18_MASK 0x00040000L
+#define RLC_XT_INT_VEC_FORCE__NUM_19_MASK 0x00080000L
+#define RLC_XT_INT_VEC_FORCE__NUM_20_MASK 0x00100000L
+#define RLC_XT_INT_VEC_FORCE__NUM_21_MASK 0x00200000L
+#define RLC_XT_INT_VEC_FORCE__NUM_22_MASK 0x00400000L
+#define RLC_XT_INT_VEC_FORCE__NUM_23_MASK 0x00800000L
+#define RLC_XT_INT_VEC_FORCE__NUM_24_MASK 0x01000000L
+#define RLC_XT_INT_VEC_FORCE__NUM_25_MASK 0x02000000L
+//RLC_XT_INT_VEC_CLEAR
+#define RLC_XT_INT_VEC_CLEAR__NUM_0__SHIFT 0x0
+#define RLC_XT_INT_VEC_CLEAR__NUM_1__SHIFT 0x1
+#define RLC_XT_INT_VEC_CLEAR__NUM_2__SHIFT 0x2
+#define RLC_XT_INT_VEC_CLEAR__NUM_3__SHIFT 0x3
+#define RLC_XT_INT_VEC_CLEAR__NUM_4__SHIFT 0x4
+#define RLC_XT_INT_VEC_CLEAR__NUM_5__SHIFT 0x5
+#define RLC_XT_INT_VEC_CLEAR__NUM_6__SHIFT 0x6
+#define RLC_XT_INT_VEC_CLEAR__NUM_7__SHIFT 0x7
+#define RLC_XT_INT_VEC_CLEAR__NUM_8__SHIFT 0x8
+#define RLC_XT_INT_VEC_CLEAR__NUM_9__SHIFT 0x9
+#define RLC_XT_INT_VEC_CLEAR__NUM_10__SHIFT 0xa
+#define RLC_XT_INT_VEC_CLEAR__NUM_11__SHIFT 0xb
+#define RLC_XT_INT_VEC_CLEAR__NUM_12__SHIFT 0xc
+#define RLC_XT_INT_VEC_CLEAR__NUM_13__SHIFT 0xd
+#define RLC_XT_INT_VEC_CLEAR__NUM_14__SHIFT 0xe
+#define RLC_XT_INT_VEC_CLEAR__NUM_15__SHIFT 0xf
+#define RLC_XT_INT_VEC_CLEAR__NUM_16__SHIFT 0x10
+#define RLC_XT_INT_VEC_CLEAR__NUM_17__SHIFT 0x11
+#define RLC_XT_INT_VEC_CLEAR__NUM_18__SHIFT 0x12
+#define RLC_XT_INT_VEC_CLEAR__NUM_19__SHIFT 0x13
+#define RLC_XT_INT_VEC_CLEAR__NUM_20__SHIFT 0x14
+#define RLC_XT_INT_VEC_CLEAR__NUM_21__SHIFT 0x15
+#define RLC_XT_INT_VEC_CLEAR__NUM_22__SHIFT 0x16
+#define RLC_XT_INT_VEC_CLEAR__NUM_23__SHIFT 0x17
+#define RLC_XT_INT_VEC_CLEAR__NUM_24__SHIFT 0x18
+#define RLC_XT_INT_VEC_CLEAR__NUM_25__SHIFT 0x19
+#define RLC_XT_INT_VEC_CLEAR__NUM_0_MASK 0x00000001L
+#define RLC_XT_INT_VEC_CLEAR__NUM_1_MASK 0x00000002L
+#define RLC_XT_INT_VEC_CLEAR__NUM_2_MASK 0x00000004L
+#define RLC_XT_INT_VEC_CLEAR__NUM_3_MASK 0x00000008L
+#define RLC_XT_INT_VEC_CLEAR__NUM_4_MASK 0x00000010L
+#define RLC_XT_INT_VEC_CLEAR__NUM_5_MASK 0x00000020L
+#define RLC_XT_INT_VEC_CLEAR__NUM_6_MASK 0x00000040L
+#define RLC_XT_INT_VEC_CLEAR__NUM_7_MASK 0x00000080L
+#define RLC_XT_INT_VEC_CLEAR__NUM_8_MASK 0x00000100L
+#define RLC_XT_INT_VEC_CLEAR__NUM_9_MASK 0x00000200L
+#define RLC_XT_INT_VEC_CLEAR__NUM_10_MASK 0x00000400L
+#define RLC_XT_INT_VEC_CLEAR__NUM_11_MASK 0x00000800L
+#define RLC_XT_INT_VEC_CLEAR__NUM_12_MASK 0x00001000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_13_MASK 0x00002000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_14_MASK 0x00004000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_15_MASK 0x00008000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_16_MASK 0x00010000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_17_MASK 0x00020000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_18_MASK 0x00040000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_19_MASK 0x00080000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_20_MASK 0x00100000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_21_MASK 0x00200000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_22_MASK 0x00400000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_23_MASK 0x00800000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_24_MASK 0x01000000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_25_MASK 0x02000000L
+//RLC_XT_INT_VEC_MUX_SEL
+#define RLC_XT_INT_VEC_MUX_SEL__MUX_SEL__SHIFT 0x0
+#define RLC_XT_INT_VEC_MUX_SEL__MUX_SEL_MASK 0x0000001FL
+//RLC_XT_INT_VEC_MUX_INT_SEL
+#define RLC_XT_INT_VEC_MUX_INT_SEL__INT_SEL__SHIFT 0x0
+#define RLC_XT_INT_VEC_MUX_INT_SEL__INT_SEL_MASK 0x0000003FL
+//RLC_GPU_CLOCK_COUNT_SPM_LSB
+#define RLC_GPU_CLOCK_COUNT_SPM_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_SPM_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_SPM_MSB
+#define RLC_GPU_CLOCK_COUNT_SPM_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_SPM_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_SPM_THREAD_TRACE_CTRL
+#define RLC_SPM_THREAD_TRACE_CTRL__THREAD_TRACE_INT_EN__SHIFT 0x0
+#define RLC_SPM_THREAD_TRACE_CTRL__THREAD_TRACE_INT_EN_MASK 0x00000001L
+//RLC_SPP_CAM_ADDR
+#define RLC_SPP_CAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_CAM_ADDR__ADDR_MASK 0x000000FFL
+//RLC_SPP_CAM_DATA
+#define RLC_SPP_CAM_DATA__DATA__SHIFT 0x0
+#define RLC_SPP_CAM_DATA__TAG__SHIFT 0x8
+#define RLC_SPP_CAM_DATA__DATA_MASK 0x000000FFL
+#define RLC_SPP_CAM_DATA__TAG_MASK 0xFFFFFF00L
+//RLC_SPP_CAM_EXT_ADDR
+#define RLC_SPP_CAM_EXT_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_CAM_EXT_ADDR__ADDR_MASK 0x000000FFL
+//RLC_SPP_CAM_EXT_DATA
+#define RLC_SPP_CAM_EXT_DATA__VALID__SHIFT 0x0
+#define RLC_SPP_CAM_EXT_DATA__LOCK__SHIFT 0x1
+#define RLC_SPP_CAM_EXT_DATA__VALID_MASK 0x00000001L
+#define RLC_SPP_CAM_EXT_DATA__LOCK_MASK 0x00000002L
+//RLC_CPAXI_DOORBELL_MON_CTRL
+#define RLC_CPAXI_DOORBELL_MON_CTRL__EN__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_CTRL__ID__SHIFT 0x1
+#define RLC_CPAXI_DOORBELL_MON_CTRL__EN_MASK 0x00000001L
+#define RLC_CPAXI_DOORBELL_MON_CTRL__ID_MASK 0x0000003EL
+//RLC_CPAXI_DOORBELL_MON_STAT
+#define RLC_CPAXI_DOORBELL_MON_STAT__ID_MATCH__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_STAT__MATCH_CLEAR__SHIFT 0x1
+#define RLC_CPAXI_DOORBELL_MON_STAT__ADDR__SHIFT 0x2
+#define RLC_CPAXI_DOORBELL_MON_STAT__ID_MATCH_MASK 0x00000001L
+#define RLC_CPAXI_DOORBELL_MON_STAT__MATCH_CLEAR_MASK 0x00000002L
+#define RLC_CPAXI_DOORBELL_MON_STAT__ADDR_MASK 0x0FFFFFFCL
+//RLC_CPAXI_DOORBELL_MON_DATA_LSB
+#define RLC_CPAXI_DOORBELL_MON_DATA_LSB__DATA__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_DATA_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_CPAXI_DOORBELL_MON_DATA_MSB
+#define RLC_CPAXI_DOORBELL_MON_DATA_MSB__DATA__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_DATA_MSB__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_RANGE
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_XT_DOORBELL_CNTL
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_XT_DOORBELL_STAT
+#define RLC_XT_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_XT_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_XT_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_XT_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_XT_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_XT_DOORBELL_0_DATA_LO
+#define RLC_XT_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_0_DATA_HI
+#define RLC_XT_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_1_DATA_LO
+#define RLC_XT_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_1_DATA_HI
+#define RLC_XT_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_2_DATA_LO
+#define RLC_XT_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_2_DATA_HI
+#define RLC_XT_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_3_DATA_LO
+#define RLC_XT_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_3_DATA_HI
+#define RLC_XT_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_MEM_SLP_CNTL
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_LS_OVERRIDE__SHIFT 0x2
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_DS_OVERRIDE__SHIFT 0x3
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_LS_OVERRIDE__SHIFT 0x4
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_DS_OVERRIDE__SHIFT 0x5
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x6
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_LS_OVERRIDE__SHIFT 0x18
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_DS_OVERRIDE__SHIFT 0x19
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x1a
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_LS_OVERRIDE_MASK 0x00000004L
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_DS_OVERRIDE_MASK 0x00000008L
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_LS_OVERRIDE_MASK 0x00000010L
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_DS_OVERRIDE_MASK 0x00000020L
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x00000040L
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_LS_OVERRIDE_MASK 0x01000000L
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_DS_OVERRIDE_MASK 0x02000000L
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFC000000L
+//SMU_RLC_RESPONSE
+#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
+#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_RLCV_SAFE_MODE
+#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SMU_SAFE_MODE
+#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCV_COMMAND
+#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
+#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
+#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
+#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
+//RLC_SMU_MESSAGE
+#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_MESSAGE_1
+#define RLC_SMU_MESSAGE_1__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE_1__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_MESSAGE_2
+#define RLC_SMU_MESSAGE_2__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE_2__CMD_MASK 0xFFFFFFFFL
+//RLC_SRM_GPM_COMMAND
+#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
+#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x12
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
+#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0003FFE0L
+#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x7FFC0000L
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_GPM_ABORT
+#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
+#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
+#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
+#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_COMMAND
+#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
+#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_1
+#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_2
+#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_3
+#define RLC_SMU_ARGUMENT_3__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_3__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_4
+#define RLC_SMU_ARGUMENT_4__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_4__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_5
+#define RLC_SMU_ARGUMENT_5__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_5__ARG_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_ADDR_HI
+#define RLC_IMU_BOOTLOAD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_ADDR_LO
+#define RLC_IMU_BOOTLOAD_ADDR_LO__ADDR_LO__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_SIZE
+#define RLC_IMU_BOOTLOAD_SIZE__SIZE__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_SIZE__RESERVED__SHIFT 0x1a
+#define RLC_IMU_BOOTLOAD_SIZE__SIZE_MASK 0x03FFFFFFL
+#define RLC_IMU_BOOTLOAD_SIZE__RESERVED_MASK 0xFC000000L
+//RLC_IMU_MISC
+#define RLC_IMU_MISC__THROTTLE_GFX__SHIFT 0x0
+#define RLC_IMU_MISC__EARLY_MGCG__SHIFT 0x1
+#define RLC_IMU_MISC__RESERVED__SHIFT 0x2
+#define RLC_IMU_MISC__THROTTLE_GFX_MASK 0x00000001L
+#define RLC_IMU_MISC__EARLY_MGCG_MASK 0x00000002L
+#define RLC_IMU_MISC__RESERVED_MASK 0xFFFFFFFCL
+//RLC_IMU_RESET_VECTOR
+#define RLC_IMU_RESET_VECTOR__COLD_BOOT_EXIT__SHIFT 0x0
+#define RLC_IMU_RESET_VECTOR__VDDGFX_EXIT__SHIFT 0x1
+#define RLC_IMU_RESET_VECTOR__VECTOR__SHIFT 0x2
+#define RLC_IMU_RESET_VECTOR__RESERVED__SHIFT 0x8
+#define RLC_IMU_RESET_VECTOR__COLD_BOOT_EXIT_MASK 0x00000001L
+#define RLC_IMU_RESET_VECTOR__VDDGFX_EXIT_MASK 0x00000002L
+#define RLC_IMU_RESET_VECTOR__VECTOR_MASK 0x000000FCL
+#define RLC_IMU_RESET_VECTOR__RESERVED_MASK 0xFFFFFF00L
+
+
+// addressBlock: gc_rlcsdec
+//RLC_RLCS_DEC_START
+//RLC_RLCS_DEC_DUMP_ADDR
+//RLC_RLCS_EXCEPTION_REG_1
+#define RLC_RLCS_EXCEPTION_REG_1__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_1__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_2
+#define RLC_RLCS_EXCEPTION_REG_2__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_2__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_2__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_2__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_3
+#define RLC_RLCS_EXCEPTION_REG_3__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_3__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_3__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_3__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_4
+#define RLC_RLCS_EXCEPTION_REG_4__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_4__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_4__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_4__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_CGCG_REQUEST
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST__SHIFT 0x0
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_3D__SHIFT 0x1
+#define RLC_RLCS_CGCG_REQUEST__RESERVED__SHIFT 0x2
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_MASK 0x00000001L
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_3D_MASK 0x00000002L
+#define RLC_RLCS_CGCG_REQUEST__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_CGCG_STATUS
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS__SHIFT 0x0
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS__SHIFT 0x2
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_3D__SHIFT 0x3
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_3D__SHIFT 0x5
+#define RLC_RLCS_CGCG_STATUS__RESERVED__SHIFT 0x6
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_MASK 0x00000003L
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_MASK 0x00000004L
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_3D_MASK 0x00000018L
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_3D_MASK 0x00000020L
+#define RLC_RLCS_CGCG_STATUS__RESERVED_MASK 0xFFFFFFC0L
+//RLC_RLCS_SOC_DS_CNTL
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_ALLOW__SHIFT 0x0
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x1
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x2
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_GFX_PWR_STALLED_MASK__SHIFT 0x6
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_NON3D_PWR_STALLED_MASK__SHIFT 0x7
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_0_BUSY_MASK__SHIFT 0x10
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_1_BUSY_MASK__SHIFT 0x11
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_2_BUSY_MASK__SHIFT 0x12
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_3_BUSY_MASK__SHIFT 0x13
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_4_BUSY_MASK__SHIFT 0x14
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_5_BUSY_MASK__SHIFT 0x15
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_6_BUSY_MASK__SHIFT 0x16
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_7_BUSY_MASK__SHIFT 0x17
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_ALLOW_MASK 0x00000001L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00000002L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00000004L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_GFX_PWR_STALLED_MASK_MASK 0x00000040L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_NON3D_PWR_STALLED_MASK_MASK 0x00000080L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_0_BUSY_MASK_MASK 0x00010000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_1_BUSY_MASK_MASK 0x00020000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_2_BUSY_MASK_MASK 0x00040000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_3_BUSY_MASK_MASK 0x00080000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_4_BUSY_MASK_MASK 0x00100000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_5_BUSY_MASK_MASK 0x00200000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_6_BUSY_MASK_MASK 0x00400000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_7_BUSY_MASK_MASK 0x00800000L
+//RLC_RLCS_GFX_DS_CNTL
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_ALLOW__SHIFT 0x0
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x1
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x2
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_GFX_PWR_STALLED_MASK__SHIFT 0x6
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_NON3D_PWR_STALLED_MASK__SHIFT 0x7
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_IMU_DISABLE_MASK__SHIFT 0x8
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_0_BUSY_MASK__SHIFT 0x10
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_1_BUSY_MASK__SHIFT 0x11
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_2_BUSY_MASK__SHIFT 0x12
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_3_BUSY_MASK__SHIFT 0x13
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_4_BUSY_MASK__SHIFT 0x14
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_5_BUSY_MASK__SHIFT 0x15
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_6_BUSY_MASK__SHIFT 0x16
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_7_BUSY_MASK__SHIFT 0x17
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_ALLOW_MASK 0x00000001L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000002L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000004L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_GFX_PWR_STALLED_MASK_MASK 0x00000040L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_NON3D_PWR_STALLED_MASK_MASK 0x00000080L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_IMU_DISABLE_MASK_MASK 0x00000100L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_0_BUSY_MASK_MASK 0x00010000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_1_BUSY_MASK_MASK 0x00020000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_2_BUSY_MASK_MASK 0x00040000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_3_BUSY_MASK_MASK 0x00080000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_4_BUSY_MASK_MASK 0x00100000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_5_BUSY_MASK_MASK 0x00200000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_6_BUSY_MASK_MASK 0x00400000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_7_BUSY_MASK_MASK 0x00800000L
+//RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL__SHIFT 0x0
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE0__SHIFT 0x1
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE1__SHIFT 0x2
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE2__SHIFT 0x3
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_MASK 0x00000001L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE0_MASK 0x00000002L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE1_MASK 0x00000004L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE2_MASK 0x00000008L
+//RLC_GPM_STAT
+#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_UP__SHIFT 0xd
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_DOWN__SHIFT 0xe
+#define RLC_GPM_STAT__DYN_WGP_POWERING_UP__SHIFT 0xf
+#define RLC_GPM_STAT__DYN_WGP_POWERING_DOWN__SHIFT 0x10
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_UP_MASK 0x00002000L
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_DOWN_MASK 0x00004000L
+#define RLC_GPM_STAT__DYN_WGP_POWERING_UP_MASK 0x00008000L
+#define RLC_GPM_STAT__DYN_WGP_POWERING_DOWN_MASK 0x00010000L
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_RLCS_GPM_STAT
+#define RLC_RLCS_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_RLCS_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_RLCS_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_RLCS_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_RLCS_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_RLCS_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_RLCS_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_RLCS_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_RLCS_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_RLCS_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_RLCS_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_UP__SHIFT 0xd
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_DOWN__SHIFT 0xe
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_UP__SHIFT 0xf
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_DOWN__SHIFT 0x10
+#define RLC_RLCS_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_RLCS_GPM_STAT__CMP_POWER_STATUS__SHIFT 0x12
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_RLCS_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_RLCS_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_RLCS_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_RLCS_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_RLCS_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_RLCS_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_RLCS_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_RLCS_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_RLCS_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_RLCS_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_RLCS_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_RLCS_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_RLCS_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_RLCS_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_RLCS_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_UP_MASK 0x00002000L
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_DOWN_MASK 0x00004000L
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_UP_MASK 0x00008000L
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_DOWN_MASK 0x00010000L
+#define RLC_RLCS_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_RLCS_GPM_STAT__CMP_POWER_STATUS_MASK 0x00040000L
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_RLCS_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_RLCS_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_RLCS_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_RLCS_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_RLCS_ABORTED_PD_SEQUENCE
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__APS__SHIFT 0x0
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__RESERVED__SHIFT 0x10
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__APS_MASK 0x0000FFFFL
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_DIDT_FORCE_STALL
+#define RLC_RLCS_DIDT_FORCE_STALL__DFS__SHIFT 0x0
+#define RLC_RLCS_DIDT_FORCE_STALL__VALID__SHIFT 0x3
+#define RLC_RLCS_DIDT_FORCE_STALL__RESERVED__SHIFT 0x4
+#define RLC_RLCS_DIDT_FORCE_STALL__DFS_MASK 0x00000007L
+#define RLC_RLCS_DIDT_FORCE_STALL__VALID_MASK 0x00000008L
+#define RLC_RLCS_DIDT_FORCE_STALL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_IOV_CMD_STATUS
+#define RLC_RLCS_IOV_CMD_STATUS__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_CMD_STATUS__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IOV_CNTX_LOC_SIZE
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__RESERVED__SHIFT 0x8
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__DATA_MASK 0x000000FFL
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__RESERVED_MASK 0xFFFFFF00L
+//RLC_RLCS_IOV_SCH_BLOCK
+#define RLC_RLCS_IOV_SCH_BLOCK__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_SCH_BLOCK__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IOV_VM_BUSY_STATUS
+#define RLC_RLCS_IOV_VM_BUSY_STATUS__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_VM_BUSY_STATUS__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GPM_STAT_2
+#define RLC_RLCS_GPM_STAT_2__TC_TRANS_ERROR__SHIFT 0x0
+#define RLC_RLCS_GPM_STAT_2__RLC_PWR_NON3D_STALLED__SHIFT 0x1
+#define RLC_RLCS_GPM_STAT_2__GFX_PWR_STALLED_STATUS__SHIFT 0x2
+#define RLC_RLCS_GPM_STAT_2__GFX_ULV_STATUS__SHIFT 0x3
+#define RLC_RLCS_GPM_STAT_2__GFX_GENERAL_STATUS__SHIFT 0x4
+#define RLC_RLCS_GPM_STAT_2__RESERVED__SHIFT 0x5
+#define RLC_RLCS_GPM_STAT_2__TC_TRANS_ERROR_MASK 0x00000001L
+#define RLC_RLCS_GPM_STAT_2__RLC_PWR_NON3D_STALLED_MASK 0x00000002L
+#define RLC_RLCS_GPM_STAT_2__GFX_PWR_STALLED_STATUS_MASK 0x00000004L
+#define RLC_RLCS_GPM_STAT_2__GFX_ULV_STATUS_MASK 0x00000008L
+#define RLC_RLCS_GPM_STAT_2__GFX_GENERAL_STATUS_MASK 0x00000010L
+#define RLC_RLCS_GPM_STAT_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_GRBM_SOFT_RESET
+#define RLC_RLCS_GRBM_SOFT_RESET__RESET__SHIFT 0x0
+#define RLC_RLCS_GRBM_SOFT_RESET__RESERVED__SHIFT 0x1
+#define RLC_RLCS_GRBM_SOFT_RESET__RESET_MASK 0x00000001L
+#define RLC_RLCS_GRBM_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_PG_CHANGE_STATUS
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_CNTL_CHANGED__SHIFT 0x0
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_REG_CHANGED__SHIFT 0x1
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_REQ_CHANGED__SHIFT 0x3
+#define RLC_RLCS_PG_CHANGE_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_CNTL_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_REG_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_REQ_CHANGED_MASK 0x00000008L
+#define RLC_RLCS_PG_CHANGE_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_PG_CHANGE_READ
+#define RLC_RLCS_PG_CHANGE_READ__RESERVED__SHIFT 0x0
+#define RLC_RLCS_PG_CHANGE_READ__PG_REG_CHANGED__SHIFT 0x1
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_REQ_CHANGED__SHIFT 0x3
+#define RLC_RLCS_PG_CHANGE_READ__RESERVED_MASK 0x00000001L
+#define RLC_RLCS_PG_CHANGE_READ__PG_REG_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_REQ_CHANGED_MASK 0x00000008L
+//RLC_RLCS_IH_SEMAPHORE
+#define RLC_RLCS_IH_SEMAPHORE__CLIENT_ID__SHIFT 0x0
+#define RLC_RLCS_IH_SEMAPHORE__RESERVED__SHIFT 0x5
+#define RLC_RLCS_IH_SEMAPHORE__CLIENT_ID_MASK 0x0000001FL
+#define RLC_RLCS_IH_SEMAPHORE__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_IH_COOKIE_SEMAPHORE
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__CLIENT_ID__SHIFT 0x0
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__RESERVED__SHIFT 0x5
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__CLIENT_ID_MASK 0x0000001FL
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_WGP_STATUS
+#define RLC_RLCS_WGP_STATUS__CS_WORK_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_WGP_STATUS__STATIC_WGP_STATUS_CHANGED__SHIFT 0x1
+#define RLC_RLCS_WGP_STATUS__DYMANIC_WGP_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_WGP_STATUS__STATIC_PERWGP_PD_INCOMPLETE__SHIFT 0x3
+#define RLC_RLCS_WGP_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_WGP_STATUS__CS_WORK_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_WGP_STATUS__STATIC_WGP_STATUS_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_WGP_STATUS__DYMANIC_WGP_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_WGP_STATUS__STATIC_PERWGP_PD_INCOMPLETE_MASK 0x00000008L
+#define RLC_RLCS_WGP_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_WGP_READ
+#define RLC_RLCS_WGP_READ__CS_WORK_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_WGP_READ__STATIC_WGP_STATUS_CHANGED__SHIFT 0x1
+#define RLC_RLCS_WGP_READ__DYMANIC_WGP_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_WGP_READ__RESERVED__SHIFT 0x3
+#define RLC_RLCS_WGP_READ__CS_WORK_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_WGP_READ__STATIC_WGP_STATUS_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_WGP_READ__DYMANIC_WGP_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_WGP_READ__RESERVED_MASK 0xFFFFFFF8L
+//RLC_RLCS_CP_INT_CTRL_1
+#define RLC_RLCS_CP_INT_CTRL_1__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_CP_INT_CTRL_1__RESERVED__SHIFT 0x1
+#define RLC_RLCS_CP_INT_CTRL_1__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_CP_INT_CTRL_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_CP_INT_CTRL_2
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_EN__SHIFT 0x0
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_EN__SHIFT 0x1
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_ACTIVE__SHIFT 0x2
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_ACTIVE__SHIFT 0x3
+#define RLC_RLCS_CP_INT_CTRL_2__INTERRUPT_PENDING__SHIFT 0x4
+#define RLC_RLCS_CP_INT_CTRL_2__RESERVED__SHIFT 0x5
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_EN_MASK 0x00000001L
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_EN_MASK 0x00000002L
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_ACTIVE_MASK 0x00000004L
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_ACTIVE_MASK 0x00000008L
+#define RLC_RLCS_CP_INT_CTRL_2__INTERRUPT_PENDING_MASK 0x00000010L
+#define RLC_RLCS_CP_INT_CTRL_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_CP_INT_INFO_1
+#define RLC_RLCS_CP_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_RLCS_CP_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_CP_INT_INFO_2
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_CP_INT_INFO_2__RESERVED__SHIFT 0x19
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_ID_MASK 0x01FF0000L
+#define RLC_RLCS_CP_INT_INFO_2__RESERVED_MASK 0xFE000000L
+//RLC_RLCS_SPM_INT_CTRL
+#define RLC_RLCS_SPM_INT_CTRL__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_CTRL__RESERVED__SHIFT 0x1
+#define RLC_RLCS_SPM_INT_CTRL__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_SPM_INT_CTRL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_SPM_INT_INFO_1
+#define RLC_RLCS_SPM_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_SPM_INT_INFO_2
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_SPM_INT_INFO_2__RESERVED__SHIFT 0x19
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_ID_MASK 0x01FF0000L
+#define RLC_RLCS_SPM_INT_INFO_2__RESERVED_MASK 0xFE000000L
+//RLC_RLCS_DSM_TRIG
+#define RLC_RLCS_DSM_TRIG__START__SHIFT 0x0
+#define RLC_RLCS_DSM_TRIG__RESERVED__SHIFT 0x1
+#define RLC_RLCS_DSM_TRIG__START_MASK 0x00000001L
+#define RLC_RLCS_DSM_TRIG__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_BOOTLOAD_STATUS
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_INIT_DONE__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_DONE__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_DONE__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_STATUS__RESERVED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_STATUS__BOOTLOAD_COMPLETE__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_INIT_DONE_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_DONE_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_DONE_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_STATUS__RESERVED_MASK 0x7FFFFFE0L
+#define RLC_RLCS_BOOTLOAD_STATUS__BOOTLOAD_COMPLETE_MASK 0x80000000L
+//RLC_RLCS_POWER_BRAKE_CNTL
+#define RLC_RLCS_POWER_BRAKE_CNTL__POWER_BRAKE__SHIFT 0x0
+#define RLC_RLCS_POWER_BRAKE_CNTL__INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_POWER_BRAKE_CNTL__MAX_HYSTERESIS__SHIFT 0x2
+#define RLC_RLCS_POWER_BRAKE_CNTL__HYSTERESIS_CNT__SHIFT 0xa
+#define RLC_RLCS_POWER_BRAKE_CNTL__RESERVED__SHIFT 0x12
+#define RLC_RLCS_POWER_BRAKE_CNTL__POWER_BRAKE_MASK 0x00000001L
+#define RLC_RLCS_POWER_BRAKE_CNTL__INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_POWER_BRAKE_CNTL__MAX_HYSTERESIS_MASK 0x000003FCL
+#define RLC_RLCS_POWER_BRAKE_CNTL__HYSTERESIS_CNT_MASK 0x0003FC00L
+#define RLC_RLCS_POWER_BRAKE_CNTL__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_POWER_BRAKE_CNTL_TH1
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__POWER_BRAKE__SHIFT 0x0
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__MAX_HYSTERESIS__SHIFT 0x2
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__HYSTERESIS_CNT__SHIFT 0xa
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__POWER_BRAKE_MASK 0x00000001L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__MAX_HYSTERESIS_MASK 0x000003FCL
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__HYSTERESIS_CNT_MASK 0x0003FC00L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_GRBM_IDLE_BUSY_STAT
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__GRBM_RLC_GC_STAT_IDLE__SHIFT 0x0
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY__SHIFT 0x10
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY__SHIFT 0x11
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY__SHIFT 0x12
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY__SHIFT 0x13
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY__SHIFT 0x14
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY__SHIFT 0x15
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY__SHIFT 0x16
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY__SHIFT 0x17
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_CHANGED__SHIFT 0x18
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_CHANGED__SHIFT 0x19
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_CHANGED__SHIFT 0x1a
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_CHANGED__SHIFT 0x1b
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_CHANGED__SHIFT 0x1c
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_CHANGED__SHIFT 0x1d
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_CHANGED__SHIFT 0x1e
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_CHANGED__SHIFT 0x1f
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__GRBM_RLC_GC_STAT_IDLE_MASK 0x00000003L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_MASK 0x00010000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_MASK 0x00020000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_MASK 0x00040000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_MASK 0x00080000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_MASK 0x00100000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_MASK 0x00200000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_MASK 0x00400000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_MASK 0x00800000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_CHANGED_MASK 0x01000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_CHANGED_MASK 0x02000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_CHANGED_MASK 0x04000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_CHANGED_MASK 0x08000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_CHANGED_MASK 0x10000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_CHANGED_MASK 0x20000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_CHANGED_MASK 0x40000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_CHANGED_MASK 0x80000000L
+//RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA0_BUSY_INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA1_BUSY_INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA2_BUSY_INT_CLEAR__SHIFT 0x2
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA3_BUSY_INT_CLEAR__SHIFT 0x3
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA4_BUSY_INT_CLEAR__SHIFT 0x4
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA5_BUSY_INT_CLEAR__SHIFT 0x5
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA6_BUSY_INT_CLEAR__SHIFT 0x6
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA7_BUSY_INT_CLEAR__SHIFT 0x7
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA0_BUSY_INT_CLEAR_MASK 0x00000001L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA1_BUSY_INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA2_BUSY_INT_CLEAR_MASK 0x00000004L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA3_BUSY_INT_CLEAR_MASK 0x00000008L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA4_BUSY_INT_CLEAR_MASK 0x00000010L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA5_BUSY_INT_CLEAR_MASK 0x00000020L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA6_BUSY_INT_CLEAR_MASK 0x00000040L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA7_BUSY_INT_CLEAR_MASK 0x00000080L
+//RLC_RLCS_CMP_IDLE_CNTL
+#define RLC_RLCS_CMP_IDLE_CNTL__INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_HYST__SHIFT 0x1
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE__SHIFT 0x2
+#define RLC_RLCS_CMP_IDLE_CNTL__MAX_HYSTERESIS__SHIFT 0x3
+#define RLC_RLCS_CMP_IDLE_CNTL__HYSTERESIS_CNT__SHIFT 0xb
+#define RLC_RLCS_CMP_IDLE_CNTL__RESERVED__SHIFT 0x13
+#define RLC_RLCS_CMP_IDLE_CNTL__INT_CLEAR_MASK 0x00000001L
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_HYST_MASK 0x00000002L
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_MASK 0x00000004L
+#define RLC_RLCS_CMP_IDLE_CNTL__MAX_HYSTERESIS_MASK 0x000007F8L
+#define RLC_RLCS_CMP_IDLE_CNTL__HYSTERESIS_CNT_MASK 0x0007F800L
+#define RLC_RLCS_CMP_IDLE_CNTL__RESERVED_MASK 0xFFF80000L
+//RLC_RLCS_GENERAL_0
+#define RLC_RLCS_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_1
+#define RLC_RLCS_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_2
+#define RLC_RLCS_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_3
+#define RLC_RLCS_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_4
+#define RLC_RLCS_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_5
+#define RLC_RLCS_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_6
+#define RLC_RLCS_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_7
+#define RLC_RLCS_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_8
+#define RLC_RLCS_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_9
+#define RLC_RLCS_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_10
+#define RLC_RLCS_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_11
+#define RLC_RLCS_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_12
+#define RLC_RLCS_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_13
+#define RLC_RLCS_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_14
+#define RLC_RLCS_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_15
+#define RLC_RLCS_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_16
+#define RLC_RLCS_GENERAL_16__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_16__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_AUXILIARY_REG_1
+#define RLC_RLCS_AUXILIARY_REG_1__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_1__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_2
+#define RLC_RLCS_AUXILIARY_REG_2__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_2__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_2__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_2__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_3
+#define RLC_RLCS_AUXILIARY_REG_3__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_3__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_3__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_3__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_4
+#define RLC_RLCS_AUXILIARY_REG_4__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_4__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_4__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_4__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_SPM_SQTT_MODE
+#define RLC_RLCS_SPM_SQTT_MODE__MODE__SHIFT 0x0
+#define RLC_RLCS_SPM_SQTT_MODE__MODE_MASK 0x00000001L
+//RLC_RLCS_CP_DMA_SRCID_OVER
+#define RLC_RLCS_CP_DMA_SRCID_OVER__SRCID_OVERRIDE__SHIFT 0x0
+#define RLC_RLCS_CP_DMA_SRCID_OVER__SRCID_OVERRIDE_MASK 0x00000001L
+//RLC_RLCS_BOOTLOAD_ID_STATUS1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_0_LOADED__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_1_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_2_LOADED__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_3_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_4_LOADED__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_5_LOADED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_6_LOADED__SHIFT 0x6
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_7_LOADED__SHIFT 0x7
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_8_LOADED__SHIFT 0x8
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_9_LOADED__SHIFT 0x9
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_10_LOADED__SHIFT 0xa
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_11_LOADED__SHIFT 0xb
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_12_LOADED__SHIFT 0xc
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_13_LOADED__SHIFT 0xd
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_14_LOADED__SHIFT 0xe
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_15_LOADED__SHIFT 0xf
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_16_LOADED__SHIFT 0x10
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_17_LOADED__SHIFT 0x11
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_18_LOADED__SHIFT 0x12
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_19_LOADED__SHIFT 0x13
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_20_LOADED__SHIFT 0x14
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_21_LOADED__SHIFT 0x15
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_22_LOADED__SHIFT 0x16
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_23_LOADED__SHIFT 0x17
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_24_LOADED__SHIFT 0x18
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_25_LOADED__SHIFT 0x19
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_26_LOADED__SHIFT 0x1a
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_27_LOADED__SHIFT 0x1b
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_28_LOADED__SHIFT 0x1c
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_29_LOADED__SHIFT 0x1d
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_30_LOADED__SHIFT 0x1e
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_31_LOADED__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_0_LOADED_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_1_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_2_LOADED_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_3_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_4_LOADED_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_5_LOADED_MASK 0x00000020L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_6_LOADED_MASK 0x00000040L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_7_LOADED_MASK 0x00000080L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_8_LOADED_MASK 0x00000100L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_9_LOADED_MASK 0x00000200L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_10_LOADED_MASK 0x00000400L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_11_LOADED_MASK 0x00000800L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_12_LOADED_MASK 0x00001000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_13_LOADED_MASK 0x00002000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_14_LOADED_MASK 0x00004000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_15_LOADED_MASK 0x00008000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_16_LOADED_MASK 0x00010000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_17_LOADED_MASK 0x00020000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_18_LOADED_MASK 0x00040000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_19_LOADED_MASK 0x00080000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_20_LOADED_MASK 0x00100000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_21_LOADED_MASK 0x00200000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_22_LOADED_MASK 0x00400000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_23_LOADED_MASK 0x00800000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_24_LOADED_MASK 0x01000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_25_LOADED_MASK 0x02000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_26_LOADED_MASK 0x04000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_27_LOADED_MASK 0x08000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_28_LOADED_MASK 0x10000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_29_LOADED_MASK 0x20000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_30_LOADED_MASK 0x40000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_31_LOADED_MASK 0x80000000L
+//RLC_RLCS_BOOTLOAD_ID_STATUS2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_32_LOADED__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_33_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_34_LOADED__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_35_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_36_LOADED__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_37_LOADED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_38_LOADED__SHIFT 0x6
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_39_LOADED__SHIFT 0x7
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_40_LOADED__SHIFT 0x8
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_41_LOADED__SHIFT 0x9
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_42_LOADED__SHIFT 0xa
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_43_LOADED__SHIFT 0xb
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_44_LOADED__SHIFT 0xc
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_45_LOADED__SHIFT 0xd
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_46_LOADED__SHIFT 0xe
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_47_LOADED__SHIFT 0xf
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_48_LOADED__SHIFT 0x10
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_49_LOADED__SHIFT 0x11
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_50_LOADED__SHIFT 0x12
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_51_LOADED__SHIFT 0x13
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_52_LOADED__SHIFT 0x14
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_53_LOADED__SHIFT 0x15
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_54_LOADED__SHIFT 0x16
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_55_LOADED__SHIFT 0x17
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_56_LOADED__SHIFT 0x18
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_57_LOADED__SHIFT 0x19
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_58_LOADED__SHIFT 0x1a
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_59_LOADED__SHIFT 0x1b
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_60_LOADED__SHIFT 0x1c
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_61_LOADED__SHIFT 0x1d
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_62_LOADED__SHIFT 0x1e
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_63_LOADED__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_32_LOADED_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_33_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_34_LOADED_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_35_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_36_LOADED_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_37_LOADED_MASK 0x00000020L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_38_LOADED_MASK 0x00000040L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_39_LOADED_MASK 0x00000080L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_40_LOADED_MASK 0x00000100L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_41_LOADED_MASK 0x00000200L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_42_LOADED_MASK 0x00000400L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_43_LOADED_MASK 0x00000800L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_44_LOADED_MASK 0x00001000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_45_LOADED_MASK 0x00002000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_46_LOADED_MASK 0x00004000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_47_LOADED_MASK 0x00008000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_48_LOADED_MASK 0x00010000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_49_LOADED_MASK 0x00020000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_50_LOADED_MASK 0x00040000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_51_LOADED_MASK 0x00080000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_52_LOADED_MASK 0x00100000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_53_LOADED_MASK 0x00200000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_54_LOADED_MASK 0x00400000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_55_LOADED_MASK 0x00800000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_56_LOADED_MASK 0x01000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_57_LOADED_MASK 0x02000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_58_LOADED_MASK 0x04000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_59_LOADED_MASK 0x08000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_60_LOADED_MASK 0x10000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_61_LOADED_MASK 0x20000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_62_LOADED_MASK 0x40000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_63_LOADED_MASK 0x80000000L
+//RLC_RLCS_IMU_VIDCHG_CNTL
+#define RLC_RLCS_IMU_VIDCHG_CNTL__REQ__SHIFT 0x0
+#define RLC_RLCS_IMU_VIDCHG_CNTL__DATA__SHIFT 0x1
+#define RLC_RLCS_IMU_VIDCHG_CNTL__PSIEN__SHIFT 0xa
+#define RLC_RLCS_IMU_VIDCHG_CNTL__ACK__SHIFT 0xb
+#define RLC_RLCS_IMU_VIDCHG_CNTL__RESERVED__SHIFT 0xc
+#define RLC_RLCS_IMU_VIDCHG_CNTL__REQ_MASK 0x00000001L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__DATA_MASK 0x000003FEL
+#define RLC_RLCS_IMU_VIDCHG_CNTL__PSIEN_MASK 0x00000400L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__ACK_MASK 0x00000800L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCS_EDC_INT_CNTL
+#define RLC_RLCS_EDC_INT_CNTL__EDC_EVENT_INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_EDC_INT_CNTL__EDC_EVENT_INT_CLEAR_MASK 0x00000001L
+//RLC_RLCS_KMD_LOG_CNTL1
+#define RLC_RLCS_KMD_LOG_CNTL1__DATA__SHIFT 0x0
+#define RLC_RLCS_KMD_LOG_CNTL1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_KMD_LOG_CNTL2
+#define RLC_RLCS_KMD_LOG_CNTL2__DATA__SHIFT 0x0
+#define RLC_RLCS_KMD_LOG_CNTL2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GPM_LEGACY_INT_STAT
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GC_CAC_EDC_EVENT_CHANGED__SHIFT 0x0
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GFX_POWER_BRAKE_CHANGED__SHIFT 0x1
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GC_CAC_EDC_EVENT_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GFX_POWER_BRAKE_CHANGED_MASK 0x00000002L
+//RLC_RLCS_GPM_LEGACY_INT_DISABLE
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GC_CAC_EDC_EVENT_CHANGED__SHIFT 0x0
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GFX_POWER_BRAKE_CHANGED__SHIFT 0x1
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GC_CAC_EDC_EVENT_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GFX_POWER_BRAKE_CHANGED_MASK 0x00000002L
+//RLC_RLCS_SRM_SRCID_CNTL
+#define RLC_RLCS_SRM_SRCID_CNTL__SRCID__SHIFT 0x0
+#define RLC_RLCS_SRM_SRCID_CNTL__SRCID_MASK 0x00000007L
+//RLC_RLCS_GCR_DATA_0
+#define RLC_RLCS_GCR_DATA_0__PHASE_0__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_0__PHASE_1__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_0__PHASE_0_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_0__PHASE_1_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_1
+#define RLC_RLCS_GCR_DATA_1__PHASE_2__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_1__PHASE_3__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_1__PHASE_2_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_1__PHASE_3_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_2
+#define RLC_RLCS_GCR_DATA_2__PHASE_4__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_2__PHASE_5__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_2__PHASE_4_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_2__PHASE_5_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_3
+#define RLC_RLCS_GCR_DATA_3__PHASE_6__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_3__PHASE_7__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_3__PHASE_6_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_3__PHASE_7_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_STATUS
+#define RLC_RLCS_GCR_STATUS__GCR_BUSY__SHIFT 0x0
+#define RLC_RLCS_GCR_STATUS__GCR_OUT_COUNT__SHIFT 0x1
+#define RLC_RLCS_GCR_STATUS__RESERVED_2__SHIFT 0x5
+#define RLC_RLCS_GCR_STATUS__GCRIU_CLI_RSP_TAG__SHIFT 0x8
+#define RLC_RLCS_GCR_STATUS__RESERVED__SHIFT 0x10
+#define RLC_RLCS_GCR_STATUS__GCR_BUSY_MASK 0x00000001L
+#define RLC_RLCS_GCR_STATUS__GCR_OUT_COUNT_MASK 0x0000001EL
+#define RLC_RLCS_GCR_STATUS__RESERVED_2_MASK 0x000000E0L
+#define RLC_RLCS_GCR_STATUS__GCRIU_CLI_RSP_TAG_MASK 0x0000FF00L
+#define RLC_RLCS_GCR_STATUS__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_PERFMON_CLK_CNTL_UCODE
+#define RLC_RLCS_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE__SHIFT 0x0
+#define RLC_RLCS_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE_MASK 0x00000001L
+//RLC_RLCS_UTCL2_CNTL
+#define RLC_RLCS_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE__SHIFT 0x1
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE__SHIFT 0x2
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_VALUE__SHIFT 0x3
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_VALUE__SHIFT 0x5
+#define RLC_RLCS_UTCL2_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x6
+#define RLC_RLCS_UTCL2_CNTL__RESERVED__SHIFT 0x7
+#define RLC_RLCS_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_MASK 0x00000002L
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_MASK 0x00000004L
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_VALUE_MASK 0x00000018L
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_VALUE_MASK 0x00000020L
+#define RLC_RLCS_UTCL2_CNTL__IGNORE_PTE_PERMISSION_MASK 0x00000040L
+#define RLC_RLCS_UTCL2_CNTL__RESERVED_MASK 0xFFFFFF80L
+//RLC_RLCS_IMU_RLC_MSG_DATA0
+#define RLC_RLCS_IMU_RLC_MSG_DATA0__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA1
+#define RLC_RLCS_IMU_RLC_MSG_DATA1__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA2
+#define RLC_RLCS_IMU_RLC_MSG_DATA2__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA3
+#define RLC_RLCS_IMU_RLC_MSG_DATA3__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA3__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA4
+#define RLC_RLCS_IMU_RLC_MSG_DATA4__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA4__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_CONTROL
+#define RLC_RLCS_IMU_RLC_MSG_CONTROL__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_CONTROL__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_CNTL
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__DONETOG__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__CHGTOG__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__DONETOG_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__CHGTOG_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_RLC_IMU_MSG_DATA0
+#define RLC_RLCS_RLC_IMU_MSG_DATA0__DATA__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_DATA0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_RLC_IMU_MSG_CONTROL
+#define RLC_RLCS_RLC_IMU_MSG_CONTROL__DATA__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_CONTROL__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_RLC_IMU_MSG_CNTL
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__CHGTOG__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__DONETOG__SHIFT 0x1
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__CHGTOG_MASK 0x00000001L
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__DONETOG_MASK 0x00000002L
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__CURRENT__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__VOLTAGE__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__CURRENT_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__VOLTAGE_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__TEMPERATURE1__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__TEMPERATURE1_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RLC_MUTEX_CNTL
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__REQ__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__ACQUIRE__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__REQ_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__ACQUIRE_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_RLC_STATUS
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_GFXOFF__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_FA_DCS__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_14_2__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_STATUS__DISABLE_GFXCLK_DS__SHIFT 0xf
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_GFXOFF_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_FA_DCS_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_14_2_MASK 0x00007FFCL
+#define RLC_RLCS_IMU_RLC_STATUS__DISABLE_GFXCLK_DS_MASK 0x00008000L
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_RLC_IMU_STATUS
+#define RLC_RLCS_RLC_IMU_STATUS__PWR_DOWN_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_STATUS__RLC_ALIVE__SHIFT 0x1
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_3_2__SHIFT 0x2
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_RLC_IMU_STATUS__PWR_DOWN_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_RLC_IMU_STATUS__RLC_ALIVE_MASK 0x00000002L
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_3_2_MASK 0x0000000CL
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_IMU_RAM_DATA_1
+#define RLC_RLCS_IMU_RAM_DATA_1__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_1_LSB
+#define RLC_RLCS_IMU_RAM_ADDR_1_LSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_1_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_1_MSB
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__DATA_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RAM_DATA_0
+#define RLC_RLCS_IMU_RAM_DATA_0__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_0_LSB
+#define RLC_RLCS_IMU_RAM_ADDR_0_LSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_0_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_0_MSB
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__DATA_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RAM_CNTL
+#define RLC_RLCS_IMU_RAM_CNTL__REQTOG__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_CNTL__ACKTOG__SHIFT 0x1
+#define RLC_RLCS_IMU_RAM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RAM_CNTL__REQTOG_MASK 0x00000001L
+#define RLC_RLCS_IMU_RAM_CNTL__ACKTOG_MASK 0x00000002L
+#define RLC_RLCS_IMU_RAM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_GFX_DOORBELL_FENCE
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ENABLE__SHIFT 0x0
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ACK__SHIFT 0x1
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ENABLE_MASK 0x00000001L
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ACK_MASK 0x00000002L
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_CNTL_1
+#define RLC_RLCS_SDMA_INT_CNTL_1__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESP_ID__SHIFT 0x1
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESERVED__SHIFT 0x2
+#define RLC_RLCS_SDMA_INT_CNTL_1__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESP_ID_MASK 0x00000002L
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_CNTL_2
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_EN__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_ACTIVE__SHIFT 0x1
+#define RLC_RLCS_SDMA_INT_CNTL_2__RESERVED__SHIFT 0x2
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_EN_MASK 0x00000001L
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_ACTIVE_MASK 0x00000002L
+#define RLC_RLCS_SDMA_INT_CNTL_2__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_STAT
+#define RLC_RLCS_SDMA_INT_STAT__REQ_IDLE_HIST__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_STAT__REQ_BUSY_HIST__SHIFT 0x8
+#define RLC_RLCS_SDMA_INT_STAT__LAST_SDMA_RLC_INT_ID__SHIFT 0x10
+#define RLC_RLCS_SDMA_INT_STAT__SDMA_RLC_INT_PENDING__SHIFT 0x11
+#define RLC_RLCS_SDMA_INT_STAT__RESERVED__SHIFT 0x12
+#define RLC_RLCS_SDMA_INT_STAT__REQ_IDLE_HIST_MASK 0x000000FFL
+#define RLC_RLCS_SDMA_INT_STAT__REQ_BUSY_HIST_MASK 0x0000FF00L
+#define RLC_RLCS_SDMA_INT_STAT__LAST_SDMA_RLC_INT_ID_MASK 0x00010000L
+#define RLC_RLCS_SDMA_INT_STAT__SDMA_RLC_INT_PENDING_MASK 0x00020000L
+#define RLC_RLCS_SDMA_INT_STAT__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_SDMA_INT_INFO
+#define RLC_RLCS_SDMA_INT_INFO__REQ_IDLE_TO_FW__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_INFO__REQ_BUSY_TO_FW__SHIFT 0x8
+#define RLC_RLCS_SDMA_INT_INFO__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_SDMA_INT_INFO__RESERVED__SHIFT 0x11
+#define RLC_RLCS_SDMA_INT_INFO__REQ_IDLE_TO_FW_MASK 0x000000FFL
+#define RLC_RLCS_SDMA_INT_INFO__REQ_BUSY_TO_FW_MASK 0x0000FF00L
+#define RLC_RLCS_SDMA_INT_INFO__INTERRUPT_ID_MASK 0x00010000L
+#define RLC_RLCS_SDMA_INT_INFO__RESERVED_MASK 0xFFFE0000L
+//RLC_RLCS_PMM_CGCG_CNTL
+#define RLC_RLCS_PMM_CGCG_CNTL__VALID__SHIFT 0x0
+#define RLC_RLCS_PMM_CGCG_CNTL__CLEAN__SHIFT 0x1
+#define RLC_RLCS_PMM_CGCG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_PMM_CGCG_CNTL__VALID_MASK 0x00000001L
+#define RLC_RLCS_PMM_CGCG_CNTL__CLEAN_MASK 0x00000002L
+#define RLC_RLCS_PMM_CGCG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_GFX_MEM_POWER_CTRL_LO
+#define RLC_RLCS_GFX_MEM_POWER_CTRL_LO__DATA__SHIFT 0x0
+#define RLC_RLCS_GFX_MEM_POWER_CTRL_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GFX_RM_CNTL
+#define RLC_RLCS_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
+#define RLC_RLCS_GFX_RM_CNTL__RESERVED__SHIFT 0x1
+#define RLC_RLCS_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
+#define RLC_RLCS_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_IH_CTRL_1
+#define RLC_RLCS_IH_CTRL_1__IH_CONTEXT_ID_1__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_1__IH_CONTEXT_ID_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_IH_CTRL_2
+#define RLC_RLCS_IH_CTRL_2__IH_CONTEXT_ID_2__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_2__IH_RING_ID__SHIFT 0x8
+#define RLC_RLCS_IH_CTRL_2__IH_VM_ID__SHIFT 0x10
+#define RLC_RLCS_IH_CTRL_2__RESERVED__SHIFT 0x14
+#define RLC_RLCS_IH_CTRL_2__IH_CONTEXT_ID_2_MASK 0x000000FFL
+#define RLC_RLCS_IH_CTRL_2__IH_RING_ID_MASK 0x0000FF00L
+#define RLC_RLCS_IH_CTRL_2__IH_VM_ID_MASK 0x000F0000L
+#define RLC_RLCS_IH_CTRL_2__RESERVED_MASK 0xFFF00000L
+//RLC_RLCS_IH_CTRL_3
+#define RLC_RLCS_IH_CTRL_3__IH_SOURCE_ID__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_3__IH_VF_ID__SHIFT 0x8
+#define RLC_RLCS_IH_CTRL_3__IH_VF__SHIFT 0xd
+#define RLC_RLCS_IH_CTRL_3__RESERVED__SHIFT 0xe
+#define RLC_RLCS_IH_CTRL_3__IH_SOURCE_ID_MASK 0x000000FFL
+#define RLC_RLCS_IH_CTRL_3__IH_VF_ID_MASK 0x00001F00L
+#define RLC_RLCS_IH_CTRL_3__IH_VF_MASK 0x00002000L
+#define RLC_RLCS_IH_CTRL_3__RESERVED_MASK 0xFFFFC000L
+//RLC_RLCS_IH_STATUS
+#define RLC_RLCS_IH_STATUS__IH_CREDIT_COUNT__SHIFT 0x0
+#define RLC_RLCS_IH_STATUS__IH_BUSY__SHIFT 0x6
+#define RLC_RLCS_IH_STATUS__IH_WRITE_DONE__SHIFT 0x7
+#define RLC_RLCS_IH_STATUS__RESERVED__SHIFT 0x8
+#define RLC_RLCS_IH_STATUS__IH_CREDIT_COUNT_MASK 0x0000003FL
+#define RLC_RLCS_IH_STATUS__IH_BUSY_MASK 0x00000040L
+#define RLC_RLCS_IH_STATUS__IH_WRITE_DONE_MASK 0x00000080L
+#define RLC_RLCS_IH_STATUS__RESERVED_MASK 0xFFFFFF00L
+//RLC_RLCS_DEC_END
+
+
+// addressBlock: gc_pfvfdec_rlc
+//RLC_SAFE_MODE
+#define RLC_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SPM_SAMPLE_CNT
+#define RLC_SPM_SAMPLE_CNT__COUNT__SHIFT 0x0
+#define RLC_SPM_SAMPLE_CNT__COUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_MC_CNTL
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x6
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x7
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x8
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x9
+#define RLC_SPM_MC_CNTL__RLC_SPM_BC__SHIFT 0xc
+#define RLC_SPM_MC_CNTL__RLC_SPM_RO__SHIFT 0xd
+#define RLC_SPM_MC_CNTL__RLC_SPM_VOL__SHIFT 0xe
+#define RLC_SPM_MC_CNTL__RLC_SPM_NOFILL__SHIFT 0xf
+#define RLC_SPM_MC_CNTL__RESERVED_3__SHIFT 0x10
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC__SHIFT 0x12
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_OVER__SHIFT 0x13
+#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0x14
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000030L
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000040L
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000080L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000100L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000E00L
+#define RLC_SPM_MC_CNTL__RLC_SPM_BC_MASK 0x00001000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_RO_MASK 0x00002000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_VOL_MASK 0x00004000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_NOFILL_MASK 0x00008000L
+#define RLC_SPM_MC_CNTL__RESERVED_3_MASK 0x00030000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_MASK 0x00040000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_OVER_MASK 0x00080000L
+#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFF00000L
+//RLC_SPM_INT_CNTL
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
+#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
+#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_STATUS
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
+#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
+#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_INFO_1
+#define RLC_SPM_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_SPM_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_SPM_INT_INFO_2
+#define RLC_SPM_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_SPM_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_SPM_INT_INFO_2__RESERVED__SHIFT 0x18
+#define RLC_SPM_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_SPM_INT_INFO_2__INTERRUPT_ID_MASK 0x00FF0000L
+#define RLC_SPM_INT_INFO_2__RESERVED_MASK 0xFF000000L
+//RLC_CSIB_ADDR_LO
+#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
+//RLC_CSIB_ADDR_HI
+#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
+//RLC_CSIB_LENGTH
+#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
+#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
+//RLC_CP_SCHEDULERS
+#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
+#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
+#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
+#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
+//RLC_CP_EOF_INT
+#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
+#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
+#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CP_EOF_INT_CNT
+#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
+#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT_0
+#define RLC_SPARE_INT_0__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_0__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_0__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_0__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_0__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_0__COMPLETE_MASK 0x80000000L
+//RLC_SPARE_INT_1
+#define RLC_SPARE_INT_1__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_1__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_1__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_1__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_1__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_1__COMPLETE_MASK 0x80000000L
+//RLC_SPARE_INT_2
+#define RLC_SPARE_INT_2__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_2__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_2__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_2__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_2__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_2__COMPLETE_MASK 0x80000000L
+//RLC_PACE_SPARE_INT
+#define RLC_PACE_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_PACE_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_PACE_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_PACE_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_PACE_SPARE_INT_1
+#define RLC_PACE_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_PACE_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_PACE_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_PACE_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_SPARE_INT_1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+
+
+// addressBlock: gc_pwrdec
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTX_SPI_DEBUG_CLK_CTRL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x0
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x6
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x7
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL__SHIFT 0x8
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x0000003FL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x00000040L
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x00000080L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL_MASK 0x00000100L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__PI1_OVERRIDE__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__PI0_OVERRIDE__SHIFT 0x18
+#define CGTT_VGT_CLK_CTRL__HS_OVERRIDE__SHIFT 0x19
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__PI1_OVERRIDE_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__PI0_OVERRIDE_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__HS_OVERRIDE_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__DIST_OVERRIDE__SHIFT 0x1a
+#define CGTT_IA_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_IA_CLK_CTRL__PCM_OVERRIDE__SHIFT 0x1c
+#define CGTT_IA_CLK_CTRL__TESS_DIST_OVERRIDE__SHIFT 0x1d
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__DIST_OVERRIDE_MASK 0x04000000L
+#define CGTT_IA_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__PCM_OVERRIDE_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__TESS_DIST_OVERRIDE_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__FE_OUT_OVERRIDE__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__ASSEMBLER_OVERRIDE__SHIFT 0x18
+#define CGTT_WD_CLK_CTRL__DMA_PROC0_OVERRIDE__SHIFT 0x19
+#define CGTT_WD_CLK_CTRL__DMA_PROC1_OVERRIDE__SHIFT 0x1a
+#define CGTT_WD_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_WD_CLK_CTRL__DMA_OVERRIDE__SHIFT 0x1c
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__FE_OUT_OVERRIDE_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__ASSEMBLER_OVERRIDE_MASK 0x01000000L
+#define CGTT_WD_CLK_CTRL__DMA_PROC0_OVERRIDE_MASK 0x02000000L
+#define CGTT_WD_CLK_CTRL__DMA_PROC1_OVERRIDE_MASK 0x04000000L
+#define CGTT_WD_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_WD_CLK_CTRL__DMA_OVERRIDE_MASK 0x10000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__CLIP_SU_PRIM_FIFO_CLK_OVERRIDE__SHIFT 0xc
+#define CGTT_PA_CLK_CTRL__SXIFCCG_CLK_OVERRIDE__SHIFT 0xd
+#define CGTT_PA_CLK_CTRL__AG_CLK_OVERRIDE__SHIFT 0xe
+#define CGTT_PA_CLK_CTRL__VE_VTE_REC_CLK_OVERRIDE__SHIFT 0xf
+#define CGTT_PA_CLK_CTRL__ENGG_CLK_OVERRIDE__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__CL_VTE_CLK_OVERRIDE__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__AG_REG_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__CL_VTE_REG_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PA_CLK_CTRL__VTE_REG_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__NGG_INDEX_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__NGG_CSB_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__SU_CL_REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__CLIP_SU_PRIM_FIFO_CLK_OVERRIDE_MASK 0x00001000L
+#define CGTT_PA_CLK_CTRL__SXIFCCG_CLK_OVERRIDE_MASK 0x00002000L
+#define CGTT_PA_CLK_CTRL__AG_CLK_OVERRIDE_MASK 0x00004000L
+#define CGTT_PA_CLK_CTRL__VE_VTE_REC_CLK_OVERRIDE_MASK 0x00008000L
+#define CGTT_PA_CLK_CTRL__ENGG_CLK_OVERRIDE_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__CL_VTE_CLK_OVERRIDE_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__AG_REG_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__CL_VTE_REG_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PA_CLK_CTRL__VTE_REG_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__NGG_INDEX_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__NGG_CSB_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__SU_CL_REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON__SHIFT 0xf
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_VRS_INTF_CLK_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL2__SC_DB_COURSE_MGCG_BUSY_ENABLE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL2__SC_DB_STAGE_IN_TP_PFFB_WR_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_Z_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_PROC_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_ACCUM_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL2__SC_DB_PFFB_RP_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL2__SC_DB_PKR_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_FREE_WAVE_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_WAVE_2_SC_SPI_WAVE_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON_MASK 0x00008000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_VRS_INTF_CLK_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_COURSE_MGCG_BUSY_ENABLE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_STAGE_IN_TP_PFFB_WR_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_Z_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_PROC_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_ACCUM_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_PFFB_RP_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_PKR_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_FREE_WAVE_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_WAVE_2_SC_SPI_WAVE_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__FORCE_GL1H_CLKEN__SHIFT 0x17
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPALLOC_FGCG__SHIFT 0x18
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPGRANT_FGCG__SHIFT 0x19
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPREQ_FGCG__SHIFT 0x1a
+#define CGTT_SQG_CLK_CTRL__FORCE_CMD_FGCG__SHIFT 0x1b
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__FORCE_GL1H_CLKEN_MASK 0x00800000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPALLOC_FGCG_MASK 0x01000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPGRANT_FGCG_MASK 0x02000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPREQ_FGCG_MASK 0x04000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_CMD_FGCG_MASK 0x08000000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//SQ_ALU_CLK_CTRL
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//SQ_TEX_CLK_CTRL
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//SQ_LDS_CLK_CTRL
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//ICG_SP_CLK_CTRL
+#define ICG_SP_CLK_CTRL__CLK_OVERRIDE__SHIFT 0x0
+#define ICG_SP_CLK_CTRL__CLK_OVERRIDE_MASK 0xFFFFFFFFL
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x2
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x3
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x5
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x6
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x7
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE8__SHIFT 0x8
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0x9
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x00000001L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x00000002L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x00000004L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x00000008L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x00000010L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x00000020L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x00000040L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x00000080L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE8_MASK 0x00000100L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0xFFFFFE00L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GFX_ICG_GL2A_CTRL
+#define GFX_ICG_GL2A_CTRL__REG_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2A_CTRL__PERFMON_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2A_CTRL__CROSSBAR_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2A_CTRL__RTN_ARB_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2A_CTRL__GCRD_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2A_CTRL__CLIENT0_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2A_CTRL__CLIENT1_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2A_CTRL__CLIENT2_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2A_CTRL__CLIENT3_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2A_CTRL__CLIENT4_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2A_CTRL__CLIENT5_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2A_CTRL__CLIENT6_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2A_CTRL__CLIENT7_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2A_CTRL__CLIENT8_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2A_CTRL__CLIENT9_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2A_CTRL__CLIENT10_OVERRIDE__SHIFT 0x12
+#define GFX_ICG_GL2A_CTRL__CLIENT11_OVERRIDE__SHIFT 0x13
+#define GFX_ICG_GL2A_CTRL__CLIENT12_OVERRIDE__SHIFT 0x14
+#define GFX_ICG_GL2A_CTRL__CLIENT13_OVERRIDE__SHIFT 0x15
+#define GFX_ICG_GL2A_CTRL__CLIENT14_OVERRIDE__SHIFT 0x16
+#define GFX_ICG_GL2A_CTRL__CLIENT15_OVERRIDE__SHIFT 0x17
+#define GFX_ICG_GL2A_CTRL__REG_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2A_CTRL__PERFMON_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2A_CTRL__CROSSBAR_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2A_CTRL__RTN_ARB_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2A_CTRL__GCRD_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2A_CTRL__CLIENT0_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2A_CTRL__CLIENT1_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2A_CTRL__CLIENT2_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2A_CTRL__CLIENT3_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2A_CTRL__CLIENT4_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2A_CTRL__CLIENT5_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2A_CTRL__CLIENT6_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2A_CTRL__CLIENT7_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2A_CTRL__CLIENT8_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2A_CTRL__CLIENT9_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2A_CTRL__CLIENT10_OVERRIDE_MASK 0x00040000L
+#define GFX_ICG_GL2A_CTRL__CLIENT11_OVERRIDE_MASK 0x00080000L
+#define GFX_ICG_GL2A_CTRL__CLIENT12_OVERRIDE_MASK 0x00100000L
+#define GFX_ICG_GL2A_CTRL__CLIENT13_OVERRIDE_MASK 0x00200000L
+#define GFX_ICG_GL2A_CTRL__CLIENT14_OVERRIDE_MASK 0x00400000L
+#define GFX_ICG_GL2A_CTRL__CLIENT15_OVERRIDE_MASK 0x00800000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1a
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT__SHIFT 0x1b
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP__SHIFT 0x1c
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x04000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT_MASK 0x08000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP_MASK 0x10000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__RESERVED__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__RESERVED_MASK 0xFFFFFFFFL
+//CGTT_SC_CLK_CTRL3
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_STALL_OVERRIDE__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_STALL_OVERRIDE__SHIFT 0x1
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_STALL_OVERRIDE__SHIFT 0x2
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_STALL_OVERRIDE__SHIFT 0x3
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_STALL_OVERRIDE__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_STALL_OVERRIDE__SHIFT 0x5
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_STALL_OVERRIDE__SHIFT 0x6
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_STALL_OVERRIDE__SHIFT 0x7
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_STALL_OVERRIDE__SHIFT 0x8
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_STALL_OVERRIDE__SHIFT 0x9
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_STALL_OVERRIDE__SHIFT 0xa
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_STALL_OVERRIDE__SHIFT 0xb
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_STALL_OVERRIDE__SHIFT 0xc
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_STALL_OVERRIDE__SHIFT 0xd
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_STALL_OVERRIDE_MASK 0x00000001L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_STALL_OVERRIDE_MASK 0x00000002L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_STALL_OVERRIDE_MASK 0x00000004L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_STALL_OVERRIDE_MASK 0x00000008L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_STALL_OVERRIDE_MASK 0x00000010L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_STALL_OVERRIDE_MASK 0x00000020L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_STALL_OVERRIDE_MASK 0x00000040L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_STALL_OVERRIDE_MASK 0x00000080L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_STALL_OVERRIDE_MASK 0x00000100L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_STALL_OVERRIDE_MASK 0x00000200L
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_STALL_OVERRIDE_MASK 0x00000400L
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_STALL_OVERRIDE_MASK 0x00000800L
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_STALL_OVERRIDE_MASK 0x00001000L
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_STALL_OVERRIDE_MASK 0x00002000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL4
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_STALL_OVERRIDE__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_STALL_OVERRIDE__SHIFT 0x1
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_STALL_OVERRIDE__SHIFT 0x2
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_STALL_OVERRIDE__SHIFT 0x3
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_STALL_OVERRIDE__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_STALL_OVERRIDE__SHIFT 0x5
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_STALL_OVERRIDE__SHIFT 0x6
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_STALL_OVERRIDE__SHIFT 0x7
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_STALL_OVERRIDE__SHIFT 0x8
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_STALL_OVERRIDE__SHIFT 0x9
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_STALL_OVERRIDE__SHIFT 0xa
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_STALL_OVERRIDE__SHIFT 0xb
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_STALL_OVERRIDE__SHIFT 0xc
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_STALL_OVERRIDE_MASK 0x00000001L
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_STALL_OVERRIDE_MASK 0x00000002L
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_STALL_OVERRIDE_MASK 0x00000004L
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_STALL_OVERRIDE_MASK 0x00000008L
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_STALL_OVERRIDE_MASK 0x00000010L
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_STALL_OVERRIDE_MASK 0x00000020L
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_STALL_OVERRIDE_MASK 0x00000040L
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_STALL_OVERRIDE_MASK 0x00000080L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_STALL_OVERRIDE_MASK 0x00000100L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_STALL_OVERRIDE_MASK 0x00000200L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_STALL_OVERRIDE_MASK 0x00000400L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_STALL_OVERRIDE_MASK 0x00000800L
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_STALL_OVERRIDE_MASK 0x00001000L
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_OVERRIDE_MASK 0x80000000L
+//GCEA_ICG_CTRL
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x0
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x2
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x3
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x4
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_MAM__SHIFT 0x5
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x00000001L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000002L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000004L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000008L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x00000010L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_MAM_MASK 0x00000020L
+//GL1I_GL1R_MGCG_OVERRIDE
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_SCLK_OVERRIDE__SHIFT 0x0
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x1
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SCLK_OVERRIDE__SHIFT 0x2
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x3
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SRC_DCLK_OVERRIDE__SHIFT 0x4
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_SRC_MGCG_SCLK_OVERRIDE__SHIFT 0x5
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_RET_MGCG_SCLK_OVERRIDE__SHIFT 0x6
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_SCLK_OVERRIDE_MASK 0x00000001L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000002L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SCLK_OVERRIDE_MASK 0x00000004L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000008L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SRC_DCLK_OVERRIDE_MASK 0x00000010L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_SRC_MGCG_SCLK_OVERRIDE_MASK 0x00000020L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_RET_MGCG_SCLK_OVERRIDE_MASK 0x00000040L
+//GL1H_ICG_CTRL
+#define GL1H_ICG_CTRL__REG_DCLK_OVERRIDE__SHIFT 0x0
+#define GL1H_ICG_CTRL__REQ_ARB_DCLK_OVERRIDE__SHIFT 0x1
+#define GL1H_ICG_CTRL__PERFMON_DCLK_OVERRIDE__SHIFT 0x2
+#define GL1H_ICG_CTRL__REQ_ARB_CLI0_DCLK_OVERRIDE__SHIFT 0x3
+#define GL1H_ICG_CTRL__REQ_ARB_CLI1_DCLK_OVERRIDE__SHIFT 0x4
+#define GL1H_ICG_CTRL__REQ_ARB_CLI2_DCLK_OVERRIDE__SHIFT 0x5
+#define GL1H_ICG_CTRL__REQ_ARB_CLI3_DCLK_OVERRIDE__SHIFT 0x6
+#define GL1H_ICG_CTRL__SRC_DCLK_OVERRIDE__SHIFT 0x7
+#define GL1H_ICG_CTRL__RET_DCLK_OVERRIDE__SHIFT 0x8
+#define GL1H_ICG_CTRL__REG_DCLK_OVERRIDE_MASK 0x00000001L
+#define GL1H_ICG_CTRL__REQ_ARB_DCLK_OVERRIDE_MASK 0x00000002L
+#define GL1H_ICG_CTRL__PERFMON_DCLK_OVERRIDE_MASK 0x00000004L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI0_DCLK_OVERRIDE_MASK 0x00000008L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI1_DCLK_OVERRIDE_MASK 0x00000010L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI2_DCLK_OVERRIDE_MASK 0x00000020L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI3_DCLK_OVERRIDE_MASK 0x00000040L
+#define GL1H_ICG_CTRL__SRC_DCLK_OVERRIDE_MASK 0x00000080L
+#define GL1H_ICG_CTRL__RET_DCLK_OVERRIDE_MASK 0x00000100L
+//CHI_CHR_MGCG_OVERRIDE
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_SCLK_OVERRIDE__SHIFT 0x0
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x1
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SCLK_OVERRIDE__SHIFT 0x2
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x3
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SRC_DCLK_OVERRIDE__SHIFT 0x4
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_RET_MGCG_SCLK_OVERRIDE__SHIFT 0x5
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_SRC_MGCG_SCLK_OVERRIDE__SHIFT 0x6
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_SCLK_OVERRIDE_MASK 0x00000001L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000002L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SCLK_OVERRIDE_MASK 0x00000004L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000008L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SRC_DCLK_OVERRIDE_MASK 0x00000010L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_RET_MGCG_SCLK_OVERRIDE_MASK 0x00000020L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_SRC_MGCG_SCLK_OVERRIDE_MASK 0x00000040L
+//ICG_GL1C_CLK_CTRL
+#define ICG_GL1C_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_GL1C_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_GL1C_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_GL1C_CLK_CTRL__VM_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_GL1C_CLK_CTRL__TAG_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_GL1C_CLK_CTRL__GCR_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_GL1C_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_GL1C_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x7
+#define ICG_GL1C_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x8
+#define ICG_GL1C_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x9
+#define ICG_GL1C_CLK_CTRL__LATENCY_FIFO_CLK_OVERRIDE__SHIFT 0xa
+#define ICG_GL1C_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_GL1C_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_GL1C_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_GL1C_CLK_CTRL__VM_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_GL1C_CLK_CTRL__TAG_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_GL1C_CLK_CTRL__GCR_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_GL1C_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000040L
+#define ICG_GL1C_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000080L
+#define ICG_GL1C_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000100L
+#define ICG_GL1C_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000200L
+#define ICG_GL1C_CLK_CTRL__LATENCY_FIFO_CLK_OVERRIDE_MASK 0x00000400L
+//ICG_GL1A_CTRL
+#define ICG_GL1A_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_GL1A_CTRL__REQ_CLI_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_GL1A_CTRL__REQ_ARB_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_GL1A_CTRL__RET_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_GL1A_CTRL__REQ_CREDIT_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_GL1A_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_GL1A_CTRL__REG_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_GL1A_CTRL__REQ_CLI_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_GL1A_CTRL__REQ_ARB_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_GL1A_CTRL__RET_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_GL1A_CTRL__REQ_CREDIT_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_GL1A_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x00000020L
+//ICG_CHA_CTRL
+#define ICG_CHA_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHA_CTRL__REQ_CLI_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHA_CTRL__REQ_ARB_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHA_CTRL__RET_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHA_CTRL__REQ_CREDIT_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHA_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHA_CTRL__REG_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHA_CTRL__REQ_CLI_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHA_CTRL__REQ_ARB_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHA_CTRL__RET_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHA_CTRL__REQ_CREDIT_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHA_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x00000020L
+//GUS_ICG_CTRL
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_DRAM__SHIFT 0x0
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x2
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_DEMUX__SHIFT 0x3
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_WRITE__SHIFT 0x4
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_READ__SHIFT 0x5
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x6
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x7
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_STATIC__SHIFT 0x8
+#define GUS_ICG_CTRL__SPARE1__SHIFT 0x9
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_DRAM_MASK 0x00000001L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000002L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000004L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_DEMUX_MASK 0x00000008L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_WRITE_MASK 0x00000010L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_READ_MASK 0x00000020L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000040L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x00000080L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_STATIC_MASK 0x00000100L
+#define GUS_ICG_CTRL__SPARE1_MASK 0x0003FE00L
+//CGTT_PH_CLK_CTRL0
+#define CGTT_PH_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL0__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PH_CLK_CTRL0__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PH_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL0__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PH_CLK_CTRL0__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL1
+#define CGTT_PH_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL2
+#define CGTT_PH_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL3
+#define CGTT_PH_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+//GFX_ICG_GL2C_CTRL
+#define GFX_ICG_GL2C_CTRL__REG_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2C_CTRL__PERFMON_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2C_CTRL__IB_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2C_CTRL__TAG_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2C_CTRL__CM_CORE_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2C_CTRL__CORE_OVERRIDE__SHIFT 0x5
+#define GFX_ICG_GL2C_CTRL__CACHE_RAM_OVERRIDE__SHIFT 0x6
+#define GFX_ICG_GL2C_CTRL__GCR_OVERRIDE__SHIFT 0x7
+#define GFX_ICG_GL2C_CTRL__EXECUTE_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2C_CTRL__RETURN_BUFFER_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2C_CTRL__LATENCY_FIFO_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2C_CTRL__OUTPUT_FIFOS_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2C_CTRL__MC_WRITE_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2C_CTRL__EXECUTE_DECOMP_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2C_CTRL__EXECUTE_WRITE_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP0_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP1_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP2_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP3_OVERRIDE__SHIFT 0x12
+#define GFX_ICG_GL2C_CTRL__CM_RVF_OVERRIDE__SHIFT 0x14
+#define GFX_ICG_GL2C_CTRL__CM_SDR_OVERRIDE__SHIFT 0x15
+#define GFX_ICG_GL2C_CTRL__CM_RPF_OVERRIDE__SHIFT 0x16
+#define GFX_ICG_GL2C_CTRL__CM_STS_OVERRIDE__SHIFT 0x17
+#define GFX_ICG_GL2C_CTRL__CM_READ_OVERRIDE__SHIFT 0x18
+#define GFX_ICG_GL2C_CTRL__CM_MERGE_OVERRIDE__SHIFT 0x19
+#define GFX_ICG_GL2C_CTRL__CM_COMP_OVERRIDE__SHIFT 0x1a
+#define GFX_ICG_GL2C_CTRL__CM_DCC_OVERRIDE__SHIFT 0x1b
+#define GFX_ICG_GL2C_CTRL__CM_WRITE_OVERRIDE__SHIFT 0x1c
+#define GFX_ICG_GL2C_CTRL__CM_NOOP_OVERRIDE__SHIFT 0x1d
+#define GFX_ICG_GL2C_CTRL__MDC_TAG_OVERRIDE__SHIFT 0x1e
+#define GFX_ICG_GL2C_CTRL__MDC_DATA_OVERRIDE__SHIFT 0x1f
+#define GFX_ICG_GL2C_CTRL__REG_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2C_CTRL__PERFMON_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2C_CTRL__IB_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2C_CTRL__TAG_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2C_CTRL__CM_CORE_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2C_CTRL__CORE_OVERRIDE_MASK 0x00000020L
+#define GFX_ICG_GL2C_CTRL__CACHE_RAM_OVERRIDE_MASK 0x00000040L
+#define GFX_ICG_GL2C_CTRL__GCR_OVERRIDE_MASK 0x00000080L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2C_CTRL__RETURN_BUFFER_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2C_CTRL__LATENCY_FIFO_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2C_CTRL__OUTPUT_FIFOS_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2C_CTRL__MC_WRITE_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_DECOMP_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_WRITE_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP0_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP1_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP2_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP3_OVERRIDE_MASK 0x00040000L
+#define GFX_ICG_GL2C_CTRL__CM_RVF_OVERRIDE_MASK 0x00100000L
+#define GFX_ICG_GL2C_CTRL__CM_SDR_OVERRIDE_MASK 0x00200000L
+#define GFX_ICG_GL2C_CTRL__CM_RPF_OVERRIDE_MASK 0x00400000L
+#define GFX_ICG_GL2C_CTRL__CM_STS_OVERRIDE_MASK 0x00800000L
+#define GFX_ICG_GL2C_CTRL__CM_READ_OVERRIDE_MASK 0x01000000L
+#define GFX_ICG_GL2C_CTRL__CM_MERGE_OVERRIDE_MASK 0x02000000L
+#define GFX_ICG_GL2C_CTRL__CM_COMP_OVERRIDE_MASK 0x04000000L
+#define GFX_ICG_GL2C_CTRL__CM_DCC_OVERRIDE_MASK 0x08000000L
+#define GFX_ICG_GL2C_CTRL__CM_WRITE_OVERRIDE_MASK 0x10000000L
+#define GFX_ICG_GL2C_CTRL__CM_NOOP_OVERRIDE_MASK 0x20000000L
+#define GFX_ICG_GL2C_CTRL__MDC_TAG_OVERRIDE_MASK 0x40000000L
+#define GFX_ICG_GL2C_CTRL__MDC_DATA_OVERRIDE_MASK 0x80000000L
+//GFX_ICG_GL2C_CTRL1
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT0_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT1_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT2_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT3_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT4_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT5_OVERRIDE__SHIFT 0x5
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT6_OVERRIDE__SHIFT 0x6
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT7_OVERRIDE__SHIFT 0x7
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT8_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT9_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT10_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT11_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT12_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT13_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT14_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT15_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT16_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT17_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2C_CTRL1__TAG_PROBE_OVERRIDE__SHIFT 0x18
+#define GFX_ICG_GL2C_CTRL1__DCC_UPPER_OVERRIDE__SHIFT 0x19
+#define GFX_ICG_GL2C_CTRL1__DCC_LOWER_OVERRIDE__SHIFT 0x1a
+#define GFX_ICG_GL2C_CTRL1__ZD_UPPER_OVERRIDE__SHIFT 0x1b
+#define GFX_ICG_GL2C_CTRL1__ZD_LOWER_OVERRIDE__SHIFT 0x1c
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT0_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT1_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT2_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT3_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT4_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT5_OVERRIDE_MASK 0x00000020L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT6_OVERRIDE_MASK 0x00000040L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT7_OVERRIDE_MASK 0x00000080L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT8_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT9_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT10_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT11_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT12_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT13_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT14_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT15_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT16_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT17_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2C_CTRL1__TAG_PROBE_OVERRIDE_MASK 0x01000000L
+#define GFX_ICG_GL2C_CTRL1__DCC_UPPER_OVERRIDE_MASK 0x02000000L
+#define GFX_ICG_GL2C_CTRL1__DCC_LOWER_OVERRIDE_MASK 0x04000000L
+#define GFX_ICG_GL2C_CTRL1__ZD_UPPER_OVERRIDE_MASK 0x08000000L
+#define GFX_ICG_GL2C_CTRL1__ZD_LOWER_OVERRIDE_MASK 0x10000000L
+//ICG_LDS_CLK_CTRL
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD0_OVERRIDE__SHIFT 0x0
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD1_OVERRIDE__SHIFT 0x1
+#define ICG_LDS_CLK_CTRL__LDS_WGP_ARB_OVERRIDE__SHIFT 0x2
+#define ICG_LDS_CLK_CTRL__LDS_TD_OVERRIDE__SHIFT 0x3
+#define ICG_LDS_CLK_CTRL__LDS_ATTR_WR_OVERRIDE__SHIFT 0x4
+#define ICG_LDS_CLK_CTRL__LDS_CONFIG_REG_OVERRIDE__SHIFT 0x5
+#define ICG_LDS_CLK_CTRL__LDS_IDX_PIPE_OVERRIDE__SHIFT 0x6
+#define ICG_LDS_CLK_CTRL__LDS_IDX_DIR_OVERRIDE__SHIFT 0x7
+#define ICG_LDS_CLK_CTRL__LDS_IDX_WR_OVERRIDE__SHIFT 0x8
+#define ICG_LDS_CLK_CTRL__LDS_IDX_INPUT_QUEUE_OVERRIDE__SHIFT 0x9
+#define ICG_LDS_CLK_CTRL__LDS_MEM_OVERRIDE__SHIFT 0xa
+#define ICG_LDS_CLK_CTRL__LDS_IDX_OUTPUT_ALIGNER_OVERRIDE__SHIFT 0xb
+#define ICG_LDS_CLK_CTRL__LDS_DIR_OUTPUT_ALIGNER_OVERRIDE__SHIFT 0xc
+#define ICG_LDS_CLK_CTRL__LDS_IDX_BANK_CONFLICT_OVERRIDE__SHIFT 0xd
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_INPUT_OVERRIDE__SHIFT 0xe
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_OUTPUT_OVERRIDE__SHIFT 0xf
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_PIPE_OVERRIDE__SHIFT 0x10
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHEDULER_OVERRIDE__SHIFT 0x11
+#define ICG_LDS_CLK_CTRL__LDS_IDX_RDRTN_OVERRIDE__SHIFT 0x12
+#define ICG_LDS_CLK_CTRL__LDS_SP_DONE_OVERRIDE__SHIFT 0x13
+#define ICG_LDS_CLK_CTRL__LDS_SQC_PERF_OVERRIDE__SHIFT 0x14
+#define ICG_LDS_CLK_CTRL__LDS_SP_READ_OVERRIDE__SHIFT 0x15
+#define ICG_LDS_CLK_CTRL__SQ_LDS_VMEMCMD_OVERRIDE__SHIFT 0x16
+#define ICG_LDS_CLK_CTRL__SP_LDS_VMEMREQ_OVERRIDE__SHIFT 0x17
+#define ICG_LDS_CLK_CTRL__SPI_LDS_STALL_OVERRIDE__SHIFT 0x18
+#define ICG_LDS_CLK_CTRL__MEM_WR_OVERRIDE__SHIFT 0x19
+#define ICG_LDS_CLK_CTRL__LDS_CLK_OVERRIDE_UNUSED__SHIFT 0x1a
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD0_OVERRIDE_MASK 0x00000001L
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD1_OVERRIDE_MASK 0x00000002L
+#define ICG_LDS_CLK_CTRL__LDS_WGP_ARB_OVERRIDE_MASK 0x00000004L
+#define ICG_LDS_CLK_CTRL__LDS_TD_OVERRIDE_MASK 0x00000008L
+#define ICG_LDS_CLK_CTRL__LDS_ATTR_WR_OVERRIDE_MASK 0x00000010L
+#define ICG_LDS_CLK_CTRL__LDS_CONFIG_REG_OVERRIDE_MASK 0x00000020L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_PIPE_OVERRIDE_MASK 0x00000040L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_DIR_OVERRIDE_MASK 0x00000080L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_WR_OVERRIDE_MASK 0x00000100L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_INPUT_QUEUE_OVERRIDE_MASK 0x00000200L
+#define ICG_LDS_CLK_CTRL__LDS_MEM_OVERRIDE_MASK 0x00000400L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_OUTPUT_ALIGNER_OVERRIDE_MASK 0x00000800L
+#define ICG_LDS_CLK_CTRL__LDS_DIR_OUTPUT_ALIGNER_OVERRIDE_MASK 0x00001000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_BANK_CONFLICT_OVERRIDE_MASK 0x00002000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_INPUT_OVERRIDE_MASK 0x00004000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_OUTPUT_OVERRIDE_MASK 0x00008000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_PIPE_OVERRIDE_MASK 0x00010000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHEDULER_OVERRIDE_MASK 0x00020000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_RDRTN_OVERRIDE_MASK 0x00040000L
+#define ICG_LDS_CLK_CTRL__LDS_SP_DONE_OVERRIDE_MASK 0x00080000L
+#define ICG_LDS_CLK_CTRL__LDS_SQC_PERF_OVERRIDE_MASK 0x00100000L
+#define ICG_LDS_CLK_CTRL__LDS_SP_READ_OVERRIDE_MASK 0x00200000L
+#define ICG_LDS_CLK_CTRL__SQ_LDS_VMEMCMD_OVERRIDE_MASK 0x00400000L
+#define ICG_LDS_CLK_CTRL__SP_LDS_VMEMREQ_OVERRIDE_MASK 0x00800000L
+#define ICG_LDS_CLK_CTRL__SPI_LDS_STALL_OVERRIDE_MASK 0x01000000L
+#define ICG_LDS_CLK_CTRL__MEM_WR_OVERRIDE_MASK 0x02000000L
+#define ICG_LDS_CLK_CTRL__LDS_CLK_OVERRIDE_UNUSED_MASK 0xFC000000L
+//GFX_ICG_UTCL1_CTRL
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE0__SHIFT 0x0
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE1__SHIFT 0x1
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE2__SHIFT 0x2
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE3__SHIFT 0x3
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE4__SHIFT 0x4
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE5__SHIFT 0x5
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE6__SHIFT 0x6
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE7__SHIFT 0x7
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE8__SHIFT 0x8
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE9__SHIFT 0x9
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE10__SHIFT 0xa
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE11__SHIFT 0xb
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE12__SHIFT 0xc
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE13__SHIFT 0xd
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE14__SHIFT 0xe
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE15_31__SHIFT 0xf
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE0_MASK 0x00000001L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE1_MASK 0x00000002L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE2_MASK 0x00000004L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE3_MASK 0x00000008L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE4_MASK 0x00000010L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE5_MASK 0x00000020L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE6_MASK 0x00000040L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE7_MASK 0x00000080L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE8_MASK 0x00000100L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE9_MASK 0x00000200L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE10_MASK 0x00000400L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE11_MASK 0x00000800L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE12_MASK 0x00001000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE13_MASK 0x00002000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE14_MASK 0x00004000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE15_31_MASK 0xFFFF8000L
+//ICG_CHC_CLK_CTRL
+#define ICG_CHC_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHC_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHC_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHC_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHC_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHC_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHC_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_CHC_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHC_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHC_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHC_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHC_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHC_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_CHC_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000040L
+//ICG_CHCG_CLK_CTRL
+#define ICG_CHCG_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHCG_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHCG_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHCG_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHCG_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHCG_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHCG_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_CHCG_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHCG_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHCG_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHCG_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHCG_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHCG_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_CHCG_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000040L
+
+
+// addressBlock: gc_pspdec
+//CP_MES_DM_INDEX_ADDR
+#define CP_MES_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_MES_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_MES_DM_INDEX_DATA
+#define CP_MES_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_MES_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_DM_INDEX_ADDR
+#define CP_MEC_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_MEC_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_DM_INDEX_DATA
+#define CP_MEC_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_MEC_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DM_INDEX_ADDR
+#define CP_GFX_RS64_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_GFX_RS64_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DM_INDEX_DATA
+#define CP_GFX_RS64_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_GFX_RS64_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_PSP_DEBUG
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL__SHIFT 0x2
+#define CPG_PSP_DEBUG__GPA_OVERRIDE__SHIFT 0x3
+#define CPG_PSP_DEBUG__UCODE_VF_OVERRIDE__SHIFT 0x4
+#define CPG_PSP_DEBUG__MTYPE_TMZ_OVERRIDE__SHIFT 0x5
+#define CPG_PSP_DEBUG__SECURE_REG_OVERRIDE__SHIFT 0x6
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL_MASK 0x00000004L
+#define CPG_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L
+#define CPG_PSP_DEBUG__UCODE_VF_OVERRIDE_MASK 0x00000010L
+#define CPG_PSP_DEBUG__MTYPE_TMZ_OVERRIDE_MASK 0x00000020L
+#define CPG_PSP_DEBUG__SECURE_REG_OVERRIDE_MASK 0x00000040L
+//CPC_PSP_DEBUG
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPC_PSP_DEBUG__GPA_OVERRIDE__SHIFT 0x3
+#define CPC_PSP_DEBUG__UCODE_VF_OVERRIDE__SHIFT 0x4
+#define CPC_PSP_DEBUG__MTYPE_TMZ_OVERRIDE__SHIFT 0x5
+#define CPC_PSP_DEBUG__SECURE_REG_OVERRIDE__SHIFT 0x6
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPC_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L
+#define CPC_PSP_DEBUG__UCODE_VF_OVERRIDE_MASK 0x00000010L
+#define CPC_PSP_DEBUG__MTYPE_TMZ_OVERRIDE_MASK 0x00000020L
+#define CPC_PSP_DEBUG__SECURE_REG_OVERRIDE_MASK 0x00000040L
+//GRBM_IOV_ERROR_FIFO
+#define GRBM_IOV_ERROR_FIFO__IOV_ADDR__SHIFT 0x0
+#define GRBM_IOV_ERROR_FIFO__IOV_VFID__SHIFT 0x12
+#define GRBM_IOV_ERROR_FIFO__IOV_SSRCID__SHIFT 0x18
+#define GRBM_IOV_ERROR_FIFO__IOV_OP__SHIFT 0x1c
+#define GRBM_IOV_ERROR_FIFO__IOV_VF__SHIFT 0x1d
+#define GRBM_IOV_ERROR_FIFO__FIFO_OVERFLOW__SHIFT 0x1e
+#define GRBM_IOV_ERROR_FIFO__READ_VALID__SHIFT 0x1f
+#define GRBM_IOV_ERROR_FIFO__IOV_ADDR_MASK 0x0003FFFFL
+#define GRBM_IOV_ERROR_FIFO__IOV_VFID_MASK 0x00FC0000L
+#define GRBM_IOV_ERROR_FIFO__IOV_SSRCID_MASK 0x0F000000L
+#define GRBM_IOV_ERROR_FIFO__IOV_OP_MASK 0x10000000L
+#define GRBM_IOV_ERROR_FIFO__IOV_VF_MASK 0x20000000L
+#define GRBM_IOV_ERROR_FIFO__FIFO_OVERFLOW_MASK 0x40000000L
+#define GRBM_IOV_ERROR_FIFO__READ_VALID_MASK 0x80000000L
+//GRBM_SEC_CNTL
+#define GRBM_SEC_CNTL__DEBUG_ENABLE__SHIFT 0x0
+#define GRBM_SEC_CNTL__DEBUG_ENABLE_MASK 0x00000001L
+//GRBM_CAM_INDEX
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x0000000FL
+//GRBM_HYP_CAM_INDEX
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x0000000FL
+//GRBM_CAM_DATA
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_HYP_CAM_DATA
+#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_CAM_DATA_UPPER
+#define GRBM_CAM_DATA_UPPER__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA_UPPER__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA_UPPER__CAM_ADDR_MASK 0x00000003L
+#define GRBM_CAM_DATA_UPPER__CAM_REMAPADDR_MASK 0x00030000L
+//GRBM_HYP_CAM_DATA_UPPER
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_ADDR_MASK 0x00000003L
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_REMAPADDR_MASK 0x00030000L
+//RLC_FWL_FIRST_VIOL_ADDR
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR__SHIFT 0x0
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID__SHIFT 0x12
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP__SHIFT 0x1e
+#define RLC_FWL_FIRST_VIOL_ADDR__RESERVED__SHIFT 0x1f
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR_MASK 0x0003FFFFL
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID_MASK 0x3FFC0000L
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP_MASK 0x40000000L
+#define RLC_FWL_FIRST_VIOL_ADDR__RESERVED_MASK 0x80000000L
+
+
+// addressBlock: gc_gfx_imu_gfx_imudec
+//GFX_IMU_C2PMSG_0
+#define GFX_IMU_C2PMSG_0__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_1
+#define GFX_IMU_C2PMSG_1__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_2
+#define GFX_IMU_C2PMSG_2__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_3
+#define GFX_IMU_C2PMSG_3__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_4
+#define GFX_IMU_C2PMSG_4__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_5
+#define GFX_IMU_C2PMSG_5__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_5__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_6
+#define GFX_IMU_C2PMSG_6__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_6__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_7
+#define GFX_IMU_C2PMSG_7__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_7__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_8
+#define GFX_IMU_C2PMSG_8__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_8__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_9
+#define GFX_IMU_C2PMSG_9__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_9__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_10
+#define GFX_IMU_C2PMSG_10__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_10__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_11
+#define GFX_IMU_C2PMSG_11__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_11__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_12
+#define GFX_IMU_C2PMSG_12__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_12__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_13
+#define GFX_IMU_C2PMSG_13__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_13__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_14
+#define GFX_IMU_C2PMSG_14__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_14__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_15
+#define GFX_IMU_C2PMSG_15__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_15__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_16
+#define GFX_IMU_C2PMSG_16__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_16__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_17
+#define GFX_IMU_C2PMSG_17__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_17__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_18
+#define GFX_IMU_C2PMSG_18__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_18__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_19
+#define GFX_IMU_C2PMSG_19__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_19__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_20
+#define GFX_IMU_C2PMSG_20__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_20__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_21
+#define GFX_IMU_C2PMSG_21__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_21__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_22
+#define GFX_IMU_C2PMSG_22__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_22__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_23
+#define GFX_IMU_C2PMSG_23__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_23__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_24
+#define GFX_IMU_C2PMSG_24__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_24__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_25
+#define GFX_IMU_C2PMSG_25__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_25__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_26
+#define GFX_IMU_C2PMSG_26__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_26__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_27
+#define GFX_IMU_C2PMSG_27__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_27__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_28
+#define GFX_IMU_C2PMSG_28__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_28__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_29
+#define GFX_IMU_C2PMSG_29__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_29__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_30
+#define GFX_IMU_C2PMSG_30__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_30__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_31
+#define GFX_IMU_C2PMSG_31__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_31__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_32
+#define GFX_IMU_C2PMSG_32__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_32__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_33
+#define GFX_IMU_C2PMSG_33__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_33__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_34
+#define GFX_IMU_C2PMSG_34__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_34__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_35
+#define GFX_IMU_C2PMSG_35__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_35__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_36
+#define GFX_IMU_C2PMSG_36__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_36__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_37
+#define GFX_IMU_C2PMSG_37__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_37__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_38
+#define GFX_IMU_C2PMSG_38__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_38__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_39
+#define GFX_IMU_C2PMSG_39__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_39__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_40
+#define GFX_IMU_C2PMSG_40__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_40__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_41
+#define GFX_IMU_C2PMSG_41__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_41__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_42
+#define GFX_IMU_C2PMSG_42__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_42__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_43
+#define GFX_IMU_C2PMSG_43__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_43__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_44
+#define GFX_IMU_C2PMSG_44__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_44__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_45
+#define GFX_IMU_C2PMSG_45__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_45__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_46
+#define GFX_IMU_C2PMSG_46__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_46__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_47
+#define GFX_IMU_C2PMSG_47__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_47__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_MSG_FLAGS
+#define GFX_IMU_MSG_FLAGS__STATUS__SHIFT 0x0
+#define GFX_IMU_MSG_FLAGS__STATUS_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_ACCESS_CTRL0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC0__SHIFT 0x0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC1__SHIFT 0x3
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC2__SHIFT 0x6
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC3__SHIFT 0x9
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC4__SHIFT 0xc
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC5__SHIFT 0xf
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC6__SHIFT 0x12
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC7__SHIFT 0x15
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC0_MASK 0x00000007L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC1_MASK 0x00000038L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC2_MASK 0x000001C0L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC3_MASK 0x00000E00L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC4_MASK 0x00007000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC5_MASK 0x00038000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC6_MASK 0x001C0000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC7_MASK 0x00E00000L
+//GFX_IMU_C2PMSG_ACCESS_CTRL1
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC8_15__SHIFT 0x0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC16_23__SHIFT 0x3
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC24_31__SHIFT 0x6
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC32_39__SHIFT 0x9
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC40_47__SHIFT 0xc
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC8_15_MASK 0x00000007L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC16_23_MASK 0x00000038L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC24_31_MASK 0x000001C0L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC32_39_MASK 0x00000E00L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC40_47_MASK 0x00007000L
+//GFX_IMU_PWRMGT_IRQ_CTRL
+#define GFX_IMU_PWRMGT_IRQ_CTRL__REQ__SHIFT 0x0
+#define GFX_IMU_PWRMGT_IRQ_CTRL__REQ_MASK 0x00000001L
+//GFX_IMU_MP1_MUTEX
+#define GFX_IMU_MP1_MUTEX__MUTEX__SHIFT 0x0
+#define GFX_IMU_MP1_MUTEX__MUTEX_MASK 0x00000003L
+//GFX_IMU_RLC_DATA_4
+#define GFX_IMU_RLC_DATA_4__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_3
+#define GFX_IMU_RLC_DATA_3__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_2
+#define GFX_IMU_RLC_DATA_2__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_1
+#define GFX_IMU_RLC_DATA_1__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_0
+#define GFX_IMU_RLC_DATA_0__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_CMD
+#define GFX_IMU_RLC_CMD__CMD__SHIFT 0x0
+#define GFX_IMU_RLC_CMD__CMD_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_MUTEX
+#define GFX_IMU_RLC_MUTEX__MUTEX__SHIFT 0x0
+#define GFX_IMU_RLC_MUTEX__MUTEX_MASK 0x00000003L
+//GFX_IMU_RLC_MSG_STATUS
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_BUSY__SHIFT 0x0
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_MSG_ERROR__SHIFT 0x1
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_MSGDONE__SHIFT 0x10
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_CHGTOG__SHIFT 0x1e
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_DONETOG__SHIFT 0x1f
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_BUSY_MASK 0x00000001L
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_MSG_ERROR_MASK 0x00000002L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_MSGDONE_MASK 0x00010000L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_CHGTOG_MASK 0x40000000L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_DONETOG_MASK 0x80000000L
+//RLC_GFX_IMU_DATA_0
+#define RLC_GFX_IMU_DATA_0__DATA__SHIFT 0x0
+#define RLC_GFX_IMU_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GFX_IMU_CMD
+#define RLC_GFX_IMU_CMD__CMD__SHIFT 0x0
+#define RLC_GFX_IMU_CMD__CMD_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_STATUS
+#define GFX_IMU_RLC_STATUS__PD_ACTIVE__SHIFT 0x0
+#define GFX_IMU_RLC_STATUS__RLC_ALIVE__SHIFT 0x1
+#define GFX_IMU_RLC_STATUS__TBD2__SHIFT 0x2
+#define GFX_IMU_RLC_STATUS__TBD3__SHIFT 0x3
+#define GFX_IMU_RLC_STATUS__PD_ACTIVE_MASK 0x00000001L
+#define GFX_IMU_RLC_STATUS__RLC_ALIVE_MASK 0x00000002L
+#define GFX_IMU_RLC_STATUS__TBD2_MASK 0x00000004L
+#define GFX_IMU_RLC_STATUS__TBD3_MASK 0x00000008L
+//GFX_IMU_STATUS
+#define GFX_IMU_STATUS__ALLOW_GFXOFF__SHIFT 0x0
+#define GFX_IMU_STATUS__ALLOW_FA_DCS__SHIFT 0x1
+#define GFX_IMU_STATUS__TBD2__SHIFT 0x2
+#define GFX_IMU_STATUS__TBD3__SHIFT 0x3
+#define GFX_IMU_STATUS__TBD4__SHIFT 0x4
+#define GFX_IMU_STATUS__TBD5__SHIFT 0x5
+#define GFX_IMU_STATUS__TBD6__SHIFT 0x6
+#define GFX_IMU_STATUS__TBD7__SHIFT 0x7
+#define GFX_IMU_STATUS__TBD8__SHIFT 0x8
+#define GFX_IMU_STATUS__TBD9__SHIFT 0x9
+#define GFX_IMU_STATUS__TBD10__SHIFT 0xa
+#define GFX_IMU_STATUS__TBD11__SHIFT 0xb
+#define GFX_IMU_STATUS__TBD12__SHIFT 0xc
+#define GFX_IMU_STATUS__TBD13__SHIFT 0xd
+#define GFX_IMU_STATUS__TBD14__SHIFT 0xe
+#define GFX_IMU_STATUS__DISABLE_GFXCLK_DS__SHIFT 0xf
+#define GFX_IMU_STATUS__ALLOW_GFXOFF_MASK 0x00000001L
+#define GFX_IMU_STATUS__ALLOW_FA_DCS_MASK 0x00000002L
+#define GFX_IMU_STATUS__TBD2_MASK 0x00000004L
+#define GFX_IMU_STATUS__TBD3_MASK 0x00000008L
+#define GFX_IMU_STATUS__TBD4_MASK 0x00000010L
+#define GFX_IMU_STATUS__TBD5_MASK 0x00000020L
+#define GFX_IMU_STATUS__TBD6_MASK 0x00000040L
+#define GFX_IMU_STATUS__TBD7_MASK 0x00000080L
+#define GFX_IMU_STATUS__TBD8_MASK 0x00000100L
+#define GFX_IMU_STATUS__TBD9_MASK 0x00000200L
+#define GFX_IMU_STATUS__TBD10_MASK 0x00000400L
+#define GFX_IMU_STATUS__TBD11_MASK 0x00000800L
+#define GFX_IMU_STATUS__TBD12_MASK 0x00001000L
+#define GFX_IMU_STATUS__TBD13_MASK 0x00002000L
+#define GFX_IMU_STATUS__TBD14_MASK 0x00004000L
+#define GFX_IMU_STATUS__DISABLE_GFXCLK_DS_MASK 0x00008000L
+//GFX_IMU_SOC_DATA
+#define GFX_IMU_SOC_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_SOC_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SOC_ADDR
+#define GFX_IMU_SOC_ADDR__ADDR__SHIFT 0x0
+#define GFX_IMU_SOC_ADDR__ADDR_MASK 0xFFFFFFFFL
+//GFX_IMU_SOC_REQ
+#define GFX_IMU_SOC_REQ__REQ_BUSY__SHIFT 0x0
+#define GFX_IMU_SOC_REQ__R_W__SHIFT 0x1
+#define GFX_IMU_SOC_REQ__ERR__SHIFT 0x1f
+#define GFX_IMU_SOC_REQ__REQ_BUSY_MASK 0x00000001L
+#define GFX_IMU_SOC_REQ__R_W_MASK 0x00000002L
+#define GFX_IMU_SOC_REQ__ERR_MASK 0x80000000L
+//GFX_IMU_VF_CTRL
+#define GFX_IMU_VF_CTRL__VF__SHIFT 0x0
+#define GFX_IMU_VF_CTRL__VFID__SHIFT 0x1
+#define GFX_IMU_VF_CTRL__QOS__SHIFT 0x7
+#define GFX_IMU_VF_CTRL__VF_MASK 0x00000001L
+#define GFX_IMU_VF_CTRL__VFID_MASK 0x0000007EL
+#define GFX_IMU_VF_CTRL__QOS_MASK 0x00000780L
+//GFX_IMU_TELEMETRY
+#define GFX_IMU_TELEMETRY__TELEMETRY_ENTRIES__SHIFT 0x0
+#define GFX_IMU_TELEMETRY__TELEMETRY_DATA_SAMPLE_SIZE__SHIFT 0x5
+#define GFX_IMU_TELEMETRY__FIFO_OVERFLOW__SHIFT 0x6
+#define GFX_IMU_TELEMETRY__FIFO_UNDERFLOW__SHIFT 0x7
+#define GFX_IMU_TELEMETRY__FSM_STATE__SHIFT 0x8
+#define GFX_IMU_TELEMETRY__SVI_TYPE__SHIFT 0xc
+#define GFX_IMU_TELEMETRY__ENABLE_FIFO__SHIFT 0x1e
+#define GFX_IMU_TELEMETRY__ENABLE_IMU_RLC_TELEMETRY__SHIFT 0x1f
+#define GFX_IMU_TELEMETRY__TELEMETRY_ENTRIES_MASK 0x0000001FL
+#define GFX_IMU_TELEMETRY__TELEMETRY_DATA_SAMPLE_SIZE_MASK 0x00000020L
+#define GFX_IMU_TELEMETRY__FIFO_OVERFLOW_MASK 0x00000040L
+#define GFX_IMU_TELEMETRY__FIFO_UNDERFLOW_MASK 0x00000080L
+#define GFX_IMU_TELEMETRY__FSM_STATE_MASK 0x00000700L
+#define GFX_IMU_TELEMETRY__SVI_TYPE_MASK 0x00003000L
+#define GFX_IMU_TELEMETRY__ENABLE_FIFO_MASK 0x40000000L
+#define GFX_IMU_TELEMETRY__ENABLE_IMU_RLC_TELEMETRY_MASK 0x80000000L
+//GFX_IMU_TELEMETRY_DATA
+#define GFX_IMU_TELEMETRY_DATA__CURRENT__SHIFT 0x0
+#define GFX_IMU_TELEMETRY_DATA__VOLTAGE__SHIFT 0x10
+#define GFX_IMU_TELEMETRY_DATA__CURRENT_MASK 0x0000FFFFL
+#define GFX_IMU_TELEMETRY_DATA__VOLTAGE_MASK 0xFFFF0000L
+//GFX_IMU_TELEMETRY_TEMPERATURE
+#define GFX_IMU_TELEMETRY_TEMPERATURE__TEMPERATURE__SHIFT 0x0
+#define GFX_IMU_TELEMETRY_TEMPERATURE__TEMPERATURE_MASK 0x0000FFFFL
+//GFX_IMU_SCRATCH_0
+#define GFX_IMU_SCRATCH_0__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_1
+#define GFX_IMU_SCRATCH_1__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_2
+#define GFX_IMU_SCRATCH_2__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_3
+#define GFX_IMU_SCRATCH_3__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_4
+#define GFX_IMU_SCRATCH_4__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_5
+#define GFX_IMU_SCRATCH_5__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_5__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_6
+#define GFX_IMU_SCRATCH_6__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_6__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_7
+#define GFX_IMU_SCRATCH_7__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_7__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_8
+#define GFX_IMU_SCRATCH_8__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_8__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_9
+#define GFX_IMU_SCRATCH_9__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_9__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_10
+#define GFX_IMU_SCRATCH_10__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_10__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_11
+#define GFX_IMU_SCRATCH_11__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_11__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_12
+#define GFX_IMU_SCRATCH_12__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_12__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_13
+#define GFX_IMU_SCRATCH_13__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_13__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_14
+#define GFX_IMU_SCRATCH_14__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_14__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_15
+#define GFX_IMU_SCRATCH_15__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_15__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_FW_GTS_LO
+#define GFX_IMU_FW_GTS_LO__TSTAMP_LO__SHIFT 0x0
+#define GFX_IMU_FW_GTS_LO__TSTAMP_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_FW_GTS_HI
+#define GFX_IMU_FW_GTS_HI__TSTAMP_HI__SHIFT 0x0
+#define GFX_IMU_FW_GTS_HI__TSTAMP_HI_MASK 0x00FFFFFFL
+//GFX_IMU_GTS_OFFSET_LO
+#define GFX_IMU_GTS_OFFSET_LO__GTS_OFFSET_LO__SHIFT 0x0
+#define GFX_IMU_GTS_OFFSET_LO__GTS_OFFSET_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_GTS_OFFSET_HI
+#define GFX_IMU_GTS_OFFSET_HI__GTS_OFFSET_HI__SHIFT 0x0
+#define GFX_IMU_GTS_OFFSET_HI__GTS_OFFSET_HI_MASK 0x00FFFFFFL
+//GFX_IMU_RLC_GTS_OFFSET_LO
+#define GFX_IMU_RLC_GTS_OFFSET_LO__GTS_OFFSET_LO__SHIFT 0x0
+#define GFX_IMU_RLC_GTS_OFFSET_LO__GTS_OFFSET_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_GTS_OFFSET_HI
+#define GFX_IMU_RLC_GTS_OFFSET_HI__GTS_OFFSET_HI__SHIFT 0x0
+#define GFX_IMU_RLC_GTS_OFFSET_HI__GTS_OFFSET_HI_MASK 0x00FFFFFFL
+//GFX_IMU_CORE_INT_STATUS
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_24__SHIFT 0x18
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_25__SHIFT 0x19
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_29__SHIFT 0x1d
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_24_MASK 0x01000000L
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_25_MASK 0x02000000L
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_29_MASK 0x20000000L
+//GFX_IMU_PIC_INT_MASK
+#define GFX_IMU_PIC_INT_MASK__MASK_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_MASK__MASK_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_MASK__MASK_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_MASK__MASK_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_MASK__MASK_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_MASK__MASK_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_MASK__MASK_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_MASK__MASK_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_MASK__MASK_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_MASK__MASK_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_MASK__MASK_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_MASK__MASK_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_MASK__MASK_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_MASK__MASK_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_MASK__MASK_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_MASK__MASK_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_MASK__MASK_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_MASK__MASK_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_MASK__MASK_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_MASK__MASK_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_MASK__MASK_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_MASK__MASK_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_MASK__MASK_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_MASK__MASK_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_MASK__MASK_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_MASK__MASK_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_MASK__MASK_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_MASK__MASK_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_MASK__MASK_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_MASK__MASK_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_MASK__MASK_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_MASK__MASK_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_MASK__MASK_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_MASK__MASK_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_MASK__MASK_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_MASK__MASK_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_MASK__MASK_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_MASK__MASK_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_MASK__MASK_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_MASK__MASK_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_MASK__MASK_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_MASK__MASK_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_MASK__MASK_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_MASK__MASK_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_MASK__MASK_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_MASK__MASK_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_MASK__MASK_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_MASK__MASK_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_MASK__MASK_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_MASK__MASK_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_MASK__MASK_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_MASK__MASK_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_MASK__MASK_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_MASK__MASK_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_MASK__MASK_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_MASK__MASK_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_MASK__MASK_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_LVL
+#define GFX_IMU_PIC_INT_LVL__LVL_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_LVL__LVL_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_LVL__LVL_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_LVL__LVL_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_LVL__LVL_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_LVL__LVL_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_LVL__LVL_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_LVL__LVL_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_LVL__LVL_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_LVL__LVL_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_LVL__LVL_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_LVL__LVL_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_LVL__LVL_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_LVL__LVL_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_LVL__LVL_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_LVL__LVL_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_LVL__LVL_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_LVL__LVL_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_LVL__LVL_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_LVL__LVL_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_LVL__LVL_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_LVL__LVL_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_LVL__LVL_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_LVL__LVL_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_LVL__LVL_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_LVL__LVL_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_LVL__LVL_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_LVL__LVL_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_LVL__LVL_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_LVL__LVL_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_LVL__LVL_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_LVL__LVL_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_LVL__LVL_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_LVL__LVL_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_LVL__LVL_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_LVL__LVL_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_LVL__LVL_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_LVL__LVL_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_LVL__LVL_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_LVL__LVL_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_LVL__LVL_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_LVL__LVL_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_LVL__LVL_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_LVL__LVL_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_LVL__LVL_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_LVL__LVL_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_LVL__LVL_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_LVL__LVL_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_LVL__LVL_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_LVL__LVL_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_LVL__LVL_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_LVL__LVL_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_LVL__LVL_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_LVL__LVL_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_LVL__LVL_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_LVL__LVL_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_LVL__LVL_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_EDGE
+#define GFX_IMU_PIC_INT_EDGE__EDGE_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_EDGE__EDGE_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_EDGE__EDGE_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_EDGE__EDGE_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_EDGE__EDGE_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_EDGE__EDGE_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_EDGE__EDGE_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_EDGE__EDGE_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_EDGE__EDGE_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_EDGE__EDGE_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_EDGE__EDGE_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_EDGE__EDGE_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_EDGE__EDGE_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_EDGE__EDGE_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_EDGE__EDGE_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_EDGE__EDGE_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_EDGE__EDGE_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_EDGE__EDGE_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_EDGE__EDGE_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_EDGE__EDGE_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_EDGE__EDGE_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_EDGE__EDGE_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_EDGE__EDGE_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_EDGE__EDGE_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_EDGE__EDGE_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_EDGE__EDGE_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_EDGE__EDGE_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_EDGE__EDGE_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_EDGE__EDGE_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_EDGE__EDGE_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_EDGE__EDGE_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_EDGE__EDGE_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_EDGE__EDGE_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_PRI_0
+#define GFX_IMU_PIC_INT_PRI_0__PRI_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_0__PRI_1__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_0__PRI_2__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_0__PRI_3__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_0__PRI_0_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_0__PRI_1_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_0__PRI_2_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_0__PRI_3_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_1
+#define GFX_IMU_PIC_INT_PRI_1__PRI_4__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_1__PRI_5__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_1__PRI_6__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_1__PRI_7__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_1__PRI_4_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_1__PRI_5_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_1__PRI_6_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_1__PRI_7_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_2
+#define GFX_IMU_PIC_INT_PRI_2__PRI_8__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_2__PRI_9__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_2__PRI_10__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_2__PRI_11__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_2__PRI_8_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_2__PRI_9_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_2__PRI_10_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_2__PRI_11_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_3
+#define GFX_IMU_PIC_INT_PRI_3__PRI_12__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_3__PRI_13__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_3__PRI_14__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_3__PRI_15__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_3__PRI_12_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_3__PRI_13_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_3__PRI_14_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_3__PRI_15_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_4
+#define GFX_IMU_PIC_INT_PRI_4__PRI_16__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_4__PRI_17__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_4__PRI_18__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_4__PRI_19__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_4__PRI_16_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_4__PRI_17_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_4__PRI_18_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_4__PRI_19_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_5
+#define GFX_IMU_PIC_INT_PRI_5__PRI_20__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_5__PRI_21__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_5__PRI_22__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_5__PRI_23__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_5__PRI_20_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_5__PRI_21_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_5__PRI_22_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_5__PRI_23_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_6
+#define GFX_IMU_PIC_INT_PRI_6__PRI_24__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_6__PRI_25__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_6__PRI_26__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_6__PRI_27__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_6__PRI_24_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_6__PRI_25_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_6__PRI_26_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_6__PRI_27_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_7
+#define GFX_IMU_PIC_INT_PRI_7__PRI_28__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_7__PRI_29__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_7__PRI_30__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_7__PRI_31__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_7__PRI_28_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_7__PRI_29_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_7__PRI_30_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_7__PRI_31_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_STATUS
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS31_MASK 0x80000000L
+//GFX_IMU_PIC_INTR
+#define GFX_IMU_PIC_INTR__INTR_n__SHIFT 0x0
+#define GFX_IMU_PIC_INTR__INTR_n_MASK 0x00000001L
+//GFX_IMU_PIC_INTR_ID
+#define GFX_IMU_PIC_INTR_ID__INTR_n__SHIFT 0x0
+#define GFX_IMU_PIC_INTR_ID__INTR_n_MASK 0x000000FFL
+//GFX_IMU_IH_CTRL_1
+#define GFX_IMU_IH_CTRL_1__CONTEXT_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//GFX_IMU_IH_CTRL_2
+#define GFX_IMU_IH_CTRL_2__CONTEXT_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_2__RING_ID__SHIFT 0x8
+#define GFX_IMU_IH_CTRL_2__VM_ID__SHIFT 0x10
+#define GFX_IMU_IH_CTRL_2__SRSTB__SHIFT 0x1f
+#define GFX_IMU_IH_CTRL_2__CONTEXT_ID_MASK 0x000000FFL
+#define GFX_IMU_IH_CTRL_2__RING_ID_MASK 0x0000FF00L
+#define GFX_IMU_IH_CTRL_2__VM_ID_MASK 0x000F0000L
+#define GFX_IMU_IH_CTRL_2__SRSTB_MASK 0x80000000L
+//GFX_IMU_IH_CTRL_3
+#define GFX_IMU_IH_CTRL_3__SOURCE_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_3__VF_ID__SHIFT 0x8
+#define GFX_IMU_IH_CTRL_3__VF__SHIFT 0xd
+#define GFX_IMU_IH_CTRL_3__SOURCE_ID_MASK 0x000000FFL
+#define GFX_IMU_IH_CTRL_3__VF_ID_MASK 0x00001F00L
+#define GFX_IMU_IH_CTRL_3__VF_MASK 0x00002000L
+//GFX_IMU_IH_STATUS
+#define GFX_IMU_IH_STATUS__IH_BUSY__SHIFT 0x0
+#define GFX_IMU_IH_STATUS__IH_BUSY_MASK 0x00000001L
+//GFX_IMU_FUSESTRAP
+#define GFX_IMU_FUSESTRAP__BOOT_VID__SHIFT 0x0
+#define GFX_IMU_FUSESTRAP__BOOT_VID_MASK 0x000001FFL
+//GFX_IMU_SMUIO_VIDCHG_CTRL
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__REQ__SHIFT 0x0
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__DATA__SHIFT 0x1
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__PSIEN__SHIFT 0xa
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__ACK__SHIFT 0xb
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__SRC_SEL__SHIFT 0x1f
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__REQ_MASK 0x00000001L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__DATA_MASK 0x000003FEL
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__PSIEN_MASK 0x00000400L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__ACK_MASK 0x00000800L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__SRC_SEL_MASK 0x80000000L
+//GFX_IMU_GFXCLK_BYPASS_CTRL
+#define GFX_IMU_GFXCLK_BYPASS_CTRL__BYPASS_SEL__SHIFT 0x0
+#define GFX_IMU_GFXCLK_BYPASS_CTRL__BYPASS_SEL_MASK 0x00000001L
+//GFX_IMU_CLK_CTRL
+#define GFX_IMU_CLK_CTRL__CG_OVR__SHIFT 0x0
+#define GFX_IMU_CLK_CTRL__CG_OVR_CORE__SHIFT 0x1
+#define GFX_IMU_CLK_CTRL__CLKDIV__SHIFT 0x4
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_CHGTOG__SHIFT 0x8
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DONETOG__SHIFT 0x9
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DIV__SHIFT 0x10
+#define GFX_IMU_CLK_CTRL__COOLDOWN_PERIOD__SHIFT 0x1c
+#define GFX_IMU_CLK_CTRL__CG_OVR_MASK 0x00000001L
+#define GFX_IMU_CLK_CTRL__CG_OVR_CORE_MASK 0x00000002L
+#define GFX_IMU_CLK_CTRL__CLKDIV_MASK 0x00000010L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_CHGTOG_MASK 0x00000100L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DONETOG_MASK 0x00000200L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DIV_MASK 0x007F0000L
+#define GFX_IMU_CLK_CTRL__COOLDOWN_PERIOD_MASK 0xF0000000L
+//GFX_IMU_DOORBELL_CONTROL
+#define GFX_IMU_DOORBELL_CONTROL__OVR_EN__SHIFT 0x0
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_OVR__SHIFT 0x1
+#define GFX_IMU_DOORBELL_CONTROL__CP_DB_RESP_PEND_COUNT__SHIFT 0x18
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_STATUS__SHIFT 0x1f
+#define GFX_IMU_DOORBELL_CONTROL__OVR_EN_MASK 0x00000001L
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_OVR_MASK 0x00000002L
+#define GFX_IMU_DOORBELL_CONTROL__CP_DB_RESP_PEND_COUNT_MASK 0x7F000000L
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_STATUS_MASK 0x80000000L
+//GFX_IMU_RLC_CG_CTRL
+#define GFX_IMU_RLC_CG_CTRL__FORCE_CGCG__SHIFT 0x0
+#define GFX_IMU_RLC_CG_CTRL__MGCG_EARLY_EN__SHIFT 0x1
+#define GFX_IMU_RLC_CG_CTRL__FORCE_CGCG_MASK 0x00000001L
+#define GFX_IMU_RLC_CG_CTRL__MGCG_EARLY_EN_MASK 0x00000002L
+//GFX_IMU_RLC_THROTTLE_GFX
+#define GFX_IMU_RLC_THROTTLE_GFX__THROTTLE_EN__SHIFT 0x0
+#define GFX_IMU_RLC_THROTTLE_GFX__THROTTLE_EN_MASK 0x00000001L
+//GFX_IMU_RLC_RESET_VECTOR
+#define GFX_IMU_RLC_RESET_VECTOR__COLD_VS_GFXOFF__SHIFT 0x0
+#define GFX_IMU_RLC_RESET_VECTOR__WARM_RESET_EXIT__SHIFT 0x2
+#define GFX_IMU_RLC_RESET_VECTOR__VF_FLR_EXIT__SHIFT 0x3
+#define GFX_IMU_RLC_RESET_VECTOR__VECTOR__SHIFT 0x4
+#define GFX_IMU_RLC_RESET_VECTOR__COLD_VS_GFXOFF_MASK 0x00000001L
+#define GFX_IMU_RLC_RESET_VECTOR__WARM_RESET_EXIT_MASK 0x00000004L
+#define GFX_IMU_RLC_RESET_VECTOR__VF_FLR_EXIT_MASK 0x00000008L
+#define GFX_IMU_RLC_RESET_VECTOR__VECTOR_MASK 0x000000F0L
+//GFX_IMU_RLC_OVERRIDE
+#define GFX_IMU_RLC_OVERRIDE__DS_ALLOW__SHIFT 0x0
+#define GFX_IMU_RLC_OVERRIDE__DS_ALLOW_MASK 0x00000001L
+//GFX_IMU_DPM_CONTROL
+#define GFX_IMU_DPM_CONTROL__ACC_RESET__SHIFT 0x0
+#define GFX_IMU_DPM_CONTROL__ACC_START__SHIFT 0x1
+#define GFX_IMU_DPM_CONTROL__BUSY_MASK__SHIFT 0x2
+#define GFX_IMU_DPM_CONTROL__ACC_RESET_MASK 0x00000001L
+#define GFX_IMU_DPM_CONTROL__ACC_START_MASK 0x00000002L
+#define GFX_IMU_DPM_CONTROL__BUSY_MASK_MASK 0x0003FFFCL
+//GFX_IMU_DPM_ACC
+#define GFX_IMU_DPM_ACC__COUNT__SHIFT 0x0
+#define GFX_IMU_DPM_ACC__COUNT_MASK 0x00FFFFFFL
+//GFX_IMU_DPM_REF_COUNTER
+#define GFX_IMU_DPM_REF_COUNTER__COUNT__SHIFT 0x0
+#define GFX_IMU_DPM_REF_COUNTER__COUNT_MASK 0x00FFFFFFL
+//GFX_IMU_RLC_RAM_INDEX
+#define GFX_IMU_RLC_RAM_INDEX__INDEX__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_INDEX__RLC_INDEX__SHIFT 0x10
+#define GFX_IMU_RLC_RAM_INDEX__RAM_VALID__SHIFT 0x1f
+#define GFX_IMU_RLC_RAM_INDEX__INDEX_MASK 0x000000FFL
+#define GFX_IMU_RLC_RAM_INDEX__RLC_INDEX_MASK 0x00FF0000L
+#define GFX_IMU_RLC_RAM_INDEX__RAM_VALID_MASK 0x80000000L
+//GFX_IMU_RLC_RAM_ADDR_HIGH
+#define GFX_IMU_RLC_RAM_ADDR_HIGH__ADDR_MSB__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_ADDR_HIGH__ADDR_MSB_MASK 0x0000FFFFL
+//GFX_IMU_RLC_RAM_ADDR_LOW
+#define GFX_IMU_RLC_RAM_ADDR_LOW__ADDR_LSB__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_ADDR_LOW__ADDR_LSB_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_RAM_DATA
+#define GFX_IMU_RLC_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_FENCE_CTRL
+#define GFX_IMU_FENCE_CTRL__ENABLED__SHIFT 0x0
+#define GFX_IMU_FENCE_CTRL__ARM_LOG__SHIFT 0x1
+#define GFX_IMU_FENCE_CTRL__GRBM_RSMU_FENCE_ENABLE__SHIFT 0x2
+#define GFX_IMU_FENCE_CTRL__FLUSH_ARBITER_CREDITS__SHIFT 0x3
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_EN__SHIFT 0x8
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR__SHIFT 0x9
+#define GFX_IMU_FENCE_CTRL__ENABLED_MASK 0x00000001L
+#define GFX_IMU_FENCE_CTRL__ARM_LOG_MASK 0x00000002L
+#define GFX_IMU_FENCE_CTRL__GRBM_RSMU_FENCE_ENABLE_MASK 0x00000004L
+#define GFX_IMU_FENCE_CTRL__FLUSH_ARBITER_CREDITS_MASK 0x00000008L
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_EN_MASK 0x00000100L
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_MASK 0x00000200L
+//GFX_IMU_FENCE_LOG_INIT
+#define GFX_IMU_FENCE_LOG_INIT__UNIT_ID__SHIFT 0x0
+#define GFX_IMU_FENCE_LOG_INIT__INITIATOR_ID__SHIFT 0x7
+#define GFX_IMU_FENCE_LOG_INIT__UNIT_ID_MASK 0x0000007FL
+#define GFX_IMU_FENCE_LOG_INIT__INITIATOR_ID_MASK 0x0001FF80L
+//GFX_IMU_FENCE_LOG_ADDR
+#define GFX_IMU_FENCE_LOG_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_FENCE_LOG_ADDR__ADDR_MASK 0x000FFFFCL
+//GFX_IMU_PROGRAM_CTR
+#define GFX_IMU_PROGRAM_CTR__PC__SHIFT 0x0
+#define GFX_IMU_PROGRAM_CTR__PC_MASK 0xFFFFFFFFL
+//GFX_IMU_CORE_CTRL
+#define GFX_IMU_CORE_CTRL__CRESET__SHIFT 0x0
+#define GFX_IMU_CORE_CTRL__CSTALL__SHIFT 0x1
+#define GFX_IMU_CORE_CTRL__CDBGENABLE__SHIFT 0x2
+#define GFX_IMU_CORE_CTRL__DRESET__SHIFT 0x3
+#define GFX_IMU_CORE_CTRL__HALT_ON_RESET__SHIFT 0x4
+#define GFX_IMU_CORE_CTRL__BREAK_IN__SHIFT 0x8
+#define GFX_IMU_CORE_CTRL__BREAK_OUT_ACK__SHIFT 0x9
+#define GFX_IMU_CORE_CTRL__CRESET_MASK 0x00000001L
+#define GFX_IMU_CORE_CTRL__CSTALL_MASK 0x00000002L
+#define GFX_IMU_CORE_CTRL__CDBGENABLE_MASK 0x00000004L
+#define GFX_IMU_CORE_CTRL__DRESET_MASK 0x00000008L
+#define GFX_IMU_CORE_CTRL__HALT_ON_RESET_MASK 0x00000010L
+#define GFX_IMU_CORE_CTRL__BREAK_IN_MASK 0x00000100L
+#define GFX_IMU_CORE_CTRL__BREAK_OUT_ACK_MASK 0x00000200L
+//GFX_IMU_CORE_STATUS
+#define GFX_IMU_CORE_STATUS__CBUSY__SHIFT 0x0
+#define GFX_IMU_CORE_STATUS__PWAIT_MODE__SHIFT 0x1
+#define GFX_IMU_CORE_STATUS__PSP_ACC_ERR__SHIFT 0x2
+#define GFX_IMU_CORE_STATUS__CINTLEVEL__SHIFT 0x4
+#define GFX_IMU_CORE_STATUS__BREAK_IN_ACK__SHIFT 0x8
+#define GFX_IMU_CORE_STATUS__BREAK_OUT__SHIFT 0x9
+#define GFX_IMU_CORE_STATUS__DEBUG_MODE__SHIFT 0xa
+#define GFX_IMU_CORE_STATUS__P_FATAL_ERROR__SHIFT 0xb
+#define GFX_IMU_CORE_STATUS__FAULT_SEVERITY_LEVEL__SHIFT 0x18
+#define GFX_IMU_CORE_STATUS__FAULT_TYPE__SHIFT 0x1c
+#define GFX_IMU_CORE_STATUS__CBUSY_MASK 0x00000001L
+#define GFX_IMU_CORE_STATUS__PWAIT_MODE_MASK 0x00000002L
+#define GFX_IMU_CORE_STATUS__PSP_ACC_ERR_MASK 0x00000004L
+#define GFX_IMU_CORE_STATUS__CINTLEVEL_MASK 0x000000F0L
+#define GFX_IMU_CORE_STATUS__BREAK_IN_ACK_MASK 0x00000100L
+#define GFX_IMU_CORE_STATUS__BREAK_OUT_MASK 0x00000200L
+#define GFX_IMU_CORE_STATUS__DEBUG_MODE_MASK 0x00000400L
+#define GFX_IMU_CORE_STATUS__P_FATAL_ERROR_MASK 0x00000800L
+#define GFX_IMU_CORE_STATUS__FAULT_SEVERITY_LEVEL_MASK 0x0F000000L
+#define GFX_IMU_CORE_STATUS__FAULT_TYPE_MASK 0xF0000000L
+//GFX_IMU_PWROKRAW
+#define GFX_IMU_PWROKRAW__PWROKRAW__SHIFT 0x0
+#define GFX_IMU_PWROKRAW__PWROKRAW_MASK 0x00000001L
+//GFX_IMU_PWROK
+#define GFX_IMU_PWROK__PWROK__SHIFT 0x0
+#define GFX_IMU_PWROK__PWROK_MASK 0x00000001L
+//GFX_IMU_GAP_PWROK
+#define GFX_IMU_GAP_PWROK__GAP_PWROK__SHIFT 0x0
+#define GFX_IMU_GAP_PWROK__GAP_PWROK_MASK 0x00000001L
+//GFX_IMU_RESETn
+#define GFX_IMU_RESETn__Cpl_RESETn__SHIFT 0x0
+#define GFX_IMU_RESETn__Cpl_RESETn_MASK 0x00000001L
+//GFX_IMU_GFX_RESET_CTRL
+#define GFX_IMU_GFX_RESET_CTRL__HARD_RESETB__SHIFT 0x0
+#define GFX_IMU_GFX_RESET_CTRL__EA_RESETB__SHIFT 0x1
+#define GFX_IMU_GFX_RESET_CTRL__UTCL2_RESETB__SHIFT 0x2
+#define GFX_IMU_GFX_RESET_CTRL__SDMA_RESETB__SHIFT 0x3
+#define GFX_IMU_GFX_RESET_CTRL__GRBM_RESETB__SHIFT 0x4
+#define GFX_IMU_GFX_RESET_CTRL__HARD_RESETB_MASK 0x00000001L
+#define GFX_IMU_GFX_RESET_CTRL__EA_RESETB_MASK 0x00000002L
+#define GFX_IMU_GFX_RESET_CTRL__UTCL2_RESETB_MASK 0x00000004L
+#define GFX_IMU_GFX_RESET_CTRL__SDMA_RESETB_MASK 0x00000008L
+#define GFX_IMU_GFX_RESET_CTRL__GRBM_RESETB_MASK 0x00000010L
+//GFX_IMU_AEB_OVERRIDE
+#define GFX_IMU_AEB_OVERRIDE__AEB_OVERRIDE_CTRL__SHIFT 0x0
+#define GFX_IMU_AEB_OVERRIDE__AEB_RESET_VALUE__SHIFT 0x1
+#define GFX_IMU_AEB_OVERRIDE__AEB_VALID_VALUE__SHIFT 0x2
+#define GFX_IMU_AEB_OVERRIDE__AEB_OVERRIDE_CTRL_MASK 0x00000001L
+#define GFX_IMU_AEB_OVERRIDE__AEB_RESET_VALUE_MASK 0x00000002L
+#define GFX_IMU_AEB_OVERRIDE__AEB_VALID_VALUE_MASK 0x00000004L
+//GFX_IMU_VDCI_RESET_CTRL
+#define GFX_IMU_VDCI_RESET_CTRL__SOC2GFX_VDCI_RESETn__SHIFT 0x0
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_EA_SDF_VDCI_RESET__SHIFT 0x1
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_UTCL2_ATHUB_VDCI_RESET__SHIFT 0x2
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_IMUAXI_SYSHUB_VDCI_RESET__SHIFT 0x3
+#define GFX_IMU_VDCI_RESET_CTRL__IMU2GFX_VDCI_RESETn__SHIFT 0x4
+#define GFX_IMU_VDCI_RESET_CTRL__SOC2GFX_VDCI_RESETn_MASK 0x00000001L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_EA_SDF_VDCI_RESET_MASK 0x00000002L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_UTCL2_ATHUB_VDCI_RESET_MASK 0x00000004L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_IMUAXI_SYSHUB_VDCI_RESET_MASK 0x00000008L
+#define GFX_IMU_VDCI_RESET_CTRL__IMU2GFX_VDCI_RESETn_MASK 0x00000010L
+//GFX_IMU_GFX_ISO_CTRL
+#define GFX_IMU_GFX_ISO_CTRL__GFX2IMU_ISOn__SHIFT 0x0
+#define GFX_IMU_GFX_ISO_CTRL__SOC_EA_SDF_VDCI_ISOn_EN__SHIFT 0x1
+#define GFX_IMU_GFX_ISO_CTRL__SOC_UTCL2_ATHUB_VDCI_ISOn_EN__SHIFT 0x2
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_ISOn__SHIFT 0x3
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_CLK_ISOn__SHIFT 0x4
+#define GFX_IMU_GFX_ISO_CTRL__GFX2IMU_ISOn_MASK 0x00000001L
+#define GFX_IMU_GFX_ISO_CTRL__SOC_EA_SDF_VDCI_ISOn_EN_MASK 0x00000002L
+#define GFX_IMU_GFX_ISO_CTRL__SOC_UTCL2_ATHUB_VDCI_ISOn_EN_MASK 0x00000004L
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_ISOn_MASK 0x00000008L
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_CLK_ISOn_MASK 0x00000010L
+//GFX_IMU_TIMER0_CTRL0
+#define GFX_IMU_TIMER0_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER0_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER0_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER0_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER0_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER0_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER0_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER0_CTRL1
+#define GFX_IMU_TIMER0_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER0_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER0_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER0_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER0_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER0_CMP_AUTOINC
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER0_CMP_INTEN
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER0_CMP0
+#define GFX_IMU_TIMER0_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_CMP1
+#define GFX_IMU_TIMER0_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_CMP3
+#define GFX_IMU_TIMER0_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_VALUE
+#define GFX_IMU_TIMER0_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CTRL0
+#define GFX_IMU_TIMER1_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER1_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER1_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER1_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER1_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER1_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER1_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER1_CTRL1
+#define GFX_IMU_TIMER1_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER1_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER1_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER1_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER1_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER1_CMP_AUTOINC
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER1_CMP_INTEN
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER1_CMP0
+#define GFX_IMU_TIMER1_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CMP1
+#define GFX_IMU_TIMER1_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CMP3
+#define GFX_IMU_TIMER1_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_VALUE
+#define GFX_IMU_TIMER1_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CTRL0
+#define GFX_IMU_TIMER2_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER2_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER2_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER2_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER2_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER2_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER2_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER2_CTRL1
+#define GFX_IMU_TIMER2_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER2_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER2_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER2_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER2_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER2_CMP_AUTOINC
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER2_CMP_INTEN
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER2_CMP0
+#define GFX_IMU_TIMER2_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CMP1
+#define GFX_IMU_TIMER2_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CMP3
+#define GFX_IMU_TIMER2_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_VALUE
+#define GFX_IMU_TIMER2_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_FUSE_CTRL
+#define GFX_IMU_FUSE_CTRL__DIV_OVR__SHIFT 0x0
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_EN__SHIFT 0x5
+#define GFX_IMU_FUSE_CTRL__FORCE_DONE__SHIFT 0x6
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_MASK 0x0000001FL
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_EN_MASK 0x00000020L
+#define GFX_IMU_FUSE_CTRL__FORCE_DONE_MASK 0x00000040L
+//GFX_IMU_D_RAM_ADDR
+#define GFX_IMU_D_RAM_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_D_RAM_ADDR__ADDR_MASK 0x0000FFFCL
+//GFX_IMU_D_RAM_DATA
+#define GFX_IMU_D_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_D_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_GFX_IH_GASKET_CTRL
+#define GFX_IMU_GFX_IH_GASKET_CTRL__SRSTB__SHIFT 0x0
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_LEVEL__SHIFT 0x10
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_OVERFLOW__SHIFT 0x14
+#define GFX_IMU_GFX_IH_GASKET_CTRL__SRSTB_MASK 0x00000001L
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_LEVEL_MASK 0x000F0000L
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_OVERFLOW_MASK 0x00100000L
+
+
+// addressBlock: gc_gfx_imu_gfx_imu_pspdec
+//GFX_IMU_RLC_BOOTLOADER_ADDR_HI
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_BOOTLOADER_ADDR_LO
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_LO__ADDR_LO__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_BOOTLOADER_SIZE
+#define GFX_IMU_RLC_BOOTLOADER_SIZE__SIZE__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_SIZE__SIZE_MASK 0x03FFFFFFL
+//GFX_IMU_I_RAM_ADDR
+#define GFX_IMU_I_RAM_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_I_RAM_ADDR__ADDR_MASK 0x0000FFFCL
+//GFX_IMU_I_RAM_DATA
+#define GFX_IMU_I_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_I_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gccacind
+//GC_CAC_ID
+#define GC_CAC_ID__CAC_BLOCK_ID__SHIFT 0x0
+#define GC_CAC_ID__CAC_SIGNAL_ID__SHIFT 0x6
+#define GC_CAC_ID__CAC_BLOCK_ID_MASK 0x0000003FL
+#define GC_CAC_ID__CAC_SIGNAL_ID_MASK 0x00003FC0L
+//GC_CAC_CNTL
+#define GC_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x0
+#define GC_CAC_CNTL__CAC_THRESHOLD_MASK 0x0000FFFFL
+//GC_CAC_ACC_CP0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP1
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP2
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA1
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA2
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA3
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA4
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA5
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER1
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER2
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER3
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER4
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER5
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER6
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER7
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER8
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER9
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML20
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML21
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML22
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML23
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML24
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER1
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER2
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER3
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER4
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS1
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS2
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS3
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS4
+#define GC_CAC_ACC_GDS4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE0
+#define GC_CAC_ACC_GE0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE1
+#define GC_CAC_ACC_GE1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE2
+#define GC_CAC_ACC_GE2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE3
+#define GC_CAC_ACC_GE3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE4
+#define GC_CAC_ACC_GE4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE5
+#define GC_CAC_ACC_GE5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE6
+#define GC_CAC_ACC_GE6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE7
+#define GC_CAC_ACC_GE7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE8
+#define GC_CAC_ACC_GE8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE9
+#define GC_CAC_ACC_GE9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE10
+#define GC_CAC_ACC_GE10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE11
+#define GC_CAC_ACC_GE11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE11__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE12
+#define GC_CAC_ACC_GE12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE12__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE13
+#define GC_CAC_ACC_GE13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE13__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE14
+#define GC_CAC_ACC_GE14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE14__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE15
+#define GC_CAC_ACC_GE15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE15__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE16
+#define GC_CAC_ACC_GE16__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE16__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE17
+#define GC_CAC_ACC_GE17__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE17__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE18
+#define GC_CAC_ACC_GE18__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE18__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE19
+#define GC_CAC_ACC_GE19__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE19__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE20
+#define GC_CAC_ACC_GE20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PMM0
+#define GC_CAC_ACC_PMM0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PMM0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C0
+#define GC_CAC_ACC_GL2C0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C1
+#define GC_CAC_ACC_GL2C1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C2
+#define GC_CAC_ACC_GL2C2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C3
+#define GC_CAC_ACC_GL2C3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C4
+#define GC_CAC_ACC_GL2C4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH0
+#define GC_CAC_ACC_PH0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH1
+#define GC_CAC_ACC_PH1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH2
+#define GC_CAC_ACC_PH2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH3
+#define GC_CAC_ACC_PH3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH4
+#define GC_CAC_ACC_PH4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH5
+#define GC_CAC_ACC_PH5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH6
+#define GC_CAC_ACC_PH6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH7
+#define GC_CAC_ACC_PH7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA0
+#define GC_CAC_ACC_SDMA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA1
+#define GC_CAC_ACC_SDMA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA2
+#define GC_CAC_ACC_SDMA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA3
+#define GC_CAC_ACC_SDMA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA4
+#define GC_CAC_ACC_SDMA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA5
+#define GC_CAC_ACC_SDMA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA6
+#define GC_CAC_ACC_SDMA6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA7
+#define GC_CAC_ACC_SDMA7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA8
+#define GC_CAC_ACC_SDMA8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA9
+#define GC_CAC_ACC_SDMA9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA10
+#define GC_CAC_ACC_SDMA10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA11
+#define GC_CAC_ACC_SDMA11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA11__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC0
+#define GC_CAC_ACC_CHC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC1
+#define GC_CAC_ACC_CHC1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC2
+#define GC_CAC_ACC_CHC2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS0
+#define GC_CAC_ACC_GUS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS1
+#define GC_CAC_ACC_GUS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS2
+#define GC_CAC_ACC_GUS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_RLC0
+#define GC_CAC_ACC_RLC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_RLC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL20
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL21
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL22
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL23
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL24
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//RELEASE_TO_STALL_LUT_1_8
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//RELEASE_TO_STALL_LUT_9_16
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//RELEASE_TO_STALL_LUT_17_20
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//STALL_TO_RELEASE_LUT_1_4
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//STALL_TO_RELEASE_LUT_5_7
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+//STALL_TO_PWRBRK_LUT_1_4
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_1_MASK 0x00000007L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_2_MASK 0x00000700L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_3_MASK 0x00070000L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_4_MASK 0x07000000L
+//STALL_TO_PWRBRK_LUT_5_7
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_5_MASK 0x00000007L
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_6_MASK 0x00000700L
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_7_MASK 0x00070000L
+//PWRBRK_STALL_TO_RELEASE_LUT_1_4
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//PWRBRK_STALL_TO_RELEASE_LUT_5_7
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+//PWRBRK_RELEASE_TO_STALL_LUT_1_8
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//PWRBRK_RELEASE_TO_STALL_LUT_9_16
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//PWRBRK_RELEASE_TO_STALL_LUT_17_20
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//FIXED_PATTERN_PERF_COUNTER_1
+#define FIXED_PATTERN_PERF_COUNTER_1__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_1__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_2
+#define FIXED_PATTERN_PERF_COUNTER_2__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_2__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_3
+#define FIXED_PATTERN_PERF_COUNTER_3__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_3__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_4
+#define FIXED_PATTERN_PERF_COUNTER_4__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_4__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_5
+#define FIXED_PATTERN_PERF_COUNTER_5__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_5__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_6
+#define FIXED_PATTERN_PERF_COUNTER_6__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_6__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_7
+#define FIXED_PATTERN_PERF_COUNTER_7__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_7__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_8
+#define FIXED_PATTERN_PERF_COUNTER_8__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_8__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_9
+#define FIXED_PATTERN_PERF_COUNTER_9__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_9__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_10
+#define FIXED_PATTERN_PERF_COUNTER_10__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_10__PERF_COUNTER_MASK 0x0001FFFFL
+//HW_LUT_UPDATE_STATUS
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_DONE__SHIFT 0x0
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR__SHIFT 0x1
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_STEP__SHIFT 0x2
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_DONE__SHIFT 0x5
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR__SHIFT 0x6
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_STEP__SHIFT 0x7
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_DONE__SHIFT 0xa
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR__SHIFT 0xb
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_STEP__SHIFT 0xc
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_DONE__SHIFT 0x11
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR__SHIFT 0x12
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_STEP__SHIFT 0x13
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_DONE__SHIFT 0x16
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR__SHIFT 0x17
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_STEP__SHIFT 0x18
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_DONE_MASK 0x00000001L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_MASK 0x00000002L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_STEP_MASK 0x0000001CL
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_DONE_MASK 0x00000020L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_MASK 0x00000040L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_STEP_MASK 0x00000380L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_DONE_MASK 0x00000400L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_MASK 0x00000800L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_STEP_MASK 0x0001F000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_DONE_MASK 0x00020000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_MASK 0x00040000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_STEP_MASK 0x00380000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_DONE_MASK 0x00400000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_MASK 0x00800000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_STEP_MASK 0x1F000000L
+
+
+// addressBlock: secacind
+//SE_CAC_ID
+#define SE_CAC_ID__CAC_BLOCK_ID__SHIFT 0x0
+#define SE_CAC_ID__CAC_SIGNAL_ID__SHIFT 0x6
+#define SE_CAC_ID__CAC_BLOCK_ID_MASK 0x0000003FL
+#define SE_CAC_ID__CAC_SIGNAL_ID_MASK 0x00003FC0L
+//SE_CAC_CNTL
+#define SE_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x0
+#define SE_CAC_CNTL__CAC_THRESHOLD_MASK 0x0000FFFFL
+
+
+// addressBlock: grtavfsind
+//RTAVFS_REG0
+#define RTAVFS_REG0__RTAVFSZONE0STARTCNT__SHIFT 0x0
+#define RTAVFS_REG0__RTAVFSZONE0STOPCNT__SHIFT 0x10
+#define RTAVFS_REG0__RTAVFSZONE0STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG0__RTAVFSZONE0STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG1
+#define RTAVFS_REG1__RTAVFSZONE1STARTCNT__SHIFT 0x0
+#define RTAVFS_REG1__RTAVFSZONE1STOPCNT__SHIFT 0x10
+#define RTAVFS_REG1__RTAVFSZONE1STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG1__RTAVFSZONE1STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG2
+#define RTAVFS_REG2__RTAVFSZONE2STARTCNT__SHIFT 0x0
+#define RTAVFS_REG2__RTAVFSZONE2STOPCNT__SHIFT 0x10
+#define RTAVFS_REG2__RTAVFSZONE2STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG2__RTAVFSZONE2STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG3
+#define RTAVFS_REG3__RTAVFSZONE3STARTCNT__SHIFT 0x0
+#define RTAVFS_REG3__RTAVFSZONE3STOPCNT__SHIFT 0x10
+#define RTAVFS_REG3__RTAVFSZONE3STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG3__RTAVFSZONE3STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG4
+#define RTAVFS_REG4__RTAVFSZONE4STARTCNT__SHIFT 0x0
+#define RTAVFS_REG4__RTAVFSZONE4STOPCNT__SHIFT 0x10
+#define RTAVFS_REG4__RTAVFSZONE4STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG4__RTAVFSZONE4STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG5
+#define RTAVFS_REG5__RTAVFSZONE0EN0__SHIFT 0x0
+#define RTAVFS_REG5__RTAVFSZONE0EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG6
+#define RTAVFS_REG6__RTAVFSZONE0EN1__SHIFT 0x0
+#define RTAVFS_REG6__RTAVFSZONE0EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG7
+#define RTAVFS_REG7__RTAVFSZONE1EN0__SHIFT 0x0
+#define RTAVFS_REG7__RTAVFSZONE1EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG8
+#define RTAVFS_REG8__RTAVFSZONE1EN1__SHIFT 0x0
+#define RTAVFS_REG8__RTAVFSZONE1EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG9
+#define RTAVFS_REG9__RTAVFSZONE2EN0__SHIFT 0x0
+#define RTAVFS_REG9__RTAVFSZONE2EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG10
+#define RTAVFS_REG10__RTAVFSZONE2EN1__SHIFT 0x0
+#define RTAVFS_REG10__RTAVFSZONE2EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG11
+#define RTAVFS_REG11__RTAVFSZONE3EN0__SHIFT 0x0
+#define RTAVFS_REG11__RTAVFSZONE3EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG12
+#define RTAVFS_REG12__RTAVFSZONE3EN1__SHIFT 0x0
+#define RTAVFS_REG12__RTAVFSZONE3EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG13
+#define RTAVFS_REG13__RTAVFSZONE4EN0__SHIFT 0x0
+#define RTAVFS_REG13__RTAVFSZONE4EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG14
+#define RTAVFS_REG14__RTAVFSZONE4EN1__SHIFT 0x0
+#define RTAVFS_REG14__RTAVFSZONE4EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG15
+#define RTAVFS_REG15__RTAVFSVF0FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG15__RTAVFSVF0VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG15__RTAVFSVF0FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG15__RTAVFSVF0VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG16
+#define RTAVFS_REG16__RTAVFSVF1FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG16__RTAVFSVF1VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG16__RTAVFSVF1FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG16__RTAVFSVF1VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG17
+#define RTAVFS_REG17__RTAVFSVF2FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG17__RTAVFSVF2VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG17__RTAVFSVF2FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG17__RTAVFSVF2VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG18
+#define RTAVFS_REG18__RTAVFSVF3FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG18__RTAVFSVF3VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG18__RTAVFSVF3FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG18__RTAVFSVF3VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG19
+#define RTAVFS_REG19__RTAVFSGB_ZONE0__SHIFT 0x0
+#define RTAVFS_REG19__RTAVFSGB_ZONE1__SHIFT 0x6
+#define RTAVFS_REG19__RTAVFSGB_ZONE2__SHIFT 0xc
+#define RTAVFS_REG19__RTAVFSGB_ZONE3__SHIFT 0x12
+#define RTAVFS_REG19__RTAVFSGB_ZONE4__SHIFT 0x19
+#define RTAVFS_REG19__RTAVFSGB_ZONE0_MASK 0x0000003FL
+#define RTAVFS_REG19__RTAVFSGB_ZONE1_MASK 0x00000FC0L
+#define RTAVFS_REG19__RTAVFSGB_ZONE2_MASK 0x0003F000L
+#define RTAVFS_REG19__RTAVFSGB_ZONE3_MASK 0x01FC0000L
+#define RTAVFS_REG19__RTAVFSGB_ZONE4_MASK 0xFE000000L
+//RTAVFS_REG20
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG20__RTAVFSZONE0RESERVED__SHIFT 0x12
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG20__RTAVFSZONE0RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG21
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG21__RTAVFSZONE1RESERVED__SHIFT 0x12
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG21__RTAVFSZONE1RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG22
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG22__RTAVFSZONE2RESERVED__SHIFT 0x12
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG22__RTAVFSZONE2RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG23
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG23__RTAVFSZONE3RESERVED__SHIFT 0x12
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG23__RTAVFSZONE3RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG24
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG24__RTAVFSZONE4RESERVED__SHIFT 0x12
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG24__RTAVFSZONE4RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG25
+#define RTAVFS_REG25__RTAVFSRESERVED0__SHIFT 0x0
+#define RTAVFS_REG25__RTAVFSRESERVED0_MASK 0xFFFFFFFFL
+//RTAVFS_REG26
+#define RTAVFS_REG26__RTAVFSRESERVED1__SHIFT 0x0
+#define RTAVFS_REG26__RTAVFSRESERVED1_MASK 0xFFFFFFFFL
+//RTAVFS_REG27
+#define RTAVFS_REG27__RTAVFSRESERVED2__SHIFT 0x0
+#define RTAVFS_REG27__RTAVFSRESERVED2_MASK 0xFFFFFFFFL
+//RTAVFS_REG28
+#define RTAVFS_REG28__RTAVFSZONE0INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG28__RTAVFSZONE1INTERCEPT__SHIFT 0x10
+#define RTAVFS_REG28__RTAVFSZONE0INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG28__RTAVFSZONE1INTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG29
+#define RTAVFS_REG29__RTAVFSZONE2INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG29__RTAVFSZONE3INTERCEPT__SHIFT 0x10
+#define RTAVFS_REG29__RTAVFSZONE2INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG29__RTAVFSZONE3INTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG30
+#define RTAVFS_REG30__RTAVFSZONE4INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG30__RTAVFSRESERVEDINTERCEPT__SHIFT 0x10
+#define RTAVFS_REG30__RTAVFSZONE4INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG30__RTAVFSRESERVEDINTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG31
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV0__SHIFT 0x0
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV1__SHIFT 0x2
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV2__SHIFT 0x4
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV3__SHIFT 0x6
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV4__SHIFT 0x8
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV5__SHIFT 0xa
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV6__SHIFT 0xc
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV7__SHIFT 0xe
+#define RTAVFS_REG31__RESERVED__SHIFT 0x10
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV0_MASK 0x00000003L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV1_MASK 0x0000000CL
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV2_MASK 0x00000030L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV3_MASK 0x000000C0L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV4_MASK 0x00000300L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV5_MASK 0x00000C00L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV6_MASK 0x00003000L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV7_MASK 0x0000C000L
+#define RTAVFS_REG31__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG32
+#define RTAVFS_REG32__RTAVFSFSMSTARTUPCNT__SHIFT 0x0
+#define RTAVFS_REG32__RESERVED__SHIFT 0x10
+#define RTAVFS_REG32__RTAVFSFSMSTARTUPCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG32__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG33
+#define RTAVFS_REG33__RTAVFSFSMIDLECNT__SHIFT 0x0
+#define RTAVFS_REG33__RESERVED__SHIFT 0x10
+#define RTAVFS_REG33__RTAVFSFSMIDLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG33__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG34
+#define RTAVFS_REG34__RTAVFSFSMRESETCPORIPPLECOUNTERSCNT__SHIFT 0x0
+#define RTAVFS_REG34__RESERVED__SHIFT 0x10
+#define RTAVFS_REG34__RTAVFSFSMRESETCPORIPPLECOUNTERSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG34__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG35
+#define RTAVFS_REG35__RTAVFSFSMSTARTCPOSCNT__SHIFT 0x0
+#define RTAVFS_REG35__RESERVED__SHIFT 0x10
+#define RTAVFS_REG35__RTAVFSFSMSTARTCPOSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG35__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG36
+#define RTAVFS_REG36__RTAVFSFSMSTARTRIPPLECOUNTERSCNT__SHIFT 0x0
+#define RTAVFS_REG36__RESERVED__SHIFT 0x10
+#define RTAVFS_REG36__RTAVFSFSMSTARTRIPPLECOUNTERSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG36__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG37
+#define RTAVFS_REG37__RTAVFSFSMRIPPLECOUNTERSDONECNT__SHIFT 0x0
+#define RTAVFS_REG37__RESERVED__SHIFT 0x10
+#define RTAVFS_REG37__RTAVFSFSMRIPPLECOUNTERSDONECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG37__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG38
+#define RTAVFS_REG38__RTAVFSFSMCPOFINALRESULTREADYCNT__SHIFT 0x0
+#define RTAVFS_REG38__RESERVED__SHIFT 0x10
+#define RTAVFS_REG38__RTAVFSFSMCPOFINALRESULTREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG38__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG39
+#define RTAVFS_REG39__RTAVFSFSMVOLTCODEREADYCNT__SHIFT 0x0
+#define RTAVFS_REG39__RESERVED__SHIFT 0x10
+#define RTAVFS_REG39__RTAVFSFSMVOLTCODEREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG39__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG40
+#define RTAVFS_REG40__RTAVFSFSMTARGETVOLTAGEREADYCNT__SHIFT 0x0
+#define RTAVFS_REG40__RESERVED__SHIFT 0x10
+#define RTAVFS_REG40__RTAVFSFSMTARGETVOLTAGEREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG40__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG41
+#define RTAVFS_REG41__RTAVFSFSMSTOPCPOSCNT__SHIFT 0x0
+#define RTAVFS_REG41__RESERVED__SHIFT 0x10
+#define RTAVFS_REG41__RTAVFSFSMSTOPCPOSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG41__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG42
+#define RTAVFS_REG42__RTAVFSFSMWAITFORACKCNT__SHIFT 0x0
+#define RTAVFS_REG42__RESERVED__SHIFT 0x10
+#define RTAVFS_REG42__RTAVFSFSMWAITFORACKCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG42__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG43
+#define RTAVFS_REG43__RTAVFSKP0__SHIFT 0x0
+#define RTAVFS_REG43__RTAVFSKP1__SHIFT 0x4
+#define RTAVFS_REG43__RTAVFSKP2__SHIFT 0x8
+#define RTAVFS_REG43__RTAVFSKP3__SHIFT 0xc
+#define RTAVFS_REG43__RTAVFSKI0__SHIFT 0x10
+#define RTAVFS_REG43__RTAVFSKI1__SHIFT 0x14
+#define RTAVFS_REG43__RTAVFSKI2__SHIFT 0x18
+#define RTAVFS_REG43__RTAVFSKI3__SHIFT 0x1c
+#define RTAVFS_REG43__RTAVFSKP0_MASK 0x0000000FL
+#define RTAVFS_REG43__RTAVFSKP1_MASK 0x000000F0L
+#define RTAVFS_REG43__RTAVFSKP2_MASK 0x00000F00L
+#define RTAVFS_REG43__RTAVFSKP3_MASK 0x0000F000L
+#define RTAVFS_REG43__RTAVFSKI0_MASK 0x000F0000L
+#define RTAVFS_REG43__RTAVFSKI1_MASK 0x00F00000L
+#define RTAVFS_REG43__RTAVFSKI2_MASK 0x0F000000L
+#define RTAVFS_REG43__RTAVFSKI3_MASK 0xF0000000L
+//RTAVFS_REG44
+#define RTAVFS_REG44__RTAVFSV1__SHIFT 0x0
+#define RTAVFS_REG44__RTAVFSV2__SHIFT 0xa
+#define RTAVFS_REG44__RTAVFSV3__SHIFT 0x14
+#define RTAVFS_REG44__RTAVFSUSEBINARYSEARCH__SHIFT 0x1e
+#define RTAVFS_REG44__RTAVFSVOLTCODEHWCAL__SHIFT 0x1f
+#define RTAVFS_REG44__RTAVFSV1_MASK 0x000003FFL
+#define RTAVFS_REG44__RTAVFSV2_MASK 0x000FFC00L
+#define RTAVFS_REG44__RTAVFSV3_MASK 0x3FF00000L
+#define RTAVFS_REG44__RTAVFSUSEBINARYSEARCH_MASK 0x40000000L
+#define RTAVFS_REG44__RTAVFSVOLTCODEHWCAL_MASK 0x80000000L
+//RTAVFS_REG45
+#define RTAVFS_REG45__RTAVFSVRBLEEDCNTRL__SHIFT 0x0
+#define RTAVFS_REG45__RTAVFSVRENABLE__SHIFT 0x1
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDE__SHIFT 0x2
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDESEL__SHIFT 0xc
+#define RTAVFS_REG45__RTAVFSLOWPWREN__SHIFT 0xd
+#define RTAVFS_REG45__RTAVFSUREGENABLE__SHIFT 0xe
+#define RTAVFS_REG45__RTAVFSBGENABLE__SHIFT 0xf
+#define RTAVFS_REG45__RTAVFSENABLEVDDRETSENSING__SHIFT 0x10
+#define RTAVFS_REG45__RESERVED__SHIFT 0x11
+#define RTAVFS_REG45__RTAVFSVRBLEEDCNTRL_MASK 0x00000001L
+#define RTAVFS_REG45__RTAVFSVRENABLE_MASK 0x00000002L
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDE_MASK 0x00000FFCL
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDESEL_MASK 0x00001000L
+#define RTAVFS_REG45__RTAVFSLOWPWREN_MASK 0x00002000L
+#define RTAVFS_REG45__RTAVFSUREGENABLE_MASK 0x00004000L
+#define RTAVFS_REG45__RTAVFSBGENABLE_MASK 0x00008000L
+#define RTAVFS_REG45__RTAVFSENABLEVDDRETSENSING_MASK 0x00010000L
+#define RTAVFS_REG45__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG46
+#define RTAVFS_REG46__RTAVFSKP__SHIFT 0x0
+#define RTAVFS_REG46__RTAVFSKI__SHIFT 0x4
+#define RTAVFS_REG46__RTAVFSPIENABLEANTIWINDUP__SHIFT 0x8
+#define RTAVFS_REG46__RTAVFSPISHIFT__SHIFT 0x9
+#define RTAVFS_REG46__RTAVFSPIERREN__SHIFT 0xd
+#define RTAVFS_REG46__RTAVFSPISHIFTOUT__SHIFT 0xe
+#define RTAVFS_REG46__RTAVFSUSELUTKPKI__SHIFT 0x12
+#define RTAVFS_REG46__RESERVED__SHIFT 0x13
+#define RTAVFS_REG46__RTAVFSKP_MASK 0x0000000FL
+#define RTAVFS_REG46__RTAVFSKI_MASK 0x000000F0L
+#define RTAVFS_REG46__RTAVFSPIENABLEANTIWINDUP_MASK 0x00000100L
+#define RTAVFS_REG46__RTAVFSPISHIFT_MASK 0x00001E00L
+#define RTAVFS_REG46__RTAVFSPIERREN_MASK 0x00002000L
+#define RTAVFS_REG46__RTAVFSPISHIFTOUT_MASK 0x0003C000L
+#define RTAVFS_REG46__RTAVFSUSELUTKPKI_MASK 0x00040000L
+#define RTAVFS_REG46__RESERVED_MASK 0xFFF80000L
+//RTAVFS_REG47
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMIN__SHIFT 0x0
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMAX__SHIFT 0xa
+#define RTAVFS_REG47__RTAVFSPIERRMASK__SHIFT 0x14
+#define RTAVFS_REG47__RTAVFSFORCEDISABLEPI__SHIFT 0x1b
+#define RTAVFS_REG47__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMIN_MASK 0x000003FFL
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMAX_MASK 0x000FFC00L
+#define RTAVFS_REG47__RTAVFSPIERRMASK_MASK 0x07F00000L
+#define RTAVFS_REG47__RTAVFSFORCEDISABLEPI_MASK 0x08000000L
+#define RTAVFS_REG47__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG48
+#define RTAVFS_REG48__RTAVFSPILOOPNITERATIONS__SHIFT 0x0
+#define RTAVFS_REG48__RTAVFSPIERRTHRESHOLD__SHIFT 0x10
+#define RTAVFS_REG48__RTAVFSPILOOPNITERATIONS_MASK 0x0000FFFFL
+#define RTAVFS_REG48__RTAVFSPIERRTHRESHOLD_MASK 0xFFFF0000L
+//RTAVFS_REG49
+#define RTAVFS_REG49__RTAVFSPSMRSTAVGVDD__SHIFT 0x0
+#define RTAVFS_REG49__RTAVFSPSMMEASMAXVDD__SHIFT 0x1
+#define RTAVFS_REG49__RTAVFSPSMCLKDIVVDD__SHIFT 0x2
+#define RTAVFS_REG49__RTAVFSPSMAVGDIVVDD__SHIFT 0x4
+#define RTAVFS_REG49__RTAVFSPSMOSCENVDD__SHIFT 0xa
+#define RTAVFS_REG49__RTAVFSPSMAVGENVDD__SHIFT 0xb
+#define RTAVFS_REG49__RTAVFSPSMRSTMINMAXVDD__SHIFT 0xc
+#define RTAVFS_REG49__RESERVED__SHIFT 0xd
+#define RTAVFS_REG49__RTAVFSPSMRSTAVGVDD_MASK 0x00000001L
+#define RTAVFS_REG49__RTAVFSPSMMEASMAXVDD_MASK 0x00000002L
+#define RTAVFS_REG49__RTAVFSPSMCLKDIVVDD_MASK 0x0000000CL
+#define RTAVFS_REG49__RTAVFSPSMAVGDIVVDD_MASK 0x000003F0L
+#define RTAVFS_REG49__RTAVFSPSMOSCENVDD_MASK 0x00000400L
+#define RTAVFS_REG49__RTAVFSPSMAVGENVDD_MASK 0x00000800L
+#define RTAVFS_REG49__RTAVFSPSMRSTMINMAXVDD_MASK 0x00001000L
+#define RTAVFS_REG49__RESERVED_MASK 0xFFFFE000L
+//RTAVFS_REG50
+#define RTAVFS_REG50__RTAVFSPSMRSTAVGVREG__SHIFT 0x0
+#define RTAVFS_REG50__RTAVFSPSMMEASMAXVREG__SHIFT 0x1
+#define RTAVFS_REG50__RTAVFSPSMCLKDIVVREG__SHIFT 0x2
+#define RTAVFS_REG50__RTAVFSPSMAVGDIVVREG__SHIFT 0x4
+#define RTAVFS_REG50__RTAVFSPSMOSCENVREG__SHIFT 0xa
+#define RTAVFS_REG50__RTAVFSPSMAVGENVREG__SHIFT 0xb
+#define RTAVFS_REG50__RTAVFSPSMRSTMINMAXVREG__SHIFT 0xc
+#define RTAVFS_REG50__RESERVED__SHIFT 0xd
+#define RTAVFS_REG50__RTAVFSPSMRSTAVGVREG_MASK 0x00000001L
+#define RTAVFS_REG50__RTAVFSPSMMEASMAXVREG_MASK 0x00000002L
+#define RTAVFS_REG50__RTAVFSPSMCLKDIVVREG_MASK 0x0000000CL
+#define RTAVFS_REG50__RTAVFSPSMAVGDIVVREG_MASK 0x000003F0L
+#define RTAVFS_REG50__RTAVFSPSMOSCENVREG_MASK 0x00000400L
+#define RTAVFS_REG50__RTAVFSPSMAVGENVREG_MASK 0x00000800L
+#define RTAVFS_REG50__RTAVFSPSMRSTMINMAXVREG_MASK 0x00001000L
+#define RTAVFS_REG50__RESERVED_MASK 0xFFFFE000L
+//RTAVFS_REG51
+#define RTAVFS_REG51__RTAVFSAVFSENABLE__SHIFT 0x0
+#define RTAVFS_REG51__RTAVFSCPOTURNONDELAY__SHIFT 0x1
+#define RTAVFS_REG51__RTAVFSSELECTMINMAX__SHIFT 0x5
+#define RTAVFS_REG51__RTAVFSSELECTPERPATHSCALING__SHIFT 0x6
+#define RTAVFS_REG51__RTAVFSADDVOLTCODEGUARDBAND__SHIFT 0x7
+#define RTAVFS_REG51__RTAVFSSENDAVGPSMTOPSMOUT__SHIFT 0x8
+#define RTAVFS_REG51__RTAVFSUPDATEANCHORVOLTAGES__SHIFT 0x9
+#define RTAVFS_REG51__RTAVFSSENDVDDTOPSMOUT__SHIFT 0xa
+#define RTAVFS_REG51__RESERVED__SHIFT 0xb
+#define RTAVFS_REG51__RTAVFSAVFSENABLE_MASK 0x00000001L
+#define RTAVFS_REG51__RTAVFSCPOTURNONDELAY_MASK 0x0000001EL
+#define RTAVFS_REG51__RTAVFSSELECTMINMAX_MASK 0x00000020L
+#define RTAVFS_REG51__RTAVFSSELECTPERPATHSCALING_MASK 0x00000040L
+#define RTAVFS_REG51__RTAVFSADDVOLTCODEGUARDBAND_MASK 0x00000080L
+#define RTAVFS_REG51__RTAVFSSENDAVGPSMTOPSMOUT_MASK 0x00000100L
+#define RTAVFS_REG51__RTAVFSUPDATEANCHORVOLTAGES_MASK 0x00000200L
+#define RTAVFS_REG51__RTAVFSSENDVDDTOPSMOUT_MASK 0x00000400L
+#define RTAVFS_REG51__RESERVED_MASK 0xFFFFF800L
+//RTAVFS_REG52
+#define RTAVFS_REG52__RTAVFSMINMAXPSMVDD__SHIFT 0x0
+#define RTAVFS_REG52__RTAVFSAVGPSMVDD__SHIFT 0xe
+#define RTAVFS_REG52__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG52__RTAVFSMINMAXPSMVDD_MASK 0x00003FFFL
+#define RTAVFS_REG52__RTAVFSAVGPSMVDD_MASK 0x0FFFC000L
+#define RTAVFS_REG52__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG53
+#define RTAVFS_REG53__RTAVFSMINMAXPSMVREG__SHIFT 0x0
+#define RTAVFS_REG53__RTAVFSAVGPSMVREG__SHIFT 0xe
+#define RTAVFS_REG53__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG53__RTAVFSMINMAXPSMVREG_MASK 0x00003FFFL
+#define RTAVFS_REG53__RTAVFSAVGPSMVREG_MASK 0x0FFFC000L
+#define RTAVFS_REG53__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG54
+#define RTAVFS_REG54__RTAVFSCPO0_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG54__RTAVFSCPO0_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG54__RTAVFSCPO0_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG54__RTAVFSCPO0_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG55
+#define RTAVFS_REG55__RTAVFSCPO1_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG55__RTAVFSCPO1_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG55__RTAVFSCPO1_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG55__RTAVFSCPO1_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG56
+#define RTAVFS_REG56__RTAVFSCPO2_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG56__RTAVFSCPO2_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG56__RTAVFSCPO2_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG56__RTAVFSCPO2_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG57
+#define RTAVFS_REG57__RTAVFSCPO3_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG57__RTAVFSCPO3_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG57__RTAVFSCPO3_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG57__RTAVFSCPO3_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG58
+#define RTAVFS_REG58__RTAVFSCPO4_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG58__RTAVFSCPO4_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG58__RTAVFSCPO4_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG58__RTAVFSCPO4_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG59
+#define RTAVFS_REG59__RTAVFSCPO5_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG59__RTAVFSCPO5_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG59__RTAVFSCPO5_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG59__RTAVFSCPO5_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG60
+#define RTAVFS_REG60__RTAVFSCPO6_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG60__RTAVFSCPO6_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG60__RTAVFSCPO6_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG60__RTAVFSCPO6_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG61
+#define RTAVFS_REG61__RTAVFSCPO7_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG61__RTAVFSCPO7_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG61__RTAVFSCPO7_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG61__RTAVFSCPO7_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG62
+#define RTAVFS_REG62__RTAVFSCPO8_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG62__RTAVFSCPO8_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG62__RTAVFSCPO8_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG62__RTAVFSCPO8_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG63
+#define RTAVFS_REG63__RTAVFSCPO9_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG63__RTAVFSCPO9_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG63__RTAVFSCPO9_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG63__RTAVFSCPO9_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG64
+#define RTAVFS_REG64__RTAVFSCPO10_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG64__RTAVFSCPO10_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG64__RTAVFSCPO10_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG64__RTAVFSCPO10_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG65
+#define RTAVFS_REG65__RTAVFSCPO11_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG65__RTAVFSCPO11_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG65__RTAVFSCPO11_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG65__RTAVFSCPO11_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG66
+#define RTAVFS_REG66__RTAVFSCPO12_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG66__RTAVFSCPO12_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG66__RTAVFSCPO12_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG66__RTAVFSCPO12_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG67
+#define RTAVFS_REG67__RTAVFSCPO13_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG67__RTAVFSCPO13_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG67__RTAVFSCPO13_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG67__RTAVFSCPO13_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG68
+#define RTAVFS_REG68__RTAVFSCPO14_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG68__RTAVFSCPO14_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG68__RTAVFSCPO14_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG68__RTAVFSCPO14_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG69
+#define RTAVFS_REG69__RTAVFSCPO15_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG69__RTAVFSCPO15_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG69__RTAVFSCPO15_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG69__RTAVFSCPO15_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG70
+#define RTAVFS_REG70__RTAVFSCPO16_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG70__RTAVFSCPO16_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG70__RTAVFSCPO16_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG70__RTAVFSCPO16_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG71
+#define RTAVFS_REG71__RTAVFSCPO17_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG71__RTAVFSCPO17_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG71__RTAVFSCPO17_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG71__RTAVFSCPO17_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG72
+#define RTAVFS_REG72__RTAVFSCPO18_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG72__RTAVFSCPO18_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG72__RTAVFSCPO18_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG72__RTAVFSCPO18_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG73
+#define RTAVFS_REG73__RTAVFSCPO19_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG73__RTAVFSCPO19_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG73__RTAVFSCPO19_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG73__RTAVFSCPO19_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG74
+#define RTAVFS_REG74__RTAVFSCPO20_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG74__RTAVFSCPO20_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG74__RTAVFSCPO20_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG74__RTAVFSCPO20_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG75
+#define RTAVFS_REG75__RTAVFSCPO21_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG75__RTAVFSCPO21_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG75__RTAVFSCPO21_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG75__RTAVFSCPO21_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG76
+#define RTAVFS_REG76__RTAVFSCPO22_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG76__RTAVFSCPO22_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG76__RTAVFSCPO22_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG76__RTAVFSCPO22_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG77
+#define RTAVFS_REG77__RTAVFSCPO23_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG77__RTAVFSCPO23_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG77__RTAVFSCPO23_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG77__RTAVFSCPO23_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG78
+#define RTAVFS_REG78__RTAVFSCPO24_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG78__RTAVFSCPO24_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG78__RTAVFSCPO24_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG78__RTAVFSCPO24_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG79
+#define RTAVFS_REG79__RTAVFSCPO25_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG79__RTAVFSCPO25_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG79__RTAVFSCPO25_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG79__RTAVFSCPO25_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG80
+#define RTAVFS_REG80__RTAVFSCPO26_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG80__RTAVFSCPO26_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG80__RTAVFSCPO26_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG80__RTAVFSCPO26_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG81
+#define RTAVFS_REG81__RTAVFSCPO27_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG81__RTAVFSCPO27_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG81__RTAVFSCPO27_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG81__RTAVFSCPO27_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG82
+#define RTAVFS_REG82__RTAVFSCPO28_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG82__RTAVFSCPO28_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG82__RTAVFSCPO28_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG82__RTAVFSCPO28_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG83
+#define RTAVFS_REG83__RTAVFSCPO29_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG83__RTAVFSCPO29_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG83__RTAVFSCPO29_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG83__RTAVFSCPO29_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG84
+#define RTAVFS_REG84__RTAVFSCPO30_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG84__RTAVFSCPO30_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG84__RTAVFSCPO30_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG84__RTAVFSCPO30_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG85
+#define RTAVFS_REG85__RTAVFSCPO31_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG85__RTAVFSCPO31_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG85__RTAVFSCPO31_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG85__RTAVFSCPO31_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG86
+#define RTAVFS_REG86__RTAVFSCPO32_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG86__RTAVFSCPO32_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG86__RTAVFSCPO32_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG86__RTAVFSCPO32_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG87
+#define RTAVFS_REG87__RTAVFSCPO33_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG87__RTAVFSCPO33_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG87__RTAVFSCPO33_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG87__RTAVFSCPO33_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG88
+#define RTAVFS_REG88__RTAVFSCPO34_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG88__RTAVFSCPO34_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG88__RTAVFSCPO34_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG88__RTAVFSCPO34_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG89
+#define RTAVFS_REG89__RTAVFSCPO35_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG89__RTAVFSCPO35_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG89__RTAVFSCPO35_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG89__RTAVFSCPO35_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG90
+#define RTAVFS_REG90__RTAVFSCPO36_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG90__RTAVFSCPO36_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG90__RTAVFSCPO36_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG90__RTAVFSCPO36_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG91
+#define RTAVFS_REG91__RTAVFSCPO37_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG91__RTAVFSCPO37_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG91__RTAVFSCPO37_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG91__RTAVFSCPO37_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG92
+#define RTAVFS_REG92__RTAVFSCPO38_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG92__RTAVFSCPO38_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG92__RTAVFSCPO38_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG92__RTAVFSCPO38_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG93
+#define RTAVFS_REG93__RTAVFSCPO39_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG93__RTAVFSCPO39_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG93__RTAVFSCPO39_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG93__RTAVFSCPO39_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG94
+#define RTAVFS_REG94__RTAVFSCPO40_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG94__RTAVFSCPO40_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG94__RTAVFSCPO40_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG94__RTAVFSCPO40_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG95
+#define RTAVFS_REG95__RTAVFSCPO41_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG95__RTAVFSCPO41_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG95__RTAVFSCPO41_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG95__RTAVFSCPO41_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG96
+#define RTAVFS_REG96__RTAVFSCPO42_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG96__RTAVFSCPO42_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG96__RTAVFSCPO42_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG96__RTAVFSCPO42_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG97
+#define RTAVFS_REG97__RTAVFSCPO43_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG97__RTAVFSCPO43_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG97__RTAVFSCPO43_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG97__RTAVFSCPO43_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG98
+#define RTAVFS_REG98__RTAVFSCPO44_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG98__RTAVFSCPO44_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG98__RTAVFSCPO44_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG98__RTAVFSCPO44_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG99
+#define RTAVFS_REG99__RTAVFSCPO45_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG99__RTAVFSCPO45_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG99__RTAVFSCPO45_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG99__RTAVFSCPO45_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG100
+#define RTAVFS_REG100__RTAVFSCPO46_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG100__RTAVFSCPO46_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG100__RTAVFSCPO46_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG100__RTAVFSCPO46_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG101
+#define RTAVFS_REG101__RTAVFSCPO47_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG101__RTAVFSCPO47_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG101__RTAVFSCPO47_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG101__RTAVFSCPO47_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG102
+#define RTAVFS_REG102__RTAVFSCPO48_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG102__RTAVFSCPO48_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG102__RTAVFSCPO48_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG102__RTAVFSCPO48_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG103
+#define RTAVFS_REG103__RTAVFSCPO49_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG103__RTAVFSCPO49_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG103__RTAVFSCPO49_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG103__RTAVFSCPO49_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG104
+#define RTAVFS_REG104__RTAVFSCPO50_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG104__RTAVFSCPO50_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG104__RTAVFSCPO50_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG104__RTAVFSCPO50_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG105
+#define RTAVFS_REG105__RTAVFSCPO51_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG105__RTAVFSCPO51_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG105__RTAVFSCPO51_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG105__RTAVFSCPO51_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG106
+#define RTAVFS_REG106__RTAVFSCPO52_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG106__RTAVFSCPO52_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG106__RTAVFSCPO52_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG106__RTAVFSCPO52_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG107
+#define RTAVFS_REG107__RTAVFSCPO53_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG107__RTAVFSCPO53_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG107__RTAVFSCPO53_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG107__RTAVFSCPO53_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG108
+#define RTAVFS_REG108__RTAVFSCPO54_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG108__RTAVFSCPO54_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG108__RTAVFSCPO54_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG108__RTAVFSCPO54_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG109
+#define RTAVFS_REG109__RTAVFSCPO55_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG109__RTAVFSCPO55_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG109__RTAVFSCPO55_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG109__RTAVFSCPO55_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG110
+#define RTAVFS_REG110__RTAVFSCPO56_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG110__RTAVFSCPO56_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG110__RTAVFSCPO56_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG110__RTAVFSCPO56_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG111
+#define RTAVFS_REG111__RTAVFSCPO57_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG111__RTAVFSCPO57_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG111__RTAVFSCPO57_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG111__RTAVFSCPO57_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG112
+#define RTAVFS_REG112__RTAVFSCPO58_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG112__RTAVFSCPO58_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG112__RTAVFSCPO58_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG112__RTAVFSCPO58_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG113
+#define RTAVFS_REG113__RTAVFSCPO59_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG113__RTAVFSCPO59_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG113__RTAVFSCPO59_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG113__RTAVFSCPO59_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG114
+#define RTAVFS_REG114__RTAVFSCPO60_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG114__RTAVFSCPO60_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG114__RTAVFSCPO60_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG114__RTAVFSCPO60_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG115
+#define RTAVFS_REG115__RTAVFSCPO61_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG115__RTAVFSCPO61_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG115__RTAVFSCPO61_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG115__RTAVFSCPO61_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG116
+#define RTAVFS_REG116__RTAVFSCPO62_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG116__RTAVFSCPO62_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG116__RTAVFSCPO62_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG116__RTAVFSCPO62_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG117
+#define RTAVFS_REG117__RTAVFSCPO63_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG117__RTAVFSCPO63_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG117__RTAVFSCPO63_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG117__RTAVFSCPO63_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG118
+#define RTAVFS_REG118__RTAVFSCPOEN0__SHIFT 0x0
+#define RTAVFS_REG118__RTAVFSCPOEN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG119
+#define RTAVFS_REG119__RTAVFSCPOEN1__SHIFT 0x0
+#define RTAVFS_REG119__RTAVFSCPOEN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG120
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG120__RTAVFSCPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG120__RESERVED__SHIFT 0x12
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG120__RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG121
+#define RTAVFS_REG121__RTAVFSZONE0INUSE__SHIFT 0x0
+#define RTAVFS_REG121__RTAVFSZONE1INUSE__SHIFT 0x1
+#define RTAVFS_REG121__RTAVFSZONE2INUSE__SHIFT 0x2
+#define RTAVFS_REG121__RTAVFSZONE3INUSE__SHIFT 0x3
+#define RTAVFS_REG121__RTAVFSZONE4INUSE__SHIFT 0x4
+#define RTAVFS_REG121__RTAVFSRESERVED__SHIFT 0x5
+#define RTAVFS_REG121__RTAVFSERRORCODE__SHIFT 0x1c
+#define RTAVFS_REG121__RTAVFSZONE0INUSE_MASK 0x00000001L
+#define RTAVFS_REG121__RTAVFSZONE1INUSE_MASK 0x00000002L
+#define RTAVFS_REG121__RTAVFSZONE2INUSE_MASK 0x00000004L
+#define RTAVFS_REG121__RTAVFSZONE3INUSE_MASK 0x00000008L
+#define RTAVFS_REG121__RTAVFSZONE4INUSE_MASK 0x00000010L
+#define RTAVFS_REG121__RTAVFSRESERVED_MASK 0x0FFFFFE0L
+#define RTAVFS_REG121__RTAVFSERRORCODE_MASK 0xF0000000L
+//RTAVFS_REG122
+#define RTAVFS_REG122__RTAVFSCPO0_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG122__RESERVED__SHIFT 0x10
+#define RTAVFS_REG122__RTAVFSCPO0_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG122__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG123
+#define RTAVFS_REG123__RTAVFSCPO1_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG123__RESERVED__SHIFT 0x10
+#define RTAVFS_REG123__RTAVFSCPO1_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG123__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG124
+#define RTAVFS_REG124__RTAVFSCPO2_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG124__RESERVED__SHIFT 0x10
+#define RTAVFS_REG124__RTAVFSCPO2_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG124__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG125
+#define RTAVFS_REG125__RTAVFSCPO3_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG125__RESERVED__SHIFT 0x10
+#define RTAVFS_REG125__RTAVFSCPO3_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG125__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG126
+#define RTAVFS_REG126__RTAVFSCPO4_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG126__RESERVED__SHIFT 0x10
+#define RTAVFS_REG126__RTAVFSCPO4_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG126__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG127
+#define RTAVFS_REG127__RTAVFSCPO5_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG127__RESERVED__SHIFT 0x10
+#define RTAVFS_REG127__RTAVFSCPO5_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG127__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG128
+#define RTAVFS_REG128__RTAVFSCPO6_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG128__RESERVED__SHIFT 0x10
+#define RTAVFS_REG128__RTAVFSCPO6_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG128__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG129
+#define RTAVFS_REG129__RTAVFSCPO7_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG129__RESERVED__SHIFT 0x10
+#define RTAVFS_REG129__RTAVFSCPO7_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG129__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG130
+#define RTAVFS_REG130__RTAVFSCPO8_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG130__RESERVED__SHIFT 0x10
+#define RTAVFS_REG130__RTAVFSCPO8_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG130__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG131
+#define RTAVFS_REG131__RTAVFSCPO9_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG131__RESERVED__SHIFT 0x10
+#define RTAVFS_REG131__RTAVFSCPO9_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG131__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG132
+#define RTAVFS_REG132__RTAVFSCPO10_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG132__RESERVED__SHIFT 0x10
+#define RTAVFS_REG132__RTAVFSCPO10_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG132__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG133
+#define RTAVFS_REG133__RTAVFSCPO11_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG133__RESERVED__SHIFT 0x10
+#define RTAVFS_REG133__RTAVFSCPO11_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG133__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG134
+#define RTAVFS_REG134__RTAVFSCPO12_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG134__RESERVED__SHIFT 0x10
+#define RTAVFS_REG134__RTAVFSCPO12_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG134__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG135
+#define RTAVFS_REG135__RTAVFSCPO13_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG135__RESERVED__SHIFT 0x10
+#define RTAVFS_REG135__RTAVFSCPO13_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG135__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG136
+#define RTAVFS_REG136__RTAVFSCPO14_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG136__RESERVED__SHIFT 0x10
+#define RTAVFS_REG136__RTAVFSCPO14_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG136__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG137
+#define RTAVFS_REG137__RTAVFSCPO15_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG137__RESERVED__SHIFT 0x10
+#define RTAVFS_REG137__RTAVFSCPO15_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG137__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG138
+#define RTAVFS_REG138__RTAVFSCPO16_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG138__RESERVED__SHIFT 0x10
+#define RTAVFS_REG138__RTAVFSCPO16_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG138__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG139
+#define RTAVFS_REG139__RTAVFSCPO17_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG139__RESERVED__SHIFT 0x10
+#define RTAVFS_REG139__RTAVFSCPO17_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG139__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG140
+#define RTAVFS_REG140__RTAVFSCPO18_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG140__RESERVED__SHIFT 0x10
+#define RTAVFS_REG140__RTAVFSCPO18_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG140__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG141
+#define RTAVFS_REG141__RTAVFSCPO19_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG141__RESERVED__SHIFT 0x10
+#define RTAVFS_REG141__RTAVFSCPO19_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG141__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG142
+#define RTAVFS_REG142__RTAVFSCPO20_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG142__RESERVED__SHIFT 0x10
+#define RTAVFS_REG142__RTAVFSCPO20_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG142__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG143
+#define RTAVFS_REG143__RTAVFSCPO21_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG143__RESERVED__SHIFT 0x10
+#define RTAVFS_REG143__RTAVFSCPO21_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG143__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG144
+#define RTAVFS_REG144__RTAVFSCPO22_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG144__RESERVED__SHIFT 0x10
+#define RTAVFS_REG144__RTAVFSCPO22_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG144__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG145
+#define RTAVFS_REG145__RTAVFSCPO23_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG145__RESERVED__SHIFT 0x10
+#define RTAVFS_REG145__RTAVFSCPO23_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG145__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG146
+#define RTAVFS_REG146__RTAVFSCPO24_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG146__RESERVED__SHIFT 0x10
+#define RTAVFS_REG146__RTAVFSCPO24_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG146__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG147
+#define RTAVFS_REG147__RTAVFSCPO25_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG147__RESERVED__SHIFT 0x10
+#define RTAVFS_REG147__RTAVFSCPO25_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG147__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG148
+#define RTAVFS_REG148__RTAVFSCPO26_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG148__RESERVED__SHIFT 0x10
+#define RTAVFS_REG148__RTAVFSCPO26_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG148__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG149
+#define RTAVFS_REG149__RTAVFSCPO27_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG149__RESERVED__SHIFT 0x10
+#define RTAVFS_REG149__RTAVFSCPO27_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG149__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG150
+#define RTAVFS_REG150__RTAVFSCPO28_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG150__RESERVED__SHIFT 0x10
+#define RTAVFS_REG150__RTAVFSCPO28_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG150__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG151
+#define RTAVFS_REG151__RTAVFSCPO29_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG151__RESERVED__SHIFT 0x10
+#define RTAVFS_REG151__RTAVFSCPO29_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG151__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG152
+#define RTAVFS_REG152__RTAVFSCPO30_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG152__RESERVED__SHIFT 0x10
+#define RTAVFS_REG152__RTAVFSCPO30_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG152__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG153
+#define RTAVFS_REG153__RTAVFSCPO31_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG153__RESERVED__SHIFT 0x10
+#define RTAVFS_REG153__RTAVFSCPO31_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG153__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG154
+#define RTAVFS_REG154__RTAVFSCPO32_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG154__RESERVED__SHIFT 0x10
+#define RTAVFS_REG154__RTAVFSCPO32_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG154__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG155
+#define RTAVFS_REG155__RTAVFSCPO33_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG155__RESERVED__SHIFT 0x10
+#define RTAVFS_REG155__RTAVFSCPO33_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG155__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG156
+#define RTAVFS_REG156__RTAVFSCPO34_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG156__RESERVED__SHIFT 0x10
+#define RTAVFS_REG156__RTAVFSCPO34_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG156__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG157
+#define RTAVFS_REG157__RTAVFSCPO35_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG157__RESERVED__SHIFT 0x10
+#define RTAVFS_REG157__RTAVFSCPO35_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG157__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG158
+#define RTAVFS_REG158__RTAVFSCPO36_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG158__RESERVED__SHIFT 0x10
+#define RTAVFS_REG158__RTAVFSCPO36_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG158__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG159
+#define RTAVFS_REG159__RTAVFSCPO37_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG159__RESERVED__SHIFT 0x10
+#define RTAVFS_REG159__RTAVFSCPO37_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG159__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG160
+#define RTAVFS_REG160__RTAVFSCPO38_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG160__RESERVED__SHIFT 0x10
+#define RTAVFS_REG160__RTAVFSCPO38_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG160__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG161
+#define RTAVFS_REG161__RTAVFSCPO39_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG161__RESERVED__SHIFT 0x10
+#define RTAVFS_REG161__RTAVFSCPO39_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG161__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG162
+#define RTAVFS_REG162__RTAVFSCPO40_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG162__RESERVED__SHIFT 0x10
+#define RTAVFS_REG162__RTAVFSCPO40_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG162__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG163
+#define RTAVFS_REG163__RTAVFSCPO41_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG163__RESERVED__SHIFT 0x10
+#define RTAVFS_REG163__RTAVFSCPO41_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG163__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG164
+#define RTAVFS_REG164__RTAVFSCPO42_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG164__RESERVED__SHIFT 0x10
+#define RTAVFS_REG164__RTAVFSCPO42_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG164__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG165
+#define RTAVFS_REG165__RTAVFSCPO43_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG165__RESERVED__SHIFT 0x10
+#define RTAVFS_REG165__RTAVFSCPO43_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG165__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG166
+#define RTAVFS_REG166__RTAVFSCPO44_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG166__RESERVED__SHIFT 0x10
+#define RTAVFS_REG166__RTAVFSCPO44_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG166__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG167
+#define RTAVFS_REG167__RTAVFSCPO45_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG167__RESERVED__SHIFT 0x10
+#define RTAVFS_REG167__RTAVFSCPO45_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG167__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG168
+#define RTAVFS_REG168__RTAVFSCPO46_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG168__RESERVED__SHIFT 0x10
+#define RTAVFS_REG168__RTAVFSCPO46_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG168__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG169
+#define RTAVFS_REG169__RTAVFSCPO47_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG169__RESERVED__SHIFT 0x10
+#define RTAVFS_REG169__RTAVFSCPO47_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG169__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG170
+#define RTAVFS_REG170__RTAVFSCPO48_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG170__RESERVED__SHIFT 0x10
+#define RTAVFS_REG170__RTAVFSCPO48_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG170__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG171
+#define RTAVFS_REG171__RTAVFSCPO49_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG171__RESERVED__SHIFT 0x10
+#define RTAVFS_REG171__RTAVFSCPO49_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG171__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG172
+#define RTAVFS_REG172__RTAVFSCPO50_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG172__RESERVED__SHIFT 0x10
+#define RTAVFS_REG172__RTAVFSCPO50_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG172__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG173
+#define RTAVFS_REG173__RTAVFSCPO51_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG173__RESERVED__SHIFT 0x10
+#define RTAVFS_REG173__RTAVFSCPO51_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG173__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG174
+#define RTAVFS_REG174__RTAVFSCPO52_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG174__RESERVED__SHIFT 0x10
+#define RTAVFS_REG174__RTAVFSCPO52_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG174__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG175
+#define RTAVFS_REG175__RTAVFSCPO53_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG175__RESERVED__SHIFT 0x10
+#define RTAVFS_REG175__RTAVFSCPO53_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG175__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG176
+#define RTAVFS_REG176__RTAVFSCPO54_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG176__RESERVED__SHIFT 0x10
+#define RTAVFS_REG176__RTAVFSCPO54_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG176__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG177
+#define RTAVFS_REG177__RTAVFSCPO55_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG177__RESERVED__SHIFT 0x10
+#define RTAVFS_REG177__RTAVFSCPO55_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG177__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG178
+#define RTAVFS_REG178__RTAVFSCPO56_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG178__RESERVED__SHIFT 0x10
+#define RTAVFS_REG178__RTAVFSCPO56_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG178__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG179
+#define RTAVFS_REG179__RTAVFSCPO57_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG179__RESERVED__SHIFT 0x10
+#define RTAVFS_REG179__RTAVFSCPO57_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG179__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG180
+#define RTAVFS_REG180__RTAVFSCPO58_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG180__RESERVED__SHIFT 0x10
+#define RTAVFS_REG180__RTAVFSCPO58_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG180__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG181
+#define RTAVFS_REG181__RTAVFSCPO59_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG181__RESERVED__SHIFT 0x10
+#define RTAVFS_REG181__RTAVFSCPO59_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG181__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG182
+#define RTAVFS_REG182__RTAVFSCPO60_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG182__RESERVED__SHIFT 0x10
+#define RTAVFS_REG182__RTAVFSCPO60_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG182__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG183
+#define RTAVFS_REG183__RTAVFSCPO61_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG183__RESERVED__SHIFT 0x10
+#define RTAVFS_REG183__RTAVFSCPO61_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG183__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG184
+#define RTAVFS_REG184__RTAVFSCPO62_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG184__RESERVED__SHIFT 0x10
+#define RTAVFS_REG184__RTAVFSCPO62_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG184__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG185
+#define RTAVFS_REG185__RTAVFSCPO63_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG185__RESERVED__SHIFT 0x10
+#define RTAVFS_REG185__RTAVFSCPO63_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG185__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG186
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDE__SHIFT 0x0
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDESEL__SHIFT 0x10
+#define RTAVFS_REG186__RESERVED__SHIFT 0x11
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDE_MASK 0x0000FFFFL
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDESEL_MASK 0x00010000L
+#define RTAVFS_REG186__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG187
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDE__SHIFT 0x0
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDESEL__SHIFT 0x10
+#define RTAVFS_REG187__RESERVED__SHIFT 0x11
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDE_MASK 0x0000FFFFL
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDESEL_MASK 0x00010000L
+#define RTAVFS_REG187__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG189
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMPI__SHIFT 0x0
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMBINARYSEARCH__SHIFT 0xa
+#define RTAVFS_REG189__RTAVFSVDDREGON__SHIFT 0x14
+#define RTAVFS_REG189__RTAVFSVDDABOVEVDDRET__SHIFT 0x15
+#define RTAVFS_REG189__RESERVED__SHIFT 0x16
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMPI_MASK 0x000003FFL
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMBINARYSEARCH_MASK 0x000FFC00L
+#define RTAVFS_REG189__RTAVFSVDDREGON_MASK 0x00100000L
+#define RTAVFS_REG189__RTAVFSVDDABOVEVDDRET_MASK 0x00200000L
+#define RTAVFS_REG189__RESERVED_MASK 0xFFC00000L
+//RTAVFS_REG190
+#define RTAVFS_REG190__RTAVFSIGNORERLCREQ__SHIFT 0x0
+#define RTAVFS_REG190__RTAVFSRIPPLECOUNTEROUTSEL__SHIFT 0x1
+#define RTAVFS_REG190__RTAVFSRUNLOOP__SHIFT 0x6
+#define RTAVFS_REG190__RTAVFSSAVECPOWEIGHTS__SHIFT 0x7
+#define RTAVFS_REG190__RTAVFSRESTORECPOWEIGHTS__SHIFT 0x8
+#define RTAVFS_REG190__RTAVFSRESETRETENTIONREGS__SHIFT 0x9
+#define RTAVFS_REG190__RESERVED__SHIFT 0xa
+#define RTAVFS_REG190__RTAVFSIGNORERLCREQ_MASK 0x00000001L
+#define RTAVFS_REG190__RTAVFSRIPPLECOUNTEROUTSEL_MASK 0x0000003EL
+#define RTAVFS_REG190__RTAVFSRUNLOOP_MASK 0x00000040L
+#define RTAVFS_REG190__RTAVFSSAVECPOWEIGHTS_MASK 0x00000080L
+#define RTAVFS_REG190__RTAVFSRESTORECPOWEIGHTS_MASK 0x00000100L
+#define RTAVFS_REG190__RTAVFSRESETRETENTIONREGS_MASK 0x00000200L
+#define RTAVFS_REG190__RESERVED_MASK 0xFFFFFC00L
+//RTAVFS_REG191
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTUP__SHIFT 0x0
+#define RTAVFS_REG191__RTAVFSSTOPATIDLE__SHIFT 0x1
+#define RTAVFS_REG191__RTAVFSSTOPATRESETCPORIPPLECOUNTERS__SHIFT 0x2
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTCPOS__SHIFT 0x3
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTRIPPLECOUNTERS__SHIFT 0x4
+#define RTAVFS_REG191__RTAVFSSTOPATRIPPLECOUNTERSDONE__SHIFT 0x5
+#define RTAVFS_REG191__RTAVFSSTOPATCPOFINALRESULTREADY__SHIFT 0x6
+#define RTAVFS_REG191__RTAVFSSTOPATVOLTCODEREADY__SHIFT 0x7
+#define RTAVFS_REG191__RTAVFSSTOPATTARGETVOLATGEREADY__SHIFT 0x8
+#define RTAVFS_REG191__RTAVFSSTOPATSTOPCPOS__SHIFT 0x9
+#define RTAVFS_REG191__RTAVFSSTOPATWAITFORACK__SHIFT 0xa
+#define RTAVFS_REG191__RESERVED__SHIFT 0xb
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTUP_MASK 0x00000001L
+#define RTAVFS_REG191__RTAVFSSTOPATIDLE_MASK 0x00000002L
+#define RTAVFS_REG191__RTAVFSSTOPATRESETCPORIPPLECOUNTERS_MASK 0x00000004L
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTCPOS_MASK 0x00000008L
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTRIPPLECOUNTERS_MASK 0x00000010L
+#define RTAVFS_REG191__RTAVFSSTOPATRIPPLECOUNTERSDONE_MASK 0x00000020L
+#define RTAVFS_REG191__RTAVFSSTOPATCPOFINALRESULTREADY_MASK 0x00000040L
+#define RTAVFS_REG191__RTAVFSSTOPATVOLTCODEREADY_MASK 0x00000080L
+#define RTAVFS_REG191__RTAVFSSTOPATTARGETVOLATGEREADY_MASK 0x00000100L
+#define RTAVFS_REG191__RTAVFSSTOPATSTOPCPOS_MASK 0x00000200L
+#define RTAVFS_REG191__RTAVFSSTOPATWAITFORACK_MASK 0x00000400L
+#define RTAVFS_REG191__RESERVED_MASK 0xFFFFF800L
+//RTAVFS_REG192
+#define RTAVFS_REG192__RTAVFSAVFSSCALEDCPOCOUNT__SHIFT 0x0
+#define RTAVFS_REG192__RTAVFSAVFSFINALMINCPOCOUNT__SHIFT 0x10
+#define RTAVFS_REG192__RTAVFSAVFSSCALEDCPOCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG192__RTAVFSAVFSFINALMINCPOCOUNT_MASK 0xFFFF0000L
+//RTAVFS_REG193
+#define RTAVFS_REG193__RTAVFSFSMSTATE__SHIFT 0x0
+#define RTAVFS_REG193__RESERVED__SHIFT 0x10
+#define RTAVFS_REG193__RTAVFSFSMSTATE_MASK 0x0000FFFFL
+#define RTAVFS_REG193__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG194
+#define RTAVFS_REG194__RTAVFSRIPPLECNTREAD__SHIFT 0x0
+#define RTAVFS_REG194__RTAVFSRIPPLECNTREAD_MASK 0xFFFFFFFFL
+
+
+// addressBlock: sqind
+//SQ_DEBUG_STS_LOCAL
+#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x4
+#define SQ_DEBUG_STS_LOCAL__SQ_BUSY__SHIFT 0xc
+#define SQ_DEBUG_STS_LOCAL__IS_BUSY__SHIFT 0xd
+#define SQ_DEBUG_STS_LOCAL__IB_BUSY__SHIFT 0xe
+#define SQ_DEBUG_STS_LOCAL__ARB_BUSY__SHIFT 0xf
+#define SQ_DEBUG_STS_LOCAL__EXP_BUSY__SHIFT 0x10
+#define SQ_DEBUG_STS_LOCAL__BRMSG_BUSY__SHIFT 0x11
+#define SQ_DEBUG_STS_LOCAL__VM_BUSY__SHIFT 0x12
+#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x000003F0L
+#define SQ_DEBUG_STS_LOCAL__SQ_BUSY_MASK 0x00001000L
+#define SQ_DEBUG_STS_LOCAL__IS_BUSY_MASK 0x00002000L
+#define SQ_DEBUG_STS_LOCAL__IB_BUSY_MASK 0x00004000L
+#define SQ_DEBUG_STS_LOCAL__ARB_BUSY_MASK 0x00008000L
+#define SQ_DEBUG_STS_LOCAL__EXP_BUSY_MASK 0x00010000L
+#define SQ_DEBUG_STS_LOCAL__BRMSG_BUSY_MASK 0x00020000L
+#define SQ_DEBUG_STS_LOCAL__VM_BUSY_MASK 0x00040000L
+//SQ_DEBUG_CTRL_LOCAL
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x0
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0x000000FFL
+//SQ_WAVE_ACTIVE
+#define SQ_WAVE_ACTIVE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_ACTIVE__WAVE_SLOT_MASK 0x000FFFFFL
+//SQ_WAVE_VALID_AND_IDLE
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT_MASK 0x000FFFFFL
+//SQ_WAVE_MODE
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
+#define SQ_WAVE_MODE__TRAP_AFTER_INST_EN__SHIFT 0xb
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
+#define SQ_WAVE_MODE__WAVE_END__SHIFT 0x15
+#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
+#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1b
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__TRAP_AFTER_INST_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
+#define SQ_WAVE_MODE__WAVE_END_MASK 0x00200000L
+#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
+#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x08000000L
+//SQ_WAVE_STATUS
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
+#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
+#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
+#define SQ_WAVE_STATUS__TTRACE_SIMD_EN__SHIFT 0xf
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x14
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x15
+#define SQ_WAVE_STATUS__OREO_CONFLICT__SHIFT 0x16
+#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
+#define SQ_WAVE_STATUS__NO_VGPRS__SHIFT 0x18
+#define SQ_WAVE_STATUS__LDS_PARAM_READY__SHIFT 0x19
+#define SQ_WAVE_STATUS__MUST_GS_ALLOC__SHIFT 0x1a
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
+#define SQ_WAVE_STATUS__IDLE__SHIFT 0x1c
+#define SQ_WAVE_STATUS__SCRATCH_EN__SHIFT 0x1d
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TTRACE_SIMD_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__OREO_CONFLICT_MASK 0x00400000L
+#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
+#define SQ_WAVE_STATUS__NO_VGPRS_MASK 0x01000000L
+#define SQ_WAVE_STATUS__LDS_PARAM_READY_MASK 0x02000000L
+#define SQ_WAVE_STATUS__MUST_GS_ALLOC_MASK 0x04000000L
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+#define SQ_WAVE_STATUS__IDLE_MASK 0x10000000L
+#define SQ_WAVE_STATUS__SCRATCH_EN_MASK 0x20000000L
+//SQ_WAVE_TRAPSTS
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
+#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
+#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
+#define SQ_WAVE_TRAPSTS__BUFFER_OOB__SHIFT 0xf
+#define SQ_WAVE_TRAPSTS__HOST_TRAP__SHIFT 0x10
+#define SQ_WAVE_TRAPSTS__WAVESTART__SHIFT 0x11
+#define SQ_WAVE_TRAPSTS__WAVE_END__SHIFT 0x12
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT__SHIFT 0x13
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST__SHIFT 0x14
+#define SQ_WAVE_TRAPSTS__UTC_ERROR__SHIFT 0x1c
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
+#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
+#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
+#define SQ_WAVE_TRAPSTS__BUFFER_OOB_MASK 0x00008000L
+#define SQ_WAVE_TRAPSTS__HOST_TRAP_MASK 0x00010000L
+#define SQ_WAVE_TRAPSTS__WAVESTART_MASK 0x00020000L
+#define SQ_WAVE_TRAPSTS__WAVE_END_MASK 0x00040000L
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT_MASK 0x00080000L
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST_MASK 0x00100000L
+#define SQ_WAVE_TRAPSTS__UTC_ERROR_MASK 0x10000000L
+//SQ_WAVE_GPR_ALLOC
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0xc
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x000001FFL
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x000FF000L
+//SQ_WAVE_LDS_ALLOC
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
+#define SQ_WAVE_LDS_ALLOC__VGPR_SHARED_SIZE__SHIFT 0x18
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000001FFL
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
+#define SQ_WAVE_LDS_ALLOC__VGPR_SHARED_SIZE_MASK 0x0F000000L
+//SQ_WAVE_IB_STS
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x0
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x4
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0xa
+#define SQ_WAVE_IB_STS__VS_CNT__SHIFT 0x1a
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000007L
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x000003F0L
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000FC00L
+#define SQ_WAVE_IB_STS__VS_CNT_MASK 0xFC000000L
+//SQ_WAVE_PC_LO
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_PC_HI
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
+//SQ_WAVE_IB_DBG1
+#define SQ_WAVE_IB_DBG1__WAVE_IDLE__SHIFT 0x18
+#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
+#define SQ_WAVE_IB_DBG1__WAVE_IDLE_MASK 0x01000000L
+#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
+//SQ_WAVE_FLUSH_IB
+#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
+#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
+//SQ_WAVE_FLAT_SCRATCH_LO
+#define SQ_WAVE_FLAT_SCRATCH_LO__DATA__SHIFT 0x0
+#define SQ_WAVE_FLAT_SCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_FLAT_SCRATCH_HI
+#define SQ_WAVE_FLAT_SCRATCH_HI__DATA__SHIFT 0x0
+#define SQ_WAVE_FLAT_SCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_HW_ID1
+#define SQ_WAVE_HW_ID1__WAVE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID1__SIMD_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID1__WGP_ID__SHIFT 0xa
+#define SQ_WAVE_HW_ID1__SA_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID1__SE_ID__SHIFT 0x12
+#define SQ_WAVE_HW_ID1__DP_RATE__SHIFT 0x1d
+#define SQ_WAVE_HW_ID1__WAVE_ID_MASK 0x0000001FL
+#define SQ_WAVE_HW_ID1__SIMD_ID_MASK 0x00000300L
+#define SQ_WAVE_HW_ID1__WGP_ID_MASK 0x00003C00L
+#define SQ_WAVE_HW_ID1__SA_ID_MASK 0x00010000L
+#define SQ_WAVE_HW_ID1__SE_ID_MASK 0x001C0000L
+#define SQ_WAVE_HW_ID1__DP_RATE_MASK 0xE0000000L
+//SQ_WAVE_HW_ID2
+#define SQ_WAVE_HW_ID2__QUEUE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID2__PIPE_ID__SHIFT 0x4
+#define SQ_WAVE_HW_ID2__ME_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID2__STATE_ID__SHIFT 0xc
+#define SQ_WAVE_HW_ID2__WG_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID2__VM_ID__SHIFT 0x18
+#define SQ_WAVE_HW_ID2__QUEUE_ID_MASK 0x0000000FL
+#define SQ_WAVE_HW_ID2__PIPE_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID2__ME_ID_MASK 0x00000300L
+#define SQ_WAVE_HW_ID2__STATE_ID_MASK 0x00007000L
+#define SQ_WAVE_HW_ID2__WG_ID_MASK 0x001F0000L
+#define SQ_WAVE_HW_ID2__VM_ID_MASK 0x0F000000L
+//SQ_WAVE_POPS_PACKER
+#define SQ_WAVE_POPS_PACKER__POPS_EN__SHIFT 0x0
+#define SQ_WAVE_POPS_PACKER__POPS_PACKER_ID__SHIFT 0x1
+#define SQ_WAVE_POPS_PACKER__POPS_EN_MASK 0x00000001L
+#define SQ_WAVE_POPS_PACKER__POPS_PACKER_ID_MASK 0x00000006L
+//SQ_WAVE_SCHED_MODE
+#define SQ_WAVE_SCHED_MODE__DEP_MODE__SHIFT 0x0
+#define SQ_WAVE_SCHED_MODE__DEP_MODE_MASK 0x00000003L
+//SQ_WAVE_IB_STS2
+#define SQ_WAVE_IB_STS2__INST_PREFETCH__SHIFT 0x0
+#define SQ_WAVE_IB_STS2__MEM_ORDER__SHIFT 0x8
+#define SQ_WAVE_IB_STS2__FWD_PROGRESS__SHIFT 0xa
+#define SQ_WAVE_IB_STS2__WAVE64__SHIFT 0xb
+#define SQ_WAVE_IB_STS2__INST_PREFETCH_MASK 0x00000003L
+#define SQ_WAVE_IB_STS2__MEM_ORDER_MASK 0x00000300L
+#define SQ_WAVE_IB_STS2__FWD_PROGRESS_MASK 0x00000400L
+#define SQ_WAVE_IB_STS2__WAVE64_MASK 0x00000800L
+//SQ_WAVE_SHADER_CYCLES
+#define SQ_WAVE_SHADER_CYCLES__CYCLES__SHIFT 0x0
+#define SQ_WAVE_SHADER_CYCLES__CYCLES_MASK 0x000FFFFFL
+//SQ_WAVE_TTMP0
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP1
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP2
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP3
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP4
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP5
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP6
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP7
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP8
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP9
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP10
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP11
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP12
+#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP13
+#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP14
+#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP15
+#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_M0
+#define SQ_WAVE_M0__M0__SHIFT 0x0
+#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_LO
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_HI
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
+
+
+
+#endif
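
Every field in the generated header above follows the same __SHIFT/_MASK pairing: a field is recovered by ANDing the raw 32-bit register value with the mask and shifting the result right by the shift. A minimal C sketch of that convention, using the SQ_WAVE_STATUS fields; the GET_FIELD macro and decode function are illustrative only (the driver itself uses REG_GET_FIELD-style helpers, as seen in the vega10 hunk further below):

#include <stdint.h>

/* Illustrative helper: pastes <REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

static void decode_wave_status(uint32_t status)
{
	uint32_t halt  = GET_FIELD(status, SQ_WAVE_STATUS, HALT);     /* bit 13 */
	uint32_t valid = GET_FIELD(status, SQ_WAVE_STATUS, VALID);    /* bit 16 */
	uint32_t prio  = GET_FIELD(status, SQ_WAVE_STATUS, SPI_PRIO); /* bits [2:1] */

	(void)halt; (void)valid; (void)prio;
}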
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index d8632ccf3494..c488d4a50cf4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -4409,6 +4409,10 @@
#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL 0x0af9
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
// addressBlock: mmhub_utcl2_vmsharedvcdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 111a71b434e2..2969fbf282b7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -26728,6 +26728,14 @@
//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
// addressBlock: mmhub_utcl2_vmsharedvcdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
index 2ed95790a600..cf8d60c4df1b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
+#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
index eb62a18fcc48..3d60c9e92548 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 7e3231c2191c..a40ead44778a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -824,4 +824,62 @@ struct gpu_metrics_v2_2 {
uint64_t indep_throttle_status;
};
+struct gpu_metrics_v2_3 {
+ struct metrics_table_header common_header;
+
+ /* Temperature */
+ uint16_t temperature_gfx; // gfx temperature on APUs
+ uint16_t temperature_soc; // soc temperature on APUs
+ uint16_t temperature_core[8]; // CPU core temperature on APUs
+ uint16_t temperature_l3[2];
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Power/Energy */
+ uint16_t average_socket_power; // dGPU + APU power on A + A platform
+ uint16_t average_cpu_power;
+ uint16_t average_soc_power;
+ uint16_t average_gfx_power;
+ uint16_t average_core_power[8]; // CPU core power on APUs
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_fclk_frequency;
+ uint16_t average_vclk_frequency;
+ uint16_t average_dclk_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_fclk;
+ uint16_t current_vclk;
+ uint16_t current_dclk;
+ uint16_t current_coreclk[8]; // CPU core clocks
+ uint16_t current_l3clk[2];
+
+ /* Throttle status (ASIC dependent) */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t fan_pwm;
+
+ uint16_t padding[3];
+
+ /* Throttle status (ASIC independent) */
+ uint64_t indep_throttle_status;
+
+ /* Average Temperature */
+ uint16_t average_temperature_gfx; // average gfx temperature on APUs
+ uint16_t average_temperature_soc; // average soc temperature on APUs
+ uint16_t average_temperature_core[8]; // average CPU core temperature on APUs
+ uint16_t average_temperature_l3[2];
+};
#endif
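
gpu_metrics_v2_3 keeps the v2_2 layout as a prefix and only appends the average-temperature fields at the end, so a consumer should check the table revision before reading the new members. A hedged sketch, assuming the shared metrics_table_header carries format_revision and content_revision as in the existing v2_x tables (the helper name is illustrative):

/* Only read the appended average-temperature fields from a 2.3+ table. */
static int read_avg_gfx_temperature(const void *buf, uint16_t *out)
{
	const struct gpu_metrics_v2_3 *m = buf;

	if (m->common_header.format_revision != 2 ||
	    m->common_header.content_revision < 3)
		return -ENODATA;	/* older layout: field not present */

	*out = m->average_temperature_gfx;
	return 0;
}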
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 80dab1146439..7e85cdc5bd34 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -268,7 +268,9 @@ union MESAPI__ADD_QUEUE {
uint32_t is_tmz_queue : 1;
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
- uint32_t reserved : 22;
+ uint32_t trap_en : 1;
+ uint32_t is_aql_queue : 1;
+ uint32_t reserved : 20;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 956b6ce81c84..1b300c569faf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -668,6 +668,51 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_set_residency_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_residency_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_entrycount_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
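
The three new wrappers follow the existing amdgpu_dpm pattern: return -EOPNOTSUPP when swSMU is not in use and serialize the underlying SMU call under adev->pm.mutex. A hedged sketch of a caller that samples GFXOFF residency over one interval, assuming the start/stop logging semantics introduced later in this patch (the function name and sampling window are illustrative):

/* Start logging, wait a sampling window, stop, then read the result. */
static int sample_gfxoff_residency(struct amdgpu_device *adev, u32 *percent)
{
	int ret;

	ret = amdgpu_dpm_set_residency_gfxoff(adev, true);	/* start logging */
	if (ret)
		return ret;

	msleep(1000);						/* sampling window */

	ret = amdgpu_dpm_set_residency_gfxoff(adev, false);	/* stop and latch */
	if (ret)
		return ret;

	return amdgpu_dpm_get_residency_gfxoff(adev, percent);
}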
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5e318b3f6c0f..948cc75376f8 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3405,9 +3405,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
- if (adev->pm.dpm_enabled == 0)
- return;
-
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 65624d091ed2..cb5b9df78b4d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -435,6 +435,9 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev);
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
uint64_t event_arg);
+int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value);
+int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value);
+int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value);
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev);
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 1eb4e613b27a..ec055858eb95 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1485,6 +1485,7 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
+ int err;
if (!addr || !size)
return -EINVAL;
@@ -1492,7 +1493,9 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
if (adev->pm.smu_prv_buffer) {
- amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ if (err)
+ return err;
*size = adev->pm.smu_prv_buffer_size;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index dad3e3741a4e..190af79f3236 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- uint32_t current_rpm;
- uint32_t percent = 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
- if (vega10_get_current_rpm(hwmgr, &current_rpm))
- return -1;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
- if (hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM != 0)
- percent = current_rpm * 255 /
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM;
+ if (!duty100)
+ return -EINVAL;
- *speed = MIN(percent, 255);
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
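
The rewritten helper reports fan speed as a PWM duty fraction scaled to 0-255 instead of deriving a percentage from RPM: speed = FDO_PWM_DUTY * 255 / FMAX_DUTY100, clamped to 255. For example (illustrative figures), with FMAX_DUTY100 = 100 and a measured duty of 45, the reported value is 45 * 255 / 100 = 114.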
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
index 1e79baab753e..bd54fbd393b9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
@@ -195,7 +195,6 @@ static int init_powerplay_table_information(
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
uint32_t disable_power_control = 0;
- int result;
hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
@@ -257,9 +256,7 @@ static int init_powerplay_table_information(
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
- result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
-
- return result;
+ return append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
}
static int vega12_pp_tables_initialize(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index 6e0be6027705..01a7d66864f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
@@ -401,8 +401,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
-extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pcurrent_state,
const struct pp_hw_power_state *pnew_power_state);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index 45214a364baa..e7ed2a7adf8f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2567,15 +2567,13 @@ static uint8_t polaris10_get_memory_modile_index(struct pp_hwmgr *hwmgr)
static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- int result;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *mc_reg_table = &smu_data->mc_reg_table;
uint8_t module_index = polaris10_get_memory_modile_index(hwmgr);
memset(mc_reg_table, 0, sizeof(pp_atomctrl_mc_reg_table));
- result = atomctrl_initialize_mc_reg_table_v2_2(hwmgr, module_index, mc_reg_table);
- return result;
+ return atomctrl_initialize_mc_reg_table_v2_2(hwmgr, module_index, mc_reg_table);
}
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7510d470b864..13c5c7f1ecb9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -90,6 +90,30 @@ static int smu_sys_set_pp_feature_mask(void *handle,
return smu_set_pp_feature_mask(smu, new_mask);
}
+int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
+{
+ if (!smu->ppt_funcs->set_gfx_off_residency)
+ return -EINVAL;
+
+ return smu_set_gfx_off_residency(smu, value);
+}
+
+int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
+{
+ if (!smu->ppt_funcs->get_gfx_off_residency)
+ return -EINVAL;
+
+ return smu_get_gfx_off_residency(smu, value);
+}
+
+int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
+{
+ if (!smu->ppt_funcs->get_gfx_off_entrycount)
+ return -EINVAL;
+
+ return smu_get_gfx_off_entrycount(smu, value);
+}
+
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
if (!smu->ppt_funcs->get_gfx_off_status)
@@ -581,6 +605,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
smu->od_enabled = true;
break;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 10):
smu_v13_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 7):
@@ -1576,6 +1601,7 @@ static int smu_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
+ uint64_t count;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1593,6 +1619,14 @@ static int smu_suspend(void *handle)
smu_set_gfx_cgpg(smu, false);
+ /*
+	 * PMFW resets the entry count when the device is suspended, so save
+	 * the last value here and add it back after resume to keep it consistent
+ */
+ ret = smu_get_entrycount_gfxoff(smu, &count);
+ if (!ret)
+ adev->gfx.gfx_off_entrycount = count;
+
return 0;
}
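
Since the firmware counter restarts from zero across suspend, the cumulative count a user sees is the value saved here plus whatever the firmware accumulates after resume, i.e. total = adev->gfx.gfx_off_entrycount + count_from_pmfw; the vangogh get_gfx_off_entrycount callback added later in this patch performs exactly that addition.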
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b81c657c7386..e2fa3b066b96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1112,6 +1112,22 @@ struct pptable_funcs {
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
/**
+	 * @get_gfx_off_entrycount: total GFXOFF entry count since system
+	 * power-up at the time of query
+ */
+ u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);
+
+ /**
+ * @set_gfx_off_residency: set 1 to start logging, 0 to stop logging
+ */
+ u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);
+
+ /**
+ * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval
+ */
+ u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);
+
+ /**
 	 * @register_irq_handler: Register interrupt request handlers.
*/
int (*register_irq_handler)(struct smu_context *smu);
@@ -1454,6 +1470,12 @@ int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
+int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);
+
+int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);
+
+int smu_set_residency_gfxoff(struct smu_context *smu, bool value);
+
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 78620b0bd279..063f4a737605 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
index d2e10a724560..82cf9e563065 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
@@ -137,7 +137,7 @@
#define PPSMC_MSG_DisallowGpo 0x56
#define PPSMC_MSG_Enable2ndUSB20Port 0x57
-
-#define PPSMC_Message_Count 0x58
+#define PPSMC_MSG_DriverMode2Reset 0x5D
+#define PPSMC_Message_Count 0x5E
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
index fe130a497d6c..7471e2df2828 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
@@ -108,7 +108,10 @@
#define PPSMC_MSG_SetSlowPPTLimit 0x4A
#define PPSMC_MSG_GetFastPPTLimit 0x4B
#define PPSMC_MSG_GetSlowPPTLimit 0x4C
-#define PPSMC_Message_Count 0x4D
+#define PPSMC_MSG_GetGfxOffStatus 0x50
+#define PPSMC_MSG_GetGfxOffEntryCount 0x51
+#define PPSMC_MSG_LogGfxOffResidency 0x52
+#define PPSMC_Message_Count 0x53
//Argument for PPSMC_MSG_GfxDeviceDriverReset
enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 19084a4fcb2b..58098b82df66 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -235,7 +235,11 @@
__SMU_DUMMY_MAP(UnforceGfxVid), \
__SMU_DUMMY_MAP(HeavySBR), \
__SMU_DUMMY_MAP(SetBadHBMPagesRetiredFlagsPerChannel), \
- __SMU_DUMMY_MAP(EnableGfxImu),
+ __SMU_DUMMY_MAP(EnableGfxImu), \
+ __SMU_DUMMY_MAP(DriverMode2Reset), \
+ __SMU_DUMMY_MAP(GetGfxOffStatus), \
+ __SMU_DUMMY_MAP(GetGfxOffEntryCount), \
+ __SMU_DUMMY_MAP(LogGfxOffResidency),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index c02e5e576728..9d62ea2af132 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,10 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -291,5 +292,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa520d79ef67..74996a8fb671 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -154,6 +154,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0),
MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0),
MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0),
+ MSG_MAP(DriverMode2Reset, PPSMC_MSG_DriverMode2Reset, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -368,6 +369,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
false;
+
+ /*
+	 * Disable BACO entry/exit completely on the SKUs below to
+	 * avoid intermittent hardware failures.
+ */
+ if (((adev->pdev->device == 0x73A1) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73BF) &&
+ (adev->pdev->revision == 0xCF)))
+ smu_baco->platform_support = false;
+
}
}
@@ -4254,6 +4266,57 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
+static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
+{
+ return true;
+}
+
+static int sienna_cichlid_mode2_reset(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 100;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_DriverMode2Reset);
+
+ mutex_lock(&smu->message_lock);
+
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
+ SMU_RESET_MODE_2);
+
+ ret = smu_cmn_wait_for_response(smu);
+ while (ret != 0 && timeout) {
+ ret = smu_cmn_wait_for_response(smu);
+ /* Wait a bit more time for getting ACK */
+ if (ret != 0) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ if (!timeout) {
+ dev_err(adev->dev,
+ "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+
+ dev_info(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -4283,6 +4346,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -4348,6 +4412,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
+ .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
+ .mode2_reset = sienna_cichlid_mode2_reset,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 89504ff8e9ed..cb10c7e31264 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -138,6 +138,9 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
+ MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0),
+ MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0),
+ MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -220,14 +223,13 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- struct amdgpu_device *adev = smu->adev;
uint32_t if_version;
+ uint32_t smu_version;
uint32_t ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
- goto err0_out;
+ return ret;
}
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
@@ -252,7 +254,10 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ if (smu_version >= 0x043F3E00)
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
+ else
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -1645,6 +1650,63 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
return 0;
}
+static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_3 *gpu_metrics =
+ (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
+ SmuMetrics_legacy_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);
+
+ gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Power[0];
+ gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_3);
+}
+
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1702,6 +1764,77 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_2);
}
+static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_3 *gpu_metrics =
+ (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
+ gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
+ memcpy(&gpu_metrics->average_temperature_core[0],
+ &metrics.Average.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_3);
+}
+
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1769,20 +1902,26 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
void **table)
{
- struct amdgpu_device *adev = smu->adev;
uint32_t if_version;
+ uint32_t smu_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
return ret;
}
- if (if_version < 0x3)
- ret = vangogh_get_legacy_gpu_metrics(smu, table);
- else
- ret = vangogh_get_gpu_metrics(smu, table);
+ if (smu_version >= 0x043F3E00) {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+ } else {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+ }
return ret;
}
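
The 0x043F3E00 cutoff appears to pack a PMFW release one byte per component (0x04, 0x3F, 0x3E, 0x00, i.e. 4.63.62.0 read that way): firmware at or above it reports the larger gpu_metrics_v2_3 table, older firmware keeps v2_2, and the interface-version check still selects between the legacy and current SmuMetrics encodings in either case.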
@@ -2200,6 +2339,76 @@ static int vangogh_set_power_limit(struct smu_context *smu,
return ret;
}
+/**
+ * vangogh_set_gfxoff_residency - start or stop GFXOFF residency logging
+ *
+ * @smu: smu context pointer
+ * @start: true to start logging, false to stop logging
+ *
+ * Logs GFXOFF residency via the SMU firmware.
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
+{
+ int ret = 0;
+ u32 residency;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
+ start, &residency);
+
+ if (!start)
+ adev->gfx.gfx_off_residency = residency;
+
+ return ret;
+}
+
+/**
+ * vangogh_get_gfxoff_residency - report the last logged GFXOFF residency
+ *
+ * @smu: smu context pointer
+ * @residency: filled with the average GFXOFF residency percentage
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *residency = adev->gfx.gfx_off_residency;
+
+ return 0;
+}
+
+/**
+ * vangogh_get_gfxoff_entrycount - get GFXOFF entry count
+ *
+ * @smu: smu context pointer
+ * @entrycount: filled with the cumulative GFXOFF entry count since power-up
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
+{
+ int ret = 0, value = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
+ *entrycount = value + adev->gfx.gfx_off_entrycount;
+
+ return ret;
+}
+
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -2237,6 +2446,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.mode2_reset = vangogh_mode2_reset,
.gfx_off_control = smu_v11_0_gfx_off_control,
.get_gfx_off_status = vangogh_get_gfxoff_status,
+ .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
+ .get_gfx_off_residency = vangogh_get_gfxoff_residency,
+ .set_gfx_off_residency = vangogh_set_gfxoff_residency,
.get_ppt_limit = vangogh_get_ppt_limit,
.get_power_limit = vangogh_get_power_limit,
.set_power_limit = vangogh_set_power_limit,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e8fe84f806d1..93fffdbab4f0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -59,6 +59,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
@@ -84,9 +85,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id);
-
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -212,40 +210,16 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use 36831 signed pptable when pp_table_id is 3683
- * - use 36641 signed pptable when pp_table_id is 3664 or 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- case 3664:
- pptable_id = 36641;
- break;
- case 3683:
- pptable_id = 36831;
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -330,6 +304,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
+ case IP_VERSION(13, 0, 10):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -425,8 +402,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
return 0;
}
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
{
const struct smc_firmware_header_v1_0 *hdr;
struct amdgpu_device *adev = smu->adev;
@@ -476,25 +455,8 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
- * - use 3664 when pptable_id is 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- pptable_id = 3664;
- break;
- case 3664:
- case 3683:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ pptable_id = 6666;
}
/* force using vbios pptable in sriov mode */
@@ -1106,6 +1068,9 @@ int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->irq_source.num_types)
+ return 0;
+
ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
if (ret)
return ret;
@@ -1115,6 +1080,9 @@ int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
+ if (!smu->irq_source.num_types)
+ return 0;
+
return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
@@ -1486,6 +1454,9 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
irq_src->num_types = 1;
irq_src->funcs = &smu_v13_0_irq_funcs;
@@ -2344,8 +2315,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
-
- return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ /* Param 1 to tell PMFW to enable GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1bbeceeb9e3c..1d454485e0d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
if (num > 2)
return -EINVAL;
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
- if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
- (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
-#if 0
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
-
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ /* PMFW 78.58 contains a critical fix for gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
- if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
return 0;
}
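Editor's note: two details in this hunk are easy to misread. The allowed-feature mask is now opt-out — every bit starts set (memset 0xff) and individual FEATURE_*_BIT entries are cleared when the matching pp_feature or PG flag is absent. And 0x004e3a00 is consistent with PMFW 78.58.0 packed as major<<16 | minor<<8 | patch (0x4e = 78, 0x3a = 58); the packing is inferred from the comparison above, not stated in the patch. A small standalone sketch of both (the bit index 28 is hypothetical):

#include <stdint.h>

#define FEATURE_MASK(bit)	(1ULL << (bit))
#define PMFW_VERSION(maj, min, patch) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(patch))

static uint64_t build_allowed_mask(int gfxoff_supported)
{
	uint64_t mask = ~0ULL;			/* everything allowed by default */

	if (!gfxoff_supported)			/* clear only what is not supported */
		mask &= ~FEATURE_MASK(28);	/* hypothetical GFXOFF bit index */

	return mask;
}

/* PMFW_VERSION(78, 58, 0) == 0x004e3a00, the threshold checked in the hunk. */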
@@ -388,30 +353,35 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
{
struct smu_table_context *smu_table = &smu->smu_table;
void *combo_pptable = smu_table->combo_pptable;
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * With SCPM enabled, the pptable used will be signed. It cannot
- * be used directly by driver. To get the raw pptable, we need to
- * rely on the combo pptable(and its revelant SMU message).
- */
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_0_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_0_store_powerplay_table(smu);
if (ret)
@@ -1792,7 +1762,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 82d3718d8324..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
- return ret;
-}
-
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 47360ef5c175..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9dd56e73218b..c422bf8a09b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -400,11 +401,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_7_powerplay_table);
+
+ return 0;
+}
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -413,18 +430,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
* be used directly by driver. To get the raw pptable, we need to
 * rely on the combo pptable (and its relevant SMU message).
*/
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_7_store_powerplay_table(smu);
if (ret)
@@ -1567,6 +1577,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1594,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1646,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 15e4298c7cc8..e4f8f90ac5aa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -969,6 +969,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 2):
structure_size = sizeof(struct gpu_metrics_v2_2);
break;
+ case METRICS_VERSION(2, 3):
+ structure_size = sizeof(struct gpu_metrics_v2_3);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 7469bbfce1fb..ceb13c838067 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -47,6 +47,9 @@
#define smu_notify_memory_pool_location(smu) smu_ppt_funcs(notify_memory_pool_location, 0, smu)
#define smu_gfx_off_control(smu, enable) smu_ppt_funcs(gfx_off_control, 0, smu, enable)
#define smu_get_gfx_off_status(smu) smu_ppt_funcs(get_gfx_off_status, 0, smu)
+#define smu_get_gfx_off_entrycount(smu, value) smu_ppt_funcs(get_gfx_off_entrycount, 0, smu, value)
+#define smu_get_gfx_off_residency(smu, value) smu_ppt_funcs(get_gfx_off_residency, 0, smu, value)
+#define smu_set_gfx_off_residency(smu, value) smu_ppt_funcs(set_gfx_off_residency, 0, smu, value)
#define smu_set_last_dcef_min_deep_sleep_clk(smu) smu_ppt_funcs(set_last_dcef_min_deep_sleep_clk, 0, smu)
#define smu_system_features_control(smu, en) smu_ppt_funcs(system_features_control, 0, smu, en)
#define smu_init_max_sustainable_clocks(smu) smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 6e3f1d600541..c1b89274d2a4 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,7 +6,7 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
@@ -27,7 +27,7 @@ config DRM_MALI_DISPLAY
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you want to compile the ARM Mali Display
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index e91598b60781..4acc4285a4eb 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -4,7 +4,7 @@ config DRM_KOMEDA
depends on DRM && OF
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you want to compile the ARM Komeda display
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index daa1faccd3e7..6c56f5662bc7 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -310,8 +310,7 @@ static int d71_reset(struct d71_dev *d71)
u32 __iomem *gcu = d71->gcu_addr;
int ret;
- malidp_write32_mask(gcu, BLK_CONTROL,
- GCU_CONTROL_SRST, GCU_CONTROL_SRST);
+ malidp_write32(gcu, BLK_CONTROL, GCU_CONTROL_SRST);
ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
100, 1000, 10000);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 59172acb9738..4cc07d6bb9d8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -235,7 +234,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
} else {
- DRM_WARN("CRTC[%d]: FLIP happen but no pending commit.\n",
+ DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
drm_crtc_index(&kcrtc->base));
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -286,7 +285,7 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc,
komeda_crtc_do_flush(crtc, old);
}
-static void
+void
komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
struct completion *input_flip_done)
{
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index ba16895690f1..9fce4239d4ad 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
@@ -72,6 +73,7 @@ static int komeda_bind(struct device *dev)
}
dev_set_drvdata(dev, mdrv);
+ drm_fbdev_generic_setup(&mdrv->kms->base, 32);
return 0;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3c372d2deb0a..df5da5a44755 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -5,9 +5,9 @@
*
*/
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "komeda_framebuffer.h"
@@ -137,7 +137,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
}
min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i)
- - to_drm_gem_cma_obj(obj)->paddr;
+ - to_drm_gem_dma_obj(obj)->dma_addr;
if (obj->size < min_size) {
DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n",
i, obj->size, min_size);
@@ -239,7 +239,7 @@ dma_addr_t
komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
{
struct drm_framebuffer *fb = &kfb->base;
- const struct drm_gem_cma_object *obj;
+ const struct drm_gem_dma_object *obj;
u32 offset, plane_x, plane_y, block_w, block_sz;
if (plane >= fb->format->num_planes) {
@@ -247,7 +247,7 @@ komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
return -EINVAL;
}
- obj = drm_fb_cma_get_gem_obj(fb, plane);
+ obj = drm_fb_dma_get_gem_obj(fb, plane);
offset = fb->offsets[plane];
if (!fb->modifier) {
@@ -260,7 +260,7 @@ komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
+ plane_y * fb->pitches[plane];
}
- return obj->paddr + offset;
+ return obj->dma_addr + offset;
}
/* if the fb can be supported by a specific layer */
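Editor's note: the komeda, hdlcd, malidp, armada, aspeed and atmel changes in this series are part of the tree-wide rename of the GEM CMA helpers to GEM DMA helpers: drm_fb_cma_get_gem_obj()/obj->paddr become drm_fb_dma_get_gem_obj()/obj->dma_addr, with no behavioural change. A short sketch of the post-rename pattern (plane index and offset handling are illustrative only):

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem_dma_helper.h>

/* Sketch: fetch the scanout DMA address of plane 0 with the renamed helpers. */
static dma_addr_t plane0_scanout_addr(struct drm_framebuffer *fb)
{
	struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);

	if (!obj)
		return 0;

	/* Formerly: to_drm_gem_cma_obj(...)->paddr + fb->offsets[0] */
	return obj->dma_addr + fb->offsets[0];
}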
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 93b7f09b96ca..451746ebbe71 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -11,7 +11,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -21,9 +21,9 @@
#include "komeda_framebuffer.h"
#include "komeda_kms.h"
-DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);
+DEFINE_DRM_GEM_DMA_FOPS(komeda_cma_fops);
-static int komeda_gem_cma_dumb_create(struct drm_file *file,
+static int komeda_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -32,7 +32,7 @@ static int komeda_gem_cma_dumb_create(struct drm_file *file,
args->pitch = ALIGN(pitch, mdev->chip.bus_width);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
@@ -60,7 +60,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
@@ -69,6 +69,25 @@ static const struct drm_driver komeda_kms_driver = {
.minor = 1,
};
+static void komeda_kms_atomic_commit_hw_done(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct komeda_kms_dev *kms = to_kdev(dev);
+ int i;
+
+ for (i = 0; i < kms->n_crtcs; i++) {
+ struct komeda_crtc *kcrtc = &kms->crtcs[i];
+
+ if (kcrtc->base.state->active) {
+ struct completion *flip_done = NULL;
+ if (kcrtc->base.state->event)
+ flip_done = kcrtc->base.state->event->base.completion;
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, flip_done);
+ }
+ }
+ drm_atomic_helper_commit_hw_done(state);
+}
+
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
@@ -81,7 +100,7 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_hw_done(old_state);
+ komeda_kms_atomic_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 7889e380ab23..7339339ef6b8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -183,6 +183,8 @@ void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms);
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
struct komeda_events *evts);
+void komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
+ struct completion *input_flip_done);
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index e672b9cffee3..3276a3e82c62 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -1271,7 +1271,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
return 0;
}
-/* Since standalong disabled components must be disabled separately and in the
+/* Since standalone disabled components must be disabled separately and in the
 * last, so a complete disable operation may need to call pipeline_disable
* twice (two phase disabling).
* Phase 1: disable the common components, flush it.
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index dff22dec54b5..c20ff72f0ae5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index afc9cd856501..7030339fa232 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -18,12 +18,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -252,8 +251,8 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -274,7 +273,7 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
return;
dest_h = drm_rect_height(&new_plane_state->dst);
- scanout_start = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
+ scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index e89ae0ec60eb..a032003c340c 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -21,13 +21,13 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -40,8 +40,7 @@
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
- struct drm_device *drm = arg;
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = arg;
unsigned long irq_status;
irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
@@ -69,61 +68,32 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static void hdlcd_irq_preinstall(struct drm_device *drm)
+static int hdlcd_irq_install(struct hdlcd_drm_private *hdlcd)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ int ret;
+
/* Ensure interrupts are disabled */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
-}
-
-static void hdlcd_irq_postinstall(struct drm_device *drm)
-{
-#ifdef CONFIG_DEBUG_FS
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
- unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
-
- /* enable debug interrupts */
- irq_mask |= HDLCD_DEBUG_INT_MASK;
-
- hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
-#endif
-}
-
-static int hdlcd_irq_install(struct drm_device *drm, int irq)
-{
- int ret;
-
- if (irq == IRQ_NOTCONNECTED)
- return -ENOTCONN;
-
- hdlcd_irq_preinstall(drm);
- ret = request_irq(irq, hdlcd_irq, 0, drm->driver->name, drm);
+ ret = request_irq(hdlcd->irq, hdlcd_irq, 0, "hdlcd", hdlcd);
if (ret)
return ret;
- hdlcd_irq_postinstall(drm);
+#ifdef CONFIG_DEBUG_FS
+ /* enable debug interrupts */
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, HDLCD_DEBUG_INT_MASK);
+#endif
return 0;
}
-static void hdlcd_irq_uninstall(struct drm_device *drm)
+static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
/* disable all the interrupts that we might have enabled */
- unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
-
-#ifdef CONFIG_DEBUG_FS
- /* disable debug interrupts */
- irq_mask &= ~HDLCD_DEBUG_INT_MASK;
-#endif
-
- /* disable vsync interrupts */
- irq_mask &= ~HDLCD_INTERRUPT_VSYNC;
- hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
- free_irq(hdlcd->irq, drm);
+ free_irq(hdlcd->irq, hdlcd);
}
static int hdlcd_load(struct drm_device *drm, unsigned long flags)
@@ -183,7 +153,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
goto irq_fail;
hdlcd->irq = ret;
- ret = hdlcd_irq_install(drm, hdlcd->irq);
+ ret = hdlcd_irq_install(hdlcd);
if (ret < 0) {
DRM_ERROR("failed to install IRQ handler\n");
goto irq_fail;
@@ -255,11 +225,11 @@ static void hdlcd_debugfs_init(struct drm_minor *minor)
}
#endif
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = hdlcd_debugfs_init,
#endif
@@ -314,6 +284,15 @@ static int hdlcd_drm_bind(struct device *dev)
goto err_vblank;
}
+ /*
+ * If EFI left us running, take over from simple framebuffer
+ * drivers. Read HDLCD_REG_COMMAND to see if we are enabled.
+ */
+ if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
+ hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+ drm_aperture_remove_framebuffers(false, &hdlcd_driver);
+ }
+
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
@@ -335,7 +314,7 @@ err_pm_active:
err_unload:
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- hdlcd_irq_uninstall(drm);
+ hdlcd_irq_uninstall(hdlcd);
of_reserved_mem_device_release(drm->dev);
err_free:
drm_mode_config_cleanup(drm);
@@ -357,7 +336,7 @@ static void hdlcd_drm_unbind(struct device *dev)
hdlcd->crtc.port = NULL;
pm_runtime_get_sync(dev);
drm_atomic_helper_shutdown(drm);
- hdlcd_irq_uninstall(drm);
+ hdlcd_irq_uninstall(hdlcd);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
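Editor's note: the hdlcd rework above stops routing interrupt setup through struct drm_device — the handler now receives the driver's private structure directly, and install/uninstall become plain request_irq()/free_irq() wrappers. The subtle requirement is that free_irq() must be passed the same dev_id cookie as request_irq(); a minimal sketch of that pairing, reusing the field names from the hunk:

/* Sketch: request and release an interrupt with a driver-private cookie. */
static irqreturn_t my_irq(int irq, void *arg)
{
	struct hdlcd_drm_private *hdlcd = arg;	/* same pointer passed to request_irq() */

	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int my_irq_install(struct hdlcd_drm_private *hdlcd)
{
	return request_irq(hdlcd->irq, my_irq, 0, "hdlcd", hdlcd);
}

static void my_irq_uninstall(struct hdlcd_drm_private *hdlcd)
{
	free_irq(hdlcd->irq, hdlcd);	/* dev_id must match request_irq() */
}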
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index d5aef21426cf..1d0b0c54ccc7 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -19,10 +19,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -457,7 +456,7 @@ static int malidp_irq_init(struct platform_device *pdev)
return 0;
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static int malidp_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
@@ -469,7 +468,7 @@ static int malidp_dumb_create(struct drm_file *file_priv,
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
#ifdef CONFIG_DEBUG_FS
@@ -566,7 +565,7 @@ static void malidp_debugfs_init(struct drm_minor *minor)
static const struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index b66ca5b33a7f..ef76d0e6ee2f 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -10,10 +10,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
@@ -160,7 +160,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
n_planes = fb->format->num_planes;
for (i = 0; i < n_planes; i++) {
- struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, i);
/* memory write buffers are never rotated */
u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
@@ -170,7 +170,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
mw_state->pitches[i] = fb->pitches[i];
- mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+ mw_state->addrs[i] = obj->dma_addr + fb->offsets[i];
}
mw_state->n_planes = n_planes;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 8a9562642d16..45f5e35e7f24 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -13,12 +13,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "malidp_hw.h"
@@ -334,15 +333,15 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
for (i = 0; i < ms->n_planes; i++) {
struct drm_gem_object *obj;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sg_table *sgt;
struct scatterlist *sgl;
obj = drm_gem_fb_get_obj(ms->base.fb, i);
- cma_obj = to_drm_gem_cma_obj(obj);
+ dma_obj = to_drm_gem_dma_obj(obj);
- if (cma_obj->sgt)
- sgt = cma_obj->sgt;
+ if (dma_obj->sgt)
+ sgt = dma_obj->sgt;
else
sgt = obj->funcs->get_sg_table(obj);
@@ -353,14 +352,14 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
while (sgl) {
if (sgl->length < pgsize) {
- if (!cma_obj->sgt)
+ if (!dma_obj->sgt)
kfree(sgt);
return false;
}
sgl = sg_next(sgl);
}
- if (!cma_obj->sgt)
+ if (!dma_obj->sgt)
kfree(sgt);
}
@@ -715,7 +714,7 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
struct malidp_plane *mp,
int plane_index)
{
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
u16 ptr;
struct drm_plane *plane = &mp->base;
bool afbc = fb->modifier ? true : false;
@@ -723,27 +722,27 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
ptr = mp->layer->ptr + (plane_index << 4);
/*
- * drm_fb_cma_get_gem_addr() alters the physical base address of the
+ * drm_fb_dma_get_gem_addr() alters the physical base address of the
* framebuffer as per the plane's src_x, src_y co-ordinates (ie to
* take care of source cropping).
* For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
* and _AD_CROP_V registers.
*/
if (!afbc) {
- paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
- plane_index);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, plane->state,
+ plane_index);
} else {
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
- obj = drm_fb_cma_get_gem_obj(fb, plane_index);
+ obj = drm_fb_dma_get_gem_obj(fb, plane_index);
if (WARN_ON(!obj))
return;
- paddr = obj->paddr;
+ dma_addr = obj->dma_addr;
}
- malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
- malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+ malidp_hw_write(mp->hwdev, lower_32_bits(dma_addr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(dma_addr), ptr + 4);
}
static void malidp_de_set_plane_afbc(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 514c50dcb74d..3bc16db70ddb 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -145,7 +145,7 @@
#define MALIDP_SE_COEFFTAB_DATA_MASK 0x3fff
#define MALIDP_SE_SET_COEFFTAB_DATA(x) \
((x) & MALIDP_SE_COEFFTAB_DATA_MASK)
-/* Enhance coeffents reigster offset */
+/* Enhance coefficients register offset */
#define MALIDP_SE_IMAGE_ENH 0x3C
/* ENH_LIMITS offset 0x0 */
#define MALIDP_SE_ENH_LOW_LEVEL 24
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index b7bb90ae787f..15dd667aa2e7 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -12,7 +12,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 147abf1a3968..5430265ad458 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -107,11 +107,11 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
}
/*
- * We could grab something from CMA if it's enabled, but that
+ * We could grab something from DMA if it's enabled, but that
* involves building in a problem:
*
- * CMA's interface uses dma_alloc_coherent(), which provides us
- * with an CPU virtual address and a device address.
+ * The GEM DMA helper interface uses dma_alloc_coherent(), which provides
+ * us with a CPU virtual address and a device address.
*
* The CPU virtual address may be either an address in the kernel
* direct mapped region (for example, as it would be on x86) or
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 424250535fed..f21eb8fb76d8 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -298,12 +298,6 @@ fail:
return ret;
}
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(plane);
-}
-
static void armada_overlay_reset(struct drm_plane *plane)
{
struct armada_overlay_state *state;
@@ -468,7 +462,7 @@ static int armada_overlay_get_property(struct drm_plane *plane,
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
.update_plane = armada_overlay_plane_update,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = armada_ovl_plane_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = armada_overlay_reset,
.atomic_duplicate_state = armada_overlay_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index 959d7f0a5108..cc47c032dbc1 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -288,7 +288,7 @@ struct drm_plane_state *armada_plane_duplicate_state(struct drm_plane *plane)
static const struct drm_plane_funcs armada_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = armada_plane_reset,
.atomic_duplicate_state = armada_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 024ccab14f88..8137c39b057b 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -5,7 +5,7 @@ config DRM_ASPEED_GFX
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
select CMA if HAVE_DMA_CONTIGUOUS
select MFD_SYSCON
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index f3788d7d82d6..55a3444a51d8 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -7,11 +7,11 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -168,7 +168,7 @@ static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_framebuffer *fb = pipe->plane.state->fb;
struct drm_pending_vblank_event *event;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
spin_lock_irq(&crtc->dev->event_lock);
event = crtc->state->event;
@@ -185,10 +185,10 @@ static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
if (!fb)
return;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
if (!gem)
return;
- writel(gem->paddr, priv->base + CRT_ADDR);
+ writel(gem->dma_addr, priv->base + CRT_ADDR);
}
static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 7780b72de9e8..a94f1a9e8f40 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -16,9 +16,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -246,11 +245,11 @@ static void aspeed_gfx_unload(struct drm_device *drm)
drm_kms_helper_poll_fini(drm);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 760b27971557..b9392f31e629 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -39,7 +39,7 @@
#include "ast_drv.h"
-int ast_modeset = -1;
+static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 214b10178454..1bc0220e6783 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -42,7 +42,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -114,6 +113,9 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
case 1024:
vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
break;
+ case 1152:
+ vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
+ break;
case 1280:
if (mode->crtc_vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
@@ -311,7 +313,7 @@ static void ast_set_crtc_reg(struct ast_private *ast,
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
- if ((ast->chip == AST2500) &&
+ if ((ast->chip == AST2500 || ast->chip == AST2600) &&
(vbios_mode->enh_table->flags & AST2500PreCatchCRT))
precache = 40;
@@ -352,6 +354,12 @@ static void ast_set_crtc_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+ // Workaround for HSync time with non-octave pixel counts (1920x1080@60Hz, HSync 44 pixels)
+ if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080))
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02);
+ else
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00);
+
/* vert timings */
temp = (mode->crtc_vtotal) - 2;
if (temp & 0x100)
@@ -429,7 +437,7 @@ static void ast_set_dclk_reg(struct ast_private *ast,
{
const struct ast_vbios_dclk_info *clk_info;
- if (ast->chip == AST2500)
+ if ((ast->chip == AST2500) || (ast->chip == AST2600))
clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
else
clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
@@ -555,8 +563,8 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (ret)
return ret;
@@ -779,8 +787,8 @@ static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
@@ -1058,6 +1066,8 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
return MODE_OK;
if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
return MODE_OK;
+ if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
+ return MODE_OK;
if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
(ast->chip == AST2300) || (ast->chip == AST2400) ||
@@ -1090,6 +1100,10 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
if (mode->vdisplay == 768)
status = MODE_OK;
break;
+ case 1152:
+ if (mode->vdisplay == 864)
+ status = MODE_OK;
+ break;
case 1280:
if (mode->vdisplay == 1024)
status = MODE_OK;
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index dbe1cc620f6e..0378c9bc079b 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -272,6 +272,13 @@ static const struct ast_vbios_enhtable res_1600x1200[] = {
(SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
};
+static const struct ast_vbios_enhtable res_1152x864[] = {
+ {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */
+ (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B },
+ {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */
+ (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B },
+};
+
/* 16:9 */
static const struct ast_vbios_enhtable res_1360x768[] = {
{1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 8ae679f1a518..3bdbab3a6333 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -2,7 +2,7 @@
config DRM_ATMEL_HLCDC
tristate "DRM Support for ATMEL HLCDC Display Controller"
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 651e3c109360..f7e7f4e919c7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -730,11 +730,11 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
clk_disable_unprepare(dc->hlcdc->periph_clk);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 2306ceb3e999..daa508504f47 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -12,11 +12,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -449,9 +448,9 @@ static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
for (i = 0; i < state->nplanes; i++) {
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
- state->dscrs[i]->addr = gem->paddr + state->offsets[i];
+ state->dscrs[i]->addr = gem->dma_addr + state->offsets[i];
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index a031a0cd1f18..94de73cbeb2d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -394,10 +394,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
- unsigned int offset = adv7511->type == ADV7533 ?
- ADV7533_REG_CEC_OFFSET : 0;
-
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 0b266f28f150..99964f5a5457 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -359,7 +359,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
goto err_cec_alloc;
}
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
@@ -386,7 +386,7 @@ err_cec_alloc:
dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
ret);
err_cec_parse_dt:
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return ret == -EPROBE_DEFER ? ret : 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 38bf28720f3a..f887200e8abc 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1336,13 +1336,10 @@ uninit_regulators:
return ret;
}
-static int adv7511_remove(struct i2c_client *i2c)
+static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- i2c_unregister_device(adv7511->i2c_cec);
- clk_disable_unprepare(adv7511->cec_clk);
-
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
@@ -1350,11 +1347,11 @@ static int adv7511_remove(struct i2c_client *i2c)
adv7511_audio_exit(adv7511);
cec_unregister_adapter(adv7511->cec_adap);
+ i2c_unregister_device(adv7511->i2c_cec);
+ clk_disable_unprepare(adv7511->cec_clk);
i2c_unregister_device(adv7511->i2c_packet);
i2c_unregister_device(adv7511->i2c_edid);
-
- return 0;
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
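Editor's note: several bridge drivers in this series (adv7511, anx6345, anx78xx, anx7625) are converted to the new i2c_driver convention in which .remove returns void rather than int, since the I2C core ignored the return value anyway. The shape of the conversion, sketched on a hypothetical driver (mydrv_teardown() is a placeholder):

/* Sketch: i2c remove callback after the int -> void conversion. */
static void mydrv_i2c_remove(struct i2c_client *client)
{
	struct mydrv *priv = i2c_get_clientdata(client);

	/* tear down in reverse probe order; no status to return any more */
	mydrv_teardown(priv);
}

static struct i2c_driver mydrv_i2c_driver = {
	.driver	= { .name = "mydrv" },
	.remove	= mydrv_i2c_remove,	/* was: int ..._remove(...) { ...; return 0; } */
};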
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index ae3d6e9a606c..660a54857929 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -787,7 +787,7 @@ err_unregister_i2c:
return err;
}
-static int anx6345_i2c_remove(struct i2c_client *client)
+static void anx6345_i2c_remove(struct i2c_client *client)
{
struct anx6345 *anx6345 = i2c_get_clientdata(client);
@@ -798,8 +798,6 @@ static int anx6345_i2c_remove(struct i2c_client *client)
kfree(anx6345->edid);
mutex_destroy(&anx6345->lock);
-
- return 0;
}
static const struct i2c_device_id anx6345_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index d2fc8676fab6..5997049fde5b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1357,7 +1357,7 @@ err_unregister_i2c:
return err;
}
-static int anx78xx_i2c_remove(struct i2c_client *client)
+static void anx78xx_i2c_remove(struct i2c_client *client)
{
struct anx78xx *anx78xx = i2c_get_clientdata(client);
@@ -1366,8 +1366,6 @@ static int anx78xx_i2c_remove(struct i2c_client *client)
unregister_i2c_dummy_clients(anx78xx);
kfree(anx78xx->edid);
-
- return 0;
}
static const struct i2c_device_id anx78xx_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 8aadcc0aa90b..df9370e0ff23 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index d1f1d525aeb6..b0ff1ecb80a5 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1440,6 +1440,20 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
+ int ret;
+
+ /* Set irq detect window to 2ms */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT8_15,
+ (HPD_TIME >> 8) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT16_23,
+ (HPD_TIME >> 16) & 0xFF);
+ if (ret < 0)
+ return ret;
+
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
@@ -1642,6 +1656,7 @@ static int anx7625_parse_dt(struct device *dev,
anx7625_get_swing_setting(dev, pdata);
pdata->is_dpi = 0; /* default dsi mode */
+ of_node_put(pdata->mipi_host_node);
pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
if (!pdata->mipi_host_node) {
DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
@@ -1796,8 +1811,13 @@ static int anx7625_audio_hw_params(struct device *dev, void *data,
int wl, ch, rate;
int ret = 0;
- if (fmt->fmt != HDMI_DSP_A) {
- DRM_DEV_ERROR(dev, "only supports DSP_A\n");
+ if (anx7625_sink_detect(ctx) == connector_status_disconnected) {
+ DRM_DEV_DEBUG_DRIVER(dev, "DP not connected\n");
+ return 0;
+ }
+
+ if (fmt->fmt != HDMI_DSP_A && fmt->fmt != HDMI_I2S) {
+ DRM_DEV_ERROR(dev, "only supports DSP_A & I2S\n");
return -EINVAL;
}
@@ -1805,10 +1825,16 @@ static int anx7625_audio_hw_params(struct device *dev, void *data,
params->sample_rate, params->sample_width,
params->cea.channels);
- ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CHANNEL_STATUS_6,
- ~I2S_SLAVE_MODE,
- TDM_SLAVE_MODE);
+ if (fmt->fmt == HDMI_DSP_A)
+ ret = anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6,
+ ~I2S_SLAVE_MODE,
+ TDM_SLAVE_MODE);
+ else
+ ret = anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6,
+ ~TDM_SLAVE_MODE,
+ I2S_SLAVE_MODE);
/* Word length */
switch (params->sample_width) {
@@ -2689,7 +2715,7 @@ free_hdcp_wq:
return ret;
}
-static int anx7625_i2c_remove(struct i2c_client *client)
+static void anx7625_i2c_remove(struct i2c_client *client)
{
struct anx7625_data *platform = i2c_get_clientdata(client);
@@ -2709,8 +2735,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.audio_en)
anx7625_unregister_audio(platform);
-
- return 0;
}
static const struct i2c_device_id anx7625_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index e257a84db962..14f33d6be289 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -132,6 +132,12 @@
#define I2S_SLAVE_MODE 0x08
#define AUDIO_LAYOUT 0x01
+#define HPD_DET_TIMER_BIT0_7 0xea
+#define HPD_DET_TIMER_BIT8_15 0xeb
+#define HPD_DET_TIMER_BIT16_23 0xec
+/* HPD debounce time 2ms for 27M clock */
+#define HPD_TIME 54000
+
#define AUDIO_CONTROL_REGISTER 0xe6
#define TDM_TIMING_MODE 0x08
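Editor's note: as a quick check of the new HPD_TIME value, assuming the 27 MHz detect-timer clock named in the comment: 2 ms x 27,000,000 Hz = 54,000 ticks = 0x00D2F0, which the hunk above splits byte-wise across the three timer registers:

	/* 54000 == 0x00D2F0 */
	HPD_TIME & 0xFF;         /* 0xF0 -> HPD_DET_TIMER_BIT0_7   */
	(HPD_TIME >> 8) & 0xFF;  /* 0xD2 -> HPD_DET_TIMER_BIT8_15  */
	(HPD_TIME >> 16) & 0xFF; /* 0x00 -> HPD_DET_TIMER_BIT16_23 */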
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index ab63e7b11944..31442a922502 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2605,7 +2605,8 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
cancel_work_sync(&mhdp->modeset_retry_work);
- flush_scheduled_work();
+ flush_work(&mhdp->hpd_work);
+ /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
clk_disable_unprepare(mhdp->clk);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 481c86b2406e..bf920c3503aa 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -151,6 +152,8 @@ struct chipone {
struct regulator *vdd1;
struct regulator *vdd2;
struct regulator *vdd3;
+ struct clk *refclk;
+ unsigned long refclk_rate;
bool interface_i2c;
};
@@ -259,7 +262,7 @@ static void chipone_configure_pll(struct chipone *icn,
/*
* DSI byte clock frequency (input into PLL) is calculated as:
- * DSI_CLK = mode clock * bpp / dsi_data_lanes / 8
+ * DSI_CLK = HS clock / 4
*
* DPI pixel clock frequency (output from PLL) is mode clock.
*
@@ -273,8 +276,10 @@ static void chipone_configure_pll(struct chipone *icn,
* It seems the PLL input clock after applying P pre-divider have
* to be lower than 20 MHz.
*/
- fin = mode_clock * mipi_dsi_pixel_format_to_bpp(icn->dsi->format) /
- icn->dsi->lanes / 8; /* in Hz */
+ if (icn->refclk)
+ fin = icn->refclk_rate;
+ else
+ fin = icn->dsi->hs_rate / 4; /* in Hz */
/* Minimum value of P predivider for PLL input in 5..20 MHz */
p_min = clamp(DIV_ROUND_UP(fin, 20000000), 1U, 31U);
@@ -319,16 +324,18 @@ static void chipone_configure_pll(struct chipone *icn,
best_p_pot = !(best_p & 1);
dev_dbg(icn->dev,
- "PLL: P[3:0]=%d P[4]=2*%d M=%d S[7:5]=2^%d delta=%d => DSI f_in=%d Hz ; DPI f_out=%d Hz\n",
+ "PLL: P[3:0]=%d P[4]=2*%d M=%d S[7:5]=2^%d delta=%d => DSI f_in(%s)=%d Hz ; DPI f_out=%d Hz\n",
best_p >> best_p_pot, best_p_pot, best_m, best_s + 1,
- min_delta, fin, (fin * best_m) / (best_p << (best_s + 1)));
+ min_delta, icn->refclk ? "EXT" : "DSI", fin,
+ (fin * best_m) / (best_p << (best_s + 1)));
ref_div = PLL_REF_DIV_P(best_p >> best_p_pot) | PLL_REF_DIV_S(best_s);
if (best_p_pot) /* Prefer /2 pre-divider */
ref_div |= PLL_REF_DIV_Pe;
- /* Clock source selection fixed to MIPI DSI clock lane */
- chipone_writeb(icn, PLL_CTRL(6), PLL_CTRL_6_MIPI_CLK);
+ /* Clock source selection either external clock or MIPI DSI clock lane */
+ chipone_writeb(icn, PLL_CTRL(6),
+ icn->refclk ? PLL_CTRL_6_EXTERNAL : PLL_CTRL_6_MIPI_CLK);
chipone_writeb(icn, PLL_REF_DIV, ref_div);
chipone_writeb(icn, PLL_INT(0), best_m);
}
@@ -464,6 +471,11 @@ static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
"failed to enable VDD3 regulator: %d\n", ret);
}
+ ret = clk_prepare_enable(icn->refclk);
+ if (ret)
+ DRM_DEV_ERROR(icn->dev,
+ "failed to enable RECLK clock: %d\n", ret);
+
gpiod_set_value(icn->enable_gpio, 1);
usleep_range(10000, 11000);
@@ -474,6 +486,8 @@ static void chipone_atomic_post_disable(struct drm_bridge *bridge,
{
struct chipone *icn = bridge_to_chipone(bridge);
+ clk_disable_unprepare(icn->refclk);
+
if (icn->vdd1)
regulator_disable(icn->vdd1);
@@ -515,6 +529,8 @@ static int chipone_dsi_attach(struct chipone *icn)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
+ dsi->hs_rate = 500000000;
+ dsi->lp_rate = 16000000;
ret = mipi_dsi_attach(dsi);
if (ret < 0)
@@ -617,6 +633,20 @@ static int chipone_parse_dt(struct chipone *icn)
struct device *dev = icn->dev;
int ret;
+ icn->refclk = devm_clk_get_optional(dev, "refclk");
+ if (IS_ERR(icn->refclk)) {
+ ret = PTR_ERR(icn->refclk);
+ DRM_DEV_ERROR(dev, "failed to get REFCLK clock: %d\n", ret);
+ return ret;
+ } else if (icn->refclk) {
+ icn->refclk_rate = clk_get_rate(icn->refclk);
+ if (icn->refclk_rate < 10000000 || icn->refclk_rate > 154000000) {
+ DRM_DEV_ERROR(dev, "REFCLK out of range: %ld Hz\n",
+ icn->refclk_rate);
+ return -EINVAL;
+ }
+ }
+
icn->vdd1 = devm_regulator_get_optional(dev, "vdd1");
if (IS_ERR(icn->vdd1)) {
ret = PTR_ERR(icn->vdd1);
@@ -735,14 +765,12 @@ static int chipone_i2c_probe(struct i2c_client *client,
return chipone_dsi_host_attach(icn);
}
-static int chipone_dsi_remove(struct mipi_dsi_device *dsi)
+static void chipone_dsi_remove(struct mipi_dsi_device *dsi)
{
struct chipone *icn = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&icn->bridge);
-
- return 0;
}
static const struct of_device_id chipone_of_match[] = {
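Editor's note: working through the PLL input selection above with the hs_rate of 500 MHz that this patch also sets (a sketch, not from the patch itself):

	/* No refclk: fin = 500000000 / 4 = 125000000 Hz (125 MHz).          */
	/* p_min = DIV_ROUND_UP(125000000, 20000000) = 7, so the PLL input   */
	/* after the pre-divider is ~17.86 MHz, inside the 5..20 MHz window  */
	/* noted in the comment. With an external refclk (allowed range      */
	/* 10..154 MHz), fin is simply the measured refclk rate instead.     */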
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index ba060277c3fd..b94f39a86846 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -583,14 +583,12 @@ static int ch7033_probe(struct i2c_client *client,
return 0;
}
-static int ch7033_remove(struct i2c_client *client)
+static void ch7033_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv = dev_get_drvdata(dev);
drm_bridge_remove(&priv->bridge);
-
- return 0;
}
static const struct of_device_id ch7033_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index 0f6d907432e3..fa91bdeddef0 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -159,13 +159,11 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
return 0;
}
-static int cros_ec_anx7688_bridge_remove(struct i2c_client *client)
+static void cros_ec_anx7688_bridge_remove(struct i2c_client *client)
{
struct cros_ec_anx7688 *anx7688 = i2c_get_clientdata(client);
drm_bridge_remove(&anx7688->bridge);
-
- return 0;
}
static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 4b673c4792d7..dfe4351c9bdd 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -506,6 +506,9 @@ static int it6505_read(struct it6505 *it6505, unsigned int reg_addr)
int err;
struct device *dev = &it6505->client->dev;
+ if (!it6505->powered)
+ return -ENODEV;
+
err = regmap_read(it6505->regmap, reg_addr, &value);
if (err < 0) {
dev_err(dev, "read failed reg[0x%x] err: %d", reg_addr, err);
@@ -521,6 +524,9 @@ static int it6505_write(struct it6505 *it6505, unsigned int reg_addr,
int err;
struct device *dev = &it6505->client->dev;
+ if (!it6505->powered)
+ return -ENODEV;
+
err = regmap_write(it6505->regmap, reg_addr, reg_val);
if (err < 0) {
@@ -538,6 +544,9 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
int err;
struct device *dev = &it6505->client->dev;
+ if (!it6505->powered)
+ return -ENODEV;
+
err = regmap_update_bits(it6505->regmap, reg, mask, value);
if (err < 0) {
dev_err(dev, "write reg[0x%x] = 0x%x mask = 0x%x failed err %d",
@@ -554,7 +563,7 @@ static void it6505_debug_print(struct it6505 *it6505, unsigned int reg,
struct device *dev = &it6505->client->dev;
int val;
- if (likely(!(__drm_debug & DRM_UT_DRIVER)))
+ if (!drm_debug_enabled(DRM_UT_DRIVER))
return;
val = it6505_read(it6505, reg);
@@ -682,7 +691,7 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_DEV_DEBUG_DRIVER(dev, "hactive_start:%d, vactive_start:%d",
hdes, vdes);
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER,
ENABLE_PCLK_COUNTER);
usleep_range(10000, 15000);
@@ -699,7 +708,7 @@ static void it6505_calc_video_info(struct it6505 *it6505)
return;
}
- sum /= 10;
+ sum /= 3;
pclk = 13500 * 2048 / sum;
it6505->video_info.clock = pclk;
it6505->video_info.hdisplay = hdew;
@@ -2341,8 +2350,6 @@ static void it6505_irq_hpd(struct it6505 *it6505)
if (!it6505_get_video_status(it6505))
it6505_video_reset(it6505);
-
- it6505_calc_video_info(it6505);
} else {
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
@@ -2559,13 +2566,12 @@ static int it6505_poweron(struct it6505 *it6505)
usleep_range(10000, 20000);
}
+ it6505->powered = true;
it6505_reset_logic(it6505);
it6505_int_mask_enable(it6505);
it6505_init(it6505);
it6505_lane_off(it6505);
- it6505->powered = true;
-
return 0;
}
@@ -2954,6 +2960,9 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
+
+ it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
+ DP_SET_POWER_D0);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -2965,9 +2974,9 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_video_disable(it6505);
it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
DP_SET_POWER_D3);
+ it6505_video_disable(it6505);
}
}
@@ -3044,7 +3053,7 @@ static int it6505_init_pdata(struct it6505 *it6505)
return PTR_ERR(pdata->ovdd);
}
- pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pdata->gpiod_reset)) {
dev_err(dev, "gpiod_reset gpio not found");
return PTR_ERR(pdata->gpiod_reset);
@@ -3316,7 +3325,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
return 0;
}
-static int it6505_i2c_remove(struct i2c_client *client)
+static void it6505_i2c_remove(struct i2c_client *client)
{
struct it6505 *it6505 = i2c_get_clientdata(client);
@@ -3324,8 +3333,6 @@ static int it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
-
- return 0;
}
static const struct i2c_device_id it6505_id[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 44278d54d35d..4f6f1deba28c 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1623,15 +1623,13 @@ static int it66121_probe(struct i2c_client *client,
return 0;
}
-static int it66121_remove(struct i2c_client *client)
+static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
-
- return 0;
}
static const struct of_device_id it66121_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 28bad30dc4e5..a98efef0ba0e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -165,30 +165,38 @@ static int lt8912_write_rxlogicres_config(struct lt8912 *lt)
return ret;
};
+/* enable LVDS output with some hardcoded configuration, not required for the HDMI output */
static int lt8912_write_lvds_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
+ // lvds power up
{0x44, 0x30},
{0x51, 0x05},
- {0x50, 0x24},
- {0x51, 0x2d},
- {0x52, 0x04},
- {0x69, 0x0e},
+
+ // core pll bypass
+ {0x50, 0x24}, // cp=50uA
+ {0x51, 0x2d}, // Pix_clk as reference, second order passive LPF PLL
+ {0x52, 0x04}, // loopdiv=0, use second-order PLL
+ {0x69, 0x0e}, // CP_PRESET_DIV_RATIO
{0x69, 0x8e},
{0x6a, 0x00},
- {0x6c, 0xb8},
+ {0x6c, 0xb8}, // RGD_CP_SOFT_K_EN,RGD_CP_SOFT_K[13:8]
{0x6b, 0x51},
- {0x04, 0xfb},
+
+ {0x04, 0xfb}, // core pll reset
{0x04, 0xff},
- {0x7f, 0x00},
- {0xa8, 0x13},
- {0x02, 0xf7},
+
+ // scaler bypass
+ {0x7f, 0x00}, // disable scaler
+ {0xa8, 0x13}, // 0x13: JEIDA, 0x33: VESA
+
+ {0x02, 0xf7}, // lvds pll reset
{0x02, 0xff},
{0x03, 0xcf},
{0x03, 0xff},
};
- return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
+ return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq));
};
static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b)
@@ -268,7 +276,7 @@ static int lt8912_video_setup(struct lt8912 *lt)
u32 hactive, h_total, hpw, hfp, hbp;
u32 vactive, v_total, vpw, vfp, vbp;
u8 settle = 0x08;
- int ret;
+ int ret, hsync_activehigh, vsync_activehigh;
if (!lt)
return -EINVAL;
@@ -278,12 +286,14 @@ static int lt8912_video_setup(struct lt8912 *lt)
hpw = lt->mode.hsync_len;
hbp = lt->mode.hback_porch;
h_total = hactive + hfp + hpw + hbp;
+ hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH;
vactive = lt->mode.vactive;
vfp = lt->mode.vfront_porch;
vpw = lt->mode.vsync_len;
vbp = lt->mode.vback_porch;
v_total = vactive + vfp + vpw + vbp;
+ vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH;
if (vactive <= 600)
settle = 0x04;
@@ -317,6 +327,13 @@ static int lt8912_video_setup(struct lt8912 *lt)
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff);
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0),
+ vsync_activehigh ? BIT(0) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1),
+ hsync_activehigh ? BIT(1) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0),
+ lt->connector.display_info.is_hdmi ? BIT(0) : 0);
+
return ret;
}
@@ -714,7 +731,7 @@ err_dt_parse:
return ret;
}
-static int lt8912_remove(struct i2c_client *client)
+static void lt8912_remove(struct i2c_client *client)
{
struct lt8912 *lt = i2c_get_clientdata(client);
@@ -722,7 +739,6 @@ static int lt8912_remove(struct i2c_client *client)
drm_bridge_remove(&lt->bridge);
lt8912_free_i2c(lt);
lt8912_put_dt(lt);
- return 0;
}
static const struct of_device_id lt8912_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9a3e90427d12..933ca028d612 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -766,13 +766,11 @@ static int lt9211_probe(struct i2c_client *client,
return ret;
}
-static int lt9211_remove(struct i2c_client *client)
+static void lt9211_remove(struct i2c_client *client)
{
struct lt9211 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id lt9211_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 8a60e83482a0..7c0a99173b39 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -813,13 +813,14 @@ static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt961
drm_connector_helper_add(&lt9611->connector,
&lt9611_bridge_connector_helper_funcs);
- drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
+ drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
+
return 0;
}
@@ -1216,7 +1217,7 @@ err_of_put:
return ret;
}
-static int lt9611_remove(struct i2c_client *client)
+static void lt9611_remove(struct i2c_client *client)
{
struct lt9611 *lt9611 = i2c_get_clientdata(client);
@@ -1228,8 +1229,6 @@ static int lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi1_node);
of_node_put(lt9611->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fdf12d4c6416..fa1ee6264d92 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -978,7 +978,7 @@ err_of_put:
return ret;
}
-static int lt9611uxc_remove(struct i2c_client *client)
+static void lt9611uxc_remove(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
@@ -993,8 +993,6 @@ static int lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi1_node);
of_node_put(lt9611uxc->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611uxc_id[] = {
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 702ea803a743..39e7004de720 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -180,7 +180,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
of_node_put(bus_node);
if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
- } else if (ret) {
+ } else if (ret < 0) {
dev_err(dev, "invalid 'data-mapping' DT property\n");
return ret;
} else {
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index cce98bf2a4e7..97359f807bfc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -296,7 +296,9 @@ static void ge_b850v3_lvds_remove(void)
* This check is to avoid both the drivers
* removing the bridge in their remove() function
*/
- if (!ge_b850v3_lvds_ptr)
+ if (!ge_b850v3_lvds_ptr ||
+ !ge_b850v3_lvds_ptr->stdp2690_i2c ||
+ !ge_b850v3_lvds_ptr->stdp4028_i2c)
goto out;
drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge);
@@ -355,11 +357,9 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
return ge_b850v3_register();
}
-static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
+static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
@@ -405,11 +405,9 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
return ge_b850v3_register();
}
-static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
+static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1ab91f4e057b..0851101a8c72 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -315,13 +315,11 @@ static int ptn3460_probe(struct i2c_client *client,
return 0;
}
-static int ptn3460_remove(struct i2c_client *client)
+static void ptn3460_remove(struct i2c_client *client)
{
struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);
drm_bridge_remove(&ptn_bridge->bridge);
-
- return 0;
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 4277bf4f032b..216af76d0042 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -8,6 +8,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -367,6 +368,44 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
}
EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
+static void drmm_drm_panel_bridge_release(struct drm_device *drm, void *ptr)
+{
+ struct drm_bridge *bridge = ptr;
+
+ drm_panel_bridge_remove(bridge);
+}
+
+/**
+ * drmm_panel_bridge_add - Creates a DRM-managed &drm_bridge and
+ * &drm_connector that just calls the
+ * appropriate functions from &drm_panel.
+ *
+ * @drm: DRM device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ *
+ * This is the DRM-managed version of drm_panel_bridge_add() which
+ * automatically calls drm_panel_bridge_remove() when @drm is cleaned
+ * up.
+ */
+struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
+ struct drm_panel *panel)
+{
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = drm_panel_bridge_add_typed(panel, panel->connector_type);
+ if (IS_ERR(bridge))
+ return bridge;
+
+ ret = drmm_add_action_or_reset(drm, drmm_drm_panel_bridge_release,
+ bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drmm_panel_bridge_add);
+
/**
* drm_panel_bridge_connector - return the connector for the panel bridge
* @bridge: The drm_bridge.
@@ -420,4 +459,39 @@ struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
return bridge;
}
EXPORT_SYMBOL(devm_drm_of_get_bridge);
+
+/**
+ * drmm_of_get_bridge - Return next bridge in the chain
+ * @drm: device to tie the bridge lifetime to
+ * @np: device tree node containing encoder output ports
+ * @port: port in the device tree node
+ * @endpoint: endpoint in the device tree node
+ *
+ * Given a DT node's port and endpoint number, finds the connected node
+ * and returns the associated bridge if any, or creates and returns a
+ * drm panel bridge instance if a panel is connected.
+ *
+ * Returns a drmm managed pointer to the bridge if successful, or an error
+ * pointer otherwise.
+ */
+struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
+ struct device_node *np,
+ u32 port, u32 endpoint)
+{
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(np, port, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (panel)
+ bridge = drmm_panel_bridge_add(drm, panel);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drmm_of_get_bridge);
+
#endif
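Editor's note: a minimal, hypothetical caller of the new DRM-managed helper (names are illustrative, not part of this patch):

	static int example_encoder_init(struct drm_device *drm,
					struct drm_encoder *encoder,
					struct device_node *np)
	{
		struct drm_bridge *bridge;

		/* port 1, endpoint 0 on the encoder's OF node */
		bridge = drmm_of_get_bridge(drm, np, 1, 0);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);

		/* No matching remove: teardown is tied to @drm's managed actions. */
		return drm_bridge_attach(encoder, bridge, NULL, 0);
	}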
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index b5750e5f71d7..309de802863d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -520,14 +520,12 @@ static int ps8622_probe(struct i2c_client *client,
return 0;
}
-static int ps8622_remove(struct i2c_client *client)
+static void ps8622_remove(struct i2c_client *client)
{
struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
backlight_device_unregister(ps8622->bl);
drm_bridge_remove(&ps8622->bridge);
-
- return 0;
}
static const struct i2c_device_id ps8622_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 31e88cb39f8a..d7483c13c569 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -375,6 +375,11 @@ static int __maybe_unused ps8640_resume(struct device *dev)
gpiod_set_value(ps_bridge->gpio_reset, 1);
usleep_range(2000, 2500);
gpiod_set_value(ps_bridge->gpio_reset, 0);
+ /* Double reset for T4 and T5 */
+ msleep(50);
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ msleep(50);
+ gpiod_set_value(ps_bridge->gpio_reset, 0);
/*
* Mystery 200 ms delay for the "MCU to be ready". It's unclear if
@@ -631,8 +636,8 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps_bridge)
return -ENOMEM;
- ps_bridge->supplies[0].supply = "vdd33";
- ps_bridge->supplies[1].supply = "vdd12";
+ ps_bridge->supplies[0].supply = "vdd12";
+ ps_bridge->supplies[1].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
ps_bridge->supplies);
if (ret)
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 7ab38d734ad6..878fb7d3732b 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1145,7 +1145,7 @@ static int sii902x_probe(struct i2c_client *client,
return ret;
}
-static int sii902x_remove(struct i2c_client *client)
+static void sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
@@ -1154,8 +1154,6 @@ static int sii902x_remove(struct i2c_client *client)
drm_bridge_remove(&sii902x->bridge);
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
-
- return 0;
}
static const struct of_device_id sii902x_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 15c98a7bd81c..5b3061d4b5c3 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -936,14 +936,12 @@ static int sii9234_probe(struct i2c_client *client,
return 0;
}
-static int sii9234_remove(struct i2c_client *client)
+static void sii9234_remove(struct i2c_client *client)
{
struct sii9234 *ctx = i2c_get_clientdata(client);
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii9234_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index ab0bce4a988c..511982a1cedb 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2346,7 +2346,7 @@ static int sii8620_probe(struct i2c_client *client,
return 0;
}
-static int sii8620_remove(struct i2c_client *client)
+static void sii8620_remove(struct i2c_client *client)
{
struct sii8620 *ctx = i2c_get_clientdata(client);
@@ -2360,8 +2360,6 @@ static int sii8620_remove(struct i2c_client *client)
sii8620_cable_out(ctx);
}
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii8620_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 7d2ed0ed2fe2..4efb62bcdb63 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -542,8 +542,8 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
- strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
+ strscpy(card->driver, DRIVER_NAME, sizeof(card->driver));
+ strscpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
"%s rev 0x%02x, irq %d", card->shortname, revision,
data->irq);
@@ -561,7 +561,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
dw->pcm = pcm;
pcm->private_data = dw;
- strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
+ strscpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);
/*
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 25a60eb4d67c..40d8ca37f5bc 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3096,6 +3096,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
+ enum drm_connector_status status = connector_status_unknown;
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
@@ -3134,13 +3135,15 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
cec_notifier_phys_addr_invalidate(hdmi->cec_notifier);
mutex_unlock(&hdmi->cec_notifier_mutex);
}
- }
- if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
- enum drm_connector_status status = phy_int_pol & HDMI_PHY_HPD
- ? connector_status_connected
- : connector_status_disconnected;
+ if (phy_stat & HDMI_PHY_HPD)
+ status = connector_status_connected;
+
+ if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE)))
+ status = connector_status_disconnected;
+ }
+ if (status != connector_status_unknown) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
status == connector_status_connected ?
"plugin" : "plugout");
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 40439da4db49..7f4fce1aa998 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -241,14 +241,12 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int tc358762_remove(struct mipi_dsi_device *dsi)
+static void tc358762_remove(struct mipi_dsi_device *dsi)
{
struct tc358762 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id tc358762_of_match[] = {
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index fdfb14aca926..53259c12d777 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -381,14 +381,12 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int tc358764_remove(struct mipi_dsi_device *dsi)
+static void tc358764_remove(struct mipi_dsi_device *dsi)
{
struct tc358764 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id tc358764_of_match[] = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 02bd757a8987..2a58eb271f70 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -889,6 +889,7 @@ static int tc_set_edp_video_mode(struct tc_data *tc,
u32 dp0_syncval;
u32 bits_per_pixel = 24;
u32 in_bw, out_bw;
+ u32 dpipxlfmt;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
@@ -938,10 +939,15 @@ static int tc_set_edp_video_mode(struct tc_data *tc,
if (ret)
return ret;
- ret = regmap_write(tc->regmap, DPIPXLFMT,
- VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
- DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 |
- DPI_BPP_RGB888);
+ dpipxlfmt = DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dpipxlfmt |= VS_POL_ACTIVE_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dpipxlfmt |= HS_POL_ACTIVE_LOW;
+
+ ret = regmap_write(tc->regmap, DPIPXLFMT, dpipxlfmt);
if (ret)
return ret;
@@ -1244,7 +1250,13 @@ static int tc_main_link_disable(struct tc_data *tc)
if (ret)
return ret;
- return regmap_write(tc->regmap, DP0CTL, 0);
+ ret = regmap_write(tc->regmap, DP0CTL, 0);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(tc->regmap, DP_PHY_CTRL,
+ PHY_M0_RST | PHY_M1_RST | PHY_M0_EN,
+ PHY_M0_RST | PHY_M1_RST);
}
static int tc_dsi_rx_enable(struct tc_data *tc)
@@ -1252,10 +1264,10 @@ static int tc_dsi_rx_enable(struct tc_data *tc)
u32 value;
int ret;
- regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
+ regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 5);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
@@ -1496,41 +1508,16 @@ tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
dev_err(tc->dev, "main link disable error: %d\n", ret);
}
-static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj)
-{
- /* Fixup sync polarities, both hsync and vsync are active low */
- adj->flags = mode->flags;
- adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
- adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-
- return true;
-}
-
-static int tc_common_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- const unsigned int max_khz)
-{
- tc_bridge_mode_fixup(bridge, &crtc_state->mode,
- &crtc_state->adjusted_mode);
-
- if (crtc_state->adjusted_mode.clock > max_khz)
- return -EINVAL;
-
- return 0;
-}
-
static int tc_dpi_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
/* DSI->DPI interface clock limitation: upto 100 MHz */
- return tc_common_atomic_check(bridge, bridge_state, crtc_state,
- conn_state, 100000);
+ if (crtc_state->adjusted_mode.clock > 100000)
+ return -EINVAL;
+
+ return 0;
}
static int tc_edp_atomic_check(struct drm_bridge *bridge,
@@ -1539,8 +1526,10 @@ static int tc_edp_atomic_check(struct drm_bridge *bridge,
struct drm_connector_state *conn_state)
{
/* DPI->(e)DP interface clock limitation: upto 154 MHz */
- return tc_common_atomic_check(bridge, bridge_state, crtc_state,
- conn_state, 154000);
+ if (crtc_state->adjusted_mode.clock > 154000)
+ return -EINVAL;
+
+ return 0;
}
static enum drm_mode_status
@@ -1783,7 +1772,6 @@ static const struct drm_bridge_funcs tc_edp_bridge_funcs = {
.atomic_check = tc_edp_atomic_check,
.atomic_enable = tc_edp_bridge_atomic_enable,
.atomic_disable = tc_edp_bridge_atomic_disable,
- .mode_fixup = tc_bridge_mode_fixup,
.detect = tc_bridge_detect,
.get_edid = tc_get_edid,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -1925,22 +1913,23 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
{
struct device *dev = tc->dev;
+ struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
/* port@1 is the DPI input/output port */
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
if (ret && ret != -ENODEV)
return ret;
if (panel) {
- struct drm_bridge *panel_bridge;
-
- panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
- tc->panel_bridge = panel_bridge;
+ if (bridge) {
+ tc->panel_bridge = bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
tc->bridge.funcs = &tc_dpi_bridge_funcs;
@@ -2010,9 +1999,10 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
for_each_endpoint_of_node(dev->of_node, node) {
of_graph_parse_endpoint(node, &endpoint);
- if (endpoint.port > 2)
+ if (endpoint.port > 2) {
+ of_node_put(node);
return -EINVAL;
-
+ }
mode |= BIT(endpoint.port);
}
@@ -2194,13 +2184,11 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index fd585bf925fe..4c4b77ce8aba 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1072,13 +1072,11 @@ static int tc358768_i2c_probe(struct i2c_client *client,
return mipi_dsi_host_register(&priv->dsi_host);
}
-static int tc358768_i2c_remove(struct i2c_client *client)
+static void tc358768_i2c_remove(struct i2c_client *client)
{
struct tc358768_priv *priv = i2c_get_clientdata(client);
mipi_dsi_host_unregister(&priv->dsi_host);
-
- return 0;
}
static struct i2c_driver tc358768_driver = {
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index f1c6e62b0e1d..02dc12b8151e 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -704,13 +704,11 @@ err_bridge_remove:
return ret;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358775_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index cef454862b67..186a9e2ff24d 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -379,14 +379,12 @@ err_remove_bridge:
return ret;
}
-static int dlpc3433_remove(struct i2c_client *client)
+static void dlpc3433_remove(struct i2c_client *client)
{
struct dlpc *dlpc = i2c_get_clientdata(client);
drm_bridge_remove(&dlpc->bridge);
of_node_put(dlpc->host_node);
-
- return 0;
}
static const struct i2c_device_id dlpc3433_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 14e7aa77e758..7ba9467fff12 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -708,13 +708,11 @@ err_remove_bridge:
return ret;
}
-static int sn65dsi83_remove(struct i2c_client *client)
+static void sn65dsi83_remove(struct i2c_client *client)
{
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id sn65dsi83_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index d6dd4d99a229..3c3561942eb6 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -68,6 +69,7 @@
#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
+#define HPD_DEBOUNCED_STATE BIT(4)
#define SN_GPIO_IO_REG 0x5E
#define SN_GPIO_INPUT_SHIFT 4
#define SN_GPIO_OUTPUT_SHIFT 0
@@ -92,6 +94,8 @@
#define SN_DATARATE_CONFIG_REG 0x94
#define DP_DATARATE_MASK GENMASK(7, 5)
#define DP_DATARATE(x) ((x) << 5)
+#define SN_TRAINING_SETTING_REG 0x95
+#define SCRAMBLE_DISABLE BIT(4)
#define SN_ML_TX_MODE_REG 0x96
#define ML_TX_MAIN_LINK_OFF 0
#define ML_TX_NORMAL_MODE BIT(0)
@@ -698,11 +702,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
pdata->aux.drm_dev = bridge->dev;
ret = drm_dp_aux_register(&pdata->aux);
if (ret < 0) {
@@ -710,15 +709,18 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- /* We never want the next bridge to *also* create a connector: */
- flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR;
-
- /* Attach the next bridge */
+ /*
+ * Attach the next bridge.
+ * We never want the next bridge to *also* create a connector.
+ */
ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
- &pdata->bridge, flags);
+ &pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
pdata->connector = drm_bridge_connector_init(pdata->bridge.dev,
pdata->bridge.encoder);
if (IS_ERR(pdata->connector)) {
@@ -749,6 +751,29 @@ ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
+ /*
+ * The front and back porch registers are 8 bits, and pulse width
+ * registers are 15 bits, so reject any modes with larger periods.
+ */
+
+ if ((mode->hsync_start - mode->hdisplay) > 0xff)
+ return MODE_HBLANK_WIDE;
+
+ if ((mode->vsync_start - mode->vdisplay) > 0xff)
+ return MODE_VBLANK_WIDE;
+
+ if ((mode->hsync_end - mode->hsync_start) > 0x7fff)
+ return MODE_HSYNC_WIDE;
+
+ if ((mode->vsync_end - mode->vsync_start) > 0x7fff)
+ return MODE_VSYNC_WIDE;
+
+ if ((mode->htotal - mode->hsync_end) > 0xff)
+ return MODE_HBLANK_WIDE;
+
+ if ((mode->vtotal - mode->vsync_end) > 0xff)
+ return MODE_VBLANK_WIDE;
+
return MODE_OK;
}
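Editor's note: as a worked check (not part of the patch), the standard CEA 1920x1080@60 timing passes these new limits comfortably: hfront = 2008 - 1920 = 88, hsync = 2052 - 2008 = 44, hback = 2200 - 2052 = 148; vfront = 1084 - 1080 = 4, vsync = 1089 - 1084 = 5, vback = 1125 - 1089 = 36 -- all within the 8-bit porch and 15-bit sync-width register limits described above.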
@@ -779,9 +804,9 @@ static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
}
-static unsigned int ti_sn_bridge_get_bpp(struct ti_sn65dsi86 *pdata)
+static unsigned int ti_sn_bridge_get_bpp(struct drm_connector *connector)
{
- if (pdata->connector->display_info.bpc <= 6)
+ if (connector->display_info.bpc <= 6)
return 18;
else
return 24;
@@ -796,7 +821,7 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
@@ -804,7 +829,7 @@ static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata)
&pdata->bridge.encoder->crtc->state->adjusted_mode;
/* Calculate minimum bit rate based on our pixel clock. */
- bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
+ bit_rate_khz = mode->clock * bpp;
/* Calculate minimum DP data rate, taking 80% as per DP spec */
dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
@@ -1016,12 +1041,21 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct drm_connector *connector;
const char *last_err_str = "No supported DP rate";
unsigned int valid_rates;
int dp_rate_idx;
unsigned int val;
int ret = -EINVAL;
int max_dp_lanes;
+ unsigned int bpp;
+
+ connector = drm_atomic_get_new_connector_for_encoder(old_bridge_state->base.state,
+ bridge->encoder);
+ if (!connector) {
+ dev_err_ratelimited(pdata->dev, "Could not get the connector\n");
+ return;
+ }
max_dp_lanes = ti_sn_get_max_lanes(pdata);
pdata->dp_lanes = min(pdata->dp_lanes, max_dp_lanes);
@@ -1040,15 +1074,27 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
- * this method is enabled by default. An eDP panel must support this
+ * this method is enabled for eDP panels. An eDP panel must support this
* authentication method. We need to enable this method in the eDP panel
* at DisplayPort address 0x0010A prior to link training.
+ *
+ * As only ASSR is supported by SN65DSI86, for full DisplayPort displays
+ * we need to disable the scrambler.
*/
- drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
- DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_eDP) {
+ drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ regmap_update_bits(pdata->regmap, SN_TRAINING_SETTING_REG,
+ SCRAMBLE_DISABLE, 0);
+ } else {
+ regmap_update_bits(pdata->regmap, SN_TRAINING_SETTING_REG,
+ SCRAMBLE_DISABLE, SCRAMBLE_DISABLE);
+ }
+
+ bpp = ti_sn_bridge_get_bpp(connector);
/* Set the DP output format (18 bpp or 24 bpp) */
- val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+ val = bpp == 18 ? BPP_18_RGB : 0;
regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
/* DP lane config */
@@ -1059,7 +1105,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1114,10 +1160,33 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
pm_runtime_put_sync(pdata->dev);
}
+static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ int val = 0;
+
+ pm_runtime_get_sync(pdata->dev);
+ regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
+ pm_runtime_put_autosuspend(pdata->dev);
+
+ return val & HPD_DEBOUNCED_STATE ? connector_status_connected
+ : connector_status_disconnected;
+}
+
+static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+
+ return drm_get_edid(connector, &pdata->aux.ddc);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
.mode_valid = ti_sn_bridge_mode_valid,
+ .get_edid = ti_sn_bridge_get_edid,
+ .detect = ti_sn_bridge_detect,
.atomic_pre_enable = ti_sn_bridge_atomic_pre_enable,
.atomic_enable = ti_sn_bridge_atomic_enable,
.atomic_disable = ti_sn_bridge_atomic_disable,
@@ -1198,10 +1267,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
int ret;
pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
- if (IS_ERR(pdata->next_bridge)) {
- DRM_ERROR("failed to create panel bridge\n");
- return PTR_ERR(pdata->next_bridge);
- }
+ if (IS_ERR(pdata->next_bridge))
+ return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+ "failed to create panel bridge\n");
ti_sn_bridge_parse_lanes(pdata, np);
@@ -1211,6 +1279,11 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
+ pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
+ ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
+
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
+ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
drm_bridge_add(&pdata->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 401fe61217c7..b9635abbad16 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -394,11 +394,9 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return tfp410_init(&client->dev, true);
}
-static int tfp410_i2c_remove(struct i2c_client *client)
+static void tfp410_i2c_remove(struct i2c_client *client)
{
tfp410_fini(&client->dev);
-
- return 0;
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index e5bab236b3ae..9f055d9710ea 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -390,6 +390,38 @@ void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+/**
+ * drm_dp_phy_name() - Get the name of the given DP PHY
+ * @dp_phy: The DP PHY identifier
+ *
+ * Given the @dp_phy, get a user-friendly name of the DP PHY, either "DPRX" or
+ * "LTTPR <N>", or "<INVALID DP PHY>" on errors. The returned string is always
+ * non-NULL and valid.
+ *
+ * Returns: Name of the DP PHY.
+ */
+const char *drm_dp_phy_name(enum drm_dp_phy dp_phy)
+{
+ static const char * const phy_names[] = {
+ [DP_PHY_DPRX] = "DPRX",
+ [DP_PHY_LTTPR1] = "LTTPR 1",
+ [DP_PHY_LTTPR2] = "LTTPR 2",
+ [DP_PHY_LTTPR3] = "LTTPR 3",
+ [DP_PHY_LTTPR4] = "LTTPR 4",
+ [DP_PHY_LTTPR5] = "LTTPR 5",
+ [DP_PHY_LTTPR6] = "LTTPR 6",
+ [DP_PHY_LTTPR7] = "LTTPR 7",
+ [DP_PHY_LTTPR8] = "LTTPR 8",
+ };
+
+ if (dp_phy < 0 || dp_phy >= ARRAY_SIZE(phy_names) ||
+ WARN_ON(!phy_names[dp_phy]))
+ return "<INVALID DP PHY>";
+
+ return phy_names[dp_phy];
+}
+EXPORT_SYMBOL(drm_dp_phy_name);
+
void drm_dp_lttpr_link_train_clock_recovery_delay(void)
{
usleep_range(100, 200);
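Editor's note: a hypothetical caller of the new drm_dp_phy_name() helper, e.g. in a link-training debug message (sketch only, not from this patch):

	drm_dbg_kms(aux->drm_dev, "%s: clock recovery done on %s\n",
		    aux->name, drm_dp_phy_name(DP_PHY_LTTPR1)); /* -> "LTTPR 1" */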
@@ -1597,7 +1629,7 @@ static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
/*
* Calculate the length of the i2c transfer in usec, assuming
- * the i2c bus speed is as specified. Gives the the "worst"
+ * the i2c bus speed is as specified. Gives the "worst"
* case estimate, ie. successful while as long as possible.
* Doesn't account the "MOT" bit, and instead assumes each
* message includes a START, ADDRESS and STOP. Neither does it
@@ -2638,17 +2670,8 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data, u8 dp_rev)
{
int err, i;
- u8 link_config[2];
u8 test_pattern;
- link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
- link_config[1] = data->num_lanes;
- if (data->enhanced_frame_cap)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
- if (err < 0)
- return err;
-
test_pattern = data->phy_pattern;
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 57e65423e50d..ecd22c038c8c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -68,8 +68,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id,
- struct drm_dp_payload *payload);
+ int id, u8 start_slot, u8 num_slots);
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
@@ -1235,57 +1234,6 @@ build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
return 0;
}
-static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_vcpi *vcpi)
-{
- int ret, vcpi_ret;
-
- mutex_lock(&mgr->payload_lock);
- ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
- if (ret > mgr->max_payloads) {
- ret = -EINVAL;
- drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
- goto out_unlock;
- }
-
- vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
- if (vcpi_ret > mgr->max_payloads) {
- ret = -EINVAL;
- drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
- goto out_unlock;
- }
-
- set_bit(ret, &mgr->payload_mask);
- set_bit(vcpi_ret, &mgr->vcpi_mask);
- vcpi->vcpi = vcpi_ret + 1;
- mgr->proposed_vcpis[ret - 1] = vcpi;
-out_unlock:
- mutex_unlock(&mgr->payload_lock);
- return ret;
-}
-
-static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
- int vcpi)
-{
- int i;
-
- if (vcpi == 0)
- return;
-
- mutex_lock(&mgr->payload_lock);
- drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
- clear_bit(vcpi - 1, &mgr->vcpi_mask);
-
- for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i] &&
- mgr->proposed_vcpis[i]->vcpi == vcpi) {
- mgr->proposed_vcpis[i] = NULL;
- clear_bit(i + 1, &mgr->payload_mask);
- }
- }
- mutex_unlock(&mgr->payload_lock);
-}
-
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
@@ -1738,6 +1686,20 @@ drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_port_topology_ref(port, type)
#endif
+struct drm_dp_mst_atomic_payload *
+drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_atomic_payload *payload;
+
+ list_for_each_entry(payload, &state->payloads, next)
+ if (payload->port == port)
+ return payload;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb =
@@ -2496,7 +2458,7 @@ fail_put:
return ret;
}
-static void
+static int
drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
struct drm_dp_connection_status_notify *conn_stat)
{
@@ -2509,7 +2471,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
- return;
+ return 0;
if (port->connector) {
if (!port->input && conn_stat->input_port) {
@@ -2562,8 +2524,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
out:
drm_dp_mst_topology_put_port(port);
- if (dowork)
- queue_work(system_long_wq, &mstb->mgr->work);
+ return dowork;
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -3240,6 +3201,8 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status)
{
+ struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_sideband_msg_tx *txmsg;
u8 nonce[7];
int ret;
@@ -3256,6 +3219,10 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
get_random_bytes(nonce, sizeof(nonce));
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ state = to_drm_dp_mst_topology_state(mgr->base.state);
+ payload = drm_atomic_get_mst_payload_state(state, port);
+
/*
* "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
* transaction at the MST Branch device directly connected to the
@@ -3263,7 +3230,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
*/
txmsg->dst = mgr->mst_primary;
- build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
+ build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3280,6 +3247,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
out:
+ drm_modeset_unlock(&mgr->base.lock);
drm_dp_mst_topology_put_port(port);
out_get_port:
kfree(txmsg);
@@ -3288,238 +3256,162 @@ out_get_port:
EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
- int id,
- struct drm_dp_payload *payload)
+ struct drm_dp_mst_atomic_payload *payload)
{
- int ret;
-
- ret = drm_dp_dpcd_write_payload(mgr, id, payload);
- if (ret < 0) {
- payload->payload_state = 0;
- return ret;
- }
- payload->payload_state = DP_PAYLOAD_LOCAL;
- return 0;
+ return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ payload->time_slots);
}
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- int id,
- struct drm_dp_payload *payload)
+ struct drm_dp_mst_atomic_payload *payload)
{
int ret;
+ struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
- ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
- if (ret < 0)
- return ret;
- payload->payload_state = DP_PAYLOAD_REMOTE;
+ if (!port)
+ return -EIO;
+
+ ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- int id,
- struct drm_dp_payload *payload)
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload)
{
drm_dbg_kms(mgr->dev, "\n");
+
/* it's okay for these to fail */
- if (port) {
- drm_dp_payload_send_msg(mgr, port, id, 0);
- }
+ drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
+ drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
- drm_dp_dpcd_write_payload(mgr, id, payload);
- payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
- return 0;
-}
-
-static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
- int id,
- struct drm_dp_payload *payload)
-{
- payload->payload_state = 0;
return 0;
}
/**
- * drm_dp_update_payload_part1() - Execute payload update part 1
- * @mgr: manager to use.
- * @start_slot: this is the cur slot
- *
- * NOTE: start_slot is a temporary workaround for non-atomic drivers,
- * this will be removed when non-atomic mst helpers are moved out of the helper
+ * drm_dp_add_payload_part1() - Execute payload update part 1
+ * @mgr: Manager to use.
+ * @mst_state: The MST atomic state
+ * @payload: The payload to write
*
- * This iterates over all proposed virtual channels, and tries to
- * allocate space in the link for them. For 0->slots transitions,
- * this step just writes the VCPI to the MST device. For slots->0
- * transitions, this writes the updated VCPIs and removes the
- * remote VC payloads.
+ * Determines the starting time slot for the given payload, and programs the VCPI for this payload
+ * into hardware. After calling this, the driver should generate ACT and payload packets.
*
- * after calling this the driver should generate ACT and payload
- * packets.
+ * Returns: 0 on success, error code on failure. In the event that this fails,
+ * @payload.vc_start_slot will also be set to -1.
*/
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
+int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload)
{
- struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
- int i, j;
- int cur_slots = start_slot;
- bool skip;
+ int ret;
- mutex_lock(&mgr->payload_lock);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
- struct drm_dp_payload *payload = &mgr->payloads[i];
- bool put_port = false;
+ port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
+ if (!port)
+ return 0;
- /* solve the current payloads - compare to the hw ones
- - update the hw view */
- req_payload.start_slot = cur_slots;
- if (vcpi) {
- port = container_of(vcpi, struct drm_dp_mst_port,
- vcpi);
+ if (mgr->payload_count == 0)
+ mgr->next_start_slot = mst_state->start_slot;
- mutex_lock(&mgr->lock);
- skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
- mutex_unlock(&mgr->lock);
+ payload->vc_start_slot = mgr->next_start_slot;
- if (skip) {
- drm_dbg_kms(mgr->dev,
- "Virtual channel %d is not in current topology\n",
- i);
- continue;
- }
- /* Validated ports don't matter if we're releasing
- * VCPI
- */
- if (vcpi->num_slots) {
- port = drm_dp_mst_topology_get_port_validated(
- mgr, port);
- if (!port) {
- if (vcpi->num_slots == payload->num_slots) {
- cur_slots += vcpi->num_slots;
- payload->start_slot = req_payload.start_slot;
- continue;
- } else {
- drm_dbg_kms(mgr->dev,
- "Fail:set payload to invalid sink");
- mutex_unlock(&mgr->payload_lock);
- return -EINVAL;
- }
- }
- put_port = true;
- }
+ ret = drm_dp_create_payload_step1(mgr, payload);
+ drm_dp_mst_topology_put_port(port);
+ if (ret < 0) {
+ drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
+ payload->port, ret);
+ payload->vc_start_slot = -1;
+ return ret;
+ }
- req_payload.num_slots = vcpi->num_slots;
- req_payload.vcpi = vcpi->vcpi;
- } else {
- port = NULL;
- req_payload.num_slots = 0;
- }
+ mgr->payload_count++;
+ mgr->next_start_slot += payload->time_slots;
- payload->start_slot = req_payload.start_slot;
- /* work out what is required to happen with this payload */
- if (payload->num_slots != req_payload.num_slots) {
-
- /* need to push an update for this payload */
- if (req_payload.num_slots) {
- drm_dp_create_payload_step1(mgr, vcpi->vcpi,
- &req_payload);
- payload->num_slots = req_payload.num_slots;
- payload->vcpi = req_payload.vcpi;
-
- } else if (payload->num_slots) {
- payload->num_slots = 0;
- drm_dp_destroy_payload_step1(mgr, port,
- payload->vcpi,
- payload);
- req_payload.payload_state =
- payload->payload_state;
- payload->start_slot = 0;
- }
- payload->payload_state = req_payload.payload_state;
- }
- cur_slots += req_payload.num_slots;
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_add_payload_part1);
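/*
 * Commit-time sketch (illustrative only, "example_" names are hypothetical):
 * part 1 picks the start slot and writes the DPCD payload table; the driver
 * then programs its own stream registers and triggers ACT before part 2.
 */
static void example_enable_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_dp_mst_topology_state *mst_state,
					 struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, port);

	if (!payload || drm_dp_add_payload_part1(mgr, mst_state, payload))
		return;	/* on failure payload->vc_start_slot is set back to -1 */

	/* ... program source payload/VC registers, trigger ACT ... */
}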
- if (put_port)
- drm_dp_mst_topology_put_port(port);
- }
+/**
+ * drm_dp_remove_payload() - Remove an MST payload
+ * @mgr: Manager to use.
+ * @mst_state: The MST atomic state
+ * @payload: The payload to write
+ *
+ * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
+ * the starting time slots of all other payloads which would have been shifted towards the start of
+ * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
+ */
+void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload)
+{
+ struct drm_dp_mst_atomic_payload *pos;
+ bool send_remove = false;
- for (i = 0; i < mgr->max_payloads; /* do nothing */) {
- if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
- i++;
- continue;
- }
+ /* We failed to make the payload, so nothing to do */
+ if (payload->vc_start_slot == -1)
+ return;
- drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
- for (j = i; j < mgr->max_payloads - 1; j++) {
- mgr->payloads[j] = mgr->payloads[j + 1];
- mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
+ mutex_lock(&mgr->lock);
+ send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
- if (mgr->proposed_vcpis[j] &&
- mgr->proposed_vcpis[j]->num_slots) {
- set_bit(j + 1, &mgr->payload_mask);
- } else {
- clear_bit(j + 1, &mgr->payload_mask);
- }
- }
+ if (send_remove)
+ drm_dp_destroy_payload_step1(mgr, mst_state, payload);
+ else
+ drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
+ payload->vcpi);
- memset(&mgr->payloads[mgr->max_payloads - 1], 0,
- sizeof(struct drm_dp_payload));
- mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
- clear_bit(mgr->max_payloads, &mgr->payload_mask);
+ list_for_each_entry(pos, &mst_state->payloads, next) {
+ if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
+ pos->vc_start_slot -= payload->time_slots;
}
- mutex_unlock(&mgr->payload_lock);
+ payload->vc_start_slot = -1;
- return 0;
+ mgr->payload_count--;
+ mgr->next_start_slot -= payload->time_slots;
}
-EXPORT_SYMBOL(drm_dp_update_payload_part1);
+EXPORT_SYMBOL(drm_dp_remove_payload);
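/*
 * Disable-path sketch (hypothetical helper): drops the payload from the DPCD
 * table and sideband where applicable, and lets the helper shift the start
 * slots of the remaining payloads so the driver can reprogram them afterwards.
 */
static void example_disable_payload(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_topology_state *mst_state,
				    struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, port);

	if (payload)
		drm_dp_remove_payload(mgr, mst_state, payload);

	/* ... update source payload registers and trigger ACT, as for adds ... */
}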
/**
- * drm_dp_update_payload_part2() - Execute payload update part 2
- * @mgr: manager to use.
+ * drm_dp_add_payload_part2() - Execute payload update part 2
+ * @mgr: Manager to use.
+ * @state: The global atomic state
+ * @payload: The payload to update
+ *
+ * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
+ * function will send the sideband messages to finish allocating this payload.
*
- * This iterates over all proposed virtual channels, and tries to
- * allocate space in the link for them. For 0->slots transitions,
- * this step writes the remote VC payload commands. For slots->0
- * this just resets some internal state.
+ * Returns: 0 on success, negative error code on failure.
*/
-int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_atomic_state *state,
+ struct drm_dp_mst_atomic_payload *payload)
{
- struct drm_dp_mst_port *port;
- int i;
int ret = 0;
- bool skip;
- mutex_lock(&mgr->payload_lock);
- for (i = 0; i < mgr->max_payloads; i++) {
-
- if (!mgr->proposed_vcpis[i])
- continue;
-
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-
- mutex_lock(&mgr->lock);
- skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
- mutex_unlock(&mgr->lock);
-
- if (skip)
- continue;
+ /* Skip failed payloads */
+ if (payload->vc_start_slot == -1) {
+ drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ payload->port->connector->name);
+ return -EIO;
+ }
- drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
- if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
- ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
- } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
- ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
- }
- if (ret) {
- mutex_unlock(&mgr->payload_lock);
- return ret;
- }
+ ret = drm_dp_create_payload_step2(mgr, payload);
+ if (ret < 0) {
+ if (!payload->delete)
+ drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
+ payload->port, ret);
+ else
+ drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
+ payload->port, ret);
}
- mutex_unlock(&mgr->payload_lock);
- return 0;
+
+ return ret;
}
-EXPORT_SYMBOL(drm_dp_update_payload_part2);
+EXPORT_SYMBOL(drm_dp_add_payload_part2);
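/*
 * Sketch of the second half of payload bring-up (hypothetical names): called
 * after ACT has been handled, with the global atomic state being committed.
 */
static void example_enable_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_atomic_state *state,
					 struct drm_dp_mst_atomic_payload *payload)
{
	int ret = drm_dp_add_payload_part2(mgr, state, payload);

	if (ret)
		drm_dbg_kms(state->dev, "finishing payload allocation failed: %d\n", ret);
}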
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
@@ -3699,7 +3591,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int ret = 0;
struct drm_dp_mst_branch *mstb = NULL;
- mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -3707,10 +3598,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
- struct drm_dp_payload reset_pay;
- int lane_count;
- int link_rate;
-
WARN_ON(mgr->mst_primary);
/* get dpcd info */
@@ -3721,16 +3608,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
- link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
- link_rate,
- lane_count);
- if (mgr->pbn_div == 0) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
/* add initial branch device at LCT 1 */
mstb = drm_dp_add_mst_branch_device(1, NULL);
if (mstb == NULL) {
@@ -3750,9 +3627,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
if (ret < 0)
goto out_unlock;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ /* Write reset payload */
+ drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
queue_work(system_long_wq, &mgr->work);
@@ -3764,19 +3640,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- memset(mgr->payloads, 0,
- mgr->max_payloads * sizeof(mgr->payloads[0]));
- memset(mgr->proposed_vcpis, 0,
- mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
- mgr->payload_mask = 0;
- set_bit(0, &mgr->payload_mask);
- mgr->vcpi_mask = 0;
mgr->payload_id_table_cleared = false;
}
out_unlock:
mutex_unlock(&mgr->lock);
- mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
@@ -4047,7 +3915,7 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb = NULL;
struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
- bool hotplug = false;
+ bool hotplug = false, dowork = false;
if (hdr->broadcast) {
const u8 *guid = NULL;
@@ -4070,11 +3938,14 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
hotplug = true;
}
drm_dp_mst_topology_put_mstb(mstb);
+
+ if (dowork)
+ queue_work(system_long_wq, &mgr->work);
return hotplug;
}
@@ -4293,341 +4164,352 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
EXPORT_SYMBOL(drm_dp_mst_get_edid);
/**
- * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
- * @mgr: manager to use
- * @pbn: payload bandwidth to convert into slots.
- *
- * Calculate the number of VCPI slots that will be required for the given PBN
- * value. This function is deprecated, and should not be used in atomic
- * drivers.
- *
- * RETURNS:
- * The total slots required for this port, or error.
- */
-int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
- int pbn)
-{
- int num_slots;
-
- num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-
- /* max. time slots - one slot for MTP header */
- if (num_slots > 63)
- return -ENOSPC;
- return num_slots;
-}
-EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
-
-static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_vcpi *vcpi, int pbn, int slots)
-{
- int ret;
-
- vcpi->pbn = pbn;
- vcpi->aligned_pbn = slots * mgr->pbn_div;
- vcpi->num_slots = slots;
-
- ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-/**
- * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
+ * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
* @state: global atomic state
* @mgr: MST topology manager for the port
- * @port: port to find vcpi slots for
+ * @port: port to find time slots for
* @pbn: bandwidth required for the mode in PBN
- * @pbn_div: divider for DSC mode that takes FEC into account
*
- * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
- * may have had. Any atomic drivers which support MST must call this function
- * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
- * current VCPI allocation for the new state, but only when
- * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
- * to ensure compatibility with userspace applications that still use the
- * legacy modesetting UAPI.
+ * Allocates time slots to @port, replacing any previous time slot allocations it may
+ * have had. Any atomic drivers which support MST must call this function in
+ * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
+ * change the current time slot allocation for the new state, and ensure the MST
+ * atomic state is added whenever the state of payloads in the topology changes.
*
* Allocations set by this function are not checked against the bandwidth
* restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
*
* Additionally, it is OK to call this function multiple times on the same
* @port as needed. It is not OK however, to call this function and
- * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ * drm_dp_atomic_release_time_slots() in the same atomic check phase.
*
* See also:
- * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_atomic_release_time_slots()
* drm_dp_mst_atomic_check()
*
* Returns:
* Total slots in the atomic state assigned for this port, or a negative error
* code if the port no longer exists
*/
-int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn,
- int pbn_div)
+ struct drm_dp_mst_port *port, int pbn)
{
struct drm_dp_mst_topology_state *topology_state;
- struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, prev_bw, req_slots;
+ struct drm_dp_mst_atomic_payload *payload = NULL;
+ struct drm_connector_state *conn_state;
+ int prev_slots = 0, prev_bw = 0, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- /* Find the current allocation for this port, if any */
- list_for_each_entry(pos, &topology_state->vcpis, next) {
- if (pos->port == port) {
- vcpi = pos;
- prev_slots = vcpi->vcpi;
- prev_bw = vcpi->pbn;
+ conn_state = drm_atomic_get_new_connector_state(state, port->connector);
+ topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
- /*
- * This should never happen, unless the driver tries
- * releasing and allocating the same VCPI allocation,
- * which is an error
- */
- if (WARN_ON(!prev_slots)) {
- drm_err(mgr->dev,
- "cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
- port);
- return -EINVAL;
- }
+ /* Find the current allocation for this port, if any */
+ payload = drm_atomic_get_mst_payload_state(topology_state, port);
+ if (payload) {
+ prev_slots = payload->time_slots;
+ prev_bw = payload->pbn;
- break;
+ /*
+ * This should never happen, unless the driver tries
+ * releasing and allocating the same timeslot allocation,
+ * which is an error
+ */
+ if (drm_WARN_ON(mgr->dev, payload->delete)) {
+ drm_err(mgr->dev,
+ "cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
+ port);
+ return -EINVAL;
}
}
- if (!vcpi) {
- prev_slots = 0;
- prev_bw = 0;
- }
-
- if (pbn_div <= 0)
- pbn_div = mgr->pbn_div;
- req_slots = DIV_ROUND_UP(pbn, pbn_div);
+ req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
- drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+ drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_slots, req_slots);
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_bw, pbn);
- /* Add the new allocation to the state */
- if (!vcpi) {
- vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
- if (!vcpi)
+ /* Add the new allocation to the state, note the VCPI isn't assigned until the end */
+ if (!payload) {
+ payload = kzalloc(sizeof(*payload), GFP_KERNEL);
+ if (!payload)
return -ENOMEM;
drm_dp_mst_get_port_malloc(port);
- vcpi->port = port;
- list_add(&vcpi->next, &topology_state->vcpis);
+ payload->port = port;
+ payload->vc_start_slot = -1;
+ list_add(&payload->next, &topology_state->payloads);
}
- vcpi->vcpi = req_slots;
- vcpi->pbn = pbn;
+ payload->time_slots = req_slots;
+ payload->pbn = pbn;
return req_slots;
}
-EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
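/*
 * atomic_check sketch (hypothetical names; assumes the three-argument
 * drm_dp_calc_pbn_mode() helper of this kernel generation): recompute the
 * PBN for the new mode and (re)allocate time slots for the port. The
 * bandwidth itself is only validated later by drm_dp_mst_atomic_check().
 */
static int example_encoder_atomic_check(struct drm_atomic_state *state,
					struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					const struct drm_display_mode *mode,
					int bpp)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(mode->clock, bpp, false);
	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);

	return slots < 0 ? slots : 0;
}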
/**
- * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
+ * drm_dp_atomic_release_time_slots() - Release allocated time slots
* @state: global atomic state
* @mgr: MST topology manager for the port
- * @port: The port to release the VCPI slots from
+ * @port: The port to release the time slots from
*
- * Releases any VCPI slots that have been allocated to a port in the atomic
- * state. Any atomic drivers which support MST must call this function in
- * their &drm_connector_helper_funcs.atomic_check() callback when the
- * connector will no longer have VCPI allocated (e.g. because its CRTC was
- * removed) when it had VCPI allocated in the previous atomic state.
+ * Releases any time slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function
+ * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
+ * This helper will check whether time slots would be released by the new state and
+ * respond accordingly, along with ensuring the MST state is always added to the
+ * atomic state whenever a new state would modify the state of payloads on the
+ * topology.
*
* It is OK to call this even if @port has been removed from the system.
* Additionally, it is OK to call this function multiple times on the same
* @port as needed. It is not OK however, to call this function and
- * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
* phase.
*
* See also:
- * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_atomic_find_time_slots()
* drm_dp_mst_atomic_check()
*
* Returns:
- * 0 if all slots for this port were added back to
- * &drm_dp_mst_topology_state.avail_slots or negative error code
+ * 0 on success, negative error code otherwise
*/
-int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
struct drm_dp_mst_topology_state *topology_state;
- struct drm_dp_vcpi_allocation *pos;
- bool found = false;
+ struct drm_dp_mst_atomic_payload *payload;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ bool update_payload = true;
+
+ old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
+ if (!old_conn_state->crtc)
+ return 0;
+
+	/* If the CRTC isn't disabled by this state, don't release its payload */
+ new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
+ if (new_conn_state->crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+
+ /* No modeset means no payload changes, so it's safe to not pull in the MST state */
+ if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ update_payload = false;
+ }
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- list_for_each_entry(pos, &topology_state->vcpis, next) {
- if (pos->port == port) {
- found = true;
- break;
- }
- }
- if (WARN_ON(!found)) {
- drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
+ topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
+ if (!update_payload)
+ return 0;
+
+ payload = drm_atomic_get_mst_payload_state(topology_state, port);
+ if (WARN_ON(!payload)) {
+ drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
port, &topology_state->base);
return -EINVAL;
}
- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
- if (pos->vcpi) {
+ if (new_conn_state->crtc)
+ return 0;
+
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
+ if (!payload->delete) {
drm_dp_mst_put_port_malloc(port);
- pos->vcpi = 0;
- pos->pbn = 0;
+ payload->pbn = 0;
+ payload->delete = true;
+ topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
}
return 0;
}
-EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
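/*
 * Connector .atomic_check sketch: with the new semantics this is called
 * unconditionally and the helper itself decides whether the new state
 * actually releases the payload. "struct example_mst_connector" is a
 * hypothetical driver type, not part of this patch.
 */
struct example_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;
};

static int example_mst_connector_atomic_check(struct drm_connector *connector,
					      struct drm_atomic_state *state)
{
	struct example_mst_connector *c =
		container_of(connector, struct example_mst_connector, base);

	return drm_dp_atomic_release_time_slots(state, c->mgr, c->port);
}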
/**
- * drm_dp_mst_update_slots() - updates the slot info depending on the DP ecoding format
- * @mst_state: mst_state to update
- * @link_encoding_cap: the ecoding format on the link
- */
-void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
-{
- if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
- mst_state->total_avail_slots = 64;
- mst_state->start_slot = 0;
- } else {
- mst_state->total_avail_slots = 63;
- mst_state->start_slot = 1;
- }
-
- DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
- (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
- mst_state);
-}
-EXPORT_SYMBOL(drm_dp_mst_update_slots);
-
-/**
- * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
- * @mgr: manager for this port
- * @port: port to allocate a virtual channel for.
- * @pbn: payload bandwidth number to request
- * @slots: returned number of slots for this PBN.
+ * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
+ * @state: global atomic state
+ *
+ * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
+ * currently assigned to an MST topology. Drivers must call this hook from their
+ * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
+ *
+ * Returns:
+ * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
*/
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn, int slots)
+int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
{
- int ret;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, j, commit_idx, num_commit_deps;
- if (slots < 0)
- return false;
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ if (!mst_state->pending_crtc_mask)
+ continue;
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return false;
+ num_commit_deps = hweight32(mst_state->pending_crtc_mask);
+ mst_state->commit_deps = kmalloc_array(num_commit_deps,
+ sizeof(*mst_state->commit_deps), GFP_KERNEL);
+ if (!mst_state->commit_deps)
+ return -ENOMEM;
+ mst_state->num_commit_deps = num_commit_deps;
- if (port->vcpi.vcpi > 0) {
- drm_dbg_kms(mgr->dev,
- "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
- port->vcpi.vcpi, port->vcpi.pbn, pbn);
- if (pbn == port->vcpi.pbn) {
- drm_dp_mst_topology_put_port(port);
- return true;
+ commit_idx = 0;
+ for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
+ if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
+ mst_state->commit_deps[commit_idx++] =
+ drm_crtc_commit_get(crtc_state->commit);
+ }
}
}
- ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
- if (ret) {
- drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
- DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
-
- /* Keep port allocated until its payload has been removed */
- drm_dp_mst_get_port_malloc(port);
- drm_dp_mst_topology_put_port(port);
- return true;
-out:
- return false;
+ return 0;
}
-EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
+EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
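/*
 * Wiring sketch: the helper is meant to be used directly as (or called from)
 * the driver's &drm_mode_config_helper_funcs.atomic_commit_setup hook.
 * The structure name below is the driver's choice.
 */
static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};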
-int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
+ * prepare new MST state for commit
+ * @state: global atomic state
+ *
+ * Goes through any MST topologies in this atomic state, and waits for any pending commits which
+ * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
+ * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
+ * with each other by forcing them to be executed sequentially in situations where the only resource
+ * the modeset objects in these commits share is an MST topology.
+ *
+ * This function also prepares the new MST state for commit by performing some state preparation
+ * which can't be done until this point, such as reading back the final VC start slots (which are
+ * determined at commit-time) from the previous state.
+ *
+ * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
+ * or whatever their equivalent of that is.
+ */
+void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
{
- int slots = 0;
+ struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
+ int i, j, ret;
+
+ for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
+ for (j = 0; j < old_mst_state->num_commit_deps; j++) {
+ ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
+ if (ret < 0)
+ drm_err(state->dev, "Failed to wait for %s: %d\n",
+ old_mst_state->commit_deps[j]->crtc->name, ret);
+ }
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return slots;
+ /* Now that previous state is committed, it's safe to copy over the start slot
+ * assignments
+ */
+ list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
+ if (old_payload->delete)
+ continue;
- slots = port->vcpi.num_slots;
- drm_dp_mst_topology_put_port(port);
- return slots;
+ new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
+ old_payload->port);
+ new_payload->vc_start_slot = old_payload->vc_start_slot;
+ }
+ }
}
-EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
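/*
 * Commit-tail ordering sketch for a driver that implements its own commit
 * tail (hypothetical function): the generic dependency wait comes first,
 * then the MST-specific wait, before any payloads are programmed.
 */
static void example_commit_tail(struct drm_atomic_state *state)
{
	drm_atomic_helper_wait_for_dependencies(state);
	drm_dp_mst_atomic_wait_for_dependencies(state);

	/* ... modeset disables, payload updates, modeset enables, hw_done ... */
}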
/**
- * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
+ * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
+ * in SST mode
+ * @new_conn_state: The new connector state of the &drm_connector
+ * @mgr: The MST topology manager for the &drm_connector
+ *
+ * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
+ * serialize non-blocking commits happening on the real DP connector of an MST topology switching
+ * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
+ * MST topology will never share the same &drm_encoder.
+ *
+ * This function takes care of this serialization issue, by checking a root MST connector's atomic
+ * state to determine if it is about to have a modeset - and then pulling in the MST topology state
+ * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
*
- * This just resets the number of slots for the ports VCPI for later programming.
+ * Drivers implementing MST must call this function from the
+ * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
+ * driving MST sinks.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise
*/
-void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr)
{
- /*
- * A port with VCPI will remain allocated until its VCPI is
- * released, no verified ref needed
- */
+ struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, new_conn_state->connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_dp_mst_topology_state *mst_state = NULL;
+
+ if (new_conn_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
+ }
+ }
+
+ if (old_conn_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
+ if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (!mst_state) {
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+ }
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
+ }
+ }
- port->vcpi.num_slots = 0;
+ return 0;
}
-EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
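/*
 * Sketch for the physical DP connector's .atomic_check hook (hypothetical
 * "struct example_dp" embedding the topology manager), so SST commits on the
 * root connector serialize against MST commits on the same link.
 */
struct example_dp {
	struct drm_connector connector;
	struct drm_dp_mst_topology_mgr mst_mgr;
};

static int example_dp_connector_atomic_check(struct drm_connector *connector,
					     struct drm_atomic_state *state)
{
	struct example_dp *dp = container_of(connector, struct example_dp, connector);
	struct drm_connector_state *new_conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	return drm_dp_mst_root_conn_atomic_check(new_conn_state, &dp->mst_mgr);
}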
/**
- * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
- * @mgr: manager for this port
- * @port: port to deallocate vcpi for
- *
- * This can be called unconditionally, regardless of whether
- * drm_dp_mst_allocate_vcpi() succeeded or not.
+ * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
+ * @mst_state: mst_state to update
+ * @link_encoding_cap: the encoding format on the link
*/
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port)
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
{
- bool skip;
-
- if (!port->vcpi.vcpi)
- return;
-
- mutex_lock(&mgr->lock);
- skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
- mutex_unlock(&mgr->lock);
-
- if (skip)
- return;
+ if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
+ mst_state->total_avail_slots = 64;
+ mst_state->start_slot = 0;
+ } else {
+ mst_state->total_avail_slots = 63;
+ mst_state->start_slot = 1;
+ }
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- port->vcpi.num_slots = 0;
- port->vcpi.pbn = 0;
- port->vcpi.aligned_pbn = 0;
- port->vcpi.vcpi = 0;
- drm_dp_mst_put_port_malloc(port);
+ DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
+ (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
+ mst_state);
}
-EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
+EXPORT_SYMBOL(drm_dp_mst_update_slots);
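/*
 * Sketch (hypothetical helper): choose 64 slots starting at 0 for 128b/132b
 * links or 63 slots starting at 1 for 8b/10b links, based on a channel
 * coding capability the driver has already read from the sink's DPCD.
 */
static int example_update_mst_slots(struct drm_atomic_state *state,
				    struct drm_dp_mst_topology_mgr *mgr,
				    u8 link_encoding_cap)
{
	struct drm_dp_mst_topology_state *mst_state;

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	drm_dp_mst_update_slots(mst_state, link_encoding_cap);
	return 0;
}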
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, struct drm_dp_payload *payload)
+ int id, u8 start_slot, u8 num_slots)
{
u8 payload_alloc[3], status;
int ret;
@@ -4637,8 +4519,8 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = id;
- payload_alloc[1] = payload->start_slot;
- payload_alloc[2] = payload->num_slots;
+ payload_alloc[1] = start_slot;
+ payload_alloc[2] = num_slots;
ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
if (ret != 3) {
@@ -4853,8 +4735,9 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr)
{
- int i;
- struct drm_dp_mst_port *port;
+ struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_atomic_payload *payload;
+ int i, ret;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
@@ -4863,36 +4746,35 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
/* dump VCPIs */
mutex_unlock(&mgr->lock);
- mutex_lock(&mgr->payload_lock);
- seq_printf(m, "\n*** VCPI Info ***\n");
- seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
+ ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
+ if (ret < 0)
+ return;
+
+ state = to_drm_dp_mst_topology_state(mgr->base.state);
+ seq_printf(m, "\n*** Atomic state info ***\n");
+ seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
+ state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
- seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
+ seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i]) {
+ list_for_each_entry(payload, &state->payloads, next) {
char name[14];
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
- fetch_monitor_name(mgr, port, name, sizeof(name));
- seq_printf(m, "%10d%10d%10d%10d%20s\n",
+ if (payload->vcpi != i || payload->delete)
+ continue;
+
+ fetch_monitor_name(mgr, payload->port, name, sizeof(name));
+ seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
i,
- port->port_num,
- port->vcpi.vcpi,
- port->vcpi.num_slots,
+ payload->port->port_num,
+ payload->vcpi,
+ payload->vc_start_slot,
+ payload->vc_start_slot + payload->time_slots - 1,
+ payload->pbn,
+ payload->dsc_enabled ? "Y" : "N",
(*name != 0) ? name : "Unknown");
- } else
- seq_printf(m, "%6d - Unused\n", i);
- }
- seq_printf(m, "\n*** Payload Info ***\n");
- seq_printf(m, "| idx | state | start slot | # slots |\n");
- for (i = 0; i < mgr->max_payloads; i++) {
- seq_printf(m, "%10d%10d%15d%10d\n",
- i,
- mgr->payloads[i].payload_state,
- mgr->payloads[i].start_slot,
- mgr->payloads[i].num_slots);
+ }
}
- mutex_unlock(&mgr->payload_lock);
seq_printf(m, "\n*** DPCD Info ***\n");
mutex_lock(&mgr->lock);
@@ -4907,14 +4789,14 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret) {
+ if (ret != 2) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret) {
+ if (ret != 1) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
@@ -4922,7 +4804,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret) {
+ if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -4938,7 +4820,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
out:
mutex_unlock(&mgr->lock);
-
+ drm_modeset_unlock(&mgr->base.lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
@@ -5060,7 +4942,7 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
struct drm_dp_mst_topology_state *state, *old_state =
to_dp_mst_topology_state(obj->state);
- struct drm_dp_vcpi_allocation *pos, *vcpi;
+ struct drm_dp_mst_atomic_payload *pos, *payload;
state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
if (!state)
@@ -5068,25 +4950,28 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- INIT_LIST_HEAD(&state->vcpis);
+ INIT_LIST_HEAD(&state->payloads);
+ state->commit_deps = NULL;
+ state->num_commit_deps = 0;
+ state->pending_crtc_mask = 0;
- list_for_each_entry(pos, &old_state->vcpis, next) {
- /* Prune leftover freed VCPI allocations */
- if (!pos->vcpi)
+ list_for_each_entry(pos, &old_state->payloads, next) {
+ /* Prune leftover freed timeslot allocations */
+ if (pos->delete)
continue;
- vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
- if (!vcpi)
+ payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
+ if (!payload)
goto fail;
- drm_dp_mst_get_port_malloc(vcpi->port);
- list_add(&vcpi->next, &state->vcpis);
+ drm_dp_mst_get_port_malloc(payload->port);
+ list_add(&payload->next, &state->payloads);
}
return &state->base;
fail:
- list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
+ list_for_each_entry_safe(pos, payload, &state->payloads, next) {
drm_dp_mst_put_port_malloc(pos->port);
kfree(pos);
}
@@ -5100,15 +4985,20 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
- struct drm_dp_vcpi_allocation *pos, *tmp;
+ struct drm_dp_mst_atomic_payload *pos, *tmp;
+ int i;
- list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
- /* We only keep references to ports with non-zero VCPIs */
- if (pos->vcpi)
+ list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
+ /* We only keep references to ports with active payloads */
+ if (!pos->delete)
drm_dp_mst_put_port_malloc(pos->port);
kfree(pos);
}
+ for (i = 0; i < mst_state->num_commit_deps; i++)
+ drm_crtc_commit_put(mst_state->commit_deps[i]);
+
+ kfree(mst_state->commit_deps);
kfree(mst_state);
}
@@ -5135,7 +5025,7 @@ static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_topology_state *state)
{
- struct drm_dp_vcpi_allocation *vcpi;
+ struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_port *port;
int pbn_used = 0, ret;
bool found = false;
@@ -5143,9 +5033,9 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
/* Check that we have at least one port in our state that's downstream
* of this branch, otherwise we can skip this branch
*/
- list_for_each_entry(vcpi, &state->vcpis, next) {
- if (!vcpi->pbn ||
- !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+ list_for_each_entry(payload, &state->payloads, next) {
+ if (!payload->pbn ||
+ !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
continue;
found = true;
@@ -5176,25 +5066,15 @@ static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
struct drm_dp_mst_topology_state *state)
{
- struct drm_dp_vcpi_allocation *vcpi;
+ struct drm_dp_mst_atomic_payload *payload;
int pbn_used = 0;
if (port->pdt == DP_PEER_DEVICE_NONE)
return 0;
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
- bool found = false;
-
- list_for_each_entry(vcpi, &state->vcpis, next) {
- if (vcpi->port != port)
- continue;
- if (!vcpi->pbn)
- return 0;
-
- found = true;
- break;
- }
- if (!found)
+ payload = drm_atomic_get_mst_payload_state(state, port);
+ if (!payload)
return 0;
/*
@@ -5208,7 +5088,7 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
return -EINVAL;
}
- pbn_used = vcpi->pbn;
+ pbn_used = payload->pbn;
} else {
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
state);
@@ -5230,28 +5110,28 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
}
static inline int
-drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
{
- struct drm_dp_vcpi_allocation *vcpi;
+ struct drm_dp_mst_atomic_payload *payload;
int avail_slots = mst_state->total_avail_slots, payload_count = 0;
- list_for_each_entry(vcpi, &mst_state->vcpis, next) {
- /* Releasing VCPI is always OK-even if the port is gone */
- if (!vcpi->vcpi) {
- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n",
- vcpi->port);
+ list_for_each_entry(payload, &mst_state->payloads, next) {
+ /* Releasing payloads is always OK-even if the port is gone */
+ if (payload->delete) {
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
+ payload->port);
continue;
}
- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n",
- vcpi->port, vcpi->vcpi);
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
+ payload->port, payload->time_slots);
- avail_slots -= vcpi->vcpi;
+ avail_slots -= payload->time_slots;
if (avail_slots < 0) {
drm_dbg_atomic(mgr->dev,
- "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
- vcpi->port, mst_state, avail_slots + vcpi->vcpi);
+ "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
+ payload->port, mst_state, avail_slots + payload->time_slots);
return -ENOSPC;
}
@@ -5261,9 +5141,22 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
mgr, mst_state, mgr->max_payloads);
return -EINVAL;
}
+
+ /* Assign a VCPI */
+ if (!payload->vcpi) {
+ payload->vcpi = ffz(mst_state->payload_mask) + 1;
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
+ payload->port, payload->vcpi);
+ mst_state->payload_mask |= BIT(payload->vcpi - 1);
+ }
}
- drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
- mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
+
+ if (!payload_count)
+ mst_state->pbn_div = 0;
+
+ drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
+ mgr, mst_state, mst_state->pbn_div, avail_slots,
+ mst_state->total_avail_slots - avail_slots);
return 0;
}
@@ -5284,7 +5177,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_vcpi_allocation *pos;
+ struct drm_dp_mst_atomic_payload *pos;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_crtc *crtc;
@@ -5295,7 +5188,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (IS_ERR(mst_state))
return -EINVAL;
- list_for_each_entry(pos, &mst_state->vcpis, next) {
+ list_for_each_entry(pos, &mst_state->payloads, next) {
connector = pos->port->connector;
@@ -5334,7 +5227,6 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
* @state: Pointer to the new drm_atomic_state
* @port: Pointer to the affected MST Port
* @pbn: Newly recalculated bw required for link with DSC enabled
- * @pbn_div: Divider to calculate correct number of pbn per slot
* @enable: Boolean flag to enable or disable DSC on the port
*
* This function enables DSC on the given Port
@@ -5345,54 +5237,46 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
*/
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
struct drm_dp_mst_port *port,
- int pbn, int pbn_div,
- bool enable)
+ int pbn, bool enable)
{
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_vcpi_allocation *pos;
- bool found = false;
- int vcpi = 0;
+ struct drm_dp_mst_atomic_payload *payload;
+ int time_slots = 0;
mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
-
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- list_for_each_entry(pos, &mst_state->vcpis, next) {
- if (pos->port == port) {
- found = true;
- break;
- }
- }
-
- if (!found) {
+ payload = drm_atomic_get_mst_payload_state(mst_state, port);
+ if (!payload) {
drm_dbg_atomic(state->dev,
- "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+ "[MST PORT:%p] Couldn't find payload in mst state %p\n",
port, mst_state);
return -EINVAL;
}
- if (pos->dsc_enabled == enable) {
+ if (payload->dsc_enabled == enable) {
drm_dbg_atomic(state->dev,
- "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
- port, enable, pos->vcpi);
- vcpi = pos->vcpi;
+ "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
+ port, enable, payload->time_slots);
+ time_slots = payload->time_slots;
}
if (enable) {
- vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+ time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
drm_dbg_atomic(state->dev,
- "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
- port, vcpi);
- if (vcpi < 0)
+ "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
+ port, time_slots);
+ if (time_slots < 0)
return -EINVAL;
}
- pos->dsc_enabled = enable;
+ payload->dsc_enabled = enable;
- return vcpi;
+ return time_slots;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
+
/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
@@ -5400,15 +5284,15 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
*
* Checks the given topology state for an atomic update to ensure that it's
* valid. This includes checking whether there's enough bandwidth to support
- * the new VCPI allocations in the atomic update.
+ * the new timeslot allocations in the atomic update.
*
* Any atomic drivers supporting DP MST must make sure to call this after
* checking the rest of their state in their
* &drm_mode_config_funcs.atomic_check() callback.
*
* See also:
- * drm_dp_atomic_find_vcpi_slots()
- * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_atomic_find_time_slots()
+ * drm_dp_atomic_release_time_slots()
*
* Returns:
*
@@ -5424,7 +5308,7 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
if (!mgr->mst_state)
continue;
- ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+ ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
if (ret)
break;
@@ -5450,7 +5334,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
/**
* drm_atomic_get_mst_topology_state: get MST topology state
- *
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
@@ -5470,14 +5353,37 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
+ * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_new_private_obj_state(), passing in the MST atomic
+ * state vtable so that the private object state returned is that of an MST
+ * topology object.
+ *
+ * Returns:
+ *
+ * The MST topology state, or NULL if there's no topology state for this MST mgr
+ * in the global atomic state
+ */
+struct drm_dp_mst_topology_state *
+drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_private_state *priv_state =
+ drm_atomic_get_new_private_obj_state(state, &mgr->base);
+
+ return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
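/*
 * Commit-path sketch (hypothetical function): unlike drm_atomic_get_mst_topology_state(),
 * the _get_new_ lookup never allocates state and may return NULL when the
 * topology is untouched by this update.
 */
static void example_commit_mst(struct drm_atomic_state *state,
			       struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(state, mgr);

	if (!mst_state)
		return;

	/* ... walk mst_state->payloads and program the hardware ... */
}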
+
+/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
* @dev: device providing this structure - for i2c addition.
* @aux: DP helper aux channel to talk to this device
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
- * @max_lane_count: maximum number of lanes this GPU supports
- * @max_link_rate: maximum link rate per lane this GPU supports in kHz
* @conn_base_id: the connector object ID the MST device is connected to.
*
* Return 0 for success, or negative error code on failure
@@ -5485,14 +5391,12 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes, int max_payloads,
- int max_lane_count, int max_link_rate,
int conn_base_id)
{
struct drm_dp_mst_topology_state *mst_state;
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
- mutex_init(&mgr->payload_lock);
mutex_init(&mgr->delayed_destroy_lock);
mutex_init(&mgr->up_req_lock);
mutex_init(&mgr->probe_lock);
@@ -5522,19 +5426,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mgr->aux = aux;
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
- mgr->max_lane_count = max_lane_count;
- mgr->max_link_rate = max_link_rate;
mgr->conn_base_id = conn_base_id;
- if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
- max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
- return -EINVAL;
- mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
- if (!mgr->payloads)
- return -ENOMEM;
- mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
- if (!mgr->proposed_vcpis)
- return -ENOMEM;
- set_bit(0, &mgr->payload_mask);
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
@@ -5544,7 +5436,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mst_state->start_slot = 1;
mst_state->mgr = mgr;
- INIT_LIST_HEAD(&mst_state->vcpis);
+ INIT_LIST_HEAD(&mst_state->payloads);
drm_atomic_private_obj_init(dev, &mgr->base,
&mst_state->base,
@@ -5567,19 +5459,12 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
destroy_workqueue(mgr->delayed_destroy_wq);
mgr->delayed_destroy_wq = NULL;
}
- mutex_lock(&mgr->payload_lock);
- kfree(mgr->payloads);
- mgr->payloads = NULL;
- kfree(mgr->proposed_vcpis);
- mgr->proposed_vcpis = NULL;
- mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
mutex_destroy(&mgr->delayed_destroy_lock);
- mutex_destroy(&mgr->payload_lock);
mutex_destroy(&mgr->qlock);
mutex_destroy(&mgr->lock);
mutex_destroy(&mgr->up_req_lock);
@@ -5908,8 +5793,10 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
	/* Endpoint decompression with DP-to-DP peer device */
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE) &&
- (upstream_dsc & 0x2) /* DSC passthrough */)
+ (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
+ port->passthrough_aux = &immediate_upstream_port->aux;
return &port->aux;
+ }
/* Virtual DPCD decompression with DP-to-DP peer device */
return &immediate_upstream_port->aux;
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 81881e81ceae..c3ad4ab2b456 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -35,6 +35,19 @@
* HDMI 2.0 specification. It is a point-to-point protocol that allows the
* HDMI source and HDMI sink to exchange data. The same I2C interface that
* is used to access EDID serves as the transport mechanism for SCDC.
+ *
+ * Note: The SCDC status is going to be lost when the display is
+ * disconnected. This can happen physically when the user disconnects
+ * the cable, but also when a display is switched on (such as waking up
+ * a TV).
+ *
+ * This is further complicated by the fact that, upon a disconnection /
+ * reconnection, KMS won't change the mode on its own. This means that
+ * one can't just rely on setting the SCDC status on enable, but also
+ * has to track the connector status changes using interrupts and
+ * restore the SCDC status. The typical solution for this is to trigger an
+ * empty modeset in drm_connector_helper_funcs.detect_ctx(), like what vc4 does
+ * in vc4_hdmi_reset_link().
*/
#define SCDC_I2C_SLAVE_ADDRESS 0x54
diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
index fdb7d5c17ba1..3b8fdeeafd53 100644
--- a/drivers/gpu/drm/drm_aperture.c
+++ b/drivers/gpu/drm/drm_aperture.c
@@ -74,7 +74,7 @@
* given framebuffer memory. Ownership of the framebuffer memory is achieved
* by calling devm_aperture_acquire_from_firmware(). On success, the driver
* is the owner of the framebuffer range. The function fails if the
- * framebuffer is already by another driver. See below for an example.
+ * framebuffer is already owned by another driver. See below for an example.
*
* .. code-block:: c
*
@@ -112,7 +112,7 @@
*
* The generic driver is now subject to forced removal by other drivers. This
* only works for platform drivers that support hot unplug.
- * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al
+ * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al.
* for the registered framebuffer range, the aperture helpers call
* platform_device_unregister() and the generic driver unloads itself. It
* may not access the device's registers, framebuffer memory, ROM, etc
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
* @primary: also kick vga16fb if present
* @req_driver: requesting DRM driver
*
- * This function removes graphics device drivers which use memory range described by
+ * This function removes graphics device drivers which use the memory range described by
* @base and @size.
*
* Returns:
@@ -182,8 +182,8 @@ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
* @pdev: PCI device
* @req_driver: requesting DRM driver
*
- * This function removes graphics device drivers using memory range configured
- * for any of @pdev's memory bars. The function assumes that PCI device with
+ * This function removes graphics device drivers using the memory range configured
+ * for any of @pdev's memory bars. The function assumes that a PCI device with
* shadowed ROM drives a primary display and so kicks out vga16fb.
*
* Returns:
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8bf41aa24068..98cc3137c062 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -38,7 +38,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -703,8 +702,12 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (funcs->atomic_check)
ret = funcs->atomic_check(connector, state);
- if (ret)
+ if (ret) {
+ drm_dbg_atomic(dev,
+ "[CONNECTOR:%d:%s] driver check failed\n",
+ connector->base.id, connector->name);
return ret;
+ }
connectors_mask |= BIT(i);
}
@@ -746,8 +749,12 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (funcs->atomic_check)
ret = funcs->atomic_check(connector, state);
- if (ret)
+ if (ret) {
+ drm_dbg_atomic(dev,
+ "[CONNECTOR:%d:%s] driver check failed\n",
+ connector->base.id, connector->name);
return ret;
+ }
}
/*
@@ -779,6 +786,45 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
+ * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
+ * @encoder: encoder state to check
+ * @conn_state: connector state to check
+ *
+ * Checks if the writeback connector state is valid, and returns an error if it
+ * isn't.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int
+drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_writeback_job *wb_job = conn_state->writeback_job;
+ struct drm_property_blob *pixel_format_blob;
+ struct drm_framebuffer *fb;
+ size_t i, nformats;
+ u32 *formats;
+
+ if (!wb_job || !wb_job->fb)
+ return 0;
+
+ pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
+ nformats = pixel_format_blob->length / sizeof(u32);
+ formats = pixel_format_blob->data;
+ fb = wb_job->fb;
+
+ for (i = 0; i < nformats; i++)
+ if (fb->format->format == formats[i])
+ return 0;
+
+ drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
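A hedged sketch of how a writeback encoder might use the new helper from its atomic_check; the my_wb_* names are hypothetical:

static int my_wb_encoder_atomic_check(struct drm_encoder *encoder,
				      struct drm_crtc_state *crtc_state,
				      struct drm_connector_state *conn_state)
{
	/* Reject jobs whose framebuffer format the connector cannot write back. */
	return drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
}

static const struct drm_encoder_helper_funcs my_wb_encoder_helper_funcs = {
	.atomic_check = my_wb_encoder_atomic_check,
};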
+
+/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
* @plane_state: plane state to check
* @crtc_state: CRTC state to check
@@ -1789,7 +1835,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
struct drm_plane_state *old_plane_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
- int i, n_planes = 0;
+ int i, ret, n_planes = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
@@ -1800,19 +1846,34 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
n_planes++;
/* FIXME: we support only single plane updates for now */
- if (n_planes != 1)
+ if (n_planes != 1) {
+ drm_dbg_atomic(dev,
+ "only single plane async updates are supported\n");
return -EINVAL;
+ }
if (!new_plane_state->crtc ||
- old_plane_state->crtc != new_plane_state->crtc)
+ old_plane_state->crtc != new_plane_state->crtc) {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] async update cannot change CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
+ }
funcs = plane->helper_private;
- if (!funcs->atomic_async_update)
+ if (!funcs->atomic_async_update) {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] driver does not support async updates\n",
+ plane->base.id, plane->name);
return -EINVAL;
+ }
- if (new_plane_state->fence)
+ if (new_plane_state->fence) {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] missing fence for async update\n",
+ plane->base.id, plane->name);
return -EINVAL;
+ }
/*
* Don't do an async update if there is an outstanding commit modifying
@@ -1827,7 +1888,12 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
- return funcs->atomic_async_check(plane, state);
+ ret = funcs->atomic_async_check(plane, state);
+ if (ret != 0)
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] driver async check failed\n",
+ plane->base.id, plane->name);
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 6e433d465f41..cf92a9ae8034 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -140,14 +140,14 @@ struct drm_master *drm_master_create(struct drm_device *dev)
kref_init(&master->refcount);
drm_master_legacy_init(master);
- idr_init(&master->magic_map);
+ idr_init_base(&master->magic_map, 1);
master->dev = dev;
/* initialize the tree of output resource lessees */
INIT_LIST_HEAD(&master->lessees);
INIT_LIST_HEAD(&master->lessee_list);
idr_init(&master->leases);
- idr_init(&master->lessee_idr);
+ idr_init_base(&master->lessee_idr, 1);
return master;
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 6abf7a2407e9..1545c50fd1c8 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -847,8 +847,8 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
struct drm_connector_state *conn_state,
u32 out_bus_fmt)
{
+ unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- unsigned int num_in_bus_fmts, i;
struct drm_bridge *prev_bridge;
u32 *in_bus_fmts;
int ret;
@@ -969,7 +969,7 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
struct drm_connector *conn = conn_state->connector;
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
- unsigned int i, num_out_bus_fmts;
+ unsigned int i, num_out_bus_fmts = 0;
struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index af3b7395bf69..2b230b4d6942 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -264,7 +264,7 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
dumb_args.width = width;
dumb_args.height = height;
- dumb_args.bpp = info->cpp[0] * 8;
+ dumb_args.bpp = drm_format_info_bpp(info, 0);
ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
if (ret)
goto err_delete;
@@ -373,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
int ret;
info = drm_format_info(format);
- fb_req.bpp = info->cpp[0] * 8;
+ fb_req.bpp = drm_format_info_bpp(info, 0);
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 17c6c3eefcd6..d021497841b8 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -575,7 +575,7 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
len++;
}
- prop = drm_property_create_enum(dev, 0, "COLOR_RANGE",
+ prop = drm_property_create_enum(dev, 0, "COLOR_RANGE",
enum_list, len);
if (!prop)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 1ab083b35e3b..e3142c8142b3 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -22,15 +22,16 @@
#include <drm/drm_auth.h>
#include <drm/drm_connector.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_panel.h>
-#include <drm/drm_utils.h>
#include <drm/drm_print.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_utils.h>
#include <linux/fb.h>
#include <linux/uaccess.h>
@@ -214,23 +215,11 @@ void drm_connector_free_work_fn(struct work_struct *work)
}
}
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
+static int __drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -278,6 +267,9 @@ int drm_connector_init(struct drm_device *dev,
goto out_put_type_id;
}
+ /* provide ddc symlink in sysfs */
+ connector->ddc = ddc;
+
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
@@ -334,6 +326,38 @@ out_put:
return ret;
}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * At driver unload time the driver's &drm_connector_funcs.destroy hook
+ * should call drm_connector_cleanup() and free the connector structure.
+ * The connector structure should not be allocated with devm_kzalloc().
+ *
+ * Note: consider using drmm_connector_init() instead of
+ * drm_connector_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+}
EXPORT_SYMBOL(drm_connector_init);
/**
@@ -347,8 +371,16 @@ EXPORT_SYMBOL(drm_connector_init);
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
+ * At driver unload time the driver's &drm_connector_funcs.destroy hook
+ * should call drm_connector_cleanup() and free the connector structure.
+ * The connector structure should not be allocated with devm_kzalloc().
+ *
* Ensures that the ddc field of the connector is correctly set.
*
+ * Note: consider using drmm_connector_init() instead of
+ * drm_connector_init_with_ddc() to let the DRM managed resource
+ * infrastructure take care of cleanup and deallocation.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -358,18 +390,63 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
int connector_type,
struct i2c_adapter *ddc)
{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+}
+EXPORT_SYMBOL(drm_connector_init_with_ddc);
+
+static void drm_connector_cleanup_action(struct drm_device *dev,
+ void *ptr)
+{
+ struct drm_connector *connector = ptr;
+
+ drm_connector_cleanup(connector);
+}
+
+/**
+ * drmm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: optional pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Cleanup is automatically handled with a call to
+ * drm_connector_cleanup() in a DRM-managed action.
+ *
+ * The connector structure should be allocated with drmm_kzalloc().
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
int ret;
- ret = drm_connector_init(dev, connector, funcs, connector_type);
+ if (drm_WARN_ON(dev, funcs && funcs->destroy))
+ return -EINVAL;
+
+ ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
if (ret)
return ret;
- /* provide ddc symlink in sysfs */
- connector->ddc = ddc;
+ ret = drmm_add_action_or_reset(dev, drm_connector_cleanup_action,
+ connector);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(drm_connector_init_with_ddc);
+EXPORT_SYMBOL(drmm_connector_init);
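A hedged sketch of the intended usage: the connector is allocated with drmm_kzalloc(), no .destroy hook is provided, and cleanup happens through the DRM-managed action. All my_* names are hypothetical:

static const struct drm_connector_funcs my_connector_funcs = {
	.reset			= drm_atomic_helper_connector_reset,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
	/* no .destroy: drmm_connector_init() takes care of cleanup */
};

static int my_create_connector(struct drm_device *dev, struct i2c_adapter *ddc)
{
	struct drm_connector *connector;

	connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return -ENOMEM;

	return drmm_connector_init(dev, connector, &my_connector_funcs,
				   DRM_MODE_CONNECTOR_HDMIA, ddc);
}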
/**
* drm_connector_attach_edid_property - attach edid property.
@@ -517,6 +594,9 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* e.g. DP MST connectors. All other connectors will be registered automatically
* when calling drm_dev_register().
*
+ * When the connector is no longer available, callers must call
+ * drm_connector_unregister().
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -573,9 +653,8 @@ EXPORT_SYMBOL(drm_connector_register);
* @connector: the connector to unregister
*
* Unregister userspace interfaces for a connector. Only call this for
- * connectors which have registered explicitly by calling drm_dev_register(),
- * since connectors are unregistered automatically when drm_dev_unregister() is
- * called.
+ * connectors which have been registered explicitly by calling
+ * drm_connector_register().
*/
void drm_connector_unregister(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index cad2a7e5166f..df9bf3c9206e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -343,9 +343,10 @@ static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *
* The @primary and @cursor planes are only relevant for legacy uAPI, see
* &drm_crtc.primary and &drm_crtc.cursor.
*
- * Note: consider using drmm_crtc_alloc_with_planes() instead of
- * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure
- * take care of cleanup and deallocation.
+ * Note: consider using drmm_crtc_alloc_with_planes() or
+ * drmm_crtc_init_with_planes() instead of drm_crtc_init_with_planes()
+ * to let the DRM managed resource infrastructure take care of cleanup
+ * and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -370,14 +371,88 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
-static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev,
- void *ptr)
+static void drmm_crtc_init_with_planes_cleanup(struct drm_device *dev,
+ void *ptr)
{
struct drm_crtc *crtc = ptr;
drm_crtc_cleanup(crtc);
}
+__printf(6, 0)
+static int __drmm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name,
+ va_list args)
+{
+ int ret;
+
+ drm_WARN_ON(dev, funcs && funcs->destroy);
+
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, args);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, drmm_crtc_init_with_planes_cleanup,
+ crtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * drmm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only 1 plane, look at
+ * drm_simple_display_pipe_init() instead.
+ *
+ * Cleanup is automatically handled through registering
+ * drm_crtc_cleanup() with drmm_add_action(). The crtc structure should
+ * be allocated with drmm_kzalloc().
+ *
+ * The @drm_crtc_funcs.destroy hook must be NULL.
+ *
+ * The @primary and @cursor planes are only relevant for legacy uAPI, see
+ * &drm_crtc.primary and &drm_crtc.cursor.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, name);
+ ret = __drmm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_crtc_init_with_planes);
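A hedged sketch of how a driver with an embedded CRTC might use the new DRM-managed variant; the structure and names are hypothetical, and the containing structure is expected to be allocated with drmm_kzalloc():

struct my_crtc {
	struct drm_crtc base;
	/* driver-private fields */
};

static const struct drm_crtc_funcs my_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	/* no .destroy: cleanup is handled by the DRM-managed action */
};

static int my_crtc_create(struct drm_device *dev, struct my_crtc *mycrtc,
			  struct drm_plane *primary, struct drm_plane *cursor)
{
	return drmm_crtc_init_with_planes(dev, &mycrtc->base, primary, cursor,
					  &my_crtc_funcs, "my-crtc");
}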
+
void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
size_t size, size_t offset,
struct drm_plane *primary,
@@ -400,17 +475,12 @@ void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
crtc = container + offset;
va_start(ap, name);
- ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
- name, ap);
+ ret = __drmm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
- ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
- crtc);
- if (ret)
- return ERR_PTR(ret);
-
return container;
}
EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 8a6d54515f92..457448cc60f7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -45,7 +45,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 937b699ac2a8..d8b2955e88fd 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -224,6 +224,7 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
const struct drm_plane_state *old_state,
const struct drm_plane_state *state)
{
+ struct drm_rect src;
memset(iter, 0, sizeof(*iter));
if (!state || !state->crtc || !state->fb || !state->visible)
@@ -233,10 +234,12 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter->num_clips = drm_plane_get_damage_clips_count(state);
/* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
- iter->plane_src.x1 = state->src.x1 >> 16;
- iter->plane_src.y1 = state->src.y1 >> 16;
- iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
- iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+ src = drm_plane_state_src(state);
+
+ iter->plane_src.x1 = src.x1 >> 16;
+ iter->plane_src.y1 = src.y1 >> 16;
+ iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
iter->clips = NULL;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 493922069c90..01ee3febb813 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -377,8 +377,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
- seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+ seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index bbc25e3b7220..4005dab6147d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5165,6 +5165,51 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
mode->clock = clock;
}
+static void drm_calculate_luminance_range(struct drm_connector *connector)
+{
+ struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
+ struct drm_luminance_range_info *luminance_range =
+ &connector->display_info.luminance_range;
+ static const u8 pre_computed_values[] = {
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98
+ };
+ u32 max_avg, min_cll, max, min, q, r;
+
+ if (!(hdr_metadata->metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1)))
+ return;
+
+ max_avg = hdr_metadata->max_fall;
+ min_cll = hdr_metadata->min_cll;
+
+ /*
+ * From the specification (CTA-861-G), for calculating the maximum
+ * luminance we need to use:
+ * Luminance = 50*2**(CV/32)
+ * Where CV is a one-byte value.
+	 * For calculating this expression we may need floating-point precision;
+	 * to avoid this complexity level, we take advantage of the fact that CV
+	 * is divided by a constant. From Euclid's division algorithm, we know
+	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
+	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
+	 * need to pre-compute the value of r/32. For pre-computing the values
+	 * we used the following Ruby line:
+	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+	 * The results of the above expression can be verified in
+	 * pre_computed_values.
+ */
+ q = max_avg >> 5;
+ r = max_avg % 32;
+ max = (1 << q) * pre_computed_values[r];
+
+ /* min luminance: maxLum * (CV/255)^2 / 100 */
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+ luminance_range->min_luminance = min;
+ luminance_range->max_luminance = max;
+}
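As a worked example of the computation above: a MaxFALL code value of max_avg = 96 gives q = 96 >> 5 = 3 and r = 96 % 32 = 0, so the maximum luminance becomes (1 << 3) * pre_computed_values[0] = 8 * 50 = 400 cd/m², which matches the CTA-861-G formula 50 * 2**(96/32) = 400.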
+
static uint8_t eotf_supported(const u8 *edid_ext)
{
return edid_ext[2] &
@@ -5196,8 +5241,12 @@ drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
if (len >= 5)
connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
- if (len >= 6)
+ if (len >= 6) {
connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+
+ /* Calculate only when all values are available */
+ drm_calculate_luminance_range(connector);
+ }
}
static void
@@ -5971,12 +6020,14 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static
-void get_monitor_range(const struct detailed_timing *timing,
- void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
{
- struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_info *info = &closure->connector->display_info;
+ struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
+ const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
@@ -5992,18 +6043,28 @@ void get_monitor_range(const struct detailed_timing *timing,
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ monitor_range->min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ monitor_range->max_vfreq += 255;
+ }
}
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
+ struct detailed_mode_closure closure = {
+ .connector = connector,
+ .drm_edid = drm_edid,
+ };
if (!version_greater(drm_edid, 1, 1))
return;
- drm_for_each_detailed_block(drm_edid, get_monitor_range,
- &info->monitor_range);
+ drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
info->monitor_range.min_vfreq,
@@ -6101,6 +6162,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
+ memset(&info->luminance_range, 0, sizeof(info->luminance_range));
info->mso_stream_count = 0;
info->mso_pixel_overlap = 0;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index a940024c8087..1143bc7f3252 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -27,6 +27,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -148,9 +149,9 @@ out_put:
* the encoder structure. The encoder structure should not be allocated with
* devm_kzalloc().
*
- * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to
- * let the DRM managed resource infrastructure take care of cleanup and
- * deallocation.
+ * Note: consider using drmm_encoder_alloc() or drmm_encoder_init()
+ * instead of drm_encoder_init() to let the DRM managed resource
+ * infrastructure take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -212,6 +213,30 @@ static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
drm_encoder_cleanup(encoder);
}
+__printf(5, 0)
+static int __drmm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type,
+ const char *name,
+ va_list args)
+{
+ int ret;
+
+ if (drm_WARN_ON(dev, funcs && funcs->destroy))
+ return -EINVAL;
+
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, args);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...)
@@ -221,9 +246,6 @@ void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
va_list ap;
int ret;
- if (WARN_ON(funcs && funcs->destroy))
- return ERR_PTR(-EINVAL);
-
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
@@ -231,19 +253,50 @@ void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
encoder = container + offset;
va_start(ap, name);
- ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
- ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
- if (ret)
- return ERR_PTR(ret);
-
return container;
}
EXPORT_SYMBOL(__drmm_encoder_alloc);
+/**
+ * drmm_encoder_init - Initialize a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initializes a preallocated encoder. Encoder should be subclassed as
+ * part of driver encoder objects. Cleanup is automatically handled
+ * through registering drm_encoder_cleanup() with drmm_add_action(). The
+ * encoder structure should be allocated with drmm_kzalloc().
+ *
+ * The @drm_encoder_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, name);
+ ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_encoder_init);
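A hedged sketch of the intended usage, assuming the encoder (or its containing structure) was allocated with drmm_kzalloc(); since @funcs is optional and .destroy must be NULL, the simplest case passes no funcs at all. The my_* name is hypothetical:

static int my_create_encoder(struct drm_device *dev,
			     struct drm_encoder *encoder)
{
	/* Cleanup is registered as a DRM-managed action; no .destroy needed. */
	return drmm_encoder_init(dev, encoder, NULL,
				 DRM_MODE_ENCODER_TMDS, NULL);
}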
+
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 69c57273b184..3b535ad1b07c 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * drm kms/fb cma (contiguous memory allocator) helper functions
+ * drm kms/fb dma helper functions
*
* Copyright (C) 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
@@ -10,35 +10,40 @@
*/
#include <drm/drm_damage_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
/**
- * DOC: framebuffer cma helper functions
+ * DOC: framebuffer dma helper functions
*
- * Provides helper functions for creating a cma (contiguous memory allocator)
- * backed framebuffer.
+ * Provides helper functions for creating a DMA-contiguous framebuffer.
+ *
+ * Depending on the platform, the buffers may be physically non-contiguous and
+ * mapped through an IOMMU or a similar mechanism, or allocated from
+ * physically-contiguous memory (using, for instance, CMA or a pool of memory
+ * reserved at early boot). This is handled behind the scenes by the DMA mapping
+ * API.
*
* drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
- * callback function to create a cma backed framebuffer.
+ * callback function to create a DMA-contiguous framebuffer.
*/
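A hedged sketch of the typical wiring for a driver built on the DMA GEM helpers, with drm_gem_fb_create() plugged into the fb_create callback as described above; the my_* name is hypothetical:

static const struct drm_mode_config_funcs my_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};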
/**
- * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * drm_fb_dma_get_gem_obj() - Get DMA GEM object for framebuffer
* @fb: The framebuffer
* @plane: Which plane
*
- * Return the CMA GEM object for given framebuffer.
+ * Return the DMA GEM object for the given framebuffer.
*
* This function will usually be called from the CRTC callback functions.
*/
-struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
struct drm_gem_object *gem;
@@ -47,27 +52,27 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
if (!gem)
return NULL;
- return to_drm_gem_cma_obj(gem);
+ return to_drm_gem_dma_obj(gem);
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_obj);
/**
- * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer, for pixel
+ * drm_fb_dma_get_gem_addr() - Get DMA (bus) address for framebuffer, for pixel
* formats where values are grouped in blocks this will get you the beginning of
* the block
* @fb: The framebuffer
* @state: Which state of drm plane
* @plane: Which plane
- * Return the CMA GEM address for given framebuffer.
+ * Return the DMA GEM address for the given framebuffer.
*
* This function will usually be called from the PLANE callback functions.
*/
-dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
+dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane)
{
- struct drm_gem_cma_object *obj;
- dma_addr_t paddr;
+ struct drm_gem_dma_object *obj;
+ dma_addr_t dma_addr;
u8 h_div = 1, v_div = 1;
u32 block_w = drm_format_info_block_width(fb->format, plane);
u32 block_h = drm_format_info_block_height(fb->format, plane);
@@ -77,11 +82,11 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
u32 block_start_y;
u32 num_hblocks;
- obj = drm_fb_cma_get_gem_obj(fb, plane);
+ obj = drm_fb_dma_get_gem_obj(fb, plane);
if (!obj)
return 0;
- paddr = obj->paddr + fb->offsets[plane];
+ dma_addr = obj->dma_addr + fb->offsets[plane];
if (plane > 0) {
h_div = fb->format->hsub;
@@ -93,43 +98,43 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
block_start_y = (sample_y / block_h) * block_h;
num_hblocks = sample_x / block_w;
- paddr += fb->pitches[plane] * block_start_y;
- paddr += block_size * num_hblocks;
+ dma_addr += fb->pitches[plane] * block_start_y;
+ dma_addr += block_size * num_hblocks;
- return paddr;
+ return dma_addr;
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
+EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_addr);
/**
- * drm_fb_cma_sync_non_coherent - Sync GEM object to non-coherent backing
+ * drm_fb_dma_sync_non_coherent - Sync GEM object to non-coherent backing
* memory
* @drm: DRM device
* @old_state: Old plane state
* @state: New plane state
*
* This function can be used by drivers that use damage clips and have
- * CMA GEM objects backed by non-coherent memory. Calling this function
+ * DMA GEM objects backed by non-coherent memory. Calling this function
* in a plane's .atomic_update ensures that all the data in the backing
* memory have been written to RAM.
*/
-void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
+void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
struct drm_plane_state *old_state,
struct drm_plane_state *state)
{
const struct drm_format_info *finfo = state->fb->format;
struct drm_atomic_helper_damage_iter iter;
- const struct drm_gem_cma_object *cma_obj;
+ const struct drm_gem_dma_object *dma_obj;
unsigned int offset, i;
struct drm_rect clip;
dma_addr_t daddr;
size_t nb_bytes;
for (i = 0; i < finfo->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(state->fb, i);
- if (!cma_obj->map_noncoherent)
+ dma_obj = drm_fb_dma_get_gem_obj(state->fb, i);
+ if (!dma_obj->map_noncoherent)
continue;
- daddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
+ daddr = drm_fb_dma_get_gem_addr(state->fb, state, i);
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip) {
@@ -142,4 +147,4 @@ void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
}
}
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_sync_non_coherent);
+EXPORT_SYMBOL_GPL(drm_fb_dma_sync_non_coherent);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2d4cee6a10ff..71edb80fe0fb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -377,12 +377,31 @@ static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
- unsigned int cpp = fb->format->cpp[0];
- size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
- void *src = fb_helper->fbdev->screen_buffer + offset;
- size_t len = (clip->x2 - clip->x1) * cpp;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
unsigned int y;
+ void *src;
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->fbdev->screen_buffer + offset;
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
@@ -1274,19 +1293,23 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
}
static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
- u8 depth)
+ const struct drm_format_info *format)
{
- switch (depth) {
- case 8:
+ u8 depth = format->depth;
+
+ if (format->is_color_indexed) {
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
- var->red.length = 8; /* 8bit DAC */
- var->green.length = 8;
- var->blue.length = 8;
+ var->red.length = depth;
+ var->green.length = depth;
+ var->blue.length = depth;
var->transp.offset = 0;
var->transp.length = 0;
- break;
+ return;
+ }
+
+ switch (depth) {
case 15:
var->red.offset = 10;
var->green.offset = 5;
@@ -1341,7 +1364,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
+ const struct drm_format_info *format = fb->format;
struct drm_device *dev = fb_helper->dev;
+ unsigned int bpp;
if (in_dbg_master())
return -EINVAL;
@@ -1351,22 +1376,33 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
var->pixclock = 0;
}
- if ((drm_format_info_block_width(fb->format, 0) > 1) ||
- (drm_format_info_block_height(fb->format, 0) > 1))
- return -EINVAL;
+ switch (format->format) {
+ case DRM_FORMAT_C1:
+ case DRM_FORMAT_C2:
+ case DRM_FORMAT_C4:
+ /* supported format with sub-byte pixels */
+ break;
+
+ default:
+ if ((drm_format_info_block_width(format, 0) > 1) ||
+ (drm_format_info_block_height(format, 0) > 1))
+ return -EINVAL;
+ break;
+ }
/*
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
+ bpp = drm_format_info_bpp(format, 0);
+ if (var->bits_per_pixel > bpp ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
- fb->width, fb->height, fb->format->cpp[0] * 8);
+ fb->width, fb->height, bpp);
return -EINVAL;
}
@@ -1381,13 +1417,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
!var->blue.length && !var->transp.length &&
!var->red.msb_right && !var->green.msb_right &&
!var->blue.msb_right && !var->transp.msb_right) {
- drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+ drm_fb_helper_fill_pixel_fmt(var, format);
}
/*
* Likewise, bits_per_pixel should be rounded up to a supported value.
*/
- var->bits_per_pixel = fb->format->cpp[0] * 8;
+ var->bits_per_pixel = bpp;
/*
* drm fbdev emulation doesn't support changing the pixel format at all,
@@ -1723,11 +1759,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
+ bool is_color_indexed)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
- FB_VISUAL_TRUECOLOR;
+ info->fix.visual = is_color_indexed ? FB_VISUAL_PSEUDOCOLOR
+ : FB_VISUAL_TRUECOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
@@ -1744,19 +1780,31 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
+ const struct drm_format_info *format = fb->format;
+
+ switch (format->format) {
+ case DRM_FORMAT_C1:
+ case DRM_FORMAT_C2:
+ case DRM_FORMAT_C4:
+ /* supported format with sub-byte pixels */
+ break;
+
+ default:
+ WARN_ON((drm_format_info_block_width(format, 0) > 1) ||
+ (drm_format_info_block_height(format, 0) > 1));
+ break;
+ }
- WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) ||
- (drm_format_info_block_height(fb->format, 0) > 1));
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
- info->var.bits_per_pixel = fb->format->cpp[0] * 8;
+ info->var.bits_per_pixel = drm_format_info_bpp(format, 0);
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
- drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
+ drm_fb_helper_fill_pixel_fmt(&info->var, format);
info->var.xres = fb_width;
info->var.yres = fb_height;
@@ -1781,7 +1829,8 @@ void drm_fb_helper_fill_info(struct fb_info *info,
{
struct drm_framebuffer *fb = fb_helper->fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0],
+ fb->format->is_color_indexed);
drm_fb_helper_fill_var(info, fb_helper,
sizes->fb_width, sizes->fb_height);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index dc7d2e5b16c8..a8b4d918e9a3 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -48,11 +48,6 @@
#include "drm_internal.h"
#include "drm_legacy.h"
-#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-#include <uapi/asm/mman.h>
-#include <drm/drm_vma_manager.h>
-#endif
-
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -131,7 +126,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* };
*
* For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
- * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
+ * DMA based drivers there is the DEFINE_DRM_GEM_DMA_FOPS() macro to make this
* simpler.
*
* The driver's &file_operations must be stored in &drm_driver.fops.
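A hedged sketch of the DMA flavour mentioned above; "my_fops" and "my_driver" are hypothetical names:

DEFINE_DRM_GEM_DMA_FOPS(my_fops);

static const struct drm_driver my_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops		 = &my_fops,
	/* ... remaining driver callbacks ... */
};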
@@ -912,139 +907,3 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
-
-#ifdef CONFIG_MMU
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * drm_addr_inflate() attempts to construct an aligned area by inflating
- * the area size and skipping the unaligned start of the area.
- * adapted from shmem_get_unmapped_area()
- */
-static unsigned long drm_addr_inflate(unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags,
- unsigned long huge_size)
-{
- unsigned long offset, inflated_len;
- unsigned long inflated_addr;
- unsigned long inflated_offset;
-
- offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
- if (offset && offset + len < 2 * huge_size)
- return addr;
- if ((addr & (huge_size - 1)) == offset)
- return addr;
-
- inflated_len = len + huge_size - PAGE_SIZE;
- if (inflated_len > TASK_SIZE)
- return addr;
- if (inflated_len < len)
- return addr;
-
- inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
- 0, flags);
- if (IS_ERR_VALUE(inflated_addr))
- return addr;
- if (inflated_addr & ~PAGE_MASK)
- return addr;
-
- inflated_offset = inflated_addr & (huge_size - 1);
- inflated_addr += offset - inflated_offset;
- if (inflated_offset > offset)
- inflated_addr += huge_size;
-
- if (inflated_addr > TASK_SIZE - len)
- return addr;
-
- return inflated_addr;
-}
-
-/**
- * drm_get_unmapped_area() - Get an unused user-space virtual memory area
- * suitable for huge page table entries.
- * @file: The struct file representing the address space being mmap()'d.
- * @uaddr: Start address suggested by user-space.
- * @len: Length of the area.
- * @pgoff: The page offset into the address space.
- * @flags: mmap flags
- * @mgr: The address space manager used by the drm driver. This argument can
- * probably be removed at some point when all drivers use the same
- * address space manager.
- *
- * This function attempts to find an unused user-space virtual memory area
- * that can accommodate the size we want to map, and that is properly
- * aligned to facilitate huge page table entries matching actual
- * huge pages or huge page aligned memory in buffer objects. Buffer objects
- * are assumed to start at huge page boundary pfns (io memory) or be
- * populated by huge pages aligned to the start of the buffer object
- * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
- *
- * Return: aligned user-space address.
- */
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr)
-{
- unsigned long addr;
- unsigned long inflated_addr;
- struct drm_vma_offset_node *node;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /*
- * @pgoff is the file page-offset the huge page boundaries of
- * which typically aligns to physical address huge page boundaries.
- * That's not true for DRM, however, where physical address huge
- * page boundaries instead are aligned with the offset from
- * buffer object start. So adjust @pgoff to be the offset from
- * buffer object start.
- */
- drm_vma_offset_lock_lookup(mgr);
- node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
- if (node)
- pgoff -= node->vm_node.start;
- drm_vma_offset_unlock_lookup(mgr);
-
- addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
- if (IS_ERR_VALUE(addr))
- return addr;
- if (addr & ~PAGE_MASK)
- return addr;
- if (addr > TASK_SIZE - len)
- return addr;
-
- if (len < HPAGE_PMD_SIZE)
- return addr;
- if (flags & MAP_FIXED)
- return addr;
- /*
- * Our priority is to support MAP_SHARED mapped hugely;
- * and support MAP_PRIVATE mapped hugely too, until it is COWed.
- * But if caller specified an address hint, respect that as before.
- */
- if (uaddr)
- return addr;
-
- inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
- HPAGE_PMD_SIZE);
-
- if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
- len >= HPAGE_PUD_SIZE)
- inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
- flags, HPAGE_PUD_SIZE);
- return inflated_addr;
-}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr)
-{
- return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
-#endif /* CONFIG_MMU */
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index a3ccd8bc966f..e2f76621453c 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,9 +8,10 @@
* (at your option) any later version.
*/
+#include <linux/io.h>
+#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/io.h>
#include <drm/drm_device.h>
#include <drm/drm_format_helper.h>
@@ -40,11 +41,11 @@ unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info
}
EXPORT_SYMBOL(drm_fb_clip_offset);
-/* TODO: Make this functon work with multi-plane formats. */
-static int drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool vaddr_cached_hint,
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
+/* TODO: Make this function work with multi-plane formats. */
+static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool vaddr_cached_hint,
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
unsigned long lines = drm_rect_height(clip);
@@ -54,7 +55,7 @@ static int drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pix
const void *sbuf;
/*
- * Some source buffers, such as CMA memory, use write-combine
+ * Some source buffers, such as DMA memory, use write-combine
* caching, so reads are uncached. Speed up access by fetching
* one line at a time.
*/
@@ -83,11 +84,11 @@ static int drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pix
return 0;
}
-/* TODO: Make this functon work with multi-plane formats. */
-static int drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool vaddr_cached_hint,
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
+/* TODO: Make this function work with multi-plane formats. */
+static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool vaddr_cached_hint,
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
unsigned long lines = drm_rect_height(clip);
@@ -128,65 +129,82 @@ static int drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned
return 0;
}
-/**
- * drm_fb_memcpy - Copy clip buffer
- * @dst: Destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: Source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- *
- * This function does not apply clipping on dst, i.e. the destination
- * is at the top-left corner.
- */
-void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+/* TODO: Make this function work with multi-plane formats. */
+static int drm_fb_xfrm(struct iosys_map *dst,
+ const unsigned int *dst_pitch, const u8 *dst_pixsize,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool vaddr_cached_hint,
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
- unsigned int cpp = fb->format->cpp[0];
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y, lines = clip->y2 - clip->y1;
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
if (!dst_pitch)
- dst_pitch = len;
+ dst_pitch = default_dst_pitch;
- vaddr += clip_offset(clip, fb->pitches[0], cpp);
- for (y = 0; y < lines; y++) {
- memcpy(dst, vaddr, len);
- vaddr += fb->pitches[0];
- dst += dst_pitch;
- }
+ /* TODO: handle src in I/O memory here */
+ if (dst[0].is_iomem)
+ return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0],
+ src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
+ else
+ return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0],
+ src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
}
-EXPORT_SYMBOL(drm_fb_memcpy);
/**
- * drm_fb_memcpy_toio - Copy clip buffer
- * @dst: Destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: Source buffer
+ * drm_fb_memcpy - Copy clip buffer
+ * @dst: Array of destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * This function does not apply clipping on dst, i.e. the destination
- * is at the top-left corner.
+ * This function copies parts of a framebuffer to display memory. Destination and
+ * framebuffer formats must match. No conversion takes place. The parameters @dst,
+ * @dst_pitch and @src refer to arrays. Each array must have at least as many entries
+ * as there are planes in @fb's format. Each entry stores the value for the format's
+ * respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*/
-void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- unsigned int cpp = fb->format->cpp[0];
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y, lines = clip->y2 - clip->y1;
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
- if (!dst_pitch)
- dst_pitch = len;
+ const struct drm_format_info *format = fb->format;
+ unsigned int i, y, lines = drm_rect_height(clip);
- vaddr += clip_offset(clip, fb->pitches[0], cpp);
- for (y = 0; y < lines; y++) {
- memcpy_toio(dst, vaddr, len);
- vaddr += fb->pitches[0];
- dst += dst_pitch;
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int bpp_i = drm_format_info_bpp(format, i);
+ unsigned int cpp_i = DIV_ROUND_UP(bpp_i, 8);
+ size_t len_i = DIV_ROUND_UP(drm_rect_width(clip) * bpp_i, 8);
+ unsigned int dst_pitch_i = dst_pitch[i];
+ struct iosys_map dst_i = dst[i];
+ struct iosys_map src_i = src[i];
+
+ if (!dst_pitch_i)
+ dst_pitch_i = len_i;
+
+ iosys_map_incr(&src_i, clip_offset(clip, fb->pitches[i], cpp_i));
+ for (y = 0; y < lines; y++) {
+ /* TODO: handle src_i in I/O memory here */
+ iosys_map_memcpy_to(&dst_i, 0, src_i.vaddr, len_i);
+ iosys_map_incr(&src_i, fb->pitches[i]);
+ iosys_map_incr(&dst_i, dst_pitch_i);
+ }
}
}
-EXPORT_SYMBOL(drm_fb_memcpy_toio);
+EXPORT_SYMBOL(drm_fb_memcpy);
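A hedged sketch of the array-based calling convention documented above, loosely modelled on how shadow-buffered drivers copy a damaged region to hardware memory; struct my_device and its screen_base field are hypothetical, while shadow_plane_state->data is the per-plane array of source mappings provided by the shadow-plane helpers:

static void my_update_rect(struct my_device *sdev, struct drm_framebuffer *fb,
			   struct drm_shadow_plane_state *shadow_plane_state,
			   struct drm_rect *clip)
{
	struct iosys_map dst;

	/* Destination is I/O memory; point it at the clipped region. */
	iosys_map_set_vaddr_iomem(&dst, sdev->screen_base);
	iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));

	drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, clip);
}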
static void drm_fb_swab16_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -210,37 +228,47 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels
/**
* drm_fb_swab - Swap bytes into clip buffer
- * @dst: Destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @src: Source buffer
+ * @dst: Array of destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @cached: Source buffer is mapped cached (eg. not write-combined)
*
- * If @cached is false a temporary buffer is used to cache one pixel line at a
- * time to speed up slow uncached reads.
+ * This function copies parts of a framebuffer to display memory and swaps per-pixel
+ * bytes during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index. If @cached is
+ * false a temporary buffer is used to cache one pixel line at a time to speed up
+ * slow uncached reads.
*
- * This function does not apply clipping on dst, i.e. the destination
- * is at the top-left corner.
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*/
-void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip,
- bool cached)
+void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool cached)
{
- u8 cpp = fb->format->cpp[0];
+ const struct drm_format_info *format = fb->format;
+ u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8);
+ void (*swab_line)(void *dbuf, const void *sbuf, unsigned int npixels);
switch (cpp) {
case 4:
- drm_fb_xfrm(dst, dst_pitch, cpp, src, fb, clip, cached, drm_fb_swab32_line);
+ swab_line = drm_fb_swab32_line;
break;
case 2:
- drm_fb_xfrm(dst, dst_pitch, cpp, src, fb, clip, cached, drm_fb_swab16_line);
+ swab_line = drm_fb_swab16_line;
break;
default:
drm_warn_once(fb->dev, "Format %p4cc has unsupported pixel size.\n",
- &fb->format->format);
- break;
+ &format->format);
+ return;
}
+
+ drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line);
}
EXPORT_SYMBOL(drm_fb_swab);
@@ -261,32 +289,50 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne
/**
* drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
- * @dst: RGB332 destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @src: XRGB8888 source buffer
+ * @dst: Array of RGB332 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drivers can use this function for RGB332 devices that don't natively support XRGB8888.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB332 devices that don't support XRGB8888 natively.
*/
-void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm(dst, dst_pitch, 1, src, fb, clip, false, drm_fb_xrgb8888_to_rgb332_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 1,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgb332_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u16 *dbuf16 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- val16 = ((sbuf32[x] & 0x00F80000) >> 8) |
- ((sbuf32[x] & 0x0000FC00) >> 5) |
- ((sbuf32[x] & 0x000000F8) >> 3);
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
dbuf16[x] = val16;
}
}
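To make the packing above concrete, an editorial worked example (not from the patch) tracing one pixel through drm_fb_xrgb8888_to_rgb565_line():

    /*
     * XRGB8888 pixel 0xff3366cc: R8 = 0x33, G8 = 0x66, B8 = 0xcc
     *
     *   (0x003366cc & 0x00F80000) >> 8 = 0x3000   R5 -> bits 15:11
     *   (0x003366cc & 0x0000FC00) >> 5 = 0x0320   G6 -> bits 10:5
     *   (0x003366cc & 0x000000F8) >> 3 = 0x0019   B5 -> bits 4:0
     *
     * RGB565 result: 0x3000 | 0x0320 | 0x0019 = 0x3339
     */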
@@ -295,146 +341,143 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
u16 *dbuf16 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- val16 = ((sbuf32[x] & 0x00F80000) >> 8) |
- ((sbuf32[x] & 0x0000FC00) >> 5) |
- ((sbuf32[x] & 0x000000F8) >> 3);
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
dbuf16[x] = swab16(val16);
}
}
/**
* drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * @dst: Array of RGB565 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @swab: Swap bytes
*
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
- */
-void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip,
- bool swab)
-{
- if (swab)
- drm_fb_xfrm(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_swab_line);
- else
- drm_fb_xfrm(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_line);
-}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
-
-/**
- * drm_fb_xrgb8888_to_rgb565_toio - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- * @swab: Swap bytes
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
*
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB565 devices that don't support XRGB8888 natively.
*/
-void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab)
+void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool swab)
{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
+
if (swab)
- drm_fb_xfrm_toio(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_swab_line);
+ xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
else
- drm_fb_xfrm_toio(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_line);
+ xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_toio);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- *dbuf8++ = (sbuf32[x] & 0x000000FF) >> 0;
- *dbuf8++ = (sbuf32[x] & 0x0000FF00) >> 8;
- *dbuf8++ = (sbuf32[x] & 0x00FF0000) >> 16;
+ pix = le32_to_cpu(sbuf32[x]);
+ *dbuf8++ = (pix & 0x000000FF) >> 0;
+ *dbuf8++ = (pix & 0x0000FF00) >> 8;
+ *dbuf8++ = (pix & 0x00FF0000) >> 16;
}
}
/**
* drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
- * @dst: RGB888 destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @src: XRGB8888 source buffer
+ * @dst: Array of RGB888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drivers can use this function for RGB888 devices that don't natively
- * support XRGB8888.
- */
-void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
-{
- drm_fb_xfrm(dst, dst_pitch, 3, src, fb, clip, false, drm_fb_xrgb8888_to_rgb888_line);
-}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
-
-/**
- * drm_fb_xrgb8888_to_rgb888_toio - Convert XRGB8888 to RGB888 clip buffer
- * @dst: RGB565 destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*
* Drivers can use this function for RGB888 devices that don't natively
* support XRGB8888.
*/
-void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 3, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb888_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 3,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgb888_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_toio);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_rgb565_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u32 *dbuf32 = dbuf;
- const u16 *sbuf16 = sbuf;
+ __le32 *dbuf32 = dbuf;
+ const __le16 *sbuf16 = sbuf;
unsigned int x;
- for (x = 0; x < pixels; x++, ++sbuf16, ++dbuf32) {
- u32 val32 = ((*sbuf16 & 0xf800) << 8) |
- ((*sbuf16 & 0x07e0) << 5) |
- ((*sbuf16 & 0x001f) << 3);
- *dbuf32 = 0xff000000 | val32 |
- ((val32 >> 3) & 0x00070007) |
- ((val32 >> 2) & 0x00000300);
+ for (x = 0; x < pixels; x++) {
+ u16 val16 = le16_to_cpu(sbuf16[x]);
+ u32 val32 = ((val16 & 0xf800) << 8) |
+ ((val16 & 0x07e0) << 5) |
+ ((val16 & 0x001f) << 3);
+ val32 = 0xff000000 | val32 |
+ ((val32 >> 3) & 0x00070007) |
+ ((val32 >> 2) & 0x00000300);
+ dbuf32[x] = cpu_to_le32(val32);
}
}
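An editorial worked example (not from the patch) of the expansion above, showing why the extra OR terms are needed so that full-scale RGB565 values expand to full-scale 8-bit channels:

    /*
     * RGB565 pixel 0xffff (white): R5 = 0x1f, G6 = 0x3f, B5 = 0x1f
     *
     *   val32 = 0x00F80000 | 0x0000FC00 | 0x000000F8 = 0x00F8FCF8
     *
     * Without the fill terms the result would be 0xfff8fcf8; with them:
     *
     *   (val32 >> 3) & 0x00070007 = 0x00070007
     *   (val32 >> 2) & 0x00000300 = 0x00000300
     *
     *   0xff000000 | 0x00F8FCF8 | 0x00070007 | 0x00000300 = 0xffffffff
     */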
-static void drm_fb_rgb565_to_xrgb8888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+static void drm_fb_rgb565_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 4, vaddr, fb, clip, false,
- drm_fb_rgb565_to_xrgb8888_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_rgb565_to_xrgb8888_line);
}
static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u32 *dbuf32 = dbuf;
+ __le32 *dbuf32 = dbuf;
const u8 *sbuf8 = sbuf;
unsigned int x;
@@ -442,117 +485,159 @@ static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigne
u8 r = *sbuf8++;
u8 g = *sbuf8++;
u8 b = *sbuf8++;
- *dbuf32++ = 0xff000000 | (r << 16) | (g << 8) | b;
+ u32 pix = 0xff000000 | (r << 16) | (g << 8) | b;
+ dbuf32[x] = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb888_to_xrgb8888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+static void drm_fb_rgb888_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 4, vaddr, fb, clip, false,
- drm_fb_rgb888_to_xrgb8888_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_rgb888_to_xrgb8888_line);
}
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u32 *dbuf32 = dbuf;
- const u32 *sbuf32 = sbuf;
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 val32;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- val32 = ((sbuf32[x] & 0x000000FF) << 2) |
- ((sbuf32[x] & 0x0000FF00) << 4) |
- ((sbuf32[x] & 0x00FF0000) << 6);
- *dbuf32++ = val32 | ((val32 >> 8) & 0x00300C03);
+ pix = le32_to_cpu(sbuf32[x]);
+ val32 = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ pix = val32 | ((val32 >> 8) & 0x00300C03);
+ *dbuf32++ = cpu_to_le32(pix);
}
}
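A brief editorial worked example (not from the patch) of the 8-to-10-bit expansion above:

    /*
     * XRGB8888 pixel 0x00ffffff (white):
     *
     *   val32 = (0xff << 2) | (0xff00 << 4) | (0xff0000 << 6) = 0x3FCFF3FC
     *   fill  = (val32 >> 8) & 0x00300C03                     = 0x00300C03
     *
     *   result: 0x3FCFF3FC | 0x00300C03 = 0x3FFFFFFF  (all channels at 0x3ff)
     */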
/**
- * drm_fb_xrgb8888_to_xrgb2101010_toio - Convert XRGB8888 to XRGB2101010 clip
- * buffer
- * @dst: XRGB2101010 destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * drm_fb_xrgb8888_to_xrgb2101010 - Convert XRGB8888 to XRGB2101010 clip buffer
+ * @dst: Array of XRGB2101010 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drivers can use this function for XRGB2101010 devices that don't natively
- * support XRGB8888.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XRGB2101010 devices that don't support XRGB8888
+ * natively.
*/
-void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst,
- unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 4, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_xrgb2101010_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xrgb2101010_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
for (x = 0; x < pixels; x++) {
- u8 r = (*sbuf32 & 0x00ff0000) >> 16;
- u8 g = (*sbuf32 & 0x0000ff00) >> 8;
- u8 b = *sbuf32 & 0x000000ff;
+ u32 pix = le32_to_cpu(sbuf32[x]);
+ u8 r = (pix & 0x00ff0000) >> 16;
+ u8 g = (pix & 0x0000ff00) >> 8;
+ u8 b = pix & 0x000000ff;
/* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
*dbuf8++ = (3 * r + 6 * g + b) / 10;
- sbuf32++;
}
}
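A short editorial worked example (not from the patch) of the integer approximation above:

    /*
     * (3 * R + 6 * G + B) / 10 approximates Y = 0.299 R + 0.587 G + 0.114 B:
     *
     *   pure green 0xff00ff00: (3 * 0   + 6 * 255 + 0)   / 10 = 153
     *   pure red   0xffff0000: (3 * 255 + 6 * 0   + 0)   / 10 = 76
     *   white      0xffffffff: (3 * 255 + 6 * 255 + 255) / 10 = 255
     */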
/**
* drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
- * @dst: 8-bit grayscale destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * @dst: Array of 8-bit grayscale destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drm doesn't have native monochrome or grayscale support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
*
- * Monochrome drivers will use the most significant bit,
- * where 1 means foreground color and 0 background color.
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*
- * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
+ * DRM doesn't have native monochrome or grayscale support. Drivers can use this
+ * function for grayscale devices that don't support XRGB8888 natively. Such
+ * drivers can announce the commonly supported XR24 format to userspace and use
+ * this function to convert to the native format. Monochrome drivers will use the
+ * most significant bit, where 1 means foreground color and 0 background color.
+ * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
*/
-void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm(dst, dst_pitch, 1, vaddr, fb, clip, false, drm_fb_xrgb8888_to_gray8_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 1,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_gray8_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
/**
- * drm_fb_blit_toio - Copy parts of a framebuffer to display memory
- * @dst: The display memory to copy to
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * drm_fb_blit - Copy parts of a framebuffer to display memory
+ * @dst: Array of display-memory addresses to copy to
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
* @dst_format: FOURCC code of the display's color format
- * @vmap: The framebuffer memory to copy from
+ * @src: The framebuffer memory to copy from
* @fb: The framebuffer to copy from
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory. If the
* formats of the display and the framebuffer mismatch, the blit function
- * will attempt to convert between them.
+ * will attempt to convert between them during the process. The parameters @dst,
+ * @dst_pitch and @src refer to arrays. Each array must have at least as many
+ * entries as there are planes in @dst_format's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*
* Returns:
* 0 on success, or
* -EINVAL if the color-format conversion failed, or
* a negative error code otherwise.
*/
-int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format,
- const void *vmap, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
uint32_t fb_format = fb->format->format;
@@ -567,30 +652,30 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
dst_format = DRM_FORMAT_XRGB2101010;
if (dst_format == fb_format) {
- drm_fb_memcpy_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_RGB565) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb565_toio(dst, dst_pitch, vmap, fb, clip, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
}
} else if (dst_format == DRM_FORMAT_RGB888) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb888_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
return 0;
}
} else if (dst_format == DRM_FORMAT_XRGB8888) {
if (fb_format == DRM_FORMAT_RGB888) {
- drm_fb_rgb888_to_xrgb8888_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_rgb888_to_xrgb8888(dst, dst_pitch, src, fb, clip);
return 0;
} else if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_rgb565_to_xrgb8888_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_rgb565_to_xrgb8888(dst, dst_pitch, src, fb, clip);
return 0;
}
} else if (dst_format == DRM_FORMAT_XRGB2101010) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_xrgb2101010_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
}
}
@@ -600,8 +685,7 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
return -EINVAL;
}
-EXPORT_SYMBOL(drm_fb_blit_toio);
-
+EXPORT_SYMBOL(drm_fb_blit);
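For illustration (not part of the patch), a minimal sketch of calling the renamed helper from a driver's damage handler; all names other than drm_fb_blit() and DRM_FORMAT_RGB565 are assumptions:

    #include <linux/iosys-map.h>
    #include <drm/drm_format_helper.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_framebuffer.h>
    #include <drm/drm_print.h>
    #include <drm/drm_rect.h>

    /* Hypothetical: copy the damaged area of an XRGB8888 fb into RGB565 VRAM. */
    static void example_flush_damage(void __iomem *vram, unsigned int vram_pitch,
                                     const struct iosys_map *src,
                                     const struct drm_framebuffer *fb,
                                     const struct drm_rect *damage)
    {
            struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(vram);

            if (drm_fb_blit(&dst, &vram_pitch, DRM_FORMAT_RGB565, src, fb, damage))
                    drm_warn(fb->dev, "unsupported format %p4cc\n",
                             &fb->format->format);
    }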
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -622,49 +706,67 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int
/**
* drm_fb_xrgb8888_to_mono - Convert XRGB8888 to monochrome
- * @dst: monochrome destination buffer (0=black, 1=white)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * @dst: Array of monochrome destination buffers (0=black, 1=white)
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * DRM doesn't have native monochrome support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner). The first pixel (upper left corner of the clip rectangle) will
+ * be converted and copied to the first bit (LSB) in the first byte of the monochrome
+ * destination buffer. If the caller requires that the first pixel in a byte must
+ * be located at an x-coordinate that is a multiple of 8, then the caller must take
+ * care itself of supplying a suitable clip rectangle.
+ *
+ * DRM doesn't have native monochrome support. Drivers can use this function for
+ * monochrome devices that don't support XRGB8888 natively. Such drivers can
+ * announce the commonly supported XR24 format to userspace and use this function
+ * to convert to the native format.
*
* This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and
* then the result is converted from grayscale to monochrome.
- *
- * The first pixel (upper left corner of the clip rectangle) will be converted
- * and copied to the first bit (LSB) in the first byte of the monochrome
- * destination buffer.
- * If the caller requires that the first pixel in a byte must be located at an
- * x-coordinate that is a multiple of 8, then the caller must take care itself
- * of supplying a suitable clip rectangle.
*/
-void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
unsigned int linepixels = drm_rect_width(clip);
unsigned int lines = drm_rect_height(clip);
unsigned int cpp = fb->format->cpp[0];
unsigned int len_src32 = linepixels * cpp;
struct drm_device *dev = fb->dev;
+ void *vaddr = src[0].vaddr;
+ unsigned int dst_pitch_0;
unsigned int y;
- u8 *mono = dst, *gray8;
+ u8 *mono = dst[0].vaddr, *gray8;
u32 *src32;
if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
return;
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+ dst_pitch_0 = dst_pitch[0];
+
/*
* The mono destination buffer contains 1 bit per pixel
*/
- if (!dst_pitch)
- dst_pitch = DIV_ROUND_UP(linepixels, 8);
+ if (!dst_pitch_0)
+ dst_pitch_0 = DIV_ROUND_UP(linepixels, 8);
/*
- * The cma memory is write-combined so reads are uncached.
+ * The dma memory is write-combined so reads are uncached.
* Speed up by fetching one line at a time.
*
* Also, format conversion from XR24 to monochrome are done
@@ -686,9 +788,117 @@ void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *vadd
drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
drm_fb_gray8_to_mono_line(mono, gray8, linepixels);
vaddr += fb->pitches[0];
- mono += dst_pitch;
+ mono += dst_pitch_0;
}
kfree(src32);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
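An editorial note (not from the patch) making the pitch handling above concrete:

    /*
     * For a 100-pixel-wide clip rectangle and dst_pitch == NULL (or 0), each
     * monochrome output line occupies DIV_ROUND_UP(100, 8) = 13 bytes; the
     * clip's upper-left pixel lands in bit 0 (LSB) of the line's first byte.
     */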
+
+static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
+{
+ const uint32_t *fourccs_end = fourccs + nfourccs;
+
+ while (fourccs < fourccs_end) {
+ if (*fourccs == fourcc)
+ return true;
+ ++fourccs;
+ }
+ return false;
+}
+
+/**
+ * drm_fb_build_fourcc_list - Filters a list of supported color formats against
+ * the device's native formats
+ * @dev: DRM device
+ * @native_fourccs: 4CC codes of natively supported color formats
+ * @native_nfourccs: The number of entries in @native_fourccs
+ * @driver_fourccs: 4CC codes of all driver-supported color formats
+ * @driver_nfourccs: The number of entries in @driver_fourccs
+ * @fourccs_out: Returns 4CC codes of supported color formats
+ * @nfourccs_out: The number of available entries in @fourccs_out
+ *
+ * This function creates a list of supported color formats from natively
+ * supported formats and the emulated formats.
+ * At a minimum, most userspace programs expect at least support for
+ * XRGB8888 on the primary plane. Devices that have to emulate the
+ * format, and possibly others, can use drm_fb_build_fourcc_list() to
+ * create a list of supported color formats. The returned list can
+ * be handed over to drm_universal_plane_init() et al. Native formats
+ * will go before emulated formats. Other heuristics might be applied
+ * to optimize the order. Formats near the beginning of the list are
+ * usually preferred over formats near the end of the list.
+ *
+ * Returns:
+ * The number of color-format 4CC codes returned in @fourccs_out.
+ */
+size_t drm_fb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ const u32 *driver_fourccs, size_t driver_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out)
+{
+ u32 *fourccs = fourccs_out;
+ const u32 *fourccs_end = fourccs_out + nfourccs_out;
+ bool found_native = false;
+ size_t i;
+
+ /*
+ * The device's native formats go first.
+ */
+
+ for (i = 0; i < native_nfourccs; ++i) {
+ u32 fourcc = native_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
+
+ if (!found_native)
+ found_native = is_listed_fourcc(driver_fourccs, driver_nfourccs, fourcc);
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ /*
+ * The plane's atomic_update helper converts the framebuffer's color format
+ * to a native format when copying to device memory.
+ *
+ * If there is not a single format supported by both device and
+ * driver, the native formats are likely not supported by the conversion
+ * helpers. Therefore *only* support the native formats and add a
+ * conversion helper ASAP.
+ */
+ if (!found_native) {
+ drm_warn(dev, "Format conversion helpers required to add extra formats.\n");
+ goto out;
+ }
+
+ /*
+ * The extra formats, emulated by the driver, go second.
+ */
+
+ for (i = 0; (i < driver_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = driver_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate and native entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+out:
+ return fourccs - fourccs_out;
+}
+EXPORT_SYMBOL(drm_fb_build_fourcc_list);
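As an editorial illustration (not part of the patch), a sketch of the usage described in the kernel-doc above; the format arrays, output buffer size and plane funcs are assumptions:

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_format_helper.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_plane.h>

    static const u32 example_native_formats[] = {
            DRM_FORMAT_RGB565,      /* what the hardware scans out */
    };

    static const u32 example_driver_formats[] = {
            DRM_FORMAT_RGB565,
            DRM_FORMAT_XRGB8888,    /* emulated via drm_fb_xrgb8888_to_rgb565() */
    };

    static const struct drm_plane_funcs example_plane_funcs = {
            /* a real driver also needs the atomic state hooks (.reset etc.) */
            .update_plane = drm_atomic_helper_update_plane,
            .disable_plane = drm_atomic_helper_disable_plane,
            .destroy = drm_plane_cleanup,
    };

    static int example_init_primary_plane(struct drm_device *dev, struct drm_plane *plane)
    {
            u32 formats[8];
            size_t nformats;

            nformats = drm_fb_build_fourcc_list(dev,
                                                example_native_formats,
                                                ARRAY_SIZE(example_native_formats),
                                                example_driver_formats,
                                                ARRAY_SIZE(example_driver_formats),
                                                formats, ARRAY_SIZE(formats));

            return drm_universal_plane_init(dev, plane, 1, &example_plane_funcs,
                                            formats, nformats, NULL,
                                            DRM_PLANE_TYPE_PRIMARY, NULL);
    }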
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 07741b678798..e09331bb3bc7 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -43,6 +43,21 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
uint32_t fmt = DRM_FORMAT_INVALID;
switch (bpp) {
+ case 1:
+ if (depth == 1)
+ fmt = DRM_FORMAT_C1;
+ break;
+
+ case 2:
+ if (depth == 2)
+ fmt = DRM_FORMAT_C2;
+ break;
+
+ case 4:
+ if (depth == 4)
+ fmt = DRM_FORMAT_C4;
+ break;
+
case 8:
if (depth == 8)
fmt = DRM_FORMAT_C8;
@@ -132,7 +147,26 @@ EXPORT_SYMBOL(drm_driver_legacy_fb_format);
const struct drm_format_info *__drm_format_info(u32 format)
{
static const struct drm_format_info formats[] = {
- { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_C1, .depth = 1, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_C2, .depth = 2, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_C4, .depth = 4, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_D1, .depth = 1, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_D2, .depth = 2, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_D4, .depth = 4, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_D8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R1, .depth = 1, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R2, .depth = 2, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R4, .depth = 4, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
@@ -371,6 +405,25 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
EXPORT_SYMBOL(drm_format_info_block_height);
/**
+ * drm_format_info_bpp - number of bits per pixel
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The actual number of bits per pixel, depending on the plane index.
+ */
+unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ return info->char_per_block[plane] * 8 /
+ (drm_format_info_block_width(info, plane) *
+ drm_format_info_block_height(info, plane));
+}
+EXPORT_SYMBOL(drm_format_info_bpp);
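A brief editorial worked example (not from the patch) of the new helper for the sub-byte formats added above:

    /*
     * DRM_FORMAT_C1: char_per_block = 1, block_w = 8, block_h = 1
     *   drm_format_info_bpp() = 1 * 8 / (8 * 1) = 1 bit per pixel
     *
     * DRM_FORMAT_C4: char_per_block = 1, block_w = 2, block_h = 1
     *   drm_format_info_bpp() = 1 * 8 / (2 * 1) = 4 bits per pixel
     */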
+
+/**
* drm_format_info_min_pitch - computes the minimum required pitch in bytes
* @info: pixel format info
* @plane: plane index
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 4562a8b86579..2dd97473ca10 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -87,13 +87,13 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
- DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
- src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
- src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
- src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
- src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
- fb->width, fb->height);
+ drm_dbg_kms(fb->dev, "Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
@@ -125,7 +125,7 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth);
if (r.pixel_format == DRM_FORMAT_INVALID) {
- DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
+ drm_dbg_kms(dev, "bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
return -EINVAL;
}
@@ -177,18 +177,18 @@ static int framebuffer_check(struct drm_device *dev,
/* check if the format is supported at all */
if (!__drm_format_info(r->pixel_format)) {
- DRM_DEBUG_KMS("bad framebuffer format %p4cc\n",
- &r->pixel_format);
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
return -EINVAL;
}
if (r->width == 0) {
- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+ drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
}
if (r->height == 0) {
- DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ drm_dbg_kms(dev, "bad framebuffer height %u\n", r->height);
return -EINVAL;
}
@@ -202,12 +202,12 @@ static int framebuffer_check(struct drm_device *dev,
u64 min_pitch = drm_format_info_min_pitch(info, i, width);
if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
- DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i);
+ drm_dbg_kms(dev, "Format requires non-linear modifier for plane %d\n", i);
return -EINVAL;
}
if (!r->handles[i]) {
- DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ drm_dbg_kms(dev, "no buffer object handle for plane %d\n", i);
return -EINVAL;
}
@@ -218,20 +218,20 @@ static int framebuffer_check(struct drm_device *dev,
return -ERANGE;
if (block_size && r->pitches[i] < min_pitch) {
- DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ drm_dbg_kms(dev, "bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
+ drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
return -EINVAL;
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
r->modifier[i] != r->modifier[0]) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
+ drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
return -EINVAL;
}
@@ -244,7 +244,7 @@ static int framebuffer_check(struct drm_device *dev,
if (r->pixel_format != DRM_FORMAT_NV12 ||
width % 128 || height % 32 ||
r->pitches[i] % 128) {
- DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+ drm_dbg_kms(dev, "bad modifier data for plane %d\n", i);
return -EINVAL;
}
break;
@@ -256,7 +256,7 @@ static int framebuffer_check(struct drm_device *dev,
for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
- DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero modifier for unused plane %d\n", i);
return -EINVAL;
}
@@ -265,17 +265,17 @@ static int framebuffer_check(struct drm_device *dev,
continue;
if (r->handles[i]) {
- DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+ drm_dbg_kms(dev, "buffer object handle for unused plane %d\n", i);
return -EINVAL;
}
if (r->pitches[i]) {
- DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero pitch for unused plane %d\n", i);
return -EINVAL;
}
if (r->offsets[i]) {
- DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero offset for unused plane %d\n", i);
return -EINVAL;
}
}
@@ -293,24 +293,24 @@ drm_internal_framebuffer_create(struct drm_device *dev,
int ret;
if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ drm_dbg_kms(dev, "bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
- r->width, config->min_width, config->max_width);
+ drm_dbg_kms(dev, "bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
- r->height, config->min_height, config->max_height);
+ drm_dbg_kms(dev, "bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return ERR_PTR(-EINVAL);
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
dev->mode_config.fb_modifiers_not_supported) {
- DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+ drm_dbg_kms(dev, "driver does not support fb modifiers\n");
return ERR_PTR(-EINVAL);
}
@@ -320,7 +320,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("could not create framebuffer\n");
+ drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
}
@@ -356,7 +356,7 @@ int drm_mode_addfb2(struct drm_device *dev,
if (IS_ERR(fb))
return PTR_ERR(fb);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ drm_dbg_kms(dev, "[FB:%d]\n", fb->base.id);
r->fb_id = fb->base.id;
/* Transfer ownership to the filp for reaping on close */
@@ -384,7 +384,7 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev,
* then. So block it to make userspace fallback to
* ADDFB.
*/
- DRM_DEBUG_KMS("addfb2 broken on bigendian");
+ drm_dbg_kms(dev, "addfb2 broken on bigendian");
return -EOPNOTSUPP;
}
#endif
@@ -530,7 +530,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->height = fb->height;
r->width = fb->width;
r->depth = fb->format->depth;
- r->bpp = fb->format->cpp[0] * 8;
+ r->bpp = drm_format_info_bpp(fb->format, 0);
r->pitch = fb->pitches[0];
/* GET_FB() is an unprivileged ioctl so we must not return a
@@ -935,7 +935,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
* the id and get back -EINVAL. Obviously no concern at driver unload time.
*
* Also, the framebuffer will not be removed from the lookup idr - for
- * user-created framebuffers this will happen in in the rmfb ioctl. For
+ * user-created framebuffers this will happen in the rmfb ioctl. For
* driver-private objects (e.g. for fbdev) drivers need to explicitly call
* drm_framebuffer_unregister_private.
*/
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 86d670c71286..8b68a3c1e6ab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -165,24 +165,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
obj->resv = &obj->_resv;
drm_vma_node_reset(&obj->vma_node);
+ INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +239,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -951,6 +937,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
+ drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1274,3 +1261,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
+
+/**
+ * drm_gem_lru_init - initialize a LRU
+ *
+ * @lru: The LRU to initialize
+ * @lock: The lock protecting the LRU
+ */
+void
+drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
+{
+ lru->lock = lock;
+ lru->count = 0;
+ INIT_LIST_HEAD(&lru->list);
+}
+EXPORT_SYMBOL(drm_gem_lru_init);
+
+static void
+drm_gem_lru_remove_locked(struct drm_gem_object *obj)
+{
+ obj->lru->count -= obj->size >> PAGE_SHIFT;
+ WARN_ON(obj->lru->count < 0);
+ list_del(&obj->lru_node);
+ obj->lru = NULL;
+}
+
+/**
+ * drm_gem_lru_remove - remove object from whatever LRU it is in
+ *
+ * If the object is currently in any LRU, remove it.
+ *
+ * @obj: The GEM object to remove from current LRU
+ */
+void
+drm_gem_lru_remove(struct drm_gem_object *obj)
+{
+ struct drm_gem_lru *lru = obj->lru;
+
+ if (!lru)
+ return;
+
+ mutex_lock(lru->lock);
+ drm_gem_lru_remove_locked(obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_remove);
+
+static void
+drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ lockdep_assert_held_once(lru->lock);
+
+ if (obj->lru)
+ drm_gem_lru_remove_locked(obj);
+
+ lru->count += obj->size >> PAGE_SHIFT;
+ list_add_tail(&obj->lru_node, &lru->list);
+ obj->lru = lru;
+}
+
+/**
+ * drm_gem_lru_move_tail - move the object to the tail of the LRU
+ *
+ * If the object is already in this LRU it will be moved to the
+ * tail. Otherwise it will be removed from whichever other LRU
+ * it is in (if any) and moved into this LRU.
+ *
+ * @lru: The LRU to move the object into.
+ * @obj: The GEM object to move into this LRU
+ */
+void
+drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ mutex_lock(lru->lock);
+ drm_gem_lru_move_tail_locked(lru, obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_move_tail);
+
+/**
+ * drm_gem_lru_scan - helper to implement shrinker.scan_objects
+ *
+ * If the shrink callback succeeds, it is expected that the driver
+ * move the object out of this LRU.
+ *
+ * If the LRU possibly contains active buffers, it is the responsibility
+ * of the shrink callback to check for this (ie. dma_resv_test_signaled())
+ * or if necessary block until the buffer becomes idle.
+ *
+ * @lru: The LRU to scan
+ * @nr_to_scan: The number of pages to try to reclaim
+ * @shrink: Callback to try to shrink/reclaim the object.
+ */
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ bool (*shrink)(struct drm_gem_object *obj))
+{
+ struct drm_gem_lru still_in_lru;
+ struct drm_gem_object *obj;
+ unsigned freed = 0;
+
+ drm_gem_lru_init(&still_in_lru, lru->lock);
+
+ mutex_lock(lru->lock);
+
+ while (freed < nr_to_scan) {
+ obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
+
+ if (!obj)
+ break;
+
+ drm_gem_lru_move_tail_locked(&still_in_lru, obj);
+
+ /*
+ * If it's in the process of being freed, gem_object->free()
+ * may be blocked on lock waiting to remove it. So just
+ * skip it.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ continue;
+
+ /*
+ * Now that we own a reference, we can drop the lock for the
+ * rest of the loop body, to reduce contention with other
+ * code paths that need the LRU lock
+ */
+ mutex_unlock(lru->lock);
+
+ /*
+ * Note that this still needs to be trylock, since we can
+ * hit shrinker in response to trying to get backing pages
+ * for this obj (ie. while its lock is already held)
+ */
+ if (!dma_resv_trylock(obj->resv))
+ goto tail;
+
+ if (shrink(obj)) {
+ freed += obj->size >> PAGE_SHIFT;
+
+ /*
+ * If we succeeded in releasing the object's backing
+ * pages, we expect the driver to have moved the object
+ * out of this LRU
+ */
+ WARN_ON(obj->lru == &still_in_lru);
+ WARN_ON(obj->lru == lru);
+ }
+
+ dma_resv_unlock(obj->resv);
+
+tail:
+ drm_gem_object_put(obj);
+ mutex_lock(lru->lock);
+ }
+
+ /*
+ * Move objects we've skipped over out of the temporary still_in_lru
+ * back into this LRU
+ */
+ list_for_each_entry (obj, &still_in_lru.list, lru_node)
+ obj->lru = lru;
+ list_splice_tail(&still_in_lru.list, &lru->list);
+ lru->count += still_in_lru.count;
+
+ mutex_unlock(lru->lock);
+
+ return freed;
+}
+EXPORT_SYMBOL(drm_gem_lru_scan);
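As an editorial aside (not part of the patch), a minimal sketch of wiring the new LRU helpers into a shrinker; the example_* names and the purge policy are assumptions:

    #include <linux/mutex.h>
    #include <linux/shrinker.h>
    #include <drm/drm_gem.h>

    static DEFINE_MUTEX(example_lru_lock);
    static struct drm_gem_lru example_lru;

    static void example_lru_setup(void)
    {
            drm_gem_lru_init(&example_lru, &example_lru_lock);
    }

    /* Called by drm_gem_lru_scan() with obj->resv already trylocked. */
    static bool example_purge(struct drm_gem_object *obj)
    {
            /* Drop the object's backing pages and move it off example_lru here. */
            return false;   /* placeholder: nothing reclaimed */
    }

    static unsigned long example_shrinker_scan(struct shrinker *shrinker,
                                               struct shrink_control *sc)
    {
            unsigned long freed;

            freed = drm_gem_lru_scan(&example_lru, sc->nr_to_scan, example_purge);

            return freed ?: SHRINK_STOP;
    }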
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 42abee9a0f4f..f6901ff97bbb 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * drm gem CMA (contiguous memory allocator) helper functions
+ * drm gem DMA helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
*
@@ -20,20 +20,17 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>
/**
- * DOC: cma helpers
+ * DOC: dma helpers
*
- * The DRM GEM/CMA helpers are a means to provide buffer objects that are
+ * The DRM GEM/DMA helpers are a means to provide buffer objects that are
* presented to the device as a contiguous chunk of memory. This is useful
* for devices that do not support scatter-gather DMA (either directly or
* by using an intimately attached IOMMU).
*
- * Despite the name, the DRM GEM/CMA helpers are not hardwired to use the
- * Contiguous Memory Allocator (CMA).
- *
* For devices that access the memory bus through an (external) IOMMU then
* the buffer objects are allocated using a traditional page-based
* allocator and may be scattered through physical memory. However they
@@ -44,36 +41,36 @@
* objects that are physically contiguous in memory.
*
* For GEM callback helpers in struct &drm_gem_object functions, see likewise
- * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
- * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
+ * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
+ * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
*/
-static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
- .free = drm_gem_cma_object_free,
- .print_info = drm_gem_cma_object_print_info,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
- .mmap = drm_gem_cma_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
+ .free = drm_gem_dma_object_free,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = drm_gem_dma_object_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
/**
- * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
* @drm: DRM device
* @size: size of the object to allocate
* @private: true if used for internal purposes
*
- * This function creates and initializes a GEM CMA object of the given size,
+ * This function creates and initializes a GEM DMA object of the given size,
* but doesn't allocate any memory to back the object.
*
* Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
-static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
+static struct drm_gem_dma_object *
+__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret = 0;
@@ -81,22 +78,22 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
gem_obj = drm->driver->gem_create_object(drm, size);
if (IS_ERR(gem_obj))
return ERR_CAST(gem_obj);
- cma_obj = to_drm_gem_cma_obj(gem_obj);
+ dma_obj = to_drm_gem_dma_obj(gem_obj);
} else {
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
return ERR_PTR(-ENOMEM);
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
}
if (!gem_obj->funcs)
- gem_obj->funcs = &drm_gem_cma_default_funcs;
+ gem_obj->funcs = &drm_gem_dma_default_funcs;
if (private) {
drm_gem_private_object_init(drm, gem_obj, size);
/* Always use writecombine for dma-buf mappings */
- cma_obj->map_noncoherent = false;
+ dma_obj->map_noncoherent = false;
} else {
ret = drm_gem_object_init(drm, gem_obj, size);
}
@@ -109,19 +106,19 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
goto error;
}
- return cma_obj;
+ return dma_obj;
error:
- kfree(cma_obj);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
/**
- * drm_gem_cma_create - allocate an object with the given size
+ * drm_gem_dma_create - allocate an object with the given size
* @drm: DRM device
* @size: size of the object to allocate
*
- * This function creates a CMA GEM object and allocates memory as backing store.
+ * This function creates a DMA GEM object and allocates memory as backing store.
* The allocated memory will occupy a contiguous chunk of bus address space.
*
* For devices that are directly connected to the memory bus then the allocated
@@ -131,78 +128,79 @@ error:
* requirements.
*
* Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
size_t size)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
int ret;
size = round_up(size, PAGE_SIZE);
- cma_obj = __drm_gem_cma_create(drm, size, false);
- if (IS_ERR(cma_obj))
- return cma_obj;
+ dma_obj = __drm_gem_dma_create(drm, size, false);
+ if (IS_ERR(dma_obj))
+ return dma_obj;
- if (cma_obj->map_noncoherent) {
- cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
- &cma_obj->paddr,
+ if (dma_obj->map_noncoherent) {
+ dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
+ &dma_obj->dma_addr,
DMA_TO_DEVICE,
GFP_KERNEL | __GFP_NOWARN);
} else {
- cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+ dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
+ &dma_obj->dma_addr,
GFP_KERNEL | __GFP_NOWARN);
}
- if (!cma_obj->vaddr) {
+ if (!dma_obj->vaddr) {
drm_dbg(drm, "failed to allocate buffer with size %zu\n",
size);
ret = -ENOMEM;
goto error;
}
- return cma_obj;
+ return dma_obj;
error:
- drm_gem_object_put(&cma_obj->base);
+ drm_gem_object_put(&dma_obj->base);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+EXPORT_SYMBOL_GPL(drm_gem_dma_create);
/**
- * drm_gem_cma_create_with_handle - allocate an object with the given size and
+ * drm_gem_dma_create_with_handle - allocate an object with the given size and
* return a GEM handle to it
* @file_priv: DRM file-private structure to register the handle for
* @drm: DRM device
* @size: size of the object to allocate
* @handle: return location for the GEM handle
*
- * This function creates a CMA GEM object, allocating a chunk of memory as
+ * This function creates a DMA GEM object, allocating a chunk of memory as
* backing store. The GEM object is then added to the list of object associated
* with the given file and a handle to it is returned.
*
* The allocated memory will occupy a contiguous chunk of bus address space.
- * See drm_gem_cma_create() for more details.
+ * See drm_gem_dma_create() for more details.
*
* Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
-static struct drm_gem_cma_object *
-drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+static struct drm_gem_dma_object *
+drm_gem_dma_create_with_handle(struct drm_file *file_priv,
struct drm_device *drm, size_t size,
uint32_t *handle)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
- cma_obj = drm_gem_cma_create(drm, size);
- if (IS_ERR(cma_obj))
- return cma_obj;
+ dma_obj = drm_gem_dma_create(drm, size);
+ if (IS_ERR(dma_obj))
+ return dma_obj;
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
/*
* allocate a id of idr table where the obj is registered
@@ -214,44 +212,44 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
if (ret)
return ERR_PTR(ret);
- return cma_obj;
+ return dma_obj;
}
/**
- * drm_gem_cma_free - free resources associated with a CMA GEM object
- * @cma_obj: CMA GEM object to free
+ * drm_gem_dma_free - free resources associated with a DMA GEM object
+ * @dma_obj: DMA GEM object to free
*
- * This function frees the backing memory of the CMA GEM object, cleans up the
+ * This function frees the backing memory of the DMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
*/
-void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
- struct drm_gem_object *gem_obj = &cma_obj->base;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);
+ struct drm_gem_object *gem_obj = &dma_obj->base;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
if (gem_obj->import_attach) {
- if (cma_obj->vaddr)
+ if (dma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
- drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
- } else if (cma_obj->vaddr) {
- if (cma_obj->map_noncoherent)
- dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr,
+ drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
+ } else if (dma_obj->vaddr) {
+ if (dma_obj->map_noncoherent)
+ dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->dma_addr,
DMA_TO_DEVICE);
else
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->dma_addr);
}
drm_gem_object_release(gem_obj);
- kfree(cma_obj);
+ kfree(dma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_free);
+EXPORT_SYMBOL_GPL(drm_gem_dma_free);
/**
- * drm_gem_cma_dumb_create_internal - create a dumb buffer object
+ * drm_gem_dma_dumb_create_internal - create a dumb buffer object
* @file_priv: DRM file-private structure to create the dumb buffer for
* @drm: DRM device
* @args: IOCTL data
@@ -264,12 +262,12 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_free);
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
if (args->pitch < min_pitch)
args->pitch = min_pitch;
@@ -277,14 +275,14 @@ int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
- return PTR_ERR_OR_ZERO(cma_obj);
+ return PTR_ERR_OR_ZERO(dma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
+EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
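A short editorial worked example (not from the patch) of the pitch and size fixups performed above:

    /*
     * width = 1024, bpp = 16:
     *   min_pitch = DIV_ROUND_UP(1024 * 16, 8) = 2048 bytes
     *   args->pitch is raised to 2048 if userspace passed less, and
     *   args->size is raised to at least pitch * height.
     */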
/**
- * drm_gem_cma_dumb_create - create a dumb buffer object
+ * drm_gem_dma_dumb_create - create a dumb buffer object
* @file_priv: DRM file-private structure to create the dumb buffer for
* @drm: DRM device
* @args: IOCTL data
@@ -296,35 +294,35 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
*
* For hardware with additional restrictions, drivers can adjust the fields
* set up by userspace and pass the IOCTL data along to the
- * drm_gem_cma_dumb_create_internal() function.
+ * drm_gem_dma_dumb_create_internal() function.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->size = args->pitch * args->height;
- cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
- return PTR_ERR_OR_ZERO(cma_obj);
+ return PTR_ERR_OR_ZERO(dma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
+EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
-const struct vm_operations_struct drm_gem_cma_vm_ops = {
+const struct vm_operations_struct drm_gem_dma_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);
#ifndef CONFIG_MMU
/**
- * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
+ * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
* @filp: file object
* @addr: memory address
* @len: buffer size
@@ -339,13 +337,13 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
* Returns:
* mapping address on success or a negative error code on failure.
*/
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj = NULL;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
@@ -384,35 +382,35 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
return -EACCES;
}
- cma_obj = to_drm_gem_cma_obj(obj);
+ dma_obj = to_drm_gem_dma_obj(obj);
drm_gem_object_put(obj);
- return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
+ return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
+EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
#endif
/**
- * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
- * @cma_obj: CMA GEM object
+ * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @dma_obj: DMA GEM object
* @p: DRM printer
* @indent: Tab indentation level
*
- * This function prints paddr and vaddr for use in e.g. debugfs output.
+ * This function prints dma_addr and vaddr for use in e.g. debugfs output.
*/
-void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
struct drm_printer *p, unsigned int indent)
{
- drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
- drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
+ drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
+ drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
-EXPORT_SYMBOL(drm_gem_cma_print_info);
+EXPORT_SYMBOL(drm_gem_dma_print_info);
/**
- * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
- * pages for a CMA GEM object
- * @cma_obj: CMA GEM object
+ * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
+ * pages for a DMA GEM object
+ * @dma_obj: DMA GEM object
*
* This function exports a scatter/gather table by calling the standard
* DMA mapping API.
@@ -420,9 +418,9 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
- struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_gem_object *obj = &dma_obj->base;
struct sg_table *sgt;
int ret;
@@ -430,8 +428,8 @@ struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
if (!sgt)
return ERR_PTR(-ENOMEM);
- ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
- cma_obj->paddr, obj->size);
+ ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
+ dma_obj->dma_addr, obj->size);
if (ret < 0)
goto out;
@@ -441,10 +439,10 @@ out:
kfree(sgt);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);
/**
- * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
+ * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
* driver's scatter/gather table of pinned pages
* @dev: device to import into
* @attach: DMA-BUF attachment
@@ -453,7 +451,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
* This function imports a scatter/gather table exported via DMA-BUF by
* another driver. Imported buffers must be physically contiguous in memory
* (i.e. the scatter/gather table must contain a single entry). Drivers that
- * use the CMA helpers should set this as their
+ * use the DMA helpers should set this as their
* &drm_driver.gem_prime_import_sg_table callback.
*
* Returns:
@@ -461,56 +459,57 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
* error code on failure.
*/
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
return ERR_PTR(-EINVAL);
- /* Create a CMA GEM buffer. */
- cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
- if (IS_ERR(cma_obj))
- return ERR_CAST(cma_obj);
+ /* Create a DMA GEM buffer. */
+ dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
+ if (IS_ERR(dma_obj))
+ return ERR_CAST(dma_obj);
- cma_obj->paddr = sg_dma_address(sgt->sgl);
- cma_obj->sgt = sgt;
+ dma_obj->dma_addr = sg_dma_address(sgt->sgl);
+ dma_obj->sgt = sgt;
- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
+ attach->dmabuf->size);
- return &cma_obj->base;
+ return &dma_obj->base;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
/**
- * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
+ * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
* address space
- * @cma_obj: CMA GEM object
- * @map: Returns the kernel virtual address of the CMA GEM object's backing
+ * @dma_obj: DMA GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing
* store.
*
* This function maps a buffer into the kernel's virtual address space.
- * Since the CMA buffers are already mapped into the kernel virtual address
+ * Since the DMA buffers are already mapped into the kernel virtual address
* space this simply returns the cached virtual address.
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
struct iosys_map *map)
{
- iosys_map_set_vaddr(map, cma_obj->vaddr);
+ iosys_map_set_vaddr(map, dma_obj->vaddr);
return 0;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);
/**
- * drm_gem_cma_mmap - memory-map an exported CMA GEM object
- * @cma_obj: CMA GEM object
+ * drm_gem_dma_mmap - memory-map an exported DMA GEM object
+ * @dma_obj: DMA GEM object
* @vma: VMA for the area to be mapped
*
* This function maps a buffer into a userspace process's address space.
@@ -520,9 +519,9 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
- struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_gem_object *obj = &dma_obj->base;
int ret;
/*
@@ -534,37 +533,38 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_DONTEXPAND;
- if (cma_obj->map_noncoherent) {
+ if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- ret = dma_mmap_pages(cma_obj->base.dev->dev,
+ ret = dma_mmap_pages(dma_obj->base.dev->dev,
vma, vma->vm_end - vma->vm_start,
- virt_to_page(cma_obj->vaddr));
+ virt_to_page(dma_obj->vaddr));
} else {
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
+ dma_obj->dma_addr,
+ vma->vm_end - vma->vm_start);
}
if (ret)
drm_gem_vm_close(vma);
return ret;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);
/**
- * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
+ * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
* scatter/gather table and get the virtual address of the buffer
* @dev: DRM device
* @attach: DMA-BUF attachment
* @sgt: Scatter/gather table of pinned pages
*
* This function imports a scatter/gather table using
- * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
- * virtual address. This ensures that a CMA GEM object always has its virtual
+ * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a DMA GEM object always has its virtual
* address set. This address is released when the object is freed.
*
* This function can be used as the &drm_driver.gem_prime_import_sg_table
- * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
+ * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
* the necessary DRM driver operations.
*
* Returns:
@@ -572,11 +572,11 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
* error code on failure.
*/
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj;
struct iosys_map map;
int ret;
@@ -587,19 +587,19 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
return ERR_PTR(ret);
}
- obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
dma_buf_vunmap(attach->dmabuf, &map);
return obj;
}
- cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = map.vaddr;
+ dma_obj = to_drm_gem_dma_obj(obj);
+ dma_obj->vaddr = map.vaddr;
return obj;
}
-EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
+EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
-MODULE_DESCRIPTION("DRM CMA memory-management helpers");
+MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");
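For drivers converted by this rename, the wiring stays the same apart from the CMA->DMA spelling. A minimal sketch, assuming a hypothetical "foo" driver; the fops macro and DRM_GEM_DMA_DRIVER_OPS are the same ones used in the fsl-dcu conversion further down:

#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>

DEFINE_DRM_GEM_DMA_FOPS(foo_fops);	/* previously DEFINE_DRM_GEM_CMA_FOPS() */

static const struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,		/* dumb-buffer create, PRIME import, mmap */
	.fops = &foo_fops,
	.name = "foo",
	.desc = "Example DMA GEM driver",
};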
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 61339a9cd010..880a4975507f 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -490,6 +490,8 @@ void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_directi
}
EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
+// TODO Drop this function and replace by drm_format_info_bpp() once all
+// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -497,11 +499,6 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
info = drm_get_format_info(dev, mode_cmd);
- /* use whatever a driver has set */
- if (info->cpp[0])
- return info->cpp[0] * 8;
-
- /* guess otherwise */
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
@@ -510,11 +507,8 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
case DRM_FORMAT_VUY101010:
return 30;
default:
- break;
+ return drm_format_info_bpp(info, 0);
}
-
- /* all attempts failed */
- return 0;
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 904fc893c905..35138f8a375c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -663,7 +663,7 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info);
* drm_gem_shmem_get_pages_sgt() instead.
*
* Returns:
- * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
*/
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index d607043716d3..125160b534be 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -226,9 +226,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
* A failing ttm_bo_init will call ttm_buffer_object_destroy
* to release gbo->bo.base and kfree gbo.
*/
- ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, false, NULL, NULL,
- ttm_buffer_object_destroy);
+ ret = ttm_bo_init_validate(bdev, &gbo->bo, ttm_bo_type_device,
+ &gbo->placement, pg_align, false, NULL, NULL,
+ ttm_buffer_object_destroy);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8faad23dc1d8..ca2a6e6101dc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -472,7 +472,13 @@ EXPORT_SYMBOL(drm_invalid_op);
*/
static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
{
- int len;
+ size_t len;
+
+ /* don't attempt to copy a NULL pointer */
+ if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) {
+ *buf_len = 0;
+ return 0;
+ }
/* don't overflow userbuf */
len = strlen(value);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 2f61f53d472f..a6ac56580876 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -205,7 +205,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
- void *src;
+ struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
@@ -215,17 +215,16 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
goto out_drm_gem_fb_end_cpu_access;
- src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(dst, 0, src, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, data, fb, clip, !gem->import_attach);
else
- drm_fb_memcpy(dst, 0, src, fb, clip);
+ drm_fb_memcpy(&dst_map, NULL, data, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(dst, 0, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, data, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -311,6 +310,24 @@ err_drm_dev_exit:
}
/**
+ * mipi_dbi_pipe_mode_valid - MIPI DBI mode-valid helper
+ * @pipe: Simple display pipe
+ * @mode: The mode to test
+ *
+ * This function validates a given display mode against the MIPI DBI's hardware
+ * display. Drivers can use this as their &drm_simple_display_pipe_funcs->mode_valid
+ * callback.
+ */
+enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+
+ return drm_crtc_helper_mode_valid_fixed(&pipe->crtc, mode, &dbidev->mode);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_mode_valid);
+
+/**
* mipi_dbi_pipe_update - Display pipe update helper
* @pipe: Simple display pipe
* @old_state: Old plane state
@@ -416,26 +433,8 @@ EXPORT_SYMBOL(mipi_dbi_pipe_disable);
static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &dbidev->mode);
- if (!mode) {
- DRM_ERROR("Failed to duplicate mode\n");
- return 0;
- }
-
- if (mode->name[0] == '\0')
- drm_mode_set_name(mode);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- if (mode->width_mm) {
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- }
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &dbidev->mode);
}
static const struct drm_connector_helper_funcs mipi_dbi_connector_hfuncs = {
@@ -1136,7 +1135,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
/*
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in the GEM code
- * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical
+ * (e.g., drm_gem_dma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
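The new mipi_dbi_pipe_mode_valid() helper added above slots straight into a tinydrm-style driver's simple-display-pipe functions. A hedged sketch, with foo_pipe_enable() standing in for driver-specific code:

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.mode_valid = mipi_dbi_pipe_mode_valid,	/* new helper added in this patch */
	.enable = foo_pipe_enable,		/* hypothetical, driver-specific */
	.disable = mipi_dbi_pipe_disable,
	.update = mipi_dbi_pipe_update,
};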
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index c40bde96cfdf..3ec02748d56f 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -346,6 +346,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@@ -1236,7 +1237,9 @@ static int mipi_dsi_drv_remove(struct device *dev)
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- return drv->remove(dsi);
+ drv->remove(dsi);
+
+ return 0;
}
static void mipi_dsi_drv_shutdown(struct device *dev)
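With mipi_dsi_drv_remove() no longer forwarding a return value, MIPI DSI drivers declare their remove callback as void. A sketch under that assumption, using hypothetical foo_* names:

static void foo_dsi_remove(struct mipi_dsi_device *dsi)	/* was 'int' before this series */
{
	mipi_dsi_detach(dsi);
	/* driver-specific teardown */
}

static struct mipi_dsi_driver foo_dsi_driver = {
	.probe = foo_dsi_probe,			/* hypothetical */
	.remove = foo_dsi_remove,
	.driver = {
		.name = "foo-dsi",
	},
};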
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 59b34f07cfce..939d621c9ad4 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -151,6 +151,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
+ continue;
+
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
@@ -412,8 +415,8 @@ int drmm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
- idr_init(&dev->mode_config.object_idr);
- idr_init(&dev->mode_config.tile_idr);
+ idr_init_base(&dev->mode_config.object_idr, 1);
+ idr_init_base(&dev->mode_config.tile_idr, 1);
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 0f08319453b2..f858dfedf2cf 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -100,45 +100,16 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
* formats than this and drivers may specify a more accurate list when
- * creating the primary plane. However drivers that still call
- * drm_plane_init() will use this minimal format list as the default.
+ * creating the primary plane.
*/
static const uint32_t safe_modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
-static struct drm_plane *create_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- /*
- * Remove the format_default field from drm_plane when dropping
- * this helper.
- */
- primary->format_default = true;
-
- /* possible_crtc's will be filled in later by crtc_init */
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- safe_modeset_formats,
- ARRAY_SIZE(safe_modeset_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
+static const struct drm_plane_funcs primary_plane_funcs = {
+ DRM_PLANE_NON_ATOMIC_FUNCS,
+};
/**
* drm_crtc_init - Legacy CRTC initialization function
@@ -171,10 +142,33 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
struct drm_plane *primary;
+ int ret;
+
+ /* possible_crtc's will be filled in later by crtc_init */
+ primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0,
+ &primary_plane_funcs,
+ safe_modeset_formats,
+ ARRAY_SIZE(safe_modeset_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
- primary = create_primary_plane(dev);
- return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
- NULL);
+ /*
+ * Remove the format_default field from drm_plane when dropping
+ * this helper.
+ */
+ primary->format_default = true;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, NULL);
+ if (ret)
+ goto err_drm_plane_cleanup;
+
+ return 0;
+
+err_drm_plane_cleanup:
+ drm_plane_cleanup(primary);
+ kfree(primary);
+ return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index fc1728d46ac2..8a0c0e0bb5bd 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -103,6 +103,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
+ .width = 1080,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -128,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Anbernic Win600 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"),
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -152,6 +164,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO AIR */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AIR"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO NEXT */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 726f2f163c26..33357629a7f5 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -448,6 +448,44 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
}
EXPORT_SYMBOL(__drmm_universal_plane_alloc);
+void *__drm_universal_plane_alloc(struct drm_device *dev, size_t size,
+ size_t offset, uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_plane *plane;
+ va_list ap;
+ int ret;
+
+ if (drm_WARN_ON(dev, !funcs))
+ return ERR_PTR(-EINVAL);
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ plane = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ if (ret)
+ goto err_kfree;
+
+ return container;
+
+err_kfree:
+ kfree(container);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(__drm_universal_plane_alloc);
+
int drm_plane_register_all(struct drm_device *dev)
{
unsigned int num_planes = 0;
@@ -483,38 +521,6 @@ void drm_plane_unregister_all(struct drm_device *dev)
}
/**
- * drm_plane_init - Initialize a legacy plane
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @is_primary: plane type (primary vs overlay)
- *
- * Legacy API to initialize a DRM plane.
- *
- * New drivers should call drm_universal_plane_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary)
-{
- enum drm_plane_type type;
-
- type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
- formats, format_count,
- NULL, type, NULL);
-}
-EXPORT_SYMBOL(drm_plane_init);
-
-/**
* drm_plane_cleanup - Clean up the core plane usage
* @plane: plane to cleanup
*
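With drm_plane_init() removed, any remaining legacy caller switches to drm_universal_plane_init() and passes the plane type explicitly, as the deleted kerneldoc already suggested. A hedged sketch of that conversion, using hypothetical foo_* names:

/* before: drm_plane_init(dev, plane, possible_crtcs, &foo_plane_funcs,
 *                        foo_formats, ARRAY_SIZE(foo_formats), false);
 */
ret = drm_universal_plane_init(dev, plane, possible_crtcs, &foo_plane_funcs,
			       foo_formats, ARRAY_SIZE(foo_formats),
			       NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
	return ret;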
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 838b32b70bce..865bd999b187 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -30,8 +30,10 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#define SUBPIXEL_MASK 0xffff
@@ -145,13 +147,36 @@ static int drm_plane_helper_check_update(struct drm_plane *plane,
return 0;
}
-static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
+/**
+ * drm_plane_helper_update_primary - Helper for updating primary planes
+ * @plane: plane to update
+ * @crtc: the plane's new CRTC
+ * @fb: the plane's new framebuffer
+ * @crtc_x: x coordinate within CRTC
+ * @crtc_y: y coordinate within CRTC
+ * @crtc_w: width coordinate within CRTC
+ * @crtc_h: height coordinate within CRTC
+ * @src_x: x coordinate within source
+ * @src_y: y coordinate within source
+ * @src_w: width coordinate within source
+ * @src_h: height coordinate within source
+ * @ctx: modeset locking context
+ *
+ * This helper validates the given parameters and updates the primary plane.
+ *
+ * This function is only useful for non-atomic modesetting. Don't use
+ * it in new drivers.
+ *
+ * Returns:
+ * Zero on success, or an errno code otherwise.
+ */
+int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_set set = {
.crtc = crtc,
@@ -172,15 +197,19 @@ static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *c
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
+ struct drm_device *dev = plane->dev;
struct drm_connector **connector_list;
int num_connectors, ret;
bool visible;
+ if (drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev)))
+ return -EINVAL;
+
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest,
DRM_MODE_ROTATE_0,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false, &visible);
if (ret)
return ret;
@@ -218,31 +247,74 @@ static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *c
kfree(connector_list);
return ret;
}
+EXPORT_SYMBOL(drm_plane_helper_update_primary);
-static int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+/**
+ * drm_plane_helper_disable_primary - Helper for disabling primary planes
+ * @plane: plane to disable
+ * @ctx: modeset locking context
+ *
+ * This helper returns an error when trying to disable the primary
+ * plane.
+ *
+ * This function is only useful for non-atomic modesetting. Don't use
+ * it in new drivers.
+ *
+ * Returns:
+ * An errno code.
+ */
+int drm_plane_helper_disable_primary(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *dev = plane->dev;
+
+ drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev));
+
return -EINVAL;
}
+EXPORT_SYMBOL(drm_plane_helper_disable_primary);
/**
- * drm_primary_helper_destroy() - Helper for primary plane destruction
+ * drm_plane_helper_destroy() - Helper for primary plane destruction
* @plane: plane to destroy
*
* Provides a default plane destroy handler for primary planes. This handler
* is called during CRTC destruction. We disable the primary plane, remove
* it from the DRM plane list, and deallocate the plane structure.
*/
-void drm_primary_helper_destroy(struct drm_plane *plane)
+void drm_plane_helper_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
kfree(plane);
}
-EXPORT_SYMBOL(drm_primary_helper_destroy);
-
-const struct drm_plane_funcs drm_primary_helper_funcs = {
- .update_plane = drm_primary_helper_update,
- .disable_plane = drm_primary_helper_disable,
- .destroy = drm_primary_helper_destroy,
-};
-EXPORT_SYMBOL(drm_primary_helper_funcs);
+EXPORT_SYMBOL(drm_plane_helper_destroy);
+
+/**
+ * drm_plane_helper_atomic_check() - Helper to check plane atomic-state
+ * @plane: plane to check
+ * @state: atomic state object
+ *
+ * Provides a default plane-state check handler for planes whose atomic-state
+ * scale and positioning are not expected to change since the plane is always
+ * a fullscreen scanout buffer.
+ *
+ * This is often the case for the primary plane of simple framebuffers.
+ *
+ * RETURNS:
+ * Zero on success, or an errno code otherwise.
+ */
+int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+EXPORT_SYMBOL(drm_plane_helper_atomic_check);
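drm_plane_helper_atomic_check() targets planes that always scan out a full-screen buffer; such drivers can point their plane helper funcs at it directly. A hedged sketch with a hypothetical atomic_update callback:

static const struct drm_plane_helper_funcs foo_primary_plane_helper_funcs = {
	.atomic_check = drm_plane_helper_atomic_check,		/* new helper above */
	.atomic_update = foo_primary_plane_atomic_update,	/* hypothetical, driver-specific */
};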
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a3f180653b8b..eb09e86044c6 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bb427c5a4f1f..69b0b2b9cc1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -1015,6 +1015,30 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
/**
+ * drm_crtc_helper_mode_valid_fixed - Validates a display mode
+ * @crtc: the crtc
+ * @mode: the mode to validate
+ * @fixed_mode: the display hardware's mode
+ *
+ * Returns:
+ * MODE_OK on success, or another mode-status code otherwise.
+ */
+enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *fixed_mode)
+{
+ if (mode->hdisplay != fixed_mode->hdisplay && mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_ONE_SIZE;
+ else if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_ONE_WIDTH;
+ else if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_ONE_HEIGHT;
+
+ return MODE_OK;
+}
+EXPORT_SYMBOL(drm_crtc_helper_mode_valid_fixed);
+
+/**
* drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID
* property from the connector's
* DDC channel
@@ -1051,6 +1075,46 @@ int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector)
EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc);
/**
+ * drm_connector_helper_get_modes_fixed - Duplicates a display mode for a connector
+ * @connector: the connector
+ * @fixed_mode: the display hardware's mode
+ *
+ * This function duplicates a display mode for a connector. Drivers for hardware
+ * that only supports a single fixed mode can use this function in their connector's
+ * get_modes helper.
+ *
+ * Returns:
+ * The number of created modes.
+ */
+int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
+ const struct drm_display_mode *fixed_mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(dev, fixed_mode);
+ if (!mode) {
+ drm_err(dev, "Failed to duplicate mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(fixed_mode));
+ return 0;
+ }
+
+ if (mode->name[0] == '\0')
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ if (mode->width_mm)
+ connector->display_info.width_mm = mode->width_mm;
+ if (mode->height_mm)
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+EXPORT_SYMBOL(drm_connector_helper_get_modes_fixed);
+
+/**
* drm_connector_helper_get_modes - Read EDID and update connector.
* @connector: The connector
*
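Together, drm_crtc_helper_mode_valid_fixed() and drm_connector_helper_get_modes_fixed() cover the common fixed-mode case; the mipi-dbi conversion earlier in this patch is one user. A hedged sketch for a driver that keeps its single hardware mode in a hypothetical foo_device structure:

static enum drm_mode_status foo_crtc_mode_valid(struct drm_crtc *crtc,
						const struct drm_display_mode *mode)
{
	struct foo_device *foo = to_foo_device(crtc->dev);	/* hypothetical container */

	return drm_crtc_helper_mode_valid_fixed(crtc, mode, &foo->mode);
}

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_device *foo = to_foo_device(connector->dev);

	return drm_connector_helper_get_modes_fixed(connector, &foo->mode);
}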
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 36633590ebf3..e9f782119d3d 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -12,7 +12,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -223,8 +222,8 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
&pipe->crtc);
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 66e5f1e34044..7c3aa77186d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 10b0036f8a2e..b7c11bdce2c8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -893,7 +893,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
if (!edid)
return -ENODEV;
- hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
+ hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
@@ -922,8 +922,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
return -EINVAL;
}
-static int hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 65260a658684..8d333db813b7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1045,7 +1045,7 @@ static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
-static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
+static enum drm_mode_status mixer_mode_valid(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct mixer_context *ctx = crtc->ctx;
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index e95e96c565ba..5ca71ef87325 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -3,7 +3,7 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 7a503bf08d0f..b4acc3422ba4 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -20,9 +20,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -150,13 +149,13 @@ static void fsl_dcu_unload(struct drm_device *dev)
dev->dev_private = NULL;
}
-DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(fsl_dcu_drm_fops);
static const struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index d763f53f480c..5b47000738e4 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -6,7 +6,6 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 0cd527f0c146..794a87d16f88 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -10,10 +10,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -84,7 +84,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
int index;
@@ -95,7 +95,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
if (index < 0)
return;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
@@ -136,7 +136,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
DCU_LAYER_POSY(new_state->crtc_y) |
DCU_LAYER_POSX(new_state->crtc_x));
regmap_write(fsl_dev->regmap,
- DCU_CTRLDESCLN(index, 3), gem->paddr);
+ DCU_CTRLDESCLN(index, 3), gem->dma_addr);
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
DCU_LAYER_EN |
DCU_LAYER_TRANS(0xff) |
@@ -171,16 +171,10 @@ static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
.atomic_update = fsl_dcu_drm_plane_atomic_update,
};
-static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(plane);
-}
-
static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
- .destroy = fsl_dcu_drm_plane_destroy,
+ .destroy = drm_plane_helper_destroy,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.update_plane = drm_atomic_helper_update_plane,
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 0cff20265f97..807b989e3c77 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -7,6 +7,8 @@ config DRM_GMA500
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
+ select X86_PLATFORM_DEVICES if ACPI
+ select ACPI_WMI if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
Intel GMA500 (Poulsbo), Intel GMA600 (Moorestown/Oak Trail) and
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 46b9c0f13d6d..577a4987b193 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -7,75 +7,109 @@
* Authors: Eric Knopp
*/
+#include <acpi/video.h>
+
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "intel_bios.h"
#include "power.h"
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-static void do_gma_backlight_set(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- backlight_update_status(dev_priv->backlight_device);
-}
-#endif
-
void gma_backlight_enable(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
dev_priv->backlight_enabled = true;
- if (dev_priv->backlight_device) {
- dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
- do_gma_backlight_set(dev);
- }
-#endif
+ dev_priv->ops->backlight_set(dev, dev_priv->backlight_level);
}
void gma_backlight_disable(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
dev_priv->backlight_enabled = false;
- if (dev_priv->backlight_device) {
- dev_priv->backlight_device->props.brightness = 0;
- do_gma_backlight_set(dev);
- }
-#endif
+ dev_priv->ops->backlight_set(dev, 0);
}
void gma_backlight_set(struct drm_device *dev, int v)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
dev_priv->backlight_level = v;
- if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
- dev_priv->backlight_device->props.brightness = v;
- do_gma_backlight_set(dev);
- }
-#endif
+ if (dev_priv->backlight_enabled)
+ dev_priv->ops->backlight_set(dev, v);
+}
+
+static int gma_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
+ if (dev_priv->ops->backlight_get)
+ return dev_priv->ops->backlight_get(dev);
+
+ return dev_priv->backlight_level;
}
+static int gma_backlight_update_status(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int level = backlight_get_brightness(bd);
+
+ /* Percentage 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ gma_backlight_set(dev, level);
+ return 0;
+}
+
+static const struct backlight_ops gma_backlight_ops __maybe_unused = {
+ .get_brightness = gma_backlight_get_brightness,
+ .update_status = gma_backlight_update_status,
+};
+
int gma_backlight_init(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct backlight_properties props __maybe_unused = {};
+ int ret;
+
dev_priv->backlight_enabled = true;
- return dev_priv->ops->backlight_init(dev);
-#else
- return 0;
+ dev_priv->backlight_level = 100;
+
+ ret = dev_priv->ops->backlight_init(dev);
+ if (ret)
+ return ret;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping %s backlight registration\n",
+ dev_priv->ops->backlight_name);
+ return 0;
+ }
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ props.brightness = dev_priv->backlight_level;
+ props.max_brightness = PSB_MAX_BRIGHTNESS;
+ props.type = BACKLIGHT_RAW;
+
+ dev_priv->backlight_device =
+ backlight_device_register(dev_priv->ops->backlight_name,
+ dev->dev, dev,
+ &gma_backlight_ops, &props);
+ if (IS_ERR(dev_priv->backlight_device))
+ return PTR_ERR(dev_priv->backlight_device);
#endif
+
+ return 0;
}
void gma_backlight_exit(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- if (dev_priv->backlight_device) {
- dev_priv->backlight_device->props.brightness = 0;
- backlight_update_status(dev_priv->backlight_device);
+
+ if (dev_priv->backlight_device)
backlight_device_unregister(dev_priv->backlight_device);
- }
#endif
}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index dd32b484dd82..3065596257e9 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -5,7 +5,6 @@
*
**************************************************************************/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <drm/drm.h>
@@ -62,14 +61,10 @@ static int cdv_output_init(struct drm_device *dev)
return 0;
}
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
/*
* Cedartrail Backlght Interfaces
*/
-static struct backlight_device *cdv_backlight_device;
-
static int cdv_backlight_combination_mode(struct drm_device *dev)
{
return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
@@ -92,9 +87,8 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
return max;
}
-static int cdv_get_brightness(struct backlight_device *bd)
+static int cdv_get_brightness(struct drm_device *dev)
{
- struct drm_device *dev = bl_get_data(bd);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
@@ -106,20 +100,13 @@ static int cdv_get_brightness(struct backlight_device *bd)
val *= lbpc;
}
return (val * 100)/cdv_get_max_backlight(dev);
-
}
-static int cdv_set_brightness(struct backlight_device *bd)
+static void cdv_set_brightness(struct drm_device *dev, int level)
{
- struct drm_device *dev = bl_get_data(bd);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int level = bd->props.brightness;
u32 blc_pwm_ctl;
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
level *= cdv_get_max_backlight(dev);
level /= 100;
@@ -136,38 +123,18 @@ static int cdv_set_brightness(struct backlight_device *bd)
blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
- return 0;
}
-static const struct backlight_ops cdv_ops = {
- .get_brightness = cdv_get_brightness,
- .update_status = cdv_set_brightness,
-};
-
static int cdv_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- cdv_backlight_device = backlight_device_register("psb-bl",
- NULL, (void *)dev, &cdv_ops, &props);
- if (IS_ERR(cdv_backlight_device))
- return PTR_ERR(cdv_backlight_device);
-
- cdv_backlight_device->props.brightness =
- cdv_get_brightness(cdv_backlight_device);
- backlight_update_status(cdv_backlight_device);
- dev_priv->backlight_device = cdv_backlight_device;
- dev_priv->backlight_enabled = true;
+
+ dev_priv->backlight_level = cdv_get_brightness(dev);
+ cdv_set_brightness(dev, dev_priv->backlight_level);
+
return 0;
}
-#endif
-
/*
* Provide the Cedarview specific chip logic and low level methods
* for power management
@@ -581,11 +548,9 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = true;
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
psb_intel_opregion_init(dev);
@@ -615,9 +580,10 @@ const struct psb_ops cdv_chip_ops = {
.hotplug = cdv_hotplug_event,
.hotplug_enable = cdv_hotplug_enable,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = cdv_backlight_init,
-#endif
+ .backlight_get = cdv_get_brightness,
+ .backlight_set = cdv_set_brightness,
+ .backlight_name = "psb-bl",
.init_pm = cdv_init_pm,
.save_regs = cdv_save_display_registers,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index bb2e9d64018a..53b967282d6a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -115,7 +115,7 @@ i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
/*
* Write a single byte to the current I2C address, the
- * the I2C link must be running or this returns -EIO
+ * I2C link must be running or this returns -EIO
*/
static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index dffe37490206..4b7627a72637 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct psb_gem_object *pobj = to_psb_gem_object(obj);
- drm_gem_object_release(obj);
-
/* Undo the mmap pin if we are destroying the object */
if (pobj->mmapping)
psb_gem_unpin(pobj);
+ drm_gem_object_release(obj);
+
WARN_ON(pobj->in_gart && !pobj->stolen);
release_resource(&pobj->resource);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index bd40c040a2c9..fe7b8436f87a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
@@ -552,28 +555,11 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
return ret;
}
-int gma_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set, ctx);
-
- pm_runtime_forbid(dev->dev);
- ret = drm_crtc_helper_set_config(set, ctx);
- pm_runtime_allow(dev->dev);
-
- return ret;
-}
-
const struct drm_crtc_funcs gma_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
- .set_config = gma_crtc_set_config,
+ .set_config = drm_crtc_helper_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
.enable_vblank = gma_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 113cf048105e..c8b611a2f6c6 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -69,8 +69,6 @@ extern int gma_crtc_page_flip(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
struct drm_modeset_acquire_ctx *ctx);
-extern int gma_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
extern void gma_crtc_save(struct drm_crtc *crtc);
extern void gma_crtc_restore(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 6004390d647a..64761f46b434 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -310,7 +310,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
temp & ~PIPEACONF_ENABLE, i);
REG_READ_WITH_AUX(map->conf, i);
}
- /* Wait for for the pipe disable to take effect. */
+ /* Wait for the pipe disable to take effect. */
gma_wait_for_vblank(dev);
temp = REG_READ_WITH_AUX(map->dpll, i);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 5923a9c89312..2531959d3d77 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -5,7 +5,6 @@
*
**************************************************************************/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/module.h>
@@ -37,29 +36,18 @@ static int oaktrail_output_init(struct drm_device *dev)
* Provide the low level interfaces for the Moorestown backlight
*/
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
#define BLC_PWM_FREQ_CALC_CONSTANT 32
#define MHz 1000000
#define BLC_ADJUSTMENT_MAX 100
-static struct backlight_device *oaktrail_backlight_device;
-static int oaktrail_brightness;
-
-static int oaktrail_set_brightness(struct backlight_device *bd)
+static void oaktrail_set_brightness(struct drm_device *dev, int level)
{
- struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int level = bd->props.brightness;
u32 blc_pwm_ctl;
u32 max_pwm_blc;
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
if (gma_power_begin(dev, 0)) {
/* Calculate and set the brightness value */
max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
@@ -82,19 +70,9 @@ static int oaktrail_set_brightness(struct backlight_device *bd)
REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
gma_power_end(dev);
}
- oaktrail_brightness = level;
- return 0;
-}
-
-static int oaktrail_get_brightness(struct backlight_device *bd)
-{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return oaktrail_brightness;
}
-static int device_backlight_init(struct drm_device *dev)
+static int oaktrail_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
@@ -123,44 +101,11 @@ static int device_backlight_init(struct drm_device *dev)
REG_WRITE(BLC_PWM_CTL, value | (value << 16));
gma_power_end(dev);
}
- return 0;
-}
-
-static const struct backlight_ops oaktrail_ops = {
- .get_brightness = oaktrail_get_brightness,
- .update_status = oaktrail_set_brightness,
-};
-static int oaktrail_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
- NULL, (void *)dev, &oaktrail_ops, &props);
-
- if (IS_ERR(oaktrail_backlight_device))
- return PTR_ERR(oaktrail_backlight_device);
-
- ret = device_backlight_init(dev);
- if (ret < 0) {
- backlight_device_unregister(oaktrail_backlight_device);
- return ret;
- }
- oaktrail_backlight_device->props.brightness = 100;
- oaktrail_backlight_device->props.max_brightness = 100;
- backlight_update_status(oaktrail_backlight_device);
- dev_priv->backlight_device = oaktrail_backlight_device;
+ oaktrail_set_brightness(dev, PSB_MAX_BRIGHTNESS);
return 0;
}
-#endif
-
/*
* Provide the Moorestown specific chip logic and low level methods
* for power management
@@ -501,12 +446,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
-
+ dev_priv->use_msi = true;
dev_priv->regmap = oaktrail_regmap;
ret = mid_chip_setup(dev);
@@ -548,9 +490,9 @@ const struct psb_ops oaktrail_chip_ops = {
.output_init = oaktrail_output_init,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = oaktrail_backlight_init,
-#endif
+ .backlight_set = oaktrail_set_brightness,
+ .backlight_name = "oaktrail-bl",
.save_regs = oaktrail_save_display_registers,
.restore_regs = oaktrail_restore_display_registers,
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 4d98df189e10..75b4eb1c8884 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -61,7 +61,6 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
pp_status = REG_READ(PP_STATUS);
} while (pp_status & PP_ON);
dev_priv->is_lvds_on = false;
- pm_request_idle(dev->dev);
}
gma_power_end(dev);
}
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index dc494df71a48..0c271072af63 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -150,21 +150,17 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
- struct backlight_device *bd = dev_priv->backlight_device;
DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
- if (bd == NULL)
- return ASLE_BACKLIGHT_FAILED;
-
bclp &= ASLE_BCLP_MSK;
if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
- gma_backlight_set(dev, bclp * bd->props.max_brightness / 255);
+ gma_backlight_set(dev, bclp * PSB_MAX_BRIGHTNESS / 255);
asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index b91de6d36e41..186af29bea6f 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -37,9 +37,6 @@
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-static struct mutex power_mutex; /* Serialize power ops */
-static DEFINE_SPINLOCK(power_ctrl_lock); /* Serialize power claim */
-
/**
* gma_power_init - initialise power manager
* @dev: our device
@@ -54,13 +51,23 @@ void gma_power_init(struct drm_device *dev)
dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
dev_priv->ospm_base &= 0xffff;
- dev_priv->display_power = true; /* We start active */
- dev_priv->display_count = 0; /* Currently no users */
- dev_priv->suspended = false; /* And not suspended */
- mutex_init(&power_mutex);
-
if (dev_priv->ops->init_pm)
dev_priv->ops->init_pm(dev);
+
+ /*
+ * Runtime pm support is broken atm. So for now unconditionally
+ * call pm_runtime_get() here and put it again in psb_driver_unload()
+ *
+ * To fix this we need to call pm_runtime_get() once for each active
+ * pipe at boot and then put() / get() for each pipe disable / enable
+ * so that the device gets runtime suspended when no pipes are active.
+ * Once this is in place the pm_runtime_get() below should be replaced
+ * by a pm_runtime_allow() call to undo the pm_runtime_forbid() from
+ * pci_pm_init().
+ */
+ pm_runtime_get(dev->dev);
+
+ dev_priv->pm_initialized = true;
}
/**
@@ -71,8 +78,12 @@ void gma_power_init(struct drm_device *dev)
*/
void gma_power_uninit(struct drm_device *dev)
{
- pm_runtime_disable(dev->dev);
- pm_runtime_set_suspended(dev->dev);
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
+ if (!dev_priv->pm_initialized)
+ return;
+
+ pm_runtime_put_noidle(dev->dev);
}
/**
@@ -85,11 +96,8 @@ static void gma_suspend_display(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- if (dev_priv->suspended)
- return;
dev_priv->ops->save_regs(dev);
dev_priv->ops->power_down(dev);
- dev_priv->display_power = false;
}
/**
@@ -106,8 +114,6 @@ static void gma_resume_display(struct pci_dev *pdev)
/* turn on the display power island */
dev_priv->ops->power_up(dev);
- dev_priv->suspended = false;
- dev_priv->display_power = true;
PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
pci_write_config_word(pdev, PSB_GMCH_CTRL,
@@ -131,21 +137,14 @@ static void gma_suspend_pci(struct pci_dev *pdev)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int bsm, vbt;
- if (dev_priv->suspended)
- return;
-
pci_save_state(pdev);
pci_read_config_dword(pdev, 0x5C, &bsm);
dev_priv->regs.saveBSM = bsm;
pci_read_config_dword(pdev, 0xFC, &vbt);
dev_priv->regs.saveVBT = vbt;
- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
-
- dev_priv->suspended = true;
}
/**
@@ -155,29 +154,17 @@ static void gma_suspend_pci(struct pci_dev *pdev)
* Perform the resume processing on our PCI device state - rewrite
* register state and re-enable the PCI device
*/
-static bool gma_resume_pci(struct pci_dev *pdev)
+static int gma_resume_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
-
- if (!dev_priv->suspended)
- return true;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
- /* restoring MSI address and data in PCIx space */
- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
- ret = pci_enable_device(pdev);
- if (ret != 0)
- dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
- else
- dev_priv->suspended = false;
- return !dev_priv->suspended;
+ return pci_enable_device(pdev);
}
/**
@@ -192,20 +179,10 @@ int gma_power_suspend(struct device *_dev)
{
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- mutex_lock(&power_mutex);
- if (!dev_priv->suspended) {
- if (dev_priv->display_count) {
- mutex_unlock(&power_mutex);
- dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
- return -EBUSY;
- }
- gma_irq_uninstall(dev);
- gma_suspend_display(dev);
- gma_suspend_pci(pdev);
- }
- mutex_unlock(&power_mutex);
+ gma_irq_uninstall(dev);
+ gma_suspend_display(dev);
+ gma_suspend_pci(pdev);
return 0;
}
@@ -220,28 +197,13 @@ int gma_power_resume(struct device *_dev)
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- mutex_lock(&power_mutex);
gma_resume_pci(pdev);
gma_resume_display(pdev);
- gma_irq_preinstall(dev);
- gma_irq_postinstall(dev);
- mutex_unlock(&power_mutex);
+ gma_irq_install(dev);
return 0;
}
/**
- * gma_power_is_on - returne true if power is on
- * @dev: our DRM device
- *
- * Returns true if the display island power is on at this moment
- */
-bool gma_power_is_on(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- return dev_priv->display_power;
-}
-
-/**
* gma_power_begin - begin requiring power
* @dev: our DRM device
* @force_on: true to force power on
@@ -251,35 +213,10 @@ bool gma_power_is_on(struct drm_device *dev)
*/
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&power_ctrl_lock, flags);
- /* Power already on ? */
- if (dev_priv->display_power) {
- dev_priv->display_count++;
- pm_runtime_get(dev->dev);
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
- return true;
- }
- if (force_on == false)
- goto out_false;
-
- /* Ok power up needed */
- ret = gma_resume_pci(pdev);
- if (ret == 0) {
- gma_irq_preinstall(dev);
- gma_irq_postinstall(dev);
- pm_runtime_get(dev->dev);
- dev_priv->display_count++;
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
- return true;
- }
-out_false:
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
- return false;
+ if (force_on)
+ return pm_runtime_resume_and_get(dev->dev) == 0;
+ else
+ return pm_runtime_get_if_in_use(dev->dev) == 1;
}
/**
@@ -291,46 +228,5 @@ out_false:
*/
void gma_power_end(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- unsigned long flags;
- spin_lock_irqsave(&power_ctrl_lock, flags);
- dev_priv->display_count--;
- WARN_ON(dev_priv->display_count < 0);
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
pm_runtime_put(dev->dev);
}
-
-int psb_runtime_suspend(struct device *dev)
-{
- return gma_power_suspend(dev);
-}
-
-int psb_runtime_resume(struct device *dev)
-{
- return gma_power_resume(dev);
-}
-
-int psb_runtime_idle(struct device *dev)
-{
- struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
- struct drm_psb_private *dev_priv = to_drm_psb_private(drmdev);
- if (dev_priv->display_count)
- return 0;
- else
- return 1;
-}
-
-int gma_power_thaw(struct device *_dev)
-{
- return gma_power_resume(_dev);
-}
-
-int gma_power_freeze(struct device *_dev)
-{
- return gma_power_suspend(_dev);
-}
-
-int gma_power_restore(struct device *_dev)
-{
- return gma_power_resume(_dev);
-}
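
The power.c rework above drops the driver's own display_power/display_count bookkeeping and leans entirely on runtime-PM reference counting. A minimal sketch of the resulting begin/end pattern, assuming a struct device with runtime PM already set up (the example_ names are illustrative, not gma500 helpers):

    #include <linux/pm_runtime.h>

    /* Take a power reference before touching hardware registers. */
    static bool example_hw_begin(struct device *dev, bool force_on)
    {
            if (force_on)
                    /* resume the device if it is suspended; 0 on success */
                    return pm_runtime_resume_and_get(dev) == 0;

            /* only take a reference if the device is already active */
            return pm_runtime_get_if_in_use(dev) == 1;
    }

    /* Drop the reference taken by example_hw_begin(). */
    static void example_hw_end(struct device *dev)
    {
            pm_runtime_put(dev);
    }
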
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 0c89c4d6ec20..063328d66652 100644
--- a/drivers/gpu/drm/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
@@ -43,9 +43,6 @@ void gma_power_uninit(struct drm_device *dev);
*/
int gma_power_suspend(struct device *dev);
int gma_power_resume(struct device *dev);
-int gma_power_thaw(struct device *dev);
-int gma_power_freeze(struct device *dev);
-int gma_power_restore(struct device *_dev);
/*
* These are the functions the driver should use to wrap all hw access
@@ -54,19 +51,4 @@ int gma_power_restore(struct device *_dev);
bool gma_power_begin(struct drm_device *dev, bool force);
void gma_power_end(struct drm_device *dev);
-/*
- * Use this function to do an instantaneous check for if the hw is on.
- * Only use this in cases where you know the mutex is already held such
- * as in irq install/uninstall and you need to
- * prevent a deadlock situation. Otherwise use gma_power_begin().
- */
-bool gma_power_is_on(struct drm_device *dev);
-
-/*
- * GFX-Runtime PM callbacks
- */
-int psb_runtime_suspend(struct device *dev);
-int psb_runtime_resume(struct device *dev);
-int psb_runtime_idle(struct device *dev);
-
#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 71534f4ca834..3c294c38bdb4 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -5,8 +5,6 @@
*
**************************************************************************/
-#include <linux/backlight.h>
-
#include <drm/drm.h>
#include "gma_device.h"
@@ -24,8 +22,6 @@ static int psb_output_init(struct drm_device *dev)
return 0;
}
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
/*
* Poulsbo Backlight Interfaces
*/
@@ -41,18 +37,6 @@ static int psb_output_init(struct drm_device *dev)
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
-static int psb_brightness;
-static struct backlight_device *psb_backlight_device;
-
-static int psb_get_brightness(struct backlight_device *bd)
-{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return psb_brightness;
-}
-
-
static int psb_backlight_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@@ -86,62 +70,13 @@ static int psb_backlight_setup(struct drm_device *dev)
REG_WRITE(BLC_PWM_CTL,
(value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
}
- return 0;
-}
-
-static int psb_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(psb_backlight_device);
- int level = bd->props.brightness;
-
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
- psb_intel_lvds_set_brightness(dev, level);
- psb_brightness = level;
- return 0;
-}
-
-static const struct backlight_ops psb_ops = {
- .get_brightness = psb_get_brightness,
- .update_status = psb_set_brightness,
-};
-
-static int psb_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- psb_backlight_device = backlight_device_register("psb-bl",
- NULL, (void *)dev, &psb_ops, &props);
- if (IS_ERR(psb_backlight_device))
- return PTR_ERR(psb_backlight_device);
-
- ret = psb_backlight_setup(dev);
- if (ret < 0) {
- backlight_device_unregister(psb_backlight_device);
- psb_backlight_device = NULL;
- return ret;
- }
- psb_backlight_device->props.brightness = 100;
- psb_backlight_device->props.max_brightness = 100;
- backlight_update_status(psb_backlight_device);
- dev_priv->backlight_device = psb_backlight_device;
+ psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
/* This must occur after the backlight is properly initialised */
psb_lid_timer_init(dev_priv);
-
return 0;
}
-#endif
-
/*
* Provide the Poulsbo specific chip logic and low level methods
* for power management
@@ -345,9 +280,9 @@ const struct psb_ops psb_chip_ops = {
.output_init = psb_output_init,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = psb_backlight_init,
-#endif
+ .backlight_init = psb_backlight_setup,
+ .backlight_set = psb_intel_lvds_set_brightness,
+ .backlight_name = "psb-bl",
.init_pm = psb_init_pm,
.save_regs = psb_save_display_registers,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1d8744f3e702..cd9c73f5a64a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -169,8 +169,7 @@ static void psb_driver_unload(struct drm_device *dev)
/* TODO: Kill vblank etc here */
- if (dev_priv->backlight_device)
- gma_backlight_exit(dev);
+ gma_backlight_exit(dev);
psb_modeset_cleanup(dev);
gma_irq_uninstall(dev);
@@ -383,7 +382,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- gma_irq_install(dev, pdev->irq);
+ gma_irq_install(dev);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -399,6 +398,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (gma_encoder->type == INTEL_OUTPUT_LVDS ||
gma_encoder->type == INTEL_OUTPUT_MIPI) {
ret = gma_backlight_init(dev);
+ if (ret == 0)
+ acpi_video_register_backlight();
break;
}
}
@@ -407,11 +408,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
return ret;
psb_intel_opregion_enable_asle(dev);
-#if 0
- /* Enable runtime pm at last */
- pm_runtime_enable(dev->dev);
- pm_runtime_set_active(dev->dev);
-#endif
return devm_add_action_or_reset(dev->dev, psb_device_release, dev);
@@ -420,33 +416,6 @@ out_err:
return ret;
}
-static inline void get_brightness(struct backlight_device *bd)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- if (bd) {
- bd->props.brightness = bd->ops->get_brightness(bd);
- backlight_update_status(bd);
- }
-#endif
-}
-
-static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- static unsigned int runtime_allowed;
-
- if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
- runtime_allowed++;
- pm_runtime_allow(dev->dev);
- dev_priv->rpm_enabled = 1;
- }
- return drm_ioctl(filp, cmd, arg);
- /* FIXME: do we need to wrap the other side of this */
-}
-
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_psb_private *dev_priv;
@@ -493,22 +462,13 @@ static void psb_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
}
-static const struct dev_pm_ops psb_pm_ops = {
- .resume = gma_power_resume,
- .suspend = gma_power_suspend,
- .thaw = gma_power_thaw,
- .freeze = gma_power_freeze,
- .restore = gma_power_restore,
- .runtime_suspend = psb_runtime_suspend,
- .runtime_resume = psb_runtime_resume,
- .runtime_idle = psb_runtime_idle,
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(psb_pm_ops, gma_power_suspend, gma_power_resume, NULL);
static const struct file_operations psb_gem_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = psb_unlocked_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
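
For context on the psb_pm_ops change above: DEFINE_RUNTIME_DEV_PM_OPS() fills in the runtime callbacks and routes system sleep through pm_runtime_force_suspend()/pm_runtime_force_resume(), which is why the separate thaw/freeze/restore entries and the psb_runtime_* wrappers can go away. A hedged sketch with placeholder callbacks:

    #include <linux/pm_runtime.h>

    /* foo_suspend()/foo_resume() stand in for a driver's real handlers. */
    static int foo_suspend(struct device *dev) { return 0; }
    static int foo_resume(struct device *dev) { return 0; }

    /* Defines foo_pm_ops; runtime PM uses the callbacks directly and
     * system-wide suspend/resume reuse them via pm_runtime_force_*(). */
    static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume, NULL);
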
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0ea3d23575f3..ae544b69fc47 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -172,6 +172,8 @@
#define PSB_WATCHDOG_DELAY (HZ * 2)
#define PSB_LID_DELAY (HZ / 10)
+#define PSB_MAX_BRIGHTNESS 100
+
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -426,9 +428,7 @@ struct drm_psb_private {
spinlock_t irqmask_lock;
/* Power */
- bool suspended;
- bool display_power;
- int display_count;
+ bool pm_initialized;
/* Modesetting */
struct psb_intel_mode_device mode_dev;
@@ -486,10 +486,8 @@ struct drm_psb_private {
unsigned int core_freq;
uint32_t iLVDS_enable;
- /* Runtime PM state */
- int rpm_enabled;
-
/* MID specific */
+ bool use_msi;
bool has_gct;
struct oaktrail_gct_data gct_data;
@@ -499,10 +497,6 @@ struct drm_psb_private {
/* Register state */
struct psb_save_area regs;
- /* MSI reg save */
- uint32_t msi_addr;
- uint32_t msi_data;
-
/* Hotplug handling */
struct work_struct hotplug_work;
@@ -530,10 +524,6 @@ struct drm_psb_private {
struct drm_fb_helper *fb_helper;
- /* Panel brightness */
- int brightness;
- int brightness_adjusted;
-
bool dsr_enable;
u32 dsr_fb_update;
bool dpi_panel_on[3];
@@ -602,10 +592,13 @@ struct psb_ops {
void (*disable_sr)(struct drm_device *dev);
void (*lvds_bl_power)(struct drm_device *dev, bool on);
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
/* Backlight */
int (*backlight_init)(struct drm_device *dev);
-#endif
+ void (*backlight_set)(struct drm_device *dev, int level);
+ int (*backlight_get)(struct drm_device *dev);
+ const char *backlight_name;
+
int i2c_bus; /* I2C bus identifier for Moorestown */
};
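
With the backlight fields added above, struct psb_ops lets common gma500 code register one backlight device on a chip's behalf, while each chip file only supplies an init hook, a set-level hook and a device name. A sketch of how a chip implementation plugs in (example_ names are illustrative):

    static void example_backlight_set(struct drm_device *dev, int level)
    {
            /* program the panel PWM for a 0..PSB_MAX_BRIGHTNESS level */
    }

    static int example_backlight_init(struct drm_device *dev)
    {
            /* one-time PWM/clock setup, then pick a starting level */
            example_backlight_set(dev, PSB_MAX_BRIGHTNESS);
            return 0;
    }

    const struct psb_ops example_chip_ops = {
            /* ... */
            .backlight_init = example_backlight_init,
            .backlight_set  = example_backlight_set,
            .backlight_name = "example-bl",
            /* ... */
    };
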
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 9a5ea06a1a8e..531c1781a8fb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -9,8 +9,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <drm/drm_plane_helper.h>
-
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 8ccba116821b..8a1111fe714b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -197,8 +197,6 @@ extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
extern void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
-extern void oaktrail_dsi_init(struct drm_device *dev,
- struct psb_intel_mode_device *mode_dev);
struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev);
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
@@ -219,9 +217,6 @@ extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
int pipe);
extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
int sdvoB);
-extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
- int enable);
extern int intelfb_probe(struct drm_device *dev);
extern int intelfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index a85aace25548..bdced46dd333 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -400,26 +400,38 @@ static const struct _sdvo_cmd_name {
#define IS_SDVOB(reg) (reg == SDVOB)
#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
-static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo,
+ u8 cmd, const void *args, int args_len)
{
- int i;
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ int i, pos = 0;
+ char buffer[73];
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
+
+ for (i = 0; i < args_len; i++) {
+ BUF_PRINT("%02X ", ((u8 *)args)[i]);
+ }
+
+ for (; i < 8; i++) {
+ BUF_PRINT(" ");
+ }
- DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(psb_intel_sdvo), cmd);
- for (i = 0; i < args_len; i++)
- DRM_DEBUG_KMS("%02X ", ((u8 *)args)[i]);
- for (; i < 8; i++)
- DRM_DEBUG_KMS(" ");
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- DRM_DEBUG_KMS("(%s)", sdvo_cmd_names[i].name);
+ BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
break;
}
}
+
if (i == ARRAY_SIZE(sdvo_cmd_names))
- DRM_DEBUG_KMS("(%02X)", cmd);
- DRM_DEBUG_KMS("\n");
+ BUF_PRINT("(%02X)", cmd);
+
+ drm_WARN_ON(dev, pos >= sizeof(buffer) - 1);
+#undef BUF_PRINT
+
+ DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(psb_intel_sdvo), cmd, buffer);
}
static const char *cmd_status_names[] = {
@@ -490,13 +502,13 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
}
static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
- void *response, int response_len)
+ void *response, int response_len)
{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ char buffer[73];
+ int i, pos = 0;
u8 retry = 5;
u8 status;
- int i;
-
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
/*
* The documentation states that all commands will be
@@ -520,10 +532,13 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
goto log_fail;
}
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
+
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_DEBUG_KMS("(%s)", cmd_status_names[status]);
+ BUF_PRINT("(%s)", cmd_status_names[status]);
else
- DRM_DEBUG_KMS("(??? %d)", status);
+ BUF_PRINT("(??? %d)", status);
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
@@ -534,13 +549,18 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
- DRM_DEBUG_KMS(" %02X", ((u8 *)response)[i]);
+ BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- DRM_DEBUG_KMS("\n");
+
+ drm_WARN_ON(dev, pos >= sizeof(buffer) - 1);
+#undef BUF_PRINT
+
+ DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(psb_intel_sdvo), buffer);
return true;
log_fail:
- DRM_DEBUG_KMS("... failed\n");
+ DRM_DEBUG_KMS("%s: R: ... failed %s\n",
+ SDVO_NAME(psb_intel_sdvo), buffer);
return false;
}
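
The SDVO debug changes replace a series of partial DRM_DEBUG_KMS() calls, each of which produces its own log line, with a local buffer that is assembled via snprintf() and printed once. A standalone sketch of that accumulation pattern, independent of the SDVO code (names and buffer size are illustrative):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Collect several hex fragments into one buffer and emit a single
     * log line; the 73-byte size mirrors the one used above. */
    static void example_dump_bytes(const u8 *data, int len)
    {
            char buffer[73] = "";
            int i, pos = 0;

            for (i = 0; i < len; i++)
                    pos += snprintf(buffer + pos,
                                    max_t(int, sizeof(buffer) - pos, 0),
                                    "%02X ", data[i]);

            pr_debug("payload: %s\n", buffer);
    }
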
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6e6d61bbeab..d421031462df 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -228,7 +228,7 @@ static irqreturn_t gma_irq_handler(int irq, void *arg)
vdc_stat &= dev_priv->vdc_irq_mask;
spin_unlock(&dev_priv->irqmask_lock);
- if (dsp_int && gma_power_is_on(dev)) {
+ if (dsp_int) {
gma_vdc_interrupt(dev, vdc_stat);
handled = 1;
}
@@ -264,13 +264,12 @@ void gma_irq_preinstall(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
- if (gma_power_is_on(dev)) {
- PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
- PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
- PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
- PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
- }
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+
if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
if (dev->vblank[1].enabled)
@@ -316,17 +315,24 @@ void gma_irq_postinstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-int gma_irq_install(struct drm_device *dev, unsigned int irq)
+int gma_irq_install(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (irq == IRQ_NOTCONNECTED)
+ if (dev_priv->use_msi && pci_enable_msi(pdev)) {
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = false;
+ }
+
+ if (pdev->irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
gma_irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
if (ret)
return ret;
@@ -369,6 +375,8 @@ void gma_irq_uninstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
free_irq(pdev->irq, dev);
+ if (dev_priv->use_msi)
+ pci_disable_msi(pdev);
}
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
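
The MSI handling is now folded into the IRQ install/uninstall pair itself: enable MSI (when the chip wants it) just before request_irq(), fall back to legacy interrupts if that fails, and disable MSI again after free_irq(). A compact sketch of that pairing outside the gma500 code (all names illustrative):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int example_irq_install(struct pci_dev *pdev, bool *use_msi,
                                   irq_handler_t handler, void *arg)
    {
            if (*use_msi && pci_enable_msi(pdev)) {
                    dev_warn(&pdev->dev, "Enabling MSI failed!\n");
                    *use_msi = false;       /* fall back to legacy INTx */
            }

            return request_irq(pdev->irq, handler, IRQF_SHARED, "example", arg);
    }

    static void example_irq_uninstall(struct pci_dev *pdev, bool use_msi, void *arg)
    {
            free_irq(pdev->irq, arg);
            if (use_msi)
                    pci_disable_msi(pdev);
    }
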
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index b51e395194ff..7648f69824a5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -17,7 +17,7 @@ struct drm_device;
void gma_irq_preinstall(struct drm_device *dev);
void gma_irq_postinstall(struct drm_device *dev);
-int gma_irq_install(struct drm_device *dev, unsigned int irq);
+int gma_irq_install(struct drm_device *dev);
void gma_irq_uninstall(struct drm_device *dev);
int gma_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 4873f9799f41..7c6dc2bcd14a 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -59,6 +59,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
unsigned int bits_per_pixel = 8 / block_width;
unsigned int x, y, width, height;
u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
+ struct iosys_map dst_map, vmap;
size_t len;
void *buf;
@@ -74,7 +75,9 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
if (!buf)
return 0;
- drm_fb_xrgb8888_to_gray8(buf, 0, src, fb, rect);
+ iosys_map_set_vaddr(&dst_map, buf);
+ iosys_map_set_vaddr(&vmap, src);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
pix8 = buf;
for (y = 0; y < height; y++) {
@@ -105,7 +108,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
unsigned int bits_per_pixel = 8 / block_width;
u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
unsigned int x, y, width;
- u32 *pix32;
+ __le32 *sbuf32;
+ u32 pix32;
size_t len;
/* Start on a byte boundary */
@@ -114,8 +118,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);
for (y = rect->y1; y < rect->y2; y++) {
- pix32 = src + (y * fb->pitches[0]);
- pix32 += rect->x1;
+ sbuf32 = src + (y * fb->pitches[0]);
+ sbuf32 += rect->x1;
for (x = 0; x < width; x++) {
unsigned int pixpos = x % block_width; /* within byte from the left */
@@ -126,9 +130,10 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
*block = 0;
}
- r = *pix32 >> 16;
- g = *pix32 >> 8;
- b = *pix32++;
+ pix32 = le32_to_cpu(*sbuf32++);
+ r = pix32 >> 16;
+ g = pix32 >> 8;
+ b = pix32;
switch (format->format) {
case GUD_DRM_FORMAT_XRGB1111:
@@ -154,6 +159,7 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
u8 compression = gdrm->compression;
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
+ struct iosys_map dst;
void *vaddr, *buf;
size_t pitch, len;
int ret = 0;
@@ -177,6 +183,7 @@ retry:
buf = gdrm->compress_buf;
else
buf = gdrm->bulk_buf;
+ iosys_map_set_vaddr(&dst, buf);
/*
* Imported buffers are assumed to be write-combined and thus uncached
@@ -190,23 +197,24 @@ retry:
goto end_cpu_access;
}
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(buf, 0, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(buf, 0, vaddr, fb, rect, gud_is_big_endian());
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+ gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(buf, 0, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(buf, 0, vaddr, fb, rect, !import_attach);
+ drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
} else if (compression && !import_attach && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
} else {
- drm_fb_memcpy(buf, 0, vaddr, fb, rect);
+ drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
}
memset(req, 0, sizeof(*req));
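
These gud changes track the drm_fb_*() format-conversion helpers switching from raw pointers to struct iosys_map for both source and destination, with a NULL dst_pitch falling back to the default pitch. A hedged sketch of wrapping plain kernel pointers before calling one of them, using the signature as it appears in this tree (example_ names are placeholders):

    #include <linux/iosys-map.h>
    #include <drm/drm_format_helper.h>
    #include <drm/drm_framebuffer.h>

    /* dst_buf/src_vaddr are assumed to be plain system-memory buffers. */
    static void example_convert_to_gray8(void *dst_buf, void *src_vaddr,
                                         const struct drm_framebuffer *fb,
                                         const struct drm_rect *rect)
    {
            struct iosys_map dst, src;

            iosys_map_set_vaddr(&dst, dst_buf);
            iosys_map_set_vaddr(&src, src_vaddr);

            /* NULL dst_pitch: let the helper use the default pitch */
            drm_fb_xrgb8888_to_gray8(&dst, NULL, &src, fb, rect);
    }
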
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 073adfe438dd..4e41c144a290 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index b770f7662830..c5265675bf0c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -3,7 +3,7 @@ config DRM_HISI_KIRIN
tristate "DRM Support for Hisilicon Kirin series SoCs Platform"
depends on DRM && OF && ARM64
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have a hisilicon Kirin chipsets(hi6220).
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 61c29c2834e6..871f79a6b17e 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -24,11 +24,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -549,13 +548,13 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
- struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
- u32 addr = (u32)obj->paddr + y * stride;
+ u32 addr = (u32)obj->dma_addr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
- ch + 1, y, in_h, stride, (u32)obj->paddr);
+ ch + 1, y, in_h, stride, (u32)obj->dma_addr);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n",
addr, fb->width, fb->height, fmt,
&fb->format->format);
@@ -920,12 +919,12 @@ static const struct drm_mode_config_funcs ade_mode_config_funcs = {
};
-DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ade_fops);
static const struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
.date = "20150718",
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 2af51df6dca7..73ee7f25f734 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -19,9 +19,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 6d11e7938c83..f84d39762a72 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -23,9 +23,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
}
ret = hyperv_setup_vram(hv, hdev);
-
if (ret)
goto err_vmbus_close;
@@ -150,18 +146,20 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
ret = hyperv_mode_config_init(hv);
if (ret)
- goto err_vmbus_close;
+ goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
- goto err_vmbus_close;
+ goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
+err_free_mmio:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index b8e64dd8d3a6..28e732f94bf2 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -21,19 +21,18 @@
#include "hyperv_drm.h"
static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
- const struct iosys_map *map,
+ const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct hyperv_drm_device *hv = to_hv(fb->dev);
- void __iomem *dst = hv->vram;
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
int idx;
if (!drm_dev_enter(&hv->dev, &idx))
return -ENODEV;
- dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect);
- drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect);
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
+ drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
drm_dev_exit(idx);
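
The same iosys_map conversion shows up here with an I/O-memory destination: the vram pointer is wrapped with IOSYS_MAP_INIT_VADDR_IOMEM(), advanced to the clip offset, and the generic drm_fb_memcpy() replaces the old _toio variant. A minimal sketch of that sequence (all names local to the example):

    #include <linux/iosys-map.h>
    #include <drm/drm_format_helper.h>
    #include <drm/drm_framebuffer.h>

    static void example_blit_clip(void __iomem *vram, const struct iosys_map *vmap,
                                  const struct drm_framebuffer *fb,
                                  const struct drm_rect *rect)
    {
            struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(vram);

            /* advance to the first byte of the damaged rectangle */
            iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));

            /* drm_fb_memcpy() handles both system- and I/O-memory destinations */
            drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
    }
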
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b91e48d2190d..578b738859b9 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -417,11 +417,9 @@ fail:
return -ENODEV;
}
-static int ch7006_remove(struct i2c_client *client)
+static void ch7006_remove(struct i2c_client *client)
{
ch7006_dbg(client, "\n");
-
- return 0;
}
static int ch7006_resume(struct device *dev)
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 741886b54419..1bc0b5de4499 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -370,12 +370,6 @@ sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int
-sil164_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static struct i2c_client *
sil164_detect_slave(struct i2c_client *client)
{
@@ -427,7 +421,6 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
- .remove = sil164_remove,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5b03fdd1eaa4..9ed54e7ccff2 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -478,14 +478,12 @@ static int tda9950_probe(struct i2c_client *client,
return 0;
}
-static int tda9950_remove(struct i2c_client *client)
+static void tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
-
- return 0;
}
static struct i2c_device_id tda9950_ids[] = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f8eb6f69be05..d444e7fffb54 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2076,11 +2076,10 @@ tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
}
-static int tda998x_remove(struct i2c_client *client)
+static void tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
tda998x_destroy(&client->dev);
- return 0;
}
#ifdef CONFIG_OF
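
The ch7006/tda9950/tda998x hunks (and the sil164 deletion) are part of the tree-wide conversion of the I2C client remove callback to return void, since the error code returned from .remove was effectively ignored by the I2C core anyway. A small sketch of the resulting driver shape (example_ names are placeholders):

    #include <linux/i2c.h>
    #include <linux/module.h>

    static int example_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
    {
            return 0;
    }

    static void example_remove(struct i2c_client *client)
    {
            /* release per-client resources; there is no status to return */
    }

    static struct i2c_driver example_driver = {
            .driver = { .name = "example" },
            .probe  = example_probe,
            .remove = example_remove,
    };
    module_i2c_driver(example_driver);

    MODULE_LICENSE("GPL");
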
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7ae3b7d67fcf..3efce05d7b57 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,8 @@ config DRM_I915
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
+ select X86_PLATFORM_DEVICES if ACPI
+ select ACPI_WMI if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 522ef9b4aff3..a26edcdadc21 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -123,6 +123,7 @@ gt-y += \
gt/intel_ring.o \
gt/intel_ring_submission.o \
gt/intel_rps.o \
+ gt/intel_sa_media.o \
gt/intel_sseu.o \
gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
@@ -257,7 +258,8 @@ i915-y += \
display/intel_vga.o \
display/i9xx_plane.o \
display/skl_scaler.o \
- display/skl_universal_plane.o
+ display/skl_universal_plane.o \
+ display/skl_watermark.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 82ad8fe7440c..e3e3d27ffb53 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -1169,7 +1169,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
@@ -1223,7 +1223,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, DEISR) & bit;
}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 861dcd2eb890..a5be4af792cb 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -202,7 +202,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
* Should measure whether using a lower cdclk w/o IPS
*/
if (IS_BROADWELL(i915) &&
- crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100)
+ crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100)
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 592e5adfed8b..5afbe3e98ee8 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -126,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
- return dev_priv->fbc[INTEL_FBC_A];
+ return dev_priv->display.fbc[INTEL_FBC_A];
else
return NULL;
}
@@ -326,8 +325,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
return ret;
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
i9xx_plane_has_windowing(plane));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 5dcfa7feffa9..ed4d93942dbd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -33,6 +33,7 @@
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
@@ -641,13 +642,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -657,13 +658,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
@@ -693,7 +694,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -709,7 +710,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static void
@@ -1629,6 +1630,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@@ -2070,8 +2073,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index e78430001f07..9df78e7caa2b 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <acpi/video.h>
#include "i915_drv.h"
#include "intel_acpi.h"
@@ -331,3 +332,29 @@ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
*/
fwnode_handle_put(fwnode);
}
+
+void intel_acpi_video_register(struct drm_i915_private *i915)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ acpi_video_register();
+
+ /*
+ * If i915 is driving an internal panel without registering its native
+ * backlight handler try to register the acpi_video backlight.
+ * For panels not driven by i915 another GPU driver may still register
+ * a native backlight later and acpi_video_register_backlight() should
+ * only be called after any native backlights have been registered.
+ */
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_panel *panel = &to_intel_connector(connector)->panel;
+
+ if (panel->backlight.funcs && !panel->backlight.device) {
+ acpi_video_register_backlight();
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
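
intel_acpi_video_register() implements the ordering rule spelled out in its comment: the ACPI video driver is registered right away, but acpi_video_register_backlight() is deferred until it is clear whether a native backlight will be provided. The gma500 hunk earlier in this series does the same from its load path. A minimal driver-side sketch of that ordering, with example_register_native_backlight() as a stand-in helper:

    #include <acpi/video.h>
    #include <drm/drm_device.h>

    static int example_register_native_backlight(struct drm_device *dev);

    static void example_setup_backlight(struct drm_device *dev)
    {
            int ret = example_register_native_backlight(dev);

            /*
             * Only once the native backlight has been registered (or is
             * known not to be coming) may the acpi_video backlight be
             * brought up.
             */
            if (ret == 0)
                    acpi_video_register_backlight();
    }
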
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 4a760a2baed9..6a0007452f95 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -14,6 +14,7 @@ void intel_unregister_dsm_handler(void);
void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
void intel_acpi_device_id_update(struct drm_i915_private *i915);
void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
+void intel_acpi_video_register(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
@@ -23,6 +24,8 @@ static inline
void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
static inline
void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_video_register(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 40da7910f845..18f0a5ae3bac 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -32,7 +32,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -63,9 +62,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property)
+ if (property == dev_priv->display.properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->broadcast_rgb_property)
+ else if (property == dev_priv->display.properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
drm_dbg_atomic(&dev_priv->drm,
@@ -96,12 +95,12 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property) {
+ if (property == dev_priv->display.properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->broadcast_rgb_property) {
+ if (property == dev_priv->display.properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index efe8591619e3..aaa6708256d5 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "gt/intel_rps.h"
@@ -43,9 +42,9 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
-#include "intel_pm.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
+#include "skl_watermark.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 6c9ee905f132..aacbc6da84ef 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -393,7 +393,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
@@ -441,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -496,7 +496,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* Disable timestamps */
tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
@@ -514,7 +514,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
@@ -532,7 +532,7 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bpp = crtc_state->dsc.compressed_bpp;
- cdclk = i915->cdclk.hw.cdclk;
+ cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
@@ -639,7 +639,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
@@ -677,7 +677,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
@@ -814,7 +814,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
@@ -838,17 +838,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->audio.funcs)
- dev_priv->audio.funcs->audio_codec_enable(encoder,
- crtc_state,
- conn_state);
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
encoder->audio_connector = connector;
/* referred in audio callbacks */
- dev_priv->audio.encoder_map[pipe] = encoder;
- mutex_unlock(&dev_priv->audio.mutex);
+ dev_priv->display.audio.encoder_map[pipe] = encoder;
+ mutex_unlock(&dev_priv->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -878,7 +878,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_connector *connector = old_conn_state->connector;
enum port port = encoder->port;
@@ -891,15 +891,15 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name, pipe_name(pipe));
- if (dev_priv->audio.funcs)
- dev_priv->audio.funcs->audio_codec_disable(encoder,
- old_crtc_state,
- old_conn_state);
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
encoder->audio_connector = NULL;
- dev_priv->audio.encoder_map[pipe] = NULL;
- mutex_unlock(&dev_priv->audio.mutex);
+ dev_priv->display.audio.encoder_map[pipe] = NULL;
+ mutex_unlock(&dev_priv->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -935,13 +935,13 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
- dev_priv->audio.funcs = &g4x_audio_funcs;
+ dev_priv->display.funcs.audio = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->audio.funcs = &ilk_audio_funcs;
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->audio.funcs = &hsw_audio_funcs;
+ dev_priv->display.funcs.audio = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->audio.funcs = &ilk_audio_funcs;
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
}
}
@@ -971,7 +971,7 @@ void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
struct aud_ts_cdclk_m_n aud_ts;
if (DISPLAY_VER(i915) >= 13) {
- get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts);
+ get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
@@ -1046,13 +1046,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (dev_priv->audio.power_refcount++ == 0) {
+ if (dev_priv->display.audio.power_refcount++ == 0) {
if (DISPLAY_VER(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
- dev_priv->audio.freq_cntrl);
+ dev_priv->display.audio.freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio.freq_cntrl);
+ dev_priv->display.audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -1073,7 +1073,7 @@ static void i915_audio_component_put_power(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--dev_priv->audio.power_refcount == 0)
+ if (--dev_priv->display.audio.power_refcount == 0)
if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
@@ -1119,7 +1119,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
return -ENODEV;
- return dev_priv->cdclk.hw.cdclk;
+ return dev_priv->display.cdclk.hw.cdclk;
}
/*
@@ -1140,10 +1140,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
if (drm_WARN_ON(&dev_priv->drm,
- pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map)))
+ pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map)))
return NULL;
- encoder = dev_priv->audio.encoder_map[pipe];
+ encoder = dev_priv->display.audio.encoder_map[pipe];
/*
* when bootup, audio driver may not know it is
* MST or not. So it will poll all the port & pipe
@@ -1159,7 +1159,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
return NULL;
for_each_pipe(dev_priv, pipe) {
- encoder = dev_priv->audio.encoder_map[pipe];
+ encoder = dev_priv->display.audio.encoder_map[pipe];
if (encoder == NULL)
continue;
@@ -1177,7 +1177,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1187,7 +1187,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
return 0;
cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
@@ -1206,7 +1206,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -1220,13 +1220,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
const u8 *eld;
int ret = -EINVAL;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
port_name(port));
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
return ret;
}
@@ -1238,7 +1238,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
return ret;
}
@@ -1273,7 +1273,7 @@ static int i915_audio_component_bind(struct device *i915_kdev,
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- dev_priv->audio.component = acomp;
+ dev_priv->display.audio.component = acomp;
drm_modeset_unlock_all(&dev_priv->drm);
return 0;
@@ -1288,14 +1288,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- dev_priv->audio.component = NULL;
+ dev_priv->display.audio.component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
- if (dev_priv->audio.power_refcount)
+ if (dev_priv->display.audio.power_refcount)
drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
- dev_priv->audio.power_refcount);
+ dev_priv->display.audio.power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1359,13 +1359,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- dev_priv->audio.freq_cntrl = aud_freq;
+ dev_priv->display.audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
intel_audio_cdclk_change_post(dev_priv);
- dev_priv->audio.component_registered = true;
+ dev_priv->display.audio.component_registered = true;
}
/**
@@ -1377,11 +1377,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
*/
static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->audio.component_registered)
+ if (!dev_priv->display.audio.component_registered)
return;
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
- dev_priv->audio.component_registered = false;
+ dev_priv->display.audio.component_registered = false;
}
/**
@@ -1403,7 +1403,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_audio_deinit(struct drm_i915_private *dev_priv)
{
- if ((dev_priv)->audio.lpe.platdev != NULL)
+ if (dev_priv->display.audio.lpe.platdev != NULL)
intel_lpe_audio_teardown(dev_priv);
else
i915_audio_component_cleanup(dev_priv);
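
Note: the audio hunks above mechanically move per-display state from dev_priv->audio.* into dev_priv->display.audio.* (and the vtable into display.funcs.audio) while keeping the refcount-gated power behaviour: AUD_FREQ_CNTRL is restored only on the 0->1 transition and the CDCLK workaround is dropped only on the 1->0 transition. A minimal standalone C sketch of that gating pattern; the names are illustrative, not the i915 API.

	#include <stdio.h>

	/* Illustrative only: models the 0->1 / 1->0 refcount gating used by the
	 * audio component get/put power paths, not the real driver code. */
	static int audio_power_refcount;

	static void audio_power_get(void)
	{
		if (audio_power_refcount++ == 0)
			printf("first user: restore AUD_FREQ_CNTRL, force CDCLK to 2*BCLK\n");
	}

	static void audio_power_put(void)
	{
		if (--audio_power_refcount == 0)
			printf("last user: stop forcing CDCLK\n");
	}

	int main(void)
	{
		audio_power_get();	/* programs hardware */
		audio_power_get();	/* refcount only */
		audio_power_put();	/* refcount only */
		audio_power_put();	/* undoes the workaround */
		return 0;
	}
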
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 110fc98ec280..beba39a38c87 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -8,7 +8,10 @@
#include <linux/pwm.h>
#include <linux/string_helpers.h>
+#include <acpi/video.h>
+
#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -16,6 +19,8 @@
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#include "intel_pci_config.h"
+#include "intel_pps.h"
+#include "intel_quirks.h"
/**
* scale - scale values from one range to another
@@ -86,7 +91,7 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
return val;
if (dev_priv->params.invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -126,7 +131,7 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -303,7 +308,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
@@ -319,7 +324,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -463,14 +468,14 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
return;
}
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -813,11 +818,11 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -827,12 +832,12 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
@@ -860,7 +865,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
@@ -870,7 +875,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static int intel_backlight_device_update_status(struct backlight_device *bd)
@@ -950,6 +955,11 @@ int intel_backlight_device_register(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(&i915->drm, "Skipping intel_backlight registration\n");
+ return 0;
+ }
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
@@ -971,26 +981,24 @@ int intel_backlight_device_register(struct intel_connector *connector)
if (!name)
return -ENOMEM;
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ bd = backlight_device_get_by_name(name);
+ if (bd) {
+ put_device(&bd->dev);
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
}
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
drm_err(&i915->drm,
@@ -1113,7 +1121,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_PINEVIEW(dev_priv))
clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1131,7 +1139,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_G4X(dev_priv))
clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1591,11 +1599,11 @@ void intel_backlight_update(struct intel_atomic_state *state,
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
@@ -1605,7 +1613,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) {
drm_dbg_kms(&dev_priv->drm,
"no backlight present per VBT, but present per quirk\n");
} else {
@@ -1620,9 +1628,9 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
@@ -1773,9 +1781,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ connector->panel.backlight.power = intel_pps_backlight_power;
+ }
/* We're using a standard PWM backlight interface */
panel->backlight.funcs = &pwm_bl_funcs;
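
Note: the registration rework above first probes for an existing device with the default "intel_backlight" name (via backlight_device_get_by_name(), dropping the reference with put_device()) and only then falls back to a per-card name, so backlight_device_register() is called exactly once. A hedged userspace sketch of the same name-selection logic, with a fake lookup standing in for the backlight class:

	#include <stdio.h>
	#include <string.h>

	/* Stand-in for backlight_device_get_by_name(): pretend the default name is
	 * already taken by another GPU. Purely illustrative, not the backlight API. */
	static int name_already_registered(const char *name)
	{
		return strcmp(name, "intel_backlight") == 0;
	}

	static void pick_backlight_name(char *buf, size_t len, int card, const char *connector)
	{
		snprintf(buf, len, "intel_backlight");
		if (name_already_registered(buf))
			/* default name taken: fall back to a unique per-card name */
			snprintf(buf, len, "card%d-%s-backlight", card, connector);
	}

	int main(void)
	{
		char name[64];

		pick_backlight_name(name, sizeof(name), 0, "eDP-1");
		printf("registering backlight as \"%s\"\n", name);
		return 0;
	}
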
diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
new file mode 100644
index 000000000000..50c1210f6d5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_REGS_H__
+#define __INTEL_BACKLIGHT_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+ _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+ _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+ _VLV_BLC_HIST_CTL_B)
+
+/* Backlight control */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
+#define BLM_TRANSCODER_B BLM_PIPE_B
+#define BLM_TRANSCODER_C BLM_PIPE_C
+#define BLM_TRANSCODER_EDP (3 << 29)
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
+
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLM_HISTOGRAM_ENABLE (1 << 31)
+
+/* New registers for PCH-split platforms. Safe where new bits show up, the
+ * register layout matches with gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
+#define BLC_PWM_CPU_CTL _MMIO(0x48254)
+
+#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
+
+/* BXT backlight register definition. */
+#define _BXT_BLC_PWM_CTL1 0xC8250
+#define BXT_BLC_PWM_ENABLE (1 << 31)
+#define BXT_BLC_PWM_POLARITY (1 << 29)
+#define _BXT_BLC_PWM_FREQ1 0xC8254
+#define _BXT_BLC_PWM_DUTY1 0xC8258
+
+#define _BXT_BLC_PWM_CTL2 0xC8350
+#define _BXT_BLC_PWM_FREQ2 0xC8354
+#define _BXT_BLC_PWM_DUTY2 0xC8358
+
+#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
+#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
+#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
+
+/* Utility pin */
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
+
+#endif /* __INTEL_BACKLIGHT_REGS_H__ */
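
Note: per the BLC_PWM_CTL layout above, bits 31:17 hold the most significant 15 bits of the modulation frequency (the real cycle count is twice the stored field) and bits 15:0 hold the duty cycle, which must not exceed the full cycle. A small self-contained sketch of packing and unpacking those fields, purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define FREQ_SHIFT	17
	#define FREQ_MASK	(0x7fffu << 17)
	#define DUTY_MASK	0xffffu

	/* cycles is the full modulation period; the register stores cycles / 2 */
	static uint32_t pack_blc_pwm_ctl(uint32_t cycles, uint32_t duty)
	{
		return ((cycles / 2) << FREQ_SHIFT) | (duty & DUTY_MASK);
	}

	int main(void)
	{
		uint32_t val = pack_blc_pwm_ctl(37500, 18750);	/* ~50% duty */

		printf("freq field %u, duty %u\n",
		       (unsigned)((val & FREQ_MASK) >> FREQ_SHIFT),
		       (unsigned)(val & DUTY_MASK));
		return 0;
	}
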
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 51dde5bfd956..28bdb936cd1f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -135,18 +135,6 @@ static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id)
return block - bdb;
}
-/* size of the block excluding the header */
-static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id)
-{
- const void *block;
-
- block = find_raw_section(bdb, section_id);
- if (!block)
- return 0;
-
- return get_blocksize(block);
-}
-
struct bdb_block_entry {
struct list_head node;
enum bdb_block_id section_id;
@@ -159,7 +147,7 @@ find_section(struct drm_i915_private *i915,
{
struct bdb_block_entry *entry;
- list_for_each_entry(entry, &i915->vbt.bdb_blocks, node) {
+ list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) {
if (entry->section_id == section_id)
return entry->data + 3;
}
@@ -231,9 +219,14 @@ static bool validate_lfp_data_ptrs(const void *bdb,
{
int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size;
int data_block_size, lfp_data_size;
+ const void *data_block;
int i;
- data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA);
+ data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!data_block)
+ return false;
+
+ data_block_size = get_blocksize(data_block);
if (data_block_size == 0)
return false;
@@ -261,21 +254,6 @@ static bool validate_lfp_data_ptrs(const void *bdb,
if (16 * lfp_data_size > data_block_size)
return false;
- /*
- * Except for vlv/chv machines all real VBTs seem to have 6
- * unaccounted bytes in the fp_timing table. And it doesn't
- * appear to be a really intentional hole as the fp_timing
- * 0xffff terminator is always within those 6 missing bytes.
- */
- if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size &&
- fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
- return false;
-
- if (ptrs->ptr[0].fp_timing.offset + fp_timing_size > ptrs->ptr[0].dvo_timing.offset ||
- ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
- ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
- return false;
-
/* make sure the table entries have uniform size */
for (i = 1; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size ||
@@ -289,6 +267,23 @@ static bool validate_lfp_data_ptrs(const void *bdb,
return false;
}
+ /*
+ * Except for vlv/chv machines all real VBTs seem to have 6
+ * unaccounted bytes in the fp_timing table. And it doesn't
+ * appear to be a really intentional hole as the fp_timing
+ * 0xffff terminator is always within those 6 missing bytes.
+ */
+ if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size)
+ fp_timing_size += 6;
+
+ if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset ||
+ ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
+ ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
+ return false;
+
/* make sure the tables fit inside the data block */
for (i = 0; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size ||
@@ -300,6 +295,15 @@ static bool validate_lfp_data_ptrs(const void *bdb,
if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size)
return false;
+ /* make sure fp_timing terminators are present at expected locations */
+ for (i = 0; i < 16; i++) {
+ const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset +
+ fp_timing_size - 2;
+
+ if (*t != 0xffff)
+ return false;
+ }
+
return true;
}
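
Note: the new validation pass above additionally requires each of the 16 per-panel fp_timing tables to end in the 0xffff terminator at offset + fp_timing_size - 2 before the pointer block is trusted. A rough standalone sketch of that check over a flat data block; the layout constants here are made up for the example:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	/* Illustrative sizes; the real fp_timing size comes from the VBT pointers. */
	#define NUM_PANELS	16
	#define FP_TIMING_SIZE	38

	static int fp_timings_terminated(const uint8_t *block, const uint32_t *offsets)
	{
		for (int i = 0; i < NUM_PANELS; i++) {
			uint16_t t;

			memcpy(&t, block + offsets[i] + FP_TIMING_SIZE - 2, sizeof(t));
			if (t != 0xffff)
				return 0;	/* reject the whole pointer block */
		}
		return 1;
	}

	int main(void)
	{
		uint8_t block[NUM_PANELS * FP_TIMING_SIZE] = {0};
		uint32_t offsets[NUM_PANELS];

		for (int i = 0; i < NUM_PANELS; i++) {
			offsets[i] = i * FP_TIMING_SIZE;
			block[offsets[i] + FP_TIMING_SIZE - 2] = 0xff;
			block[offsets[i] + FP_TIMING_SIZE - 1] = 0xff;
		}
		printf("valid: %d\n", fp_timings_terminated(block, offsets));
		return 0;
	}
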
@@ -333,18 +337,6 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
return validate_lfp_data_ptrs(bdb, ptrs);
}
-static const void *find_fp_timing_terminator(const u8 *data, int size)
-{
- int i;
-
- for (i = 0; i < size - 1; i++) {
- if (data[i] == 0xff && data[i+1] == 0xff)
- return &data[i];
- }
-
- return NULL;
-}
-
static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
int table_size, int total_size)
{
@@ -368,11 +360,22 @@ static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
const void *bdb)
{
- int i, size, table_size, block_size, offset;
- const void *t0, *t1, *block;
+ int i, size, table_size, block_size, offset, fp_timing_size;
struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const void *block;
void *ptrs_block;
+ /*
+ * The hardcoded fp_timing_size is only valid for
+ * modernish VBTs. All older VBTs definitely should
+ * include block 41 and thus we don't need to
+ * generate one.
+ */
+ if (i915->display.vbt.version < 155)
+ return NULL;
+
+ fp_timing_size = 38;
+
block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
if (!block)
return NULL;
@@ -381,17 +384,8 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
block_size = get_blocksize(block);
- size = block_size;
- t0 = find_fp_timing_terminator(block, size);
- if (!t0)
- return NULL;
-
- size -= t0 - block - 2;
- t1 = find_fp_timing_terminator(t0 + 2, size);
- if (!t1)
- return NULL;
-
- size = t1 - t0;
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
if (size * 16 > block_size)
return NULL;
@@ -409,7 +403,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
table_size = sizeof(struct lvds_dvo_timing);
size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
- table_size = t0 - block + 2;
+ table_size = fp_timing_size;
size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
if (ptrs->ptr[0].fp_timing.table_size)
@@ -424,14 +418,14 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
return NULL;
}
- size = t1 - t0;
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
for (i = 1; i < 16; i++) {
next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
}
- size = t1 - t0;
table_size = sizeof(struct lvds_lfp_panel_name);
if (16 * (size + table_size) <= block_size) {
@@ -479,6 +473,13 @@ init_bdb_block(struct drm_i915_private *i915,
block_size = get_blocksize(block);
+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequence block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
if (!entry) {
@@ -501,7 +502,7 @@ init_bdb_block(struct drm_i915_private *i915,
return;
}
- list_add_tail(&entry->node, &i915->vbt.bdb_blocks);
+ list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks);
}
static void init_bdb_blocks(struct drm_i915_private *i915,
@@ -604,6 +605,19 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
+static void dump_pnp_id(struct drm_i915_private *i915,
+ const struct lvds_pnp_id *pnp_id,
+ const char *name)
+{
+ u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
+ char vend[4];
+
+ drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
+ name, drm_edid_decode_mfg_id(mfg_name, vend),
+ pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
+ pnp_id->mfg_week, pnp_id->mfg_year + 1990);
+}
+
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
@@ -655,6 +669,8 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
edid_id_nodate.mfg_week = 0;
edid_id_nodate.mfg_year = 0;
+ dump_pnp_id(i915, edid_id, "EDID");
+
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
@@ -861,6 +877,7 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
@@ -874,11 +891,18 @@ parse_lfp_data(struct drm_i915_private *i915,
if (!panel->vbt.lfp_lvds_vbt_mode)
parse_lfp_panel_dtd(i915, panel, data, ptrs);
+ pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
+ dump_pnp_id(i915, pnp_id, "Panel");
+
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
- if (i915->vbt.version >= 188) {
+ drm_dbg_kms(&i915->drm, "Panel name: %.*s\n",
+ (int)sizeof(tail->panel_name[0].name),
+ tail->panel_name[panel_type].name);
+
+ if (i915->display.vbt.version >= 188) {
panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
drm_dbg_kms(&i915->drm,
@@ -904,7 +928,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
* first on VBT >= 229, but still fall back to trying the old LFP
* block if that fails.
*/
- if (i915->vbt.version < 229)
+ if (i915->display.vbt.version < 229)
return;
generic_dtd = find_section(i915, BDB_GENERIC_DTD);
@@ -1008,12 +1032,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
- if (i915->vbt.version >= 191) {
+ if (i915->display.vbt.version >= 191) {
size_t exp_size;
- if (i915->vbt.version >= 236)
+ if (i915->display.vbt.version >= 236)
exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (i915->vbt.version >= 234)
+ else if (i915->display.vbt.version >= 234)
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
else
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
@@ -1030,14 +1054,14 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
- if (i915->vbt.version >= 234) {
+ if (i915->display.vbt.version >= 234) {
u16 min_level;
bool scale;
level = backlight_data->brightness_level[panel_type].level;
min_level = backlight_data->brightness_min_level[panel_type].level;
- if (i915->vbt.version >= 236)
+ if (i915->display.vbt.version >= 236)
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
else
scale = level > 255;
@@ -1134,37 +1158,37 @@ parse_general_features(struct drm_i915_private *i915)
if (!general)
return;
- i915->vbt.int_tv_support = general->int_tv_support;
+ i915->display.vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
- if (i915->vbt.version >= 155 &&
+ if (i915->display.vbt.version >= 155 &&
(HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
- i915->vbt.int_crt_support = general->int_crt_support;
- i915->vbt.lvds_use_ssc = general->enable_ssc;
- i915->vbt.lvds_ssc_freq =
+ i915->display.vbt.int_crt_support = general->int_crt_support;
+ i915->display.vbt.lvds_use_ssc = general->enable_ssc;
+ i915->display.vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(i915, general->ssc_freq);
- i915->vbt.display_clock_mode = general->display_clock_mode;
- i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
- if (i915->vbt.version >= 181) {
- i915->vbt.orientation = general->rotate_180 ?
+ i915->display.vbt.display_clock_mode = general->display_clock_mode;
+ i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (i915->display.vbt.version >= 181) {
+ i915->display.vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
- i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- if (i915->vbt.version >= 249 && general->afc_startup_config) {
- i915->vbt.override_afc_startup = true;
- i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ if (i915->display.vbt.version >= 249 && general->afc_startup_config) {
+ i915->display.vbt.override_afc_startup = true;
+ i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
}
drm_dbg_kms(&i915->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- i915->vbt.int_tv_support,
- i915->vbt.int_crt_support,
- i915->vbt.lvds_use_ssc,
- i915->vbt.lvds_ssc_freq,
- i915->vbt.display_clock_mode,
- i915->vbt.fdi_rx_polarity_inverted);
+ i915->display.vbt.int_tv_support,
+ i915->display.vbt.int_crt_support,
+ i915->display.vbt.lvds_use_ssc,
+ i915->display.vbt.lvds_ssc_freq,
+ i915->display.vbt.display_clock_mode,
+ i915->display.vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -1190,7 +1214,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
return;
}
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
@@ -1214,7 +1238,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1];
+ mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
mapping->slave_addr = child->slave_addr;
@@ -1265,7 +1289,7 @@ parse_driver_features(struct drm_i915_private *i915)
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
- i915->vbt.int_lvds_support = 0;
+ i915->display.vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
@@ -1278,10 +1302,10 @@ parse_driver_features(struct drm_i915_private *i915)
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
- if (i915->vbt.version >= 134 &&
+ if (i915->display.vbt.version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
- i915->vbt.int_lvds_support = 0;
+ i915->display.vbt.int_lvds_support = 0;
}
}
@@ -1295,7 +1319,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
if (!driver)
return;
- if (i915->vbt.version < 228) {
+ if (i915->display.vbt.version < 228) {
drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
@@ -1328,7 +1352,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.vrr = true; /* matches Windows behaviour */
- if (i915->vbt.version < 228)
+ if (i915->display.vbt.version < 228)
return;
power = find_section(i915, BDB_LFP_POWER);
@@ -1354,10 +1378,10 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.drrs_type = DRRS_TYPE_NONE;
}
- if (i915->vbt.version >= 232)
+ if (i915->display.vbt.version >= 232)
panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
- if (i915->vbt.version >= 233)
+ if (i915->display.vbt.version >= 233)
panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
panel_type);
}
@@ -1393,7 +1417,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.pps = *edp_pps;
- if (i915->vbt.version >= 224) {
+ if (i915->display.vbt.version >= 224) {
panel->vbt.edp.rate =
edp->edp_fast_link_training_rate[panel_type] * 20;
} else {
@@ -1472,7 +1496,7 @@ parse_edp(struct drm_i915_private *i915,
break;
}
- if (i915->vbt.version >= 173) {
+ if (i915->display.vbt.version >= 173) {
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
@@ -1488,7 +1512,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.drrs_msa_timing_delay =
panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
- if (i915->vbt.version >= 244)
+ if (i915->display.vbt.version >= 244)
panel->vbt.edp.max_link_rate =
edp->edp_max_port_link_rate[panel_type] * 20;
}
@@ -1520,7 +1544,7 @@ parse_psr(struct drm_i915_private *i915,
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
- if (i915->vbt.version >= 205 &&
+ if (i915->display.vbt.version >= 205 &&
(DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
@@ -1566,7 +1590,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
- if (i915->vbt.version >= 226) {
+ if (i915->display.vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = panel_bits(wakeup_time, panel_type, 2);
@@ -1596,7 +1620,9 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
struct intel_panel *panel,
enum port port)
{
- if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
+ if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
panel->vbt.dsi.cabc_ports = BIT(port);
@@ -1609,11 +1635,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
break;
}
@@ -1625,12 +1651,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
panel->vbt.dsi.cabc_ports =
- BIT(PORT_A) | BIT(PORT_C);
+ BIT(PORT_A) | BIT(port_bc);
break;
}
}
@@ -2051,7 +2077,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
u16 block_size;
int index;
- if (i915->vbt.version < 198)
+ if (i915->display.vbt.version < 198)
return;
params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
@@ -2071,7 +2097,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!child->compression_enable)
@@ -2205,7 +2231,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->vbt.ports[port];
+ devdata = i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2254,7 +2280,7 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- child = &i915->vbt.ports[p]->child;
+ child = &i915->display.vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
@@ -2271,7 +2297,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->vbt.ports[port];
+ devdata = i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2306,7 +2332,7 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- child = &i915->vbt.ports[p]->child;
+ child = &i915->display.vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
child->aux_channel = 0;
@@ -2418,7 +2444,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
[PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
};
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
ARRAY_SIZE(xelpd_port_mapping[0]),
xelpd_port_mapping,
@@ -2480,15 +2506,23 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 216)
+ if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
- if (devdata->i915->vbt.version >= 230)
+ if (devdata->i915->display.vbt.version >= 230)
return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
else
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
+static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 244)
+ return 0;
+
+ return devdata->child.dp_max_lane_count + 1;
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2544,7 +2578,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 158)
+ if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
return devdata->child.hdmi_level_shifter_value;
@@ -2552,7 +2586,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 204)
+ if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
switch (devdata->child.hdmi_max_data_rate) {
@@ -2661,7 +2695,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
return;
}
- if (i915->vbt.ports[port]) {
+ if (i915->display.vbt.ports[port]) {
drm_dbg_kms(&i915->drm,
"More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -2676,7 +2710,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
if (intel_bios_encoder_supports_dp(devdata))
sanitize_aux_ch(devdata, port);
- i915->vbt.ports[port] = devdata;
+ i915->display.vbt.ports[port] = devdata;
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -2692,12 +2726,12 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
if (!has_ddi_port_info(i915))
return;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node)
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
parse_ddi_port(devdata);
for_each_port(port) {
- if (i915->vbt.ports[port])
- print_ddi_port(i915->vbt.ports[port], port);
+ if (i915->display.vbt.ports[port])
+ print_ddi_port(i915->display.vbt.ports[port], port);
}
}
@@ -2730,33 +2764,33 @@ parse_general_definitions(struct drm_i915_private *i915)
bus_pin = defs->crt_ddc_gmbus_pin;
drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(i915, bus_pin))
- i915->vbt.crt_ddc_pin = bus_pin;
+ i915->display.vbt.crt_ddc_pin = bus_pin;
- if (i915->vbt.version < 106) {
+ if (i915->display.vbt.version < 106) {
expected_size = 22;
- } else if (i915->vbt.version < 111) {
+ } else if (i915->display.vbt.version < 111) {
expected_size = 27;
- } else if (i915->vbt.version < 195) {
+ } else if (i915->display.vbt.version < 195) {
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (i915->vbt.version == 195) {
+ } else if (i915->display.vbt.version == 195) {
expected_size = 37;
- } else if (i915->vbt.version <= 215) {
+ } else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 237) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
drm_dbg(&i915->drm,
"Expected child device config size for VBT version %u not known; assuming %u\n",
- i915->vbt.version, expected_size);
+ i915->display.vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
drm_err(&i915->drm,
"Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, i915->vbt.version);
+ defs->child_dev_size, expected_size, i915->display.vbt.version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
@@ -2792,10 +2826,10 @@ parse_general_definitions(struct drm_i915_private *i915)
memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- list_add_tail(&devdata->node, &i915->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
}
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
drm_dbg_kms(&i915->drm,
"no child dev is parsed from VBT\n");
}
@@ -2804,25 +2838,25 @@ parse_general_definitions(struct drm_i915_private *i915)
static void
init_vbt_defaults(struct drm_i915_private *i915)
{
- i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+ i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
- i915->vbt.int_tv_support = 1;
- i915->vbt.int_crt_support = 1;
+ i915->display.vbt.int_tv_support = 1;
+ i915->display.vbt.int_crt_support = 1;
/* driver features */
- i915->vbt.int_lvds_support = 1;
+ i915->display.vbt.int_lvds_support = 1;
/* Default to using SSC */
- i915->vbt.lvds_use_ssc = 1;
+ i915->display.vbt.lvds_use_ssc = 1;
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
- i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
- !HAS_PCH_SPLIT(i915));
+ i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
+ !HAS_PCH_SPLIT(i915));
drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
- i915->vbt.lvds_ssc_freq);
+ i915->display.vbt.lvds_ssc_freq);
}
/* Common defaults which may be overridden by VBT. */
@@ -2883,7 +2917,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (port == PORT_A)
child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
- list_add_tail(&devdata->node, &i915->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
drm_dbg_kms(&i915->drm,
"Generating default VBT child device with type 0x04%x on port %c\n",
@@ -2891,7 +2925,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
}
/* Bypass some minimum baseline VBT version checks */
- i915->vbt.version = 155;
+ i915->display.vbt.version = 155;
}
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
@@ -3078,12 +3112,12 @@ err_unmap_oprom:
*/
void intel_bios_init(struct drm_i915_private *i915)
{
- const struct vbt_header *vbt = i915->opregion.vbt;
+ const struct vbt_header *vbt = i915->display.opregion.vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- INIT_LIST_HEAD(&i915->vbt.display_devices);
- INIT_LIST_HEAD(&i915->vbt.bdb_blocks);
+ INIT_LIST_HEAD(&i915->display.vbt.display_devices);
+ INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks);
if (!HAS_DISPLAY(i915)) {
drm_dbg_kms(&i915->drm,
@@ -3111,11 +3145,11 @@ void intel_bios_init(struct drm_i915_private *i915)
goto out;
bdb = get_bdb_header(vbt);
- i915->vbt.version = bdb->version;
+ i915->display.vbt.version = bdb->version;
drm_dbg_kms(&i915->drm,
"VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, i915->vbt.version);
+ (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version);
init_bdb_blocks(i915, bdb);
@@ -3172,13 +3206,13 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
struct intel_bios_encoder_data *devdata, *nd;
struct bdb_block_entry *entry, *ne;
- list_for_each_entry_safe(devdata, nd, &i915->vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
- list_for_each_entry_safe(entry, ne, &i915->vbt.bdb_blocks, node) {
+ list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) {
list_del(&entry->node);
kfree(entry);
}
@@ -3212,13 +3246,13 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (!i915->vbt.int_tv_support)
+ if (!i915->display.vbt.int_tv_support)
return false;
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
/*
@@ -3255,10 +3289,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
/* If the device type is not LFP, continue.
@@ -3285,7 +3319,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (i915->opregion.vbt)
+ if (i915->display.opregion.vbt)
return true;
}
@@ -3304,7 +3338,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->vbt.ports[port];
+ return i915->display.vbt.ports[port];
}
/**
@@ -3364,7 +3398,7 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
const struct child_device_config *child;
u8 dvo_port;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -3463,7 +3497,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -3494,7 +3528,7 @@ bool
intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
if (drm_WARN_ON_ONCE(&i915->drm,
!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
@@ -3514,7 +3548,7 @@ bool
intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
}
@@ -3530,7 +3564,7 @@ bool
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
return devdata && devdata->child.lane_reversal;
}
@@ -3538,7 +3572,7 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
if (!devdata || !devdata->child.aux_channel) {
@@ -3576,7 +3610,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_D_XELPD;
else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC3;
@@ -3586,7 +3620,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_E_XELPD;
else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC4;
@@ -3594,25 +3628,25 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC1;
else
aux_ch = AUX_CH_F;
break;
case DP_AUX_G:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC2;
else
aux_ch = AUX_CH_G;
break;
case DP_AUX_H:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC3;
else
aux_ch = AUX_CH_H;
break;
case DP_AUX_I:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC4;
else
aux_ch = AUX_CH_I;
@@ -3632,7 +3666,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_max_tmds_clock(devdata);
}
@@ -3641,14 +3675,14 @@ int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_hdmi_level_shift(devdata);
}
int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.dp_iboost_level);
@@ -3656,7 +3690,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.hdmi_iboost_level);
@@ -3665,15 +3699,23 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_dp_max_link_rate(devdata);
}
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_dp_max_lane_count(devdata);
+}
+
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
if (!devdata || !devdata->child.ddc_pin)
return 0;
@@ -3683,16 +3725,16 @@ int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->vbt.version >= 195 && devdata->child.dp_usb_type_c;
+ return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c;
}
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->vbt.version >= 209 && devdata->child.tbt;
+ return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->vbt.ports[port];
+ return i915->display.vbt.ports[port];
}
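
Note: the new _intel_bios_dp_max_lane_count() helper above returns 0 (no limit from VBT) for VBT versions before 244 and otherwise adds 1 to the child device field, which is stored zero-based. A hedged userspace model of that lookup, with a fake struct standing in for the encoder data:

	#include <stdio.h>

	/* Illustrative model only: version < 244 means the field does not exist,
	 * and the field itself is stored as (lanes - 1) in the VBT. */
	struct fake_devdata {
		int vbt_version;
		unsigned int dp_max_lane_count;	/* zero-based in the VBT */
	};

	static unsigned int vbt_dp_max_lane_count(const struct fake_devdata *d)
	{
		if (!d || d->vbt_version < 244)
			return 0;		/* 0 == no limit from VBT */
		return d->dp_max_lane_count + 1;
	}

	int main(void)
	{
		struct fake_devdata old = { .vbt_version = 230, .dp_max_lane_count = 3 };
		struct fake_devdata cur = { .vbt_version = 250, .dp_max_lane_count = 3 };

		printf("old VBT: %u lanes, new VBT: %u lanes\n",
		       vbt_dp_max_lane_count(&old), vbt_dp_max_lane_count(&cur));
		return 0;
	}
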
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e47582b0de0a..e375405a7828 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -258,6 +258,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 79269d2c476b..4ace026b29bd 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,15 +5,17 @@
#include <drm/drm_atomic_state_helper.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "skl_watermark.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -137,6 +139,42 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return 0;
}
+static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp, int point)
+{
+ u32 val, val2;
+ u16 dclk;
+
+ val = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
+ val2 = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
+ dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
+ sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
+ sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
+
+ sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
+ sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
+static int
+intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return mtl_read_qgv_point_info(dev_priv, sp, point);
+ else if (IS_DG1(dev_priv))
+ return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ else
+ return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+}
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi,
bool is_y_tile)
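
For reference, the dclk decode in mtl_read_qgv_point_info() above scales the raw register field by 16667/1000, i.e. it treats the field as counting units of roughly 16.667 MHz. A standalone sketch of that arithmetic; the field value 240 is made up, not from the patch.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int dclk_field = 240;  /* hypothetical raw MTL_DCLK_MASK value */
            unsigned int dclk = DIV_ROUND_UP(16667 * dclk_field, 1000);

            printf("dclk = %u\n", dclk);    /* prints 4001 */
            return 0;
    }
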
@@ -147,7 +185,32 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = 4;
+ qi->max_numchannels = 2;
+ qi->channel_width = 64;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_LPDDR4:
+ case INTEL_DRAM_LPDDR5:
+ qi->t_bl = 16;
+ qi->max_numchannels = 8;
+ qi->channel_width = 16;
+ qi->deinterleave = 4;
+ break;
+ default:
+ MISSING_CASE(dram_info->type);
+ return -EINVAL;
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -181,7 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
@@ -193,11 +256,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- if (IS_DG1(dev_priv))
- ret = dg1_mchbar_read_qgv_point_info(dev_priv, sp, i);
- else
- ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
-
+ ret = intel_read_qgv_point_info(dev_priv, sp, i);
if (ret)
return ret;
@@ -284,6 +343,13 @@ static const struct intel_sa_info adlp_sa_info = {
.derating = 20,
};
+static const struct intel_sa_info mtl_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -292,7 +358,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
@@ -308,7 +374,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
int clpchgroup;
int j;
@@ -346,9 +412,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->sagv_status = I915_SAGV_ENABLED;
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
return 0;
}
@@ -363,7 +429,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
@@ -399,20 +465,22 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
- if (i < num_groups - 1)
- bi_next = &dev_priv->max_bw[i + 1];
-
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
- if (i < num_groups - 1 && clpchgroup < clperchgroup)
- bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
- else
- bi_next->num_planes = 0;
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->display.bw.max[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) /
+ clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
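
The rework in the hunk above changes behaviour, not just layout: previously bi_next was assigned only for i < num_groups - 1, yet the else branch still wrote bi_next->num_planes on the last iteration, through a pointer that was never initialized. Folding the whole computation under the same bound check removes that. A trivial standalone illustration of the guarded next-element pattern, with made-up array size and values:

    #include <stdio.h>

    int main(void)
    {
            int num_planes[3] = { 0 };
            int num_groups = 3;

            for (int i = 0; i < num_groups; i++) {
                    if (i < num_groups - 1) {
                            int *next = &num_planes[i + 1]; /* exists only under the guard */
                            *next = 10 - i;                 /* arbitrary demo value */
                    }
            }

            for (int i = 0; i < num_groups; i++)
                    printf("%d ", num_planes[i]);           /* 0 10 9 */
            printf("\n");
            return 0;
    }
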
@@ -456,9 +524,9 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->sagv_status = I915_SAGV_ENABLED;
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
return 0;
}
@@ -466,7 +534,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
static void dg2_get_bw_info(struct drm_i915_private *i915)
{
unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->max_bw);
+ int num_groups = ARRAY_SIZE(i915->display.bw.max);
int i;
/*
@@ -477,7 +545,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->max_bw[i];
+ struct intel_bw_info *bi = &i915->display.bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -485,7 +553,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
}
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
@@ -498,9 +566,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->max_bw[i];
+ &dev_priv->display.bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -526,9 +594,9 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->max_bw[i];
+ &dev_priv->display.bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -541,14 +609,14 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
return bi->deratedbw[qgv_point];
}
- return dev_priv->max_bw[0].deratedbw[qgv_point];
+ return dev_priv->display.bw.max[0].deratedbw[qgv_point];
}
static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->max_bw[0];
+ &dev_priv->display.bw.max[0];
return bi->psf_bw[psf_gv_point];
}
@@ -558,7 +626,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_DG2(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 14)
+ tgl_get_bw_info(dev_priv, &mtl_sa_info);
+ else if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
else if (IS_ALDERLAKE_P(dev_priv))
tgl_get_bw_info(dev_priv, &adlp_sa_info);
@@ -667,7 +737,7 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -678,7 +748,7 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -689,7 +759,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
@@ -896,8 +966,8 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
- unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points;
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -970,8 +1040,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
int i, ret;
u16 qgv_points = 0, psf_points = 0;
unsigned int max_bw_point = 0, max_bw = 0;
- unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
- unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+ unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
bool changed = false;
/* FIXME earlier gens need some checks too */
@@ -1126,7 +1196,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
&state->base, &intel_bw_funcs);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 6e80162632dd..ed05070b7307 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -79,26 +79,26 @@ struct intel_cdclk_funcs {
void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
+ dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config);
}
static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
+ dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe);
}
static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_config)
{
- return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
+ return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config);
}
static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
int cdclk)
{
- return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
+ return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk);
}
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
@@ -548,7 +548,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
else
default_credits = PFI_CREDIT(8);
- if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_63;
@@ -1026,7 +1026,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(&dev_priv->drm, "DPLL0 not locked\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
/* We'll want to keep using the current vco from now on. */
skl_set_preferred_cdclk_vco(dev_priv, vco);
@@ -1040,7 +1040,7 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
@@ -1049,7 +1049,7 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
switch (cdclk) {
default:
drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
+ cdclk != dev_priv->display.cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
fallthrough;
case 308571:
@@ -1098,13 +1098,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco) {
+ if (dev_priv->display.cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
@@ -1116,7 +1116,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
intel_de_posting_read(dev_priv, CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
/* Wa Display #1183: skl,kbl,cfl */
@@ -1151,11 +1151,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
goto sanitize;
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1166,7 +1166,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
*/
cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+ skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk);
if (cdctl == expected)
/* All well; nothing to sanitize */
return;
@@ -1175,9 +1175,9 @@ sanitize:
drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = -1;
}
static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1186,19 +1186,19 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
skl_sanitize_cdclk(dev_priv);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0) {
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0) {
/*
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
if (dev_priv->skl_preferred_vco_freq == 0)
skl_set_preferred_cdclk_vco(dev_priv,
- dev_priv->cdclk.hw.vco);
+ dev_priv->display.cdclk.hw.vco);
return;
}
- cdclk_config = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
@@ -1211,7 +1211,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
@@ -1352,35 +1352,35 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = {
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
drm_WARN(&dev_priv->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ min_cdclk, dev_priv->display.cdclk.hw.ref);
return 0;
}
static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk == cdclk)
- return dev_priv->cdclk.hw.ref * table[i].ratio;
+ return dev_priv->display.cdclk.hw.ref * table[i].ratio;
drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ cdclk, dev_priv->display.cdclk.hw.ref);
return 0;
}
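
bxt_calc_cdclk_pll_vco() above recovers the PLL VCO as refclk * ratio from the platform cdclk table. A standalone sketch of that relation; the refclk/ratio pair below is illustrative, not a claim about any specific table entry.

    #include <stdio.h>

    int main(void)
    {
            unsigned int refclk = 19200;    /* kHz, hypothetical reference clock */
            unsigned int ratio  = 34;       /* hypothetical table ratio */

            printf("vco = %u kHz\n", refclk * ratio);   /* 652800 kHz */
            return 0;
    }
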
@@ -1554,12 +1554,12 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));
@@ -1571,7 +1571,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
@@ -1583,12 +1583,12 @@ static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
u32 val;
val = ICL_CDCLK_PLL_RATIO(ratio);
@@ -1601,12 +1601,12 @@ static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
u32 val;
/* Write PLL ratio without disabling */
@@ -1625,7 +1625,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
val &= ~BXT_DE_PLL_FREQ_REQ;
intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1655,7 +1655,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
+ cdclk != dev_priv->display.cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
fallthrough;
case 2:
@@ -1672,19 +1672,19 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk == cdclk)
return table[i].waveform;
drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ cdclk, dev_priv->display.cdclk.hw.ref);
return 0xffff;
}
@@ -1721,22 +1721,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
- if (dev_priv->cdclk.hw.vco != vco)
+ if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
+ if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
icl_cdclk_pll_disable(dev_priv);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
icl_cdclk_pll_enable(dev_priv, vco);
} else {
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
bxt_de_pll_disable(dev_priv);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
bxt_de_pll_enable(dev_priv, vco);
}
@@ -1803,7 +1803,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
+ dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1812,10 +1812,10 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
int cdclk, clock, vco;
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1833,32 +1833,32 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
/* Make sure this is a legal cdclk value for the platform */
- cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
- if (cdclk != dev_priv->cdclk.hw.cdclk)
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
+ if (cdclk != dev_priv->display.cdclk.hw.cdclk)
goto sanitize;
/* Make sure the VCO is correct for the cdclk */
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- if (vco != dev_priv->cdclk.hw.vco)
+ if (vco != dev_priv->display.cdclk.hw.vco)
goto sanitize;
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
if (has_cdclk_squasher(dev_priv))
- clock = dev_priv->cdclk.hw.vco / 2;
+ clock = dev_priv->display.cdclk.hw.vco / 2;
else
- clock = dev_priv->cdclk.hw.cdclk;
+ clock = dev_priv->display.cdclk.hw.cdclk;
expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
- dev_priv->cdclk.hw.vco);
+ dev_priv->display.cdclk.hw.vco);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->cdclk.hw.cdclk >= 500000)
+ dev_priv->display.cdclk.hw.cdclk >= 500000)
expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (cdctl == expected)
@@ -1869,10 +1869,10 @@ sanitize:
drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = -1;
}
static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1881,11 +1881,11 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
bxt_sanitize_cdclk(dev_priv);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0)
return;
- cdclk_config = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->display.cdclk.hw;
/*
* FIXME:
@@ -1902,7 +1902,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
@@ -1916,7 +1916,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
* intel_cdclk_init_hw - Initialize CDCLK hardware
* @i915: i915 device
*
- * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and
* sanitizing the state of the hardware if needed. This is generally done only
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
@@ -2077,10 +2077,10 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
+ if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
return;
intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
@@ -2098,12 +2098,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
* functions use cdclk. Not all platforms/ports do,
* but we'll lock them all for simplicity.
*/
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&dev_priv->display.gmbus.mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
- &dev_priv->gmbus_mutex);
+ &dev_priv->display.gmbus.mutex);
}
intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
@@ -2113,7 +2113,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
mutex_unlock(&intel_dp->aux.hw_mutex);
}
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&dev_priv->display.gmbus.mutex);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2124,9 +2124,9 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_audio_cdclk_change_post(dev_priv);
if (drm_WARN(&dev_priv->drm,
- intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]");
intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]");
}
}
@@ -2300,7 +2300,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
/*
- * HACK. Currently for TGL platforms we calculate
+ * HACK. Currently for TGL/DG2 platforms we calculate
* min_cdclk initially based on pixel_rate divided
* by 2, accounting for also plane requirements,
* however in some cases the lowest possible CDCLK
@@ -2308,14 +2308,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* Explicitly stating here that this seems to be currently
* rather a Hack, than final solution.
*/
- if (IS_TIGERLAKE(dev_priv)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
/*
* Clamp to max_cdclk_freq in case pixel rate is higher,
* in order not to break an 8K, but still leave W/A at place.
*/
min_cdclk = max_t(int, min_cdclk,
min_t(int, crtc_state->pixel_rate,
- dev_priv->max_cdclk_freq));
+ dev_priv->display.cdclk.max_cdclk_freq));
}
return min_cdclk;
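
The TGL/DG2 hunk above clamps min_cdclk to the lesser of the pixel rate and max_cdclk_freq, so the workaround cannot demand a CDCLK the platform cannot provide. A standalone sketch of the same arithmetic with made-up numbers:

    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
    #define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            int min_cdclk = 300000;         /* kHz, hypothetical starting value */
            int pixel_rate = 1188000;       /* kHz, hypothetical high pixel rate */
            int max_cdclk_freq = 652800;    /* kHz, hypothetical platform max */

            min_cdclk = max_t(int, min_cdclk,
                              min_t(int, pixel_rate, max_cdclk_freq));

            printf("min_cdclk = %d kHz\n", min_cdclk);  /* 652800 */
            return 0;
    }
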
@@ -2368,10 +2368,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
for_each_pipe(dev_priv, pipe)
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
- if (min_cdclk > dev_priv->max_cdclk_freq) {
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
drm_dbg_kms(&dev_priv->drm,
"required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
+ min_cdclk, dev_priv->display.cdclk.max_cdclk_freq);
return -EINVAL;
}
@@ -2643,7 +2643,7 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *cdclk_state;
- cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj);
if (IS_ERR(cdclk_state))
return ERR_CAST(cdclk_state);
@@ -2693,7 +2693,7 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv)
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
@@ -2799,7 +2799,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
- int max_cdclk_freq = dev_priv->max_cdclk_freq;
+ int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq;
if (DISPLAY_VER(dev_priv) >= 10)
return 2 * max_cdclk_freq;
@@ -2825,19 +2825,19 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
if (IS_JSL_EHL(dev_priv)) {
- if (dev_priv->cdclk.hw.ref == 24000)
- dev_priv->max_cdclk_freq = 552000;
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 552000;
else
- dev_priv->max_cdclk_freq = 556800;
+ dev_priv->display.cdclk.max_cdclk_freq = 556800;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->cdclk.hw.ref == 24000)
- dev_priv->max_cdclk_freq = 648000;
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 648000;
else
- dev_priv->max_cdclk_freq = 652800;
+ dev_priv->display.cdclk.max_cdclk_freq = 652800;
} else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->max_cdclk_freq = 316800;
+ dev_priv->display.cdclk.max_cdclk_freq = 316800;
} else if (IS_BROXTON(dev_priv)) {
- dev_priv->max_cdclk_freq = 624000;
+ dev_priv->display.cdclk.max_cdclk_freq = 624000;
} else if (DISPLAY_VER(dev_priv) == 9) {
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -2859,7 +2859,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
else
max_cdclk = 308571;
- dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
@@ -2868,26 +2868,26 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* available? PCI ID, VTB, something else?
*/
if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
- dev_priv->max_cdclk_freq = 450000;
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev_priv))
- dev_priv->max_cdclk_freq = 450000;
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
else if (IS_BDW_ULT(dev_priv))
- dev_priv->max_cdclk_freq = 540000;
+ dev_priv->display.cdclk.max_cdclk_freq = 540000;
else
- dev_priv->max_cdclk_freq = 675000;
+ dev_priv->display.cdclk.max_cdclk_freq = 675000;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 320000;
+ dev_priv->display.cdclk.max_cdclk_freq = 320000;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 400000;
+ dev_priv->display.cdclk.max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
- dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
+ dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
}
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
+ dev_priv->display.cdclk.max_cdclk_freq);
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
dev_priv->max_dotclk_freq);
@@ -2901,7 +2901,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+ intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -2911,7 +2911,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_de_write(dev_priv, GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000));
}
static int dg1_rawclk(struct drm_i915_private *dev_priv)
@@ -3036,6 +3036,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
freq = dg1_rawclk(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ /*
+ * MTL always uses a 38.4 MHz rawclk. The bspec tells us
+ * "RAWCLK_FREQ defaults to the values for 38.4 and does
+ * not need to be programmed."
+ */
+ freq = 38400;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -3187,78 +3194,78 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = dg2_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- dev_priv->cdclk.table = adlp_a_step_cdclk_table;
+ dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
else
- dev_priv->cdclk.table = adlp_cdclk_table;
+ dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = rkl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (IS_JSL_EHL(dev_priv)) {
- dev_priv->cdclk_funcs = &ehl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->cdclk_funcs = &icl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &icl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- dev_priv->cdclk_funcs = &bxt_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs;
if (IS_GEMINILAKE(dev_priv))
- dev_priv->cdclk.table = glk_cdclk_table;
+ dev_priv->display.cdclk.table = glk_cdclk_table;
else
- dev_priv->cdclk.table = bxt_cdclk_table;
+ dev_priv->display.cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->cdclk_funcs = &skl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &skl_cdclk_funcs;
} else if (IS_BROADWELL(dev_priv)) {
- dev_priv->cdclk_funcs = &bdw_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->cdclk_funcs = &hsw_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &chv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &chv_cdclk_funcs;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &vlv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_IRONLAKE(dev_priv)) {
- dev_priv->cdclk_funcs = &ilk_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs;
} else if (IS_GM45(dev_priv)) {
- dev_priv->cdclk_funcs = &gm45_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs;
} else if (IS_G45(dev_priv)) {
- dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I965GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i965gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs;
} else if (IS_I965G(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &pnv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs;
} else if (IS_G33(dev_priv)) {
- dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I945GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i945gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs;
} else if (IS_I945G(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_I915GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i915gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs;
} else if (IS_I915G(dev_priv)) {
- dev_priv->cdclk_funcs = &i915g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs;
} else if (IS_I865G(dev_priv)) {
- dev_priv->cdclk_funcs = &i865g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs;
} else if (IS_I85X(dev_priv)) {
- dev_priv->cdclk_funcs = &i85x_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs;
} else if (IS_I845G(dev_priv)) {
- dev_priv->cdclk_funcs = &i845g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs;
} else if (IS_I830(dev_priv)) {
- dev_priv->cdclk_funcs = &i830_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
}
- if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs,
+ if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk,
"Unknown platform. Assuming i830\n"))
- dev_priv->cdclk_funcs = &i830_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index b535cf6a7d9e..c674879a84a5 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -77,9 +77,9 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
#define intel_atomic_get_old_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
int intel_cdclk_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 9583d17e858d..6bda4274eae9 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -26,6 +26,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
+#include "intel_dsb.h"
#include "vlv_dsi_pll.h"
struct intel_color_funcs {
@@ -1167,22 +1168,22 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->load_luts(crtc_state);
+ dev_priv->display.funcs.color->load_luts(crtc_state);
}
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->color_funcs->color_commit_noarm)
- dev_priv->color_funcs->color_commit_noarm(crtc_state);
+ if (dev_priv->display.funcs.color->color_commit_noarm)
+ dev_priv->display.funcs.color->color_commit_noarm(crtc_state);
}
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->color_commit_arm(crtc_state);
+ dev_priv->display.funcs.color->color_commit_arm(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -1238,15 +1239,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- return dev_priv->color_funcs->color_check(crtc_state);
+ return dev_priv->display.funcs.color->color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->color_funcs->read_luts)
- dev_priv->color_funcs->read_luts(crtc_state);
+ if (dev_priv->display.funcs.color->read_luts)
+ dev_priv->display.funcs.color->read_luts(crtc_state);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -2225,28 +2226,28 @@ void intel_color_init(struct intel_crtc *crtc)
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->color_funcs = &chv_color_funcs;
+ dev_priv->display.funcs.color = &chv_color_funcs;
} else if (DISPLAY_VER(dev_priv) >= 4) {
- dev_priv->color_funcs = &i965_color_funcs;
+ dev_priv->display.funcs.color = &i965_color_funcs;
} else {
- dev_priv->color_funcs = &i9xx_color_funcs;
+ dev_priv->display.funcs.color = &i9xx_color_funcs;
}
} else {
if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->color_funcs = &icl_color_funcs;
+ dev_priv->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(dev_priv) == 10)
- dev_priv->color_funcs = &glk_color_funcs;
+ dev_priv->display.funcs.color = &glk_color_funcs;
else if (DISPLAY_VER(dev_priv) == 9)
- dev_priv->color_funcs = &skl_color_funcs;
+ dev_priv->display.funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(dev_priv) == 8)
- dev_priv->color_funcs = &bdw_color_funcs;
+ dev_priv->display.funcs.color = &bdw_color_funcs;
else if (DISPLAY_VER(dev_priv) == 7) {
if (IS_HASWELL(dev_priv))
- dev_priv->color_funcs = &hsw_color_funcs;
+ dev_priv->display.funcs.color = &hsw_color_funcs;
else
- dev_priv->color_funcs = &ivb_color_funcs;
+ dev_priv->display.funcs.color = &ivb_color_funcs;
} else
- dev_priv->color_funcs = &ilk_color_funcs;
+ dev_priv->display.funcs.color = &ilk_color_funcs;
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1dcc268927a2..6d5cbeb8df4d 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -229,7 +229,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
- prop = dev_priv->force_audio_property;
+ prop = dev_priv->display.properties.force_audio;
if (prop == NULL) {
prop = drm_property_create_enum(dev, 0,
"audio",
@@ -238,7 +238,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
if (prop == NULL)
return;
- dev_priv->force_audio_property = prop;
+ dev_priv->display.properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -256,7 +256,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
- prop = dev_priv->broadcast_rgb_property;
+ prop = dev_priv->display.properties.broadcast_rgb;
if (prop == NULL) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
@@ -265,7 +265,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
if (prop == NULL)
return;
- dev_priv->broadcast_rgb_property = prop;
+ dev_priv->display.properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 6a3893c8ff22..4a8ff2f97608 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -46,6 +46,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -444,6 +445,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
/* FDI must always be 2.7 GHz */
pipe_config->port_clock = 135000 * 2;
+ adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+
return 0;
}
@@ -643,9 +646,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct i2c_adapter *i2c;
bool ret = false;
- BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
-
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
@@ -931,7 +932,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev_priv))
goto out;
@@ -1110,8 +1111,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = intel_de_read(dev_priv,
- FDI_RX_CTL(PIPE_A)) & fdi_config;
+ dev_priv->display.fdi.rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 4442aa355f86..6792a9056f46 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank_work.h>
#include "i915_irq.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 4ca6e9493ff2..e9212f69c360 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -134,8 +134,8 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
drm_dbg_kms(&i915->drm,
"\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
@@ -262,10 +262,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (DISPLAY_VER(i915) >= 9)
drm_dbg_kms(&i915->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
if (HAS_GMCH(i915))
drm_dbg_kms(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index c2797ad2d313..87899e89b3a7 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
#include "intel_atomic.h"
@@ -20,9 +19,9 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_watermark.h"
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
@@ -144,8 +143,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
}
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2330604b0bcc..da8472cdc135 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -57,6 +57,7 @@
#include "intel_lspcon.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_quirks.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
@@ -323,28 +324,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
-{
- int dotclock;
-
- if (intel_crtc_has_dp_encoder(pipe_config))
- dotclock = intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
- dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
- else
- dotclock = pipe_config->port_clock;
-
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- !intel_crtc_has_dp_encoder(pipe_config))
- dotclock *= 2;
-
- if (pipe_config->pixel_multiplier)
- dotclock /= pipe_config->pixel_multiplier;
-
- return dotclock;
-}
-
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
/* CRT dotclock is determined via other means */
@@ -631,7 +610,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
- if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
drm_dbg_kms(&dev_priv->drm,
"Quirk Increase DDI disabled time\n");
@@ -1425,7 +1404,7 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
@@ -1435,17 +1414,17 @@ static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
*/
intel_de_rmw(i915, reg, clk_off, 0);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, reg, 0, clk_off);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
@@ -1720,12 +1699,12 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
intel_de_write(i915, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
@@ -1734,12 +1713,12 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
enum port port = encoder->port;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
@@ -1824,7 +1803,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
@@ -1832,7 +1811,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder,
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
@@ -1840,12 +1819,12 @@ static void skl_ddi_disable_clock(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
@@ -2691,10 +2670,14 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
@@ -2862,6 +2845,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ u32 buf_ctl;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
@@ -2919,8 +2904,12 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* On ADL_P the PHY link rate and lane count must be programmed but
* these are both 0 for HDMI.
*/
- intel_de_write(dev_priv, DDI_BUF_CTL(port),
- dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+ buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -3611,10 +3600,22 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(crtc_state))
- return intel_dp_initial_fastset_check(encoder, crtc_state);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool fastset = true;
- return true;
+ if (intel_phy_is_tc(i915, phy)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ !intel_dp_initial_fastset_check(encoder, crtc_state))
+ fastset = false;
+
+ return fastset;
}
static enum intel_output_type
@@ -4028,7 +4029,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
@@ -4036,7 +4037,7 @@ static bool lpt_digital_port_connected(struct intel_encoder *encoder)
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, DEISR) & bit;
}
@@ -4044,7 +4045,7 @@ static bool hsw_digital_port_connected(struct intel_encoder *encoder)
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index fc5d94862ef3..dd008ba8afe3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -92,6 +91,7 @@
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
+#include "intel_dsb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
@@ -118,6 +118,7 @@
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
@@ -164,16 +165,16 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
*/
void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
- if (dev_priv->wm_disp->update_wm)
- dev_priv->wm_disp->update_wm(dev_priv);
+ if (dev_priv->display.funcs.wm->update_wm)
+ dev_priv->display.funcs.wm->update_wm(dev_priv);
}
static int intel_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->compute_pipe_wm)
- return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
+ if (dev_priv->display.funcs.wm->compute_pipe_wm)
+ return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
return 0;
}
@@ -181,20 +182,20 @@ static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->wm_disp->compute_intermediate_wm)
+ if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
return 0;
if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->wm_disp->compute_pipe_wm))
+ !dev_priv->display.funcs.wm->compute_pipe_wm))
return 0;
- return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
+ return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
}
static bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->initial_watermarks) {
- dev_priv->wm_disp->initial_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->initial_watermarks) {
+ dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
return true;
}
return false;
@@ -204,23 +205,23 @@ static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->atomic_update_watermarks)
- dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->atomic_update_watermarks)
+ dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
}
static void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->optimize_watermarks)
- dev_priv->wm_disp->optimize_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->optimize_watermarks)
+ dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
}
static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->compute_global_watermarks)
- return dev_priv->wm_disp->compute_global_watermarks(state);
+ if (dev_priv->display.funcs.wm->compute_global_watermarks)
+ return dev_priv->display.funcs.wm->compute_global_watermarks(state);
return 0;
}
@@ -619,7 +620,10 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
+ FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
+ else if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
@@ -671,7 +675,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
return DISPLAY_VER(dev_priv) < 4 ||
(plane->fbc &&
- plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
+ plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
/*
@@ -1487,7 +1491,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
* Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
* TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/
- if (i915->dpll.mgr) {
+ if (i915->display.dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -1839,7 +1843,9 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+ i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
+ CHICKEN_TRANS(transcoder);
u32 val;
val = intel_de_read(dev_priv, reg);
@@ -2081,22 +2087,20 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
- else if (IS_DG2(dev_priv))
- /*
- * DG2 outputs labelled as "combo PHY" in the bspec use
- * SNPS PHYs with completely different programming,
- * hence we always return false here.
- */
- return false;
else if (IS_ALDERLAKE_S(dev_priv))
return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
else if (IS_JSL_EHL(dev_priv))
return phy <= PHY_C;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
return phy <= PHY_B;
else
+ /*
+ * DG2 outputs labelled as "combo PHY" in the bspec use
+ * SNPS PHYs with completely different programming,
+ * hence we always return false here.
+ */
return false;
}
@@ -2402,7 +2406,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (!dev_priv->wm_disp->initial_watermarks)
+ if (!dev_priv->display.funcs.wm->initial_watermarks)
intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
@@ -2661,7 +2665,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
if (DISPLAY_VER(i915) < 4) {
- clock_limit = i915->max_cdclk_freq * 9 / 10;
+ clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2693,6 +2697,10 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_crtc_compute_pipe_src(crtc_state);
if (ret)
return ret;
@@ -2719,19 +2727,11 @@ intel_reduce_m_n_ratio(u32 *num, u32 *den)
}
}
-static void compute_m_n(unsigned int m, unsigned int n,
- u32 *ret_m, u32 *ret_n,
- bool constant_n)
+static void compute_m_n(u32 *ret_m, u32 *ret_n,
+ u32 m, u32 n, u32 constant_n)
{
- /*
- * Several DP dongles in particular seem to be fussy about
- * too large link M/N values. Give N value as 0x8000 that
- * should be acceptable by specific devices. 0x8000 is the
- * specified fixed N value for asynchronous clock mode,
- * which the devices expect also in synchronous clock mode.
- */
if (constant_n)
- *ret_n = DP_LINK_CONSTANT_N_VALUE;
+ *ret_n = constant_n;
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -2743,22 +2743,28 @@ void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n, bool fec_enable)
+ bool fec_enable)
{
u32 data_clock = bits_per_pixel * pixel_clock;
if (fec_enable)
data_clock = intel_dp_mode_to_fec_clock(data_clock);
+ /*
+ * Windows/BIOS uses fixed M/N values always. Follow suit.
+ *
+ * Also several DP dongles in particular seem to be fussy
+ * about too large link M/N values. Presumably the 20bit
+ * value used by Windows/BIOS is acceptable to everyone.
+ */
m_n->tu = 64;
- compute_m_n(data_clock,
- link_clock * nlanes * 8,
- &m_n->data_m, &m_n->data_n,
- constant_n);
+ compute_m_n(&m_n->data_m, &m_n->data_n,
+ data_clock, link_clock * nlanes * 8,
+ 0x8000000);
- compute_m_n(pixel_clock, link_clock,
- &m_n->link_m, &m_n->link_n,
- constant_n);
+ compute_m_n(&m_n->link_m, &m_n->link_n,
+ pixel_clock, link_clock,
+ 0x80000);
}
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
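For illustration only (not part of the patch): a rough user-space sketch of the fixed-N link M/N arithmetic the hunk above switches to. The mode, lane count and link rate below are made-up example values, and the fixed N mirrors the constant passed for the data M/N above; the kernel additionally reduces the resulting ratio via intel_reduce_m_n_ratio().

#include <stdint.h>
#include <stdio.h>

/* Derive M for a fixed N, mirroring the fixed-N scheme used above. */
static void example_compute_m_n(uint64_t m, uint64_t n, uint32_t fixed_n,
                                uint32_t *ret_m, uint32_t *ret_n)
{
        *ret_n = fixed_n;
        *ret_m = (uint32_t)(m * fixed_n / n);
}

int main(void)
{
        uint32_t data_m, data_n;

        /* Hypothetical mode: 24 bpp, 148500 kHz pixel clock, 4 lanes at 540000 kHz. */
        uint64_t data_clock = 24ULL * 148500;            /* bits_per_pixel * pixel_clock */
        uint64_t link_symbol_clock = 4ULL * 540000 * 8;  /* link_clock * nlanes * 8 */

        example_compute_m_n(data_clock, link_symbol_clock, 0x8000000, &data_m, &data_n);
        printf("data M/N = %u/%u\n", data_m, data_n);
        return 0;
}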
@@ -2774,12 +2780,12 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
drm_dbg_kms(&dev_priv->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
str_enabled_disabled(bios_lvds_use_ssc),
- str_enabled_disabled(dev_priv->vbt.lvds_use_ssc));
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
+ dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
}
@@ -4127,7 +4133,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
+ MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
+ CHICKEN_TRANS(pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -4146,7 +4154,7 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display->get_pipe_config(crtc, crtc_state))
+ if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -4375,7 +4383,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->vbt.lvds_ssc_freq;
+ return dev_priv->display.vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
else if (DISPLAY_VER(dev_priv) != 2)
@@ -4493,7 +4501,31 @@ int intel_dotclock_calculate(int link_freq,
if (!m_n->link_n)
return 0;
- return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
+ m_n->link_n);
+}
+
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
+{
+ int dotclock;
+
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
+ pipe_config->pipe_bpp);
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ !intel_crtc_has_dp_encoder(pipe_config))
+ dotclock *= 2;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ return dotclock;
}
/* Returns the currently programmed mode of the given encoder. */
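Again for illustration only (not part of the patch): why intel_dotclock_calculate() now rounds up. With the fixed-N values computed above, a truncating division loses the fractional kHz, so the dot clock recovered from the programmed M/N no longer matches the clock it was derived from; rounding up restores it exactly, which matters once the state checker compares clocks exactly rather than fuzzily. The values are the same hypothetical mode as in the previous sketch.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_U64(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
        /* Same hypothetical mode as before: 148500 kHz pixel clock on a 540000 kHz link. */
        uint64_t pixel_clock = 148500;
        uint64_t link_clock = 540000;
        uint64_t link_n = 0x80000;                            /* fixed N from the hunk above */
        uint64_t link_m = pixel_clock * link_n / link_clock;  /* 144179 */

        printf("truncated : %llu kHz\n",
               (unsigned long long)(link_m * link_clock / link_n));                /* 148499 */
        printf("rounded up: %llu kHz\n",
               (unsigned long long)DIV_ROUND_UP_U64(link_m * link_clock, link_n)); /* 148500 */
        return 0;
}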
@@ -4754,7 +4786,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->ipc_enabled)
+ skl_watermark_ipc_enabled(dev_priv))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -4800,10 +4832,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed) {
- ret = intel_dpll_crtc_compute_clock(state, crtc);
- if (ret)
- return ret;
-
ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
if (ret)
return ret;
@@ -5368,46 +5396,14 @@ bool intel_fuzzy_clock_check(int clock1, int clock2)
}
static bool
-intel_compare_m_n(unsigned int m, unsigned int n,
- unsigned int m2, unsigned int n2,
- bool exact)
-{
- if (m == m2 && n == n2)
- return true;
-
- if (exact || !m || !n || !m2 || !n2)
- return false;
-
- BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
-
- if (n > n2) {
- while (n > n2) {
- m2 <<= 1;
- n2 <<= 1;
- }
- } else if (n < n2) {
- while (n < n2) {
- m <<= 1;
- n <<= 1;
- }
- }
-
- if (n != n2)
- return false;
-
- return intel_fuzzy_clock_check(m, m2);
-}
-
-static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
- const struct intel_link_m_n *m2_n2,
- bool exact)
+ const struct intel_link_m_n *m2_n2)
{
return m_n->tu == m2_n2->tu &&
- intel_compare_m_n(m_n->data_m, m_n->data_n,
- m2_n2->data_m, m2_n2->data_n, exact) &&
- intel_compare_m_n(m_n->link_m, m_n->link_n,
- m2_n2->link_m, m2_n2->link_n, exact);
+ m_n->data_m == m2_n2->data_m &&
+ m_n->data_n == m2_n2->data_n &&
+ m_n->link_m == m2_n2->link_m &&
+ m_n->link_n == m2_n2->link_n;
}
static bool
@@ -5601,8 +5597,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name,\
- !fastset)) { \
+ &pipe_config->name)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"found tu %i, data %i/%i link %i/%i)", \
@@ -5649,9 +5644,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
*/
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name, !fastset) && \
+ &pipe_config->name) && \
!intel_compare_link_m_n(&current_config->alt_name, \
- &pipe_config->name, !fastset)) { \
+ &pipe_config->name)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"or tu %i data %i/%i link %i/%i, " \
@@ -5686,16 +5681,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
- if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "(expected %i, found %i)", \
- current_config->name, \
- pipe_config->name); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
@@ -5751,8 +5736,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) {
- PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (!fastset || !pipe_config->seamless_m_n)
+ PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
} else {
PIPE_CONF_CHECK_M_N(dp_m_n);
PIPE_CONF_CHECK_M_N(dp_m2_n2);
@@ -5814,7 +5800,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_RECT(pch_pfit.dst);
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
- PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+ PIPE_CONF_CHECK_I(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
if (IS_CHERRYVIEW(dev_priv))
@@ -5841,7 +5827,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->dpll.mgr) {
+ if (dev_priv->display.dpll.mgr) {
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
@@ -5884,9 +5870,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ if (!fastset || !pipe_config->seamless_m_n) {
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+ }
+ PIPE_CONF_CHECK_I(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
@@ -5928,7 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
-#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
@@ -6050,20 +6037,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
}
}
-static void intel_modeset_clear_plls(struct intel_atomic_state *state)
-{
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state))
- continue;
-
- intel_release_shared_dplls(state, crtc);
- }
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -6164,23 +6137,6 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->update_pipe = true;
}
-static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- /*
- * If we're not doing the full modeset we want to
- * keep the current M/N values as they may be
- * sufficiently different to the computed values
- * to cause problems.
- *
- * FIXME: should really copy more fuzzy state here
- */
- new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
- new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
- new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
- new_crtc_state->has_drrs = old_crtc_state->has_drrs;
-}
-
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
u8 plane_ids_mask)
@@ -6837,9 +6793,11 @@ static int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- ret = intel_modeset_pipe_config_late(state, crtc);
- if (ret)
- goto fail;
+ if (new_crtc_state->hw.enable) {
+ ret = intel_modeset_pipe_config_late(state, crtc);
+ if (ret)
+ goto fail;
+ }
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -6890,15 +6848,12 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (intel_crtc_needs_modeset(new_crtc_state)) {
- any_ms = true;
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- }
- if (!new_crtc_state->update_pipe)
- continue;
+ any_ms = true;
- intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
+ intel_release_shared_dplls(state, crtc);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -6939,8 +6894,6 @@ static int intel_atomic_check(struct drm_device *dev,
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
-
- intel_modeset_clear_plls(state);
}
ret = intel_atomic_check_crtcs(state);
@@ -7059,6 +7012,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
+
+ if (new_crtc_state->seamless_m_n)
+ intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+ &new_crtc_state->dp_m_n);
}
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@ -7121,7 +7078,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display->crtc_enable(state, crtc);
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
return;
@@ -7200,7 +7157,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display->crtc_disable(state, crtc);
+ dev_priv->display.funcs.display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
@@ -7411,7 +7368,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
struct intel_atomic_state *state, *next;
struct llist_node *freed;
- freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
llist_for_each_entry_safe(state, next, freed, freed)
drm_atomic_state_put(&state->base);
}
@@ -7419,7 +7376,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+ container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
intel_atomic_helper_free_state(dev_priv);
}
@@ -7532,6 +7489,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_atomic_commit_fence_wait(state);
drm_atomic_helper_wait_for_dependencies(&state->base);
+ drm_dp_mst_atomic_wait_for_dependencies(&state->base);
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
@@ -7588,7 +7546,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display->commit_modeset_enables(state);
+ dev_priv->display.funcs.display->commit_modeset_enables(state);
intel_encoders_update_complete(state);
@@ -7711,7 +7669,7 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
case FENCE_FREE:
{
struct intel_atomic_helper *helper =
- &to_i915(state->base.dev)->atomic_helper;
+ &to_i915(state->base.dev)->display.atomic_helper;
if (llist_add(&state->freed, &helper->free_list))
schedule_work(&helper->free_work);
@@ -7814,12 +7772,12 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&state->commit_ready);
if (nonblock && state->modeset) {
- queue_work(dev_priv->modeset_wq, &state->base.commit_work);
+ queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
} else if (nonblock) {
- queue_work(dev_priv->flip_wq, &state->base.commit_work);
+ queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
} else {
if (state->modeset)
- flush_workqueue(dev_priv->modeset_wq);
+ flush_workqueue(dev_priv->display.wq.modeset);
intel_atomic_commit_tail(state);
}
@@ -7925,7 +7883,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
- if (!dev_priv->vbt.int_crt_support)
+ if (!dev_priv->display.vbt.int_crt_support)
return false;
return true;
@@ -8060,7 +8018,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
intel_crt_init(dev_priv);
/*
@@ -8319,7 +8277,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_free = intel_atomic_state_free,
};
-static const struct drm_i915_display_funcs skl_display_funcs = {
+static const struct intel_display_funcs skl_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
.crtc_disable = hsw_crtc_disable,
@@ -8327,7 +8285,7 @@ static const struct drm_i915_display_funcs skl_display_funcs = {
.get_initial_plane_config = skl_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs ddi_display_funcs = {
+static const struct intel_display_funcs ddi_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
.crtc_disable = hsw_crtc_disable,
@@ -8335,7 +8293,7 @@ static const struct drm_i915_display_funcs ddi_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs pch_split_display_funcs = {
+static const struct intel_display_funcs pch_split_display_funcs = {
.get_pipe_config = ilk_get_pipe_config,
.crtc_enable = ilk_crtc_enable,
.crtc_disable = ilk_crtc_disable,
@@ -8343,7 +8301,7 @@ static const struct drm_i915_display_funcs pch_split_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs vlv_display_funcs = {
+static const struct intel_display_funcs vlv_display_funcs = {
.get_pipe_config = i9xx_get_pipe_config,
.crtc_enable = valleyview_crtc_enable,
.crtc_disable = i9xx_crtc_disable,
@@ -8351,7 +8309,7 @@ static const struct drm_i915_display_funcs vlv_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs i9xx_display_funcs = {
+static const struct intel_display_funcs i9xx_display_funcs = {
.get_pipe_config = i9xx_get_pipe_config,
.crtc_enable = i9xx_crtc_enable,
.crtc_disable = i9xx_crtc_disable,
@@ -8374,16 +8332,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_dpll_init_clock_hook(dev_priv);
if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display = &skl_display_funcs;
+ dev_priv->display.funcs.display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display = &ddi_display_funcs;
+ dev_priv->display.funcs.display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display = &pch_split_display_funcs;
+ dev_priv->display.funcs.display = &pch_split_display_funcs;
} else if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display = &vlv_display_funcs;
+ dev_priv->display.funcs.display = &vlv_display_funcs;
} else {
- dev_priv->display = &i9xx_display_funcs;
+ dev_priv->display.funcs.display = &i9xx_display_funcs;
}
intel_fdi_init_hook(dev_priv);
@@ -8396,11 +8354,11 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
+ cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
intel_update_cdclk(i915);
- intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+ intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -8456,7 +8414,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->wm_disp->optimize_watermarks)
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -8600,6 +8558,10 @@ out:
return ret;
}
+static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
static void intel_mode_config_init(struct drm_i915_private *i915)
{
struct drm_mode_config *mode_config = &i915->drm.mode_config;
@@ -8614,6 +8576,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->prefer_shadow = 1;
mode_config->funcs = &intel_mode_funcs;
+ mode_config->helper_private = &intel_mode_config_funcs;
mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
@@ -8683,11 +8646,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
intel_dmc_ucode_init(i915);
- i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
- i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
- WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
-
- i915->window2_delay = 0; /* No DSB so no window2 delay */
+ i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
intel_mode_config_init(i915);
@@ -8703,8 +8664,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- init_llist_head(&i915->atomic_helper.free_list);
- INIT_WORK(&i915->atomic_helper.free_work,
+ init_llist_head(&i915->display.atomic_helper.free_list);
+ INIT_WORK(&i915->display.atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
intel_init_quirks(i915);
@@ -8764,7 +8725,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_hdcp_component_init(i915);
- if (i915->max_cdclk_freq == 0)
+ if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
/*
@@ -8828,7 +8789,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_hpd_init(i915);
intel_hpd_poll_disable(i915);
- intel_init_ipc(i915);
+ skl_watermark_ipc_init(i915);
return 0;
}
@@ -8959,7 +8920,7 @@ void intel_display_resume(struct drm_device *dev)
if (!ret)
ret = __intel_display_resume(i915, state, &ctx);
- intel_enable_ipc(i915);
+ skl_watermark_ipc_update(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -8994,11 +8955,18 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- flush_workqueue(i915->flip_wq);
- flush_workqueue(i915->modeset_wq);
+ flush_workqueue(i915->display.wq.flip);
+ flush_workqueue(i915->display.wq.modeset);
+
+ flush_work(&i915->display.atomic_helper.free_work);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
- flush_work(&i915->atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
}
/* part #2: call after irq uninstall */
@@ -9013,13 +8981,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
- /*
- * MST topology needs to be suspended so we don't have any calls to
- * fbdev after it's finalized. MST will be destroyed later as part of
- * drm_mode_config_cleanup()
- */
- intel_dp_mst_suspend(i915);
-
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(i915);
@@ -9036,8 +8997,8 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
intel_gmbus_teardown(i915);
- destroy_workqueue(i915->flip_wq);
- destroy_workqueue(i915->modeset_wq);
+ destroy_workqueue(i915->display.wq.flip);
+ destroy_workqueue(i915->display.wq.modeset);
intel_fbc_cleanup(i915);
}
@@ -9084,7 +9045,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
/* Must be done after probing outputs */
intel_opregion_register(i915);
- acpi_video_register();
+ intel_acpi_video_register(i915);
intel_audio_init(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index fa5371036239..884e8e67b17c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -45,7 +45,7 @@ struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_address_space;
-struct i915_ggtt_view;
+struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -375,7 +375,7 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p))
+ for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for_each_pipe(__dev_priv, __p) \
@@ -383,7 +383,7 @@ enum hpd_pin {
#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t))
+ for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
for_each_cpu_transcoder(__dev_priv, __t) \
@@ -547,7 +547,7 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n, bool fec_enable);
+ bool fec_enable);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
new file mode 100644
index 000000000000..96cf994b0ad1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_CORE_H__
+#define __INTEL_DISPLAY_CORE_H__
+
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_connector.h>
+
+#include "intel_cdclk.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_dmc.h"
+#include "intel_dpll_mgr.h"
+#include "intel_fbc.h"
+#include "intel_global_state.h"
+#include "intel_gmbus.h"
+#include "intel_opregion.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct drm_property;
+struct i915_audio_component;
+struct i915_hdcp_comp_master;
+struct intel_atomic_state;
+struct intel_audio_funcs;
+struct intel_bios_encoder_data;
+struct intel_cdclk_funcs;
+struct intel_cdclk_vals;
+struct intel_color_funcs;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dpll_funcs;
+struct intel_dpll_mgr;
+struct intel_fbdev;
+struct intel_fdi_funcs;
+struct intel_hotplug_funcs;
+struct intel_initial_plane_config;
+struct intel_overlay;
+
+/* Amount of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
+/* Amount of PSF GV points, BSpec precisely defines this */
+#define I915_NUM_PSF_GV_POINTS 3
+
+struct intel_display_funcs {
+ /*
+ * Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state.
+ */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+};
+
+/* functions used for watermark calcs for display. */
+struct intel_wm_funcs {
+ /* update_wm is for legacy wm management */
+ void (*update_wm)(struct drm_i915_private *dev_priv);
+ int (*compute_pipe_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_intermediate_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_global_watermarks)(struct intel_atomic_state *state);
+};
+
+struct intel_audio {
+ /* hda/i915 audio component */
+ struct i915_audio_component *component;
+ bool component_registered;
+ /* mutex for audio/video sync */
+ struct mutex mutex;
+ int power_refcount;
+ u32 freq_cntrl;
+
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *encoder_map[I915_MAX_PIPES];
+
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe;
+};
+
+/*
+ * dpll and cdclk state is protected by connection_mutex; dpll.lock serializes
+ * intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
+ * dpll, because on some platforms plls share registers.
+ */
+struct intel_dpll {
+ struct mutex lock;
+
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
+
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+};
+
+struct intel_frontbuffer_tracking {
+ spinlock_t lock;
+
+ /*
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
+struct intel_hotplug {
+ struct delayed_work hotplug_work;
+
+ const u32 *hpd, *pch_hpd;
+
+ struct {
+ unsigned long last_jiffies;
+ int count;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } state;
+ } stats[HPD_NUM_PINS];
+ u32 event_bits;
+ u32 retry_bits;
+ struct delayed_work reenable_work;
+
+ u32 long_port_mask;
+ u32 short_port_mask;
+ struct work_struct dig_port_work;
+
+ struct work_struct poll_init_work;
+ bool poll_enabled;
+
+ unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
+
+ /*
+ * If we get an HPD irq from DP and an HPD irq from non-DP, the
+ * non-DP handler could block the workqueue while waiting for a mode
+ * config mutex that userspace may already hold. However, userspace
+ * may be waiting for the DP workqueue to run, which is blocked
+ * behind the non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+};
+
+struct intel_vbt_data {
+ /* bdb version */
+ u16 version;
+
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ enum drm_panel_orientation orientation;
+
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
+ int crt_ddc_pin;
+
+ struct list_head display_devices;
+ struct list_head bdb_blocks;
+
+ struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
+ struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+ } sdvo_mappings[2];
+};
+
+struct intel_wm {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ u16 pri_latency[5];
+ /* sprite */
+ u16 spr_latency[5];
+ /* cursor */
+ u16 cur_latency[5];
+ /*
+ * Raw watermark memory latency values
+ * for SKL for all 8 levels
+ * in 1us units.
+ */
+ u16 skl_latency[8];
+
+ /* current hardware state */
+ union {
+ struct ilk_wm_values hw;
+ struct vlv_wm_values vlv;
+ struct g4x_wm_values g4x;
+ };
+
+ u8 max_level;
+
+ /*
+ * Should be held around atomic WM register writing; also
+ * protects intel_crtc->wm.active and
+ * crtc_state->wm.need_postvbl_update.
+ */
+ struct mutex wm_mutex;
+
+ bool ipc_enabled;
+};
+
+struct intel_display {
+ /* Display functions */
+ struct {
+ /* Top level crtc-ish functions */
+ const struct intel_display_funcs *display;
+
+ /* Display CDCLK functions */
+ const struct intel_cdclk_funcs *cdclk;
+
+ /* Display pll funcs */
+ const struct intel_dpll_funcs *dpll;
+
+ /* irq display functions */
+ const struct intel_hotplug_funcs *hotplug;
+
+ /* pm display functions */
+ const struct intel_wm_funcs *wm;
+
+ /* fdi display functions */
+ const struct intel_fdi_funcs *fdi;
+
+ /* Display internal color functions */
+ const struct intel_color_funcs *color;
+
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *audio;
+ } funcs;
+
+ /* Grouping using anonymous structs. Keep sorted. */
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
+ struct {
+ /* backlight registers and fields in struct intel_panel */
+ struct mutex lock;
+ } backlight;
+
+ struct {
+ struct intel_global_obj obj;
+
+ struct intel_bw_info {
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
+ /* for each PSF GV point */
+ unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+ u8 num_planes;
+ } max[6];
+ } bw;
+
+ struct {
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
+
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
+ struct intel_global_obj obj;
+
+ unsigned int max_cdclk_freq;
+ } cdclk;
+
+ struct {
+ /* The current hardware dbuf configuration */
+ u8 enabled_slices;
+
+ struct intel_global_obj obj;
+ } dbuf;
+
+ struct {
+ /* VLV/CHV/BXT/GLK DSI MMIO register base address */
+ u32 mmio_base;
+ } dsi;
+
+ struct {
+ /* fbdev registered on this device */
+ struct intel_fbdev *fbdev;
+ struct work_struct suspend_work;
+ } fbdev;
+
+ struct {
+ unsigned int pll_freq;
+ u32 rx_config;
+ } fdi;
+
+ struct {
+ /*
+ * Base address of where the gmbus and gpio blocks are located
+ * (either on PCH or on SoC for platforms without PCH).
+ */
+ u32 mmio_base;
+
+ /*
+ * gmbus.mutex protects against concurrent usage of the single
+ * hw gmbus controller on different i2c buses.
+ */
+ struct mutex mutex;
+
+ struct intel_gmbus *bus[GMBUS_NUM_PINS];
+
+ wait_queue_head_t wait_queue;
+ } gmbus;
+
+ struct {
+ struct i915_hdcp_comp_master *master;
+ bool comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex comp_mutex;
+ } hdcp;
+
+ struct {
+ struct i915_power_domains domains;
+
+ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
+ u32 chv_phy_control;
+
+ /* perform PHY state sanity checks? */
+ bool chv_phy_assert[2];
+ } power;
+
+ struct {
+ u32 mmio_base;
+
+ /* protects panel power sequencer state */
+ struct mutex mutex;
+ } pps;
+
+ struct {
+ struct drm_property *broadcast_rgb;
+ struct drm_property *force_audio;
+ } properties;
+
+ struct {
+ unsigned long mask;
+ } quirks;
+
+ struct {
+ enum {
+ I915_SAGV_UNKNOWN = 0,
+ I915_SAGV_DISABLED,
+ I915_SAGV_ENABLED,
+ I915_SAGV_NOT_CONTROLLED
+ } status;
+
+ u32 block_time_us;
+ } sagv;
+
+ struct {
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset;
+
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip;
+ } wq;
+
+ /* Grouping using named structs. Keep sorted. */
+ struct intel_audio audio;
+ struct intel_dmc dmc;
+ struct intel_dpll dpll;
+ struct intel_fbc *fbc[I915_MAX_FBCS];
+ struct intel_frontbuffer_tracking fb_tracking;
+ struct intel_hotplug hotplug;
+ struct intel_opregion opregion;
+ struct intel_overlay *overlay;
+ struct intel_vbt_data vbt;
+ struct intel_wm wm;
+};
+
+#endif /* __INTEL_DISPLAY_CORE_H__ */
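For illustration only (not part of the patch): what call sites look like once display state is grouped under struct intel_display — fields that used to hang directly off drm_i915_private are now reached through i915->display. A minimal, hypothetical kernel-style fragment using only fields visible in the header above:

/* Hypothetical helper, illustrative access paths only. */
static void example_display_state_access(struct drm_i915_private *i915)
{
        /* Shared DPLL bookkeeping now lives under display.dpll. */
        mutex_lock(&i915->display.dpll.lock);
        mutex_unlock(&i915->display.dpll.lock);

        /* Display workqueues now live under display.wq. */
        flush_workqueue(i915->display.wq.flip);

        /* VBT, hotplug, watermark state etc. follow the same pattern. */
        if (i915->display.vbt.int_crt_support)
                drm_dbg_kms(&i915->drm, "VBT reports integrated CRT support\n");
}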
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 6c3954479047..7c7253a2541c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -26,6 +26,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_watermark.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -37,10 +38,10 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->fb_tracking.busy_bits);
+ dev_priv->display.fb_tracking.busy_bits);
seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->fb_tracking.flip_bits);
+ dev_priv->display.fb_tracking.flip_bits);
return 0;
}
@@ -103,7 +104,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
@@ -113,7 +115,8 @@ static int i915_opregion(struct seq_file *m, void *unused)
static int i915_vbt(struct seq_file *m, void *unused)
{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->vbt)
seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -129,7 +132,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_framebuffer *drm_fb;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
+ fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev);
if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
@@ -722,10 +725,11 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
/* Not all platforms have a scaler */
if (num_scalers) {
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d",
num_scalers,
crtc_state->scaler_state.scaler_users,
- crtc_state->scaler_state.scaler_id);
+ crtc_state->scaler_state.scaler_id,
+ crtc_state->hw.scaling_filter);
for (i = 0; i < num_scalers; i++) {
const struct intel_scaler *sc =
@@ -932,11 +936,11 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
drm_modeset_lock_all(dev);
seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
- dev_priv->dpll.ref_clks.nssc,
- dev_priv->dpll.ref_clks.ssc);
+ dev_priv->display.dpll.ref_clks.nssc,
+ dev_priv->display.dpll.ref_clks.ssc);
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
@@ -979,58 +983,6 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(dev_priv->ipc_enabled));
- return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (!HAS_IPC(dev_priv))
- return -ENODEV;
-
- return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- intel_wakeref_t wakeref;
- bool enable;
- int ret;
-
- ret = kstrtobool_from_user(ubuf, len, &enable);
- if (ret < 0)
- return ret;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (!dev_priv->ipc_enabled && enable)
- drm_info(&dev_priv->drm,
- "Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- }
-
- return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
- .owner = THIS_MODULE,
- .open = i915_ipc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_ipc_status_write
-};
-
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1427,9 +1379,9 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.pri_latency;
+ latencies = dev_priv->display.wm.pri_latency;
wm_latency_show(m, latencies);
@@ -1442,9 +1394,9 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.spr_latency;
+ latencies = dev_priv->display.wm.spr_latency;
wm_latency_show(m, latencies);
@@ -1457,9 +1409,9 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.cur_latency;
+ latencies = dev_priv->display.wm.cur_latency;
wm_latency_show(m, latencies);
@@ -1550,9 +1502,9 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.pri_latency;
+ latencies = dev_priv->display.wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1565,9 +1517,9 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.spr_latency;
+ latencies = dev_priv->display.wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1580,9 +1532,9 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.cur_latency;
+ latencies = dev_priv->display.wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1617,14 +1569,14 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+ flush_work(&dev_priv->display.hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1639,7 +1591,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1678,7 +1630,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
return len;
}
@@ -1702,7 +1654,7 @@ static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled));
+ str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1720,7 +1672,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
char *newline;
char tmp[16];
int i;
@@ -1756,7 +1708,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
return len;
}
@@ -1907,7 +1859,6 @@ static const struct {
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
@@ -1931,6 +1882,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
+ skl_watermark_ipc_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -2137,7 +2089,7 @@ static const struct file_operations i915_dsc_fec_support_fops = {
.write = i915_dsc_fec_support_write
};
-static int i915_dsc_bpp_show(struct seq_file *m, void *data)
+static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_device *dev = connector->dev;
@@ -2160,14 +2112,14 @@ static int i915_dsc_bpp_show(struct seq_file *m, void *data)
}
crtc_state = to_intel_crtc_state(crtc->state);
- seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
+ seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
-static ssize_t i915_dsc_bpp_write(struct file *file,
+static ssize_t i915_dsc_bpc_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
@@ -2175,33 +2127,32 @@ static ssize_t i915_dsc_bpp_write(struct file *file,
((struct seq_file *)file->private_data)->private;
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- int dsc_bpp = 0;
+ int dsc_bpc = 0;
int ret;
- ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
+ ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc);
if (ret < 0)
return ret;
- intel_dp->force_dsc_bpp = dsc_bpp;
+ intel_dp->force_dsc_bpc = dsc_bpc;
*offp += len;
return len;
}
-static int i915_dsc_bpp_open(struct inode *inode,
+static int i915_dsc_bpc_open(struct inode *inode,
struct file *file)
{
- return single_open(file, i915_dsc_bpp_show,
- inode->i_private);
+ return single_open(file, i915_dsc_bpc_show, inode->i_private);
}
-static const struct file_operations i915_dsc_bpp_fops = {
+static const struct file_operations i915_dsc_bpc_fops = {
.owner = THIS_MODULE,
- .open = i915_dsc_bpp_open,
+ .open = i915_dsc_bpc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .write = i915_dsc_bpp_write
+ .write = i915_dsc_bpc_write
};
/*
@@ -2271,8 +2222,8 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
- debugfs_create_file("i915_dsc_bpp", 0644, root,
- connector, &i915_dsc_bpp_fops);
+ debugfs_create_file("i915_dsc_bpc", 0644, root,
+ connector, &i915_dsc_bpc_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 589af257edeb..1e608b9e5055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
@@ -18,8 +19,8 @@
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_snps_phy.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
@@ -243,7 +244,7 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
struct i915_power_domains *power_domains;
bool ret;
- power_domains = &dev_priv->power_domains;
+ power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
ret = __intel_display_power_is_enabled(dev_priv, domain);
@@ -268,7 +269,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
+ if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -291,7 +292,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
{
struct i915_power_well *power_well;
bool dc_off_enabled;
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
@@ -301,7 +302,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->dmc.target_dc_state)
+ if (state == dev_priv->display.dmc.target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->dmc.target_dc_state = state;
+ dev_priv->display.dmc.target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -339,7 +340,7 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
return !drm_WARN_ON(&i915->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
@@ -352,7 +353,7 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
@@ -375,7 +376,7 @@ static void print_power_domains(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
enum intel_display_power_domain domain;
drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
@@ -390,7 +391,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
drm_dbg(&i915->drm, "async_put_wakeref %u\n",
power_domains->async_put_wakeref);
@@ -445,7 +446,7 @@ static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -474,7 +475,7 @@ static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
if (intel_display_power_grab_async_put_ref(dev_priv, domain))
@@ -501,7 +502,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
@@ -527,7 +528,7 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
@@ -563,7 +564,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
- power_domains = &dev_priv->power_domains;
+ power_domains = &dev_priv->display.power.domains;
drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
@@ -583,7 +584,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
__intel_display_power_put_domain(dev_priv, domain);
@@ -596,7 +597,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
@@ -610,7 +611,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
{
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
- power_domains);
+ display.power.domains);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
@@ -637,8 +638,8 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- power_domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ display.power.domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = 0;
@@ -698,7 +699,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -746,7 +747,7 @@ out_verify:
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -779,7 +780,7 @@ out_verify:
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_display_power_flush_work(i915);
cancel_delayed_work_sync(&power_domains->async_put_work);
@@ -908,7 +909,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return 0;
if (IS_DG2(dev_priv))
- max_dc = 0;
+ max_dc = 1;
else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
@@ -976,15 +977,15 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
*/
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->dmc.allowed_dc_mask =
+ dev_priv->display.dmc.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->dmc.target_dc_state =
+ dev_priv->display.dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1003,12 +1004,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
- intel_display_power_map_cleanup(&dev_priv->power_domains);
+ intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
@@ -1037,7 +1038,7 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
enum dbuf_slice slice;
@@ -1060,14 +1061,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for_each_dbuf_slice(dev_priv, slice)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
- dev_priv->dbuf.enabled_slices = req_slices;
+ dev_priv->display.dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- dev_priv->dbuf.enabled_slices =
+ dev_priv->display.dbuf.enabled_slices =
intel_enabled_dbuf_slices_mask(dev_priv);
/*
@@ -1075,7 +1076,7 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
* figure out later which slices we have and what we need.
*/
gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
- dev_priv->dbuf.enabled_slices);
+ dev_priv->display.dbuf.enabled_slices);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -1101,7 +1102,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
u32 mask, val, i;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
@@ -1309,7 +1310,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}
/*
@@ -1381,6 +1382,9 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
+ if (DISPLAY_VER(dev_priv) >= 14)
+ reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
+
val = intel_de_read(dev_priv, reg);
if (enable)
@@ -1394,7 +1398,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1426,13 +1430,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
@@ -1459,7 +1464,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1493,13 +1498,14 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
@@ -1601,7 +1607,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
u32 val;
@@ -1668,13 +1674,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ intel_dmc_disable_program(dev_priv);
/* 1. Disable all display engine functions -> aready done */
@@ -1712,7 +1719,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* power well state and lane status to reconstruct the
* expected initial value.
*/
- dev_priv->chv_phy_control =
+ dev_priv->display.power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -1734,27 +1741,27 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
} else {
- dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
}
if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
@@ -1766,21 +1773,21 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
} else {
- dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
}
drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
@@ -1864,7 +1871,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
power_domains->initializing = true;
@@ -1905,8 +1912,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
/* Disable power support if the user asked so. */
if (!i915->params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
- i915->power_domains.disable_wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_INIT);
+ i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
}
intel_power_domains_sync_hw(i915);
@@ -1927,12 +1934,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.init_wakeref);
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->power_domains.disable_wakeref));
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -1954,7 +1961,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
*/
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
@@ -1988,7 +1995,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.init_wakeref);
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -2003,7 +2010,7 @@ void intel_power_domains_enable(struct drm_i915_private *i915)
*/
void intel_power_domains_disable(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
@@ -2026,7 +2033,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
@@ -2039,7 +2046,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2053,7 +2060,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
*/
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->power_domains.disable_wakeref));
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -2080,7 +2087,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
*/
void intel_power_domains_resume(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
if (power_domains->display_core_suspended) {
intel_power_domains_init_hw(i915, true);
@@ -2098,7 +2105,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
for_each_power_well(i915, power_well) {
@@ -2126,7 +2133,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
*/
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
@@ -2232,10 +2239,10 @@ void intel_display_power_resume(struct drm_i915_private *i915)
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->dmc.allowed_dc_mask &
+ if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->dmc.allowed_dc_mask &
+ else if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
@@ -2243,7 +2250,7 @@ void intel_display_power_resume(struct drm_i915_private *i915)
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
@@ -2252,7 +2259,7 @@ void intel_display_power_resume(struct drm_i915_private *i915)
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
int i;
mutex_lock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 97b367f39f35..dc04afc6cc8f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1350,6 +1350,117 @@ static const struct i915_power_well_desc_list xelpd_power_wells[] = {
I915_PW_DESCRIPTORS(xelpd_power_wells_main),
};
+/*
+ * MTL is based on XELPD power domains with the exception of power gating for:
+ * - DDI_IO (moved to PLL logic)
+ * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on)
+ */
+#define XELPDP_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_TBT1);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_TBT2);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_TBT3);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpdp_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpdp_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
+ I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+};
+
static void init_power_well_domains(const struct i915_power_well_instance *inst,
struct i915_power_well *power_well)
{
@@ -1388,7 +1499,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
u64 power_well_ids = 0;
const struct i915_power_well_desc_list *desc_list;
const struct i915_power_well_desc *desc;
@@ -1447,7 +1558,7 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
@@ -1457,7 +1568,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 14)
+ return set_power_wells(power_domains, xelpdp_power_wells);
+ else if (DISPLAY_VER(i915) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
else if (IS_DG1(i915))
return set_power_wells(power_domains, dg1_power_wells);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 91cfd5890f46..df7ee4969ef1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
@@ -16,10 +17,10 @@
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_tc.h"
#include "intel_vga.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"
@@ -84,7 +85,7 @@ lookup_power_well(struct drm_i915_private *i915,
drm_WARN(&i915->drm, 1,
"Power well %d not defined for this platform\n",
power_well_id);
- return &i915->power_domains.power_wells[0];
+ return &i915->display.power.domains.power_wells[0];
}
void intel_power_well_enable(struct drm_i915_private *i915,
@@ -710,8 +711,8 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->dmc.dc_state, val);
- dev_priv->dmc.dc_state = val;
+ dev_priv->display.dmc.dc_state, val);
+ dev_priv->display.dmc.dc_state = val;
}
/**
@@ -746,8 +747,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->dmc.allowed_dc_mask))
- state &= dev_priv->dmc.allowed_dc_mask;
+ state & ~dev_priv->display.dmc.allowed_dc_mask))
+ state &= dev_priv->display.dmc.allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -755,16 +756,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->dmc.dc_state)
+ if ((val & mask) != dev_priv->display.dmc.dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->dmc.dc_state, val & mask);
+ dev_priv->display.dmc.dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->dmc.dc_state = val & mask;
+ dev_priv->display.dmc.dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -945,7 +946,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
+ u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
drm_WARN(&dev_priv->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
@@ -958,7 +959,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -971,7 +972,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1000,7 +1001,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->dmc.target_dc_state) {
+ switch (dev_priv->display.dmc.target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1156,10 +1157,10 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1207,7 +1208,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
- if (dev_priv->power_domains.initializing)
+ if (dev_priv->display.power.domains.initializing)
return;
intel_hpd_init(dev_priv);
@@ -1302,7 +1303,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_control = dev_priv->display.power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1313,7 +1314,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
@@ -1321,7 +1322,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
@@ -1397,7 +1398,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
+ phy_status, dev_priv->display.power.chv_phy_control);
}
#undef BITS_SET
@@ -1457,13 +1458,13 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ phy, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
}
@@ -1487,18 +1488,18 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
drm_dbg_kms(&dev_priv->drm,
"Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ phy, dev_priv->display.power.chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
+ dev_priv->display.power.chv_phy_assert[phy] = true;
assert_chv_phy_status(dev_priv);
}
@@ -1516,7 +1517,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->chv_phy_assert[phy])
+ if (!dev_priv->display.power.chv_phy_assert[phy])
return;
if (ch == DPIO_CH0)
@@ -1570,27 +1571,27 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
bool was_override;
mutex_lock(&power_domains->lock);
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
if (override == was_override)
goto out;
if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
+ phy, ch, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1604,26 +1605,26 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
+ phy, ch, mask, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1701,7 +1702,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1797,6 +1798,43 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
return intel_power_well_refcount(power_well);
}
+static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
+
+ /*
+ * The power status flag cannot be used to determine whether aux
+ * power wells have finished powering up. Instead we're
+ * expected to just wait a fixed 600us after raising the request
+ * bit.
+ */
+ usleep_range(600, 1200);
+}
+
+static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ 0);
+ usleep_range(10, 30);
+}
+
+static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) &
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
+}
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -1910,3 +1948,10 @@ const struct i915_power_well_ops tgl_tc_cold_off_ops = {
.disable = tgl_tc_cold_off_power_well_disable,
.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
+
+const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = xelpdp_aux_power_well_enable,
+ .disable = xelpdp_aux_power_well_disable,
+ .is_enabled = xelpdp_aux_power_well_enabled,
+};
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index d0624642dcb6..e13b521e322a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -14,15 +14,15 @@ struct drm_i915_private;
struct i915_power_well;
#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
+ (__dev_priv)->display.power.domains.power_well_count; \
(__power_well)++)
#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
+ (__dev_priv)->display.power.domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
(__power_well)--)
/*
@@ -80,6 +80,9 @@ struct i915_power_well_instance {
*/
u8 idx;
} hsw;
+ struct {
+ u8 aux_ch;
+ } xelpdp;
};
};
@@ -169,5 +172,6 @@ extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
extern const struct i915_power_well_ops icl_aux_power_well_ops;
extern const struct i915_power_well_ops icl_ddi_power_well_ops;
extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+extern const struct i915_power_well_ops xelpdp_aux_power_well_ops;
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 0da9b208d56e..298d00a11f47 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -105,7 +105,7 @@ struct intel_fb_view {
* In the normal view the FB object's backing store sg list is used
* directly and hence the remap information here is not used.
*/
- struct i915_ggtt_view gtt;
+ struct i915_gtt_view gtt;
/*
* The GTT view (gtt.type) specific information for each FB color
@@ -1130,6 +1130,7 @@ struct intel_crtc_state {
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ bool seamless_m_n;
/* PSR is supported but might not be enabled due the lack of enabled planes */
bool has_psr;
@@ -1712,7 +1713,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
- int force_dsc_bpp;
+ int force_dsc_bpc;
bool hobl_failed;
bool hobl_active;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa9ef591b885..e52ecc0738a6 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,8 +52,8 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06)
-#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06)
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 07)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07)
MODULE_FIRMWARE(DG2_DMC_PATH);
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
@@ -250,7 +250,7 @@ struct stepping_info {
static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
{
- return i915->dmc.dmc_info[dmc_id].payload;
+ return i915->display.dmc.dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -277,6 +277,17 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
+static void disable_event_handler(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ intel_de_write(i915, ctl_reg,
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE));
+ intel_de_write(i915, htp_reg, 0);
+}
+
static void
disable_flip_queue_event(struct drm_i915_private *i915,
i915_reg_t ctl_reg, i915_reg_t htp_reg)
@@ -299,12 +310,7 @@ disable_flip_queue_event(struct drm_i915_private *i915,
return;
}
- intel_de_write(i915, ctl_reg,
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE));
- intel_de_write(i915, htp_reg, 0);
+ disable_event_handler(i915, ctl_reg, htp_reg);
}
static bool
@@ -356,6 +362,51 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
}
}
+static void disable_all_event_handlers(struct drm_i915_private *i915)
+{
+ int id;
+
+ /* TODO: disable the event handlers on pre-GEN12 platforms as well */
+ if (DISPLAY_VER(i915) < 12)
+ return;
+
+ for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ int handler;
+
+ if (!has_dmc_id_fw(i915, id))
+ continue;
+
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(i915,
+ DMC_EVT_CTL(i915, id, handler),
+ DMC_EVT_HTP(i915, id, handler));
+ }
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ enum pipe pipe;
+
+ if (DISPLAY_VER(i915) != 13)
+ return;
+
+ /*
+ * Wa_16015201720:adl-p,dg2
+ * The WA requires clock gating to be disabled all the time
+ * for pipe A and B.
+ * For pipe C and D clock gating needs to be disabled only
+ * during initializing the firmware.
+ */
+ if (enable)
+ for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ 0, PIPEDMC_GATING_DIS);
+ else
+ for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ PIPEDMC_GATING_DIS, 0);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
@@ -366,12 +417,16 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
*/
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
u32 id, i;
if (!intel_dmc_has_payload(dev_priv))
return;
+ pipedmc_clock_gating_wa(dev_priv, true);
+
+ disable_all_event_handlers(dev_priv);
+
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
@@ -393,7 +448,7 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
}
}
- dev_priv->dmc.dc_state = 0;
+ dev_priv->display.dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
@@ -403,12 +458,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
* here.
*/
disable_all_flip_queue_events(dev_priv);
+
+ pipedmc_clock_gating_wa(dev_priv, false);
+}
+
+/**
+ * intel_dmc_disable_program() - disable the firmware
+ * @i915: i915 drm device
+ *
+ * Disable all event handlers in the firmware, making sure the firmware is
+ * inactive after the display is uninitialized.
+ */
+void intel_dmc_disable_program(struct drm_i915_private *i915)
+{
+ if (!intel_dmc_has_payload(i915))
+ return;
+
+ pipedmc_clock_gating_wa(i915, true);
+ disable_all_event_handlers(i915);
+ pipedmc_clock_gating_wa(i915, false);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
{
drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -445,7 +519,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
{
unsigned int i, id;
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
for (i = 0; i < num_entries; i++) {
id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
@@ -473,7 +547,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
int header_ver, u8 dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
u32 start_range, end_range;
int i;
@@ -511,7 +585,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -622,7 +696,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -676,7 +750,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -713,7 +787,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
u32 readcount = 0;
@@ -740,7 +814,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->dmc.dmc_info[id].present)
+ if (!dev_priv->display.dmc.dmc_info[id].present)
continue;
offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
@@ -756,15 +830,15 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- dev_priv->dmc.wakeref =
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ dev_priv->display.dmc.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->dmc.wakeref);
+ fetch_and_zero(&dev_priv->display.dmc.wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
@@ -775,10 +849,10 @@ static void dmc_load_work_fn(struct work_struct *work)
struct intel_dmc *dmc;
const struct firmware *fw = NULL;
- dev_priv = container_of(work, typeof(*dev_priv), dmc.work);
- dmc = &dev_priv->dmc;
+ dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
+ dmc = &dev_priv->display.dmc;
- request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev);
+ request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
parse_dmc_fw(dev_priv, fw);
if (intel_dmc_has_payload(dev_priv)) {
@@ -787,7 +861,7 @@ static void dmc_load_work_fn(struct work_struct *work)
drm_info(&dev_priv->drm,
"Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
drm_notice(&dev_priv->drm,
@@ -810,9 +884,9 @@ static void dmc_load_work_fn(struct work_struct *work)
*/
void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
- INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn);
+ INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
if (!HAS_DMC(dev_priv))
return;
@@ -895,7 +969,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
}
drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->dmc.work);
+ schedule_work(&dev_priv->display.dmc.work);
}
/**
@@ -911,7 +985,7 @@ void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
if (!HAS_DMC(dev_priv))
return;
- flush_work(&dev_priv->dmc.work);
+ flush_work(&dev_priv->display.dmc.work);
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(dev_priv))
@@ -953,16 +1027,16 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->dmc.dmc_info[id].payload);
+ kfree(dev_priv->display.dmc.dmc_info[id].payload);
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->dmc;
+ struct intel_dmc *dmc = &i915->display.dmc;
if (!HAS_DMC(i915))
return;
@@ -984,7 +1058,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->dmc;
+ dmc = &i915->display.dmc;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 41091aee3b47..67e03315ef99 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -47,6 +47,7 @@ struct intel_dmc {
void intel_dmc_ucode_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
+void intel_dmc_disable_program(struct drm_i915_private *i915);
void intel_dmc_ucode_fini(struct drm_i915_private *i915);
void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
void intel_dmc_ucode_resume(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 238620b55966..5e5e41644ddf 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -28,6 +28,8 @@
#define _DMC_REG(i915, dmc_id, reg) \
((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+#define DMC_EVENT_HANDLER_COUNT_GEN12 8
+
#define _DMC_EVT_HTP_0 0x8f004
#define DMC_EVT_HTP(i915, dmc_id, handler) \
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 32292c0be2bd..c9be61d2348e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -286,11 +286,22 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
+static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
+{
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int max_lanes = dig_port->max_lanes;
+
+ if (vbt_max_lanes)
+ max_lanes = min(max_lanes, vbt_max_lanes);
+
+ return max_lanes;
+}
+
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- int source_max = dig_port->max_lanes;
+ int source_max = intel_dp_max_source_lane_count(dig_port);
int sink_max = intel_dp->max_sink_lane_count;
int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
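Editor's note: intel_dp_max_source_lane_count() added above caps the port's lane count by the VBT-provided limit when the VBT supplies one (zero meaning "no limit"), and the common lane count is then the minimum across source, sink, FIA and LTTPR limits. A small standalone sketch of that min-of-limits calculation (values invented):

#include <stdio.h>

static int min_int(int a, int b)
{
        return a < b ? a : b;
}

/* Source lanes: the port limit, optionally clamped by a non-zero VBT limit. */
static int source_lane_count(int port_max, int vbt_max)
{
        return vbt_max ? min_int(port_max, vbt_max) : port_max;
}

int main(void)
{
        int source = source_lane_count(4, 2);   /* VBT restricts to x2 */
        int sink = 4, fia = 4, lttpr = 4;
        int common = min_int(min_int(source, sink), min_int(fia, lttpr));

        printf("common lane count: %d\n", common);      /* 2 */
        return 0;
}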
@@ -389,23 +400,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
- u32 voltage;
-
- voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
- return voltage == VOLTAGE_INFO_0_85V;
-}
-
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) &&
- (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -413,23 +414,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
- return 540000;
-
- return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ if (intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -491,7 +476,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = dg1_max_source_rate(intel_dp);
+ max_rate = 810000;
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -720,7 +705,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
if (bigjoiner) {
u32 max_bpp_bigjoiner =
- i915->max_cdclk_freq * 48 /
+ i915->display.cdclk.max_cdclk_freq * 48 /
intel_dp_mode_to_fec_clock(mode_clock);
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
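Editor's note: only the field path changes here (max_cdclk_freq now lives under display.cdclk), but the cap itself is worth spelling out: with bigjoiner the DSC output bpp is limited by what the pipeline can push relative to the FEC-adjusted pixel clock, using the 48 factor taken verbatim from the hunk. A hedged worked example with invented clock values:

#include <stdio.h>

int main(void)
{
        /* Illustrative numbers only. */
        unsigned int max_cdclk_khz = 652800;    /* hypothetical max CDCLK */
        unsigned int fec_clock_khz = 1236000;   /* mode clock scaled up for FEC overhead */

        /* Cap on DSC output bpp when two joined pipes share the mode. */
        unsigned int max_bpp_bigjoiner = max_cdclk_khz * 48 / fec_clock_khz;

        printf("bigjoiner bpp cap: %u\n", max_bpp_bigjoiner);   /* ~25 */
        return 0;
}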
@@ -1312,21 +1297,45 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
}
+static bool has_seamless_m_n(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ /*
+ * Seamless M/N reprogramming only implemented
+ * for BDW+ double buffered M/N registers so far.
+ */
+ return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /* FIXME a bit of a mess wrt clock vs. crtc_clock */
+ if (has_seamless_m_n(connector))
+ return intel_panel_highest_mode(connector, adjusted_mode)->clock;
+ else
+ return adjusted_mode->crtc_clock;
+}
+
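Editor's note: intel_dp_mode_clock() above decides which pixel clock the link is sized for. When the panel supports seamless M/N DRRS the link must be able to carry the highest refresh-rate mode, since M/N may be retuned at runtime; otherwise the current adjusted mode's clock is enough. A minimal standalone sketch of that selection (names and numbers hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Pick the clock the DP link must be able to carry. */
static int link_sizing_clock(bool seamless_drrs, int current_mode_clock,
                             int highest_mode_clock)
{
        /* With seamless M/N we may switch to any panel mode at runtime,
         * so size the link for the fastest one. */
        return seamless_drrs ? highest_mode_clock : current_mode_clock;
}

int main(void)
{
        printf("%d\n", link_sizing_clock(true, 138780, 166670));       /* 166670 */
        printf("%d\n", link_sizing_clock(false, 138780, 166670));      /* 138780 */
        return 0;
}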
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, i, lane_count;
+ int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
int mode_rate, link_rate, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
+ mode_rate = intel_dp_link_required(clock, output_bpp);
for (i = 0; i < intel_dp->num_common_rates; i++) {
link_rate = intel_dp_common_rate(intel_dp, i);
@@ -1377,7 +1386,18 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
return 0;
}
-#define DSC_SUPPORTED_VERSION_MIN 1
+static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+}
+
+static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
+ DP_DSC_MINOR_SHIFT;
+}
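Editor's note: these two helpers replace the hard-coded DSC_SUPPORTED_VERSION_MIN. The source advertises DSC minor version 2 on display version 14+ and 1 otherwise, the sink's minor version comes from the DP_DSC_REV DPCD field, and the configured minor version is the minimum of the two. A standalone sketch of the negotiation (DPCD byte value invented):

#include <stdio.h>

#define DP_DSC_MINOR_MASK       0xf0    /* upper nibble of DP_DSC_REV */
#define DP_DSC_MINOR_SHIFT      4

static int source_dsc_minor(int display_ver)
{
        return display_ver >= 14 ? 2 : 1;       /* DSC 1.2 vs 1.1 */
}

static int sink_dsc_minor(unsigned char dsc_rev)
{
        return (dsc_rev & DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT;
}

int main(void)
{
        unsigned char dsc_rev = 0x21;   /* hypothetical sink advertising DSC 1.2 */
        int src = source_dsc_minor(14), snk = sink_dsc_minor(dsc_rev);

        printf("negotiated DSC minor: %d\n", src < snk ? src : snk);
        return 0;
}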
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
@@ -1395,6 +1415,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
* DP_DSC_RC_BUF_SIZE for this.
*/
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
/*
* Slice Height of 8 works for all currently available panels. So start
@@ -1416,9 +1437,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
vdsc_cfg->dsc_version_minor =
- min(DSC_SUPPORTED_VERSION_MIN,
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+ min(intel_dp_source_dsc_version_minor(intel_dp),
+ intel_dp_sink_dsc_version_minor(intel_dp));
vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
DP_DSC_RGB;
@@ -1464,6 +1484,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ if (intel_dp->force_dsc_bpc) {
+ pipe_bpp = intel_dp->force_dsc_bpc * 3;
+ drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d", pipe_bpp);
+ }
+
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
drm_dbg_kms(&dev_priv->drm,
@@ -1515,28 +1540,12 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
- /* As of today we support DSC for only RGB */
- if (intel_dp->force_dsc_bpp) {
- if (intel_dp->force_dsc_bpp >= 8 &&
- intel_dp->force_dsc_bpp < pipe_bpp) {
- drm_dbg_kms(&dev_priv->drm,
- "DSC BPP forced to %d",
- intel_dp->force_dsc_bpp);
- pipe_config->dsc.compressed_bpp =
- intel_dp->force_dsc_bpp;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid DSC BPP %d",
- intel_dp->force_dsc_bpp);
- }
- }
-
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
+ if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
pipe_config->bigjoiner_pipes) {
if (pipe_config->dsc.slice_count < 2) {
drm_dbg_kms(&dev_priv->drm,
@@ -1626,7 +1635,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* Optimize for slow and wide for everything, because there are some
* eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
*/
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits);
if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
@@ -1869,8 +1878,7 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
- /* M1/N1 is double buffered */
- if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
@@ -1908,13 +1916,16 @@ static bool can_enable_drrs(struct intel_connector *connector,
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n)
+ int output_bpp)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
+ if (has_seamless_m_n(connector))
+ pipe_config->seamless_m_n = true;
+
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
intel_zero_m_n(&pipe_config->dp_m2_n2);
@@ -1932,7 +1943,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
pipe_config->port_clock, &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
+ pipe_config->fec_enable);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2007,7 +2018,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
struct intel_connector *connector = intel_dp->attached_connector;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
@@ -2086,7 +2096,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- constant_n, pipe_config->fec_enable);
+ pipe_config->fec_enable);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2097,8 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_dp_drrs_compute_config(connector, pipe_config,
- output_bpp, constant_n);
+ intel_dp_drrs_compute_config(connector, pipe_config, output_bpp);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -4992,12 +5001,21 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
{
struct drm_i915_private *dev_priv = to_i915(conn->dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
+ struct intel_connector *intel_conn = to_intel_connector(conn);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
int ret;
ret = intel_digital_connector_atomic_check(conn, &state->base);
if (ret)
return ret;
+ if (intel_dp_mst_source_support(intel_dp)) {
+ ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
+ if (ret)
+ return ret;
+ }
+
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
@@ -5023,9 +5041,9 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->dev);
spin_lock_irq(&i915->irq_lock);
- i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
+ i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
spin_unlock_irq(&i915->irq_lock);
- queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -5183,7 +5201,7 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
return;
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- i915->vbt.orientation,
+ i915->display.vbt.orientation,
fixed_mode->hdisplay,
fixed_mode->vdisplay);
}
@@ -5293,8 +5311,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(intel_connector);
- if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
- intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_backlight_setup(intel_connector, pipe);
intel_edp_add_properties(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 2bc119374555..48c375c65a41 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -42,7 +42,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ done = wait_event_timeout(i915->display.gmbus.wait_queue, C,
msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
@@ -86,7 +86,7 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
+ freq = dev_priv->display.cdclk.hw.cdclk;
else
freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
@@ -150,6 +150,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 ret;
/*
@@ -170,6 +171,13 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;
+ /*
+ * Power request bit is already set during aux power well enable.
+ * Preserve the bit across aux transactions.
+ */
+ if (DISPLAY_VER(i915) >= 14)
+ ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
+
return ret;
}
@@ -629,6 +637,46 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
}
}
+static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
@@ -644,7 +692,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
} else if (DISPLAY_VER(dev_priv) >= 9) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index c92d5bb2326a..83af95bce98d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -278,6 +278,8 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
+ struct drm_luminance_range_info *luminance_range =
+ &connector->base.display_info.luminance_range;
int ret;
if (panel->backlight.edp.intel.sdr_uses_aux) {
@@ -293,8 +295,17 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
}
}
- panel->backlight.max = 512;
- panel->backlight.min = 0;
+ if (luminance_range->max_luminance) {
+ panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.min = luminance_range->min_luminance;
+ } else {
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ }
+
+ drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
+ panel->backlight.max);
+
panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
panel->backlight.enabled = panel->backlight.level != 0;
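Editor's note: the hunk above switches the AUX HDR backlight range from a hard-coded 0..512 to the panel's reported luminance range when one exists (max_luminance of 0 meaning "not provided"). A standalone sketch of the fallback (nits values invented):

#include <stdio.h>

struct luminance_range {
        int min_luminance;
        int max_luminance;      /* 0 = not reported by the panel */
};

static void setup_backlight_range(const struct luminance_range *lum,
                                  int *bl_min, int *bl_max)
{
        if (lum->max_luminance) {
                *bl_min = lum->min_luminance;
                *bl_max = lum->max_luminance;
        } else {
                *bl_min = 0;    /* legacy fallback */
                *bl_max = 512;
        }
}

int main(void)
{
        struct luminance_range lum = { .min_luminance = 5, .max_luminance = 600 };
        int lo, hi;

        setup_backlight_range(&lum, &lo, &hi);
        printf("backlight range %d..%d\n", lo, hi);     /* 5..600 */
        return 0;
}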
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index a7640dbcf00e..88689124c013 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -17,6 +17,7 @@
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9feaf1a589f3..3d3efcf02011 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -37,17 +37,6 @@ static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}
-static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
- char *buf, size_t buf_size)
-{
- if (dp_phy == DP_PHY_DPRX)
- snprintf(buf, buf_size, "DPRX");
- else
- snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
-
- return buf;
-}
-
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
@@ -60,20 +49,19 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
- char phy_name[10];
-
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
- encoder->base.base.id, encoder->base.name, phy_name,
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
@@ -423,14 +411,13 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
int lane;
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
"TX FFE request: " TRAIN_REQ_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_REQ_TX_FFE_ARGS(link_status));
} else {
@@ -438,7 +425,7 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
"vswing request: " TRAIN_REQ_FMT ", "
"pre-emphasis request: " TRAIN_REQ_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_REQ_VSWING_ARGS(link_status),
TRAIN_REQ_PREEMPH_ARGS(link_status));
@@ -503,13 +490,12 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- char phy_name[10];
if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
@@ -546,13 +532,12 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
"TX FFE presets: " TRAIN_SET_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
} else {
@@ -560,7 +545,7 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
"vswing levels: " TRAIN_SET_FMT ", "
"pre-emphasis levels: " TRAIN_SET_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
@@ -671,6 +656,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);
+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this, high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
@@ -732,12 +739,11 @@ intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
}
@@ -757,21 +763,19 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
int voltage_tries, cr_tries, max_cr_tries;
u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
- char phy_name[10];
int delay_us;
delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
-
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -795,14 +799,16 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Clock recovery OK\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return true;
}
@@ -810,7 +816,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -818,7 +825,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -828,7 +836,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to update link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -846,7 +855,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
- encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy), max_cr_tries);
return false;
}
@@ -924,15 +934,12 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
- char phy_name[10];
int delay_us;
delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
-
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
if (training_pattern != DP_TRAINING_PATTERN_4)
@@ -944,7 +951,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
encoder->base.base.id, encoder->base.name,
- phy_name);
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -955,7 +962,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
link_status) < 0) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to get link status\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -966,7 +974,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
"continue channel equalization\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -975,7 +984,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
channel_eq = true;
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -985,7 +995,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to update link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
}
@@ -995,7 +1006,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
}
return channel_eq;
@@ -1070,7 +1082,6 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
{
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- char phy_name[10];
bool ret = false;
if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
@@ -1086,7 +1097,7 @@ out:
"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
ret ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
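Editor's note: every call site in this file drops the local intel_dp_phy_name() and its caller-provided stack buffer in favour of drm_dp_phy_name(), which returns a constant string. A standalone re-implementation of the mapping, for illustration only:

#include <stdio.h>

/* Illustrative stand-in for drm_dp_phy_name(): index 0 is the sink's own
 * receiver (DPRX), indexes 1..8 are LTTPRs along the link. */
static const char *dp_phy_name(int dp_phy)
{
        static const char * const names[] = {
                "DPRX", "LTTPR 1", "LTTPR 2", "LTTPR 3", "LTTPR 4",
                "LTTPR 5", "LTTPR 6", "LTTPR 7", "LTTPR 8",
        };

        if (dp_phy < 0 || dp_phy >= (int)(sizeof(names) / sizeof(names[0])))
                return "<invalid>";
        return names[dp_phy];
}

int main(void)
{
        printf("%s / %s\n", dp_phy_name(0), dp_phy_name(2));    /* DPRX / LTTPR 2 */
        return 0;
}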
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 14d2a64193b2..03604a37931c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -52,30 +52,36 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct drm_dp_mst_topology_state *mst_state;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
+ // TODO: Handle pbn_div changes by adding a new MST helper
+ if (!mst_state->pbn_div) {
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
+ limits->max_rate,
+ limits->max_lane_count);
+ }
+
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
crtc_state->pipe_bpp = bpp;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
crtc_state->pipe_bpp,
false);
-
- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- connector->port,
- crtc_state->pbn,
- drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- crtc_state->port_clock,
- crtc_state->lane_count));
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port, crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -93,7 +99,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
- constant_n, crtc_state->fec_enable);
+ crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;
return 0;
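Editor's note: the rewritten compute path moves from the old VCPI helpers to the atomic time-slot API: the MST topology state is pulled into the atomic state, pbn_div (the PBN one MTP time slot can carry at the chosen link rate and lane count) is filled in once via drm_dp_get_vc_payload_bw(), and drm_dp_atomic_find_time_slots() then reserves enough of the 64 slots for the stream's PBN. A hedged standalone sketch of that last relationship (numbers illustrative):

#include <stdio.h>

/* Slots needed for a stream: its PBN divided by the PBN one time slot
 * can carry, rounded up. Sketch of the relationship only. */
static int time_slots_for_stream(int stream_pbn, int pbn_div)
{
        return (stream_pbn + pbn_div - 1) / pbn_div;
}

int main(void)
{
        int pbn_div = 60;       /* hypothetical per-slot capacity */
        int stream_pbn = 1428;  /* hypothetical stream payload bandwidth */

        printf("time slots: %d of 64\n", time_slots_for_stream(stream_pbn, pbn_div));
        return 0;
}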
@@ -308,14 +314,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(&state->base, connector);
- struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(&state->base, connector);
struct intel_connector *intel_connector =
to_intel_connector(connector);
- struct drm_crtc *new_crtc = new_conn_state->crtc;
- struct drm_dp_mst_topology_mgr *mgr;
int ret;
ret = intel_digital_connector_atomic_check(connector, &state->base);
@@ -326,28 +326,9 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- if (!old_conn_state->crtc)
- return 0;
-
- /* We only want to free VCPI if this state disables the CRTC on this
- * connector
- */
- if (new_crtc) {
- struct intel_crtc *crtc = to_intel_crtc(new_crtc);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
- crtc_state->uapi.enable)
- return 0;
- }
-
- mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
- ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
- intel_connector->port);
-
- return ret;
+ return drm_dp_atomic_release_time_slots(&state->base,
+ &intel_connector->mst_port->mst_mgr,
+ intel_connector->port);
}
static void clear_act_sent(struct intel_encoder *encoder,
@@ -383,21 +364,17 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1;
- int ret;
drm_dbg_kms(&i915->drm, "active links %d\n",
intel_dp->active_mst_links);
intel_hdcp_disable(intel_mst->connector);
- drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
-
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
- if (ret) {
- drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
- }
+ drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
@@ -425,8 +402,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
- drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
clear_act_sent(encoder, old_crtc_state);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
@@ -434,8 +409,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, old_crtc_state);
- drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
-
intel_ddi_disable_transcoder_func(old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9)
@@ -502,7 +475,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1;
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
int ret;
bool first_mst_stream;
@@ -528,16 +502,13 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
- ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- connector->port,
- pipe_config->pbn,
- pipe_config->dp_m_n.tu);
- if (!ret)
- drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
-
intel_dp->active_mst_links++;
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
+ ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ if (ret < 0)
+ drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
+ connector->base.name, ret);
/*
* Before Gen 12 this is not done as part of
@@ -560,7 +531,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -588,9 +562,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, pipe_config);
- drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
- if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
+ if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
+ FECSTALL_DIS_DPTSTREAM_DPTTG);
+ else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
FECSTALL_DIS_DPTSTREAM_DPTTG);
@@ -972,8 +950,6 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
- int max_source_rate =
- intel_dp->source_rates[intel_dp->num_source_rates - 1];
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
return 0;
@@ -989,10 +965,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
/* create encoders */
intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
- &intel_dp->aux, 16, 3,
- dig_port->max_lanes,
- max_source_rate,
- conn_base_id);
+ &intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index cc6abe761f5e..8732b8722ed7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -484,7 +484,7 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
- lockdep_assert_held(&dev_priv->power_domains.lock);
+ lockdep_assert_held(&dev_priv->display.power.domains.lock);
was_enabled = true;
if (rcomp_phy != -1)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 5262f16b45ac..b15ba78d64d6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -938,12 +938,25 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- return intel_compute_shared_dplls(state, crtc, encoder);
+ ret = intel_compute_shared_dplls(state, crtc, encoder);
+ if (ret)
+ return ret;
+
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /* CRT dotclock is determined via other means */
+ if (!crtc_state->has_pch_encoder)
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
}
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -969,8 +982,15 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_mpllb_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
- return intel_mpllb_calc_state(crtc_state, encoder);
+ return 0;
}
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
@@ -991,7 +1011,7 @@ static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
factor = 21;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev_priv) &&
intel_is_dual_link_lvds(dev_priv)))
factor = 25;
@@ -1096,6 +1116,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 120000;
+ int ret;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
@@ -1105,8 +1126,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_panel_use_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ dev_priv->display.vbt.lvds_ssc_freq);
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
}
if (intel_is_dual_link_lvds(dev_priv)) {
@@ -1132,7 +1153,14 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- return intel_compute_shared_dplls(state, crtc, NULL);
+ ret = intel_compute_shared_dplls(state, crtc, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return ret;
}
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -1198,6 +1226,13 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
chv_compute_dpll(crtc_state);
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1217,6 +1252,13 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
vlv_compute_dpll(crtc_state);
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1231,7 +1273,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1259,6 +1301,11 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1273,7 +1320,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1292,6 +1339,9 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1306,7 +1356,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1325,6 +1375,11 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1339,7 +1394,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1360,6 +1415,9 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
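Editor's note: the recurring change in this file is that, after the divider search, each compute hook writes the frequency the PLL will actually produce back into port_clock and hw.adjusted_mode.crtc_clock (via intel_crtc_dotclock()), with DSI and TV outputs skipped per the FIXME comments. The point is to store the achieved, quantised clock rather than the request. A generic standalone sketch of that idea (dividers invented, not any specific platform's PLL):

#include <stdio.h>

struct pll {
        int ref_khz;    /* reference clock */
        int m, n, p;    /* feedback, pre- and post-dividers */
};

/* Frequency the PLL actually produces; the requested clock is quantised
 * to what the dividers can reach, so this value goes back into the state. */
static int pll_output_khz(const struct pll *pll)
{
        long long vco = (long long)pll->ref_khz * pll->m / pll->n;

        return (int)(vco / pll->p);
}

int main(void)
{
        struct pll pll = { .ref_khz = 120000, .m = 99, .n = 2, .p = 40 };

        /* requested 148500 kHz; print what the dividers actually give */
        printf("achieved dotclock: %d kHz\n", pll_output_khz(&pll));
        return 0;
}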
@@ -1411,16 +1469,13 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
- return 0;
-
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (!crtc_state->hw.enable)
return 0;
- ret = i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
@@ -1439,17 +1494,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
- if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
- return 0;
-
- if (!crtc_state->hw.enable)
+ if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->dpll_funcs->crtc_get_shared_dpll)
+ if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
@@ -1463,23 +1516,23 @@ void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv))
- dev_priv->dpll_funcs = &dg2_dpll_funcs;
+ dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->dpll_funcs = &hsw_dpll_funcs;
+ dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->dpll_funcs = &ilk_dpll_funcs;
+ dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->dpll_funcs = &chv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &chv_dpll_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->dpll_funcs = &vlv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->dpll_funcs = &g4x_dpll_funcs;
+ dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
else if (IS_PINEVIEW(dev_priv))
- dev_priv->dpll_funcs = &pnv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->dpll_funcs = &i9xx_dpll_funcs;
+ dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->dpll_funcs = &i8xx_dpll_funcs;
+ dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
}
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 118598c9a809..e5fb66a5dd02 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -113,8 +113,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -149,7 +149,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->dpll.shared_dplls[id];
+ return &dev_priv->display.dpll.shared_dplls[id];
}
/**
@@ -164,11 +164,11 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- long pll_idx = pll - dev_priv->dpll.shared_dplls;
+ long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
if (drm_WARN_ON(&dev_priv->drm,
pll_idx < 0 ||
- pll_idx >= dev_priv->dpll.num_shared_dpll))
+ pll_idx >= dev_priv->display.dpll.num_shared_dpll))
return -1;
return pll_idx;
@@ -245,7 +245,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
old_mask = pll->active_mask;
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
@@ -271,7 +271,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
/**
@@ -294,7 +294,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
"%s not used by [CRTC:%d:%s]\n", pll->info->name,
crtc->base.base.id, crtc->base.name))
@@ -317,7 +317,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static struct intel_shared_dpll *
@@ -336,7 +336,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->dpll.shared_dplls[i];
+ pll = &dev_priv->display.dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].pipe_mask == 0) {
@@ -436,9 +436,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->dpll.shared_dplls[i];
+ &dev_priv->display.dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -537,7 +537,7 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->dpll.shared_dplls[i];
+ pll = &dev_priv->display.dpll.shared_dplls[i];
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
@@ -905,37 +905,6 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static int
-hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- unsigned int p, n2, r2;
-
- hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
-
- crtc_state->dpll_hw_state.wrpll =
- WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- return 0;
-}
-
-static struct intel_shared_dpll *
-hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- return intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_WRPLL2) |
- BIT(DPLL_ID_WRPLL1));
-}
-
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
@@ -948,7 +917,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
case WRPLL_REF_SPECIAL_HSW:
/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- refclk = dev_priv->dpll.ref_clks.nssc;
+ refclk = dev_priv->display.dpll.ref_clks.nssc;
break;
}
fallthrough;
@@ -958,7 +927,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
* code only cares about 5% accuracy, and spread is a max of
* 0.5% downspread.
*/
- refclk = dev_priv->dpll.ref_clks.ssc;
+ refclk = dev_priv->display.dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -977,6 +946,41 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
}
static int
+hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
+
+ crtc_state->dpll_hw_state.wrpll =
+ WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
+}
+
+static int
hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -1145,12 +1149,12 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
- i915->dpll.ref_clks.ssc = 135000;
+ i915->display.dpll.ref_clks.ssc = 135000;
/* Non-SSC is only used on non-ULT HSW. */
if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- i915->dpll.ref_clks.nssc = 24000;
+ i915->display.dpll.ref_clks.nssc = 24000;
else
- i915->dpll.ref_clks.nssc = 135000;
+ i915->display.dpll.ref_clks.nssc = 135000;
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1618,48 +1622,11 @@ skip_remaining_dividers:
return 0;
}
-static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wrpll_params wrpll_params = {};
- u32 ctrl1, cfgcr1, cfgcr2;
- int ret;
-
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
- i915->dpll.ref_clks.nssc, &wrpll_params);
- if (ret)
- return ret;
-
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
- DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
- wrpll_params.dco_integer;
-
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
- DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
- DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
- DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq;
-
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
- return 0;
-}
-
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
{
- int ref_clock = i915->dpll.ref_clks.nssc;
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
@@ -1726,6 +1693,46 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wrpll_params wrpll_params = {};
+ u32 ctrl1, cfgcr1, cfgcr2;
+ int ret;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1858,7 +1865,7 @@ static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
/* No SSC ref */
- i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2171,7 +2178,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
}
}
- chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
+ chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
clk_div->dot != crtc_state->port_clock);
@@ -2245,6 +2252,23 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
return 0;
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+
+ return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
+}
+
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2258,28 +2282,20 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct dpll clk_div = {};
+ int ret;
bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
-}
-
-static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
- const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
+ ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+ if (ret)
+ return ret;
- clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+ crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
- return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+ return 0;
}
static int bxt_compute_dpll(struct intel_atomic_state *state,
@@ -2324,8 +2340,8 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
- i915->dpll.ref_clks.ssc = 100000;
- i915->dpll.ref_clks.nssc = 100000;
+ i915->display.dpll.ref_clks.ssc = 100000;
+ i915->display.dpll.ref_clks.nssc = 100000;
/* DSI non-SSC ref 19.2MHz */
}
@@ -2468,7 +2484,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
- i915->dpll.ref_clks.nssc == 38400;
+ i915->display.dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -2562,7 +2578,7 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->dpll.ref_clks.nssc == 24000 ?
+ dev_priv->display.dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2585,9 +2601,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (DISPLAY_VER(dev_priv) >= 12) {
- switch (dev_priv->dpll.ref_clks.nssc) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2598,9 +2614,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->dpll.ref_clks.nssc) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2630,7 +2646,7 @@ static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
{
- int ref_clock = i915->dpll.ref_clks.nssc;
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
/*
* For ICL+, the spec states: if reference frequency is 38.4,
@@ -2769,8 +2785,8 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- if (i915->vbt.override_afc_startup)
- pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
+ if (i915->display.vbt.override_afc_startup)
+ pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
@@ -2857,7 +2873,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->dpll.ref_clks.nssc;
+ int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2965,8 +2981,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
- if (dev_priv->vbt.override_afc_startup) {
- u8 val = dev_priv->vbt.override_afc_startup_val;
+ if (dev_priv->display.vbt.override_afc_startup) {
+ u8 val = dev_priv->display.vbt.override_afc_startup_val;
pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
@@ -3063,7 +3079,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- ref_clock = dev_priv->dpll.ref_clks.nssc;
+ ref_clock = dev_priv->display.dpll.ref_clks.nssc;
if (DISPLAY_VER(dev_priv) >= 12) {
m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
@@ -3197,6 +3213,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+
+ crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
return 0;
}
@@ -3282,6 +3304,12 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
if (ret)
return ret;
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+
+ crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
return 0;
}
@@ -3440,7 +3468,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias =
intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->dpll.ref_clks.nssc == 38400) {
+ if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3502,7 +3530,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
val = DKL_PLL_DIV0_MASK;
- if (dev_priv->vbt.override_afc_startup)
+ if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
hw_state->mg_pll_div0 &= val;
@@ -3566,7 +3594,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR1(id));
- if (dev_priv->vbt.override_afc_startup) {
+ if (dev_priv->display.vbt.override_afc_startup) {
hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
}
@@ -3638,9 +3666,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
- drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
+ drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
!i915_mmio_reg_valid(div0_reg));
- if (dev_priv->vbt.override_afc_startup &&
+ if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
hw_state->div0);
@@ -3732,7 +3760,7 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
val = DKL_PLL_DIV0_MASK;
- if (dev_priv->vbt.override_afc_startup)
+ if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
hw_state->mg_pll_div0);
@@ -3967,7 +3995,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
/* No SSC ref */
- i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -4192,22 +4220,24 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->dpll.num_shared_dpll = 0;
+ dev_priv->display.dpll.num_shared_dpll = 0;
return;
}
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
+ break;
+
drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
- dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
+ dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll.mgr = dpll_mgr;
- dev_priv->dpll.num_shared_dpll = i;
- mutex_init(&dev_priv->dpll.lock);
-
- BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
+ dev_priv->display.dpll.mgr = dpll_mgr;
+ dev_priv->display.dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->display.dpll.lock);
}
/**
@@ -4229,7 +4259,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return -EINVAL;
@@ -4262,7 +4292,7 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return -EINVAL;
@@ -4285,7 +4315,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -4314,7 +4344,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
@@ -4385,16 +4415,16 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
- if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
- i915->dpll.mgr->update_ref_clks(i915);
+ if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
+ i915->display.dpll.mgr->update_ref_clks(i915);
}
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
static void sanitize_dpll_state(struct drm_i915_private *i915,
@@ -4420,8 +4450,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
/**
@@ -4434,8 +4464,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll.mgr) {
- dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->display.dpll.mgr) {
+ dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
@@ -4533,7 +4563,7 @@ void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i],
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index c4affcb216fd..fc9c3e41c333 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -9,6 +9,36 @@
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dsb.h"
+
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+ * free_pos will point the first free entry position
+ * and help in calculating tail of command buffer.
+ */
+ int free_pos;
+
+ /*
+ * ins_start_offset will help to store start address of the dsb
+ * instruction and help in identifying the batch of auto-increment
+ * register.
+ */
+ u32 ins_start_offset;
+};
#define DSB_BUF_SIZE (2 * PAGE_SIZE)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 6cb9c580cdca..74dd2b3343bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -11,34 +11,6 @@
#include "i915_reg_defs.h"
struct intel_crtc_state;
-struct i915_vma;
-
-enum dsb_id {
- INVALID_DSB = -1,
- DSB1,
- DSB2,
- DSB3,
- MAX_DSB_PER_PIPE
-};
-
-struct intel_dsb {
- enum dsb_id id;
- u32 *cmd_buf;
- struct i915_vma *vma;
-
- /*
- * free_pos will point the first free entry position
- * and help in calculating tail of command buffer.
- */
- int free_pos;
-
- /*
- * ins_start_offset will help to store start address of the dsb
- * instuction and help in identifying the batch of auto-increment
- * register.
- */
- u32 ins_start_offset;
-};
void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 35e121cd226c..5efdd471ac2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -106,7 +106,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->vbt.orientation;
+ orientation = dev_priv->display.vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index eafef0a87fea..ce80bd8be519 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -89,9 +89,6 @@ struct intel_dsi {
u8 escape_clk_div;
u8 dual_link;
- u16 dcs_backlight_ports;
- u16 dcs_cabc_ports;
-
/* RGB or BGR */
bool bgr_enabled;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 1bc7118c56a2..20e466d843ce 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -53,7 +53,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused
enum port port;
size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
&data, len);
@@ -80,7 +80,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
data[1] = level;
}
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mode_flags = dsi_device->mode_flags;
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
@@ -93,12 +93,13 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
dcs_set_backlight(conn_state, 0);
- for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
u8 cabc = POWER_SAVE_OFF;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -106,7 +107,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state,
&cabc, sizeof(cabc));
}
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -127,10 +128,11 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -146,7 +148,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&ctrl, sizeof(ctrl));
}
- for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
u8 cabc = POWER_SAVE_MEDIUM;
dsi_device = intel_dsi->dsi_hosts[port]->device;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index d96c3cc46e50..50205f064d93 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -75,8 +75,8 @@ struct intel_dvo_dev_ops {
*
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
- int (*mode_valid)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode);
+ enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
/*
* Callback for preparing mode changes on an output
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b191915ab351..eefa33c555ac 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1395,7 +1395,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
plane_view_height_tiles(fb, color_plane, dims, y));
}
- if (view->gtt.type == I915_GGTT_VIEW_ROTATED) {
+ if (view->gtt.type == I915_GTT_VIEW_ROTATED) {
drm_WARN_ON(&i915->drm, remap_info->linear);
check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
@@ -1420,7 +1420,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
/* rotate the tile dimensions to match the GTT view */
swap(tile_width, tile_height);
} else {
- drm_WARN_ON(&i915->drm, view->gtt.type != I915_GGTT_VIEW_REMAPPED);
+ drm_WARN_ON(&i915->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED);
check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
@@ -1503,12 +1503,12 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
}
static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
- enum i915_ggtt_view_type view_type)
+ enum i915_gtt_view_type view_type)
{
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
- if (view_type == I915_GGTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
+ if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
@@ -1530,16 +1530,16 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
int i, num_planes = fb->base.format->num_planes;
unsigned int tile_size = intel_tile_size(i915);
- intel_fb_view_init(i915, &fb->normal_view, I915_GGTT_VIEW_NORMAL);
+ intel_fb_view_init(i915, &fb->normal_view, I915_GTT_VIEW_NORMAL);
drm_WARN_ON(&i915->drm,
intel_fb_supports_90_270_rotation(fb) &&
intel_fb_needs_pot_stride_remap(fb));
if (intel_fb_supports_90_270_rotation(fb))
- intel_fb_view_init(i915, &fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ intel_fb_view_init(i915, &fb->rotated_view, I915_GTT_VIEW_ROTATED);
if (intel_fb_needs_pot_stride_remap(fb))
- intel_fb_view_init(i915, &fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+ intel_fb_view_init(i915, &fb->remapped_view, I915_GTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -1620,8 +1620,8 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
u32 gtt_offset = 0;
intel_fb_view_init(i915, &plane_state->view,
- drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED :
- I915_GGTT_VIEW_REMAPPED);
+ drm_rotation_90_or_270(rotation) ? I915_GTT_VIEW_ROTATED :
+ I915_GTT_VIEW_REMAPPED);
src_x = plane_state->uapi.src.x1 >> 16;
src_y = plane_state->uapi.src.y1 >> 16;
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index bd6e7c98e751..c86e5d4ee016 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -18,7 +18,7 @@
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags,
struct i915_address_space *vm)
@@ -79,7 +79,7 @@ err:
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index e4fcd0218d9d..de0efaa25905 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -11,12 +11,12 @@
struct drm_framebuffer;
struct i915_vma;
struct intel_plane_state;
-struct i915_ggtt_view;
+struct i915_gtt_view;
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 16537830ccf0..f38175304928 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,11 +55,11 @@
#define for_each_fbc_id(__dev_priv, __fbc_id) \
for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id))
+ for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
for_each_fbc_id((__dev_priv), (__fbc_id)) \
- for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])
+ for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
struct intel_fbc_funcs {
void (*activate)(struct intel_fbc *fbc);
@@ -1098,6 +1098,12 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+ /* Wa_14016291713 */
+ if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) {
+ plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
+ return 0;
+ }
+
if (!pixel_format_is_valid(plane_state)) {
plane_state->no_fbc_reason = "pixel format not supported";
return 0;
@@ -1704,17 +1710,17 @@ void intel_fbc_init(struct drm_i915_private *i915)
enum intel_fbc_id fbc_id;
if (!drm_mm_initialized(&i915->mm.stolen))
- mkwrite_device_info(i915)->display.fbc_mask = 0;
+ RUNTIME_INFO(i915)->fbc_mask = 0;
if (need_fbc_vtd_wa(i915))
- mkwrite_device_info(i915)->display.fbc_mask = 0;
+ RUNTIME_INFO(i915)->fbc_mask = 0;
i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
i915->params.enable_fbc);
for_each_fbc_id(i915, fbc_id)
- i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+ i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
}
/**
@@ -1834,7 +1840,7 @@ void intel_fbc_debugfs_register(struct drm_i915_private *i915)
struct drm_minor *minor = i915->drm.primary;
struct intel_fbc *fbc;
- fbc = i915->fbc[INTEL_FBC_A];
+ fbc = i915->display.fbc[INTEL_FBC_A];
if (fbc)
intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index db60143295ec..4adb98afe6ff 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,6 +19,7 @@ struct intel_plane_state;
enum intel_fbc_id {
INTEL_FBC_A,
+ INTEL_FBC_B,
I915_MAX_FBCS,
};
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 221336178991..112aa0447a0d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -198,8 +198,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
- const struct i915_ggtt_view view = {
- .type = I915_GGTT_VIEW_NORMAL,
+ const struct i915_gtt_view view = {
+ .type = I915_GTT_VIEW_NORMAL,
};
intel_wakeref_t wakeref;
struct fb_info *info;
@@ -210,6 +210,12 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_gem_object *obj;
int ret;
+ mutex_lock(&ifbdev->hpd_lock);
+ ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
+ mutex_unlock(&ifbdev->hpd_lock);
+ if (ret)
+ return ret;
+
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
@@ -500,7 +506,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
{
intel_fbdev_set_suspend(&container_of(work,
struct drm_i915_private,
- fbdev_suspend_work)->drm,
+ display.fbdev.suspend_work)->drm,
FBINFO_STATE_RUNNING,
true);
}
@@ -530,8 +536,8 @@ int intel_fbdev_init(struct drm_device *dev)
return ret;
}
- dev_priv->fbdev = ifbdev;
- INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+ dev_priv->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
return 0;
}
@@ -548,7 +554,7 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -568,12 +574,13 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
- cancel_work_sync(&dev_priv->fbdev_suspend_work);
+ intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
+
if (!current_is_async())
intel_fbdev_sync(ifbdev);
@@ -582,7 +589,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
+ struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
if (!ifbdev)
return;
@@ -596,7 +603,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
*/
static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
- struct intel_fbdev *ifbdev = i915->fbdev;
+ struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
bool send_hpd = false;
mutex_lock(&ifbdev->hpd_lock);
@@ -614,11 +621,11 @@ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
struct fb_info *info;
if (!ifbdev || !ifbdev->vma)
- return;
+ goto set_suspend;
info = ifbdev->helper.fbdev;
@@ -631,7 +638,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* ourselves, so only flush outstanding work upon suspend!
*/
if (state != FBINFO_STATE_RUNNING)
- flush_work(&dev_priv->fbdev_suspend_work);
+ flush_work(&dev_priv->display.fbdev.suspend_work);
console_lock();
} else {
@@ -645,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
- schedule_work(&dev_priv->fbdev_suspend_work);
+ schedule_work(&dev_priv->display.fbdev.suspend_work);
return;
}
}
@@ -661,12 +668,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
+set_suspend:
intel_fbdev_hpd_set_suspend(dev_priv, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
bool send_hpd;
if (!ifbdev)
@@ -685,7 +693,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
void intel_fbdev_restore_mode(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 67d2484afbaa..7f47e5c85c81 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -113,7 +113,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
+ dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}
/* units of 100MHz */
@@ -210,14 +210,14 @@ void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
u32 fdi_pll_clk =
intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
- i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
- i915->fdi_pll_freq = 270000;
+ i915->display.fdi.pll_freq = 270000;
} else {
return;
}
- drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
+ drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}
int intel_fdi_link_freq(struct drm_i915_private *i915,
@@ -226,7 +226,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915,
if (HAS_DDI(i915))
return pipe_config->port_clock; /* SPLL */
else
- return i915->fdi_pll_freq;
+ return i915->display.fdi.pll_freq;
}
int ilk_fdi_compute_config(struct intel_crtc *crtc,
@@ -256,7 +256,7 @@ retry:
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
+ link_bw, &pipe_config->fdi_m_n, false);
ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
if (ret == -EDEADLK)
@@ -789,7 +789,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
@@ -1066,11 +1066,11 @@ void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
if (IS_IRONLAKE(dev_priv)) {
- dev_priv->fdi_funcs = &ilk_funcs;
+ dev_priv->display.funcs.fdi = &ilk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->fdi_funcs = &gen6_funcs;
+ dev_priv->display.funcs.fdi = &gen6_funcs;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->fdi_funcs = &ivb_funcs;
+ dev_priv->display.funcs.fdi = &ivb_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 791248f812aa..d80e3e8a9b01 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -81,9 +81,9 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
enum fb_op_origin origin)
{
/* Delay flushing when rings are still busy.*/
- spin_lock(&i915->fb_tracking.lock);
- frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -111,11 +111,11 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
- i915->fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
/**
@@ -131,11 +131,11 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->fb_tracking.flip_bits;
- i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
if (frontbuffer_bits)
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
@@ -155,10 +155,10 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
@@ -170,10 +170,10 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->fb_tracking.lock);
- i915->fb_tracking.busy_bits |= frontbuffer_bits;
- i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin);
@@ -191,11 +191,11 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Filter out new bits since rendering started. */
- frontbuffer_bits &= i915->fb_tracking.busy_bits;
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= i915->display.fb_tracking.busy_bits;
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
if (frontbuffer_bits)
@@ -221,7 +221,7 @@ static void frontbuffer_retire(struct i915_active *ref)
}
static void frontbuffer_release(struct kref *ref)
- __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+ __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
@@ -238,7 +238,7 @@ static void frontbuffer_release(struct kref *ref)
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
- spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+ spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock);
i915_active_fini(&front->write);
@@ -268,7 +268,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
frontbuffer_retire,
I915_ACTIVE_RETIRE_SLEEPS);
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
if (rcu_access_pointer(obj->frontbuffer)) {
kfree(front);
front = rcu_dereference_protected(obj->frontbuffer, true);
@@ -277,7 +277,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
i915_gem_object_get(obj);
rcu_assign_pointer(obj->frontbuffer, front);
}
- spin_unlock(&i915->fb_tracking.lock);
+ spin_unlock(&i915->display.fb_tracking.lock);
return front;
}
@@ -286,7 +286,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
kref_put_lock(&front->ref,
frontbuffer_release,
- &to_i915(front->obj->base.dev)->fb_tracking.lock);
+ &to_i915(front->obj->base.dev)->display.fb_tracking.lock);
}
/**
@@ -311,6 +311,8 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
BITS_PER_TYPE(atomic_t));
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32);
+ BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
drm_WARN_ON(old->obj->base.dev,
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index ff0c37b079aa..3c474ed937fb 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -25,6 +25,7 @@
#define __INTEL_FRONTBUFFER_H__
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/kref.h>
#include "gem/i915_gem_object_types.h"
@@ -48,6 +49,23 @@ struct intel_frontbuffer {
struct rcu_head rcu;
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
+#define INTEL_FRONTBUFFER(pipe, plane_id) \
+ BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a6ba7fb72339..74443f57f62d 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -37,6 +37,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
+#include "intel_gmbus_regs.h"
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -45,7 +46,7 @@ struct intel_gmbus {
u32 reg0;
i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
};
struct gmbus_pin {
@@ -116,6 +117,18 @@ static const struct gmbus_pin gmbus_pins_dg2[] = {
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
};
+static const struct gmbus_pin gmbus_pins_mtp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+ [GMBUS_PIN_5_MTP] = { "dpe", GPIOF },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
+};
+
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
unsigned int pin)
{
@@ -128,6 +141,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
+ pins = gmbus_pins_mtp;
+ size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
@@ -170,55 +186,55 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_gmbus_reset(struct drm_i915_private *dev_priv)
+intel_gmbus_reset(struct drm_i915_private *i915)
{
- intel_de_write(dev_priv, GMBUS0, 0);
- intel_de_write(dev_priv, GMBUS4, 0);
+ intel_de_write(i915, GMBUS0(i915), 0);
+ intel_de_write(i915, GMBUS4(i915), 0);
}
-static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(i915, DSPCLK_GATE_D(i915));
if (!enable)
val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(i915, DSPCLK_GATE_D(i915), val);
}
-static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
if (!enable)
val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
}
-static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
- val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
+ val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
if (!enable)
val |= BXT_GMBUS_GATING_DIS;
else
val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
+ intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *i915 = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
@@ -234,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -249,7 +265,7 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -264,7 +280,7 @@ static int get_data(void *data)
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -283,7 +299,7 @@ static void set_clock(void *data, int state_high)
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -301,12 +317,12 @@ static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- intel_gmbus_reset(dev_priv);
+ intel_gmbus_reset(i915);
- if (IS_PINEVIEW(dev_priv))
- pnv_gmbus_clock_gating(dev_priv, false);
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, false);
set_data(bus, 1);
set_clock(bus, 1);
@@ -318,13 +334,13 @@ static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(dev_priv))
- pnv_gmbus_clock_gating(dev_priv, true);
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, true);
}
static void
@@ -356,7 +372,7 @@ static bool has_gmbus_irq(struct drm_i915_private *i915)
return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
}
-static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
+static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
{
DEFINE_WAIT(wait);
u32 gmbus2;
@@ -366,21 +382,21 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves.
*/
- if (!has_gmbus_irq(dev_priv))
+ if (!has_gmbus_irq(i915))
irq_en = 0;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- intel_de_write_fw(dev_priv, GMBUS4, irq_en);
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
2);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
50);
- intel_de_write_fw(dev_priv, GMBUS4, 0);
- remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
@@ -389,7 +405,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
}
static int
-gmbus_wait_idle(struct drm_i915_private *dev_priv)
+gmbus_wait_idle(struct drm_i915_private *i915)
{
DEFINE_WAIT(wait);
u32 irq_enable;
@@ -397,35 +413,35 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
/* Important: The hw handles only the first bit, so set only one! */
irq_enable = 0;
- if (has_gmbus_irq(dev_priv))
+ if (has_gmbus_irq(i915))
irq_enable = GMBUS_IDLE_EN;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_wait_for_register_fw(&dev_priv->uncore,
- GMBUS2, GMBUS_ACTIVE, 0,
+ ret = intel_wait_for_register_fw(&i915->uncore,
+ GMBUS2(i915), GMBUS_ACTIVE, 0,
10);
- intel_de_write_fw(dev_priv, GMBUS4, 0);
- remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
return ret;
}
-static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915)
{
- return DISPLAY_VER(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
}
static int
-gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_read_chunk(struct drm_i915_private *i915,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus0_reg, u32 gmbus1_index)
{
unsigned int size = len;
- bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+ bool burst_read = len > gmbus_max_xfer_size(i915);
bool extra_byte_added = false;
if (burst_read) {
@@ -438,21 +454,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
len++;
}
size = len % 256 + 256;
- intel_de_write_fw(dev_priv, GMBUS0,
+ intel_de_write_fw(i915, GMBUS0(i915),
gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- intel_de_write_fw(dev_priv, GMBUS1,
+ intel_de_write_fw(i915, GMBUS1(i915),
gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
- val = intel_de_read_fw(dev_priv, GMBUS3);
+ val = intel_de_read_fw(i915, GMBUS3(i915));
do {
if (extra_byte_added && len == 1)
break;
@@ -463,7 +479,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (burst_read && len == size - 4)
/* Reset the override bit */
- intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg);
}
return 0;
@@ -480,7 +496,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -489,12 +505,12 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- if (HAS_GMBUS_BURST_READ(dev_priv))
+ if (HAS_GMBUS_BURST_READ(i915))
len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
else
- len = min(rx_size, gmbus_max_xfer_size(dev_priv));
+ len = min(rx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+ ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len,
gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -507,7 +523,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_write_chunk(struct drm_i915_private *i915,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
@@ -520,8 +536,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- intel_de_write_fw(dev_priv, GMBUS3, val);
- intel_de_write_fw(dev_priv, GMBUS1,
+ intel_de_write_fw(i915, GMBUS3(i915), val);
+ intel_de_write_fw(i915, GMBUS1(i915),
gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -531,9 +547,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- intel_de_write_fw(dev_priv, GMBUS3, val);
+ intel_de_write_fw(i915, GMBUS3(i915), val);
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
}
@@ -542,7 +558,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -551,9 +567,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, gmbus_max_xfer_size(dev_priv));
+ len = min(tx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
+ ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len,
gmbus1_index);
if (ret)
return ret;
@@ -580,7 +596,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
@@ -596,17 +612,17 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
+ intel_de_write_fw(i915, GMBUS5(i915), gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+ ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg,
gmbus1_index);
else
- ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- intel_de_write_fw(dev_priv, GMBUS5, 0);
+ intel_de_write_fw(i915, GMBUS5(i915), 0);
return ret;
}
@@ -616,34 +632,34 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
u32 gmbus0_source)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_gmbus_clock_gating(dev_priv, false);
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
- pch_gmbus_clock_gating(dev_priv, false);
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, false);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, false);
retry:
- intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(dev_priv, &msgs[i],
+ ret = gmbus_index_xfer(i915, &msgs[i],
gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(dev_priv, &msgs[i],
+ ret = gmbus_xfer_read(i915, &msgs[i],
gmbus0_source | bus->reg0, 0);
} else {
- ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
+ ret = gmbus_xfer_write(i915, &msgs[i], 0);
}
if (!ret)
- ret = gmbus_wait(dev_priv,
+ ret = gmbus_wait(i915,
GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
@@ -655,19 +671,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (gmbus_wait_idle(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
}
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
ret = ret ?: i;
goto out;
@@ -686,8 +702,8 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (gmbus_wait_idle(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -697,11 +713,11 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
- intel_de_write_fw(dev_priv, GMBUS1, 0);
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
+ intel_de_write_fw(i915, GMBUS1(i915), 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
- drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
@@ -712,7 +728,7 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] NAK on first message, retry\n",
adapter->name);
goto retry;
@@ -721,10 +737,10 @@ clear_err:
goto out;
timeout:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -734,10 +750,10 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_gmbus_clock_gating(dev_priv, true);
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
- pch_gmbus_clock_gating(dev_priv, true);
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, true);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, true);
return ret;
}
@@ -746,11 +762,11 @@ static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -762,7 +778,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -770,7 +786,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
struct i2c_msg msgs[] = {
@@ -790,8 +806,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- mutex_lock(&dev_priv->gmbus_mutex);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+ mutex_lock(&i915->display.gmbus.mutex);
/*
* In order to output Aksv to the receiver, use an indexed write to
@@ -800,8 +816,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
*/
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
- mutex_unlock(&dev_priv->gmbus_mutex);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ mutex_unlock(&i915->display.gmbus.mutex);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -824,27 +840,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&i915->display.gmbus.mutex);
}
static int gmbus_trylock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- return mutex_trylock(&dev_priv->gmbus_mutex);
+ return mutex_trylock(&i915->display.gmbus.mutex);
}
static void gmbus_unlock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&i915->display.gmbus.mutex);
}
static const struct i2c_lock_operations gmbus_lock_ops = {
@@ -855,31 +871,31 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*/
-int intel_gmbus_setup(struct drm_i915_private *dev_priv)
+int intel_gmbus_setup(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
- else if (!HAS_GMCH(dev_priv))
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE;
+ else if (!HAS_GMCH(i915))
/*
* Broxton uses the same PCH offsets for South Display Engine,
* even though it doesn't have a PCH.
*/
- dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE;
+ i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE;
- mutex_init(&dev_priv->gmbus_mutex);
- init_waitqueue_head(&dev_priv->gmbus_wait_queue);
+ mutex_init(&i915->display.gmbus.mutex);
+ init_waitqueue_head(&i915->display.gmbus.wait_queue);
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
const struct gmbus_pin *gmbus_pin;
struct intel_gmbus *bus;
- gmbus_pin = get_gmbus_pin(dev_priv, pin);
+ gmbus_pin = get_gmbus_pin(i915, pin);
if (!gmbus_pin)
continue;
@@ -896,7 +912,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
"i915 gmbus %s", gmbus_pin->name);
bus->adapter.dev.parent = &pdev->dev;
- bus->dev_priv = dev_priv;
+ bus->i915 = i915;
bus->adapter.algo = &gmbus_algorithm;
bus->adapter.lock_ops = &gmbus_lock_ops;
@@ -911,10 +927,10 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev_priv))
+ if (IS_I830(i915))
bus->force_bit = 1;
- intel_gpio_setup(bus, GPIO(gmbus_pin->gpio));
+ intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio));
ret = i2c_add_adapter(&bus->adapter);
if (ret) {
@@ -922,43 +938,43 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
goto err;
}
- dev_priv->gmbus[pin] = bus;
+ i915->display.gmbus.bus[pin] = bus;
}
- intel_gmbus_reset(dev_priv);
+ intel_gmbus_reset(i915);
return 0;
err:
- intel_gmbus_teardown(dev_priv);
+ intel_gmbus_teardown(i915);
return ret;
}
-struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915,
unsigned int pin)
{
- if (drm_WARN_ON(&dev_priv->drm, pin >= ARRAY_SIZE(dev_priv->gmbus) ||
- !dev_priv->gmbus[pin]))
+ if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) ||
+ !i915->display.gmbus.bus[pin]))
return NULL;
- return &dev_priv->gmbus[pin]->adapter;
+ return &i915->display.gmbus.bus[pin]->adapter;
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&i915->display.gmbus.mutex);
bus->force_bit += force_bit ? 1 : -1;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&i915->display.gmbus.mutex);
}
bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -968,20 +984,20 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
return bus->force_bit;
}
-void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
+void intel_gmbus_teardown(struct drm_i915_private *i915)
{
unsigned int pin;
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
struct intel_gmbus *bus;
- bus = dev_priv->gmbus[pin];
+ bus = i915->display.gmbus.bus[pin];
if (!bus)
continue;
i2c_del_adapter(&bus->adapter);
kfree(bus);
- dev_priv->gmbus[pin] = NULL;
+ i915->display.gmbus.bus[pin] = NULL;
}
}
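Illustrative note (not part of the patch): after the conversion above, display code still reaches GMBUS through intel_gmbus_get_adapter() and the regular i2c core; the adapter's lock_ops now serialize transfers on i915->display.gmbus.mutex rather than the old top-level gmbus_mutex. A minimal sketch of such a caller, where the helper name, the DDC address 0x50 and the error handling are assumptions, not driver code:
	/* Sketch only: read one byte over a GMBUS pin through the i2c core. */
	static int example_ddc_read_byte(struct drm_i915_private *i915,
					 unsigned int pin, u8 offset, u8 *val)
	{
		struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, pin);
		struct i2c_msg msgs[] = {
			{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
			{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
		};
		if (!adapter)
			return -ENXIO;
		/* gmbus_lock_bus()/gmbus_unlock_bus() above take
		 * i915->display.gmbus.mutex via the adapter's lock_ops. */
		if (i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) != ARRAY_SIZE(msgs))
			return -EIO;
		return 0;
	}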
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index 8edc2e99cf53..20f704bd4e70 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -24,6 +24,7 @@ struct i2c_adapter;
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_5_MTP 5
#define GMBUS_PIN_9_TC1_ICP 9
#define GMBUS_PIN_10_TC2_ICP 10
#define GMBUS_PIN_11_TC3_ICP 11
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
new file mode 100644
index 000000000000..53aacbda983c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_REGS_H__
+#define __INTEL_GMBUS_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base)
+
+#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio))
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+/* clock/port select */
+#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100)
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
+
+/* command/status */
+#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
+
+/* status */
+#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
+
+/* data buffer bytes 3-0 */
+#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c)
+
+/* interrupt mask (Pineview+) */
+#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
+
+/* byte index */
+#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
+
+#endif /* __INTEL_GMBUS_REGS_H__ */
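For orientation (illustrative, not part of the patch): every register in this new header is computed relative to the per-device display.gmbus.mmio_base, which intel_gmbus_setup() above leaves at 0, VLV_DISPLAY_BASE or PCH_DISPLAY_BASE depending on the platform. A standalone sketch of the offset arithmetic; the base value below is a stand-in, not the driver's constant:
	#include <stdio.h>
	/* Stand-in base; the driver fills display.gmbus.mmio_base at setup time. */
	#define EXAMPLE_MMIO_BASE	0xc0000u
	#define EXAMPLE_GPIO(gpio)	(EXAMPLE_MMIO_BASE + 0x5010 + 4u * (gpio))
	#define EXAMPLE_GMBUS(n)	(EXAMPLE_MMIO_BASE + 0x5100 + 4u * (n))
	int main(void)
	{
		/* GMBUS0..GMBUS4 are contiguous; GMBUS5 sits apart at +0x5120. */
		for (unsigned int n = 0; n <= 4; n++)
			printf("GMBUS%u  -> 0x%05x\n", n, EXAMPLE_GMBUS(n));
		printf("GMBUS5  -> 0x%05x\n", EXAMPLE_MMIO_BASE + 0x5120);
		printf("GPIO(2) -> 0x%05x\n", EXAMPLE_GPIO(2));
		return 0;
	}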
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 8ea66a2e1b09..6406fd487ee5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
@@ -30,8 +31,30 @@
static int intel_conn_to_vcpi(struct intel_connector *connector)
{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_topology_state *mst_state;
+ int vcpi = 0;
+
/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- return connector->port ? connector->port->vcpi.vcpi : 0;
+ if (!connector->port)
+ return 0;
+ mgr = connector->port->mgr;
+
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
+ payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
+ if (drm_WARN_ON(mgr->dev, !payload))
+ goto out;
+
+ vcpi = payload->vcpi;
+ if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
+ vcpi = 0;
+ goto out;
+ }
+out:
+ drm_modeset_unlock(&mgr->base.lock);
+ return vcpi;
}
/*
@@ -187,12 +210,12 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return false;
/* MEI interface is solid */
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return false;
}
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
/* Sink's capability for HDCP2.2 */
hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
@@ -1109,8 +1132,8 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
- return INTEL_INFO(dev_priv)->display.has_hdcp &&
- (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
+ return RUNTIME_INFO(dev_priv)->has_hdcp &&
+ (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
}
static int
@@ -1123,11 +1146,11 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1135,7 +1158,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1153,11 +1176,11 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1167,7 +1190,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1181,18 +1204,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1207,11 +1230,11 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1219,7 +1242,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1234,11 +1257,11 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1246,7 +1269,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1261,11 +1284,11 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1273,7 +1296,7 @@ hdcp2_verify_lprime(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1287,11 +1310,11 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1299,7 +1322,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1316,11 +1339,11 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1330,7 +1353,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1345,18 +1368,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1369,11 +1392,11 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1381,7 +1404,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1393,17 +1416,17 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
&dig_port->hdcp_port_data);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -2121,10 +2144,10 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
- dev_priv->hdcp_master->mei_dev = mei_kdev;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
}
@@ -2135,9 +2158,9 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_master = NULL;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = NULL;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
static const struct component_ops i915_hdcp_component_ops = {
@@ -2228,19 +2251,19 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
if (!is_hdcp2_supported(dev_priv))
return;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
- dev_priv->hdcp_comp_added = true;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->display.hdcp.comp_added = true;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
ret);
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_comp_added = false;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return;
}
}
@@ -2453,14 +2476,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- if (!dev_priv->hdcp_comp_added) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return;
}
- dev_priv->hdcp_comp_added = false;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}
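Side note (hypothetical helper, not in the patch): every hdcp2_* wrapper above repeats the same sequence against the relocated state, i.e. take display.hdcp.comp_mutex, fail with -EINVAL if the MEI component master is not bound, call into comp->ops, then drop the lock. A sketch of that pattern factored out, with the helper name and callback shape as assumptions:
	static int example_hdcp2_with_comp(struct drm_i915_private *i915,
					   int (*op)(struct i915_hdcp_comp_master *comp,
						     void *arg),
					   void *arg)
	{
		struct i915_hdcp_comp_master *comp;
		int ret;
		mutex_lock(&i915->display.hdcp.comp_mutex);
		comp = i915->display.hdcp.master;
		if (!comp || !comp->ops) {
			mutex_unlock(&i915->display.hdcp.comp_mutex);
			return -EINVAL;
		}
		ret = op(comp, arg);	/* e.g. wraps comp->ops->verify_hprime() */
		mutex_unlock(&i915->display.hdcp.comp_mutex);
		return ret;
	}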
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
new file mode 100644
index 000000000000..2a3733e8966c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_REGS_H__
+#define __INTEL_HDCP_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* HDCP Key Registers */
+#define HDCP_KEY_CONF _MMIO(0x66c00)
+#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
+#define HDCP_CLEAR_KEYS_TRIGGER REG_BIT(30)
+#define HDCP_KEY_LOAD_TRIGGER REG_BIT(8)
+#define HDCP_KEY_STATUS _MMIO(0x66c04)
+#define HDCP_FUSE_IN_PROGRESS REG_BIT(7)
+#define HDCP_FUSE_ERROR REG_BIT(6)
+#define HDCP_FUSE_DONE REG_BIT(5)
+#define HDCP_KEY_LOAD_STATUS REG_BIT(1)
+#define HDCP_KEY_LOAD_DONE REG_BIT(0)
+#define HDCP_AKSV_LO _MMIO(0x66c10)
+#define HDCP_AKSV_HI _MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT REG_BIT(31)
+#define HDCP_TRANSB_REP_PRESENT REG_BIT(30)
+#define HDCP_TRANSC_REP_PRESENT REG_BIT(29)
+#define HDCP_TRANSD_REP_PRESENT REG_BIT(28)
+#define HDCP_DDIB_REP_PRESENT REG_BIT(30)
+#define HDCP_DDIA_REP_PRESENT REG_BIT(29)
+#define HDCP_DDIC_REP_PRESENT REG_BIT(28)
+#define HDCP_DDID_REP_PRESENT REG_BIT(27)
+#define HDCP_DDIF_REP_PRESENT REG_BIT(26)
+#define HDCP_DDIE_REP_PRESENT REG_BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
+#define HDCP_DDIB_SHA1_M0 (1 << 20)
+#define HDCP_DDIA_SHA1_M0 (2 << 20)
+#define HDCP_DDIC_SHA1_M0 (3 << 20)
+#define HDCP_DDID_SHA1_M0 (4 << 20)
+#define HDCP_DDIF_SHA1_M0 (5 << 20)
+#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
+#define HDCP_SHA1_BUSY REG_BIT(16)
+#define HDCP_SHA1_READY REG_BIT(17)
+#define HDCP_SHA1_COMPLETE REG_BIT(18)
+#define HDCP_SHA1_V_MATCH REG_BIT(19)
+#define HDCP_SHA1_TEXT_32 (1 << 1)
+#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
+#define HDCP_SHA1_TEXT_24 (4 << 1)
+#define HDCP_SHA1_TEXT_16 (5 << 1)
+#define HDCP_SHA1_TEXT_8 (6 << 1)
+#define HDCP_SHA1_TEXT_0 (7 << 1)
+#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
+#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
+#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
+#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
+#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
+#define HDCP_SHA_TEXT _MMIO(0x66d18)
+
+/* HDCP Auth Registers */
+#define _PORTA_HDCP_AUTHENC 0x66800
+#define _PORTB_HDCP_AUTHENC 0x66500
+#define _PORTC_HDCP_AUTHENC 0x66600
+#define _PORTD_HDCP_AUTHENC 0x66700
+#define _PORTE_HDCP_AUTHENC 0x66A00
+#define _PORTF_HDCP_AUTHENC 0x66900
+#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
+ _PORTA_HDCP_AUTHENC, \
+ _PORTB_HDCP_AUTHENC, \
+ _PORTC_HDCP_AUTHENC, \
+ _PORTD_HDCP_AUTHENC, \
+ _PORTE_HDCP_AUTHENC, \
+ _PORTF_HDCP_AUTHENC) + (x))
+#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
+#define HDCP_CONF_CAPTURE_AN REG_BIT(0)
+#define HDCP_CONF_AUTH_AND_ENC (REG_BIT(1) | REG_BIT(0))
+#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
+#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
+#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
+#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
+#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
+#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
+#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
+#define HDCP_STATUS_STREAM_A_ENC REG_BIT(31)
+#define HDCP_STATUS_STREAM_B_ENC REG_BIT(30)
+#define HDCP_STATUS_STREAM_C_ENC REG_BIT(29)
+#define HDCP_STATUS_STREAM_D_ENC REG_BIT(28)
+#define HDCP_STATUS_AUTH REG_BIT(21)
+#define HDCP_STATUS_ENC REG_BIT(20)
+#define HDCP_STATUS_RI_MATCH REG_BIT(19)
+#define HDCP_STATUS_R0_READY REG_BIT(18)
+#define HDCP_STATUS_AN_READY REG_BIT(17)
+#define HDCP_STATUS_CIPHER REG_BIT(16)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
+#define AUTH_LINK_AUTHENTICATED REG_BIT(31)
+#define AUTH_LINK_TYPE REG_BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
+#define AUTH_CLR_KEYS REG_BIT(18)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
+#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
+#define LINK_TYPE_STATUS REG_BIT(22)
+#define LINK_AUTH_STATUS REG_BIT(21)
+#define LINK_ENCRYPTION_STATUS REG_BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
+
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
+#define STREAM_TYPE_STATUS REG_BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE REG_BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
+#endif /* __INTEL_HDCP_REGS_H__ */
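For reference (illustrative, not from this patch): the HDCP_*()/HDCP2_*() selector macros above pick the transcoder-indexed register on GRAPHICS_VER >= 12 and the port-indexed one on older hardware, so callers pass both and let the macro choose. A minimal sketch of a caller, assuming intel_de_read() and the enum transcoder/enum port types from the surrounding driver:
	/* Sketch only: query HDCP 1.x link encryption for a transcoder/port pair. */
	static bool example_hdcp_link_encrypted(struct drm_i915_private *i915,
						enum transcoder cpu_transcoder,
						enum port port)
	{
		/* Expands to TRANS_HDCP_STATUS() on gen12+, PORT_HDCP_STATUS() before. */
		return intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
		       HDCP_STATUS_ENC;
	}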
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ebd91aa69dd2..7816b2a33fee 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -50,6 +50,7 @@
#include "intel_dp.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
@@ -1891,7 +1892,7 @@ int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
* 1.5x for 12bpc
* 1.25x for 10bpc
*/
- return clock * bpc / 8;
+ return DIV_ROUND_CLOSEST(clock * bpc, 8);
}
static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
@@ -2001,6 +2002,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock *= 2;
}
+ /*
+ * HDMI 2.1 requires higher resolution modes like 8K60 and 4K120 to be
+ * enumerated only if FRL is supported. Current platforms do not support
+ * FRL, so prune the higher resolution modes that require a dotclock of
+ * more than 600 MHz.
+ */
+ if (clock > 600000)
+ return MODE_CLOCK_HIGH;
+
ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
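Two of the intel_hdmi.c hunks above are small but behavioural: modes whose clock ends up above 600000 kHz are now rejected because no current platform supports FRL, and the deep-color TMDS clock is rounded to the closest kHz instead of truncated. A standalone sketch of the rounding difference (the mode clock below is an arbitrary example, not taken from the patch):
	#include <stdio.h>
	/* Same rounding as the kernel macro for positive values. */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
	int main(void)
	{
		int clock = 333333;	/* kHz, arbitrary example mode clock */
		int bpc = 12;		/* 1.5x TMDS clock */
		printf("truncated: %d kHz\n", clock * bpc / 8);			/* 499999 */
		printf("rounded:   %d kHz\n", DIV_ROUND_CLOSEST(clock * bpc, 8));	/* 500000 */
		return 0;
	}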
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 5f8b4f481cff..f7a2f485b177 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -119,13 +119,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
+ * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -140,7 +140,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
enum hpd_pin pin, bool long_hpd)
{
- struct i915_hotplug *hpd = &dev_priv->hotplug;
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -148,7 +148,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -191,7 +191,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
drm_info(&dev_priv->drm,
@@ -199,7 +199,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -209,7 +209,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
- mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -218,7 +218,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
- hotplug.reenable_work.work);
+ display.hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -233,7 +233,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
@@ -245,8 +245,8 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
}
intel_hpd_irq_setup(dev_priv);
@@ -297,16 +297,16 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+ container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
u32 long_port_mask, short_port_mask;
struct intel_encoder *encoder;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->hotplug.long_port_mask;
- dev_priv->hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->hotplug.short_port_mask;
- dev_priv->hotplug.short_port_mask = 0;
+ long_port_mask = dev_priv->display.hotplug.long_port_mask;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ short_port_mask = dev_priv->display.hotplug.short_port_mask;
+ dev_priv->display.hotplug.short_port_mask = 0;
spin_unlock_irq(&dev_priv->irq_lock);
for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -335,9 +335,9 @@ static void i915_digport_work_func(struct work_struct *work)
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.event_bits |= old_bits;
+ dev_priv->display.hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -353,10 +353,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
spin_lock_irq(&i915->irq_lock);
- i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+ i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
spin_unlock_irq(&i915->irq_lock);
- queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+ queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}
/*
@@ -366,7 +366,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- hotplug.hotplug_work.work);
+ display.hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -379,10 +379,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
- hpd_event_bits = dev_priv->hotplug.event_bits;
- dev_priv->hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->hotplug.retry_bits;
- dev_priv->hotplug.retry_bits = 0;
+ hpd_event_bits = dev_priv->display.hotplug.event_bits;
+ dev_priv->display.hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
+ dev_priv->display.hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -435,10 +435,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
retry &= ~changed;
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.retry_bits |= retry;
+ dev_priv->display.hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
}
}
@@ -502,10 +502,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->hotplug.long_port_mask |= BIT(port);
+ dev_priv->display.hotplug.long_port_mask |= BIT(port);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->hotplug.short_port_mask |= BIT(port);
+ dev_priv->display.hotplug.short_port_mask |= BIT(port);
}
}
@@ -516,7 +516,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
@@ -529,7 +529,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
continue;
}
- if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
+ if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -540,13 +540,13 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->hotplug.event_bits |= BIT(pin);
+ dev_priv->display.hotplug.event_bits |= BIT(pin);
long_hpd = true;
queue_hp = true;
}
if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->hotplug.event_bits &= ~BIT(pin);
+ dev_priv->display.hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -567,9 +567,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+ queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
/**
@@ -594,8 +594,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
return;
for_each_hpd_pin(i) {
- dev_priv->hotplug.stats[i].count = 0;
- dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ dev_priv->display.hotplug.stats[i].count = 0;
+ dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
}
/*
@@ -611,7 +611,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- hotplug.poll_init_work);
+ display.hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -619,7 +619,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+ enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
@@ -672,7 +672,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
!INTEL_DISPLAY_ENABLED(dev_priv))
return;
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -680,7 +680,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- schedule_work(&dev_priv->hotplug.poll_init_work);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
/**
@@ -707,17 +707,17 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
- schedule_work(&dev_priv->hotplug.poll_init_work);
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
- INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+ INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
}
@@ -728,17 +728,17 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.long_port_mask = 0;
- dev_priv->hotplug.short_port_mask = 0;
- dev_priv->hotplug.event_bits = 0;
- dev_priv->hotplug.retry_bits = 0;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ dev_priv->display.hotplug.short_port_mask = 0;
+ dev_priv->display.hotplug.event_bits = 0;
+ dev_priv->display.hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- cancel_work_sync(&dev_priv->hotplug.dig_port_work);
- cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
- cancel_work_sync(&dev_priv->hotplug.poll_init_work);
- cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+ cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
+ cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@@ -749,8 +749,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
return false;
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
ret = true;
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -764,6 +764,6 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
return;
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
spin_unlock_irq(&dev_priv->irq_lock);
}
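Background sketch (toy model, not driver code): the storm detection documented above counts IRQs per pin inside HPD_STORM_DETECT_PERIOD, weighting long pulses as 10 and short pulses as 1, and flags a storm once display.hotplug.hpd_storm_threshold is exceeded; the jiffies-based window reset and the structure names are simplified here:
	#include <stdbool.h>
	struct example_hpd_stats {
		unsigned int count;
		unsigned int threshold;		/* e.g. HPD_STORM_DEFAULT_THRESHOLD */
	};
	static bool example_hpd_storm_hit(struct example_hpd_stats *s, bool long_hpd,
					  bool short_storm_enabled)
	{
		const unsigned int increment = long_hpd ? 10 : 1;
		/* Short IRQs are only counted when short-storm detection is enabled. */
		if (!s->threshold || (!long_hpd && !short_storm_enabled))
			return false;
		s->count += increment;
		return s->count > s->threshold;
	}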
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 4970bf146c4a..dca6003ccac8 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -73,8 +73,9 @@
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"
+#include "intel_pci_config.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL)
+#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL)
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
@@ -96,13 +97,13 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
- rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq;
+ rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
- rsc[1].start = pci_resource_start(pdev, 0) +
+ rsc[1].start = pci_resource_start(pdev, GTTMMADR_BAR) +
I915_HDMI_LPE_AUDIO_BASE;
- rsc[1].end = pci_resource_start(pdev, 0) +
+ rsc[1].end = pci_resource_start(pdev, GTTMMADR_BAR) +
I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
@@ -148,7 +149,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
- platform_device_unregister(dev_priv->audio.lpe.platdev);
+ platform_device_unregister(dev_priv->display.audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -167,7 +168,7 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->audio.lpe.irq;
+ int irq = dev_priv->display.audio.lpe.irq;
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
@@ -204,15 +205,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
{
int ret;
- dev_priv->audio.lpe.irq = irq_alloc_desc(0);
- if (dev_priv->audio.lpe.irq < 0) {
+ dev_priv->display.audio.lpe.irq = irq_alloc_desc(0);
+ if (dev_priv->display.audio.lpe.irq < 0) {
drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
- dev_priv->audio.lpe.irq);
- ret = dev_priv->audio.lpe.irq;
+ dev_priv->display.audio.lpe.irq);
+ ret = dev_priv->display.audio.lpe.irq;
goto err;
}
- drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq);
ret = lpe_audio_irq_init(dev_priv);
@@ -223,10 +224,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
goto err_free_irq;
}
- dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
+ dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
- if (IS_ERR(dev_priv->audio.lpe.platdev)) {
- ret = PTR_ERR(dev_priv->audio.lpe.platdev);
+ if (IS_ERR(dev_priv->display.audio.lpe.platdev)) {
+ ret = PTR_ERR(dev_priv->display.audio.lpe.platdev);
drm_err(&dev_priv->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
@@ -241,10 +242,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
return 0;
err_free_irq:
- irq_free_desc(dev_priv->audio.lpe.irq);
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
err:
- dev_priv->audio.lpe.irq = -1;
- dev_priv->audio.lpe.platdev = NULL;
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
return ret;
}
@@ -262,7 +263,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
if (!HAS_LPE_AUDIO(dev_priv))
return;
- ret = generic_handle_irq(dev_priv->audio.lpe.irq);
+ ret = generic_handle_irq(dev_priv->display.audio.lpe.irq);
if (ret)
drm_err_ratelimited(&dev_priv->drm,
"error handling LPE audio irq: %d\n", ret);
@@ -303,10 +304,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
- irq_free_desc(dev_priv->audio.lpe.irq);
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
- dev_priv->audio.lpe.irq = -1;
- dev_priv->audio.lpe.platdev = NULL;
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
}
/**
@@ -333,7 +334,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (!HAS_LPE_AUDIO(dev_priv))
return;
- pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev);
+ pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
@@ -361,7 +362,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B);
+ pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
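
Editor's note: the pattern running through this whole series is that per-display state (audio, cdclk, dbuf, opregion, pps, quirks, vbt, ...) moves from loose members of drm_i915_private into a grouped dev_priv->display.* substructure. A minimal sketch of the nesting the LPE audio hunks above imply; any name not visible in the diff itself is an assumption, the real definitions live in the i915 headers:

	/* Sketch only: illustrates the dev_priv->display.audio.lpe nesting. */
	struct intel_lpe_audio {
		int irq;				/* allocated via irq_alloc_desc() */
		struct platform_device *platdev;	/* "hdmi-lpe-audio" child device */
	};

	struct intel_display {
		struct {
			struct intel_lpe_audio lpe;
		} audio;
		/* ... cdclk, dbuf, opregion, pps, quirks, vbt, ... */
	};

	struct drm_i915_private {
		/* ... */
		struct intel_display display;		/* display state, newly grouped */
	};
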
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 730480ac3300..9aa38e8141b5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -837,12 +837,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+ drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->vbt.int_lvds_support) {
+ if (!dev_priv->display.vbt.int_lvds_support) {
drm_dbg_kms(&dev_priv->drm,
"Internal LVDS support disabled by VBT\n");
return;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index f0e04d3904c6..cbfabd58b75a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -23,6 +23,7 @@
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
#include "intel_pm.h"
+#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
@@ -30,11 +31,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct intel_encoder *encoder;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->bw_obj.state);
+ to_intel_bw_state(i915->display.bw.obj.state);
struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -70,7 +71,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
- i915->display->crtc_disable(to_intel_atomic_state(state), crtc);
+ i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
@@ -415,9 +416,9 @@ static void readout_plane_state(struct drm_i915_private *i915)
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -535,7 +536,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->bw_obj.state);
+ to_intel_bw_state(i915->display.bw.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a91586d77cb6..0fdcf2e6d57f 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -15,8 +15,8 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
-#include "intel_pm.h"
#include "intel_snps_phy.h"
+#include "skl_watermark.h"
/*
* Cross check the actual hw state with our own modeset state tracking (and its
@@ -94,10 +94,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
/*
* FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
+ * Yell if the encoder disagrees. Allow for slight
+ * rounding differences.
*/
- drm_WARN(&dev_priv->drm,
- !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
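
Editor's note: the fuzzy clock comparison is replaced by an explicit 1 kHz tolerance on the FDI vs. encoder dotclock. A stand-alone illustration of the new check; the clock values are made up for the example:

	#include <stdio.h>
	#include <stdlib.h>

	/* Mirrors the new tolerance test: at most 1 kHz of rounding difference
	 * between the FDI-derived dotclock and the encoder's dotclock. */
	int main(void)
	{
		int fdi_dotclock = 148500, dotclock = 148501;	/* kHz, illustrative */

		if (abs(fdi_dotclock - dotclock) > 1)
			printf("mismatch: fdi %d vs encoder %d\n", fdi_dotclock, dotclock);
		else
			printf("within rounding tolerance\n");
		return 0;
	}
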
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 1c0c745c142d..caa07ef34f21 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -252,7 +252,7 @@ struct opregion_asle_ext {
static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = i915->opregion.swsci;
+ struct opregion_swsci *swsci = i915->display.opregion.swsci;
u32 main_function, sub_function;
if (!swsci)
@@ -265,11 +265,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((i915->opregion.swsci_sbcb_sub_functions &
+ if ((i915->display.opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((i915->opregion.swsci_gbda_sub_functions &
+ if ((i915->display.opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
@@ -280,7 +280,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
- struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 scic, dslp;
u16 swsci_val;
@@ -462,7 +462,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
struct drm_device *dev = &dev_priv->drm;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@@ -586,8 +586,8 @@ static void asle_work(struct work_struct *work)
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
- container_of(opregion, struct drm_i915_private, opregion);
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ container_of(opregion, struct drm_i915_private, display.opregion);
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -635,8 +635,8 @@ static void asle_work(struct work_struct *work)
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- if (dev_priv->opregion.asle)
- schedule_work(&dev_priv->opregion.asle_work);
+ if (dev_priv->display.opregion.asle)
+ schedule_work(&dev_priv->display.opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -692,7 +692,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -731,7 +731,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -761,7 +761,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -839,7 +839,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
const struct firmware *fw = NULL;
const char *name = dev_priv->params.vbt_firmware;
int ret;
@@ -879,7 +879,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
@@ -1106,7 +1106,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
const void *in_edid;
const struct edid *edid;
struct edid *new_edid;
@@ -1141,7 +1141,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
struct opregion_header *header = opregion->header;
if (!header || header->over.major < 2 ||
@@ -1153,7 +1153,7 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
void intel_opregion_register(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1169,7 +1169,7 @@ void intel_opregion_register(struct drm_i915_private *i915)
void intel_opregion_resume(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1200,7 +1200,7 @@ void intel_opregion_resume(struct drm_i915_private *i915)
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1210,7 +1210,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
- cancel_work_sync(&i915->opregion.asle_work);
+ cancel_work_sync(&i915->display.opregion.asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
@@ -1218,7 +1218,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
void intel_opregion_unregister(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
intel_opregion_suspend(i915, PCI_D1);
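
Editor's note: one detail worth calling out in the asle_work hunk is that container_of() accepts a nested member designator, so container_of(opregion, struct drm_i915_private, display.opregion) works exactly like the old top-level member did. A stand-alone illustration of that mechanism; the structure names here are made up, not the i915 ones:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct opregion { int asle; };
	struct display  { struct opregion opregion; };
	struct gpu      { int id; struct display display; };

	int main(void)
	{
		struct gpu dev = { .id = 7 };
		struct opregion *op = &dev.display.opregion;

		/* Recover the containing device from a pointer to the nested member. */
		struct gpu *back = container_of(op, struct gpu, display.opregion);
		printf("id = %d\n", back->id);	/* prints 7 */
		return 0;
	}
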
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 79ed8bd04a07..c12bdca8da9b 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -211,9 +211,9 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
else
- intel_de_write(dev_priv, DSPCLK_GATE_D,
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
@@ -487,7 +487,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
void intel_overlay_reset(struct drm_i915_private *dev_priv)
{
- struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay *overlay = dev_priv->display.overlay;
if (!overlay)
return;
@@ -1113,7 +1113,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *new_bo;
int ret;
- overlay = dev_priv->overlay;
+ overlay = dev_priv->display.overlay;
if (!overlay) {
drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
@@ -1273,7 +1273,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
int ret;
- overlay = dev_priv->overlay;
+ overlay = dev_priv->display.overlay;
if (!overlay) {
drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
@@ -1416,7 +1416,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
- dev_priv->overlay = overlay;
+ dev_priv->display.overlay = overlay;
drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
@@ -1428,7 +1428,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->overlay);
+ overlay = fetch_and_zero(&dev_priv->display.overlay);
if (!overlay)
return;
@@ -1457,7 +1457,7 @@ struct intel_overlay_error_state {
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay *overlay = dev_priv->display.overlay;
struct intel_overlay_error_state *error;
if (!overlay || !overlay->active)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 237a40623dd7..a3a3f9fe4342 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -37,13 +37,14 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"
+#include "intel_quirks.h"
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
if (i915->params.panel_use_ssc >= 0)
return i915->params.panel_use_ssc != 0;
- return i915->vbt.lvds_use_ssc
- && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
+ return i915->display.vbt.lvds_use_ssc &&
+ !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
}
const struct drm_display_mode *
@@ -81,15 +82,14 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
mode->clock != preferred_mode->clock;
}
-static bool is_alt_vrr_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode)
+static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
{
return drm_mode_match(mode, preferred_mode,
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS) &&
mode->hdisplay == preferred_mode->hdisplay &&
- mode->vdisplay == preferred_mode->vdisplay &&
- mode->clock != preferred_mode->clock;
+ mode->vdisplay == preferred_mode->vdisplay;
}
const struct drm_display_mode *
@@ -114,6 +114,21 @@ intel_panel_downclock_mode(struct intel_connector *connector,
return best_mode;
}
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode;
+
+ /* pick the fixed_mode that has the highest clock */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ if (fixed_mode->clock > best_mode->clock)
+ best_mode = fixed_mode;
+ }
+
+ return best_mode;
+}
+
int intel_panel_get_modes(struct intel_connector *connector)
{
const struct drm_display_mode *fixed_mode;
@@ -172,19 +187,7 @@ int intel_panel_compute_config(struct intel_connector *connector,
return 0;
}
-static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode,
- bool has_vrr)
-{
- /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */
- if (has_vrr)
- return is_alt_vrr_mode(mode, preferred_mode);
- else
- return is_alt_drrs_mode(mode, preferred_mode);
-}
-
-static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector,
- bool has_vrr)
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *preferred_mode =
@@ -192,7 +195,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
- if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr))
+ if (!is_alt_fixed_mode(mode, preferred_mode))
continue;
drm_dbg_kms(&dev_priv->drm,
@@ -255,7 +258,7 @@ void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
{
intel_panel_add_edid_preferred_mode(connector);
if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
- intel_panel_add_edid_alt_fixed_modes(connector, has_vrr);
+ intel_panel_add_edid_alt_fixed_modes(connector);
intel_panel_destroy_probed_modes(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b087c0c3cc6d..eff3ffd3d082 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -31,6 +31,9 @@ intel_panel_fixed_mode(struct intel_connector *connector,
const struct drm_display_mode *
intel_panel_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *adjusted_mode);
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode);
int intel_panel_get_modes(struct intel_connector *connector);
enum drrs_type intel_panel_drrs_type(struct intel_connector *connector);
enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9934c8a9e240..a66097cdc1e0 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -167,6 +167,15 @@ static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
}
}
+int lpt_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct iclkip_params p;
+
+ lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);
+
+ return lpt_iclkip_freq(&p);
+}
+
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
@@ -179,6 +188,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
lpt_disable_iclkip(dev_priv);
lpt_compute_iclkip(&p, clock);
+ drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
@@ -514,7 +524,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
if (HAS_PCH_IBX(dev_priv)) {
- has_ck505 = dev_priv->vbt.display_clock_mode;
+ has_ck505 = dev_priv->display.vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
@@ -522,7 +532,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
@@ -654,7 +664,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- BUG_ON(val != final);
+ drm_WARN_ON(&dev_priv->drm, val != final);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index 12ab2c75a800..9bcf56629f24 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -14,6 +14,7 @@ struct intel_crtc_state;
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+int lpt_iclkip(const struct intel_crtc_state *crtc_state);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
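
Editor's note: the new lpt_iclkip() helper recomputes the iCLKIP divider parameters for a crtc state and returns the frequency they would actually produce, and lpt_program_iclkip() now warns if that round trip does not reproduce the requested clock. A hedged sketch of how a caller might use the exported helper; the caller context is an assumption, it is not shown in this patch:

	/* Sketch only: pre-validate that the iCLKIP math can hit the
	 * requested dotclock exactly before committing the state. */
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;	/* kHz */

	if (lpt_iclkip(crtc_state) != clock)
		drm_dbg_kms(&dev_priv->drm,
			    "iCLKIP cannot generate %d kHz exactly\n", clock);
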
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index d10f27d0b7b0..76be796df255 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -311,7 +311,7 @@ void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display->get_initial_plane_config(crtc, &plane_config);
+ dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 1b21a341962f..21944f5bf3a8 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -12,6 +12,7 @@
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_pps.h"
+#include "intel_quirks.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -28,7 +29,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
* See intel_pps_reset_all() why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
- mutex_lock(&dev_priv->pps_mutex);
+ mutex_lock(&dev_priv->display.pps.mutex);
return wakeref;
}
@@ -38,7 +39,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- mutex_unlock(&dev_priv->pps_mutex);
+ mutex_unlock(&dev_priv->display.pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
@@ -163,7 +164,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* We should never land here with regular DP ports */
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
@@ -212,7 +213,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
struct intel_connector *connector = intel_dp->attached_connector;
int backlight_controller = connector->panel.vbt.backlight.controller;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* We should never land here with regular DP ports */
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
@@ -282,7 +283,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum port port = dig_port->base.port;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
@@ -407,7 +408,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
@@ -420,7 +421,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
@@ -463,7 +464,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
intel_pps_verify_state(intel_dp);
@@ -556,7 +557,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
@@ -580,7 +581,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return false;
@@ -657,7 +658,7 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
@@ -748,7 +749,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -771,7 +772,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -832,7 +833,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -991,7 +992,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1021,7 +1022,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
@@ -1064,7 +1065,7 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -1176,7 +1177,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
@@ -1202,7 +1203,7 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
drm_dbg_kms(&dev_priv->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
@@ -1223,7 +1224,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -1246,7 +1247,7 @@ static void pps_init_delays(struct intel_dp *intel_dp)
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* already initialized? */
if (pps_delays_valid(final))
@@ -1312,7 +1313,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
intel_pps_get_registers(intel_dp, &regs);
@@ -1487,11 +1488,11 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
void intel_pps_setup(struct drm_i915_private *i915)
{
if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->pps_mmio_base = PCH_PPS_BASE;
+ i915->display.pps.mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->pps_mmio_base = VLV_PPS_BASE;
+ i915->display.pps.mmio_base = VLV_PPS_BASE;
else
- i915->pps_mmio_base = PPS_BASE;
+ i915->display.pps.mmio_base = PPS_BASE;
}
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
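
Editor's note: all of the panel power sequencer state is now asserted against display.pps.mutex, but the locking discipline is unchanged: callers go through intel_pps_lock()/intel_pps_unlock(), which also hold a DISPLAY_CORE power reference, and the *_unlocked helpers assert the mutex. A hedged caller sketch of that pairing; the surrounding code is assumed, not part of this patch:

	/* Sketch only: typical PPS access pattern implied by the hunks above. */
	intel_wakeref_t wakeref;

	wakeref = intel_pps_lock(intel_dp);	/* power ref + display.pps.mutex */
	intel_pps_vdd_on_unlocked(intel_dp);	/* _unlocked helpers assert the mutex */
	/* ... touch PP_CONTROL / PP_STATUS ... */
	intel_pps_unlock(intel_dp, wakeref);
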
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index e6a870641cd2..9def8d9fade6 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -706,7 +706,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -805,13 +805,14 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
- /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
- req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
+ /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
+ req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
if ((hblank_ns - req_ns) > 100)
return true;
- if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ /* Not supported <13 / Wa_22012279113:adl-p */
+ if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
@@ -1721,8 +1722,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
new_plane_state, i) {
struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
.x2 = INT_MAX };
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
@@ -1767,22 +1766,18 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
continue;
}
- drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ src = drm_plane_state_src(&new_plane_state->uapi);
+ drm_rect_fp_to_int(&src, &src);
- drm_atomic_helper_damage_iter_init(&iter,
- &old_plane_state->uapi,
- &new_plane_state->uapi);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (drm_rect_intersect(&clip, &src))
- clip_area_update(&damaged_area, &clip,
- &crtc_state->pipe_src);
- }
-
- if (damaged_area.y1 == -1)
+ if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
+ &new_plane_state->uapi, &damaged_area))
continue;
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
}
@@ -1863,7 +1858,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
@@ -1871,7 +1868,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
- crtc_state->uapi.encoder_mask) {
+ old_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_psr *psr = &intel_dp->psr;
bool needs_to_disable = false;
@@ -1884,10 +1881,10 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
* - All planes will go inactive
* - Changing between PSR versions
*/
- needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
- needs_to_disable |= !crtc_state->has_psr;
- needs_to_disable |= !crtc_state->active_planes;
- needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
+ needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
+ needs_to_disable |= !new_crtc_state->has_psr;
+ needs_to_disable |= !new_crtc_state->active_planes;
+ needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
if (psr->enabled && needs_to_disable)
intel_psr_disable_locked(intel_dp);
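
Editor's note: the SDP-prior-scanline requirement changes from (72 / lanes) to ((60 / lanes) + 11) symbols' worth of time, compared against the hblank duration. A stand-alone calculation of the two quantities from the hunk above; the lane count and clock values are illustrative, not taken from the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Illustrative: 4 lanes, port_clock 270000 kHz, 560 pixels of
		 * hblank at a 297000 kHz dotclock. */
		int lane_count = 4, port_clock = 270000;
		int hblank_total = 560, crtc_clock = 297000;

		uint64_t hblank_ns = 1000000ULL * hblank_total / crtc_clock;	/* ~1885 ns */
		unsigned int req_ns =
			((60 / lane_count) + 11) * 1000 / (port_clock / 1000);	/* ~96 ns */

		/* The early-SDP path is usable only if hblank leaves >100 ns of slack. */
		int ok = (hblank_ns - req_ns) > 100;

		printf("hblank %llu ns, required %u ns, ok=%d\n",
		       (unsigned long long)hblank_ns, req_ns, ok);
		return 0;
	}
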
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index c8488f5ebd04..6e48d3bcdfec 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -9,12 +9,17 @@
#include "intel_display_types.h"
#include "intel_quirks.h"
+static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ i915->display.quirks.mask |= BIT(quirk);
+}
+
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}
@@ -24,14 +29,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
*/
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
drm_info(&i915->drm, "applying backlight present quirk\n");
}
@@ -40,7 +45,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
*/
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+ intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
drm_info(&i915->drm, "Applying T12 delay quirk\n");
}
@@ -50,13 +55,13 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
*/
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK;
+ intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
}
@@ -191,6 +196,9 @@ static struct intel_quirk intel_quirks[] = {
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
};
void intel_init_quirks(struct drm_i915_private *i915)
@@ -213,3 +221,8 @@ void intel_init_quirks(struct drm_i915_private *i915)
intel_dmi_quirks[i].hook(i915);
}
}
+
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ return i915->display.quirks.mask & BIT(quirk);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index b0fcff142a56..10a4d163149f 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -6,8 +6,20 @@
#ifndef __INTEL_QUIRKS_H__
#define __INTEL_QUIRKS_H__
+#include <linux/types.h>
+
struct drm_i915_private;
-void intel_init_quirks(struct drm_i915_private *dev_priv);
+enum intel_quirk_id {
+ QUIRK_BACKLIGHT_PRESENT,
+ QUIRK_INCREASE_DDI_DISABLED_TIME,
+ QUIRK_INCREASE_T12_DELAY,
+ QUIRK_INVERT_BRIGHTNESS,
+ QUIRK_LVDS_SSC_DISABLE,
+ QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+};
+
+void intel_init_quirks(struct drm_i915_private *i915);
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
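
Editor's note: the quirk bookkeeping moves from an open-coded flags word to an enum-indexed bitmask behind intel_set_quirk()/intel_has_quirk(). The same pattern in miniature as a stand-alone program; BIT() is redefined locally since this is not kernel context, and only two enum values are carried over for brevity:

	#include <stdio.h>

	#define BIT(nr) (1UL << (nr))

	enum quirk_id {			/* mirrors enum intel_quirk_id */
		QUIRK_BACKLIGHT_PRESENT,
		QUIRK_LVDS_SSC_DISABLE,
	};

	struct quirks { unsigned long mask; };

	static void set_quirk(struct quirks *q, enum quirk_id quirk)
	{
		q->mask |= BIT(quirk);
	}

	static int has_quirk(const struct quirks *q, enum quirk_id quirk)
	{
		return (q->mask & BIT(quirk)) != 0;
	}

	int main(void)
	{
		struct quirks q = { 0 };

		set_quirk(&q, QUIRK_LVDS_SSC_DISABLE);
		printf("ssc disable: %d, backlight present: %d\n",
		       has_quirk(&q, QUIRK_LVDS_SSC_DISABLE),
		       has_quirk(&q, QUIRK_BACKLIGHT_PRESENT));	/* 1, 0 */
		return 0;
	}
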
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 19122bc6d2ab..f5b744bef18f 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2016,7 +2016,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
- dev_priv->vbt.crt_ddc_pin));
+ dev_priv->display.vbt.crt_ddc_pin));
}
static enum drm_connector_status
@@ -2581,9 +2581,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->vbt.sdvo_mappings[0];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->vbt.sdvo_mappings[1];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized)
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2599,9 +2599,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
u8 pin;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->vbt.sdvo_mappings[0];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->vbt.sdvo_mappings[1];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@@ -2639,11 +2639,11 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
- my_mapping = &dev_priv->vbt.sdvo_mappings[0];
- other_mapping = &dev_priv->vbt.sdvo_mappings[1];
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
} else {
- my_mapping = &dev_priv->vbt.sdvo_mappings[1];
- other_mapping = &dev_priv->vbt.sdvo_mappings[0];
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 0bdbedc67d7d..937cefd6f78f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -518,6 +518,1086 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
};
/* values in the below table are calculated using the algorithm */
+static const struct intel_mpllb_state dg2_hdmi_25200 = {
+ .clock = 25200,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_27027 = {
+ .clock = 27027,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_28320 = {
+ .clock = 28320,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_30240 = {
+ .clock = 30240,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_31500 = {
+ .clock = 31500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_36000 = {
+ .clock = 36000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_40000 = {
+ .clock = 40000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_49500 = {
+ .clock = 49500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_50000 = {
+ .clock = 50000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_57284 = {
+ .clock = 57284,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_58000 = {
+ .clock = 58000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_65000 = {
+ .clock = 65000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_71000 = {
+ .clock = 71000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_74176 = {
+ .clock = 74176,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_75000 = {
+ .clock = 75000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_78750 = {
+ .clock = 78750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_85500 = {
+ .clock = 85500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_88750 = {
+ .clock = 88750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_106500 = {
+ .clock = 106500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_108000 = {
+ .clock = 108000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_115500 = {
+ .clock = 115500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_119000 = {
+ .clock = 119000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_135000 = {
+ .clock = 135000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_138500 = {
+ .clock = 138500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_147160 = {
+ .clock = 147160,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_148352 = {
+ .clock = 148352,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_154000 = {
+ .clock = 154000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_162000 = {
+ .clock = 162000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_209800 = {
+ .clock = 209800,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_262750 = {
+ .clock = 262750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ .clock = 268500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_296703 = {
+ .clock = 296703,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_241500 = {
+ .clock = 241500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ .clock = 497750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_592000 = {
+ .clock = 592000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_593407 = {
+ .clock = 593407,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7549),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_297 = {
.clock = 297000,
.ref_control =
@@ -584,6 +1664,42 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_148_5,
&dg2_hdmi_297,
&dg2_hdmi_594,
+ &dg2_hdmi_25200,
+ &dg2_hdmi_27027,
+ &dg2_hdmi_28320,
+ &dg2_hdmi_30240,
+ &dg2_hdmi_31500,
+ &dg2_hdmi_36000,
+ &dg2_hdmi_40000,
+ &dg2_hdmi_49500,
+ &dg2_hdmi_50000,
+ &dg2_hdmi_57284,
+ &dg2_hdmi_58000,
+ &dg2_hdmi_65000,
+ &dg2_hdmi_71000,
+ &dg2_hdmi_74176,
+ &dg2_hdmi_75000,
+ &dg2_hdmi_78750,
+ &dg2_hdmi_85500,
+ &dg2_hdmi_88750,
+ &dg2_hdmi_106500,
+ &dg2_hdmi_108000,
+ &dg2_hdmi_115500,
+ &dg2_hdmi_119000,
+ &dg2_hdmi_135000,
+ &dg2_hdmi_138500,
+ &dg2_hdmi_147160,
+ &dg2_hdmi_148352,
+ &dg2_hdmi_154000,
+ &dg2_hdmi_162000,
+ &dg2_hdmi_209800,
+ &dg2_hdmi_241500,
+ &dg2_hdmi_262750,
+ &dg2_hdmi_268500,
+ &dg2_hdmi_296703,
+ &dg2_hdmi_497750,
+ &dg2_hdmi_592000,
+ &dg2_hdmi_593407,
NULL,
};
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 2713faad0625..7649c50b5445 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -39,7 +39,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include "i915_drv.h"
@@ -1355,8 +1354,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int min_scale = DRM_PLANE_NO_SCALING;
+ int max_scale = DRM_PLANE_NO_SCALING;
int ret;
if (g4x_fb_scalable(plane_state->hw.fb)) {
@@ -1426,8 +1425,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return ret;
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6773840f6cc7..e5af955b5600 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -246,7 +246,7 @@ static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
- u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
u32 mask = 0;
u32 val;
@@ -279,7 +279,7 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
- u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
struct intel_uncore *uncore = &i915->uncore;
u32 val, mask = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9379f3463344..dcf89d701f0f 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -39,6 +39,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
@@ -982,10 +983,10 @@ intel_tv_mode_vdisplay(const struct tv_mode *tv_mode)
static void
intel_tv_mode_to_mode(struct drm_display_mode *mode,
- const struct tv_mode *tv_mode)
+ const struct tv_mode *tv_mode,
+ int clock)
{
- mode->clock = tv_mode->clock /
- (tv_mode->oversample >> !tv_mode->progressive);
+ mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive);
/*
* tv_mode horizontal timings:
@@ -1143,7 +1144,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
xsize = tmp >> 16;
ysize = tmp & 0xffff;
- intel_tv_mode_to_mode(&mode, &tv_mode);
+ intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock);
drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&mode));
@@ -1184,6 +1185,9 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(pipe_config->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
@@ -1192,6 +1196,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
int hdisplay = adjusted_mode->crtc_hdisplay;
int vdisplay = adjusted_mode->crtc_vdisplay;
+ int ret;
if (!tv_mode)
return -EINVAL;
@@ -1206,7 +1211,13 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = tv_mode->clock;
- intel_tv_mode_to_mode(adjusted_mode, tv_mode);
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ pipe_config->clock_set = true;
+
+ intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock);
drm_mode_set_crtcinfo(adjusted_mode, 0);
if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
@@ -1804,7 +1815,7 @@ intel_tv_get_modes(struct drm_connector *connector)
* about the actual timings of the mode. We
* do ignore the margins though.
*/
- intel_tv_mode_to_mode(mode, tv_mode);
+ intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock);
if (count == 0) {
drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 509b0a419c20..a9f44abfc9fc 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -76,6 +76,20 @@ struct bdb_header {
} __packed;
/*
+ * BDB version number dependencies are documented as:
+ *
+ * <start>+
+ * indicates the field was introduced in version <start>
+ * and is still valid
+ *
+ * <start>-<end>
+ * indicates the field was introduced in version <start>
+ * and obsoleted in version <end>+1.
+ *
+ * ??? indicates the specific version number is unknown
+ */
+
+/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
@@ -144,12 +158,12 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rotate_180:1; /* 181 */
+ u8 rotate_180:1; /* 181+ */
u8 fdi_rx_polarity_inverted:1;
- u8 vbios_extended_mode:1; /* 160 */
- u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
- u8 panel_best_fit_timing:1; /* 160 */
- u8 ignore_strap_state:1; /* 160 */
+ u8 vbios_extended_mode:1; /* 160+ */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160+ */
+ u8 panel_best_fit_timing:1; /* 160+ */
+ u8 ignore_strap_state:1; /* 160+ */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -164,11 +178,11 @@ struct bdb_general_features {
u8 rsvd11:2; /* finish byte */
/* bits 6 */
- u8 tc_hpd_retry_timeout:7; /* 242 */
+ u8 tc_hpd_retry_timeout:7; /* 242+ */
u8 rsvd12:1;
/* bits 7 */
- u8 afc_startup_config:2;/* 249 */
+ u8 afc_startup_config:2; /* 249+ */
u8 rsvd13:6;
} __packed;
@@ -183,6 +197,15 @@ struct bdb_general_features {
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
/* Device handle */
+#define DEVICE_HANDLE_CRT 0x0001
+#define DEVICE_HANDLE_EFP1 0x0004
+#define DEVICE_HANDLE_EFP2 0x0040
+#define DEVICE_HANDLE_EFP3 0x0020
+#define DEVICE_HANDLE_EFP4 0x0010 /* 194+ */
+#define DEVICE_HANDLE_EFP5 0x0002 /* 215+ */
+#define DEVICE_HANDLE_EFP6 0x0001 /* 217+ */
+#define DEVICE_HANDLE_EFP7 0x0100 /* 217+ */
+#define DEVICE_HANDLE_EFP8 0x0200 /* 217+ */
#define DEVICE_HANDLE_LFP1 0x0008
#define DEVICE_HANDLE_LFP2 0x0080
@@ -275,27 +298,27 @@ struct bdb_general_features {
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11 /* 193 */
-#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_DPE 11 /* 193+ */
+#define DVO_PORT_HDMIE 12 /* 193+ */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
-#define DVO_PORT_DPG 15 /* 217 */
-#define DVO_PORT_HDMIG 16 /* 217 */
-#define DVO_PORT_DPH 17 /* 217 */
-#define DVO_PORT_HDMIH 18 /* 217 */
-#define DVO_PORT_DPI 19 /* 217 */
-#define DVO_PORT_HDMII 20 /* 217 */
-#define DVO_PORT_MIPIA 21 /* 171 */
-#define DVO_PORT_MIPIB 22 /* 171 */
-#define DVO_PORT_MIPIC 23 /* 171 */
-#define DVO_PORT_MIPID 24 /* 171 */
-
-#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */
-#define HDMI_MAX_DATA_RATE_297 1 /* 204 */
-#define HDMI_MAX_DATA_RATE_165 2 /* 204 */
-#define HDMI_MAX_DATA_RATE_594 3 /* 249 */
-#define HDMI_MAX_DATA_RATE_340 4 /* 249 */
-#define HDMI_MAX_DATA_RATE_300 5 /* 249 */
+#define DVO_PORT_DPG 15 /* 217+ */
+#define DVO_PORT_HDMIG 16 /* 217+ */
+#define DVO_PORT_DPH 17 /* 217+ */
+#define DVO_PORT_HDMIH 18 /* 217+ */
+#define DVO_PORT_DPI 19 /* 217+ */
+#define DVO_PORT_HDMII 20 /* 217+ */
+#define DVO_PORT_MIPIA 21 /* 171+ */
+#define DVO_PORT_MIPIB 22 /* 171+ */
+#define DVO_PORT_MIPIC 23 /* 171+ */
+#define DVO_PORT_MIPID 24 /* 171+ */
+
+#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204+ */
+#define HDMI_MAX_DATA_RATE_297 1 /* 204+ */
+#define HDMI_MAX_DATA_RATE_165 2 /* 204+ */
+#define HDMI_MAX_DATA_RATE_594 3 /* 249+ */
+#define HDMI_MAX_DATA_RATE_340 4 /* 249+ */
+#define HDMI_MAX_DATA_RATE_300 5 /* 249+ */
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
@@ -362,10 +385,10 @@ enum vbt_gmbus_ddi {
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
- * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
- * space for the full structure below, and initialize the tail not actually
- * present in VBT to zeros. Accessing those fields is fine, as long as the
- * default zero is taken into account, again according to the BDB version.
+ * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * reserve space for the full structure below, and initialize the tail not
+ * actually present in VBT to zeros. Accessing those fields is fine, as long as
+ * the default zero is taken into account, again according to the BDB version.
*
* BDB versions 155 and below are considered legacy, and version 155 seems to be
* a baseline for some of the VBT documentation. When adding new fields, please
@@ -379,20 +402,30 @@ struct child_device_config {
u8 device_id[10]; /* ascii string */
struct {
u8 i2c_speed;
- u8 dp_onboard_redriver; /* 158 */
- u8 dp_ondock_redriver; /* 158 */
- u8 hdmi_level_shifter_value:5; /* 169 */
- u8 hdmi_max_data_rate:3; /* 204 */
- u16 dtd_buf_ptr; /* 161 */
- u8 edidless_efp:1; /* 161 */
- u8 compression_enable:1; /* 198 */
- u8 compression_method_cps:1; /* 198 */
- u8 ganged_edp:1; /* 202 */
- u8 reserved0:4;
- u8 compression_structure_index:4; /* 198 */
- u8 reserved1:4;
- u8 slave_port; /* 202 */
- u8 reserved2;
+ u8 dp_onboard_redriver_preemph:3; /* 158+ */
+ u8 dp_onboard_redriver_vswing:3; /* 158+ */
+ u8 dp_onboard_redriver_present:1; /* 158+ */
+ u8 reserved0:1;
+ u8 dp_ondock_redriver_preemph:3; /* 158+ */
+ u8 dp_ondock_redriver_vswing:3; /* 158+ */
+ u8 dp_ondock_redriver_present:1; /* 158+ */
+ u8 reserved1:1;
+ u8 hdmi_level_shifter_value:5; /* 158+ */
+ u8 hdmi_max_data_rate:3; /* 204+ */
+ u16 dtd_buf_ptr; /* 161+ */
+ u8 edidless_efp:1; /* 161+ */
+ u8 compression_enable:1; /* 198+ */
+ u8 compression_method_cps:1; /* 198+ */
+ u8 ganged_edp:1; /* 202+ */
+ u8 lttpr_non_transparent:1; /* 235+ */
+ u8 disable_compression_for_ext_disp:1; /* 251+ */
+ u8 reserved2:2;
+ u8 compression_structure_index:4; /* 198+ */
+ u8 reserved3:4;
+ u8 hdmi_max_frl_rate:4; /* 237+ */
+ u8 hdmi_max_frl_rate_valid:1; /* 237+ */
+ u8 reserved4:3; /* 237+ */
+ u8 reserved5;
} __packed;
} __packed;
@@ -412,16 +445,16 @@ struct child_device_config {
u8 ddc2_pin;
} __packed;
struct {
- u8 efp_routed:1; /* 158 */
- u8 lane_reversal:1; /* 184 */
- u8 lspcon:1; /* 192 */
- u8 iboost:1; /* 196 */
- u8 hpd_invert:1; /* 196 */
- u8 use_vbt_vswing:1; /* 218 */
- u8 flag_reserved:2;
- u8 hdmi_support:1; /* 158 */
- u8 dp_support:1; /* 158 */
- u8 tmds_support:1; /* 158 */
+ u8 efp_routed:1; /* 158+ */
+ u8 lane_reversal:1; /* 184+ */
+ u8 lspcon:1; /* 192+ */
+ u8 iboost:1; /* 196+ */
+ u8 hpd_invert:1; /* 196+ */
+ u8 use_vbt_vswing:1; /* 218+ */
+ u8 dp_max_lane_count:2; /* 244+ */
+ u8 hdmi_support:1; /* 158+ */
+ u8 dp_support:1; /* 158+ */
+ u8 tmds_support:1; /* 158+ */
u8 support_reserved:5;
u8 aux_channel;
u8 dongle_detect;
@@ -429,7 +462,7 @@ struct child_device_config {
} __packed;
u8 pipe_cap:2;
- u8 sdvo_stall:1; /* 158 */
+ u8 sdvo_stall:1; /* 158+ */
u8 hpd_status:2;
u8 integrated_encoder:1;
u8 capabilities_reserved:2;
@@ -437,21 +470,21 @@ struct child_device_config {
union {
u8 dvo2_wiring;
- u8 mipi_bridge_type; /* 171 */
+ u8 mipi_bridge_type; /* 171+ */
} __packed;
u16 extended_type;
u8 dvo_function;
- u8 dp_usb_type_c:1; /* 195 */
- u8 tbt:1; /* 209 */
- u8 flags2_reserved:2; /* 195 */
- u8 dp_port_trace_length:4; /* 209 */
- u8 dp_gpio_index; /* 195 */
- u16 dp_gpio_pin_num; /* 195 */
- u8 dp_iboost_level:4; /* 196 */
- u8 hdmi_iboost_level:4; /* 196 */
- u8 dp_max_link_rate:3; /* 216/230 GLK+ */
- u8 dp_max_link_rate_reserved:5; /* 216/230 */
+ u8 dp_usb_type_c:1; /* 195+ */
+ u8 tbt:1; /* 209+ */
+ u8 flags2_reserved:2; /* 195+ */
+ u8 dp_port_trace_length:4; /* 209+ */
+ u8 dp_gpio_index; /* 195+ */
+ u16 dp_gpio_pin_num; /* 195+ */
+ u8 dp_iboost_level:4; /* 196+ */
+ u8 hdmi_iboost_level:4; /* 196+ */
+ u8 dp_max_link_rate:3; /* 216+ */
+ u8 dp_max_link_rate_reserved:5; /* 216+ */
} __packed;
struct bdb_general_definitions {
@@ -459,7 +492,7 @@ struct bdb_general_definitions {
u8 crt_ddc_gmbus_pin;
/* DPMS bits */
- u8 dpms_acpi:1;
+ u8 dpms_non_acpi:1;
u8 skip_boot_crt_detect:1;
u8 dpms_aim:1;
u8 rsvd1:5; /* finish byte */
@@ -488,25 +521,25 @@ struct bdb_general_definitions {
struct psr_table {
/* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
+ u8 full_link:1; /* 165+ */
+ u8 require_aux_to_wakeup:1; /* 165+ */
u8 feature_bits_rsvd:6;
/* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
+ u8 idle_frames:4; /* 165+ */
+ u8 lines_to_wait:3; /* 165+ */
u8 wait_times_rsvd:1;
/* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
+ u16 tp1_wakeup_time; /* 165+ */
+ u16 tp2_tp3_wakeup_time; /* 165+ */
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
/* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
+ u32 psr2_tp2_tp3_wakeup_time; /* 226+ */
} __packed;
/*
@@ -519,9 +552,10 @@ struct bdb_psr {
#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
+ /* Driver bits */
u8 boot_dev_algorithm:1;
- u8 block_display_switch:1;
- u8 allow_display_switch:1;
+ u8 allow_display_switch_dvd:1;
+ u8 allow_display_switch_dos:1;
u8 hotplug_dvo:1;
u8 dual_view_zoom:1;
u8 int15h_hook:1;
@@ -533,6 +567,7 @@ struct bdb_driver_features {
u8 boot_mode_bpp;
u8 boot_mode_refresh;
+ /* Extended Driver Bits 1 */
u16 enable_lfp_primary:1;
u16 selective_mode_pruning:1;
u16 dual_frequency:1;
@@ -548,29 +583,40 @@ struct bdb_driver_features {
u16 tv_hotplug:1;
u16 hdmi_config:2;
- u8 static_display:1;
- u8 reserved2:7;
+ /* Driver Flags 1 */
+ u8 static_display:1; /* 163+ */
+ u8 embedded_platform:1; /* 163+ */
+ u8 display_subsystem_enable:1; /* 163+ */
+ u8 reserved0:5;
+
u16 legacy_crt_max_x;
u16 legacy_crt_max_y;
u8 legacy_crt_max_refresh;
- u8 hdmi_termination;
- u8 custom_vbt_version;
- /* Driver features data block */
- u16 rmpm_enabled:1;
- u16 s2ddt_enabled:1;
- u16 dpst_enabled:1;
- u16 bltclt_enabled:1;
- u16 adb_enabled:1;
- u16 drrs_enabled:1;
- u16 grs_enabled:1;
- u16 gpmt_enabled:1;
- u16 tbt_enabled:1;
- u16 psr_enabled:1;
- u16 ips_enabled:1;
- u16 reserved3:1;
- u16 dmrrs_enabled:1;
- u16 reserved4:2;
+ /* Extended Driver Bits 2 */
+ u8 hdmi_termination:1;
+ u8 cea861d_hdmi_support:1;
+ u8 self_refresh_enable:1;
+ u8 reserved1:5;
+
+ u8 custom_vbt_version; /* 155+ */
+
+ /* Driver Feature Flags */
+ u16 rmpm_enabled:1; /* 165+ */
+ u16 s2ddt_enabled:1; /* 165+ */
+ u16 dpst_enabled:1; /* 165-227 */
+ u16 bltclt_enabled:1; /* 165+ */
+ u16 adb_enabled:1; /* 165-227 */
+ u16 drrs_enabled:1; /* 165-227 */
+ u16 grs_enabled:1; /* 165+ */
+ u16 gpmt_enabled:1; /* 165+ */
+ u16 tbt_enabled:1; /* 165+ */
+ u16 psr_enabled:1; /* 165-227 */
+ u16 ips_enabled:1; /* 165+ */
+ u16 dpfs_enabled:1; /* 165+ */
+ u16 dmrrs_enabled:1; /* 174-227 */
+ u16 adt_enabled:1; /* ???-228 */
+ u16 hpd_wake:1; /* 201-240 */
u16 pc_feature_valid:1;
} __packed;
@@ -657,7 +703,7 @@ struct bdb_sdvo_panel_dtds {
struct edp_fast_link_params {
- u8 rate:4;
+ u8 rate:4; /* ???-223 */
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
@@ -690,18 +736,18 @@ struct bdb_edp {
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature; /* 162 */
- u16 edp_t3_optimization; /* 165 */
- u64 edp_vswing_preemph; /* 173 */
- u16 fast_link_training; /* 182 */
- u16 dpcd_600h_write_required; /* 185 */
- struct edp_pwm_delays pwm_delays[16]; /* 186 */
- u16 full_link_params_provided; /* 199 */
- struct edp_full_link_params full_link_params[16]; /* 199 */
- u16 apical_enable; /* 203 */
- struct edp_apical_params apical_params[16]; /* 203 */
- u16 edp_fast_link_training_rate[16]; /* 224 */
- u16 edp_max_port_link_rate[16]; /* 244 */
+ u16 edp_s3d_feature; /* 162+ */
+ u16 edp_t3_optimization; /* 165+ */
+ u64 edp_vswing_preemph; /* 173+ */
+ u16 fast_link_training; /* 182+ */
+ u16 dpcd_600h_write_required; /* 185+ */
+ struct edp_pwm_delays pwm_delays[16]; /* 186+ */
+ u16 full_link_params_provided; /* 199+ */
+ struct edp_full_link_params full_link_params[16]; /* 199+ */
+ u16 apical_enable; /* 203+ */
+ struct edp_apical_params apical_params[16]; /* 203+ */
+ u16 edp_fast_link_training_rate[16]; /* 224+ */
+ u16 edp_max_port_link_rate[16]; /* 244+ */
} __packed;
/*
@@ -710,14 +756,14 @@ struct bdb_edp {
struct bdb_lvds_options {
u8 panel_type;
- u8 panel_type2; /* 212 */
+ u8 panel_type2; /* 212+ */
/* LVDS capabilities, stored in a dword */
u8 pfit_mode:2;
u8 pfit_text_mode_enhanced:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_ratio_auto:1;
u8 pixel_dither:1;
- u8 lvds_edid:1;
+ u8 lvds_edid:1; /* ???-240 */
u8 rsvd2:1;
u8 rsvd4;
/* LVDS Panel channel bits stored here */
@@ -731,11 +777,11 @@ struct bdb_lvds_options {
/* LVDS panel type bits stored here */
u32 dps_panel_type_bits;
/* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
+ u32 blt_control_type_bits; /* ???-240 */
- u16 lcdvcc_s0_enable; /* 200 */
- u32 rotation; /* 228 */
- u32 position; /* 240 */
+ u16 lcdvcc_s0_enable; /* 200+ */
+ u32 rotation; /* 228+ */
+ u32 position; /* 240+ */
} __packed;
/*
@@ -756,7 +802,7 @@ struct lvds_lfp_data_ptr {
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries;
struct lvds_lfp_data_ptr ptr[16];
- struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */
+ struct lvds_lfp_data_ptr_table panel_name; /* (156-163?)+ */
} __packed;
/*
@@ -808,20 +854,20 @@ struct lvds_lfp_panel_name {
} __packed;
struct lvds_lfp_black_border {
- u8 top; /* 227 */
- u8 bottom; /* 227 */
- u8 left; /* 238 */
- u8 right; /* 238 */
+ u8 top; /* 227+ */
+ u8 bottom; /* 227+ */
+ u8 left; /* 238+ */
+ u8 right; /* 238+ */
} __packed;
struct bdb_lvds_lfp_data_tail {
- struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */
- u16 scaling_enable; /* 187 */
- u8 seamless_drrs_min_refresh_rate[16]; /* 188 */
- u8 pixel_overlap_count[16]; /* 208 */
- struct lvds_lfp_black_border black_border[16]; /* 227 */
- u16 dual_lfp_port_sync_enable; /* 231 */
- u16 gpu_dithering_for_banding_artifacts; /* 245 */
+ struct lvds_lfp_panel_name panel_name[16]; /* (156-163?)+ */
+ u16 scaling_enable; /* 187+ */
+ u8 seamless_drrs_min_refresh_rate[16]; /* 188+ */
+ u8 pixel_overlap_count[16]; /* 208+ */
+ struct lvds_lfp_black_border black_border[16]; /* 227+ */
+ u16 dual_lfp_port_sync_enable; /* 231+ */
+ u16 gpu_dithering_for_banding_artifacts; /* 245+ */
} __packed;
/*
@@ -836,7 +882,7 @@ struct lfp_backlight_data_entry {
u8 active_low_pwm:1;
u8 obsolete1:5;
u16 pwm_freq_hz;
- u8 min_brightness; /* Obsolete from 234+ */
+ u8 min_brightness; /* ???-233 */
u8 obsolete2;
u8 obsolete3;
} __packed;
@@ -859,7 +905,7 @@ struct lfp_brightness_level {
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
- u8 level[16]; /* Obsolete from 234+ */
+ u8 level[16]; /* ???-233 */
struct lfp_backlight_control_method backlight_control[16];
struct lfp_brightness_level brightness_level[16]; /* 234+ */
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
@@ -874,8 +920,8 @@ struct lfp_power_features {
u8 reserved1:1;
u8 power_conservation_pref:3;
u8 reserved2:1;
- u8 lace_enabled_status:1;
- u8 lace_support:1;
+ u8 lace_enabled_status:1; /* 210+ */
+ u8 lace_support:1; /* 210+ */
u8 als_enable:1;
} __packed;
@@ -895,24 +941,24 @@ struct aggressiveness_profile2_entry {
} __packed;
struct bdb_lfp_power {
- struct lfp_power_features features;
+ struct lfp_power_features features; /* ???-227 */
struct als_data_entry als[5];
- u8 lace_aggressiveness_profile:3;
+ u8 lace_aggressiveness_profile:3; /* 210-227 */
u8 reserved1:5;
- u16 dpst;
- u16 psr;
- u16 drrs;
- u16 lace_support;
- u16 adt;
- u16 dmrrs;
- u16 adb;
- u16 lace_enabled_status;
- struct aggressiveness_profile_entry aggressiveness[16];
- u16 hobl; /* 232+ */
- u16 vrr_feature_enabled; /* 233+ */
- u16 elp; /* 247+ */
- u16 opst; /* 247+ */
- struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+ u16 dpst; /* 228+ */
+ u16 psr; /* 228+ */
+ u16 drrs; /* 228+ */
+ u16 lace_support; /* 228+ */
+ u16 adt; /* 228+ */
+ u16 dmrrs; /* 228+ */
+ u16 adb; /* 228+ */
+ u16 lace_enabled_status; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ u16 hobl; /* 232+ */
+ u16 vrr_feature_enabled; /* 233+ */
+ u16 elp; /* 247+ */
+ u16 opst; /* 247+ */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
} __packed;
/*
@@ -922,10 +968,10 @@ struct bdb_lfp_power {
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */
- struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */
- u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175+ */
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177+ */
+ struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186+ */
+ u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 43e1bbc1e303..269f9792390d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -344,7 +344,7 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!INTEL_INFO(i915)->display.has_dsc)
+ if (!RUNTIME_INFO(i915)->has_dsc)
return false;
if (DISPLAY_VER(i915) >= 12)
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
@@ -597,6 +596,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_VER_MIN_SHIFT |
vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->dsc_version_minor == 2)
+ pps_val |= DSC_ALT_ICH_SEL;
if (vdsc_cfg->block_pred_enable)
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 04250a0fec3c..5eac99021875 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -142,11 +142,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* For XE_LPD+, we use guardband and pipeline override
* is deprecated.
*/
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 13) {
+ /*
+ * FIXME: Subtract Window2 delay from below value.
+ *
+ * Window2 specifies time required to program DSB (Window2) in
+ * number of scan lines. Assuming 0 for no DSB.
+ */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
- i915->window2_delay;
- else
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+ } else {
/*
* FIXME: s/4/framestart_delay/ to get consistent
* earliest/latest points for register latching regardless
@@ -159,6 +164,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.pipeline_full =
min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ }
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index c11e15a93164..7cb713043408 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_atomic_plane.h"
@@ -15,11 +14,11 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbc.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "skl_watermark.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
@@ -1856,8 +1855,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int min_scale = DRM_PLANE_NO_SCALING;
+ int max_scale = DRM_PLANE_NO_SCALING;
int ret;
ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1929,7 +1928,7 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum intel_fbc_id fbc_id, enum plane_id plane_id)
{
- if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0)
+ if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0)
return false;
return plane_id == PLANE_PRIMARY;
@@ -1941,7 +1940,7 @@ static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe);
if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id))
- return dev_priv->fbc[fbc_id];
+ return dev_priv->display.fbc[fbc_id];
else
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
new file mode 100644
index 000000000000..01b0932757ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -0,0 +1,3562 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_blend.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_bw.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "skl_watermark.h"
+
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+
+static void skl_sagv_disable(struct drm_i915_private *i915);
+
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
+{
+ u8 enabled_slices = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (intel_uncore_read(&i915->uncore,
+ DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ enabled_slices |= BIT(slice);
+ }
+
+ return enabled_slices;
+}
+
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9;
+}
+
+static bool
+intel_has_sagv(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+}
+
+static u32
+intel_sagv_block_time(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14) {
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
+
+ return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = snb_pcode_read(&i915->uncore,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ return 0;
+ }
+
+ return val;
+ } else if (DISPLAY_VER(i915) == 11) {
+ return 10;
+ } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ return 30;
+ } else {
+ return 0;
+ }
+}
+
+static void intel_sagv_init(struct drm_i915_private *i915)
+{
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+
+ /*
+ * Probe to see if we have working SAGV control.
+ * For icl+ this was already determined by intel_bw_init_hw().
+ */
+ if (DISPLAY_VER(i915) < 11)
+ skl_sagv_disable(i915);
+
+ drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+
+ i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+
+ drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+
+ /* avoid overflow when adding with wm0 latency/etc. */
+ if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ "Excessive SAGV block time %u, ignoring\n",
+ i915->display.sagv.block_time_us))
+ i915->display.sagv.block_time_us = 0;
+
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.block_time_us = 0;
+}
+
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+static void skl_sagv_enable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
+
+ /* We don't need to wait for SAGV when enabling */
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to enable SAGV\n");
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_ENABLED;
+}
+
+static void skl_sagv_disable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_DISABLED;
+}
+
+static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (!intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_disable(i915);
+}
+
+static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_enable(i915);
+}
+
+static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from the case where we have SAGV but just can't
+ * afford it due to DBuf limitations - if SAGV is completely disabled
+ * in the BIOS, we are not even allowed to send a PCode request, as it
+ * will throw an error. So we have to check it here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_pre_plane_update(state);
+ else
+ skl_sagv_pre_plane_update(state);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from the case where we have SAGV but just can't
+ * afford it due to DBuf limitations - if SAGV is completely disabled
+ * in the BIOS, we are not even allowed to send a PCode request, as it
+ * will throw an error. So we have to check it here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_post_plane_update(state);
+ else
+ skl_sagv_post_plane_update(state);
+}
+
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ int max_level = INT_MAX;
+
+ if (!intel_has_sagv(i915))
+ return false;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ int level;
+
+ /* Skip this plane if it's not enabled */
+ if (!wm->wm[0].enable)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(i915);
+ !wm->wm[level].enable; --level)
+ { }
+
+ /* Highest common enabled wm level for all planes */
+ max_level = min(level, max_level);
+ }
+
+ /* No enabled planes? */
+ if (max_level == INT_MAX)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * All enabled planes must have enabled a common wm level that
+ * can tolerate memory latencies higher than sagv_block_time_us
+ */
+ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
+ return false;
+ }
+
+ return true;
+}
+
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (wm->wm[0].enable && !wm->sagv.wm0.enable)
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_crtc_can_enable_sagv(crtc_state);
+ else
+ return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(i915) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ int ret;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
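+	/*
+	 * A change in the overall SAGV on/off decision must be serialized
+	 * against other commits, whereas a change only in the per-pipe
+	 * reject mask just needs to hold the global state lock.
+	 */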
+ if (intel_can_enable_sagv(i915, new_bw_state) !=
+ intel_can_enable_sagv(i915, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * the bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case)
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_can_enable_sagv(i915, new_bw_state);
+ }
+
+ return 0;
+}
+
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+ u16 start, u16 end)
+{
+ entry->start = start;
+ entry->end = end;
+
+ return end;
+}
+
+static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->display.dbuf.size /
+ hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
+}
+
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
+}
+
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+ struct skl_ddb_entry ddb;
+
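+	/*
+	 * The MBUS relative offset is the start of the first slice of the
+	 * pipe's slice set (S1/S2 -> S1, S3/S4 -> S3).
+	 */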
+ if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+ slice_mask = BIT(DBUF_S1);
+ else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+ slice_mask = BIT(DBUF_S3);
+
+ skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+ return ddb.start;
+}
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+ enum dbuf_slice start_slice, end_slice;
+ u8 slice_mask = 0;
+
+ if (!skl_ddb_entry_size(entry))
+ return 0;
+
+ start_slice = entry->start / slice_size;
+ end_slice = (entry->end - 1) / slice_size;
+
+ /*
+ * In the worst case a per-plane DDB entry can span multiple slices,
+ * but a single entry is always contiguous.
+ */
+ while (start_slice <= end_slice) {
+ slice_mask |= BIT(start_slice);
+ start_slice++;
+ }
+
+ return slice_mask;
+}
+
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ /*
+ * The watermark/DDB requirement depends heavily on the width of the
+ * framebuffer, so instead of allocating DDB equally among pipes,
+ * distribute it based on the resolution/width of each display.
+ */
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+
+ return hdisplay;
+}
+
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
+
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
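+	/*
+	 * Accumulate the weights of all pipes sharing for_pipe's slice set:
+	 * weight_start sums the pipes before for_pipe, weight_end additionally
+	 * includes for_pipe itself, and weight_total covers the whole set.
+	 * The caller uses these to carve out for_pipe's proportional share
+	 * of the shared DDB range.
+	 */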
+ for_each_pipe(i915, pipe) {
+ int weight = dbuf_state->weight[pipe];
+
+ /*
+ * Do not account for pipes using other slice sets. Luckily, as of
+ * the current BSpec, slice sets do not partially intersect (pipes
+ * share either the same single slice or the same slice set, i.e.
+ * no partial intersection), so checking for equality is enough
+ * for now.
+ */
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
+ continue;
+
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
+
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset = 0;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
+
+ if (new_dbuf_state->weight[pipe] == 0) {
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
+ goto out;
+ }
+
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
+
+ skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
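+	/* Carve out this pipe's share of the slice range in proportion to its weight. */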
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+ ddb_slices.start - mbus_offset + start,
+ ddb_slices.start - mbus_offset + end);
+
+out:
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
+ skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
+
+ return 0;
+}
+
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
+{
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
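+	/*
+	 * Compute the wm params for an assumed worst case cursor:
+	 * 256 pixels wide, linear ARGB8888, at the full pipe pixel rate.
+	 */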
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ drm_WARN_ON(&i915->drm, ret);
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ skl_ddb_entry_init(entry,
+ REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+ REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
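+	/* The hardware end value is inclusive, our representation is exclusive. */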
+ if (entry->end)
+ entry->end++;
+}
+
+static void
+skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+ const enum pipe pipe,
+ const enum plane_id plane_id,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ u32 val;
+
+ /* Cursor doesn't support NV12/planar, so no extra calculation needed */
+ if (plane_id == PLANE_CURSOR) {
+ val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(ddb, val);
+ return;
+ }
+
+ val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb, val);
+
+ if (DISPLAY_VER(i915) >= 11)
+ return;
+
+ val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb_y, val);
+}
+
+static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+ enum plane_id plane_id;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ wakeref = intel_display_power_get_if_enabled(i915, power_domain);
+ if (!wakeref)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(i915, pipe,
+ plane_id,
+ &ddb[plane_id],
+ &ddb_y[plane_id]);
+
+ intel_display_power_put(i915, power_domain, wakeref);
+}
+
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+ bool join_mbus;
+};
+
+/*
+ * Table taken from Bspec 12716.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * some hardcoded requirements on how those should be distributed
+ * for multi-pipe scenarios. With more DBuf slices the algorithm
+ * would only get messier and less readable, so we use a table
+ * almost as-is from the BSpec itself - that way it is at least
+ * easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from Bspec 49255.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * some hardcoded requirements on how those should be distributed
+ * for multi-pipe scenarios. With more DBuf slices the algorithm
+ * would only get messier and less readable, so we use a table
+ * almost as-is from the BSpec itself - that way it is at least
+ * easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].join_mbus;
+ }
+ return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+ return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
+
+/*
+ * This function finds an entry with the same enabled pipe configuration and
+ * returns the corresponding DBuf slice mask as stated in the BSpec for the
+ * particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ /*
+ * FIXME: For ICL this is still a bit unclear, as a previous BSpec revision
+ * required calculating a "pipe ratio" to determine whether one or two
+ * slices can be used for single pipe configurations, as an additional
+ * constraint on top of the existing table.
+ * However, based on more recent info, it should not be a "pipe ratio"
+ * but rather a ratio between pixel_rate and cdclk with additional
+ * constants, so for now we only use the table until this is clarified.
+ * This is also the reason the crtc_state param is still around - we
+ * will need it once those additional constraints pop up.
+ */
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
+}
+
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
+}
+
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_DG2(i915))
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) >= 13)
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 12)
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 11)
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ /*
+ * For anything else just return one slice for now.
+ * Should be extended for other platforms.
+ */
+ return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
+}
+
+static bool
+use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
+static u64
+skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ u64 data_rate = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->rel_data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->rel_data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id,
+ int level)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (level == 0 && pipe_wm->use_sagv_wm)
+ return &wm->sagv.wm0;
+
+ return &wm->wm[level];
+}
+
+static const struct skl_wm_level *
+skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (pipe_wm->use_sagv_wm)
+ return &wm->sagv.trans_wm;
+
+ return &wm->trans_wm;
+}
+
+/*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+static void
+skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
+ memset(wm, 0, sizeof(*wm));
+}
+
+static void
+skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
+ const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ memset(wm, 0, sizeof(*wm));
+ memset(uv_wm, 0, sizeof(*uv_wm));
+ }
+}
+
+static bool icl_need_wm1_wa(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ /*
+ * Wa_1408961008:icl, ehl
+ * Wa_14012656716:tgl, adl
+ * Underruns with WM1+ disabled
+ */
+ return DISPLAY_VER(i915) == 11 ||
+ (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+}
+
+struct skl_plane_ddb_iter {
+ u64 data_rate;
+ u16 start, size;
+};
+
+static void
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
+ const struct skl_wm_level *wm,
+ u64 data_rate)
+{
+ u16 size, extra = 0;
+
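+	/*
+	 * Hand out a share of the remaining extra blocks proportional to
+	 * this plane's share of the remaining data rate.
+	 */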
+ if (data_rate) {
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate,
+ iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+ }
+
+ /*
+ * Keep ddb entry of all disabled planes explicitly zeroed
+ * to avoid skl_ddb_add_affected_planes() adding them to
+ * the state when other planes change their allocations.
+ */
+ size = wm->min_ddb_alloc + extra;
+ if (size)
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + size);
+}
+
+static int
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
+ struct skl_plane_ddb_iter iter;
+ enum plane_id plane_id;
+ u16 cursor_size;
+ u32 blocks;
+ int level;
+
+ /* Clear the partitioning for disabled planes. */
+ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ iter.start = alloc->start;
+ iter.size = skl_ddb_entry_size(alloc);
+ if (iter.size == 0)
+ return 0;
+
+ /* Allocate fixed number of blocks for cursor. */
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
+
+ iter.data_rate = skl_total_relative_data_rate(crtc_state);
+
+ /*
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
+ */
+ for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ drm_WARN_ON(&i915->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
+ blocks = U32_MAX;
+ break;
+ }
+ continue;
+ }
+
+ blocks += wm->wm[level].min_ddb_alloc;
+ blocks += wm->uv_wm[level].min_ddb_alloc;
+ }
+
+ if (blocks <= iter.size) {
+ iter.size -= blocks;
+ break;
+ }
+ }
+
+ if (level < 0) {
+ drm_dbg_kms(&i915->drm,
+ "Requested display configuration exceeds system DDB limitations\n");
+ drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ blocks, iter.size);
+ return -EINVAL;
+ }
+
+ /* avoid the WARN later when we don't allocate any extra DDB */
+ if (iter.data_rate == 0)
+ iter.size = 0;
+
+ /*
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ } else {
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ }
+ }
+ drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id))
+ skl_check_nv12_wm_level(&wm->wm[level],
+ &wm->uv_wm[level],
+ ddb_y, ddb);
+ else
+ skl_check_wm_level(&wm->wm[level], ddb);
+
+ if (icl_need_wm1_wa(i915, plane_id) &&
+ level == 1 && wm->wm[0].enable) {
+ wm->wm[level].blocks = wm->wm[0].blocks;
+ wm->wm[level].lines = wm->wm[0].lines;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
+ }
+ }
+
+ /*
+ * Go back and disable the transition and SAGV watermarks
+ * if it turns out we don't have enough DDB blocks for them.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_check_wm_level(&wm->trans_wm, ddb_y);
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+
+ skl_check_wm_level(&wm->trans_wm, ddb);
+ }
+
+ skl_check_wm_level(&wm->sagv.wm0, ddb);
+ skl_check_wm_level(&wm->sagv.trans_wm, ddb);
+ }
+
+ return 0;
+}
+
+/*
+ * The max latency should be 257 (max the punit can code is 255 and we add 2us
+ * for the read latency) and cpp should always be <= 8, which
+ * should allow a pixel_rate of up to ~2 GHz. That seems sufficient since the
+ * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
+static uint_fixed_16_16_t
+skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
+ if (latency == 0)
+ return FP_16_16_MAX;
+
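+	/*
+	 * Method 1: the amount of data fetched during the latency period,
+	 * expressed in DBuf blocks:
+	 * latency * pixel_rate * cpp / (1000 * dbuf_block_size)
+	 */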
+ wm_intermediate_val = latency * pixel_rate * cpp;
+ ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ ret = add_fixed16_u32(ret, 1);
+
+ return ret;
+}
+
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
+ if (latency == 0)
+ return FP_16_16_MAX;
+
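+	/*
+	 * Method 2: the number of lines scanned out during the latency
+	 * period times the blocks needed per line:
+	 * DIV_ROUND_UP(latency * pixel_rate, pipe_htotal * 1000) * plane_blocks_per_line
+	 */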
+ wm_intermediate_val = latency * pixel_rate;
+ wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+ pipe_htotal * 1000);
+ ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
+ return ret;
+}
+
+static uint_fixed_16_16_t
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 pixel_rate;
+ u32 crtc_htotal;
+ uint_fixed_16_16_t linetime_us;
+
+ if (!crtc_state->hw.active)
+ return u32_to_fixed16(0);
+
+ pixel_rate = crtc_state->pixel_rate;
+
+ if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ return u32_to_fixed16(0);
+
+ crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
+ linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
+
+ return linetime_us;
+}
+
+static int
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 interm_pbpl;
+
+ /* only planar formats have two planes */
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ drm_dbg_kms(&i915->drm,
+ "Non planar formats have a single plane\n");
+ return -EINVAL;
+ }
+
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_4_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
+
+ wp->width = width;
+ if (color_plane == 1 && wp->is_planar)
+ wp->width /= 2;
+
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ wp->dbuf_block_size = 256;
+ else
+ wp->dbuf_block_size = 512;
+
+ if (drm_rotation_90_or_270(rotation)) {
+ switch (wp->cpp) {
+ case 1:
+ wp->y_min_scanlines = 16;
+ break;
+ case 2:
+ wp->y_min_scanlines = 8;
+ break;
+ case 4:
+ wp->y_min_scanlines = 4;
+ break;
+ default:
+ MISSING_CASE(wp->cpp);
+ return -EINVAL;
+ }
+ } else {
+ wp->y_min_scanlines = 4;
+ }
+
+ if (skl_needs_memory_bw_wa(i915))
+ wp->y_min_scanlines *= 2;
+
+ wp->plane_bytes_per_line = wp->width * wp->cpp;
+ if (wp->y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+ wp->y_min_scanlines,
+ wp->dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+ wp->y_min_scanlines);
+ } else {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+ wp->dbuf_block_size);
+
+ if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ }
+
+ wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+ wp->plane_blocks_per_line);
+
+ wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+
+ return 0;
+}
+
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int width;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->hw.rotation,
+ intel_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
+static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+{
+ if (DISPLAY_VER(i915) >= 10)
+ return true;
+
+ /* The number of lines is ignored for the level 0 watermark. */
+ return level > 0;
+}
+
+static int skl_wm_max_lines(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 255;
+ else
+ return 31;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t selected_result;
+ u32 blocks, lines, min_ddb_alloc = 0;
+
+ if (latency == 0 ||
+ (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
+ latency += 15;
+
+ method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ wp->cpp, latency, wp->dbuf_block_size);
+ method2 = skl_wm_method2(wp->plane_pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ latency,
+ wp->plane_blocks_per_line);
+
+ if (wp->y_tiled) {
+ selected_result = max_fixed16(method2, wp->y_tile_minimum);
+ } else {
+ if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
+ wp->dbuf_block_size < 1) &&
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (DISPLAY_VER(i915) == 9)
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
+ selected_result = method1;
+ }
+ }
+
+ blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ /*
+ * Let's have blocks be at minimum equivalent to plane_blocks_per_line,
+ * as there will be at least one line in the lines configuration. This
+ * is a workaround for FIFO underruns observed with resolutions like
+ * 4k 60 Hz in single channel DRAM configurations.
+ *
+ * As per the Bspec 49325, if the ddb allocation can hold at least
+ * one plane_blocks_per_line, we should have selected method2 in
+ * the above logic. Assuming that modern versions have enough dbuf
+ * and method2 guarantees blocks equivalent to at least 1 line,
+ * select the blocks as plane_blocks_per_line.
+ *
+ * TODO: Revisit the logic when we have better understanding on DRAM
+ * channels' impact on the level 0 memory latency and the relevant
+ * wm calculations.
+ */
+ if (skl_wm_has_lines(i915, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
+ lines = div_round_up_fixed16(selected_result,
+ wp->plane_blocks_per_line);
+
+ if (DISPLAY_VER(i915) == 9) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ lines += wp->y_min_scanlines;
+ } else {
+ blocks++;
+ }
+
+ /*
+ * Make sure result blocks for higher latency levels are
+ * at least as high as the level below the current level.
+ * This is an assumption in the DDB algorithm optimization
+ * for special cases. Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->blocks > blocks)
+ blocks = result_prev->blocks;
+ }
+ }
+
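+	/*
+	 * icl+: for Y-tiled surfaces the minimum DDB allocation corresponds
+	 * to the line count rounded up to a y_min_scanlines boundary plus
+	 * one extra tile row, converted to blocks; otherwise it is the
+	 * block count plus roughly 10%.
+	 */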
+ if (DISPLAY_VER(i915) >= 11) {
+ if (wp->y_tiled) {
+ int extra_lines;
+
+ if (lines % wp->y_min_scanlines == 0)
+ extra_lines = wp->y_min_scanlines;
+ else
+ extra_lines = wp->y_min_scanlines * 2 -
+ lines % wp->y_min_scanlines;
+
+ min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
+ wp->plane_blocks_per_line);
+ } else {
+ min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
+ }
+ }
+
+ if (!skl_wm_has_lines(i915, level))
+ lines = 0;
+
+ if (lines > skl_wm_max_lines(i915)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * If lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
+ result->blocks = blocks;
+ result->lines = lines;
+ /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+ result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
+ result->enable = true;
+
+ if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
+ result->can_sagv = latency >= i915->display.sagv.block_time_us;
+}
+
+static void
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_wm_level *levels)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level *result_prev = &levels[0];
+
+ for (level = 0; level <= max_level; level++) {
+ struct skl_wm_level *result = &levels[level];
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency,
+ wm_params, result_prev, result);
+
+ result_prev = result;
+ }
+}
+
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_plane_wm *plane_wm)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
+ struct skl_wm_level *levels = plane_wm->wm;
+ unsigned int latency = 0;
+
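+	/* The SAGV wm0 uses the normal level 0 latency extended by the SAGV block time. */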
+ if (i915->display.sagv.block_time_us)
+ latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+
+ skl_compute_plane_wm(crtc_state, plane, 0, latency,
+ wm_params, &levels[0],
+ sagv_wm);
+}
+
+static void skl_compute_transition_wm(struct drm_i915_private *i915,
+ struct skl_wm_level *trans_wm,
+ const struct skl_wm_level *wm0,
+ const struct skl_wm_params *wp)
+{
+ u16 trans_min, trans_amount, trans_y_tile_min;
+ u16 wm0_blocks, trans_offset, blocks;
+
+ /* Transition WMs don't make any sense if IPC is disabled */
+ if (!skl_watermark_ipc_enabled(i915))
+ return;
+
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+ * Transition WMs are not recommended by the HW team for GEN9
+ */
+ if (DISPLAY_VER(i915) == 9)
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (DISPLAY_VER(i915) == 10)
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is configurable amount */
+
+ trans_offset = trans_min + trans_amount;
+
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+ * letters. The value wm0->blocks is actually Result Blocks, but
+ * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+ * and since we later will have to get the ceiling of the sum in the
+ * transition watermarks calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1 and it should work for the
+ * current platforms.
+ */
+ wm0_blocks = wm0->blocks - 1;
+
+ if (wp->y_tiled) {
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
+ blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
+ } else {
+ blocks = wm0_blocks + trans_offset;
+ }
+ blocks++;
+
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ trans_wm->blocks = blocks;
+ trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
+ trans_wm->enable = true;
+}
+
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane, int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
+
+ skl_compute_transition_wm(i915, &wm->trans_wm,
+ &wm->wm[0], &wm_params);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
+
+ skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ &wm->sagv.wm0, &wm_params);
+ }
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->planar_slave)
+ return 0;
+
+ memset(wm, 0, sizeof(*wm));
+
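+	/*
+	 * For planar formats the color plane 0 (Y) watermarks are computed
+	 * on behalf of the linked plane and the color plane 1 (UV)
+	 * watermarks for this plane itself.
+	 */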
+ if (plane_state->planar_linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ drm_WARN_ON(&i915->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state));
+ drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_state->planar_linked_plane, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int skl_build_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int ret, i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ /*
+ * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
+ * instead, but we don't populate that correctly for NV12 Y
+ * planes, so for now hack around this.
+ */
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 11)
+ ret = icl_build_plane_wm(crtc_state, plane_state);
+ else
+ ret = skl_build_plane_wm(crtc_state, plane_state);
+ if (ret)
+ return ret;
+ }
+
+ crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
+
+ return 0;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_ddb_entry *entry)
+{
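+	/* The register encodes the end block inclusively, hence the -1 below. */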
+ if (entry->end)
+ intel_de_write_fw(i915, reg,
+ PLANE_BUF_END(entry->end - 1) |
+ PLANE_BUF_START(entry->start));
+ else
+ intel_de_write_fw(i915, reg, 0);
+}
+
+static void skl_write_wm_level(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ u32 val = 0;
+
+ if (level->enable)
+ val |= PLANE_WM_EN;
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
+ val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
+
+ intel_de_write_fw(i915, reg, val);
+}
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915,
+ PLANE_BUF_CFG(pipe, plane_id), ddb);
+
+ if (DISPLAY_VER(i915) < 11)
+ skl_ddb_entry_write(i915,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
+}
+
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, CUR_WM(pipe, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
+}
+
+static bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
+{
+ return l1->enable == l2->enable &&
+ l1->ignore_lines == l2->ignore_lines &&
+ l1->lines == l2->lines &&
+ l1->blocks == l2->blocks;
+}
+
+static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
+ skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
+ skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
+}
+
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ return a->start < b->end && b->start < a->end;
+}
+
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
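+ /* merge b into a; an entry with end == 0 is considered empty */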
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static int
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(i915, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
+static int
+skl_compute_ddb(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
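+ /* acquire the dbuf state only if at least one crtc is in the state */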
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_MBUS_JOINING(i915))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
+ old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ /* TODO: Implement vblank synchronized MBUS joining changes */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
+ old_dbuf_state->enabled_slices,
+ new_dbuf_state->enabled_slices,
+ INTEL_INFO(i915)->display.dbuf.slice_mask,
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus));
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_crtc_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
+static void
+skl_print_wm_changes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_ddb_entry *old, *new;
+
+ old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+ }
+ }
+}
+
+static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
+ const struct skl_pipe_wm *old_pipe_wm,
+ const struct skl_pipe_wm *new_pipe_wm)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
+ skl_plane_wm_level(new_pipe_wm, plane->id, level)))
+ return false;
+ }
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
+ const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
+
+ if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
+ !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
+ return false;
+ }
+
+ return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
+ skl_plane_trans_wm(new_pipe_wm, plane->id));
+}
+
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as the
+ * default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
+ skl_plane_selected_wm_equals(plane,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static int
+skl_compute_wm(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_build_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ ret = intel_compute_sagv_mask(state);
+ if (ret)
+ return ret;
+
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ skl_print_wm_changes(state);
+
+ return 0;
+}
+
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+{
+ level->enable = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
+ level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
+ level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+}
+
+static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ int level, max_level;
+ enum plane_id plane_id;
+ u32 val;
+
+ max_level = ilk_wm_max_level(i915);
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm = &out->planes[plane_id];
+
+ for (level = 0; level <= max_level; level++) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
+
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ }
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ wm->sagv.wm0 = wm->wm[0];
+ wm->sagv.trans_wm = wm->trans_wm;
+ }
+ }
+}
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_crtc *crtc;
+
+ if (HAS_MBUS_JOINING(i915))
+ dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset;
+ enum plane_id plane_id;
+ u8 slices;
+
+ memset(&crtc_state->wm.skl.optimal, 0,
+ sizeof(crtc_state->wm.skl.optimal));
+ if (crtc_state->hw.active)
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ plane_id, ddb, ddb_y);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ }
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(i915, slices);
+ crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
+ str_yes_no(dbuf_state->joined_mbus));
+ }
+
+ dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(i915);
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+{
+ return i915->display.wm.ipc_enabled;
+}
+
+void skl_watermark_ipc_update(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+}
+
+static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+{
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(i915))
+ return false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
+ return i915->dram_info.symmetric_memory;
+
+ return true;
+}
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+
+ skl_watermark_ipc_update(i915);
+}
+
+static void
+adjust_wm_latency(struct drm_i915_private *i915,
+ u16 wm[], int max_level, int read_latency)
+{
+ bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ int i, level;
+
+ /*
+ * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
+ * need to be disabled. We make sure to sanitize the values out
+ * of the punit to satisfy this requirement.
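+ * E.g. latencies of {2, 4, 0, 8} would be sanitized to {2, 4, 0, 0},
+ * with max_level clamped to 1.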
+ */
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0) {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ max_level = level - 1;
+ break;
+ }
+ }
+
+ /*
+ * WaWmMemoryReadLatency
+ *
+ * punit doesn't take into account the read latency so we need
+ * to add proper adjustment to each valid level we retrieve
+ * from the punit when level 0 response data is 0us.
+ */
+ if (wm[0] == 0) {
+ for (level = 0; level <= max_level; level++)
+ wm[level] += read_latency;
+ }
+
+ /*
+ * WA Level-0 adjustment for 16GB DIMMs: SKL+
+ * If we could not get the DIMM info, assume 16GB DIMMs and
+ * enable this WA to avoid any underruns.
+ */
+ if (wm_lv_0_adjust_needed)
+ wm[0] += 1;
+}
+
+static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ int max_level = ilk_wm_max_level(i915);
+ u32 val;
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+ wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+ wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+ wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ adjust_wm_latency(i915, wm, max_level, 6);
+}
+
+static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ int max_level = ilk_wm_max_level(i915);
+ int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
+ int mult = IS_DG2(i915) ? 2 : 1;
+ u32 val;
+ int ret;
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ adjust_wm_latency(i915, wm, max_level, read_latency);
+}
+
+static void skl_setup_wm_latency(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14)
+ mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, i915->display.wm.skl_latency);
+
+ intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
+static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return NULL;
+
+ return &dbuf_state->base;
+}
+
+static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_funcs = {
+ .atomic_duplicate_state = intel_dbuf_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_destroy_state,
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ if (IS_ERR(dbuf_state))
+ return ERR_CAST(dbuf_state);
+
+ return to_intel_dbuf_state(dbuf_state);
+}
+
+int intel_dbuf_init(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ &dbuf_state->base, &intel_dbuf_funcs);
+
+ return 0;
+}
+
+/*
+ * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus state
+ * before updating the request state of all DBUF slices.
+ */
+static void update_mbus_pre_enable(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ u32 mbus_ctl, dbuf_min_tracker_val;
+ enum dbuf_slice slice;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ /*
+ * TODO: Implement vblank synchronized MBUS joining changes.
+ * Must be properly coordinated with dbuf reprogramming.
+ */
+ if (dbuf_state->joined_mbus) {
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ } else {
+ mbus_ctl = MBUS_HASHING_MODE_2x2 |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ }
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ dbuf_min_tracker_val);
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ update_mbus_pre_enable(state);
+ gen9_dbuf_slices_update(i915,
+ old_dbuf_state->enabled_slices |
+ new_dbuf_state->enabled_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915,
+ new_dbuf_state->enabled_slices);
+}
+
+static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
+{
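+ /* pipes A and D share one DBUF bank, pipes B and C the other */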
+ switch (pipe) {
+ case PIPE_A:
+ return !(active_pipes & BIT(PIPE_D));
+ case PIPE_D:
+ return !(active_pipes & BIT(PIPE_A));
+ case PIPE_B:
+ return !(active_pipes & BIT(PIPE_C));
+ case PIPE_C:
+ return !(active_pipes & BIT(PIPE_B));
+ default: /* to suppress compiler warning */
+ MISSING_CASE(pipe);
+ break;
+ }
+
+ return false;
+}
+
+void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc *crtc;
+ u32 val = 0;
+ int i;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= MBUS_DBOX_I_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
+ MBUS_DBOX_A_CREDIT(8);
+ else if (IS_ALDERLAKE_P(i915))
+ /* Wa_22010947358:adl-p */
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
+ MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 14) {
+ val |= MBUS_DBOX_B_CREDIT(0xA);
+ } else if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ u32 pipe_val = val;
+
+ if (!new_crtc_state->hw.active)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 14) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
+ new_dbuf_state->active_pipes))
+ pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
+
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
+ }
+}
+
+static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *i915 = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ str_yes_no(skl_watermark_ipc_enabled(i915)));
+ return 0;
+}
+
+static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+
+ return single_open(file, skl_watermark_ipc_status_show, i915);
+}
+
+static ssize_t skl_watermark_ipc_status_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (!skl_watermark_ipc_enabled(i915) && enable)
+ drm_info(&i915->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ i915->display.wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(i915);
+ }
+
+ return len;
+}
+
+static const struct file_operations skl_watermark_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = skl_watermark_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = skl_watermark_ipc_status_write
+};
+
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ if (!HAS_IPC(i915))
+ return;
+
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
new file mode 100644
index 000000000000..7a5a4e67cd73
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SKL_WATERMARK_H__
+#define __SKL_WATERMARK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_global_state.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_bw_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915);
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state);
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry);
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915);
+void skl_wm_sanitize(struct drm_i915_private *i915);
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state);
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915);
+void skl_watermark_ipc_update(struct drm_i915_private *i915);
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+
+void skl_wm_init(struct drm_i915_private *i915);
+
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ bool joined_mbus;
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
+
+#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+
+int intel_dbuf_init(struct drm_i915_private *i915);
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+void intel_mbus_dbox_update(struct intel_atomic_state *state);
+
+#endif /* __SKL_WATERMARK_H__ */
+
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b9b1fed99874..b3f5ca280ef2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -822,9 +822,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
u32 val;
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -998,9 +998,9 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
}
/* Assert reset */
@@ -1277,13 +1277,12 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
- if (intel_dsi->dual_link)
- pclk *= 2;
+ pipe_config->port_clock = pclk;
- if (pclk) {
- pipe_config->hw.adjusted_mode.crtc_clock = pclk;
- pipe_config->port_clock = pclk;
- }
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
}
/* return txclkesc cycles in terms of divider and duration in us */
@@ -1872,9 +1871,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
return;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
+ dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1933,8 +1932,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 5894b0138343..af7402127cd9 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -113,6 +113,61 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
return 0;
}
+static int vlv_dsi_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_clock;
+ u32 pll_ctl, pll_div;
+ u32 m = 0, p = 0, n;
+ int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int i;
+
+ pll_ctl = config->dsi_pll.ctrl;
+ pll_div = config->dsi_pll.div;
+
+ /* mask out other bits and extract the P1 divisor */
+ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+ pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+ /* N1 divisor */
+ n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
+ n = 1 << n; /* register has log2(N1) */
+
+ /* mask out the other bits and extract the M1 divisor */
+ pll_div &= DSI_PLL_M1_DIV_MASK;
+ pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+ while (pll_ctl) {
+ pll_ctl = pll_ctl >> 1;
+ p++;
+ }
+ p--;
+
+ if (!p) {
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+ if (lfsr_converts[i] == pll_div)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lfsr_converts)) {
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ return 0;
+ }
+
+ m = i + 62;
+
+ dsi_clock = (m * refclk) / (p * n);
+
+ return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+}
+
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
@@ -122,8 +177,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int ret;
- u32 dsi_clk;
+ int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
@@ -145,6 +199,14 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
+ pclk = vlv_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
return 0;
}
@@ -262,13 +324,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
- u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
- int i;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -280,65 +336,31 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
- /* mask out other bits and extract the P1 divisor */
- pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
- pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
-
- /* N1 divisor */
- n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
- n = 1 << n; /* register has log2(N1) */
-
- /* mask out the other bits and extract the M1 divisor */
- pll_div &= DSI_PLL_M1_DIV_MASK;
- pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
-
- while (pll_ctl) {
- pll_ctl = pll_ctl >> 1;
- p++;
- }
- p--;
-
- if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
- if (lfsr_converts[i] == pll_div)
- break;
- }
-
- if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
- return 0;
- }
-
- m = i + 62;
+ return vlv_dsi_pclk(encoder, config);
+}
- dsi_clock = (m * refclk) / (p * n);
+static int bxt_dsi_pclk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_ratio, dsi_clk;
- pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+ dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+ dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
- return pclk;
+ return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
}
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- u32 pclk;
- u32 dsi_clk;
- u32 dsi_ratio;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 pclk;
config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
-
- dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
-
- pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
+ pclk = bxt_dsi_pclk(encoder, config);
drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
return pclk;
@@ -463,6 +485,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
+ int pclk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
@@ -502,6 +525,14 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
+ pclk = bxt_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
index 356e51515346..e065b8f2ee08 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -11,6 +11,8 @@
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
+#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
@@ -96,8 +98,8 @@
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
+#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
@@ -106,11 +108,11 @@
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
+#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
+#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
@@ -145,8 +147,8 @@
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
@@ -168,76 +170,76 @@
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
+#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
+#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
+#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
+#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
+#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
+#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
@@ -247,27 +249,27 @@
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
+#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
+#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
@@ -276,8 +278,8 @@
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
+#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
#define BXT_DPHY_DEFEATURE_EN (1 << 8)
@@ -290,35 +292,35 @@
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
+#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
+#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
+#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
+#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
+#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
+#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
@@ -330,8 +332,8 @@
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
@@ -348,15 +350,15 @@
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
+#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
+#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
@@ -367,34 +369,34 @@
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
-#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
+#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
+#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -407,8 +409,8 @@
/* MIPI adapter registers */
-#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
+#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
@@ -440,21 +442,21 @@
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)
-#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
+#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
+#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
@@ -462,18 +464,18 @@
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
+#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
+#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
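
The hunk above replaces every open-coded dev_priv->mipi_mmio_base reference in the MIPI register definitions with a _MIPI_MMIO_BASE(dev_priv) helper, so the per-platform base address lives behind a single macro instead of being repeated in each offset. The macro's actual expansion is not part of this hunk; the sketch below only illustrates the composition pattern with invented names.

#include <stdint.h>
#include <stdio.h>

struct fake_priv { uint32_t mipi_mmio_base; };

/* one place that knows where the base comes from ... */
#define FAKE_MIPI_MMIO_BASE(priv)       ((priv)->mipi_mmio_base)
/* ... and per-register offsets composed on top of it */
#define FAKE_MIPIA_HS_TX_TIMEOUT(priv)  (FAKE_MIPI_MMIO_BASE(priv) + 0xb010)

int main(void)
{
        struct fake_priv priv = { .mipi_mmio_base = 0x60000 };

        printf("MIPIA_HS_TX_TIMEOUT at 0x%x\n", FAKE_MIPIA_HS_TX_TIMEOUT(&priv));
        return 0;
}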
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dabdfe09f5e5..0bcde53c50c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
@@ -1521,10 +1525,6 @@ static void context_close(struct i915_gem_context *ctx)
ctx->file_priv = ERR_PTR(-EBADF);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
-
client = ctx->client;
if (client) {
spin_lock(&client->ctx_lock);
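
The i915_gem_context.c change above defers taking the context off the gem.contexts list: context_close() no longer unlinks it, and the list_del() now runs in the deferred release work, still under the contexts lock. A minimal userspace sketch of that shape, with invented names, where only the final release unlinks the object:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_ctx {
        struct fake_ctx *prev, *next;   /* link on the global contexts list */
};

static struct fake_ctx ctx_list = { &ctx_list, &ctx_list };
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static void ctx_add(struct fake_ctx *ctx)
{
        pthread_mutex_lock(&ctx_lock);
        ctx->next = ctx_list.next;
        ctx->prev = &ctx_list;
        ctx_list.next->prev = ctx;
        ctx_list.next = ctx;
        pthread_mutex_unlock(&ctx_lock);
}

static void ctx_close(struct fake_ctx *ctx)
{
        /* closing no longer unlinks: list walkers can still reach the context */
        (void)ctx;
}

static void ctx_release(struct fake_ctx *ctx)
{
        /* final release is the single place that takes it off the list */
        pthread_mutex_lock(&ctx_lock);
        ctx->prev->next = ctx->next;
        ctx->next->prev = ctx->prev;
        pthread_mutex_unlock(&ctx_lock);
        free(ctx);
}

int main(void)
{
        struct fake_ctx *ctx = calloc(1, sizeof(*ctx));

        ctx_add(ctx);
        ctx_close(ctx);
        ctx_release(ctx);
        printf("list empty: %d\n", ctx_list.next == &ctx_list);
        return 0;
}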
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 1674b0c5802b..d44a152ce680 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -397,7 +397,7 @@ struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
u32 alignment,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
unsigned int flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -434,7 +434,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
*/
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL))
+ (!view || view->type == I915_GTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
flags | PIN_MAPPABLE |
PIN_NONBLOCK);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 1b88ea13435c..5a7a14e85c3f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -12,8 +12,6 @@ struct drm_i915_private;
struct drm_i915_gem_object;
struct intel_memory_region;
-extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
-
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0c5c43852e24..73d9eda1d6b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -194,17 +194,17 @@ int i915_gem_mmap_gtt_version(void)
return 4;
}
-static inline struct i915_ggtt_view
+static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
view.partial.size =
min_t(unsigned int, chunk,
@@ -212,7 +212,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
/* If the partial covers the entire object, just create a normal VMA. */
if (chunk >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
+ view.type = I915_GTT_VIEW_NORMAL;
return view;
}
@@ -341,12 +341,12 @@ retry:
PIN_NOEVICT);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
/* Use a partial view if it is bigger than available space */
- struct i915_ggtt_view view =
+ struct i915_gtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
flags = PIN_MAPPABLE | PIN_NOSEARCH;
- if (view.type == I915_GGTT_VIEW_NORMAL)
+ if (view.type == I915_GTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
/*
@@ -357,7 +357,7 @@ retry:
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
flags = PIN_MAPPABLE;
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
@@ -394,7 +394,7 @@ retry:
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
@@ -413,7 +413,7 @@ retry:
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
+ intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
@@ -550,6 +550,20 @@ out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
+void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_device *bdev = bo->bdev;
+
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+
+ if (obj->userfault_count) {
+ /* rpm wakeref provides exclusive access */
+ list_del(&obj->userfault_link);
+ obj->userfault_count = 0;
+ }
+}
+
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
@@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_lock(&obj->mmo.lock);
}
spin_unlock(&obj->mmo.lock);
+
+ if (obj->userfault_count) {
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_del(&obj->userfault_link);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ obj->userfault_count = 0;
+ }
}
static struct i915_mmap_offset *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index efee9e0d2508..1fa91b3033b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
#endif
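
The mman changes above introduce i915_gem_object_runtime_pm_release_mmap_offset(), declared here: it unmaps the object's fake-offset node and drops it from the userfault bookkeeping while a runtime-PM wakeref guarantees exclusive access, whereas the ordinary release path takes the GT's lmem_userfault_lock before unlinking. A rough sketch of that two-path removal, all names invented:

#include <pthread.h>
#include <stdio.h>

struct fake_obj {
        int userfault_count;
        int on_list;                    /* stands in for userfault_link */
};

static pthread_mutex_t lmem_userfault_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_mmap_offset(struct fake_obj *obj, int have_rpm_wakeref)
{
        if (!obj->userfault_count)
                return;

        if (have_rpm_wakeref) {
                /* rpm-wakeref path: exclusivity is already guaranteed */
                obj->on_list = 0;
        } else {
                pthread_mutex_lock(&lmem_userfault_lock);
                obj->on_list = 0;
                pthread_mutex_unlock(&lmem_userfault_lock);
        }
        obj->userfault_count = 0;
}

int main(void)
{
        struct fake_obj obj = { .userfault_count = 1, .on_list = 1 };

        release_mmap_offset(&obj, 0);
        printf("count %d, on_list %d\n", obj.userfault_count, obj.on_list);
        return 0;
}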
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..7ff9c7877bec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
/* Skip serialisation and waking the device if known to be not used. */
- if (obj->userfault_count)
+ if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
i915_gem_object_release_mmap_gtt(obj);
if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- assert_object_held(obj);
+ assert_object_held_shared(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}
- if (!i915_gem_object_trylock(obj, NULL)) {
- /* busy, toss it back to the pile */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
- continue;
- }
-
__i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work.work);
+ container_of(work, struct drm_i915_private, mm.free_work);
i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+ queue_work(i915->wq, &i915->mm.free_work);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -731,6 +723,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
bool lmem_placement = false;
int i;
+ if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+ return false;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
@@ -745,7 +740,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
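
The i915_gem_object.c hunks above drop the trylock-and-requeue dance and turn the free worker from a delayed work item into a plain one that is queued as soon as the first object lands on the free list. A hedged kernel-style sketch of that batching pattern, using only the standard llist/workqueue APIs and invented names:

#include <linux/workqueue.h>
#include <linux/llist.h>

struct fake_mm {
        struct llist_head free_list;
        struct work_struct free_work;
        struct workqueue_struct *wq;
};

static void fake_free_work(struct work_struct *work)
{
        struct fake_mm *mm = container_of(work, struct fake_mm, free_work);
        struct llist_node *batch = llist_del_all(&mm->free_list);

        /* walk 'batch' and release each object here */
        (void)batch;
}

static void fake_schedule_free(struct fake_mm *mm, struct llist_node *freed)
{
        /* llist_add() returns true only when the list was empty, so the
         * work is queued exactly once per batch, with no added delay. */
        if (llist_add(freed, &mm->free_list))
                queue_work(mm->wq, &mm->free_work);
}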
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 6f0a3ce35567..7317d4102955 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -543,7 +543,7 @@ struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
u32 alignment,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
unsigned int flags);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..40305e2bcd49 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -298,7 +298,8 @@ struct drm_i915_gem_object {
};
/**
- * Whether the object is currently in the GGTT mmap.
+ * Whether the object is currently in the GGTT or any other supported
+ * fake-offset mmap backed by lmem.
*/
unsigned int userfault_count;
struct list_head userfault_link;
@@ -335,7 +336,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
-#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +616,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
+
+ u32 tlb;
} mm;
struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..4df50b049cea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,20 +6,21 @@
#include <drm/drm_cache.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-#include "gt/intel_gt.h"
-
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
bool shrinkable;
int i;
@@ -65,7 +66,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
shrinkable = i915_gem_object_is_shrinkable(obj);
if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
GEM_BUG_ON(!list_empty(&obj->mm.link));
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!obj->mm.tlb)
+ return;
+
+ intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+ obj->mm.tlb = 0;
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
- if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
- intel_gt_invalidate_tlbs(to_gt(i915));
- }
+ flush_tlb_invalidate(obj);
return pages;
}
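
The i915_gem_pages.c change above stops issuing an unconditional intel_gt_invalidate_tlbs() under a runtime-PM wakeref when pages are unset; instead a pending value stashed in obj->mm.tlb is handed to intel_gt_invalidate_tlb() and then cleared. The sketch below shows only the record-then-flush-once shape with made-up names, not the real ticketing scheme:

#include <stdint.h>
#include <stdio.h>

struct fake_obj { uint32_t tlb; };

static unsigned int flushes;

static void fake_gt_invalidate_tlb(uint32_t pending)
{
        (void)pending;
        flushes++;                      /* one flush per recorded unbind batch */
}

static void fake_record_unbind(struct fake_obj *obj)
{
        obj->tlb = 1;                   /* the real code stores a GT sequence value */
}

static void fake_flush_tlb_invalidate(struct fake_obj *obj)
{
        if (!obj->tlb)
                return;
        fake_gt_invalidate_tlb(obj->tlb);
        obj->tlb = 0;
}

int main(void)
{
        struct fake_obj obj = { 0 };

        fake_flush_tlb_invalidate(&obj);        /* nothing pending: no flush */
        fake_record_unbind(&obj);
        fake_flush_tlb_invalidate(&obj);
        printf("flushes: %u\n", flushes);       /* 1 */
        return 0;
}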
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 00359ec9d58b..3428f735e786 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
- intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
+ intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
flush_workqueue(i915->wq);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4eed3dd90ba8..f42ca1179f37 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -75,7 +75,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
if (size > resource_size(&mr->region))
return -ENOMEM;
- if (sg_alloc_table(st, page_count, GFP_KERNEL))
+ if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
return -ENOMEM;
/*
@@ -137,7 +137,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
* trigger the out-of-memory killer and for
* this we want __GFP_RETRY_MAYFAIL.
*/
- gfp |= __GFP_RETRY_MAYFAIL;
+ gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
}
} while (1);
@@ -209,7 +209,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
rebuild_st:
- st = kmalloc(sizeof(*st), GFP_KERNEL);
+ st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
if (!st)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 166d0a4b9e8c..acc561c0f0aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -18,10 +18,12 @@
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
+#include "intel_pci_config.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -428,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
reserved_base = stolen_top;
reserved_size = 0;
- switch (GRAPHICS_VER(i915)) {
- case 2:
- case 3:
- break;
- case 4:
- if (!IS_G4X(i915))
- break;
- fallthrough;
- case 5:
- g4x_get_stolen_reserved(i915, uncore,
+ if (GRAPHICS_VER(i915) >= 11) {
+ icl_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
- break;
- case 6:
- gen6_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- break;
- case 7:
- if (IS_VALLEYVIEW(i915))
- vlv_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- else
- gen7_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- break;
- case 8:
- case 9:
+ } else if (GRAPHICS_VER(i915) >= 8) {
if (IS_LP(i915))
chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
- break;
- default:
- MISSING_CASE(GRAPHICS_VER(i915));
- fallthrough;
- case 11:
- case 12:
- icl_get_stolen_reserved(i915, uncore,
- &reserved_base,
- &reserved_size);
- break;
+ } else if (GRAPHICS_VER(i915) >= 7) {
+ if (IS_VALLEYVIEW(i915))
+ vlv_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ else
+ gen7_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ gen6_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
+ g4x_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
}
/*
@@ -827,10 +810,13 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
if (WARN_ON_ONCE(instance))
return ERR_PTR(-ENODEV);
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
if (IS_DG1(uncore->i915)) {
- lmem_size = pci_resource_len(pdev, 2);
+ lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
} else {
@@ -842,11 +828,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
}
dsm_size = lmem_size - dsm_base;
- if (pci_resource_len(pdev, 2) < lmem_size) {
+ if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
- io_start = pci_resource_start(pdev, 2) + dsm_base;
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
io_size = dsm_size;
}
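
The stolen-memory hunk above converts the switch over exact GRAPHICS_VER values into a descending if/else-if ladder, so future platforms fall through to the newest helper instead of tripping MISSING_CASE. A toy, runnable mirror of that ladder (helper names reduced to strings, platform checks to flags):

#include <stdio.h>

static const char *pick_stolen_helper(int ver, int is_vlv, int is_g4x)
{
        if (ver >= 11)
                return "icl_get_stolen_reserved";
        else if (ver >= 8)
                return "bdw/chv_get_stolen_reserved";
        else if (ver >= 7)
                return is_vlv ? "vlv_get_stolen_reserved" : "gen7_get_stolen_reserved";
        else if (ver >= 6)
                return "gen6_get_stolen_reserved";
        else if (ver >= 5 || is_g4x)
                return "g4x_get_stolen_reserved";
        return "none";
}

int main(void)
{
        printf("%s\n", pick_stolen_helper(12, 0, 0));   /* newest branch */
        printf("%s\n", pick_stolen_helper(4, 0, 1));    /* G4X special case */
        return 0;
}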
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 85518b28cd72..fd42b89b7162 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
*/
if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -458,7 +458,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f131dc065f47..e3fc38dd5db0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -297,7 +297,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ if (i915_gem_object_needs_ccs_pages(obj))
ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
NUM_BYTES_PER_CCS_BYTE),
PAGE_SIZE);
@@ -361,7 +361,6 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct ttm_resource *res = bo->resource;
if (!obj)
return false;
@@ -378,45 +377,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
if (!i915_gem_object_evictable(obj))
return false;
- switch (res->mem_type) {
- case I915_PL_LMEM0: {
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
- struct i915_ttm_buddy_resource *bman_res =
- to_ttm_buddy_resource(res);
- struct drm_buddy *mm = bman_res->mm;
- struct drm_buddy_block *block;
-
- if (!place->fpfn && !place->lpfn)
- return true;
-
- GEM_BUG_ON(!place->lpfn);
-
- /*
- * If we just want something mappable then we can quickly check
- * if the current victim resource is using any of the CPU
- * visible portion.
- */
- if (!place->fpfn &&
- place->lpfn == i915_ttm_buddy_man_visible_size(man))
- return bman_res->used_visible_size > 0;
-
- /* Real range allocation */
- list_for_each_entry(block, &bman_res->blocks, link) {
- unsigned long fpfn =
- drm_buddy_block_offset(block) >> PAGE_SHIFT;
- unsigned long lpfn = fpfn +
- (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
-
- if (place->fpfn < lpfn && place->lpfn > fpfn)
- return true;
- }
- return false;
- } default:
- break;
- }
-
- return true;
+ return ttm_bo_eviction_valuable(bo, place);
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -548,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = 0;
+
+ if (bo->resource && likely(obj)) {
+ /* ttm_bo_release() already has dma_resv_lock */
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
- if (likely(obj)) {
__i915_gem_object_pages_fini(obj);
+
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
i915_ttm_free_cached_io_rsgt(obj);
}
}
@@ -1020,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref = 0;
vm_fault_t ret;
int idx;
@@ -1041,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1062,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (err) {
drm_dbg(dev, "Unable to make resource CPU accessible\n");
dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ goto out_rpm;
}
}
@@ -1073,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out_rpm;
+
+ /* ttm_bo_vm_reserve() already has dma_resv_lock */
+ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ obj->userfault_count = 1;
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ }
+
+ if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
dma_resv_unlock(bo->base.resv);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
return ret;
}
@@ -1242,9 +1235,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* Similarly, in delayed_destroy, we can't call ttm_bo_put()
* until successful initialization.
*/
- ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
- bo_type, &i915_sys_placement,
- page_size >> PAGE_SHIFT,
+ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+ &i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
if (ret)
return i915_ttm_err_to_gem(ret);
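
In the TTM fault path above, a runtime-PM wakeref is taken whenever the resource is CPU-mapped from iomem, the object is added to the GT's lmem userfault list on a successful fault, and every exit now funnels through out_rpm so the reference is always dropped. A compact sketch of that guard-around-the-handler shape, with invented names:

#include <stdio.h>

static int fake_rpm_get(void)  { puts("rpm get");  return 1; }
static void fake_rpm_put(void) { puts("rpm put"); }

static int fake_fault(int is_iomem, int fail_mappable)
{
        int wakeref = 0;
        int ret = 0;

        if (is_iomem)
                wakeref = fake_rpm_get();

        if (fail_mappable) {
                ret = -1;               /* e.g. VM_FAULT_SIGBUS */
                goto out_rpm;
        }

        /* ... normal fault servicing ... */

out_rpm:
        if (wakeref)
                fake_rpm_put();         /* single release point for all exits */
        return ret;
}

int main(void)
{
        fake_fault(1, 1);
        return 0;
}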
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 9aad84059d56..07e49f22f2de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -79,7 +79,12 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
goto out_no_populate;
err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
- GEM_WARN_ON(err);
+ if (err) {
+ drm_err(&i915->drm,
+ "Unable to copy from device to system memory, err:%pe\n",
+ ERR_PTR(err));
+ goto out_no_populate;
+ }
ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = backup;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 72ce2c9f42fd..c570cf780079 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -358,7 +358,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err;
@@ -419,7 +419,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+ unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i, j, single;
@@ -438,7 +438,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
combination |= page_sizes[j];
}
- mkwrite_device_info(i915)->page_sizes = combination;
+ RUNTIME_INFO(i915)->page_sizes = combination;
for (single = 0; single <= 1; ++single) {
obj = fake_huge_pages_object(i915, combination, !!single);
@@ -485,7 +485,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
out_put:
i915_gem_object_put(obj);
out_device:
- mkwrite_device_info(i915)->page_sizes = saved_mask;
+ RUNTIME_INFO(i915)->page_sizes = saved_mask;
return err;
}
@@ -495,7 +495,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -573,7 +573,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
int err;
@@ -1390,7 +1390,7 @@ out_put:
static int igt_ppgtt_sanity_check(void *arg)
{
struct drm_i915_private *i915 = arg;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
struct {
igt_create_fn fn;
unsigned int flags;
@@ -1764,8 +1764,8 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- mkwrite_device_info(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3cfc621ef363..9a6a6b5b722b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -711,7 +711,7 @@ static bool bad_swizzling(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
return true;
if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 13b088cc787e..a666d7e610f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -434,5 +434,5 @@ int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_coherency),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 62c61af77a42..51ed824b020c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -476,5 +476,5 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3ced9948a331..b73c91aa5450 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -93,7 +93,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
{
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
struct i915_vma *vma;
unsigned long page;
u32 __iomem *io;
@@ -210,7 +210,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
}
for_each_prime_number_from(page, 1, npages) {
- struct i915_ggtt_view view =
+ struct i915_gtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
u32 __iomem *io;
struct page *p;
@@ -367,7 +367,7 @@ static int igt_partial_tiling(void *arg)
unsigned int pitch;
struct tile tile;
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
/*
* The swizzling pattern is actually unknown as it
* varies based on physical address of each page.
@@ -464,7 +464,7 @@ static int igt_smoke_tiling(void *arg)
* Remember to look at the st_seed if we see a flip-flop in BAT!
*/
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
return 0;
obj = huge_gem_object(i915,
@@ -1844,5 +1844,5 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index fe0a890775e2..bdf5bb40ccf1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -95,5 +95,5 @@ int i915_gem_object_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_huge),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 1bb766c79dcb..5aaacc53fa4c 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -247,6 +247,7 @@ err_scratch1:
i915_gem_object_put(vm->scratch[1]);
err_scratch0:
i915_gem_object_put(vm->scratch[0]);
+ vm->scratch[0] = NULL;
return ret;
}
@@ -268,9 +269,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
- mutex_destroy(&ppgtt->flush);
+ if (ppgtt->base.pd)
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+ mutex_destroy(&ppgtt->flush);
}
static void pd_vma_bind(struct i915_address_space *vm,
@@ -449,19 +451,17 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_free;
+ goto err_put;
ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
if (IS_ERR(ppgtt->base.pd)) {
err = PTR_ERR(ppgtt->base.pd);
- goto err_scratch;
+ goto err_put;
}
return &ppgtt->base;
-err_scratch:
- free_scratch(&ppgtt->base.vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->base.vm);
return ERR_PTR(err);
}
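
The gen6_ppgtt_create() rework above collapses the separate error labels into a single err_put that calls i915_vm_put(), letting the normal cleanup callback unwind whatever was built; that is why gen6_ppgtt_cleanup() gains the NULL guard and the scratch pointer is cleared on failure. A small, self-contained sketch of the pattern, assuming invented names:

#include <stdlib.h>

struct fake_ppgtt { void *scratch; void *pd; };

static void fake_cleanup(struct fake_ppgtt *p)
{
        free(p->pd);            /* free(NULL) is a no-op, like the guards above */
        free(p->scratch);
        free(p);
}

static struct fake_ppgtt *fake_create(void)
{
        struct fake_ppgtt *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;

        p->scratch = malloc(64);
        if (!p->scratch)
                goto err_put;

        p->pd = malloc(64);
        if (!p->pd)
                goto err_put;

        return p;

err_put:
        fake_cleanup(p);        /* one label, mirroring i915_vm_put() above */
        return NULL;
}

int main(void)
{
        struct fake_ppgtt *p = fake_create();

        if (p)
                fake_cleanup(p);
        return 0;
}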
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 98645797962f..e49fa6fa6aee 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -165,10 +165,12 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
{
+ u32 gsi_offset = gt->uncore->gsi_offset;
+
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
- *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
@@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_GFX_CCS_AUX_NV);
}
*cs++ = preparser_disable(false);
@@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_VD0_AUX_NV);
else
- cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_VE0_AUX_NV);
}
if (mode & EMIT_INVALIDATE)
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index 32e3d2b831bb..e4d24c811dd6 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
+struct intel_gt;
struct i915_request;
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
@@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
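
Across the two gen8_engine_cs hunks above, gen12_emit_aux_table_inv() grows an intel_gt argument so the register offset written by the LRI can be biased by uncore->gsi_offset, which matters when a GT's register space is relocated. A toy, runnable emitter showing the same relocation step (opcode and offsets are made up):

#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_LRI(n)   (0x22u << 23 | (2 * (n) - 1))

static uint32_t *emit_reg_write(uint32_t *cs, uint32_t reg, uint32_t val,
                                uint32_t gsi_offset)
{
        *cs++ = FAKE_MI_LRI(1);
        *cs++ = reg + gsi_offset;   /* relocate for the remote/standalone GT */
        *cs++ = val;
        *cs++ = 0;                  /* noop keeps the emission qword aligned */
        return cs;
}

int main(void)
{
        uint32_t ring[8];
        uint32_t *end = emit_reg_write(ring, 0x4208, 0x1, 0x380000);

        printf("emitted %zu dwords, reg dword = 0x%x\n",
               (size_t)(end - ring), ring[1]);
        return 0;
}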
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index c7bd5d71b03e..2128b7a72a25 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -196,7 +196,10 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ if (ppgtt->pd)
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+ gen8_pd_top_count(vm), vm->top);
+
free_scratch(vm);
}
@@ -803,8 +806,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
struct drm_i915_gem_object *obj;
obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto free_scratch;
+ }
ret = map_pt_dma(vm, obj);
if (ret) {
@@ -823,7 +828,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
free_scratch:
while (i--)
i915_gem_object_put(vm->scratch[i]);
- return -ENOMEM;
+ vm->scratch[0] = NULL;
+ return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -901,6 +907,7 @@ err_pd:
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
+ struct i915_page_directory *pd;
struct i915_ppgtt *ppgtt;
int err;
@@ -946,21 +953,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
}
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_free_scratch;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ ppgtt->vm.pte_encode = gen8_pte_encode;
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
@@ -971,22 +964,31 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
ppgtt->vm.foreach = gen8_ppgtt_foreach;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.pte_encode = gen8_pte_encode;
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
+ pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
+ goto err_put;
+ }
+ ppgtt->pd = pd;
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_put;
+ }
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
return ppgtt;
-err_free_pd:
- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
- free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->vm);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 37fa813af766..1f7188129cd1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt,
*/
if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
return false;
- else if (GRAPHICS_VER(i915) == 12)
+ else if (MEDIA_VER(i915) >= 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
- else if (GRAPHICS_VER(i915) == 11)
+ else if (MEDIA_VER(i915) == 11)
return logical_vdbox % 2 == 0;
- MISSING_CASE(GRAPHICS_VER(i915));
return false;
}
+static void engine_mask_apply_media_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ unsigned int logical_vdbox = 0;
+ unsigned int i;
+ u32 media_fuse, fuse1;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+
+ if (MEDIA_VER(gt->i915) < 11)
+ return;
+
+ /*
+ * On newer platforms the fusing register is called 'enable' and has
+ * enable semantics, while on older platforms it is called 'disable'
+ * and bits have disable semantics.
+ */
+ media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ media_fuse = ~media_fuse;
+
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
+ } else {
+ gt->info.sfc_mask = ~0;
+ }
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(gt, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vdbox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VCS(i));
+ drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ continue;
+ }
+
+ if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
+ gt->info.vdbox_sfc_access |= BIT(i);
+ logical_vdbox++;
+ }
+ drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(gt));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(gt, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vebox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VECS(i));
+ drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ }
+ }
+ drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(gt));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+}
+
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -672,7 +739,10 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
unsigned long ccs_mask;
unsigned int i;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) < 11)
+ return;
+
+ if (hweight32(CCS_MASK(gt)) <= 1)
return;
ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
@@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
unsigned long meml3_mask;
unsigned long quad;
+ if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
+ return;
+
meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
@@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
*/
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
struct intel_gt_info *info = &gt->info;
- struct intel_uncore *uncore = gt->uncore;
- unsigned int logical_vdbox = 0;
- unsigned int i;
- u32 media_fuse, fuse1;
- u16 vdbox_mask;
- u16 vebox_mask;
-
- info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
-
- if (GRAPHICS_VER(i915) < 11)
- return info->engine_mask;
- /*
- * On newer platforms the fusing register is called 'enable' and has
- * enable semantics, while on older platforms it is called 'disable'
- * and bits have disable semantices.
- */
- media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
- media_fuse = ~media_fuse;
-
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
- gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
- } else {
- gt->info.sfc_mask = ~0;
- }
-
- for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(gt, _VCS(i))) {
- vdbox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vdbox_mask)) {
- info->engine_mask &= ~BIT(_VCS(i));
- drm_dbg(&i915->drm, "vcs%u fused off\n", i);
- continue;
- }
-
- if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
- gt->info.vdbox_sfc_access |= BIT(i);
- logical_vdbox++;
- }
- drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(gt));
- GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
-
- for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(gt, _VECS(i))) {
- vebox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vebox_mask)) {
- info->engine_mask &= ~BIT(_VECS(i));
- drm_dbg(&i915->drm, "vecs%u fused off\n", i);
- }
- }
- drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(gt));
- GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+ GEM_BUG_ON(!info->engine_mask);
+ engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
engine_mask_apply_copy_fuses(gt);
@@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine)
return false;
/* Caller disables interrupts */
- spin_lock(&engine->gt->irq_lock);
+ spin_lock(engine->gt->irq_lock);
engine->irq_enable(engine);
- spin_unlock(&engine->gt->irq_lock);
+ spin_unlock(engine->gt->irq_lock);
return true;
}
@@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->gt->irq_lock);
+ spin_lock(engine->gt->irq_lock);
engine->irq_disable(engine);
- spin_unlock(&engine->gt->irq_lock);
+ spin_unlock(engine->gt->irq_lock);
}
void intel_engines_reset_default_submission(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 889f0df3940b..fe1a0d5fd4b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -110,6 +110,7 @@
#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BB_OFFSET(base) _MMIO((base) + 0x158)
#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 633a7e5dba3b..6b5d4ea22b67 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -166,6 +166,21 @@ struct intel_engine_execlists {
struct timer_list preempt;
/**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout may be chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
* @ccid: identifier for contexts submitted to this engine
*/
u32 ccid;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4b909cb88cdf..c718e6dc40b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
if (!rq)
return 0;
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+ * If, after the preempt-timeout expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
}
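To make the two outcomes above concrete, a hypothetical timeline (the request names A/B are made up; only the fields and helpers come from the hunks above):

	/*
	 * t0: preemption requested while request A is active;
	 *     execlists->preempt_target = A and the preempt timer is armed.
	 * t1: a CS event is processed and the active context switches A -> B.
	 * t2: the preempt timer expires in the tasklet:
	 *     *execlists->active == B != preempt_target (A), so ERROR_PREEMPT
	 *     is not raised; the timer is rearmed with
	 *     active_preempt_timeout(engine, B) to let B exit gracefully.
	 * Had A still been active at t2, ERROR_PREEMPT would be set and the
	 * engine reset path taken instead.
	 */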
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15a915bb4088..30cf5c3369d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -16,7 +16,9 @@
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
+#include "intel_pci_config.h"
#include "i915_drv.h"
+#include "i915_pci.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
@@ -869,8 +871,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
u32 pte_flags;
int ret;
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+ GEM_WARN_ON(pci_resource_len(pdev, GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GTTMMADR_BAR) + gen6_gttadr_offset(i915);
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
@@ -930,7 +932,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
u16 snb_gmch_ctl;
if (!HAS_LMEM(i915)) {
- ggtt->gmadr = pci_resource(pdev, 2);
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
}
@@ -1084,7 +1089,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
unsigned int size;
u16 snb_gmch_ctl;
- ggtt->gmadr = pci_resource(pdev, 2);
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 6ebda3d65086..ea775e601686 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -727,7 +727,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
- i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
@@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
detect_bit_6_swizzle(ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 0e494028b81d..7af6db3194dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -7,6 +7,7 @@
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
@@ -36,10 +37,56 @@ static int gsc_irq_init(int irq)
return irq_set_chip_data(irq, NULL);
}
+static int
+gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_CPU_CLEAR);
+ if (IS_ERR(obj)) {
+ drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
+ return PTR_ERR(obj);
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
+ goto out_put;
+ }
+
+ intf->gem_obj = obj;
+
+ return 0;
+
+out_put:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
+{
+ struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
+
+ if (!obj)
+ return;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+
+ i915_gem_object_put(obj);
+}
+
struct gsc_def {
const char *name;
unsigned long bar;
size_t bar_size;
+ bool use_polling;
+ bool slow_firmware;
+ size_t lmem_size;
};
/* gsc resources and definitions (HECI1 and HECI2) */
@@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
+static const struct gsc_def gsc_def_xehpsdv[] = {
+ {
+ /* HECI1 not enabled on the device. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ .use_polling = true,
+ .slow_firmware = true,
+ }
+};
+
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
.bar = DG2_GSC_HECI1_BASE,
.bar_size = GSC_BAR_LENGTH,
+ .lmem_size = SZ_4M,
},
{
.name = "mei-gscfi",
@@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev)
kfree(adev);
}
-static void gsc_destroy_one(struct intel_gsc_intf *intf)
+static void gsc_destroy_one(struct drm_i915_private *i915,
+ struct intel_gsc *gsc, unsigned int intf_id)
{
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
+
if (intf->adev) {
auxiliary_device_delete(&intf->adev->aux_dev);
auxiliary_device_uninit(&intf->adev->aux_dev);
intf->adev = NULL;
}
+
if (intf->irq >= 0)
irq_free_desc(intf->irq);
intf->irq = -1;
+
+ gsc_ext_om_destroy(intf);
}
-static void gsc_init_one(struct drm_i915_private *i915,
- struct intel_gsc_intf *intf,
+static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
unsigned int intf_id)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct mei_aux_device *adev;
struct auxiliary_device *aux_dev;
const struct gsc_def *def;
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
int ret;
intf->irq = -1;
@@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
+ } else if (IS_XEHPSDV(i915)) {
+ def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
@@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
}
+ /* skip irq initialization */
+ if (def->use_polling)
+ goto add_device;
+
intf->irq = irq_alloc_desc(0);
if (intf->irq < 0) {
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
- return;
+ goto fail;
}
ret = gsc_irq_init(intf->irq);
@@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915,
goto fail;
}
+add_device:
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
goto fail;
+ if (def->lmem_size) {
+ drm_dbg(&i915->drm, "setting up GSC lmem\n");
+
+ if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
+ drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
+ kfree(adev);
+ goto fail;
+ }
+
+ adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
+ adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
+ }
+
adev->irq = intf->irq;
adev->bar.parent = &pdev->resource[0];
adev->bar.start = def->bar + pdev->resource[0].start;
adev->bar.end = adev->bar.start + def->bar_size - 1;
adev->bar.flags = IORESOURCE_MEM;
adev->bar.desc = IORES_DESC_NONE;
+ adev->slow_firmware = def->slow_firmware;
aux_dev = &adev->aux_dev;
aux_dev->name = def->name;
@@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
fail:
- gsc_destroy_one(intf);
+ gsc_destroy_one(i915, gsc, intf->id);
}
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
@@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
return;
}
- if (gt->gsc.intf[intf_id].irq < 0) {
- drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
+ if (gt->gsc.intf[intf_id].irq < 0)
return;
- }
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
@@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
- gsc_init_one(i915, &gsc->intf[i], i);
+ gsc_init_one(i915, gsc, i);
}
void intel_gsc_fini(struct intel_gsc *gsc)
@@ -220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
- gsc_destroy_one(&gsc->intf[i]);
+ gsc_destroy_one(gt->i915, gsc, i);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index 68582f912b21..fcac1775e9c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -20,11 +20,14 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
+ *
+ * @gem_obj: scratch memory for GSC operations
* @intf : gsc interface
*/
struct intel_gsc {
struct intel_gsc_intf {
struct mei_aux_device *adev;
+ struct drm_i915_gem_object *gem_obj;
int irq;
unsigned int id;
} intf[INTEL_GSC_NUM_INTERFACES];
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 68c2b0d8f187..d0b03a928b9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -24,20 +26,22 @@
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
+#include "intel_pci_config.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
+#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-static void __intel_gt_init_early(struct intel_gt *gt)
+void intel_gt_common_init_early(struct intel_gt *gt)
{
- spin_lock_init(&gt->irq_lock);
-
- mutex_init(&gt->tlb_invalidate_lock);
+ spin_lock_init(gt->irq_lock);
+ INIT_LIST_HEAD(&gt->lmem_userfault_list);
+ mutex_init(&gt->lmem_userfault_lock);
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -48,6 +52,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
@@ -55,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt)
}
/* Preliminary initialization of Tile 0 */
-void intel_root_gt_init_early(struct drm_i915_private *i915)
+int intel_root_gt_init_early(struct drm_i915_private *i915)
{
struct intel_gt *gt = to_gt(i915);
gt->i915 = i915;
gt->uncore = &i915->uncore;
+ gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+ if (!gt->irq_lock)
+ return -ENOMEM;
+
+ intel_gt_common_init_early(gt);
- __intel_gt_init_early(gt);
+ return 0;
}
static int intel_gt_probe_lmem(struct intel_gt *gt)
@@ -768,6 +779,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -777,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
int ret;
if (!gt_is_root(gt)) {
- struct intel_uncore_mmio_debug *mmio_debug;
struct intel_uncore *uncore;
+ spinlock_t *irq_lock;
- uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+ uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
if (!uncore)
return -ENOMEM;
- mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
- if (!mmio_debug) {
- kfree(uncore);
+ irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+ if (!irq_lock)
return -ENOMEM;
- }
gt->uncore = uncore;
- gt->uncore->debug = mmio_debug;
+ gt->irq_lock = irq_lock;
- __intel_gt_init_early(gt);
+ intel_gt_common_init_early(gt);
}
intel_uncore_init_early(gt->uncore, gt);
+ intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
if (ret)
@@ -807,27 +818,17 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
return 0;
}
-static void
-intel_gt_tile_cleanup(struct intel_gt *gt)
-{
- intel_uncore_cleanup_mmio(gt->uncore);
-
- if (!gt_is_root(gt)) {
- kfree(gt->uncore->debug);
- kfree(gt->uncore);
- kfree(gt);
- }
-}
-
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_gt *gt = &i915->gt0;
+ const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
+ unsigned int i;
int ret;
- mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
phys_addr = pci_resource_start(pdev, mmio_bar);
/*
@@ -835,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
* and it has been already initialized early during probe
* in i915_driver_probe()
*/
+ gt->i915 = i915;
+ gt->name = "Primary GT";
+ gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
i915->gt[0] = gt;
- /* TODO: add more tiles */
+ if (!HAS_EXTRA_GT_LIST(i915))
+ return 0;
+
+ for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
+ gtdef->name != NULL;
+ i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
+ gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gt->i915 = i915;
+ gt->name = gtdef->name;
+ gt->type = gtdef->type;
+ gt->info.engine_mask = gtdef->engine_mask;
+ gt->info.id = i;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ if (GEM_WARN_ON(range_overflows_t(resource_size_t,
+ gtdef->mapping_base,
+ SZ_16M,
+ pci_resource_len(pdev, mmio_bar)))) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ switch (gtdef->type) {
+ case GT_TILE:
+ ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
+ break;
+
+ case GT_MEDIA:
+ ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
+ gtdef->gsi_offset);
+ break;
+
+ case GT_PRIMARY:
+ /* Primary GT should not appear in extra GT list */
+ default:
+ MISSING_CASE(gtdef->type);
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ goto err;
+
+ i915->gt[i] = gt;
+ }
+
return 0;
+
+err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+ intel_gt_release_all(i915);
+
+ return ret;
}
int intel_gt_tiles_init(struct drm_i915_private *i915)
@@ -865,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915)
struct intel_gt *gt;
unsigned int id;
- for_each_gt(gt, i915, id) {
- intel_gt_tile_cleanup(gt);
+ for_each_gt(gt, i915, id)
i915->gt[id] = NULL;
- }
}
void intel_gt_info_print(const struct intel_gt_info *info,
@@ -906,7 +965,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +983,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +1002,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;
- GEM_TRACE("\n");
-
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
}
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
spin_unlock_irq(&uncore->lock);
- for_each_engine(engine, gt, id) {
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +1044,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
- struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1062,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
- mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
}
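For reference, a minimal sketch of how a caller is expected to use this batched-invalidation API (the helper names come from this patch; the example functions and their surrounding unbind/free structure are assumptions):

/* Hypothetical unbind/free flow using the seqno-batched TLB API. */
static void example_unbind(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res, u32 *tlb)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
	/* Record that a *full* invalidation is still outstanding. */
	*tlb = intel_gt_next_invalidate_tlb_full(vm->gt);
}

static void example_release_pages(struct intel_gt *gt, u32 tlb)
{
	/* Skipped internally if a full invalidation already passed. */
	intel_gt_invalidate_tlb(gt, tlb);
	/* ...only now is it safe to release the physical pages... */
}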
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 82d6f248d876..2ee582e287c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
-void intel_root_gt_init_early(struct drm_i915_private *i915);
+void intel_gt_common_init_early(struct intel_gt *gt);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
@@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
-
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index d5d1b04dbcad..3f656d3dba9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore)
return base_freq + frac_freq;
}
-static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock =
- (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 rpm_config_reg)
{
@@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
}
}
-static u32 read_clock_frequency(struct intel_uncore *uncore)
+static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * Note that on gen11+, the clock frequency may be reconfigured.
+ * We do not, and we assume nobody else does.
+ *
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
- if (GRAPHICS_VER(uncore->i915) <= 4) {
- /*
- * PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configuration”
- * (“CLKCFG”) MCHBAR register)
- */
- return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
- } else if (GRAPHICS_VER(uncore->i915) <= 8) {
/*
- * PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours).
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
*/
- return f12_5_mhz;
- } else if (GRAPHICS_VER(uncore->i915) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(uncore);
- } else {
- freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (GRAPHICS_VER(uncore->i915) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+}
+
+static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
/*
- * First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
*/
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(uncore);
- } else {
- u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (GRAPHICS_VER(uncore->i915) >= 11)
- freq = gen11_get_crystal_clock_freq(uncore, c0);
- else
- freq = gen9_get_crystal_clock_freq(uncore, c0);
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
}
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
+ return freq;
}
-void intel_gt_init_clock_frequency(struct intel_gt *gt)
+static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
/*
- * Note that on gen11+, the clock frequency may be reconfigured.
- * We do not, and we assume nobody else does.
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours).
+ */
+ return 12500000;
+}
+
+static u32 gen2_read_clock_frequency(struct intel_uncore *uncore)
+{
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
*/
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+}
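A quick worked number for the gen2 formula above (the 200 MHz hclk value is illustrative, not taken from this patch; rawclk_freq is reported in kHz, hence the * 1000 conversion to Hz):

	/* e.g. a 200 MHz hclk: rawclk_freq = 200000 (kHz) */
	u32 freq = 200000 * 1000 / 16;	/* 12,500,000 Hz = 12.5 MHz */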
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ if (GRAPHICS_VER(uncore->i915) >= 11)
+ return gen11_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 9)
+ return gen9_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 5)
+ return gen5_read_clock_frequency(uncore);
+ else
+ return gen2_read_clock_frequency(uncore);
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
gt->clock_frequency = read_clock_frequency(gt->uncore);
- if (gt->clock_frequency)
- gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) == 11)
gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+ else if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 3a72d4fd0214..f26882fdc24c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt,
u32 timeout_ts;
u32 ident;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
@@ -59,11 +59,17 @@ static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
const u16 iir)
{
+ struct intel_gt *media_gt = gt->i915->media_gt;
+
if (instance == OTHER_GUC_INSTANCE)
return guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
+ return guc_irq_handler(&media_gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
+ if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
+ return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(&gt->pxp, iir);
@@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
{
struct intel_engine_cs *engine;
+ /*
+ * Platforms with standalone media have their media engines in another
+ * GT.
+ */
+ if (MEDIA_VER(gt->i915) >= 13 &&
+ (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) {
+ if (!gt->i915->media_gt)
+ goto err;
+
+ gt = gt->i915->media_gt;
+ }
+
if (instance <= MAX_ENGINE_INSTANCE)
engine = gt->engine_class[class][instance];
else
@@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
if (likely(engine))
return intel_engine_cs_irq(engine, iir);
+err:
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
}
@@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
unsigned long intr_dw;
unsigned int bit;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
@@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
unsigned int bank;
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(gt, bank);
}
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
}
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
@@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt,
void __iomem * const regs = gt->uncore->regs;
u32 dw;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
@@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
if (!HAS_L3_DPF(gt->i915))
return;
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
gt->i915->l3_parity.which_slice |= 1 << 1;
@@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index bc898df7a48c..6c9a46452364 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,17 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+/**
+ * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
+ * it from sleeping, run some code and then asynchronously put the reference
+ * away.
+ *
+ * @gt: pointer to the gt
+ * @wf: pointer to a temporary wakeref.
+ */
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
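A minimal usage sketch of the new helper (this is the same pattern intel_gt_invalidate_tlb() adopts elsewhere in this patch; the body here is a placeholder):

	intel_wakeref_t wakeref;

	with_intel_gt_pm_if_awake(gt, wakeref) {
		/*
		 * Only runs if the GT already holds a PM wakeref; the extra
		 * reference taken here is released asynchronously on exit.
		 */
		do_work(gt);	/* hypothetical */
	}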
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 40bdd4cb629f..108b9e76c32e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -504,8 +504,8 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_puts(p, "no P-state info available\n");
}
- drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
- drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
+ drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
intel_runtime_pm_put(uncore->rpm, wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index 11060f5a4c89..52f2a28b2058 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
new_val = gt->pm_imr;
new_val &= ~interrupt_mask;
@@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
struct intel_uncore *uncore = gt->uncore;
i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
intel_uncore_write(uncore, reg, reset_mask);
intel_uncore_write(uncore, reg, reset_mask);
@@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt)
void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
gt->pm_ier |= enable_mask;
write_pm_ier(gt);
@@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
gt->pm_ier &= ~disable_mask;
gen6_gt_pm_mask_irq(gt, disable_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 60d6eb5f245b..2275ee47da95 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -259,6 +259,9 @@
#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+#define DRAW_WATERMARK _MMIO(0x26c0)
+#define VERT_WM_VAL REG_GENMASK(9, 0)
+
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080)
@@ -374,6 +377,9 @@
#define CHICKEN_RASTER_1 _MMIO(0x6204)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+#define CHICKEN_RASTER_2 _MMIO(0x6208)
+#define TBIMR_FAST_CLIP REG_BIT(5)
+
#define VFLSKPD _MMIO(0x62a8)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
@@ -1007,6 +1013,8 @@
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
+#define GUCPMTIMESTAMP _MMIO(0xc3e8)
+
#define __GEN9_RCS0_MOCS0 0xc800
#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
#define __GEN9_VCS0_MOCS0 0xc900
@@ -1078,6 +1086,7 @@
#define GEN10_SAMPLER_MODE _MMIO(0xe18c)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
@@ -1101,6 +1110,8 @@
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define THREAD_EX_ARB_MODE REG_GENMASK(3, 2)
+#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -1123,6 +1134,8 @@
#define RT_CTRL _MMIO(0xe530)
#define DIS_NULL_QUERY REG_BIT(10)
+#define STACKID_CTRL REG_GENMASK(6, 5)
+#define STACKID_CTRL_512 REG_FIELD_PREP(STACKID_CTRL, 0x2)
#define EU_PERF_CNTL1 _MMIO(0xe558)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
@@ -1541,6 +1554,8 @@
#define OTHER_GTPM_INSTANCE 1
#define OTHER_KCR_INSTANCE 4
#define OTHER_GSC_INSTANCE 6
+#define OTHER_MEDIA_GUC_INSTANCE 16
+#define OTHER_MEDIA_GTPM_INSTANCE 17
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
@@ -1565,4 +1580,12 @@
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+/*
+ * Standalone Media's non-engine GT registers are located at their regular GT
+ * offsets plus 0x380000. This extra offset is stored inside the intel_uncore
+ * structure so that the existing code can be used for both GTs without
+ * modification.
+ */
+#define MTL_MEDIA_GSI_BASE 0x380000
+
#endif /* __INTEL_GT_REGS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 9e4ebf53379b..d651ccd0ab20 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -22,11 +22,6 @@ bool is_object_gt(struct kobject *kobj)
return !strncmp(kobj->name, "gt", 2);
}
-static struct intel_gt *kobj_to_gt(struct kobject *kobj)
-{
- return container_of(kobj, struct intel_gt, sysfs_gt);
-}
-
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name)
{
@@ -101,6 +96,10 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
+ gt->sysfs_defaults = kobject_create_and_add(".defaults", &gt->sysfs_gt);
+ if (!gt->sysfs_defaults)
+ goto exit_fail;
+
intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
return;
@@ -113,5 +112,6 @@ exit_fail:
void intel_gt_sysfs_unregister(struct intel_gt *gt)
{
+ kobject_put(gt->sysfs_defaults);
kobject_put(&gt->sysfs_gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
index a99aa7e8b01a..6232923a420d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -10,6 +10,7 @@
#include <linux/kobject.h>
#include "i915_gem.h" /* GEM_BUG_ON() */
+#include "intel_gt_types.h"
struct intel_gt;
@@ -22,6 +23,11 @@ intel_gt_create_kobj(struct intel_gt *gt,
struct kobject *dir,
const char *name);
+static inline struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct intel_gt, sysfs_gt);
+}
+
void intel_gt_sysfs_register(struct intel_gt *gt);
void intel_gt_sysfs_unregister(struct intel_gt *gt);
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 73a8b46e0234..180dd6f3ef57 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
-static const struct attribute *freq_attrs[] = {
- &dev_attr_punit_req_freq_mhz.attr,
+static const struct attribute *throttle_reason_attrs[] = {
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
@@ -727,6 +726,34 @@ static const struct attribute *media_perf_power_attrs[] = {
NULL
};
+static ssize_t
+default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.min_freq);
+}
+
+static struct kobj_attribute default_min_freq_mhz =
+__ATTR(rps_min_freq_mhz, 0444, default_min_freq_mhz_show, NULL);
+
+static ssize_t
+default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.max_freq);
+}
+
+static struct kobj_attribute default_max_freq_mhz =
+__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
+
+static const struct attribute * const rps_defaults_attrs[] = {
+ &default_min_freq_mhz.attr,
+ &default_max_freq_mhz.attr,
+ NULL
+};
+
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
const struct attribute * const *attrs)
{
@@ -763,12 +790,20 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (!is_object_gt(kobj))
return;
- ret = sysfs_create_files(kobj, freq_attrs);
+ ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
if (ret)
drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
+ "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
gt->info.id, ERR_PTR(ret));
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+ }
+
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
@@ -776,4 +811,10 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
"failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
gt->info.id, ERR_PTR(ret));
}
+
+ ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to add gt%u rps defaults (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index df708802889d..f19c2de77ff6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -75,15 +76,44 @@ enum intel_submission_method {
INTEL_SUBMISSION_GUC,
};
+struct gt_defaults {
+ u32 min_freq;
+ u32 max_freq;
+};
+
+enum intel_gt_type {
+ GT_PRIMARY,
+ GT_TILE,
+ GT_MEDIA,
+};
+
struct intel_gt {
struct drm_i915_private *i915;
+ const char *name;
+ enum intel_gt_type type;
+
struct intel_uncore *uncore;
struct i915_ggtt *ggtt;
struct intel_uc uc;
struct intel_gsc gsc;
- struct mutex tlb_invalidate_lock;
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+ * After unbinding the PTE, we need to ensure the TLBs
+ * are invalidated prior to releasing the physical pages.
+ * But we only need one such invalidation for all unbinds,
+ * so we track how many TLB invalidations have been
+ * performed since unbinding the PTE and only emit an extra
+ * invalidate if no full barrier has been passed.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
struct i915_wa_list wa_list;
@@ -111,6 +141,20 @@ struct intel_gt {
struct intel_wakeref wakeref;
atomic_t user_wakeref;
+ /**
+ * Protects access to the lmem userfault list.
+ * Outside of the runtime suspend path, access to
+ * @lmem_userfault_list always requires first grabbing the
+ * runtime pm wakeref, to ensure we can't race against runtime suspend.
+ * Once we have that we also need to grab @lmem_userfault_lock,
+ * at which point we have exclusive access.
+ * The runtime suspend path is special since it doesn't really hold any locks,
+ * but instead has exclusive access by virtue of all other accesses requiring
+ * holding the runtime pm wakeref.
+ */
+ struct mutex lmem_userfault_lock;
+ struct list_head lmem_userfault_list;
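A sketch of the locking order described in the comment above, for a non-runtime-suspend path (the object and list-link names are hypothetical; only the gt fields come from this patch):

	intel_wakeref_t wakeref;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		mutex_lock(&gt->lmem_userfault_lock);
		list_add(&obj->userfault_link, &gt->lmem_userfault_list);
		mutex_unlock(&gt->lmem_userfault_lock);
	}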
+
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
@@ -126,6 +170,9 @@ struct intel_gt {
*/
intel_wakeref_t awake;
+ /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
u32 clock_frequency;
u32 clock_period_ns;
@@ -133,7 +180,7 @@ struct intel_gt {
struct intel_rc6 rc6;
struct intel_rps rps;
- spinlock_t irq_lock;
+ spinlock_t *irq_lock;
u32 gt_imr;
u32 pm_ier;
u32 pm_imr;
@@ -235,6 +282,18 @@ struct intel_gt {
/* gt/gtN sysfs */
struct kobject sysfs_gt;
+
+ /* sysfs defaults per gt */
+ struct gt_defaults defaults;
+ struct kobject *sysfs_defaults;
+};
+
+struct intel_gt_definition {
+ enum intel_gt_type type;
+ char *name;
+ u32 mapping_base;
+ u32 gsi_offset;
+ intel_engine_mask_t engine_mask;
};
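As an illustration of how intel_gt_probe_all() above walks this table, a hypothetical extra_gt_list entry for a platform with a standalone media GT could look as follows (the table name and engine mask are made up; real per-platform tables belong in i915_pci.c):

static const struct intel_gt_definition example_extra_gt_list[] = {
	{
		.type = GT_MEDIA,
		.name = "Standalone Media GT",
		.gsi_offset = MTL_MEDIA_GSI_BASE,
		.engine_mask = BIT(VCS0) | BIT(VECS0),
	},
	{}	/* a NULL .name terminates the probe loop */
};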
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b67831833c9a..2eaeba14319e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -405,6 +405,9 @@ void free_scratch(struct i915_address_space *vm)
{
int i;
+ if (!vm->scratch[0])
+ return;
+
for (i = 0; i <= vm->top; i++)
i915_gem_object_put(vm->scratch[i]);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index e639434e97fd..c0ca53cba9f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -386,9 +386,6 @@ struct i915_ggtt {
*/
struct list_head userfault_list;
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
struct mutex error_mutex;
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 14fe65812e42..1d19c073ba2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -12,6 +12,7 @@
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_rps.h"
struct ia_constants {
unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;
- if (rps->max_freq <= rps->min_freq)
- return false;
-
consts->max_ia_freq = cpu_max_MHz();
consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
- consts->min_gpu_freq = rps->min_freq;
- consts->max_gpu_freq = rps->max_freq;
- if (GRAPHICS_VER(i915) >= 9) {
- /* Convert GT frequency to 50 HZ units */
- consts->min_gpu_freq /= GEN9_FREQ_SCALER;
- consts->max_gpu_freq /= GEN9_FREQ_SCALER;
- }
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
return true;
}
@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
return;
/*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't accidentally get into an infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
+ /*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eec73c66406c..3955292483a6 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -662,6 +662,21 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
return -1;
}
+static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return 0x80;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x70;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return 0x64;
+ else if (GRAPHICS_VER(engine->i915) >= 8 &&
+ engine->class == RENDER_CLASS)
+ return 0xc4;
+ else
+ return -1;
+}
+
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
@@ -768,6 +783,7 @@ static void init_common_regs(u32 * const regs,
bool inhibit)
{
u32 ctl;
+ int loc;
ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -779,6 +795,10 @@ static void init_common_regs(u32 * const regs,
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
+
+ loc = lrc_ring_bb_offset(engine);
+ if (loc != -1)
+ regs[loc + 1] = 0;
}
static void init_wa_bb_regs(u32 * const regs,
@@ -1242,6 +1262,23 @@ dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
return cs;
}
+/*
+ * The bspec's tuning guide asks us to program a vertical watermark value of
+ * 0x3FF. However, this register is not saved/restored properly by the
+ * hardware, so we're required to apply the desired value via INDIRECT_CTX
+ * batch buffer to ensure the value takes effect properly. All other bits
+ * in this register should remain at 0 (the hardware default).
+ */
+static u32 *
+dg2_emit_draw_watermark_setting(u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
+ *cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);
+
+ return cs;
+}
+
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
@@ -1261,7 +1298,12 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915))
- cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_GFX_CCS_AUX_NV);
+
+ /* Wa_16014892111 */
+ if (IS_DG2(ce->engine->i915))
+ cs = dg2_emit_draw_watermark_setting(cs);
return cs;
}
@@ -1283,9 +1325,11 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915)) {
if (ce->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_VD0_AUX_NV);
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_VE0_AUX_NV);
}
return cs;
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2c35324b5f68..aaaf1906026c 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -511,44 +511,16 @@ static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
return cmd;
}
-static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
-{
- u32 num_cmds, num_blks, total_size;
-
- if (!GET_CCS_BYTES(i915, size))
- return 0;
-
- /*
- * XY_CTRL_SURF_COPY_BLT transfers CCS in 256 byte
- * blocks. one XY_CTRL_SURF_COPY_BLT command can
- * transfer upto 1024 blocks.
- */
- num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
- NUM_CCS_BYTES_PER_BLOCK);
- num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
- total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
-
- /*
- * Adding a flush before and after XY_CTRL_SURF_COPY_BLT
- */
- total_size += 2 * MI_FLUSH_DW_SIZE;
-
- return total_size;
-}
-
static int emit_copy_ccs(struct i915_request *rq,
u32 dst_offset, u8 dst_access,
u32 src_offset, u8 src_access, int size)
{
struct drm_i915_private *i915 = rq->engine->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
- u32 num_ccs_blks, ccs_ring_size;
+ u32 num_ccs_blks;
u32 *cs;
- ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
- WARN_ON(!ccs_ring_size);
-
- cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+ cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -583,8 +555,7 @@ static int emit_copy_ccs(struct i915_request *rq,
FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
- if (ccs_ring_size & 1)
- *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -638,40 +609,38 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
{
- int len = 0;
+ u64 len = 0;
while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg);
sg = sg_next(sg);
- };
+ }
return len;
}
-static void
+static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
- int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
- if (ccs_bytes_to_cpy) {
- if (!src_is_lmem)
- /*
- * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
- * will be taken for the blt. in Flat-ccs supported
- * platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
- * for main memory
- */
- *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
- } else { /* ccs handling is not required */
- *src_sz = CHUNK_SZ;
- }
+ if (ccs_bytes_to_cpy && !src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS capable
+ * platforms the smem object will have more pages than
+ * required for main memory, hence limit it to the
+ * required size for main memory.
+ */
+ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+ else
+ return CHUNK_SZ;
}
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
- u32 len;
+ u64 len;
do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,13 +671,13 @@ intel_context_migrate_copy(struct intel_context *ce,
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
- u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
enum i915_cache_level ccs_cache_level;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
- int src_sz, dst_sz;
- bool ccs_is_src;
+ u64 src_sz, dst_sz;
+ bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +718,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -788,8 +759,8 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- calculate_chunk_sz(i915, src_is_lmem, &src_sz,
- bytes_to_cpy, ccs_bytes_to_cpy);
+ src_sz = calculate_chunk_sz(i915, src_is_lmem,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
src_offset, src_sz);
@@ -852,6 +823,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
}
/* Arbitration is re-enabled between requests. */
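A small illustration of the chunking rule introduced above (values are assumed for the example and CHUNK_SZ is taken to be larger than the remaining bytes; this is a sketch, not part of the patch):

	u64 src_sz;

	/* smem source with CCS bytes still pending: clamp to what the main copy needs */
	src_sz = calculate_chunk_sz(i915, false, 3 * SZ_1M, SZ_64K);	/* == 3 * SZ_1M */

	/* lmem source (or no CCS left to copy): use the full CHUNK_SZ window */
	src_sz = calculate_chunk_sz(i915, true, 3 * SZ_1M, SZ_64K);	/* == CHUNK_SZ */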
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d8b94d638559..7ecfa672f738 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
- if (vma_res->allocated)
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
@@ -308,7 +312,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = i915->drm.dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
dma_resv_init(&ppgtt->vm._resv);
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6e90032e12e9..f3ad93db0b21 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -4,8 +4,10 @@
*/
#include "i915_drv.h"
+#include "i915_pci.h"
#include "i915_reg.h"
#include "intel_memory_region.h"
+#include "intel_pci_config.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
@@ -15,6 +17,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#ifdef CONFIG_64BIT
static void _release_bars(struct pci_dev *pdev)
{
int resno;
@@ -44,7 +47,6 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
}
-#define LMEM_BAR_NUM 2
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
@@ -55,15 +57,14 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
u32 pci_cmd;
int i;
- current_size = roundup_pow_of_two(pci_resource_len(pdev, LMEM_BAR_NUM));
+ current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
if (i915->params.lmem_bar_size) {
u32 bar_sizes;
rebar_size = i915->params.lmem_bar_size *
(resource_size_t)SZ_1M;
- bar_sizes = pci_rebar_get_possible_sizes(pdev,
- LMEM_BAR_NUM);
+ bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
if (rebar_size == current_size)
return;
@@ -106,11 +107,14 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_write_config_dword(pdev, PCI_COMMAND,
pci_cmd & ~PCI_COMMAND_MEMORY);
- _resize_bar(i915, LMEM_BAR_NUM, rebar_size);
+ _resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
static int
region_lmem_release(struct intel_memory_region *mem)
@@ -198,6 +202,9 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (!IS_DGFX(i915))
return ERR_PTR(-ENODEV);
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
if (HAS_FLAT_CCS(i915)) {
resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
@@ -232,8 +239,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
mul_u32_u32(i915->params.lmem_size, SZ_1M));
}
- io_start = pci_resource_start(pdev, 2);
- io_size = min(pci_resource_len(pdev, 2), lmem_size);
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
+ io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size);
if (!io_size)
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c68d36fb5bbd..b36674356986 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -776,7 +776,7 @@ static void revoke_mmaps(struct intel_gt *gt)
continue;
node = &vma->mmo->vma_node;
- vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1281,9 +1281,6 @@ static void intel_gt_reset_global(struct intel_gt *gt,
intel_wedge_on_timeout(&w, gt, 5 * HZ) {
intel_display_prepare_reset(gt->i915);
- /* Flush everyone using a resource about to be clobbered */
- synchronize_srcu_expedited(&gt->reset.backoff_srcu);
-
intel_gt_reset(gt, engine_mask, reason);
intel_display_finish_reset(gt->i915);
@@ -1392,6 +1389,9 @@ void intel_gt_handle_error(struct intel_gt *gt,
}
}
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
+
intel_gt_reset_global(gt, engine_mask, msg);
if (!intel_uc_uses_guc_submission(&gt->uc)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fb3f57ee450b..6b86250c31ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps)
rps_reset_ei(rps);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
@@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
if (GRAPHICS_VER(gt->i915) >= 11)
gen11_rps_reset_interrupts(rps);
else
gen6_rps_reset_interrupts(rps);
rps->pm_iir = 0;
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void rps_disable_interrupts(struct intel_rps *rps)
@@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
@@ -1107,7 +1107,12 @@ void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *c
caps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ if (GRAPHICS_VER(i915) >= 10)
+ caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
+ intel_uncore_read(to_gt(i915)->uncore,
+ GEN10_FREQ_INFO_REC));
+ else
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
caps->min_freq = (rp_state_cap >> 16) & 0xff;
}
@@ -1546,6 +1551,9 @@ void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ if (!intel_rps_is_enabled(rps))
+ return;
+
intel_rps_clear_enabled(rps);
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);
@@ -1789,10 +1797,10 @@ static void rps_work(struct work_struct *work)
int new_freq, adj, min, max;
u32 pm_iir = 0;
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
if (!pm_iir && !client_boost)
@@ -1865,9 +1873,9 @@ static void rps_work(struct work_struct *work)
mutex_unlock(&rps->lock);
out:
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_unmask_irq(gt, rps->pm_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1875,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
struct intel_gt *gt = rps_to_gt(rps);
const u32 events = rps->pm_events & pm_iir;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
if (unlikely(!events))
return;
@@ -1895,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
events = pm_iir & rps->pm_events;
if (events) {
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
GT_TRACE(gt, "irq events:%x\n", events);
@@ -1903,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
rps->pm_iir |= events;
schedule_work(&rps->work);
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
}
if (GRAPHICS_VER(gt->i915) >= 8)
@@ -1979,7 +1987,9 @@ void intel_rps_init(struct intel_rps *rps)
/* Derive initial user preferences/limits from the hardware limits */
rps->max_freq_softlimit = rps->max_freq;
+ rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
rps->min_freq_softlimit = rps->min_freq;
+ rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
/* After setting max-softlimit, find the overclock max freq */
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
@@ -2126,6 +2136,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in raw format. On newer platforms the raw value
+ * is in units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
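A quick numeric check of the conversion above, assuming GT_FREQUENCY_MULTIPLIER is 50, GEN9_FREQ_SCALER is 3 and SLPC frequencies are stored in MHz (all of which hold on current kernels but are assumptions here):

	/*
	 *   SLPC path:    rp0_freq = 1300 MHz -> DIV_ROUND_CLOSEST(1300, 50) = 26
	 *   legacy Gen9+: max_freq = 78 (~16.67 MHz units) -> 78 / 3 = 26
	 *
	 * Both paths report the same value in 50 MHz units for a 1300 MHz part.
	 */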
+
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2214,6 +2249,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in raw format. On newer platforms the raw value
+ * is in units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 1e8d56491308..4509dfdc52e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
new file mode 100644
index 000000000000..e8f3d18c12b8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "i915_drv.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_sa_media.h"
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore;
+
+ uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->gsi_offset = gsi_offset;
+
+ gt->irq_lock = to_gt(i915)->irq_lock;
+ intel_gt_common_init_early(gt);
+ intel_uncore_init_early(uncore, gt);
+
+ /*
+ * Standalone media shares the general MMIO space with the primary
+ * GT. We'll re-use the primary GT's mapping.
+ */
+ uncore->regs = i915->uncore.regs;
+ if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
+ return -EIO;
+
+ gt->uncore = uncore;
+ gt->phys_addr = phys_addr;
+
+ /*
+ * For current platforms we can assume there's only a single
+ * media GT and cache it for quick lookup.
+ */
+ drm_WARN_ON(&i915->drm, i915->media_gt);
+ i915->media_gt = gt;
+
+ return 0;
+}
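A hypothetical caller of the new helper (media_gt, media_phys_addr and gsi_offset are placeholders for illustration, not names from this patch); the primary GT must already have probed its MMIO so that uncore->regs can be shared:

	err = intel_sa_mediagt_setup(media_gt, media_phys_addr, gsi_offset);
	if (err)
		drm_err(&i915->drm, "standalone media GT setup failed: %d\n", err);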
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.h b/drivers/gpu/drm/i915/gt/intel_sa_media.h
new file mode 100644
index 000000000000..3afb310de932
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __INTEL_SA_MEDIA__
+#define __INTEL_SA_MEDIA__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset);
+
+#endif /* __INTEL_SA_MEDIA__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index c6d3050604c8..66f21c735d54 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -382,7 +382,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
static void gen9_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_device_info *info = mkwrite_device_info(i915);
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
u32 fuse2, eu_disable, subslice_mask;
@@ -471,10 +470,10 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
if (IS_GEN9_LP(i915)) {
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
+ RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
sseu->min_eu_in_pool = 0;
- if (info->has_pooled_eu) {
+ if (HAS_POOLED_EU(i915)) {
if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
sseu->min_eu_in_pool = 3;
else if (IS_SS_DISABLED(1))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e8111fce56d0..6d2003d598e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -568,6 +568,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_add(wal,
@@ -2102,13 +2103,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* Wa_1509235366:dg2 */
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
-
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
}
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
@@ -2119,6 +2113,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_1509727124:dg2 */
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ SC_DISABLE_POWER_OPTIMIZATION_EBB);
+ }
+
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
@@ -2195,15 +2196,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
- IS_DG2_G11(i915)) {
- /* Wa_22012654132:dg2 */
- wa_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
- 0 /* write-only, so skip validation */,
- true);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2397,7 +2389,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
FF_DOP_CLOCK_GATE_DISABLE);
}
- if (HAS_PERCTX_PREEMPT_CTRL(i915)) {
+ if (IS_GRAPHICS_VER(i915, 9, 12)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -2670,6 +2662,56 @@ ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them with the same
+ * workaround infrastructure to ensure that they're automatically added to
+ * the GuC save/restore lists, re-applied at the right times, and checked for
+ * any conflicting programming requested by real workarounds.
+ *
+ * Programming settings should be added here only if their registers are not
+ * part of an engine's register state context. If a register is part of a
+ * context, then any tuning settings should be programmed in an appropriate
+ * function invoked by __intel_engine_init_ctx_wa().
+ */
+static void
+add_render_compute_tuning_settings(struct drm_i915_private *i915,
+ struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(i915)) {
+ wa_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ }
+
+ if (IS_DG2(i915)) {
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
+
+ /*
+ * This is also listed as Wa_22012654132 for certain DG2
+ * steppings, but the tuning setting programming is a superset
+ * since it applies to all DG2 variants and steppings.
+ *
+ * Note that register 0xE420 is write-only and cannot be read
+ * back for verification on DG2 (due to Wa_14012342262), so
+ * we need to explicitly skip the readback.
+ */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
+ }
+
+ /*
+ * This tuning setting proves beneficial only on ATS-M designs; the
+ * default "age based" setting is optimal on regular DG2 and other
+ * platforms.
+ */
+ if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
+ wa_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
+ THREAD_EX_ARB_MODE_RR_AFTER_DEP);
+}
+
+/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
* specific engine. Since all render+compute engines get reset
@@ -2683,14 +2725,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_PONTEVECCHIO(i915)) {
- /*
- * The following is not actually a "workaround" but rather
- * a recommended tuning setting documented in the bspec's
- * performance guide section.
- */
- wa_write(wal, XEHPC_L3SCRUB, SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ add_render_compute_tuning_settings(i915, wal);
+ if (IS_PONTEVECCHIO(i915)) {
/* Wa_16016694945 */
wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 09f8cd2d0e2c..1e08b2473b99 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -2077,7 +2077,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
goto out;
}
- intel_context_set_banned(rq->context);
+ intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2136,7 +2136,7 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
if (err)
goto out;
- intel_context_set_banned(rq[1]->context);
+ intel_context_ban(rq[1]->context, rq[1]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2219,7 +2219,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- intel_context_set_banned(rq[2]->context);
+ intel_context_ban(rq[2]->context, rq[2]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2234,7 +2234,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
goto out;
}
- if (rq[1]->fence.error != 0) {
+	/*
+	 * The behaviour differs with and without semaphores. With semaphores
+	 * the subsequent request is already on the hardware and is not
+	 * cancelled, while without them it is held in the driver and cancelled.
+	 */
+ if (intel_engine_has_semaphores(rq[1]->engine) &&
+ rq[1]->fence.error != 0) {
pr_err("Normal inflight1 request did not complete\n");
err = -EINVAL;
goto out;
@@ -2282,7 +2288,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
goto out;
}
- intel_context_set_banned(rq->context);
+ intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 6493265d5f64..7f3bb1d34dfb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1302,13 +1302,15 @@ static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
+ engine = intel_selftest_find_any_engine(gt);
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
@@ -1432,7 +1434,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
int (*fn)(void *),
unsigned int flags)
{
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
@@ -1444,6 +1446,8 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
return 0;
+ engine = intel_selftest_find_any_engine(gt);
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
@@ -1819,12 +1823,14 @@ static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct hang h;
struct i915_request *rq;
struct i915_gpu_coredump *error;
int err;
+ engine = intel_selftest_find_any_engine(gt);
+
/* Check that we can issue a global GPU and engine reset */
if (!intel_has_reset_engine(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 1109088fe8f6..82d3f8058995 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -27,6 +27,9 @@
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+#define LRI_HEADER MI_INSTR(0x22, 0)
+#define LRI_LENGTH_MASK GENMASK(7, 0)
+
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
@@ -202,7 +205,7 @@ static int live_lrc_layout(void *arg)
continue;
}
- if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
err = -EINVAL;
@@ -357,6 +360,11 @@ static int live_lrc_fixed(void *arg)
lrc_ring_cmd_buf_cctl(engine),
"RING_CMD_BUF_CCTL"
},
+ {
+ i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
+ lrc_ring_bb_offset(engine),
+ "RING_BB_OFFSET"
+ },
{ },
}, *t;
u32 *hw;
@@ -987,18 +995,40 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /*
+ * Keep it simple, skip parsing complex commands
+ *
+ * At present, there are no more MI_LOAD_REGISTER_IMM
+ * commands after the first 3D state command. Rather
+ * than include a table (see i915_cmd_parser.c) of all
+ * the possible commands and their instruction lengths
+ * (or mask for variable length instructions), assume
+ * we have gathered the complete list of registers and
+ * bail out.
+ */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ /* Assume all other MI commands match LRI length mask */
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1150,18 +1180,29 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
*cs++ = MI_LOAD_REGISTER_IMM(len);
@@ -1292,18 +1333,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1343,6 +1395,30 @@ err_A0:
return err;
}
+static struct i915_vma *
+create_result_vma(struct i915_address_space *vm, unsigned long sz)
+{
+ struct i915_vma *vma;
+ void *ptr;
+
+ vma = create_user_vma(vm, sz);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Set the results to a known value distinct from the poison */
+ ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ i915_vma_put(vma);
+ return ERR_CAST(ptr);
+ }
+
+ memset(ptr, POISON_INUSE, vma->size);
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return vma;
+}
+
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
@@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
goto err_A;
}
- ref[0] = create_user_vma(A->vm, SZ_64K);
+ ref[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[0])) {
err = PTR_ERR(ref[0]);
goto err_B;
}
- ref[1] = create_user_vma(A->vm, SZ_64K);
+ ref[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[1])) {
err = PTR_ERR(ref[1]);
goto err_ref0;
@@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
i915_request_put(rq);
- result[0] = create_user_vma(A->vm, SZ_64K);
+ result[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[0])) {
err = PTR_ERR(result[0]);
goto err_ref1;
}
- result[1] = create_user_vma(A->vm, SZ_64K);
+ result[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[1])) {
err = PTR_ERR(result[1]);
goto err_result0;
@@ -1408,18 +1484,17 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
err = poison_registers(B, poison, sema);
- if (err) {
- WRITE_ONCE(*sema, -1);
- i915_request_put(rq);
- goto err_result1;
- }
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- i915_request_put(rq);
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("%s(%s): wait for results timed out\n",
+ __func__, engine->name);
err = -ETIME;
- goto err_result1;
}
+
+ /* Always cancel the semaphore wait, just in case the GPU gets stuck */
+ WRITE_ONCE(*sema, -1);
i915_request_put(rq);
+ if (err)
+ goto err_result1;
err = compare_isolation(engine, ref, result, A, poison);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index ac29691e0b1a..f8a1d27df272 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -166,6 +166,15 @@ static int run_test(struct intel_gt *gt, int test_type)
return -EIO;
}
+	/*
+	 * FIXME: With efficient frequency enabled, GuC can request
+	 * frequencies higher than the SLPC max. While this is being fixed
+	 * in GuC, level-set these tests by using RPn as the min.
+	 */
+ err = slpc_set_min_freq(slpc, slpc->min_freq);
+ if (err)
+ return err;
+
if (slpc->min_freq == slpc->rp0_freq) {
pr_err("Min/Max are fused to the same value\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index df83c1cc7c7a..28b8387f97b7 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -37,6 +37,7 @@
* | | | - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large) |
* | | | - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message) |
* | | | - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified) |
+ * | | | - _`GUC_CTB_STATUS_UNUSED` = 8 (CTB is not in use) |
* +---+-------+--------------------------------------------------------------+
* |...| | RESERVED = MBZ |
* +---+-------+--------------------------------------------------------------+
@@ -49,9 +50,10 @@ struct guc_ct_buffer_desc {
u32 tail;
u32 status;
#define GUC_CTB_STATUS_NO_ERROR 0
-#define GUC_CTB_STATUS_OVERFLOW (1 << 0)
-#define GUC_CTB_STATUS_UNDERFLOW (1 << 1)
-#define GUC_CTB_STATUS_MISMATCH (1 << 2)
+#define GUC_CTB_STATUS_OVERFLOW BIT(0)
+#define GUC_CTB_STATUS_UNDERFLOW BIT(1)
+#define GUC_CTB_STATUS_MISMATCH BIT(2)
+#define GUC_CTB_STATUS_UNUSED BIT(3)
u32 reserved[13];
} __packed;
static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
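For illustration, a reader of the descriptor can now distinguish the new "not in use" state from genuine corruption roughly as follows (sketch only; the equivalent handling added to ct_read() appears later in this patch):

	u32 status = READ_ONCE(desc->status);

	if (status & GUC_CTB_STATUS_UNUSED)
		status &= ~GUC_CTB_STATUS_UNUSED;	/* CTB torn down, not fatal */
	if (status)
		return -EPIPE;				/* OVERFLOW/UNDERFLOW/MISMATCH */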
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2706a8c65090..bac06e3d6f2c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen9_enable_guc_interrupts(struct intel_guc *guc)
@@ -93,11 +93,11 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
@@ -106,11 +106,11 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen9_reset_guc_interrupts(guc);
@@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
@@ -130,25 +130,25 @@ static void gen11_enable_guc_interrupts(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
intel_uncore_write(gt->uncore,
GEN11_GUC_SG_INTR_ENABLE, events);
intel_uncore_write(gt->uncore,
GEN11_GUC_SG_INTR_MASK, ~events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen11_reset_guc_interrupts(guc);
@@ -224,53 +224,22 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
- u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
- u32 flags;
-
- #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
- #define LOG_UNIT SZ_1M
- #define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
- #else
- #define LOG_UNIT SZ_4K
- #define LOG_FLAG 0
- #endif
-
- #if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
- #define CAPTURE_UNIT SZ_1M
- #define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
- #else
- #define CAPTURE_UNIT SZ_4K
- #define CAPTURE_FLAG 0
- #endif
-
- BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
- BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
- BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
-
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
- BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
- (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
+ struct intel_guc_log *log = &guc->log;
+ u32 offset, flags;
+
+ GEM_BUG_ON(!log->sizes_initialised);
+
+ offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
- CAPTURE_FLAG |
- LOG_FLAG |
- ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
- ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
+ log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
+ (log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
(offset << GUC_LOG_BUF_ADDR_SHIFT);
- #undef LOG_UNIT
- #undef LOG_FLAG
- #undef CAPTURE_UNIT
- #undef CAPTURE_FLAG
-
return flags;
}
@@ -389,6 +358,23 @@ void intel_guc_write_params(struct intel_guc *guc)
intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ u32 stamp = 0;
+ u64 ktime;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
+ ktime = ktime_get_boottime_ns();
+
+ drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
+ drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
+ drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
+}
+
int intel_guc_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index a7acffbf15d1..804133df1ac9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -464,4 +464,6 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
void intel_guc_write_barrier(struct intel_guc *guc);
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ba7541f3ca61..74cbe8eaf531 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -464,7 +464,11 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
}
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
-#define LRC_SKIP_SIZE (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
+#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+ XEHP_LR_HW_CONTEXT_SIZE : \
+ LR_HW_CONTEXT_SIZE)
+#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
static int guc_prep_golden_context(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -525,7 +529,7 @@ static int guc_prep_golden_context(struct intel_guc *guc)
* on all engines).
*/
ads_blob_write(guc, ads.eng_state_size[guc_class],
- real_size - LRC_SKIP_SIZE);
+ real_size - LRC_SKIP_SIZE(gt->i915));
ads_blob_write(guc, ads.golden_context_lrca[guc_class],
addr_ggtt);
@@ -599,7 +603,7 @@ static void guc_init_golden_context(struct intel_guc *guc)
}
GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
- real_size - LRC_SKIP_SIZE);
+ real_size - LRC_SKIP_SIZE(gt->i915));
GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);
addr_ggtt += alloc_size;
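Worked numbers for the new skip size, assuming LRC_PPHWSP_SZ is a single page and 4 KiB pages (true on current platforms, but an assumption here):

	/*
	 *   pre-Xe_HP: LRC_SKIP_SIZE = 1 * 4096 + 80 * 4 = 4416 bytes
	 *   Xe_HP+:    LRC_SKIP_SIZE = 1 * 4096 + 96 * 4 = 4480 bytes
	 *
	 * i.e. the eng_state_size reported to GuC now excludes 64 more bytes
	 * of HW context header when GRAPHICS_VER_FULL >= IP_VER(12, 50).
	 */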
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 75257bd20ff0..8f1165146013 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -600,10 +600,8 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
return 0;
}
-#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
-
-int
-intel_guc_capture_output_min_size_est(struct intel_guc *guc)
+static int
+guc_capture_output_min_size_est(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -623,13 +621,8 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
* For each engine instance, there would be 1 x guc_state_capture_group_t output
* followed by 3 x guc_state_capture_t lists. The latter is how the register
* dumps are split across different register types (where the '3' are global vs class
- * vs instance). Finally, let's multiply the whole thing by 3x (just so we are
- * not limited to just 1 round of data in a worst case full register dump log)
- *
- * NOTE: intel_guc_log that allocates the log buffer would round this size up to
- * a power of two.
+ * vs instance).
*/
-
for_each_engine(engine, gt, id) {
worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
(3 * sizeof(struct guc_state_capture_header_t));
@@ -649,7 +642,31 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
- return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER);
+ return worst_min_size;
+}
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before the i915 can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int min_size = guc_capture_output_min_size_est(guc);
+ int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+
+ if (min_size < 0)
+ drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ min_size);
+ else if (min_size > buffer_size)
+ drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
+ buffer_size, min_size);
+ else if (spare_size > buffer_size)
+		drm_notice(&i915->drm, "GuC error state capture buffer may be too small: %d < %d (min = %d)\n",
+ buffer_size, spare_size, min_size);
}
/*
@@ -1278,7 +1295,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
log_buf_state = guc->log.buf_addr +
(sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
- src_data = guc->log.buf_addr + intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER);
+ src_data = guc->log.buf_addr +
+ intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
/*
* Make a copy of the state structure, inside GuC log buffer
@@ -1286,7 +1304,7 @@ static void __guc_capture_process_output(struct intel_guc *guc)
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
- buffer_size = intel_guc_get_log_buffer_size(GUC_CAPTURE_LOG_BUFFER);
+ buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_count = log_buf_state_local.buffer_full_cnt;
@@ -1365,33 +1383,22 @@ guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
return NULL;
}
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define __out(a, ...) \
- do { \
- drm_warn((&(a)->i915->drm), __VA_ARGS__); \
- i915_error_printf((a), __VA_ARGS__); \
- } while (0)
-#else
-#define __out(a, ...) \
- i915_error_printf(a, __VA_ARGS__)
-#endif
-
#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
do { \
- __out(ebuf, " i915-Eng-Name: %s command stream\n", \
- (eng)->name); \
- __out(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
- __out(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
- __out(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
- (eng)->logical_mask); \
+ i915_error_printf(ebuf, " i915-Eng-Name: %s command stream\n", \
+ (eng)->name); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
+ i915_error_printf(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
+ (eng)->logical_mask); \
} while (0)
#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
do { \
- __out(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
- (node)->eng_inst); \
- __out(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
- __out(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
+ i915_error_printf(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
+ (node)->eng_inst); \
+ i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
+ i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
} while (0)
int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
@@ -1423,57 +1430,57 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
guc = &ee->engine->gt->uc.guc;
- __out(ebuf, "global --- GuC Error Capture on %s command stream:\n",
- ee->engine->name);
+ i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
+ ee->engine->name);
node = ee->guc_capture_node;
if (!node) {
- __out(ebuf, " No matching ee-node\n");
+ i915_error_printf(ebuf, " No matching ee-node\n");
return 0;
}
- __out(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
+ i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
- __out(ebuf, " RegListType: %s\n",
- datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
- __out(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
+ i915_error_printf(ebuf, " RegListType: %s\n",
+ datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
+ i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
switch (i) {
case GUC_CAPTURE_LIST_TYPE_GLOBAL:
default:
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
- __out(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
- __out(ebuf, " i915-Eng-Class: %d\n",
- guc_class_to_engine_class(node->eng_class));
+ i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
+ i915_error_printf(ebuf, " i915-Eng-Class: %d\n",
+ guc_class_to_engine_class(node->eng_class));
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
if (eng)
GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
else
- __out(ebuf, " i915-Eng-Lookup Fail!\n");
+ i915_error_printf(ebuf, " i915-Eng-Lookup Fail!\n");
GCAP_PRINT_GUC_INST_INFO(ebuf, node);
break;
}
numregs = node->reginfo[i].num_regs;
- __out(ebuf, " NumRegs: %d\n", numregs);
+ i915_error_printf(ebuf, " NumRegs: %d\n", numregs);
j = 0;
while (numregs--) {
regs = node->reginfo[i].regs;
str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
node->eng_class, 0, regs[j].offset, &is_ext);
if (!str)
- __out(ebuf, " REG-0x%08x", regs[j].offset);
+ i915_error_printf(ebuf, " REG-0x%08x", regs[j].offset);
else
- __out(ebuf, " %s", str);
+ i915_error_printf(ebuf, " %s", str);
if (is_ext)
- __out(ebuf, "[%ld][%ld]",
- FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
- FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
- __out(ebuf, ": 0x%08x\n", regs[j].value);
+ i915_error_printf(ebuf, "[%ld][%ld]",
+ FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
+ FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
+ i915_error_printf(ebuf, ": 0x%08x\n", regs[j].value);
++j;
}
}
@@ -1580,5 +1587,7 @@ int intel_guc_capture_init(struct intel_guc *guc)
INIT_LIST_HEAD(&guc->capture->outlist);
INIT_LIST_HEAD(&guc->capture->cachelist);
+ check_guc_capture_size(guc);
+
return 0;
}
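As an illustration of when each message fires (the 1 MiB estimate is assumed; 2 MiB is the default capture section size established in the intel_guc_log.c changes later in this patch):

	/*
	 *   min_size   = 1 MiB  (worst-case estimate for the engines present)
	 *   spare_size = 3 MiB  (min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER)
	 *   buffer     = 2 MiB  (intel_guc_log_section_size_capture())
	 *
	 * min_size fits but spare_size does not, so only the softer drm_notice()
	 * is emitted; an estimate above 2 MiB would escalate to the drm_warn().
	 */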
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
index d3d7bd0b6db6..fbd3713c7832 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -21,7 +21,6 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
struct intel_context *ce);
void intel_guc_capture_process(struct intel_guc *guc);
-int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
void **outptr);
int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index f01325cd1b62..2b22065e87bf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -455,6 +455,7 @@ corrupted:
/**
* wait_for_ct_request_update - Wait for CT request state update.
+ * @ct: pointer to CT
* @req: pointer to pending request
* @status: placeholder for status
*
@@ -467,9 +468,10 @@ corrupted:
* * 0 response received (status is valid)
* * -ETIMEDOUT no response within hardcoded timeout
*/
-static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
+static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
{
int err;
+ bool ct_enabled;
/*
* Fast commands should complete in less than 10us, so sample quickly
@@ -481,12 +483,15 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
#define done \
- (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
+ (!(ct_enabled = intel_guc_ct_enabled(ct)) || \
+ FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
GUC_HXG_ORIGIN_GUC)
err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
if (err)
err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
+ if (!ct_enabled)
+ err = -ENODEV;
*status = req->status;
return err;
@@ -703,11 +708,18 @@ retry:
intel_guc_notify(ct_to_guc(ct));
- err = wait_for_ct_request_update(&request, status);
+ err = wait_for_ct_request_update(ct, &request, status);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
if (unlikely(err)) {
- CT_ERROR(ct, "No response for request %#x (fence %u)\n",
- action[0], request.fence);
+ if (err == -ENODEV)
+			/* wait_for_ct_request_update() returns -ENODEV when a reset or
+			 * suspend is in progress; report it as debug rather than an error.
+			 */
+ CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
+ action[0], request.fence);
+ else
+ CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+ action[0], request.fence);
goto unlink;
}
@@ -771,8 +783,9 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
- action[0], ERR_PTR(ret), status);
+ if (ret != -ENODEV)
+ CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+ action[0], ERR_PTR(ret), status);
} else if (unlikely(ret)) {
CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
action[0], ret, ret);
@@ -816,8 +829,22 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
if (unlikely(ctb->broken))
return -EPIPE;
- if (unlikely(desc->status))
- goto corrupted;
+ if (unlikely(desc->status)) {
+ u32 status = desc->status;
+
+ if (status & GUC_CTB_STATUS_UNUSED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset. But should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
+ status &= ~GUC_CTB_STATUS_UNUSED;
+ }
+
+ if (status)
+ goto corrupted;
+ }
GEM_BUG_ON(head > size);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 25b2d7ce6640..55d3ef93e86f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -13,8 +13,163 @@
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
+#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#else
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_2M
+#endif
+
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
+struct guc_log_section {
+ u32 max;
+ u32 flag;
+ u32 default_val;
+ const char *name;
+};
+
+static void _guc_log_init_sizes(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
+ {
+ GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
+ "crash dump"
+ },
+ {
+ GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
+ "debug",
+ },
+ {
+ GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
+ GUC_LOG_CAPTURE_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
+ "capture",
+ }
+ };
+ int i;
+
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
+ log->sizes[i].bytes = sections[i].default_val;
+
+ /* If debug size > 1MB then bump default crash size to keep the same units */
+ if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
+ log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;
+
+ /* Prepare the GuC API structure fields: */
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
+ /* Convert to correct units */
+ if ((log->sizes[i].bytes % SZ_1M) == 0) {
+ log->sizes[i].units = SZ_1M;
+ log->sizes[i].flag = sections[i].flag;
+ } else {
+ log->sizes[i].units = SZ_4K;
+ log->sizes[i].flag = 0;
+ }
+
+ if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
+ drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
+ sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
+ log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
+
+ if (!log->sizes[i].count) {
+ drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
+ } else {
+ /* Size is +1 unit */
+ log->sizes[i].count--;
+ }
+
+ /* Clip to field size */
+ if (log->sizes[i].count > sections[i].max) {
+ drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
+ sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
+ log->sizes[i].count = sections[i].max;
+ }
+ }
+
+ if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
+ drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units,
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
+ log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
+ }
+
+ log->sizes_initialised = true;
+}
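With the default non-debug sizes above, the conversion yields the following fields (illustrative summary, not additional code in the patch):

	/*
	 *   crash    8 KiB -> units SZ_4K, flag 0,                           count  8K/4K - 1 = 1
	 *   debug   64 KiB -> units SZ_4K, flag 0,                           count 64K/4K - 1 = 15
	 *   capture  2 MiB -> units SZ_1M, flag GUC_LOG_CAPTURE_ALLOC_UNITS, count  2M/1M - 1 = 1
	 *
	 * Crash and debug end up with the same units, so the final unit-mismatch
	 * check does not trigger and no section is clipped.
	 */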
+
+static void guc_log_init_sizes(struct intel_guc_log *log)
+{
+ if (log->sizes_initialised)
+ return;
+
+ _guc_log_init_sizes(log);
+}
+
+static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
+}
+
+static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
+}
+
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
+}
+
+static u32 intel_guc_log_size(struct intel_guc_log *log)
+{
+ /*
+ * GuC Log buffer Layout:
+ *
+ * NB: Ordering must follow "enum guc_log_buffer_type".
+ *
+ * +===============================+ 00B
+ * | Debug state header |
+ * +-------------------------------+ 32B
+ * | Crash dump state header |
+ * +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Debug logs |
+ * +===============================+ + DEBUG_SIZE
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
+ */
+ return PAGE_SIZE +
+ intel_guc_log_section_size_crash(log) +
+ intel_guc_log_section_size_debug(log) +
+ intel_guc_log_section_size_capture(log);
+}
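For the default non-debug configuration above this works out to (illustrative arithmetic):

	/*
	 *   4 KiB (state headers page) + 8 KiB (crash) + 64 KiB (debug)
	 *   + 2 MiB (capture) = 2124 KiB allocated for the GuC log vma.
	 */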
+
/**
* DOC: GuC firmware log
*
@@ -139,7 +294,8 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);
+ relay_reserve(log->relay.channel, log->vma->obj->base.size -
+ intel_guc_log_section_size_capture(log));
/* Switch to the next sub buffer */
relay_flush(log->relay.channel);
@@ -184,15 +340,16 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
return overflow;
}
-unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
- return DEBUG_BUFFER_SIZE;
+ return intel_guc_log_section_size_debug(log);
case GUC_CRASH_DUMP_LOG_BUFFER:
- return CRASH_BUFFER_SIZE;
+ return intel_guc_log_section_size_crash(log);
case GUC_CAPTURE_LOG_BUFFER:
- return CAPTURE_BUFFER_SIZE;
+ return intel_guc_log_section_size_capture(log);
default:
MISSING_CASE(type);
}
@@ -200,7 +357,8 @@ unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
return 0;
}
-size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
{
enum guc_log_buffer_type i;
size_t offset = PAGE_SIZE;/* for the log_buffer_states */
@@ -208,7 +366,7 @@ size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
if (i == type)
break;
- offset += intel_guc_get_log_buffer_size(i);
+ offset += intel_guc_get_log_buffer_size(log, i);
}
return offset;
@@ -259,7 +417,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
- buffer_size = intel_guc_get_log_buffer_size(type);
+ buffer_size = intel_guc_get_log_buffer_size(log, type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
@@ -374,7 +532,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
* Keep the size of sub buffers same as shared log buffer
* but GuC log-events excludes the error-state-capture logs
*/
- subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;
+ subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -461,32 +619,7 @@ int intel_guc_log_create(struct intel_guc_log *log)
GEM_BUG_ON(log->vma);
- /*
- * GuC Log buffer Layout
- * (this ordering must follow "enum guc_log_buffer_type" definition)
- *
- * +===============================+ 00B
- * | Debug state header |
- * +-------------------------------+ 32B
- * | Crash dump state header |
- * +-------------------------------+ 64B
- * | Capture state header |
- * +-------------------------------+ 96B
- * | |
- * +===============================+ PAGE_SIZE (4KB)
- * | Debug logs |
- * +===============================+ + DEBUG_SIZE
- * | Crash Dump logs |
- * +===============================+ + CRASH_SIZE
- * | Capture logs |
- * +===============================+ + CAPTURE_SIZE
- */
- if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
- DRM_WARN("GuC log buffer for state_capture maybe too small. %d < %d\n",
- CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));
-
- guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
- CAPTURE_BUFFER_SIZE;
+ guc_log_size = intel_guc_log_size(log);
vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
@@ -749,8 +882,9 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
struct intel_guc *guc = log_to_guc(log);
struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
struct drm_i915_gem_object *obj = NULL;
- u32 *map;
- int i = 0;
+ void *map;
+ u32 *page;
+ int i, j;
if (!intel_guc_is_supported(guc))
return -ENODEV;
@@ -763,21 +897,34 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
if (!obj)
return 0;
+ page = (u32 *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ intel_guc_dump_time_info(guc, p);
+
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
DRM_DEBUG("Failed to pin object\n");
drm_puts(p, "(log data unaccessible)\n");
+ free_page((unsigned long)page);
return PTR_ERR(map);
}
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
- drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(map + i), *(map + i + 1),
- *(map + i + 2), *(map + i + 3));
+ for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
+ if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
+ memcpy(page, map + i, PAGE_SIZE);
+
+ for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(page + j + 0), *(page + j + 1),
+ *(page + j + 2), *(page + j + 3));
+ }
drm_puts(p, "\n");
i915_gem_object_unpin_map(obj);
+ free_page((unsigned long)page);
return 0;
}
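A minimal sketch of the dump pattern above, assuming a user-space stand-in for the write-combined mapping (the kernel path uses i915_memcpy_from_wc() with a plain memcpy() fallback): each page is copied into a cached bounce buffer first and then printed four dwords per line.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PAGE_SZ 4096u

	static void dump_log(const void *wc_map, size_t size)
	{
		uint32_t page[PAGE_SZ / sizeof(uint32_t)];
		size_t i, j;

		for (i = 0; i < size; i += PAGE_SZ) {
			/* bounce copy out of (pretend) write-combined memory */
			memcpy(page, (const uint8_t *)wc_map + i, PAGE_SZ);

			for (j = 0; j < PAGE_SZ / sizeof(uint32_t); j += 4)
				printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
				       page[j], page[j + 1], page[j + 2], page[j + 3]);
		}
	}

	int main(void)
	{
		static uint32_t fake_log[2 * PAGE_SZ / sizeof(uint32_t)];

		dump_log(fake_log, sizeof(fake_log));
		return 0;
	}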
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 18007e639be9..02127703be80 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -15,20 +15,6 @@
struct intel_guc;
-#if defined(CONFIG_DRM_I915_DEBUG_GUC)
-#define CRASH_BUFFER_SIZE SZ_2M
-#define DEBUG_BUFFER_SIZE SZ_16M
-#define CAPTURE_BUFFER_SIZE SZ_4M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
-#define CRASH_BUFFER_SIZE SZ_1M
-#define DEBUG_BUFFER_SIZE SZ_2M
-#define CAPTURE_BUFFER_SIZE SZ_1M
-#else
-#define CRASH_BUFFER_SIZE SZ_8K
-#define DEBUG_BUFFER_SIZE SZ_64K
-#define CAPTURE_BUFFER_SIZE SZ_16K
-#endif
-
/*
* While we're using plain log level in i915, GuC controls are much more...
* "elaborate"? We have a couple of bits for verbosity, separate bit for actual
@@ -46,10 +32,30 @@ struct intel_guc;
#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2)
#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
+enum {
+ GUC_LOG_SECTIONS_CRASH,
+ GUC_LOG_SECTIONS_DEBUG,
+ GUC_LOG_SECTIONS_CAPTURE,
+ GUC_LOG_SECTIONS_LIMIT
+};
+
struct intel_guc_log {
u32 level;
+
+ /* Allocation settings */
+ struct {
+ s32 bytes; /* Size in bytes */
+ s32 units; /* GuC API units - 1MB or 4KB */
+ s32 count; /* Number of API units */
+ u32 flag; /* GuC API units flag */
+ } sizes[GUC_LOG_SECTIONS_LIMIT];
+ bool sizes_initialised;
+
+ /* Combined buffer allocation */
struct i915_vma *vma;
void *buf_addr;
+
+ /* RelayFS support */
struct {
bool buf_in_use;
bool started;
@@ -58,6 +64,7 @@ struct intel_guc_log {
struct mutex lock;
u32 full_count;
} relay;
+
/* logging related stats */
struct {
u32 sampled_overflow;
@@ -69,8 +76,9 @@ struct intel_guc_log {
void intel_guc_log_init_early(struct intel_guc_log *log);
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type,
unsigned int full_cnt);
-unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type);
-size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type);
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type);
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log, enum guc_log_buffer_type type);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
@@ -92,4 +100,6 @@ void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
bool dump_load_err);
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 8dc063f087eb..a7092f711e9c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -102,6 +102,10 @@
#define GUC_SEND_TRIGGER (1<<0)
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
+#define GEN12_GUC_SEM_INTR_ENABLES _MMIO(0xc71c)
+#define GUC_SEM_INTR_ROUTE_TO_GUC BIT(31)
+#define GUC_SEM_INTR_ENABLE_ALL (0xff)
+
#define GUC_NUM_DOORBELLS 256
/* format of the HW-monitored doorbell cacheline */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ec9c4ca0f615..fdd895f73f9f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -137,17 +137,6 @@ static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
return ret > 0 ? -EPROTO : ret;
}
-static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
-{
- u32 request[] = {
- GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
- id,
- };
-
- return intel_guc_send(guc, request, ARRAY_SIZE(request));
-}
-
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
@@ -201,16 +190,6 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
return ret;
}
-static int slpc_unset_param(struct intel_guc_slpc *slpc,
- u8 id)
-{
- struct intel_guc *guc = slpc_to_guc(slpc);
-
- GEM_BUG_ON(id >= SLPC_MAX_PARAM);
-
- return guc_action_slpc_unset_param(guc, id);
-}
-
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -488,23 +467,33 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
/* Need a lock now since waitboost can be modifying min as well */
mutex_lock(&slpc->lock);
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-
- ret = slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- val);
-
- /* Return standardized err code for sysfs calls */
- if (ret)
- ret = -EIO;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ /* Ignore efficient freq if lower min freq is requested */
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val < slpc->rp1_freq);
+ if (ret) {
+ i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
+ ERR_PTR(ret));
+ goto out;
}
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ val);
+
if (!ret)
slpc->min_freq_softlimit = val;
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
+ /* Return standardized err code for sysfs calls */
+ if (ret)
+ ret = -EIO;
+
return ret;
}
@@ -575,45 +564,28 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
* unless they have deviated from defaults, in which case,
* we retain the values and set min/max accordingly.
*/
- if (!slpc->max_freq_softlimit)
+ if (!slpc->max_freq_softlimit) {
slpc->max_freq_softlimit = slpc->rp0_freq;
- else if (slpc->max_freq_softlimit != slpc->rp0_freq)
+ slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
+ } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
ret = intel_guc_slpc_set_max_freq(slpc,
slpc->max_freq_softlimit);
+ }
if (unlikely(ret))
return ret;
- if (!slpc->min_freq_softlimit)
- slpc->min_freq_softlimit = slpc->min_freq;
- else if (slpc->min_freq_softlimit != slpc->min_freq)
+ if (!slpc->min_freq_softlimit) {
+ ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
+ if (unlikely(ret))
+ return ret;
+ slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
+ } else if (slpc->min_freq_softlimit != slpc->min_freq) {
return intel_guc_slpc_set_min_freq(slpc,
slpc->min_freq_softlimit);
-
- return 0;
-}
-
-static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
-{
- int ret = 0;
-
- if (ignore) {
- ret = slpc_set_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
- ignore);
- if (!ret)
- return slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- slpc->min_freq);
- } else {
- ret = slpc_unset_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
- if (!ret)
- return slpc_unset_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
}
- return ret;
+ return 0;
}
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
@@ -675,14 +647,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
slpc_get_rp_values(slpc);
- /* Ignore efficient freq and set min to platform min */
- ret = slpc_ignore_eff_freq(slpc, true);
- if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
- ERR_PTR(ret));
- return ret;
- }
-
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 76916aed897a..22ba66e48a9b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
- cancel_delayed_work(&guc->timestamp.work);
+ /*
+ * There is a race with suspend flow where the worker runs after suspend
+ * and causes an unclaimed register access warning. Cancel the worker
+ * synchronously here.
+ */
+ cancel_delayed_work_sync(&guc->timestamp.work);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1532,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
__reset_guc_busyness_stats(guc);
/* Flush IRQ handler */
- spin_lock_irq(&guc_to_gt(guc)->irq_lock);
- spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
+ spin_lock_irq(guc_to_gt(guc)->irq_lock);
+ spin_unlock_irq(guc_to_gt(guc)->irq_lock);
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
@@ -1868,7 +1873,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
if (guc->submission_initialized)
return 0;
- if (guc->fw.major_ver_found < 70) {
+ if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
ret = guc_lrc_desc_pool_create_v69(guc);
if (ret)
return ret;
@@ -2303,7 +2308,7 @@ static int register_context(struct intel_context *ce, bool loop)
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- if (guc->fw.major_ver_found >= 70)
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
ret = register_context_v70(guc, ce, loop);
else
ret = register_context_v69(guc, ce, loop);
@@ -2315,7 +2320,7 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (guc->fw.major_ver_found >= 70)
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
guc_context_policy_init_v70(ce, loop);
}
@@ -2420,7 +2425,6 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
- bool missing = false;
unsigned long flags;
int ret;
@@ -2438,32 +2442,9 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
ret = __guc_context_set_context_policies(guc, &policy, loop);
- missing = ret != 0;
-
- if (!missing && intel_context_is_parent(ce)) {
- struct intel_context *child;
-
- for_each_child(ce, child) {
- __guc_context_policy_start_klv(&policy, child->guc_id.id);
-
- if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
- __guc_context_policy_add_preempt_to_idle(&policy, 1);
-
- child->guc_state.prio = ce->guc_state.prio;
- __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
- __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
- __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-
- ret = __guc_context_set_context_policies(guc, &policy, loop);
- if (ret) {
- missing = true;
- break;
- }
- }
- }
spin_lock_irqsave(&ce->guc_state.lock, flags);
- if (missing)
+ if (ret != 0)
set_context_policy_required(ce);
else
clr_context_policy_required(ce);
@@ -2945,7 +2926,7 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- if (guc->fw.major_ver_found >= 70) {
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, guc_id);
@@ -3210,7 +3191,7 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- if (guc->fw.major_ver_found >= 70) {
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
@@ -4027,6 +4008,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
xa_destroy(&guc->context_lookup);
/*
+ * A reset might have occurred while we had a pending stalled request,
+ * so make sure we clean that up.
+ */
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+
+ /*
* Some contexts might have been pinned before we enabled GuC
* submission, so we need to add them to the GuC bookeeping.
* Also, after a reset the of the GuC we want to make sure that the
@@ -4191,13 +4179,27 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
void intel_guc_submission_enable(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /* Enable and route to GuC */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
+ GUC_SEM_INTR_ROUTE_TO_GUC |
+ GUC_SEM_INTR_ENABLE_ALL);
+
guc_init_lrc_mapping(guc);
guc_init_engine_stats(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
/* Note: By the time we're here, GuC may have already been reset */
+
+ /* Disable and route to host */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -5163,4 +5165,5 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
+#include "selftest_guc_hangcheck.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index f2e7c82985ef..dbd048b77e19 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
intel_guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
@@ -435,9 +435,11 @@ static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
- intel_uc_fw_type_repr(fw->type), fw->path,
- fw->major_ver_found, fw->minor_ver_found);
+ drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
+ fw->file_selected.major_ver,
+ fw->file_selected.minor_ver,
+ fw->file_selected.patch_ver);
}
static int __uc_init_hw(struct intel_uc *uc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 56a0d80f88ba..b91ad4aede1f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -41,7 +41,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
"%s firmware -> %s\n",
intel_uc_fw_type_repr(uc_fw->type),
status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->path : intel_uc_fw_status_repr(status));
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -51,84 +51,153 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
*
* Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
* firmware as TGL.
+ *
+ * Version numbers:
+ * Originally, the driver required an exact major/minor/patch match for the firmware
+ * file and only supported that one version for any given platform. However,
+ * the new direction from upstream is to be backwards compatible with all
+ * prior releases and to be as flexible as possible as to what firmware is
+ * loaded.
+ *
+ * For GuC, the major version number signifies a backwards breaking API change.
+ * So, new format GuC firmware files are labelled by their major version only.
+ * For HuC, there is no KMD interaction, hence no version matching requirement.
+ * So, new format HuC firmware files have no version number at all.
+ *
+ * All of which means that the table below must keep all old format files with
+ * their full three-point version numbers. But newer files have reduced requirements.
+ * Having said that, the driver still needs to track the minor version number
+ * for GuC at least, as it is useful to report to the user that they are not
+ * running with a recent enough version for all KMD supported features,
+ * security fixes, etc. to be enabled.
+ */
+#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
+ fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
+ fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \
+ fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ELKHARTLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ICELAKE, 0, guc_mmp(icl, 70, 1, 1)) \
+ fw_def(COMETLAKE, 5, guc_mmp(cml, 70, 1, 1)) \
+ fw_def(COMETLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(COFFEELAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(GEMINILAKE, 0, guc_mmp(glk, 70, 1, 1)) \
+ fw_def(KABYLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
+ fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
+
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
+ fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
+ fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
+ fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_raw(dg1)) \
+ fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, huc_mmp(icl, 9, 0, 0)) \
+ fw_def(COMETLAKE, 5, huc_mmp(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, huc_mmp(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, huc_mmp(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, huc_mmp(skl, 2, 0, 0))
+
+/*
+ * Set of macros for producing a list of filenames from the above table.
*/
-#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(DG2, 0, guc_def(dg2, 70, 1, 2)) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 70, 1, 1)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 70, 1, 1)) \
- fw_def(ICELAKE, 0, guc_def(icl, 70, 1, 1)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 70, 1, 1)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 70, 1, 1)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
-
-#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3))
-
-#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
- fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \
- fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \
- fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \
- fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \
- fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \
- fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \
- fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0))
-
-#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
+#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
+ "i915/" \
+ __stringify(prefix_) name_ ".bin"
+
+#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) ".bin"
+
+#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) name_ \
__stringify(major_) "." \
__stringify(minor_) "." \
__stringify(patch_) ".bin"
-#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
+/* Minor for internal driver use, not part of file name */
+#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
+ __MAKE_UC_FW_PATH_MAJOR(prefix_, "_guc_", major_)
+
+#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_guc_", major_, minor_, patch_)
-#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
- __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
+#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
-/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
+
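To make the naming scheme above concrete, here is a hypothetical user-space sketch (the simplified __stringify and PATH_* macros below are local copies for illustration, not the kernel's definitions) showing what a new major-only GuC path and a legacy full-version path expand to:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	#define __stringify_1(x)	#x
	#define __stringify(x)		__stringify_1(x)

	/* Simplified copies of the path macros above, for illustration only. */
	#define PATH_MAJOR(prefix_, name_, major_) \
		"i915/" __stringify(prefix_) name_ __stringify(major_) ".bin"
	#define PATH_MMP(prefix_, name_, major_, minor_, patch_) \
		"i915/" __stringify(prefix_) name_ __stringify(major_) "." \
		__stringify(minor_) "." __stringify(patch_) ".bin"

	int main(void)
	{
		/* New style: GuC files are named by major version only. */
		assert(!strcmp(PATH_MAJOR(dg2, "_guc_", 70), "i915/dg2_guc_70.bin"));
		/* Legacy style: full major.minor.patch in the file name. */
		assert(!strcmp(PATH_MMP(adlp, "_guc_", 70, 1, 1), "i915/adlp_guc_70.1.1.bin"));
		printf("expansions match the table entries\n");
		return 0;
	}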
+/*
+ * All blobs need to be declared via MODULE_FIRMWARE().
+ * This first expansion of the table macros is solely to provide
+ * that declaration.
+ */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
MODULE_FIRMWARE(uc_);
-INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
-INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
-INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP)
-/* The below structs and macros are used to iterate across the list of blobs */
+/*
+ * The next expansion of the table macros (in __uc_fw_auto_select below) provides
+ * actual data structures with both the filename and the version information.
+ * These structure arrays are then iterated over to build the list of suitable
+ * files for the current platform and to attempt to load those files, in the
+ * order listed, until one is successfully found.
+ */
struct __packed uc_fw_blob {
+ const char *path;
+ bool legacy;
u8 major;
u8 minor;
- const char *path;
+ u8 patch;
};
-#define UC_FW_BLOB(major_, minor_, path_) \
- { .major = major_, .minor = minor_, .path = path_ }
+#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .major = major_, \
+ .minor = minor_, \
+ .patch = patch_, \
+ .path = path_,
+
+#define UC_FW_BLOB_NEW(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = false }
-#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
- UC_FW_BLOB(major_, minor_, \
- MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
+#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = true }
-#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
- UC_FW_BLOB(major_, minor_, \
- MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
+#define GUC_FW_BLOB(prefix_, major_, minor_) \
+ UC_FW_BLOB_NEW(major_, minor_, 0, \
+ MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_))
+
+#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_) \
+ UC_FW_BLOB_NEW(0, 0, 0, MAKE_HUC_FW_PATH_BLANK(prefix_))
+
+#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
@@ -152,23 +221,22 @@ static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
static const struct uc_fw_platform_requirement blobs_guc[] = {
- INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
- };
- static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
- INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};
static const struct uc_fw_platform_requirement blobs_huc[] = {
- INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP)
};
static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
+ static bool verified;
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
+ bool found;
/*
* The only difference between the ADL GuC FWs is the HWConfig support.
@@ -183,50 +251,102 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
fw_blobs = blobs_all[uc_fw->type].blobs;
fw_count = blobs_all[uc_fw->type].count;
+ found = false;
for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
- if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
- const struct uc_fw_blob *blob = &fw_blobs[i].blob;
- uc_fw->path = blob->path;
- uc_fw->wanted_path = blob->path;
- uc_fw->major_ver_wanted = blob->major;
- uc_fw->minor_ver_wanted = blob->minor;
- break;
- }
- }
+ const struct uc_fw_blob *blob = &fw_blobs[i].blob;
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
- const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
- u32 count = ARRAY_SIZE(blobs_guc_fallback);
+ if (p != fw_blobs[i].p)
+ continue;
- for (i = 0; i < count && p <= blobs[i].p; i++) {
- if (p == blobs[i].p && rev >= blobs[i].rev) {
- const struct uc_fw_blob *blob = &blobs[i].blob;
+ if (rev < fw_blobs[i].rev)
+ continue;
- uc_fw->fallback.path = blob->path;
- uc_fw->fallback.major_ver = blob->major;
- uc_fw->fallback.minor_ver = blob->minor;
- break;
- }
+ if (uc_fw->file_selected.path) {
+ if (uc_fw->file_selected.path == blob->path)
+ uc_fw->file_selected.path = NULL;
+
+ continue;
}
+
+ uc_fw->file_selected.path = blob->path;
+ uc_fw->file_wanted.path = blob->path;
+ uc_fw->file_wanted.major_ver = blob->major;
+ uc_fw->file_wanted.minor_ver = blob->minor;
+ found = true;
+ break;
+ }
+
+ if (!found && uc_fw->file_selected.path) {
+ /* Failed to find a match for the last attempt?! */
+ uc_fw->file_selected.path = NULL;
}
/* make sure the list is ordered as expected */
- if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
+ verified = true;
+
for (i = 1; i < fw_count; i++) {
+ /* Next platform is good: */
if (fw_blobs[i].p < fw_blobs[i - 1].p)
continue;
+ /* Next platform revision is good: */
if (fw_blobs[i].p == fw_blobs[i - 1].p &&
fw_blobs[i].rev < fw_blobs[i - 1].rev)
continue;
- pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
- intel_platform_name(fw_blobs[i - 1].p),
- fw_blobs[i - 1].rev,
- intel_platform_name(fw_blobs[i].p),
- fw_blobs[i].rev);
+ /* Platform/revision must be in order: */
+ if (fw_blobs[i].p != fw_blobs[i - 1].p ||
+ fw_blobs[i].rev != fw_blobs[i - 1].rev)
+ goto bad;
+
+ /* Next major version is good: */
+ if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
+ continue;
+
+ /* New must be before legacy: */
+ if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
+ goto bad;
+
+ /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
+ if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
+ if (!fw_blobs[i - 1].blob.major)
+ continue;
+
+ if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
+ continue;
+ }
+
+ /* Major versions must be in order: */
+ if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
+ goto bad;
+
+ /* Next minor version is good: */
+ if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
+ continue;
+
+ /* Minor versions must be in order: */
+ if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
+ goto bad;
+
+ /* Patch versions must be in order: */
+ if (fw_blobs[i].blob.patch <= fw_blobs[i - 1].blob.patch)
+ continue;
- uc_fw->path = NULL;
+bad:
+ drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
+ fw_blobs[i - 1].blob.legacy ? "L" : "v",
+ fw_blobs[i - 1].blob.major,
+ fw_blobs[i - 1].blob.minor,
+ fw_blobs[i - 1].blob.patch,
+ intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
+ fw_blobs[i].blob.legacy ? "L" : "v",
+ fw_blobs[i].blob.major,
+ fw_blobs[i].blob.minor,
+ fw_blobs[i].blob.patch);
+
+ uc_fw->file_selected.path = NULL;
}
}
}
@@ -259,7 +379,7 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
}
if (unlikely(path)) {
- uc_fw->path = path;
+ uc_fw->file_selected.path = path;
uc_fw->user_overridden = true;
}
}
@@ -283,7 +403,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
*/
BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
GEM_BUG_ON(uc_fw->status);
- GEM_BUG_ON(uc_fw->path);
+ GEM_BUG_ON(uc_fw->file_selected.path);
uc_fw->type = type;
@@ -292,7 +412,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
__uc_fw_user_override(i915, uc_fw);
}
- intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+ intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
INTEL_UC_FIRMWARE_SELECTED :
INTEL_UC_FIRMWARE_DISABLED :
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
@@ -305,32 +425,32 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
- uc_fw->path = "<invalid>";
+ uc_fw->file_selected.path = "<invalid>";
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
- uc_fw->major_ver_wanted += 1;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver += 1;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
- uc_fw->minor_ver_wanted += 1;
+ uc_fw->file_wanted.minor_ver += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->major_ver_wanted &&
+ } else if (uc_fw->file_wanted.major_ver &&
i915_inject_probe_error(i915, e)) {
/* require prev major version */
- uc_fw->major_ver_wanted -= 1;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver -= 1;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->minor_ver_wanted &&
+ } else if (uc_fw->file_wanted.minor_ver &&
i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
- uc_fw->minor_ver_wanted -= 1;
+ uc_fw->file_wanted.minor_ver -= 1;
uc_fw->user_overridden = user;
} else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
- uc_fw->major_ver_wanted = 0;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver = 0;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = true;
}
}
@@ -339,10 +459,12 @@ static int check_gsc_manifest(const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
u32 *dw = (u32 *)fw->data;
- u32 version = dw[HUC_GSC_VERSION_DW];
+ u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
+ u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
- uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
- uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);
+ uc_fw->file_selected.major_ver = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.minor_ver = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.patch_ver = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
return 0;
}
@@ -357,7 +479,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -370,7 +492,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
if (unlikely(size != sizeof(struct uc_css_header))) {
drm_warn(&i915->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -385,7 +507,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, size);
return -ENOEXEC;
}
@@ -394,16 +516,18 @@ static int check_ccs_header(struct drm_i915_private *i915,
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= i915->wopcm.size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
size, (size_t)i915->wopcm.size);
return -E2BIG;
}
/* Get version numbers from the CSS header */
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->sw_version);
+ uc_fw->file_selected.major_ver = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->file_selected.minor_ver = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
+ uc_fw->file_selected.patch_ver = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
+ css->sw_version);
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
uc_fw->private_data_size = css->private_data_size;
@@ -422,9 +546,11 @@ static int check_ccs_header(struct drm_i915_private *i915,
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct intel_uc_fw_file file_ideal;
struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
+ bool old_ver = false;
int err;
GEM_BUG_ON(!i915->wopcm.size);
@@ -437,24 +563,32 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = firmware_request_nowarn(&fw, uc_fw->path, dev);
- if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
- err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
- if (!err) {
- drm_notice(&i915->drm,
- "%s firmware %s is recommended, but only %s was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->wanted_path,
- uc_fw->fallback.path);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
-
- uc_fw->path = uc_fw->fallback.path;
- uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
- uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
+
+ /* Any error is terminal if overriding. Don't bother searching for older versions */
+ if (err && intel_uc_fw_is_overridden(uc_fw))
+ goto fail;
+
+ while (err == -ENOENT) {
+ old_ver = true;
+
+ __uc_fw_auto_select(i915, uc_fw);
+ if (!uc_fw->file_selected.path) {
+ /*
+ * No more options! But set the path back to something
+ * valid just in case it gets dereferenced.
+ */
+ uc_fw->file_selected.path = file_ideal.path;
+
+ /* Also, preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+ break;
}
+
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
}
+
if (err)
goto fail;
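As a rough illustration of the retry flow above (a hedged sketch, not the driver's code: the table contents and the fake request() stub are assumptions), the -ENOENT loop keeps re-running the table scan, skipping past the entry that just failed, until a file is found or the options run out:

	#include <stdio.h>
	#include <string.h>

	#define FAKE_ENOENT 2

	static const char *table[] = {
		"i915/adlp_guc_70.bin",		/* preferred, major-only file */
		"i915/adlp_guc_70.1.1.bin",	/* older full-version fallbacks */
		"i915/adlp_guc_69.0.3.bin",
	};

	/* Stand-in for firmware_request_nowarn(): pretend only the oldest blob is installed. */
	static int request(const char *path)
	{
		return strcmp(path, "i915/adlp_guc_69.0.3.bin") ? -FAKE_ENOENT : 0;
	}

	int main(void)
	{
		const char *selected = NULL;
		int err = -FAKE_ENOENT;
		size_t i;

		while (err == -FAKE_ENOENT) {
			const char *prev = selected;

			selected = NULL;
			for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
				if (prev) {			/* skip up to the last attempt */
					if (!strcmp(table[i], prev))
						prev = NULL;
					continue;
				}
				selected = table[i];
				break;
			}
			if (!selected)
				break;				/* no more options */
			err = request(selected);
		}

		printf("selected %s (err=%d)\n", selected ? selected : "<none>", err);
		return 0;
	}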
@@ -465,18 +599,39 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (err)
goto fail;
- if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- if (!intel_uc_fw_is_overridden(uc_fw)) {
- err = -ENOEXEC;
- goto fail;
+ if (uc_fw->file_wanted.major_ver) {
+ /* Check the file's major version was as it claimed */
+ if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) {
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
+ } else {
+ if (uc_fw->file_selected.minor_ver < uc_fw->file_wanted.minor_ver)
+ old_ver = true;
}
}
+ if (old_ver) {
+ /* Preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+
+ drm_notice(&i915->drm,
+ "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+ }
+
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
if (!IS_ERR(obj))
@@ -503,7 +658,7 @@ fail:
INTEL_UC_FIRMWARE_ERROR);
i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
@@ -645,7 +800,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
fail:
i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
err);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
@@ -863,19 +1018,34 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
*/
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
+ u32 ver_sel, ver_want;
+
drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
- if (uc_fw->fallback.path) {
- drm_printf(p, "%s firmware fallback: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
- drm_printf(p, "fallback selected: %s\n",
- str_yes_no(uc_fw->path == uc_fw->fallback.path));
- }
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
+ if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
+ drm_printf(p, "%s firmware wanted: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
- drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ ver_sel = MAKE_UC_VER(uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ ver_want = MAKE_UC_VER(uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver);
+ if (ver_sel < ver_want)
+ drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
+ uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver,
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ else
+ drm_printf(p, "\tversion: found %u.%u.%u\n",
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 7aa2644400b9..cb586f7df270 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -65,6 +65,18 @@ enum intel_uc_fw_type {
#define INTEL_UC_FW_NUM_TYPES 2
/*
+ * The firmware build process will generate a version header file with major and
+ * minor version defined. The versions are built into the CSS header of the firmware.
+ * The i915 kernel driver sets the minimal firmware version required per platform.
+ */
+struct intel_uc_fw_file {
+ const char *path;
+ u16 major_ver;
+ u16 minor_ver;
+ u16 patch_ver;
+};
+
+/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the uC.
*/
@@ -74,11 +86,12 @@ struct intel_uc_fw {
const enum intel_uc_fw_status status;
enum intel_uc_fw_status __status; /* no accidental overwrites */
};
- const char *wanted_path;
- const char *path;
+ struct intel_uc_fw_file file_wanted;
+ struct intel_uc_fw_file file_selected;
bool user_overridden;
size_t size;
struct drm_i915_gem_object *obj;
+
/**
* @dummy: A vma used in binding the uc fw to ggtt. We can't define this
* vma on the stack as it can lead to a stack overflow, so we define it
@@ -89,30 +102,18 @@ struct intel_uc_fw {
struct i915_vma_resource dummy;
struct i915_vma *rsa_data;
- /*
- * The firmware build process will generate a version header file with major and
- * minor version defined. The versions are built into CSS header of firmware.
- * i915 kernel driver set the minimal firmware version required per platform.
- */
- u16 major_ver_wanted;
- u16 minor_ver_wanted;
- u16 major_ver_found;
- u16 minor_ver_found;
-
- struct {
- const char *path;
- u16 major_ver;
- u16 minor_ver;
- } fallback;
-
u32 rsa_size;
u32 ucode_size;
-
u32 private_data_size;
bool loaded_via_gsc;
};
+#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))
+#define GET_UC_VER(uc) (MAKE_UC_VER((uc)->fw.file_selected.major_ver, \
+ (uc)->fw.file_selected.minor_ver, \
+ (uc)->fw.file_selected.patch_ver))
+
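A quick sanity check of the packing used by MAKE_UC_VER()/GET_UC_VER() (a standalone sketch; the version numbers are just examples): patch sits in bits 0-7, minor in bits 8-15 and major in bits 16 and up, so a plain integer comparison orders versions correctly as long as minor and patch stay below 256.

	#include <assert.h>

	#define MAKE_UC_VER(maj, min, pat)	((pat) | ((min) << 8) | ((maj) << 16))

	int main(void)
	{
		/* 70.1.1 packs to 0x00460101 (0x46 == 70). */
		assert(MAKE_UC_VER(70, 1, 1) == 0x00460101);
		/* Comparisons used by the submission code behave as expected. */
		assert(MAKE_UC_VER(70, 1, 1) >= MAKE_UC_VER(70, 0, 0));
		assert(MAKE_UC_VER(69, 0, 3) <  MAKE_UC_VER(70, 0, 0));
		return 0;
	}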
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index b05e0e35b734..7a411178bdbf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -83,8 +83,10 @@ struct uc_css_header {
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
-#define HUC_GSC_VERSION_DW 44
-#define HUC_GSC_MAJOR_VER_MASK (0xFF << 0)
-#define HUC_GSC_MINOR_VER_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_HI_DW 44
+#define HUC_GSC_MAJOR_VER_HI_MASK (0xFF << 0)
+#define HUC_GSC_MINOR_VER_HI_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_LO_DW 45
+#define HUC_GSC_PATCH_VER_LO_MASK (0xFF << 0)
#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 1df71d0796ae..e28518fe8b90 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -54,6 +54,9 @@ static int intel_guc_scrub_ctbs(void *arg)
struct intel_engine_cs *engine;
struct intel_context *ce;
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
@@ -62,7 +65,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- pr_err("Failed to create context, %d: %d\n", i, ret);
+ drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
goto err;
}
@@ -83,7 +86,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed to create request, %d: %d\n", i, ret);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
goto err;
}
@@ -93,7 +96,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- pr_err("Last request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
goto err;
}
i915_request_put(last[i]);
@@ -110,7 +113,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
goto err;
}
@@ -150,7 +153,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- pr_err("Context array allocation failed\n");
+ drm_err(&gt->i915->drm, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -164,24 +167,24 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
ce[context_index] = NULL;
- pr_err("Failed to create context: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- pr_err("Failed to create spinner: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- pr_err("Failed to create spinner request: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- pr_err("Failed to add Spinner request: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
goto err_spin_rq;
}
@@ -191,7 +194,7 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index--]);
ce[context_index] = NULL;
- pr_err("Failed to create context: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_spin_rq;
}
@@ -200,8 +203,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- pr_err("Failed to create request, %d: %d\n",
- context_index, ret);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
+ context_index, ret);
goto err_spin_rq;
}
} else {
@@ -215,7 +218,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- pr_err("Spin request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
i915_request_put(last);
goto err_spin_rq;
}
@@ -227,7 +230,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- pr_err("Last request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
goto err_spin_rq;
}
@@ -235,7 +238,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+ drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
goto err_spin_rq;
}
@@ -243,21 +246,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- pr_err("Request with stolen guc_id failed to complete: %d\n",
- ret);
+ drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- pr_err("No guc_id was stolen");
+ drm_err(&gt->i915->drm, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
new file mode 100644
index 000000000000..01f8cd3c3134
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+#define BEAT_INTERVAL 100
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_hang_guc(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ unsigned int reset_count;
+ u32 guc_status;
+ u32 old_beat;
+
+ ctx = kernel_context(gt->i915, NULL);
+ if (IS_ERR(ctx)) {
+ drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ return PTR_ERR(ctx);
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(gt->engine[BCS0]);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err;
+ }
+
+ engine = ce->engine;
+ reset_count = i915_reset_count(global);
+
+ old_beat = engine->props.heartbeat_interval_ms;
+ ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ goto err;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ goto err_spin;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ if (!(guc_status & GS_MIA_IN_RESET)) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ ret = -EIO;
+ goto err_spin;
+ }
+
+ /* Wait for the heartbeat to cause a reset */
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ goto err_spin;
+ }
+
+ if (i915_reset_count(global) == reset_count) {
+ drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ ret = -EINVAL;
+ goto err_spin;
+ }
+
+err_spin:
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ intel_engine_set_heartbeat(engine, old_beat);
+
+ if (ret == 0) {
+ rq = nop_request(engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ goto err;
+ }
+ }
+
+err:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kernel_context_close(ctx);
+
+ return ret;
+}
+
+int intel_guc_hang_check(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_hang_guc),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index 812220a43df8..d17982c36d25 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -115,30 +115,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+ drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
return PTR_ERR(parent);
} else if (!parent) {
- pr_debug("Not enough engines in class: %d", class);
+ drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed creating requests: %d", ret);
+ drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- pr_err("Failed waiting on request: %d", ret);
+ drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
}
out:
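The hunk above swaps bare pr_err()/pr_debug() calls for drm_err()/drm_dbg() so every message is tied to the DRM device that emitted it. Roughly (the exact log prefix depends on the device and kernel configuration):

/* pr_err(): no device context in the log */
pr_err("Failed waiting on request: %d", ret);

/* drm_err(): prefixes the message with the device, e.g.
 *   i915 0000:00:02.0: [drm] *ERROR* Failed waiting on request: -5
 */
drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);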
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 557f3314291a..3b81a6d35a7b 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -298,7 +298,7 @@ no_enough_resource:
}
/**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
}
/**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index dad3a6054335..eef3bba8a41b 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "gvt.h"
+#include "intel_pci_config.h"
enum {
INTEL_GVT_PCI_BAR_GTTMMIO = 0,
@@ -353,9 +354,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(pdev, 0);
+ pci_resource_len(pdev, GTTMMADR_BAR);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(pdev, 2);
+ pci_resource_len(pdev, GTT_APERTURE_BAR);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
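The change above replaces the raw BAR indices 0 and 2 with the named constants from intel_pci_config.h, which resolve to the same values but make the intent explicit. A rough illustration (the #defines are copied here only for readability; the authoritative values live in the header):

/* Illustrative only -- see drivers/gpu/drm/i915/intel_pci_config.h. */
#define GTTMMADR_BAR		0	/* GTT + MMIO BAR */
#define GTT_APERTURE_BAR	2	/* mappable graphics aperture BAR */

resource_size_t mmio_size = pci_resource_len(pdev, GTTMMADR_BAR);
resource_size_t aper_size = pci_resource_len(pdev, GTT_APERTURE_BAR);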
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index a30ba2d7b7ba..1b509c1a1e33 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -32,9 +32,10 @@
*
*/
+#include "display/intel_gmbus_regs.h"
+#include "gvt.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "gvt.h"
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b4f69364f9a1..ce0eb03709c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
* update the entry in this situation p2m will fail
- * settting the shadow entry to point to a scratch page
+ * setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index beea5895e499..daac2050d77d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -498,7 +498,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
+ refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -529,7 +529,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
+ int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {0};
@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c85bafe7539e..1c6e941c9666 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
}
/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @engine: the engine
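The kerneldoc fixes in the gvt hunks above simply make the comment headers name the functions they document, which is what scripts/kernel-doc checks against the definition that follows. The expected shape is as below (the Returns: line is shown only as an example of a complete header, not taken from the patch):

/**
 * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
 * @vgpu: vGPU
 * @param: vGPU creation params
 *
 * Returns: 0 on success, negative error code on failure.
 */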
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 94e5c29d2ee3..ae987e92251d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
- intel_device_info_print_static(INTEL_INFO(i915), &p);
- intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
@@ -188,47 +187,47 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
stringify_page_sizes(vma->resource->page_sizes_gtt,
NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
- switch (vma->ggtt_view.type) {
- case I915_GGTT_VIEW_NORMAL:
+ switch (vma->gtt_view.type) {
+ case I915_GTT_VIEW_NORMAL:
seq_puts(m, ", normal");
break;
- case I915_GGTT_VIEW_PARTIAL:
+ case I915_GTT_VIEW_PARTIAL:
seq_printf(m, ", partial [%08llx+%x]",
- vma->ggtt_view.partial.offset << PAGE_SHIFT,
- vma->ggtt_view.partial.size << PAGE_SHIFT);
+ vma->gtt_view.partial.offset << PAGE_SHIFT,
+ vma->gtt_view.partial.size << PAGE_SHIFT);
break;
- case I915_GGTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_ROTATED:
seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
- vma->ggtt_view.rotated.plane[0].width,
- vma->ggtt_view.rotated.plane[0].height,
- vma->ggtt_view.rotated.plane[0].src_stride,
- vma->ggtt_view.rotated.plane[0].dst_stride,
- vma->ggtt_view.rotated.plane[0].offset,
- vma->ggtt_view.rotated.plane[1].width,
- vma->ggtt_view.rotated.plane[1].height,
- vma->ggtt_view.rotated.plane[1].src_stride,
- vma->ggtt_view.rotated.plane[1].dst_stride,
- vma->ggtt_view.rotated.plane[1].offset);
+ vma->gtt_view.rotated.plane[0].width,
+ vma->gtt_view.rotated.plane[0].height,
+ vma->gtt_view.rotated.plane[0].src_stride,
+ vma->gtt_view.rotated.plane[0].dst_stride,
+ vma->gtt_view.rotated.plane[0].offset,
+ vma->gtt_view.rotated.plane[1].width,
+ vma->gtt_view.rotated.plane[1].height,
+ vma->gtt_view.rotated.plane[1].src_stride,
+ vma->gtt_view.rotated.plane[1].dst_stride,
+ vma->gtt_view.rotated.plane[1].offset);
break;
- case I915_GGTT_VIEW_REMAPPED:
+ case I915_GTT_VIEW_REMAPPED:
seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
- vma->ggtt_view.remapped.plane[0].width,
- vma->ggtt_view.remapped.plane[0].height,
- vma->ggtt_view.remapped.plane[0].src_stride,
- vma->ggtt_view.remapped.plane[0].dst_stride,
- vma->ggtt_view.remapped.plane[0].offset,
- vma->ggtt_view.remapped.plane[1].width,
- vma->ggtt_view.remapped.plane[1].height,
- vma->ggtt_view.remapped.plane[1].src_stride,
- vma->ggtt_view.remapped.plane[1].dst_stride,
- vma->ggtt_view.remapped.plane[1].offset);
+ vma->gtt_view.remapped.plane[0].width,
+ vma->gtt_view.remapped.plane[0].height,
+ vma->gtt_view.remapped.plane[0].src_stride,
+ vma->gtt_view.remapped.plane[0].dst_stride,
+ vma->gtt_view.remapped.plane[0].offset,
+ vma->gtt_view.remapped.plane[1].width,
+ vma->gtt_view.remapped.plane[1].height,
+ vma->gtt_view.remapped.plane[1].src_stride,
+ vma->gtt_view.remapped.plane[1].dst_stride,
+ vma->gtt_view.remapped.plane[1].offset);
break;
default:
- MISSING_CASE(vma->ggtt_view.type);
+ MISSING_CASE(vma->gtt_view.type);
break;
}
}
@@ -411,7 +410,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
@@ -493,7 +492,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- str_enabled_disabled(!dev_priv->power_domains.init_wakeref));
+ str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index deb8a8b76965..c459eb362c47 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -61,6 +61,7 @@
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
+#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
@@ -105,6 +106,12 @@ static const char irst_name[] = "INT3392";
static const struct drm_driver i915_drm_driver;
+static void i915_release_bridge_dev(struct drm_device *dev,
+ void *bridge)
+{
+ pci_dev_put(bridge);
+}
+
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
@@ -115,7 +122,9 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "bridge device not found\n");
return -EIO;
}
- return 0;
+
+ return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
+ dev_priv->bridge_dev);
}
/* Allocate space for the MCH regs if needed, return nonzero on error */
@@ -252,8 +261,8 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->wq == NULL)
goto out_err;
- dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->hotplug.dp_wq == NULL)
+ dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->display.hotplug.dp_wq == NULL)
goto out_free_wq;
return 0;
@@ -268,7 +277,7 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
- destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -302,8 +311,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(to_gt(i915), ALL_ENGINES);
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i)
+ __intel_gt_reset(gt, ALL_ENGINES);
+ }
}
/**
@@ -326,19 +340,19 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
- intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
+ intel_uncore_mmio_debug_init_early(dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->backlight_lock);
+ mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->audio.mutex);
- mutex_init(&dev_priv->wm.wm_mutex);
- mutex_init(&dev_priv->pps_mutex);
- mutex_init(&dev_priv->hdcp_comp_mutex);
+ mutex_init(&dev_priv->display.audio.mutex);
+ mutex_init(&dev_priv->display.wm.wm_mutex);
+ mutex_init(&dev_priv->display.pps.mutex);
+ mutex_init(&dev_priv->display.hdcp.comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -357,7 +371,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- intel_root_gt_init_early(dev_priv);
+ ret = intel_root_gt_init_early(dev_priv);
+ if (ret < 0)
+ goto err_rootgt;
i915_drm_clients_init(&dev_priv->clients, dev_priv);
@@ -382,6 +398,7 @@ err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release_all(dev_priv);
i915_drm_clients_fini(&dev_priv->clients);
+err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -423,7 +440,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
@@ -432,17 +450,27 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_init_mmio(&dev_priv->uncore);
- if (ret)
- return ret;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_uncore_init_mmio(gt->uncore);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(&dev_priv->drm,
+ intel_uncore_fini_mmio,
+ gt->uncore);
+ if (ret)
+ return ret;
+ }
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
intel_device_info_runtime_init(dev_priv);
- ret = intel_gt_init_mmio(to_gt(dev_priv));
- if (ret)
- goto err_uncore;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_gt_init_mmio(gt);
+ if (ret)
+ goto err_uncore;
+ }
/* As early as possible, scrub existing GPU state before clobbering */
sanitize_gpu(dev_priv);
@@ -451,8 +479,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
- intel_uncore_fini_mmio(&dev_priv->uncore);
- pci_dev_put(dev_priv->bridge_dev);
return ret;
}
@@ -464,8 +490,6 @@ err_uncore:
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
- intel_uncore_fini_mmio(&dev_priv->uncore);
- pci_dev_put(dev_priv->bridge_dev);
}
/**
@@ -715,6 +739,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct intel_gt *gt;
+ unsigned int i;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -734,7 +760,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
- intel_gt_driver_register(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_register(gt);
intel_display_driver_register(dev_priv);
@@ -753,6 +780,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt;
+ unsigned int i;
+
i915_switcheroo_unregister(dev_priv);
intel_unregister_dsm_handler();
@@ -762,7 +792,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
- intel_gt_driver_unregister(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_unregister(gt);
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -784,6 +815,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
struct drm_printer p = drm_debug_printer("i915 device info:");
+ struct intel_gt *gt;
+ unsigned int i;
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
@@ -793,10 +826,11 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
INTEL_INFO(dev_priv)->platform),
GRAPHICS_VER(dev_priv));
- intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
- intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_device_info_print(INTEL_INFO(dev_priv),
+ RUNTIME_INFO(dev_priv), &p);
i915_print_iommu_status(dev_priv, &p);
- intel_gt_info_print(&to_gt(dev_priv)->info, &p);
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_info_print(&gt->info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -814,6 +848,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
+ struct intel_runtime_info *runtime;
struct drm_i915_private *i915;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
@@ -829,7 +864,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
- RUNTIME_INFO(i915)->device_id = pdev->device;
+
+ /* Initialize initial runtime info from static const data and pdev. */
+ runtime = RUNTIME_INFO(i915);
+ memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
+ runtime->device_id = pdev->device;
return i915;
}
@@ -948,7 +987,9 @@ out_fini:
void i915_driver_remove(struct drm_i915_private *i915)
{
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_driver_unregister(i915);
@@ -972,18 +1013,19 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_hw_remove(i915);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t wakeref;
if (!dev_priv->do_release)
return;
- disable_rpm_wakeref_asserts(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
i915_gem_driver_release(dev_priv);
@@ -994,7 +1036,8 @@ static void i915_driver_release(struct drm_device *dev)
i915_driver_mmio_release(dev_priv);
- enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_put(rpm, wakeref);
+
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
@@ -1206,13 +1249,15 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
disable_rpm_wakeref_asserts(rpm);
i915_gem_suspend_late(dev_priv);
- intel_uncore_suspend(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@@ -1344,7 +1389,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -1398,9 +1444,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
drm_err(&dev_priv->drm,
"Resume prepare failed: %d, continuing anyway\n", ret);
- intel_uncore_resume_early(&dev_priv->uncore);
-
- intel_gt_check_and_clear_faults(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i) {
+ intel_uncore_resume_early(gt->uncore);
+ intel_gt_check_and_clear_faults(gt);
+ }
intel_display_power_resume_early(dev_priv);
@@ -1580,7 +1627,8 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -1595,11 +1643,13 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_gt_runtime_suspend(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_suspend(gt);
intel_runtime_pm_disable_interrupts(dev_priv);
- intel_uncore_suspend(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
intel_display_power_suspend(dev_priv);
@@ -1663,7 +1713,8 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -1683,7 +1734,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
- intel_uncore_runtime_resume(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_runtime_resume(gt->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1691,7 +1743,8 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- intel_gt_runtime_resume(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_resume(gt);
/*
* On VLV/CHV display interrupts are part of the display
@@ -1703,7 +1756,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_hpd_poll_disable(dev_priv);
}
- intel_enable_ipc(dev_priv);
+ skl_watermark_ipc_update(dev_priv);
enable_rpm_wakeref_asserts(rpm);
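Several i915_driver.c changes above move teardown into DRM-managed actions: i915_get_bridge_dev() now registers i915_release_bridge_dev() through drmm_add_action_or_reset(), and intel_uncore_fini_mmio() is registered per GT the same way, which is why the explicit pci_dev_put()/intel_uncore_fini_mmio() calls disappear from the error and release paths. A minimal sketch of the idiom, with made-up resource names (acquire_my_resource()/release_my_resource() are illustrative; drmm_add_action_or_reset() comes from <drm/drm_managed.h>):

#include <drm/drm_managed.h>

static void release_my_resource(struct drm_device *drm, void *res)
{
	pci_dev_put(res);	/* runs automatically when the drm_device is released */
}

static int acquire_my_resource(struct drm_i915_private *i915, struct pci_dev *pdev)
{
	/*
	 * If registering the action fails, it is executed immediately,
	 * so no manual unwind is needed on the error path.
	 */
	return drmm_add_action_or_reset(&i915->drm, release_my_resource, pdev);
}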
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d25647be25d1..bdc81db76dbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,20 +34,10 @@
#include <linux/pm_qos.h>
-#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_cdclk.h"
#include "display/intel_display.h"
-#include "display/intel_display_power.h"
-#include "display/intel_dmc.h"
-#include "display/intel_dpll_mgr.h"
-#include "display/intel_dsb.h"
-#include "display/intel_fbc.h"
-#include "display/intel_frontbuffer.h"
-#include "display/intel_global_state.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_opregion.h"
+#include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_lmem.h"
@@ -70,80 +60,24 @@
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
-#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
-struct dpll;
struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
struct drm_i915_private;
-struct intel_atomic_state;
-struct intel_audio_funcs;
-struct intel_cdclk_config;
-struct intel_cdclk_funcs;
-struct intel_cdclk_state;
-struct intel_cdclk_vals;
-struct intel_color_funcs;
struct intel_connector;
-struct intel_crtc;
struct intel_dp;
-struct intel_dpll_funcs;
struct intel_encoder;
-struct intel_fbdev;
-struct intel_fdi_funcs;
-struct intel_gmbus;
-struct intel_hotplug_funcs;
-struct intel_initial_plane_config;
struct intel_limit;
-struct intel_overlay;
struct intel_overlay_error_state;
struct vlv_s0ix_state;
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
-struct i915_hotplug {
- struct delayed_work hotplug_work;
-
- const u32 *hpd, *pch_hpd;
-
- struct {
- unsigned long last_jiffies;
- int count;
- enum {
- HPD_ENABLED = 0,
- HPD_DISABLED = 1,
- HPD_MARK_DISABLED = 2
- } state;
- } stats[HPD_NUM_PINS];
- u32 event_bits;
- u32 retry_bits;
- struct delayed_work reenable_work;
-
- u32 long_port_mask;
- u32 short_port_mask;
- struct work_struct dig_port_work;
-
- struct work_struct poll_init_work;
- bool poll_enabled;
-
- unsigned int hpd_storm_threshold;
- /* Whether or not to count short HPD IRQs in HPD storms */
- u8 hpd_short_storm_enabled;
-
- /*
- * if we get a HPD irq from DP and a HPD irq from non-DP
- * the non-DP HPD could block the workqueue on a mode config
- * mutex getting, that userspace may have taken. However
- * userspace is waiting on the DP workqueue to run which is
- * blocked behind the non-DP one.
- */
- struct workqueue_struct *dp_wq;
-};
-
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -151,55 +85,9 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-struct sdvo_device_mapping {
- u8 initialized;
- u8 dvo_port;
- u8 slave_addr;
- u8 dvo_wiring;
- u8 i2c_pin;
- u8 ddc_pin;
-};
-
-/* functions used for watermark calcs for display. */
-struct drm_i915_wm_disp_funcs {
- /* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
- int (*compute_pipe_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_intermediate_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_global_watermarks)(struct intel_atomic_state *state);
-};
-
-struct drm_i915_display_funcs {
- /* Returns the active state of the crtc, and if the crtc is active,
- * fills out the pipe-config with the hw state. */
- bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_state *);
- void (*get_initial_plane_config)(struct intel_crtc *,
- struct intel_initial_plane_config *);
- void (*crtc_enable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*crtc_disable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*commit_modeset_enables)(struct intel_atomic_state *state);
-};
-
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-#define QUIRK_LVDS_SSC_DISABLE (1<<1)
-#define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_BACKLIGHT_PRESENT (1<<3)
-#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
-#define QUIRK_INCREASE_T12_DELAY (1<<6)
-#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
-#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
struct i915_suspend_saved_registers {
u32 saveDSPARB;
@@ -247,7 +135,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct delayed_work free_work;
+ struct work_struct free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -289,51 +177,8 @@ i915_fence_timeout(const struct drm_i915_private *i915)
return i915_fence_context_timeout(i915, U64_MAX);
}
-/* Amount of SAGV/QGV points, BSpec precisely defines this */
-#define I915_NUM_QGV_POINTS 8
-
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-/* Amount of PSF GV points, BSpec precisely defines this */
-#define I915_NUM_PSF_GV_POINTS 3
-
-struct intel_vbt_data {
- /* bdb version */
- u16 version;
-
- /* Feature bits */
- unsigned int int_tv_support:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int int_lvds_support:1;
- unsigned int display_clock_mode:1;
- unsigned int fdi_rx_polarity_inverted:1;
- int lvds_ssc_freq;
- enum drm_panel_orientation orientation;
-
- bool override_afc_startup;
- u8 override_afc_startup_val;
-
- int crt_ddc_pin;
-
- struct list_head display_devices;
- struct list_head bdb_blocks;
-
- struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
- struct sdvo_device_mapping sdvo_mappings[2];
-};
-
-struct i915_frontbuffer_tracking {
- spinlock_t lock;
-
- /*
- * Tracking bits for delayed frontbuffer flushing du to gpu activity or
- * scheduled flips.
- */
- unsigned busy_bits;
- unsigned flip_bits;
-};
-
struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
@@ -348,32 +193,11 @@ struct i915_selftest_stash {
struct ida mock_region_instances;
};
-/* intel_audio.c private */
-struct intel_audio_private {
- /* Display internal audio functions */
- const struct intel_audio_funcs *funcs;
-
- /* hda/i915 audio component */
- struct i915_audio_component *component;
- bool component_registered;
- /* mutex for audio/video sync */
- struct mutex mutex;
- int power_refcount;
- u32 freq_cntrl;
-
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *encoder_map[I915_MAX_PIPES];
-
- /* necessary resource sharing with HDMI LPE audio driver. */
- struct {
- struct platform_device *platdev;
- int irq;
- } lpe;
-};
-
struct drm_i915_private {
struct drm_device drm;
+ struct intel_display display;
+
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
@@ -417,27 +241,6 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_dmc dmc;
-
- struct intel_gmbus *gmbus[GMBUS_NUM_PINS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of where the gmbus and gpio blocks are located (either
- * on PCH or on SoC for platforms without PCH).
- */
- u32 gpio_mmio_base;
-
- /* MMIO base address for MIPI regs */
- u32 mipi_mmio_base;
-
- u32 pps_mmio_base;
-
- wait_queue_head_t gmbus_wait_queue;
-
struct pci_dev *bridge_dev;
struct rb_root uabi_engines;
@@ -461,48 +264,15 @@ struct drm_i915_private {
};
u32 pipestat_irq_mask[I915_MAX_PIPES];
- struct i915_hotplug hotplug;
- struct intel_fbc *fbc[I915_MAX_FBCS];
- struct intel_opregion opregion;
- struct intel_vbt_data vbt;
-
bool preserve_bios_swizzle;
- /* overlay */
- struct intel_overlay *overlay;
-
- /* backlight registers and fields in struct intel_panel */
- struct mutex backlight_lock;
-
- /* protects panel power sequencer state */
- struct mutex pps_mutex;
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
- unsigned int fdi_pll_freq;
unsigned int czclk_freq;
- struct {
- /* The current hardware cdclk configuration */
- struct intel_cdclk_config hw;
-
- /* cdclk, divider, and ratio table from bspec */
- const struct intel_cdclk_vals *table;
-
- struct intel_global_obj obj;
- } cdclk;
-
- struct {
- /* The current hardware dbuf configuration */
- u8 enabled_slices;
-
- struct intel_global_obj obj;
- } dbuf;
-
/**
* wq - Driver workqueue for GEM.
*
@@ -512,40 +282,14 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
- /* ordered wq for modesets */
- struct workqueue_struct *modeset_wq;
- /* unbound hipri wq for page flips/plane updates */
- struct workqueue_struct *flip_wq;
-
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* pm display functions */
- const struct drm_i915_wm_disp_funcs *wm_disp;
-
- /* irq display functions */
- const struct intel_hotplug_funcs *hotplug_funcs;
-
- /* fdi display functions */
- const struct intel_fdi_funcs *fdi_funcs;
-
- /* display pll funcs */
- const struct intel_dpll_funcs *dpll_funcs;
-
- /* Display functions */
- const struct drm_i915_display_funcs *display;
-
- /* Display internal color functions */
- const struct intel_color_funcs *color_funcs;
-
- /* Display CDCLK functions */
- const struct intel_cdclk_funcs *cdclk_funcs;
-
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
- unsigned long quirks;
+ unsigned long gem_quirks;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
@@ -554,34 +298,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- /**
- * dpll and cdclk state is protected by connection_mutex
- * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms plls
- * share registers.
- */
- struct {
- struct mutex lock;
-
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *mgr;
-
- struct {
- int nssc;
- int ssc;
- } ref_clks;
- } dpll;
-
struct list_head global_obj_list;
- struct i915_frontbuffer_tracking fb_tracking;
-
- struct intel_atomic_helper {
- struct llist_head free_list;
- struct work_struct free_work;
- } atomic_helper;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -600,21 +318,8 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- struct i915_power_domains power_domains;
-
struct i915_gpu_error gpu_error;
- /* list of fbdev register on this device */
- struct intel_fbdev *fbdev;
- struct work_struct fbdev_suspend_work;
-
- struct drm_property *broadcast_rgb_property;
- struct drm_property *force_audio_property;
-
- u32 fdi_rx_config;
-
- /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
- u32 chv_phy_control;
/*
* Shadows for CHV DPLL_MD regs to keep the state
* checker somewhat working in the presence hardware
@@ -627,51 +332,6 @@ struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
- enum {
- I915_SAGV_UNKNOWN = 0,
- I915_SAGV_DISABLED,
- I915_SAGV_ENABLED,
- I915_SAGV_NOT_CONTROLLED
- } sagv_status;
-
- u32 sagv_block_time_us;
-
- struct {
- /*
- * Raw watermark latency values:
- * in 0.1us units for WM0,
- * in 0.5us units for WM1+.
- */
- /* primary */
- u16 pri_latency[5];
- /* sprite */
- u16 spr_latency[5];
- /* cursor */
- u16 cur_latency[5];
- /*
- * Raw watermark memory latency values
- * for SKL for all 8 levels
- * in 1us units.
- */
- u16 skl_latency[8];
-
- /* current hardware state */
- union {
- struct ilk_wm_values hw;
- struct vlv_wm_values vlv;
- struct g4x_wm_values g4x;
- };
-
- u8 max_level;
-
- /*
- * Should be held around atomic WM register writing; also
- * protects * intel_crtc->wm.active and
- * crtc_state->wm.need_postvbl_update.
- */
- struct mutex wm_mutex;
- } wm;
-
struct dram_info {
bool wm_lv_0_adjust_needed;
u8 num_channels;
@@ -689,18 +349,6 @@ struct drm_i915_private {
u8 num_psf_gv_points;
} dram_info;
- struct intel_bw_info {
- /* for each QGV point */
- unsigned int deratedbw[I915_NUM_QGV_POINTS];
- /* for each PSF GV point */
- unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- u8 num_planes;
- } max_bw[6];
-
- struct intel_global_obj bw_obj;
-
struct intel_runtime_pm runtime_pm;
struct i915_perf perf;
@@ -716,6 +364,9 @@ struct drm_i915_private {
struct kobject *sysfs_gt;
+ /* Quick lookup of media GT (current platforms only have one) */
+ struct intel_gt *media_gt;
+
struct {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
@@ -733,9 +384,6 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
- /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
- u8 window2_delay;
-
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -743,31 +391,16 @@ struct drm_i915_private {
bool irq_enabled;
- union {
- /* perform PHY state sanity checks? */
- bool chv_phy_assert[2];
-
- /*
- * DG2: Mask of PHYs that were not calibrated by the firmware
- * and should not be used.
- */
- u8 snps_phy_failed_calibration;
- };
-
- bool ipc_enabled;
-
- struct intel_audio_private audio;
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 snps_phy_failed_calibration;
struct i915_pmu pmu;
struct i915_drm_clients clients;
- struct i915_hdcp_comp_master *hdcp_master;
- bool hdcp_comp_added;
-
- /* Mutex to protect the above hdcp component related values. */
- struct mutex hdcp_comp_mutex;
-
/* The TTM device structure. */
struct ttm_device bdev;
@@ -826,28 +459,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(engine__) && (engine__)->uabi_class == (class__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define I915_GTT_OFFSET_NONE ((u32)-1)
-
-/*
- * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
- * considered to be the frontbuffer for the given plane interface-wise. This
- * doesn't mean that the hw necessarily already scans it out, but that any
- * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
- *
- * We have one bit per pipe and per scanout plane type.
- */
-#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
- BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
- BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
-})
-#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
- BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
- GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
- INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-
#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
@@ -856,19 +467,19 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver)
-#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \
- INTEL_INFO(i915)->graphics.rel)
+#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver)
+#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
+ RUNTIME_INFO(i915)->graphics.ip.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
-#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
-#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \
- INTEL_INFO(i915)->media.rel)
+#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver)
+#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
+ RUNTIME_INFO(i915)->media.ip.rel)
#define IS_MEDIA_VER(i915, from, until) \
(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
-#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver)
+#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver)
#define IS_DISPLAY_VER(i915, from, until) \
(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
@@ -1210,7 +821,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
+#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
@@ -1218,7 +829,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
- ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
+ ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
})
#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
@@ -1249,13 +860,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
-#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0)
+#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
-#define HAS_DP20(dev_priv) (IS_DG2(dev_priv))
+#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+
+#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
@@ -1264,7 +877,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
-#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1272,7 +885,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
-#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
+#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
#define HAS_HECI_PXP(dev_priv) \
(INTEL_INFO(dev_priv)->has_heci_pxp)
@@ -1302,9 +915,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)
+
/*
* Platform has the dedicated compression control state for each lmem surfaces
* stored in lmem to support the 3D and media compression formats.
@@ -1313,7 +928,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1335,9 +950,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
+#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
@@ -1352,91 +967,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
-#define HAS_PERCTX_PREEMPT_CTRL(i915) \
- ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
-
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
-#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
+#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
-/* i915_gem.c */
-void i915_gem_init_early(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-
-static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
-{
- /*
- * A single pass should suffice to release all the freed objects (along
- * most call paths) , but be a little more paranoid in that freeing
- * the objects does take a little amount of time, during which the rcu
- * callbacks could have added new objects into the freed list, and
- * armed the work again.
- */
- while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
- rcu_barrier();
- }
-}
-
-static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
-{
- /*
- * Similar to objects above (see i915_gem_drain_freed-objects), in
- * general we have workers that are armed by RCU and then rearm
- * themselves in their callbacks. To be paranoid, we need to
- * drain the workqueue a second time after waiting for the RCU
- * grace period so that we catch work queued via RCU from the first
- * pass. As neither drain_workqueue() nor flush_workqueue() report
- * a result, we make an assumption that we only don't require more
- * than 3 passes to catch all _recursive_ RCU delayed work.
- *
- */
- int pass = 3;
- do {
- flush_workqueue(i915->wq);
- rcu_barrier();
- i915_gem_drain_freed_objects(i915);
- } while (--pass);
- drain_workqueue(i915->wq);
-}
-
-struct i915_vma * __must_check
-i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
- struct i915_gem_ww_ctx *ww,
- const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
-struct i915_vma * __must_check
-i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
- unsigned long flags);
-#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
-#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
-#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
-#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
-
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-
-int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-
-int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-void i915_gem_driver_register(struct drm_i915_private *i915);
-void i915_gem_driver_unregister(struct drm_i915_private *i915);
-void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
-void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-
-int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
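With the display quirks moved into the display code, the only remaining GEM-side quirk becomes the separate gem_quirks bitmask defined above (GEM_QUIRK_PIN_SWIZZLED_PAGES). Checking and setting it follows the usual BIT() idiom; the test below mirrors how i915_gem.c reads the bit, while the line that sets it is only illustrative:

if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
	drm_dbg(&i915->drm, "applying swizzled-page pinning quirk\n");

/* Set wherever bit-6 swizzle detection decides the workaround is needed. */
i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;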
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..2bdddb61ebd7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
&to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
+ list_for_each_entry_safe(obj, on,
+ &to_gt(i915)->lmem_userfault_list, userfault_link)
+ i915_gem_object_runtime_pm_release_mmap_offset(obj);
+
/*
* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
@@ -885,7 +889,7 @@ static void discard_ggtt_vma(struct i915_vma *vma)
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -896,7 +900,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
GEM_WARN_ON(!ww);
if (flags & PIN_MAPPABLE &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
+ (!view || view->type == I915_GTT_VIEW_NORMAL)) {
/*
* If the required space is larger than the available
* aperture, we will not able to find a slot for the
@@ -987,7 +991,7 @@ new_vma:
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct i915_gem_ww_ctx ww;
@@ -1035,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -1085,14 +1089,50 @@ out:
return err;
}
+/*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid in that freeing the objects does
+ * take a little amount of time, during which the rcu callbacks could have added
+ * new objects into the freed list, and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
+ rcu_barrier();
+ }
+}
+
+/*
+ * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
+ * have workers that are armed by RCU and then rearm themselves in their
+ * callbacks. To be paranoid, we need to drain the workqueue a second time after
+ * waiting for the RCU grace period so that we catch work queued via RCU from
+ * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
+ * result, we make an assumption that we only don't require more than 3 passes
+ * to catch all _recursive_ RCU delayed work.
+ */
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ flush_workqueue(i915->wq);
+ rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
+ }
+
+ drain_workqueue(i915->wq);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
- mkwrite_device_info(dev_priv)->page_sizes =
- I915_GTT_PAGE_SIZE_4K;
+ RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
ret = i915_gem_init_userptr(dev_priv);
if (ret)
@@ -1173,7 +1213,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
+ intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(to_gt(dev_priv));
@@ -1191,7 +1231,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
- i915_gem_drain_freed_objects(dev_priv);
+ /* Flush any outstanding work, including i915_gem_context.release_work. */
+ i915_gem_drain_workqueue(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
@@ -1213,7 +1254,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
- spin_lock_init(&dev_priv->fb_tracking.lock);
+ spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 68d8d52bd541..a5cdf6662d01 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,12 +26,55 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/types.h>
#include <drm/drm_drv.h>
#include "i915_utils.h"
+struct drm_file;
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct i915_gem_ww_ctx;
+struct i915_gtt_view;
+struct i915_vma;
+
+void i915_gem_init_early(struct drm_i915_private *i915);
+void i915_gem_cleanup_early(struct drm_i915_private *i915);
+
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
+void i915_gem_drain_workqueue(struct drm_i915_private *i915);
+
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_gtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_gtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
+#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
+#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
+
+void i915_gem_runtime_suspend(struct drm_i915_private *i915);
+
+int __must_check i915_gem_init(struct drm_i915_private *i915);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_remove(struct drm_i915_private *i915);
+void i915_gem_driver_release(struct drm_i915_private *i915);
+
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
+
+/* FIXME: All of the below belong somewhere else. */
#ifdef CONFIG_DRM_I915_DEBUG_GEM
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 6fd15b39570c..342c8ca6414e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -36,7 +36,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
- value = !!i915->overlay;
+ value = !!i915->display.overlay;
break;
case I915_PARAM_HAS_BSD:
value = !!intel_engine_lookup_user(i915,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 32e92651ef7c..9ea2fe34e7d3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -646,8 +646,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_print_static(&error->device_info, &p);
- intel_device_info_print_runtime(&error->runtime_info, &p);
+ intel_device_info_print(&error->device_info, &error->runtime_info, &p);
intel_driver_caps_print(&error->driver_caps, &p);
}
@@ -671,6 +670,18 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
}
+static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
+ const char *name,
+ const struct intel_ctb_coredump *ctb)
+{
+ if (!ctb->size)
+ return;
+
+ err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
+ name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
+ ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
+}
+
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct intel_uc_coredump *error_uc)
{
@@ -678,7 +689,12 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
+ err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
+ err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
+ err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
+ err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -720,6 +736,8 @@ static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
int i;
err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
+ err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
err_printf(m, "EIR: 0x%08x\n", gt->eir);
err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
@@ -851,7 +869,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt) {
bool print_guc_capture = false;
- if (error->gt->uc && error->gt->uc->is_guc_capture)
+ if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
err_print_gt_display(m, error->gt);
@@ -1004,9 +1022,12 @@ static void cleanup_params(struct i915_gpu_coredump *error)
static void cleanup_uc(struct intel_uc_coredump *uc)
{
- kfree(uc->guc_fw.path);
- kfree(uc->huc_fw.path);
- i915_vma_coredump_free(uc->guc_log);
+ kfree(uc->guc_fw.file_selected.path);
+ kfree(uc->huc_fw.file_selected.path);
+ kfree(uc->guc_fw.file_wanted.path);
+ kfree(uc->huc_fw.file_wanted.path);
+ i915_vma_coredump_free(uc->guc.vma_log);
+ i915_vma_coredump_free(uc->guc.vma_ctb);
kfree(uc);
}
@@ -1655,6 +1676,23 @@ gt_record_engines(struct intel_gt_coredump *gt,
}
}
+static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
+ const struct intel_guc_ct_buffer *ctb,
+ const void *blob_ptr, struct intel_guc *guc)
+{
+ if (!ctb || !ctb->desc)
+ return;
+
+ saved->raw_status = ctb->desc->status;
+ saved->raw_head = ctb->desc->head;
+ saved->raw_tail = ctb->desc->tail;
+ saved->head = ctb->head;
+ saved->tail = ctb->tail;
+ saved->size = ctb->size;
+ saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
+ saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
+}
+
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
struct i915_vma_compress *compress)
@@ -1669,14 +1707,26 @@ gt_record_uc(struct intel_gt_coredump *gt,
memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
- /* Non-default firmware paths will be specified by the modparam.
- * As modparams are generally accesible from the userspace make
- * explicit copies of the firmware paths.
+ error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
+ error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
+ error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
+ error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
+
+ /*
+ * Save the GuC log and include a timestamp reference for converting the
+ * log times to system times (in conjunction with the error->boottime and
+ * gt->clock_frequency fields saved elsewhere).
*/
- error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
- error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
- "GuC log buffer", compress);
+ error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
+ error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
+ "GuC log buffer", compress);
+ error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
+ "GuC CT buffer", compress);
+ error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
+ gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
+ uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+ gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
+ uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
return error_uc;
}
@@ -1833,6 +1883,8 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
static void gt_record_info(struct intel_gt_coredump *gt)
{
memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+ gt->clock_frequency = gt->_gt->clock_frequency;
+ gt->clock_period_ns = gt->_gt->clock_period_ns;
}
/*
@@ -2027,9 +2079,9 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
error->gt->uc = gt_record_uc(error->gt, compress);
if (error->gt->uc) {
if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
- error->gt->uc->is_guc_capture = true;
+ error->gt->uc->guc.is_guc_capture = true;
else
- GEM_BUG_ON(error->gt->uc->is_guc_capture);
+ GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 55a143b92d10..efc75cc2ffdb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -125,6 +125,15 @@ struct intel_engine_coredump {
struct intel_engine_coredump *next;
};
+struct intel_ctb_coredump {
+ u32 raw_head, head;
+ u32 raw_tail, tail;
+ u32 raw_status;
+ u32 desc_offset;
+ u32 cmds_offset;
+ u32 size;
+};
+
struct intel_gt_coredump {
const struct intel_gt *_gt;
bool awake;
@@ -150,6 +159,8 @@ struct intel_gt_coredump {
u32 gtt_cache;
u32 aux_err; /* gen12 */
u32 gam_done; /* gen12 */
+ u32 clock_frequency;
+ u32 clock_period_ns;
/* Display related */
u32 derrmr;
@@ -163,8 +174,14 @@ struct intel_gt_coredump {
struct intel_uc_coredump {
struct intel_uc_fw guc_fw;
struct intel_uc_fw huc_fw;
- struct i915_vma_coredump *guc_log;
- bool is_guc_capture;
+ struct guc_info {
+ struct intel_ctb_coredump ctb[2];
+ struct i915_vma_coredump *vma_ctb;
+ struct i915_vma_coredump *vma_log;
+ u32 timestamp;
+ u16 last_fence;
+ bool is_guc_capture;
+ } guc;
} *uc;
struct intel_gt_coredump *next;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 73cebc6aa650..86a42d9e8041 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -65,7 +65,7 @@
/*
* Interrupt statistic for PMU. Increments the counter only if the
- * interrupt originated from the the GPU so interrupts from a device which
+ * interrupt originated from the GPU so interrupts from a device which
* shares the interrupt line are not accounted.
*/
static inline void pmu_irq_stats(struct drm_i915_private *i915,
@@ -185,7 +185,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
- struct i915_hotplug *hpd = &dev_priv->hotplug;
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
if (HAS_GMCH(dev_priv)) {
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
@@ -595,7 +595,7 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle)
+ if (!dev_priv->display.opregion.asle)
return false;
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
@@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work)
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -1272,7 +1272,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
u32 enabled_irqs = 0;
for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
@@ -1304,12 +1304,12 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
- wake_up_all(&dev_priv->gmbus_wait_queue);
+ wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
- wake_up_all(&dev_priv->gmbus_wait_queue);
+ wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
@@ -1637,7 +1637,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (hotplug_trigger) {
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
i9xx_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1841,7 +1841,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
pch_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1986,7 +1986,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
@@ -1998,7 +1998,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
@@ -2024,7 +2024,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
@@ -2036,7 +2036,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
@@ -2057,7 +2057,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
ilk_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2237,7 +2237,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
bxt_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2257,7 +2257,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
gen11_port_hotplug_long_detect);
}
@@ -2269,7 +2269,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
gen11_port_hotplug_long_detect);
}
@@ -2653,9 +2653,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
static u32
-gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
+gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -2669,10 +2669,10 @@ gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
}
static void
-gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(gt->i915);
+ intel_opregion_asle_intr(i915);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -2736,11 +2736,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ)
gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -2801,11 +2801,11 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ)
gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -3313,8 +3313,8 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3383,8 +3383,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
@@ -3460,8 +3460,8 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
@@ -3538,8 +3538,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3578,8 +3578,8 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
if (DISPLAY_VER(dev_priv) >= 8)
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3636,8 +3636,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -4370,8 +4370,8 @@ HPD_FUNCS(ilk);
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->hotplug_funcs)
- i915->hotplug_funcs->hpd_irq_setup(i915);
+ if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
+ i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
/**
@@ -4413,33 +4413,33 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display_irqs_enabled = false;
- dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
+ dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->hotplug_funcs = &i915_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
} else {
if (HAS_PCH_DG2(dev_priv))
- dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (HAS_PCH_DG1(dev_priv))
- dev_priv->hotplug_funcs = &dg1_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->hotplug_funcs = &gen11_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->hotplug_funcs = &bxt_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->hotplug_funcs = &spt_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
else
- dev_priv->hotplug_funcs = &ilk_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index aacc10f2e73f..cd4487a1d3be 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,16 +26,22 @@
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_sa_media.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
+#include "intel_pci_config.h"
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
- .graphics.ver = (x), \
- .media.ver = (x), \
- .display.ver = (x)
+ .__runtime.graphics.ip.ver = (x), \
+ .__runtime.media.ip.ver = (x), \
+ .__runtime.display.ip.ver = (x)
+
+#define NO_DISPLAY .__runtime.pipe_mask = 0
#define I845_PIPE_OFFSETS \
.display.pipe_offsets = { \
@@ -159,16 +165,16 @@
/* Keep in gen based order, and chronological order within a gen */
#define GEN_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN_DEFAULT_REGIONS \
- .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
+ .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -177,7 +183,7 @@
.has_3d_pipeline = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -189,8 +195,8 @@
#define I845_FEATURES \
GEN(2), \
- .display.pipe_mask = BIT(PIPE_A), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \
+ .__runtime.pipe_mask = BIT(PIPE_A), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -198,7 +204,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -221,22 +227,22 @@ static const struct intel_device_info i845g_info = {
static const struct intel_device_info i85x_info = {
I830_FEATURES,
PLATFORM(INTEL_I85X),
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
#define GEN3_FEATURES \
GEN(3), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -266,7 +272,7 @@ static const struct intel_device_info i915gm_info = {
.display.has_overlay = 1,
.display.overlay_needs_physical = 1,
.display.supports_tv = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -291,7 +297,7 @@ static const struct intel_device_info i945gm_info = {
.display.has_overlay = 1,
.display.overlay_needs_physical = 1,
.display.supports_tv = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -323,12 +329,12 @@ static const struct intel_device_info pnv_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -351,7 +357,7 @@ static const struct intel_device_info i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
.is_mobile = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.display.has_overlay = 1,
.display.supports_tv = 1,
.hws_needs_physical = 1,
@@ -361,7 +367,7 @@ static const struct intel_device_info i965gm_info = {
static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -369,18 +375,18 @@ static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.display.supports_tv = 1,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
GEN(5), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -403,16 +409,16 @@ static const struct intel_device_info ilk_m_info = {
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
.has_rps = true,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
#define GEN6_FEATURES \
GEN(6), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -420,8 +426,8 @@ static const struct intel_device_info ilk_m_info = {
.has_rc6p = 1, \
.has_rps = true, \
.dma_mask_size = 40, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
- .ppgtt_size = 31, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .__runtime.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
@@ -460,11 +466,11 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -473,8 +479,8 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
- .ppgtt_size = 31, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .__runtime.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
@@ -515,9 +521,8 @@ static const struct intel_device_info ivb_m_gt2_info = {
static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
+ NO_DISPLAY,
.gt = 2,
- .display.pipe_mask = 0, /* legal, last one wins */
- .display.cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -525,8 +530,8 @@ static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_reset_engine = true,
@@ -534,11 +539,11 @@ static const struct intel_device_info vlv_info = {
.display.has_gmch = 1,
.display.has_hotplug = 1,
.dma_mask_size = 40,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
- .ppgtt_size = 31,
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING,
+ .__runtime.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display.mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
@@ -549,8 +554,8 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
@@ -584,8 +589,8 @@ static const struct intel_device_info hsw_gt3_info = {
GEN(8), \
.has_logical_ring_contexts = 1, \
.dma_mask_size = 39, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
- .ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
+ .__runtime.ppgtt_size = 48, \
.has_64bit_reloc = 1
#define BDW_PLATFORM \
@@ -613,18 +618,18 @@ static const struct intel_device_info bdw_rsvd_info = {
static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
@@ -632,8 +637,8 @@ static const struct intel_device_info chv_info = {
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.dma_mask_size = 39,
- .ppgtt_type = INTEL_PPGTT_FULL,
- .ppgtt_size = 32,
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL,
+ .__runtime.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -646,16 +651,16 @@ static const struct intel_device_info chv_info = {
};
#define GEN9_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K
#define GEN9_FEATURES \
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.has_gt_uc = 1, \
- .display.has_hdcp = 1, \
+ .__runtime.has_hdcp = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
@@ -678,7 +683,7 @@ static const struct intel_device_info skl_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .platform_engine_mask = \
+ .__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
@@ -697,29 +702,29 @@ static const struct intel_device_info skl_gt4_info = {
.is_lp = 1, \
.display.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .display.has_hdcp = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.has_hdcp = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.has_rc6 = 1, \
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
- .ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
+ .__runtime.ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
@@ -739,7 +744,7 @@ static const struct intel_device_info bxt_info = {
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
- .display.ver = 10,
+ .__runtime.display.ip.ver = 10,
.display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
GLK_COLORS,
};
@@ -761,7 +766,7 @@ static const struct intel_device_info kbl_gt2_info = {
static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -782,7 +787,7 @@ static const struct intel_device_info cfl_gt2_info = {
static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -801,15 +806,15 @@ static const struct intel_device_info cml_gt2_info = {
};
#define GEN11_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
#define GEN11_FEATURES \
GEN9_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
.display.abox_mask = BIT(0), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.display.pipe_offsets = { \
@@ -832,37 +837,37 @@ static const struct intel_device_info cml_gt2_info = {
ICL_COLORS, \
.display.dbuf.size = 2048, \
.display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
- .display.has_dsc = 1, \
+ .__runtime.has_dsc = 1, \
.has_coherent_ggtt = false, \
.has_logical_ring_elsq = 1
static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
- .ppgtt_size = 36,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .__runtime.ppgtt_size = 36,
};
static const struct intel_device_info jsl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_JASPERLAKE),
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
- .ppgtt_size = 36,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .__runtime.ppgtt_size = 36,
};
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
.display.abox_mask = GENMASK(2, 1), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.display.pipe_offsets = { \
@@ -890,7 +895,7 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.display.has_modular_fia = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
@@ -898,17 +903,17 @@ static const struct intel_device_info rkl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ROCKETLAKE),
.display.abox_mask = BIT(0),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
};
#define DGFX_FEATURES \
- .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
+ .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_llc = 0, \
.has_pxp = 0, \
.has_snoop = 1, \
@@ -918,24 +923,24 @@ static const struct intel_device_info rkl_info = {
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 10,
+ .__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
/* Wa_16011227922 */
- .ppgtt_size = 47,
+ .__runtime.ppgtt_size = 47,
};
static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.dma_mask_size = 39,
};
@@ -951,18 +956,18 @@ static const struct intel_device_info adl_s_info = {
.display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ddi = 1, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.display.has_dp_mst = 1, \
.display.has_dsb = 1, \
- .display.has_dsc = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.has_dsc = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
.display.has_fpga_dbg = 1, \
- .display.has_hdcp = 1, \
+ .__runtime.has_hdcp = 1, \
.display.has_hotplug = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
- .display.ver = 13, \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .__runtime.display.ip.ver = 13, \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -985,28 +990,28 @@ static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
.display.has_cdclk_crawl = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
- .ppgtt_size = 48,
+ .__runtime.ppgtt_size = 48,
.dma_mask_size = 39,
};
#undef GEN
#define XE_HP_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .graphics.ver = 12, \
- .graphics.rel = 50, \
+ .__runtime.graphics.ip.ver = 12, \
+ .__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
.has_3d_pipeline = 1, \
@@ -1022,12 +1027,12 @@ static const struct intel_device_info adl_p_info = {
.has_reset_engine = 1, \
.has_rps = 1, \
.has_runtime_pm = 1, \
- .ppgtt_size = 48, \
- .ppgtt_type = INTEL_PPGTT_FULL
+ .__runtime.ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL
#define XE_HPM_FEATURES \
- .media.ver = 12, \
- .media.rel = 50
+ .__runtime.media.ip.ver = 12, \
+ .__runtime.media.ip.rel = 50
__maybe_unused
static const struct intel_device_info xehpsdv_info = {
@@ -1035,11 +1040,11 @@ static const struct intel_device_info xehpsdv_info = {
XE_HPM_FEATURES,
DGFX_FEATURES,
PLATFORM(INTEL_XEHPSDV),
- .display = { },
+ NO_DISPLAY,
.has_64k_pages = 1,
.needs_compact_pt = 1,
.has_media_ratio_mode = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
@@ -1052,8 +1057,8 @@ static const struct intel_device_info xehpsdv_info = {
XE_HP_FEATURES, \
XE_HPM_FEATURES, \
DGFX_FEATURES, \
- .graphics.rel = 55, \
- .media.rel = 55, \
+ .__runtime.graphics.ip.rel = 55, \
+ .__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
.has_4tile = 1, \
.has_64k_pages = 1, \
@@ -1061,7 +1066,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_heci_pxp = 1, \
.needs_compact_pt = 1, \
.has_media_ratio_mode = 1, \
- .platform_engine_mask = \
+ .__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
BIT(VCS0) | BIT(VCS2) | \
@@ -1070,15 +1075,16 @@ static const struct intel_device_info xehpsdv_info = {
static const struct intel_device_info dg2_info = {
DG2_FEATURES,
XE_LPD_FEATURES,
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
.require_force_probe = 1,
};
static const struct intel_device_info ats_m_info = {
DG2_FEATURES,
- .display = { 0 },
+ NO_DISPLAY,
.require_force_probe = 1,
+ .tuning_thread_rr_after_dep = 1,
};
#define XE_HPC_FEATURES \
@@ -1095,12 +1101,12 @@ static const struct intel_device_info pvc_info = {
XE_HPC_FEATURES,
XE_HPM_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 60,
- .media.rel = 60,
+ .__runtime.graphics.ip.rel = 60,
+ .__runtime.media.ip.rel = 60,
PLATFORM(INTEL_PONTEVECCHIO),
- .display = { 0 },
+ NO_DISPLAY,
.has_flat_ccs = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(BCS0) |
BIT(VCS0) |
BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
@@ -1109,8 +1115,19 @@ static const struct intel_device_info pvc_info = {
#define XE_LPDP_FEATURES \
XE_LPD_FEATURES, \
- .display.ver = 14, \
- .display.has_cdclk_crawl = 1
+ .__runtime.display.ip.ver = 14, \
+ .display.has_cdclk_crawl = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B)
+
+static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ {
+ .type = GT_MEDIA,
+ .name = "Standalone Media GT",
+ .gsi_offset = MTL_MEDIA_GSI_BASE,
+ .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ },
+ {}
+};
__maybe_unused
static const struct intel_device_info mtl_info = {
@@ -1120,15 +1137,16 @@ static const struct intel_device_info mtl_info = {
* Real graphics IP version will be obtained from hardware GMD_ID
* register. Value provided here is just for sanity checking.
*/
- .graphics.ver = 12,
- .graphics.rel = 70,
- .media.ver = 13,
+ .__runtime.graphics.ip.ver = 12,
+ .__runtime.graphics.ip.rel = 70,
+ .__runtime.media.ip.ver = 13,
PLATFORM(INTEL_METEORLAKE),
.display.has_modular_fia = 1,
+ .extra_gt_list = xelpmp_extra_gt,
.has_flat_ccs = 0,
.has_snoop = 1,
- .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
+ .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
.require_force_probe = 1,
};
@@ -1262,6 +1280,27 @@ static bool force_probe(u16 device_id, const char *devices)
return ret;
}
+bool i915_pci_resource_valid(struct pci_dev *pdev, int bar)
+{
+ if (!pci_resource_flags(pdev, bar))
+ return false;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET)
+ return false;
+
+ if (!pci_resource_len(pdev, bar))
+ return false;
+
+ return true;
+}
+
+static bool intel_mmio_bar_valid(struct pci_dev *pdev, struct intel_device_info *intel_info)
+{
+ int gttmmaddr_bar = intel_info->__runtime.graphics.ip.ver == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
+
+ return i915_pci_resource_valid(pdev, gttmmaddr_bar);
+}
+
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
@@ -1287,6 +1326,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
+ if (!intel_mmio_bar_valid(pdev, intel_info))
+ return -ENXIO;
+
/* Detect if we need to wait for other drivers early on */
if (intel_modeset_probe_defer(pdev))
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h
index ee048c238174..8dfe19f9a775 100644
--- a/drivers/gpu/drm/i915/i915_pci.h
+++ b/drivers/gpu/drm/i915/i915_pci.h
@@ -6,7 +6,13 @@
#ifndef __I915_PCI_H__
#define __I915_PCI_H__
+#include <linux/types.h>
+
+struct pci_dev;
+
int i915_pci_register_driver(void);
void i915_pci_unregister_driver(void);
+bool i915_pci_resource_valid(struct pci_dev *pdev, int bar);
+
#endif /* __I915_PCI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f3c23fe9ad9c..0defbb43ceea 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1376,7 +1376,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct i915_perf *perf = stream->perf;
- BUG_ON(stream != perf->exclusive_stream);
+ if (WARN_ON(stream != perf->exclusive_stream))
+ return;
/*
* Unset exclusive_stream first, it will be checked while disabling
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3168d7007e10..1a9bd829fc7e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1125,8 +1125,12 @@
#define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */
#define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14)
#define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x)
+#define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2)
+#define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3)
#define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8)
#define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x)
+#define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5)
+#define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x)
#define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0)
#define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x)
@@ -1462,69 +1466,6 @@
#define FBC_REND_CACHE_CLEAN REG_BIT(1)
/*
- * GPIO regs
- */
-#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
- 4 * (gpio))
-
-# define GPIO_CLOCK_DIR_MASK (1 << 0)
-# define GPIO_CLOCK_DIR_IN (0 << 1)
-# define GPIO_CLOCK_DIR_OUT (1 << 1)
-# define GPIO_CLOCK_VAL_MASK (1 << 2)
-# define GPIO_CLOCK_VAL_OUT (1 << 3)
-# define GPIO_CLOCK_VAL_IN (1 << 4)
-# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-# define GPIO_DATA_DIR_MASK (1 << 8)
-# define GPIO_DATA_DIR_IN (0 << 9)
-# define GPIO_DATA_DIR_OUT (1 << 9)
-# define GPIO_DATA_VAL_MASK (1 << 10)
-# define GPIO_DATA_VAL_OUT (1 << 11)
-# define GPIO_DATA_VAL_IN (1 << 12)
-# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-
-#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1 << 11)
-#define GMBUS_RATE_100KHZ (0 << 8)
-#define GMBUS_RATE_50KHZ (1 << 8)
-#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
-#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
-
-#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1 << 31)
-#define GMBUS_SW_RDY (1 << 30)
-#define GMBUS_ENT (1 << 29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0 << 25)
-#define GMBUS_CYCLE_WAIT (1 << 25)
-#define GMBUS_CYCLE_INDEX (2 << 25)
-#define GMBUS_CYCLE_STOP (4 << 25)
-#define GMBUS_BYTE_COUNT_SHIFT 16
-#define GMBUS_BYTE_COUNT_MAX 256U
-#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
-#define GMBUS_SLAVE_INDEX_SHIFT 8
-#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1 << 0)
-#define GMBUS_SLAVE_WRITE (0 << 0)
-#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1 << 15)
-#define GMBUS_HW_WAIT_PHASE (1 << 14)
-#define GMBUS_STALL_TIMEOUT (1 << 13)
-#define GMBUS_INT (1 << 12)
-#define GMBUS_HW_RDY (1 << 11)
-#define GMBUS_SATOER (1 << 10)
-#define GMBUS_ACTIVE (1 << 9)
-#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
-#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
-#define GMBUS_NAK_EN (1 << 3)
-#define GMBUS_IDLE_EN (1 << 2)
-#define GMBUS_HW_WAIT_EN (1 << 1)
-#define GMBUS_HW_RDY_EN (1 << 0)
-#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1 << 31)
-
-/*
* Clock control & power management
*/
#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
@@ -1700,7 +1641,7 @@
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
+#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1857,14 +1798,14 @@
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
-#define PROCHOT_MASK REG_BIT(1)
-#define THERMAL_LIMIT_MASK REG_BIT(2)
-#define RATL_MASK REG_BIT(6)
-#define VR_THERMALERT_MASK REG_BIT(7)
-#define VR_TDC_MASK REG_BIT(8)
-#define POWER_LIMIT_4_MASK REG_BIT(9)
-#define POWER_LIMIT_1_MASK REG_BIT(11)
-#define POWER_LIMIT_2_MASK REG_BIT(12)
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -1916,6 +1857,13 @@
#define CLKGATE_DIS_PSL(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
/*
* Display engine regs
*/
@@ -2822,7 +2770,7 @@
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
#define PCH_PPS_BASE 0xC7200
-#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \
+#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \
PPS_BASE + (reg) + \
(pps_idx) * 0x100)
@@ -2918,118 +2866,6 @@
#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
-#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
- _VLV_BLC_PWM_CTL2_B)
-
-#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
-#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
- _VLV_BLC_PWM_CTL_B)
-
-#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
-#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
- _VLV_BLC_HIST_CTL_B)
-
-/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
-#define BLM_PWM_ENABLE (1 << 31)
-#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
-#define BLM_PIPE_SELECT (1 << 29)
-#define BLM_PIPE_SELECT_IVB (3 << 29)
-#define BLM_PIPE_A (0 << 29)
-#define BLM_PIPE_B (1 << 29)
-#define BLM_PIPE_C (2 << 29) /* ivb + */
-#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
-#define BLM_TRANSCODER_B BLM_PIPE_B
-#define BLM_TRANSCODER_C BLM_PIPE_C
-#define BLM_TRANSCODER_EDP (3 << 29)
-#define BLM_PIPE(pipe) ((pipe) << 29)
-#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
-#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
-#define BLM_PHASE_IN_ENABLE (1 << 25)
-#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
-#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
-#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
-#define BLM_PHASE_IN_COUNT_SHIFT (8)
-#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
-#define BLM_PHASE_IN_INCR_SHIFT (0)
-#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
-/*
- * This is the number of cycles out of the backlight modulation cycle for which
- * the backlight is on.
- *
- * This field must be no greater than the number of cycles in the complete
- * backlight modulation cycle.
- */
-#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
-#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
-#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
-#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-
-#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define BLM_HISTOGRAM_ENABLE (1 << 31)
-
-/* New registers for PCH-split platforms. Safe where new bits show up, the
- * register layout machtes with gen4 BLC_PWM_CTL[12]. */
-#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
-#define BLC_PWM_CPU_CTL _MMIO(0x48254)
-
-#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
-
-/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
- * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
-#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
-#define BLM_PCH_PWM_ENABLE (1 << 31)
-#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
-#define BLM_PCH_POLARITY (1 << 29)
-#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
-
-#define UTIL_PIN_CTL _MMIO(0x48400)
-#define UTIL_PIN_ENABLE (1 << 31)
-#define UTIL_PIN_PIPE_MASK (3 << 29)
-#define UTIL_PIN_PIPE(x) ((x) << 29)
-#define UTIL_PIN_MODE_MASK (0xf << 24)
-#define UTIL_PIN_MODE_DATA (0 << 24)
-#define UTIL_PIN_MODE_PWM (1 << 24)
-#define UTIL_PIN_MODE_VBLANK (4 << 24)
-#define UTIL_PIN_MODE_VSYNC (5 << 24)
-#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
-#define UTIL_PIN_OUTPUT_DATA (1 << 23)
-#define UTIL_PIN_POLARITY (1 << 22)
-#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
-#define UTIL_PIN_INPUT_DATA (1 << 16)
-
-/* BXT backlight register definition. */
-#define _BXT_BLC_PWM_CTL1 0xC8250
-#define BXT_BLC_PWM_ENABLE (1 << 31)
-#define BXT_BLC_PWM_POLARITY (1 << 29)
-#define _BXT_BLC_PWM_FREQ1 0xC8254
-#define _BXT_BLC_PWM_DUTY1 0xC8258
-
-#define _BXT_BLC_PWM_CTL2 0xC8350
-#define _BXT_BLC_PWM_FREQ2 0xC8354
-#define _BXT_BLC_PWM_DUTY2 0xC8358
-
-#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
-#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
-#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
-
#define PCH_GTC_CTL _MMIO(0xe7000)
#define PCH_GTC_ENABLE (1 << 31)
@@ -3619,6 +3455,34 @@
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define _XELPDP_USBC1_AUX_CH_CTL 0x16F210
+#define _XELPDP_USBC2_AUX_CH_CTL 0x16F410
+#define _XELPDP_USBC3_AUX_CH_CTL 0x16F610
+#define _XELPDP_USBC4_AUX_CH_CTL 0x16F810
+
+#define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_CTL, \
+ _DPB_AUX_CH_CTL, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_CTL, \
+ _XELPDP_USBC2_AUX_CH_CTL, \
+ _XELPDP_USBC3_AUX_CH_CTL, \
+ _XELPDP_USBC4_AUX_CH_CTL))
+
+#define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214
+#define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414
+#define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614
+#define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814
+
+#define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_DATA1, \
+ _XELPDP_USBC2_AUX_CH_DATA1, \
+ _XELPDP_USBC3_AUX_CH_DATA1, \
+ _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4)
+
#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
#define DP_AUX_CH_CTL_DONE (1 << 30)
#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
@@ -3631,6 +3495,8 @@
#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19)
+#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18)
#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
@@ -5862,6 +5728,13 @@
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
[TRANSCODER_C] = _CHICKEN_TRANS_C, \
[TRANSCODER_D] = _CHICKEN_TRANS_D))
+
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
@@ -5926,7 +5799,8 @@
_BW_BUDDY1_PAGE_MASK))
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
@@ -6718,10 +6592,10 @@
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
-#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
-#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
-#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
+#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
+#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
+#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
#define SKL_PCODE_CDCLK_CONTROL 0x7
#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
@@ -6937,265 +6811,6 @@ enum skl_power_gate {
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
-/* HDCP Key Registers */
-#define HDCP_KEY_CONF _MMIO(0x66c00)
-#define HDCP_AKSV_SEND_TRIGGER BIT(31)
-#define HDCP_CLEAR_KEYS_TRIGGER BIT(30)
-#define HDCP_KEY_LOAD_TRIGGER BIT(8)
-#define HDCP_KEY_STATUS _MMIO(0x66c04)
-#define HDCP_FUSE_IN_PROGRESS BIT(7)
-#define HDCP_FUSE_ERROR BIT(6)
-#define HDCP_FUSE_DONE BIT(5)
-#define HDCP_KEY_LOAD_STATUS BIT(1)
-#define HDCP_KEY_LOAD_DONE BIT(0)
-#define HDCP_AKSV_LO _MMIO(0x66c10)
-#define HDCP_AKSV_HI _MMIO(0x66c14)
-
-/* HDCP Repeater Registers */
-#define HDCP_REP_CTL _MMIO(0x66d00)
-#define HDCP_TRANSA_REP_PRESENT BIT(31)
-#define HDCP_TRANSB_REP_PRESENT BIT(30)
-#define HDCP_TRANSC_REP_PRESENT BIT(29)
-#define HDCP_TRANSD_REP_PRESENT BIT(28)
-#define HDCP_DDIB_REP_PRESENT BIT(30)
-#define HDCP_DDIA_REP_PRESENT BIT(29)
-#define HDCP_DDIC_REP_PRESENT BIT(28)
-#define HDCP_DDID_REP_PRESENT BIT(27)
-#define HDCP_DDIF_REP_PRESENT BIT(26)
-#define HDCP_DDIE_REP_PRESENT BIT(25)
-#define HDCP_TRANSA_SHA1_M0 (1 << 20)
-#define HDCP_TRANSB_SHA1_M0 (2 << 20)
-#define HDCP_TRANSC_SHA1_M0 (3 << 20)
-#define HDCP_TRANSD_SHA1_M0 (4 << 20)
-#define HDCP_DDIB_SHA1_M0 (1 << 20)
-#define HDCP_DDIA_SHA1_M0 (2 << 20)
-#define HDCP_DDIC_SHA1_M0 (3 << 20)
-#define HDCP_DDID_SHA1_M0 (4 << 20)
-#define HDCP_DDIF_SHA1_M0 (5 << 20)
-#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
-#define HDCP_SHA1_BUSY BIT(16)
-#define HDCP_SHA1_READY BIT(17)
-#define HDCP_SHA1_COMPLETE BIT(18)
-#define HDCP_SHA1_V_MATCH BIT(19)
-#define HDCP_SHA1_TEXT_32 (1 << 1)
-#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
-#define HDCP_SHA1_TEXT_24 (4 << 1)
-#define HDCP_SHA1_TEXT_16 (5 << 1)
-#define HDCP_SHA1_TEXT_8 (6 << 1)
-#define HDCP_SHA1_TEXT_0 (7 << 1)
-#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
-#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
-#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
-#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
-#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
-#define HDCP_SHA_TEXT _MMIO(0x66d18)
-
-/* HDCP Auth Registers */
-#define _PORTA_HDCP_AUTHENC 0x66800
-#define _PORTB_HDCP_AUTHENC 0x66500
-#define _PORTC_HDCP_AUTHENC 0x66600
-#define _PORTD_HDCP_AUTHENC 0x66700
-#define _PORTE_HDCP_AUTHENC 0x66A00
-#define _PORTF_HDCP_AUTHENC 0x66900
-#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
- _PORTA_HDCP_AUTHENC, \
- _PORTB_HDCP_AUTHENC, \
- _PORTC_HDCP_AUTHENC, \
- _PORTD_HDCP_AUTHENC, \
- _PORTE_HDCP_AUTHENC, \
- _PORTF_HDCP_AUTHENC) + (x))
-#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
-#define _TRANSA_HDCP_CONF 0x66400
-#define _TRANSB_HDCP_CONF 0x66500
-#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
- _TRANSB_HDCP_CONF)
-#define HDCP_CONF(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_CONF(trans) : \
- PORT_HDCP_CONF(port))
-
-#define HDCP_CONF_CAPTURE_AN BIT(0)
-#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
-#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
-#define _TRANSA_HDCP_ANINIT 0x66404
-#define _TRANSB_HDCP_ANINIT 0x66504
-#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_ANINIT, \
- _TRANSB_HDCP_ANINIT)
-#define HDCP_ANINIT(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANINIT(trans) : \
- PORT_HDCP_ANINIT(port))
-
-#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
-#define _TRANSA_HDCP_ANLO 0x66408
-#define _TRANSB_HDCP_ANLO 0x66508
-#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
- _TRANSB_HDCP_ANLO)
-#define HDCP_ANLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANLO(trans) : \
- PORT_HDCP_ANLO(port))
-
-#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
-#define _TRANSA_HDCP_ANHI 0x6640C
-#define _TRANSB_HDCP_ANHI 0x6650C
-#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
- _TRANSB_HDCP_ANHI)
-#define HDCP_ANHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANHI(trans) : \
- PORT_HDCP_ANHI(port))
-
-#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
-#define _TRANSA_HDCP_BKSVLO 0x66410
-#define _TRANSB_HDCP_BKSVLO 0x66510
-#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_BKSVLO, \
- _TRANSB_HDCP_BKSVLO)
-#define HDCP_BKSVLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_BKSVLO(trans) : \
- PORT_HDCP_BKSVLO(port))
-
-#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
-#define _TRANSA_HDCP_BKSVHI 0x66414
-#define _TRANSB_HDCP_BKSVHI 0x66514
-#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_BKSVHI, \
- _TRANSB_HDCP_BKSVHI)
-#define HDCP_BKSVHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_BKSVHI(trans) : \
- PORT_HDCP_BKSVHI(port))
-
-#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
-#define _TRANSA_HDCP_RPRIME 0x66418
-#define _TRANSB_HDCP_RPRIME 0x66518
-#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_RPRIME, \
- _TRANSB_HDCP_RPRIME)
-#define HDCP_RPRIME(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_RPRIME(trans) : \
- PORT_HDCP_RPRIME(port))
-
-#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
-#define _TRANSA_HDCP_STATUS 0x6641C
-#define _TRANSB_HDCP_STATUS 0x6651C
-#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_STATUS, \
- _TRANSB_HDCP_STATUS)
-#define HDCP_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_STATUS(trans) : \
- PORT_HDCP_STATUS(port))
-
-#define HDCP_STATUS_STREAM_A_ENC BIT(31)
-#define HDCP_STATUS_STREAM_B_ENC BIT(30)
-#define HDCP_STATUS_STREAM_C_ENC BIT(29)
-#define HDCP_STATUS_STREAM_D_ENC BIT(28)
-#define HDCP_STATUS_AUTH BIT(21)
-#define HDCP_STATUS_ENC BIT(20)
-#define HDCP_STATUS_RI_MATCH BIT(19)
-#define HDCP_STATUS_R0_READY BIT(18)
-#define HDCP_STATUS_AN_READY BIT(17)
-#define HDCP_STATUS_CIPHER BIT(16)
-#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
-
-/* HDCP2.2 Registers */
-#define _PORTA_HDCP2_BASE 0x66800
-#define _PORTB_HDCP2_BASE 0x66500
-#define _PORTC_HDCP2_BASE 0x66600
-#define _PORTD_HDCP2_BASE 0x66700
-#define _PORTE_HDCP2_BASE 0x66A00
-#define _PORTF_HDCP2_BASE 0x66900
-#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
- _PORTA_HDCP2_BASE, \
- _PORTB_HDCP2_BASE, \
- _PORTC_HDCP2_BASE, \
- _PORTD_HDCP2_BASE, \
- _PORTE_HDCP2_BASE, \
- _PORTF_HDCP2_BASE) + (x))
-
-#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
-#define _TRANSA_HDCP2_AUTH 0x66498
-#define _TRANSB_HDCP2_AUTH 0x66598
-#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
- _TRANSB_HDCP2_AUTH)
-#define AUTH_LINK_AUTHENTICATED BIT(31)
-#define AUTH_LINK_TYPE BIT(30)
-#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
-#define AUTH_CLR_KEYS BIT(18)
-#define HDCP2_AUTH(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_AUTH(trans) : \
- PORT_HDCP2_AUTH(port))
-
-#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
-#define _TRANSA_HDCP2_CTL 0x664B0
-#define _TRANSB_HDCP2_CTL 0x665B0
-#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
- _TRANSB_HDCP2_CTL)
-#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-#define HDCP2_CTL(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_CTL(trans) : \
- PORT_HDCP2_CTL(port))
-
-#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define _TRANSA_HDCP2_STATUS 0x664B4
-#define _TRANSB_HDCP2_STATUS 0x665B4
-#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_STATUS, \
- _TRANSB_HDCP2_STATUS)
-#define LINK_TYPE_STATUS BIT(22)
-#define LINK_AUTH_STATUS BIT(21)
-#define LINK_ENCRYPTION_STATUS BIT(20)
-#define HDCP2_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_STATUS(trans) : \
- PORT_HDCP2_STATUS(port))
-
-#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
-#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
-#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
-#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
-#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
- _PIPEA_HDCP2_STREAM_STATUS, \
- _PIPEB_HDCP2_STREAM_STATUS, \
- _PIPEC_HDCP2_STREAM_STATUS, \
- _PIPED_HDCP2_STREAM_STATUS))
-
-#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
-#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
-#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_STREAM_STATUS, \
- _TRANSB_HDCP2_STREAM_STATUS)
-#define STREAM_ENCRYPTION_STATUS BIT(31)
-#define STREAM_TYPE_STATUS BIT(30)
-#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_STREAM_STATUS(trans) : \
- PIPE_HDCP2_STREAM_STATUS(pipe))
-
-#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
-#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
-#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
- _PORTA_HDCP2_AUTH_STREAM, \
- _PORTB_HDCP2_AUTH_STREAM)
-#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
-#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
-#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_AUTH_STREAM, \
- _TRANSB_HDCP2_AUTH_STREAM)
-#define AUTH_STREAM_TYPE BIT(31)
-#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_AUTH_STREAM(trans) : \
- PORT_HDCP2_AUTH_STREAM(port))
-
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -7503,16 +7118,16 @@ enum skl_power_gate {
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK (3 << 26)
-#define CDCLK_FREQ_450_432 (0 << 26)
-#define CDCLK_FREQ_540 (1 << 26)
-#define CDCLK_FREQ_337_308 (2 << 26)
-#define CDCLK_FREQ_675_617 (3 << 26)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
@@ -8367,6 +7982,7 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_ALT_ICH_SEL (1 << 20)
#define DSC_VBR_ENABLE (1 << 19)
#define DSC_422_ENABLE (1 << 18)
#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
@@ -8717,4 +8333,27 @@ enum skl_power_gate {
#define GEN12_CULLBIT2 _MMIO(0x7030)
#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
+#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
+#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784)
+#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788)
+#define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0)
+#define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16)
+
+#define MTL_LATENCY_SAGV _MMIO(0x4578b)
+#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0)
+
+#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
+#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
+#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(0x45710 + (point) * 2)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(0x45714 + (point) * 2)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
#endif /* _I915_REG_H_ */
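For context only (not part of the patch): the CDCLK_CTL and memory-latency hunks above convert open-coded shifts into REG_GENMASK()/REG_FIELD_PREP() style definitions. A minimal userspace sketch of that field-packing pattern, using hypothetical SKETCH_* stand-ins (GCC/Clang builtins assumed) rather than the kernel's real helpers:

#include <stdio.h>

#define SKETCH_GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define SKETCH_FIELD_PREP(mask, v)  (((v) << __builtin_ctz(mask)) & (mask))
#define SKETCH_FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

#define SKETCH_CDCLK_FREQ_SEL SKETCH_GENMASK(27, 26)	/* two-bit field at 27:26 */

int main(void)
{
	unsigned int reg = SKETCH_FIELD_PREP(SKETCH_CDCLK_FREQ_SEL, 2);	/* "337/308" select */

	printf("raw=0x%08x field=%u\n", reg, SKETCH_FIELD_GET(SKETCH_CDCLK_FREQ_SEL, reg));
	return 0;
}

Defining every value relative to the mask keeps the field position in one place, which is the point of the conversion above.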
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index ae984c66c48a..6fc0d1b89690 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -241,8 +241,6 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
- BUG_ON(!fn);
-
__init_waitqueue_head(&fence->wait, name, key);
fence->fn = fn;
#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index a7c603bc1b01..619fc5a22f0c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -48,11 +48,15 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
do { \
static struct lock_class_key __key; \
\
+ BUILD_BUG_ON((fn) == NULL); \
__i915_sw_fence_init((fence), (fn), #fence, &__key); \
} while (0)
#else
#define i915_sw_fence_init(fence, fn) \
- __i915_sw_fence_init((fence), (fn), NULL, NULL)
+do { \
+ BUILD_BUG_ON((fn) == NULL); \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL); \
+} while (0)
#endif
void i915_sw_fence_reinit(struct i915_sw_fence *fence);
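For context only (not part of the patch): the i915_sw_fence_init() hunk above replaces the runtime BUG_ON(!fn) with a compile-time check, relying on callers always passing a literal callback that the compiler can fold. A simplified stand-in for the negative-array-size form of such a check, demonstrated with a plainly constant condition (the kernel's BUILD_BUG_ON() is more capable and, under optimization, also folds pointer comparisons like (fn) == NULL):

/* Only valid for conditions the compiler can evaluate to a constant. */
#define SKETCH_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct sketch_fence_cfg {
	void (*notify)(void);
	unsigned long flags;
};

void sketch_check_layout(void)
{
	/* Compilation fails if the struct ever grows beyond two pointers. */
	SKETCH_BUILD_BUG_ON(sizeof(struct sketch_fence_cfg) > 2 * sizeof(void *));
}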
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 427de1aaab36..e19452f0e100 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -173,6 +173,77 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
kfree(bman_res);
}
+static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct drm_buddy *mm = &bman->mm;
+ struct drm_buddy_block *block;
+
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
+ GEM_BUG_ON(!place->lpfn);
+
+ /*
+ * If we just want something mappable then we can quickly check
+ * if the current victim resource is using any of the CPU
+ * visible portion.
+ */
+ if (!place->fpfn &&
+ place->lpfn == i915_ttm_buddy_man_visible_size(man))
+ return bman_res->used_visible_size > 0;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
+ if (place->fpfn < lpfn && place->lpfn > fpfn)
+ return true;
+ }
+
+ return false;
+}
+
+static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct drm_buddy *mm = &bman->mm;
+ struct drm_buddy_block *block;
+
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
+ GEM_BUG_ON(!place->lpfn);
+
+ if (!place->fpfn &&
+ place->lpfn == i915_ttm_buddy_man_visible_size(man))
+ return bman_res->used_visible_size == res->num_pages;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
+ if (fpfn < place->fpfn || lpfn > place->lpfn)
+ return false;
+ }
+
+ return true;
+}
+
static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
@@ -200,6 +271,8 @@ static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
.alloc = i915_ttm_buddy_man_alloc,
.free = i915_ttm_buddy_man_free,
+ .intersects = i915_ttm_buddy_man_intersects,
+ .compatible = i915_ttm_buddy_man_compatible,
.debug = i915_ttm_buddy_man_debug,
};
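For context only (not part of the patch): outside of the two fast paths, both callbacks above reduce to interval tests on page-frame-number ranges, overlap for .intersects and containment for .compatible. A minimal sketch of those two tests on half-open [fpfn, lpfn) ranges:

#include <stdbool.h>

struct sketch_pfn_range { unsigned long fpfn, lpfn; };	/* half-open: [fpfn, lpfn) */

static bool sketch_ranges_intersect(struct sketch_pfn_range a, struct sketch_pfn_range b)
{
	return a.fpfn < b.lpfn && a.lpfn > b.fpfn;
}

static bool sketch_range_contained(struct sketch_pfn_range inner, struct sketch_pfn_range outer)
{
	return inner.fpfn >= outer.fpfn && inner.lpfn <= outer.lpfn;
}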
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c10d68cdc3ca..6c14d13364bf 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -360,10 +360,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef3b04c7e153..f17c09ead7d7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -109,7 +109,7 @@ static void __i915_vma_retire(struct i915_active *ref)
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
@@ -141,9 +141,9 @@ vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->obj_link);
RB_CLEAR_NODE(&vma->obj_node);
- if (view && view->type != I915_GGTT_VIEW_NORMAL) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ vma->gtt_view = *view;
+ if (view->type == I915_GTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
view->partial.offset,
view->partial.size,
@@ -151,10 +151,10 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
GEM_BUG_ON(vma->size > obj->base.size);
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ } else if (view->type == I915_GTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
+ } else if (view->type == I915_GTT_VIEW_REMAPPED) {
vma->size = intel_remapped_info_size(&view->remapped);
vma->size <<= PAGE_SHIFT;
}
@@ -248,7 +248,7 @@ err_vma:
static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct rb_node *rb;
@@ -286,7 +286,7 @@ i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *vma;
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags);
}
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -1205,7 +1203,7 @@ err_st_alloc:
}
static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
+intel_partial_pages(const struct i915_gtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
@@ -1249,33 +1247,33 @@ __i915_vma_get_pages(struct i915_vma *vma)
*/
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
- switch (vma->ggtt_view.type) {
+ switch (vma->gtt_view.type) {
default:
- GEM_BUG_ON(vma->ggtt_view.type);
+ GEM_BUG_ON(vma->gtt_view.type);
fallthrough;
- case I915_GGTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_NORMAL:
pages = vma->obj->mm.pages;
break;
- case I915_GGTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_ROTATED:
pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
break;
- case I915_GGTT_VIEW_REMAPPED:
+ case I915_GTT_VIEW_REMAPPED:
pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
break;
- case I915_GGTT_VIEW_PARTIAL:
- pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ case I915_GTT_VIEW_PARTIAL:
+ pages = intel_partial_pages(&vma->gtt_view, vma->obj);
break;
}
if (IS_ERR(pages)) {
drm_err(&vma->vm->i915->drm,
"Failed to get pages for VMA view type %u (%ld)!\n",
- vma->ggtt_view.type, PTR_ERR(pages));
+ vma->gtt_view.type, PTR_ERR(pages));
return PTR_ERR(pages);
}
@@ -1310,6 +1308,19 @@ err_unpin:
return err;
}
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+ /*
+ * Before we release the pages that were bound by this vma, we
+ * must invalidate all the TLBs that may still have a reference
+ * back to our physical address. It only needs to be done once,
+ * so after updating the PTE to point away from the pages, record
+ * the most recent TLB invalidation seqno, and if we have not yet
+ * flushed the TLBs upon release, perform a full invalidation.
+ */
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
@@ -1795,7 +1806,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
GEM_BUG_ON(!vma->obj->userfault_count);
node = &vma->mmo->vma_node;
- vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
@@ -1871,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
enum dma_resv_usage usage;
int idx;
- obj->read_domains = 0;
if (flags & EXEC_OBJECT_WRITE) {
usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
} else {
usage = DMA_RESV_USAGE_READ;
+ obj->write_domain = 0;
}
dma_fence_array_for_each(curr, idx, fence)
@@ -1941,7 +1953,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
- unbind_fence = i915_vma_resource_unbind(vma_res);
+ if (async)
+ unbind_fence = i915_vma_resource_unbind(vma_res,
+ &vma->obj->mm.tlb);
+ else
+ unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1966,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
i915_vma_detach(vma);
- if (!async && unbind_fence) {
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
- unbind_fence = NULL;
+ if (!async) {
+ if (unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 88ca0bd9c900..aecd9c64486b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -43,7 +43,7 @@
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+ const struct i915_gtt_view *view);
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)
@@ -160,7 +160,7 @@ static inline void i915_vma_put(struct i915_vma *vma)
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
ptrdiff_t cmp;
@@ -170,8 +170,8 @@ i915_vma_compare(struct i915_vma *vma,
if (cmp)
return cmp;
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
- cmp = vma->ggtt_view.type;
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
+ cmp = vma->gtt_view.type;
if (!view)
return cmp;
@@ -181,7 +181,7 @@ i915_vma_compare(struct i915_vma *vma,
assert_i915_gem_gtt_types();
- /* ggtt_view.type also encodes its size so that we both distinguish
+ /* gtt_view.type also encodes its size so that we both distinguish
* different views using it as a "type" and also use a compact (no
* accessing of uninitialised padding bytes) memcmp without storing
* an extra parameter or adding more code.
@@ -191,14 +191,14 @@ i915_vma_compare(struct i915_vma *vma,
* we assert above that all branches have the same address, and that
* each branch has a unique type/size.
*/
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
- BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
- BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
+ BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), remapped));
- return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
+ return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}
struct i915_vma_work *i915_vma_work(void);
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
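For context only (not part of the patch): as the comment in the hunk above notes, each i915_gtt_view_type value doubles as the size of its union member, so i915_vma_compare() can memcmp() exactly the payload that matters without an extra size parameter. A standalone sketch of that trick with hypothetical view types:

#include <string.h>

struct sketch_partial_info { unsigned int offset, size; };
struct sketch_rotation_info { unsigned int width, height, stride; };

enum sketch_view_type {
	SKETCH_VIEW_NORMAL  = 0,				/* nothing to compare */
	SKETCH_VIEW_PARTIAL = sizeof(struct sketch_partial_info),
	SKETCH_VIEW_ROTATED = sizeof(struct sketch_rotation_info),
};

struct sketch_view {
	enum sketch_view_type type;
	union {						/* members must have unique sizes */
		struct sketch_partial_info partial;
		struct sketch_rotation_info rotated;
	};
};

static int sketch_view_compare(const struct sketch_view *a, const struct sketch_view *b)
{
	if (a->type != b->type)
		return (int)a->type - (int)b->type;
	/* type == 0 compares nothing; otherwise it equals the payload size */
	return memcmp(&a->partial, &b->partial, a->type);
}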
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 27c55027387a..de1342dbfa12 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -216,6 +216,10 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
/**
* i915_vma_resource_unbind - Unbind a vma resource
* @vma_res: The vma resource to unbind.
+ * @tlb: pointer to vma->obj->mm.tlb associated with the resource,
+ * to be stored at vma_res->tlb. When non-NULL, it is used to do TLB
+ * cache invalidation before freeing the VMA resource. Used only for
+ * async unbind.
*
* At this point this function does little more than publish a fence that
* signals immediately unless signaling is held back.
@@ -223,10 +227,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
* Return: A refcounted pointer to a dma-fence that signals when unbinding is
* complete.
*/
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb)
{
struct i915_address_space *vm = vma_res->vm;
+ vma_res->tlb = tlb;
+
/* Reference for the sw fence */
i915_vma_resource_get(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 5d8427caa2ba..06923d1816e7 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -67,6 +67,7 @@ struct i915_page_sizes {
* taken when the unbind is scheduled.
* @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
* needs to be skipped for unbind.
+ * @tlb: pointer to obj->mm.tlb if this is an async unbind, otherwise NULL
*
* The lifetime of a struct i915_vma_resource is from a binding request until
* the possibly asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
bool immediate_unbind:1;
bool needs_wakeref:1;
bool skip_pte_rewrite:1;
+
+ u32 *tlb;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
void i915_vma_resource_free(struct i915_vma_resource *vma_res);
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb);
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
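For context only (not part of the patch): the new @tlb argument lets the caller choose between the synchronous path (NULL, with invalidation done immediately by the caller) and the asynchronous path, where the pointer is stashed on the resource so the deferred completion can record the invalidation seqno. A hypothetical sketch of that optional out-parameter pattern:

struct sketch_resource {
	unsigned int *tlb;	/* NULL for a synchronous unbind */
};

static void sketch_unbind(struct sketch_resource *res, unsigned int *tlb)
{
	res->tlb = tlb;		/* remembered for the deferred completion */
	/* ... publish the unbind fence ... */
}

static void sketch_unbind_complete(struct sketch_resource *res, unsigned int seqno)
{
	if (res->tlb)		/* async case: record the TLB invalidation seqno */
		*res->tlb = seqno;
}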
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index be6e028c3b57..ec0f6c9f57d0 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -67,30 +67,30 @@ enum i915_cache_level;
* Implementation and usage
*
* GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
+ * i915_gtt_view_type and struct i915_gtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
* added with the _ggtt_ infix, and sometimes with _view postfix to avoid
- * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * renaming in large amounts of code. They take the struct i915_gtt_view
* parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * globally const i915_gtt_view_normal singleton instance exists. All old core
* GEM API functions, the ones not taking the view parameter, are operating on,
* or with the normal GGTT view.
*
* Code wanting to add or use a new GGTT view needs to:
*
* 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 2. Extend the metadata in the i915_gtt_view structure if required.
* 3. Add support to i915_get_vma_pages().
*
* New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * i915_get_vma_pages function. This table is stored in the vma.gtt_view and
* exists for the lifetime of a VMA.
*
* Core API is designed to have copy semantics which means that passed in
- * struct i915_ggtt_view does not need to be persistent (left around after
+ * struct i915_gtt_view does not need to be persistent (left around after
* calling the core API functions).
*
*/
@@ -130,11 +130,11 @@ struct intel_partial_info {
unsigned int size;
} __packed;
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+enum i915_gtt_view_type {
+ I915_GTT_VIEW_NORMAL = 0,
+ I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
@@ -152,18 +152,18 @@ static inline void assert_i915_gem_gtt_types(void)
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
*/
- switch ((enum i915_ggtt_view_type)0) {
- case I915_GGTT_VIEW_NORMAL:
- case I915_GGTT_VIEW_PARTIAL:
- case I915_GGTT_VIEW_ROTATED:
- case I915_GGTT_VIEW_REMAPPED:
+ switch ((enum i915_gtt_view_type)0) {
+ case I915_GTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_PARTIAL:
+ case I915_GTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_REMAPPED:
/* gcc complains if these are identical cases */
break;
}
}
-struct i915_ggtt_view {
- enum i915_ggtt_view_type type;
+struct i915_gtt_view {
+ enum i915_gtt_view_type type;
union {
/* Members need to contain no holes/padding */
struct intel_partial_info partial;
@@ -280,11 +280,11 @@ struct i915_vma {
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
+ * i915_gtt_view_type is used to distinguish between those entries.
+ * The default one of zero (I915_GTT_VIEW_NORMAL) is default and also
* assumed in GEM functions which take no ggtt view parameter.
*/
- struct i915_ggtt_view ggtt_view;
+ struct i915_gtt_view gtt_view;
/** This object's place on the active/inactive lists */
struct list_head vm_link;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d98fbbd589aa..20575eb77ea7 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -88,46 +88,57 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_print_static(const struct intel_device_info *info,
- struct drm_printer *p)
+void intel_device_info_print(const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
+ struct drm_printer *p)
{
- if (info->graphics.rel)
- drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver,
- info->graphics.rel);
+ if (runtime->graphics.ip.rel)
+ drm_printf(p, "graphics version: %u.%02u\n",
+ runtime->graphics.ip.ver,
+ runtime->graphics.ip.rel);
else
- drm_printf(p, "graphics version: %u\n", info->graphics.ver);
+ drm_printf(p, "graphics version: %u\n",
+ runtime->graphics.ip.ver);
- if (info->media.rel)
- drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel);
+ if (runtime->media.ip.rel)
+ drm_printf(p, "media version: %u.%02u\n",
+ runtime->media.ip.ver,
+ runtime->media.ip.rel);
else
- drm_printf(p, "media version: %u\n", info->media.ver);
+ drm_printf(p, "media version: %u\n",
+ runtime->media.ip.ver);
- if (info->display.rel)
- drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel);
+ if (runtime->display.ip.rel)
+ drm_printf(p, "display version: %u.%02u\n",
+ runtime->display.ip.ver,
+ runtime->display.ip.rel);
else
- drm_printf(p, "display version: %u\n", info->display.ver);
+ drm_printf(p, "display version: %u\n",
+ runtime->display.ip.ver);
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: %x\n", info->memory_regions);
- drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+ drm_printf(p, "memory-regions: %x\n", runtime->memory_regions);
+ drm_printf(p, "page-sizes: %x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
- drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
- drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+ drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
+ drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+ drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-}
-void intel_device_info_print_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p)
-{
- drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
+ drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
+ drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
+ drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+
+ drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
#undef INTEL_VGA_DEVICE
@@ -364,55 +375,55 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
- info->display.pipe_mask = 0;
- info->display.cpu_transcoder_mask = 0;
- info->display.fbc_mask = 0;
+ runtime->pipe_mask = 0;
+ runtime->cpu_transcoder_mask = 0;
+ runtime->fbc_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
- info->display.pipe_mask &= ~BIT(PIPE_C);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ runtime->pipe_mask &= ~BIT(PIPE_C);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_A);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
- info->display.fbc_mask &= ~BIT(INTEL_FBC_A);
+ runtime->pipe_mask &= ~BIT(PIPE_A);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
}
if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_B);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ runtime->pipe_mask &= ~BIT(PIPE_B);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
}
if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_C);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ runtime->pipe_mask &= ~BIT(PIPE_C);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
if (DISPLAY_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
- info->display.pipe_mask &= ~BIT(PIPE_D);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ runtime->pipe_mask &= ~BIT(PIPE_D);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
}
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
- info->display.has_hdcp = 0;
+ runtime->has_hdcp = 0;
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
- info->display.fbc_mask = 0;
+ runtime->fbc_mask = 0;
if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
- info->display.has_dmc = 0;
+ runtime->has_dmc = 0;
if (DISPLAY_VER(dev_priv) >= 10 &&
(dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
- info->display.has_dsc = 0;
+ runtime->has_dsc = 0;
}
if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
- info->ppgtt_type = INTEL_PPGTT_NONE;
+ runtime->ppgtt_type = INTEL_PPGTT_NONE;
}
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
@@ -422,8 +433,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
memset(&info->display, 0, sizeof(info->display));
+
+ runtime->cpu_transcoder_mask = 0;
memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites));
memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers));
+ runtime->fbc_mask = 0;
+ runtime->has_hdcp = false;
+ runtime->has_dmc = false;
+ runtime->has_dsc = false;
}
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 23bf230aa104..d638235e1d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -37,6 +37,7 @@
struct drm_printer;
struct drm_i915_private;
+struct intel_gt_definition;
/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
@@ -164,7 +165,6 @@ enum intel_ppgtt_type {
func(has_media_ratio_mode); \
func(has_mslice_steering); \
func(has_one_eu_per_fuse_bit); \
- func(has_pooled_eu); \
func(has_pxp); \
func(has_rc6); \
func(has_rc6p); \
@@ -172,6 +172,7 @@ enum intel_ppgtt_type {
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
+ func(tuning_thread_rr_after_dep); \
func(unfenced_needs_alignment); \
func(hws_needs_physical);
@@ -179,14 +180,11 @@ enum intel_ppgtt_type {
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
func(has_cdclk_crawl); \
- func(has_dmc); \
func(has_ddi); \
func(has_dp_mst); \
func(has_dsb); \
- func(has_dsc); \
func(has_fpga_dbg); \
func(has_gmch); \
- func(has_hdcp); \
func(has_hotplug); \
func(has_hti); \
func(has_ipc); \
@@ -202,23 +200,67 @@ struct ip_version {
u8 rel;
};
-struct intel_device_info {
- struct ip_version graphics;
- struct ip_version media;
+struct intel_runtime_info {
+ struct {
+ struct ip_version ip;
+ } graphics;
+ struct {
+ struct ip_version ip;
+ } media;
+ struct {
+ struct ip_version ip;
+ } display;
+
+ /*
+ * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+ * single runtime conditionals, and also to provide groundwork for
+ * future per-platform or per-SKU build optimizations.
+ *
+ * Array can be extended when necessary if the corresponding
+ * BUILD_BUG_ON is hit.
+ */
+ u32 platform_mask[2];
+
+ u16 device_id;
intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
- enum intel_platform platform;
+ u32 rawclk_freq;
- unsigned int dma_mask_size; /* available DMA address bits */
+ struct intel_step_info step;
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
- unsigned int page_sizes; /* page sizes supported by the HW */
-
u32 memory_regions; /* regions supported by the HW */
+ bool has_pooled_eu;
+
+ /* display */
+ struct {
+ u8 pipe_mask;
+ u8 cpu_transcoder_mask;
+
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ u8 fbc_mask;
+
+ bool has_hdcp;
+ bool has_dmc;
+ bool has_dsc;
+ };
+};
+
+struct intel_device_info {
+ enum intel_platform platform;
+
+ unsigned int dma_mask_size; /* available DMA address bits */
+
+ const struct intel_gt_definition *extra_gt_list;
+
u8 gt; /* GT number, 0 if undefined */
#define DEFINE_FLAG(name) u8 name:1
@@ -226,12 +268,6 @@ struct intel_device_info {
#undef DEFINE_FLAG
struct {
- u8 ver;
- u8 rel;
-
- u8 pipe_mask;
- u8 cpu_transcoder_mask;
- u8 fbc_mask;
u8 abox_mask;
struct {
@@ -258,27 +294,11 @@ struct intel_device_info {
u32 gamma_lut_tests;
} color;
} display;
-};
-struct intel_runtime_info {
/*
- * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
- * into single runtime conditionals, and also to provide groundwork
- * for future per platform, or per SKU build optimizations.
- *
- * Array can be extended when necessary if the corresponding
- * BUILD_BUG_ON is hit.
+ * Initial runtime info. Do not access outside of i915_driver_create().
*/
- u32 platform_mask[2];
-
- u16 device_id;
-
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- u32 rawclk_freq;
-
- struct intel_step_info step;
+ const struct intel_runtime_info __runtime;
};
struct intel_driver_caps {
@@ -291,10 +311,9 @@ const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_print_static(const struct intel_device_info *info,
- struct drm_printer *p);
-void intel_device_info_print_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p);
+void intel_device_info_print(const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
+ struct drm_printer *p);
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 437447119770..2403ccd52c74 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -466,6 +466,43 @@ static int gen12_get_dram_info(struct drm_i915_private *i915)
return icl_pcode_read_mem_global_info(i915);
}
+static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+{
+ u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
+ struct dram_info *dram_info = &i915->dram_info;
+
+ val = REG_FIELD_GET(MTL_DDR_TYPE_MASK, val);
+ switch (val) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val);
+ return -EINVAL;
+ }
+
+ dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val);
+ dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
+ /* PSF GV points not supported in D14+ */
+
+ return 0;
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
@@ -480,7 +517,9 @@ void intel_dram_detect(struct drm_i915_private *i915)
*/
dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
- if (GRAPHICS_VER(i915) >= 12)
+ if (DISPLAY_VER(i915) >= 14)
+ ret = xelpdp_get_dram_info(i915);
+ else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915);
else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
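For context only (not part of the patch): xelpdp_get_dram_info() above decodes the fields added to i915_reg.h earlier in this series (DDR type in bits 3:0, populated channels in 7:4, enabled QGV points in 11:8). A hypothetical userspace sketch of that decode, reading each field from the raw register word:

#define SKETCH_FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define SKETCH_DDR_TYPE_MASK		0x0000000fu	/* bits 3:0 */
#define SKETCH_POPULATED_CH_MASK	0x000000f0u	/* bits 7:4 */
#define SKETCH_QGV_POINTS_MASK		0x00000f00u	/* bits 11:8 */

struct sketch_mem_info {
	unsigned int ddr_type, num_channels, num_qgv_points;
};

static struct sketch_mem_info sketch_decode_mem_ss_info(unsigned int reg)
{
	struct sketch_mem_info m;

	m.ddr_type       = SKETCH_FIELD_GET(SKETCH_DDR_TYPE_MASK, reg);
	m.num_channels   = SKETCH_FIELD_GET(SKETCH_POPULATED_CH_MASK, reg);
	m.num_qgv_points = SKETCH_FIELD_GET(SKETCH_QGV_POINTS_MASK, reg);
	return m;
}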
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 157e166672d7..8279dc580a3e 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -4,6 +4,7 @@
*/
#include "display/intel_audio_regs.h"
+#include "display/intel_backlight_regs.h"
#include "display/intel_dmc_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
@@ -1076,7 +1077,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
index 2aad2f0cc8db..ffc702b79579 100644
--- a/drivers/gpu/drm/i915/intel_mchbar_regs.h
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -196,6 +196,9 @@
#define RP1_CAP_MASK REG_GENMASK(15, 8)
#define RPN_CAP_MASK REG_GENMASK(23, 16)
+#define GEN10_FREQ_INFO_REC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
+#define RPE_MASK REG_GENMASK(15, 8)
+
/* snb MCH registers for priority tuning */
#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define SSKPD_NEW_WM0_MASK_HSW REG_GENMASK64(63, 56)
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 0fec25be146a..ba9843cb1b13 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -138,6 +138,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
+ case INTEL_PCH_MTP_DEVICE_ID_TYPE:
+ case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
+ return PCH_MTP;
default:
return PCH_NONE;
}
@@ -166,7 +171,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ if (IS_METEORLAKE(dev_priv))
+ id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
+ else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 7c8ce9781d1a..32aff5a70d04 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -25,6 +25,7 @@ enum intel_pch {
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
+ PCH_MTP, /* Meteor Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
@@ -57,12 +58,15 @@ enum intel_pch {
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
+#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
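For context only (not part of the patch): PCH detection compares the ISA bridge's PCI device ID, masked down to its type bits, against the *_DEVICE_ID_TYPE constants, so 0x7E00 and 0xAE00 cover the Meteor Lake PCH ID ranges added above. A hedged sketch of that matching step; the 0xff80 mask here is an assumption for illustration, not taken from this diff:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PCH_DEVICE_ID_MASK	0xff80u	/* assumed type mask, for illustration */
#define SKETCH_PCH_MTP_DEVICE_ID	0x7E00u
#define SKETCH_PCH_MTP2_DEVICE_ID	0xAE00u

static bool sketch_is_mtp_pch(uint16_t pci_device_id)
{
	uint16_t type = pci_device_id & SKETCH_PCH_DEVICE_ID_MASK;

	return type == SKETCH_PCH_MTP_DEVICE_ID || type == SKETCH_PCH_MTP2_DEVICE_ID;
}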
diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h
index 12cd9d4f23de..4977a524ce6f 100644
--- a/drivers/gpu/drm/i915/intel_pci_config.h
+++ b/drivers/gpu/drm/i915/intel_pci_config.h
@@ -6,6 +6,13 @@
#ifndef __INTEL_PCI_CONFIG_H__
#define __INTEL_PCI_CONFIG_H__
+/* PCI BARs */
+#define GTTMMADR_BAR 0
+#define GEN2_GTTMMADR_BAR 1
+#define GFXMEM_BAR 2
+#define GTT_APERTURE_BAR GFXMEM_BAR
+#define GEN12_LMEM_BAR GFXMEM_BAR
+
/* BSM in include/drm/i915_drm.h */
#define MCHBAR_I915 0x44
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f06babdb3a8c..8f86f56e7ca4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,61 +25,22 @@
*
*/
-#include <linux/module.h>
-#include <linux/string_helpers.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
-
-#include "display/intel_atomic.h"
-#include "display/intel_atomic_plane.h"
-#include "display/intel_bw.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
-#include "display/intel_display_types.h"
-#include "display/intel_fb.h"
-#include "display/intel_fbc.h"
-#include "display/intel_sprite.h"
-#include "display/skl_universal_plane.h"
+#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
-#include "gt/intel_llc.h"
#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_irq.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
#include "intel_pm.h"
#include "vlv_sideband.h"
-#include "../../../platform/x86/intel_ips.h"
-
-static void skl_sagv_disable(struct drm_i915_private *dev_priv);
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
@@ -469,13 +430,13 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
ret = _intel_set_memory_cxsr(dev_priv, enable);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->wm.vlv.cxsr = enable;
+ dev_priv->display.wm.vlv.cxsr = enable;
else if (IS_G4X(dev_priv))
- dev_priv->wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ dev_priv->display.wm.g4x.cxsr = enable;
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
return ret;
}
@@ -835,11 +796,11 @@ static bool is_enabling(int old, int new, int threshold)
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
- return dev_priv->wm.max_level + 1;
+ return dev_priv->display.wm.max_level + 1;
}
-static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -1094,11 +1055,11 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
/* all latencies in usec */
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+ dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -1151,7 +1112,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1325,7 +1286,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (level > dev_priv->wm.max_level)
+ if (level > dev_priv->display.wm.max_level)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1584,7 +1545,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
struct g4x_wm_values new_wm = {};
g4x_merge_wm(dev_priv, &new_wm);
@@ -1610,10 +1571,10 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
@@ -1626,10 +1587,10 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/* latency must be in 0.1us units. */
@@ -1651,15 +1612,15 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
/* all latencies in usec */
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
}
}
@@ -1673,7 +1634,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->wm.pri_latency[level] == 0)
+ if (dev_priv->display.wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1694,7 +1655,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->wm.pri_latency[level] * 10);
+ dev_priv->display.wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -2159,7 +2120,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->wm.max_level;
+ wm->level = dev_priv->display.wm.max_level;
wm->cxsr = true;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2198,7 +2159,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
struct vlv_wm_values new_wm = {};
vlv_merge_wm(dev_priv, &new_wm);
@@ -2236,10 +2197,10 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
@@ -2252,10 +2213,10 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void i965_update_wm(struct drm_i915_private *dev_priv)
@@ -2836,9 +2797,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->wm.pri_latency[level];
- u16 spr_latency = dev_priv->wm.spr_latency[level];
- u16 cur_latency = dev_priv->wm.cur_latency[level];
+ u16 pri_latency = dev_priv->display.wm.pri_latency[level];
+ u16 spr_latency = dev_priv->display.wm.spr_latency[level];
+ u16 cur_latency = dev_priv->display.wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2862,119 +2823,43 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[])
+static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (DISPLAY_VER(dev_priv) >= 9) {
- u32 val;
- int ret, i;
- int level, max_level = ilk_wm_max_level(dev_priv);
- int mult = IS_DG2(dev_priv) ? 2 : 1;
+ u64 sskpd;
- /* read the first set of memory latencies[0:3] */
- val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
- &val, NULL);
+ sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
- if (ret) {
- drm_err(&dev_priv->drm,
- "SKL Mailbox read error = %d\n", ret);
- return;
- }
-
- wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
-
- /* read the second set of memory latencies[4:7] */
- val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
- &val, NULL);
- if (ret) {
- drm_err(&dev_priv->drm,
- "SKL Mailbox read error = %d\n", ret);
- return;
- }
-
- wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
+ if (wm[0] == 0)
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
+}
- /*
- * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
- * need to be disabled. We make sure to sanitize the values out
- * of the punit to satisfy this requirement.
- */
- for (level = 1; level <= max_level; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
- wm[i] = 0;
+static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 sskpd;
- max_level = level - 1;
+ sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
- break;
- }
- }
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
+}
- /*
- * WaWmMemoryReadLatency
- *
- * punit doesn't take into account the read latency so we need
- * to add proper adjustement to each valid level we retrieve
- * from the punit when level 0 response data is 0us.
- */
- if (wm[0] == 0) {
- u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
+static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 mltr;
- for (level = 0; level <= max_level; level++)
- wm[level] += adjust;
- }
+ mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
- /*
- * WA Level-0 adjustment for 16GB DIMMs: SKL+
- * If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get Dimm info assume 16GB dimm
- * to avoid any underrun.
- */
- if (dev_priv->dram_info.wm_lv_0_adjust_needed)
- wm[0] += 1;
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
- if (wm[0] == 0)
- wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
- wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
- wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
- wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
- wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
- } else if (DISPLAY_VER(dev_priv) >= 6) {
- u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
- wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
- wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
- wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
- } else if (DISPLAY_VER(dev_priv) >= 5) {
- u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
-
- /* ILK primary LP0 latency is 700 ns */
- wm[0] = 7;
- wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
- wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
- } else {
- MISSING_CASE(INTEL_DEVID(dev_priv));
- }
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
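
Note: the hunk above replaces the single intel_read_wm_latency() with per-platform helpers; on HSW/BDW and SNB the latencies are plain bitfields packed into SSKPD, on ILK into MLTR. Below is a minimal standalone sketch of that decode pattern. The FIELD_GET macro and the mask values are illustrative stand-ins, not the kernel's REG_FIELD_GET()/SSKPD definitions.

/*
 * Sketch only: decode per-level latencies from a packed 64-bit
 * register value, in the spirit of the REG_FIELD_GET() usage above.
 * Masks and the register contents are made-up example numbers.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(mask, val)  (((val) & (mask)) >> __builtin_ctzll(mask))

int main(void)
{
	uint64_t sskpd = 0x0000000c0a080604ull;	/* example register contents */
	uint16_t wm[5];

	wm[0] = FIELD_GET(0x00000000000000ffull, sskpd);
	wm[1] = FIELD_GET(0x000000000000ff00ull, sskpd);
	wm[2] = FIELD_GET(0x0000000000ff0000ull, sskpd);
	wm[3] = FIELD_GET(0x00000000ff000000ull, sskpd);
	wm[4] = FIELD_GET(0x000000ff00000000ull, sskpd);

	for (int i = 0; i < 5; i++)
		printf("wm[%d] = %u\n", i, wm[i]);

	return 0;
}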
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
@@ -3008,9 +2893,8 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
return 2;
}
-static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name,
- const u16 wm[])
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -3062,18 +2946,18 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
if (!changed)
return;
drm_dbg_kms(&dev_priv->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
@@ -3089,37 +2973,42 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->wm.pri_latency[3] == 0 &&
- dev_priv->wm.spr_latency[3] == 0 &&
- dev_priv->wm.cur_latency[3] == 0)
+ if (dev_priv->display.wm.pri_latency[3] == 0 &&
+ dev_priv->display.wm.spr_latency[3] == 0 &&
+ dev_priv->display.wm.cur_latency[3] == 0)
return;
- dev_priv->wm.pri_latency[3] = 0;
- dev_priv->wm.spr_latency[3] = 0;
- dev_priv->wm.cur_latency[3] = 0;
+ dev_priv->display.wm.pri_latency[3] = 0;
+ dev_priv->display.wm.spr_latency[3] = 0;
+ dev_priv->display.wm.cur_latency[3] = 0;
drm_dbg_kms(&dev_priv->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else if (DISPLAY_VER(dev_priv) >= 6)
+ snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else
+ ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
- sizeof(dev_priv->wm.pri_latency));
- memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
- sizeof(dev_priv->wm.pri_latency));
+ memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
if (DISPLAY_VER(dev_priv) == 6) {
snb_wm_latency_quirk(dev_priv);
@@ -3127,12 +3016,6 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
-}
-
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
@@ -3387,7 +3270,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
- return dev_priv->wm.pri_latency[level];
+ return dev_priv->display.wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
@@ -3539,7 +3422,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
@@ -3573,7 +3456,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
unsigned int dirty;
u32 val;
@@ -3635,7 +3518,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->wm.hw = *results;
+ dev_priv->display.wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
@@ -3643,2765 +3526,6 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
-{
- u8 enabled_slices = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(dev_priv, slice) {
- if (intel_uncore_read(&dev_priv->uncore,
- DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
- enabled_slices |= BIT(slice);
- }
-
- return enabled_slices;
-}
-
-/*
- * FIXME: We still don't have the proper code detect if we need to apply the WA,
- * so assume we'll always need it in order to avoid underruns.
- */
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) == 9;
-}
-
-static bool
-intel_has_sagv(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
- dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
-}
-
-static u32
-intel_sagv_block_time(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 12) {
- u32 val = 0;
- int ret;
-
- ret = snb_pcode_read(&dev_priv->uncore,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n");
- return 0;
- }
-
- return val;
- } else if (DISPLAY_VER(dev_priv) == 11) {
- return 10;
- } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) {
- return 30;
- } else {
- return 0;
- }
-}
-
-static void intel_sagv_init(struct drm_i915_private *i915)
-{
- if (!intel_has_sagv(i915))
- i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
-
- /*
- * Probe to see if we have working SAGV control.
- * For icl+ this was already determined by intel_bw_init_hw().
- */
- if (DISPLAY_VER(i915) < 11)
- skl_sagv_disable(i915);
-
- drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
-
- i915->sagv_block_time_us = intel_sagv_block_time(i915);
-
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
-
- /* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
- "Excessive SAGV block time %u, ignoring\n",
- i915->sagv_block_time_us))
- i915->sagv_block_time_us = 0;
-
- if (!intel_has_sagv(i915))
- i915->sagv_block_time_us = 0;
-}
-
-/*
- * SAGV dynamically adjusts the system agent voltage and clock frequencies
- * depending on power and performance requirements. The display engine access
- * to system memory is blocked during the adjustment time. Because of the
- * blocking time, having this enabled can cause full system hangs and/or pipe
- * underruns if we don't meet all of the following requirements:
- *
- * - <= 1 pipe enabled
- * - All planes can enable watermarks for latencies >= SAGV engine block time
- * - We're not using an interlaced display configuration
- */
-static void skl_sagv_enable(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (!intel_has_sagv(dev_priv))
- return;
-
- if (dev_priv->sagv_status == I915_SAGV_ENABLED)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
-
- /* We don't need to wait for SAGV when enabling */
-
- /*
- * Some skl systems, pre-release machines in particular,
- * don't actually have SAGV.
- */
- if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return;
- } else if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
- return;
- }
-
- dev_priv->sagv_status = I915_SAGV_ENABLED;
-}
-
-static void skl_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (!intel_has_sagv(dev_priv))
- return;
-
- if (dev_priv->sagv_status == I915_SAGV_DISABLED)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
- /* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
- 1);
- /*
- * Some skl systems, pre-release machines in particular,
- * don't actually have SAGV.
- */
- if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return;
- } else if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
- return;
- }
-
- dev_priv->sagv_status = I915_SAGV_DISABLED;
-}
-
-static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
-}
-
-static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
-}
-
-static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask;
- new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Restrict required qgv points before updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
-}
-
-static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- new_mask = new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
-}
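
Note: the comments in icl_sagv_pre/post_plane_update() above describe a strict ordering: restrict QGV points to the union of the old and new masks before touching the hardware, then relax to only the new mask afterwards, never masking and unmasking in one step. A minimal standalone model of that ordering follows; restrict_points() is a hypothetical stand-in for the pcode call and only records the last mask applied.

/* Sketch of the "mask first, relax after" ordering used for QGV points. */
#include <stdint.h>
#include <stdio.h>

static uint16_t current_mask;

static void restrict_points(uint16_t mask)
{
	current_mask = mask;
	printf("restricted QGV points: 0x%x\n", mask);
}

int main(void)
{
	uint16_t old_mask = 0x3;	/* points rejected by the old state */
	uint16_t new_mask = 0x5;	/* points rejected by the new state */

	/* pre plane update: never allow more than either state permits */
	restrict_points(old_mask | new_mask);

	/* ... hardware reprogramming would happen here ... */

	/* post plane update: relax to what the new state needs */
	restrict_points(new_mask);

	return 0;
}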
-
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from situation when we have SAGV but just can't
- * afford it due to DBuf limitation - in case if SAGV is completely
- * disabled in a BIOS, we are not even allowed to send a PCode request,
- * as it will throw an error. So have to check it here.
- */
- if (!intel_has_sagv(i915))
- return;
-
- if (DISPLAY_VER(i915) >= 11)
- icl_sagv_pre_plane_update(state);
- else
- skl_sagv_pre_plane_update(state);
-}
-
-void intel_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from situation when we have SAGV but just can't
- * afford it due to DBuf limitation - in case if SAGV is completely
- * disabled in a BIOS, we are not even allowed to send a PCode request,
- * as it will throw an error. So have to check it here.
- */
- if (!intel_has_sagv(i915))
- return;
-
- if (DISPLAY_VER(i915) >= 11)
- icl_sagv_post_plane_update(state);
- else
- skl_sagv_post_plane_update(state);
-}
-
-static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane_id plane_id;
- int max_level = INT_MAX;
-
- if (!intel_has_sagv(dev_priv))
- return false;
-
- if (!crtc_state->hw.active)
- return true;
-
- if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
- return false;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
- int level;
-
- /* Skip this plane if it's not enabled */
- if (!wm->wm[0].enable)
- continue;
-
- /* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev_priv);
- !wm->wm[level].enable; --level)
- { }
-
- /* Highest common enabled wm level for all planes */
- max_level = min(level, max_level);
- }
-
- /* No enabled planes? */
- if (max_level == INT_MAX)
- return true;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- /*
- * All enabled planes must have enabled a common wm level that
- * can tolerate memory latencies higher than sagv_block_time_us
- */
- if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
- return false;
- }
-
- return true;
-}
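
Note: skl_crtc_can_enable_sagv() above encodes the SAGV rule spelled out in the block comment earlier in this hunk: every enabled plane must share a common enabled watermark level, and that level must tolerate the SAGV block time. The standalone model below reproduces that two-pass check with simplified stand-in types; the plane array and level count are not the kernel structures.

/* Sketch: highest common enabled wm level must have can_sagv set. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_LEVEL  7
#define NUM_PLANES 3

struct plane_wm {
	bool enable[MAX_LEVEL + 1];	/* wm[level].enable */
	bool can_sagv[MAX_LEVEL + 1];	/* wm[level].can_sagv */
};

static bool can_enable_sagv(const struct plane_wm planes[NUM_PLANES])
{
	int max_level = MAX_LEVEL + 1;	/* "no planes yet" sentinel */

	for (int p = 0; p < NUM_PLANES; p++) {
		int level;

		if (!planes[p].enable[0])
			continue;	/* plane disabled */

		/* highest enabled wm level for this plane */
		for (level = MAX_LEVEL; !planes[p].enable[level]; level--)
			;
		if (level < max_level)
			max_level = level;
	}

	if (max_level > MAX_LEVEL)
		return true;	/* no enabled planes */

	for (int p = 0; p < NUM_PLANES; p++)
		if (planes[p].enable[0] && !planes[p].can_sagv[max_level])
			return false;

	return true;
}

int main(void)
{
	struct plane_wm planes[NUM_PLANES] = {
		{ .enable = { true, true, true }, .can_sagv = { true, true, true } },
		{ .enable = { true, true },       .can_sagv = { true, true } },
		{ { false } },
	};

	printf("SAGV allowed: %d\n", can_enable_sagv(planes));
	return 0;
}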
-
-static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum plane_id plane_id;
-
- if (!crtc_state->hw.active)
- return true;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (wm->wm[0].enable && !wm->sagv.wm0.enable)
- return false;
- }
-
- return true;
-}
-
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(dev_priv) >= 12)
- return tgl_crtc_can_enable_sagv(crtc_state);
- else
- return skl_crtc_can_enable_sagv(crtc_state);
-}
-
-bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
- const struct intel_bw_state *bw_state)
-{
- if (DISPLAY_VER(dev_priv) < 11 &&
- bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
- return false;
-
- return bw_state->pipe_sagv_reject == 0;
-}
-
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
- intel_can_enable_sagv(dev_priv, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case)
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
- DISPLAY_VER(dev_priv) >= 12 &&
- intel_can_enable_sagv(dev_priv, new_bw_state);
- }
-
- return 0;
-}
-
-static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
- u16 start, u16 end)
-{
- entry->start = start;
- entry->end = end;
-
- return end;
-}
-
-static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
-{
- return INTEL_INFO(dev_priv)->display.dbuf.size /
- hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
-}
-
-static void
-skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
- struct skl_ddb_entry *ddb)
-{
- int slice_size = intel_dbuf_slice_size(dev_priv);
-
- if (!slice_mask) {
- ddb->start = 0;
- ddb->end = 0;
- return;
- }
-
- ddb->start = (ffs(slice_mask) - 1) * slice_size;
- ddb->end = fls(slice_mask) * slice_size;
-
- WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
-}
-
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
-{
- struct skl_ddb_entry ddb;
-
- if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
- slice_mask = BIT(DBUF_S1);
- else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
- slice_mask = BIT(DBUF_S3);
-
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
-
- return ddb.start;
-}
-
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry *entry)
-{
- int slice_size = intel_dbuf_slice_size(dev_priv);
- enum dbuf_slice start_slice, end_slice;
- u8 slice_mask = 0;
-
- if (!skl_ddb_entry_size(entry))
- return 0;
-
- start_slice = entry->start / slice_size;
- end_slice = (entry->end - 1) / slice_size;
-
- /*
- * Per plane DDB entry can in a really worst case be on multiple slices
- * but single entry is anyway contigious.
- */
- while (start_slice <= end_slice) {
- slice_mask |= BIT(start_slice);
- start_slice++;
- }
-
- return slice_mask;
-}
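
Note: skl_ddb_dbuf_slice_mask() above maps a DDB block range back onto the (contiguous) set of slices it occupies. A standalone sketch of that mapping, with a made-up slice size and range:

/* Sketch: which slices does a [start, end) block range touch? */
#include <stdint.h>
#include <stdio.h>

static uint8_t ddb_to_slice_mask(uint16_t start, uint16_t end, int slice_size)
{
	uint8_t mask = 0;

	if (end <= start)
		return 0;

	for (int slice = start / slice_size; slice <= (end - 1) / slice_size; slice++)
		mask |= 1u << slice;

	return mask;
}

int main(void)
{
	/* e.g. a DBuf split into 512-block slices */
	printf("mask = 0x%x\n", ddb_to_slice_mask(300, 900, 512));	/* slices 0 and 1 */
	return 0;
}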
-
-static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
-{
- const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int hdisplay, vdisplay;
-
- if (!crtc_state->hw.active)
- return 0;
-
- /*
- * Watermark/ddb requirement highly depends upon width of the
- * framebuffer, So instead of allocating DDB equally among pipes
- * distribute DDB based on resolution/width of the display.
- */
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
-
- return hdisplay;
-}
-
-static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
- enum pipe for_pipe,
- unsigned int *weight_start,
- unsigned int *weight_end,
- unsigned int *weight_total)
-{
- struct drm_i915_private *dev_priv =
- to_i915(dbuf_state->base.state->base.dev);
- enum pipe pipe;
-
- *weight_start = 0;
- *weight_end = 0;
- *weight_total = 0;
-
- for_each_pipe(dev_priv, pipe) {
- int weight = dbuf_state->weight[pipe];
-
- /*
- * Do not account pipes using other slice sets
- * luckily as of current BSpec slice sets do not partially
- * intersect(pipes share either same one slice or same slice set
- * i.e no partial intersection), so it is enough to check for
- * equality for now.
- */
- if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
- continue;
-
- *weight_total += weight;
- if (pipe < for_pipe) {
- *weight_start += weight;
- *weight_end += weight;
- } else if (pipe == for_pipe) {
- *weight_end += weight;
- }
- }
-}
-
-static int
-skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- unsigned int weight_total, weight_start, weight_end;
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- struct intel_crtc_state *crtc_state;
- struct skl_ddb_entry ddb_slices;
- enum pipe pipe = crtc->pipe;
- unsigned int mbus_offset = 0;
- u32 ddb_range_size;
- u32 dbuf_slice_mask;
- u32 start, end;
- int ret;
-
- if (new_dbuf_state->weight[pipe] == 0) {
- skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
- goto out;
- }
-
- dbuf_slice_mask = new_dbuf_state->slices[pipe];
-
- skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
- ddb_range_size = skl_ddb_entry_size(&ddb_slices);
-
- intel_crtc_dbuf_weights(new_dbuf_state, pipe,
- &weight_start, &weight_end, &weight_total);
-
- start = ddb_range_size * weight_start / weight_total;
- end = ddb_range_size * weight_end / weight_total;
-
- skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
- ddb_slices.start - mbus_offset + start,
- ddb_slices.start - mbus_offset + end);
-
-out:
- if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
- skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
- &new_dbuf_state->ddb[pipe]))
- return 0;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- /*
- * Used for checking overlaps, so we need absolute
- * offsets instead of MBUS relative offsets.
- */
- crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
- crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
- crtc->base.base.id, crtc->base.name,
- old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
- old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
- new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
- old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
-
- return 0;
-}
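
Note: intel_crtc_ddb_weight()/skl_crtc_allocate_ddb() above split the shared DBuf range between pipes in proportion to their hdisplay width, as the comment in intel_crtc_ddb_weight() explains. The standalone model below performs the same proportional split; pipe count, weights and the range size are example numbers only.

/* Sketch: divide a DDB range between pipes by relative weight. */
#include <stdio.h>

#define NUM_PIPES 3

int main(void)
{
	unsigned int weight[NUM_PIPES] = { 1920, 3840, 1280 };	/* hdisplay widths */
	unsigned int ddb_size = 2048;				/* blocks in the shared range */
	unsigned int total = 0, running = 0;

	for (int pipe = 0; pipe < NUM_PIPES; pipe++)
		total += weight[pipe];

	for (int pipe = 0; pipe < NUM_PIPES; pipe++) {
		unsigned int start = ddb_size * running / total;
		unsigned int end = ddb_size * (running + weight[pipe]) / total;

		printf("pipe %d: ddb [%u, %u)\n", pipe, start, end);
		running += weight[pipe];
	}

	return 0;
}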
-
-static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
- int width, const struct drm_format_info *format,
- u64 modifier, unsigned int rotation,
- u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane);
-
-static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- int level,
- unsigned int latency,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */);
-
-static unsigned int
-skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
- int num_active)
-{
- struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct skl_wm_level wm = {};
- int ret, min_ddb_alloc = 0;
- struct skl_wm_params wp;
-
- ret = skl_compute_wm_params(crtc_state, 256,
- drm_format_info(DRM_FORMAT_ARGB8888),
- DRM_FORMAT_MOD_LINEAR,
- DRM_MODE_ROTATE_0,
- crtc_state->pixel_rate, &wp, 0);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = dev_priv->wm.skl_latency[level];
-
- skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
- if (wm.min_ddb_alloc == U16_MAX)
- break;
-
- min_ddb_alloc = wm.min_ddb_alloc;
- }
-
- return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
-}
-
-static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
-{
- skl_ddb_entry_init(entry,
- REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
- REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
- if (entry->end)
- entry->end++;
-}
-
-static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
- const enum pipe pipe,
- const enum plane_id plane_id,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
-{
- u32 val;
-
- /* Cursor doesn't support NV12/planar, so no extra calculation needed */
- if (plane_id == PLANE_CURSOR) {
- val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(ddb, val);
- return;
- }
-
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb, val);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- return;
-
- val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb_y, val);
-}
-
-static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_display_power_domain power_domain;
- enum pipe pipe = crtc->pipe;
- intel_wakeref_t wakeref;
- enum plane_id plane_id;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id,
- &ddb[plane_id],
- &ddb_y[plane_id]);
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-}
-
-struct dbuf_slice_conf_entry {
- u8 active_pipes;
- u8 dbuf_mask[I915_MAX_PIPES];
- bool join_mbus;
-};
-
-/*
- * Table taken from Bspec 12716
- * Pipes do have some preferred DBuf slice affinity,
- * plus there are some hardcoded requirements on how
- * those should be distributed for multipipe scenarios.
- * For more DBuf slices algorithm can get even more messy
- * and less readable, so decided to use a table almost
- * as is from BSpec itself - that way it is at least easier
- * to compare, change and check.
- */
-static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
-/* Autogenerated with igt/tools/intel_dbuf_map tool: */
-{
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {}
-};
-
-/*
- * Table taken from Bspec 49255
- * Pipes do have some preferred DBuf slice affinity,
- * plus there are some hardcoded requirements on how
- * those should be distributed for multipipe scenarios.
- * For more DBuf slices algorithm can get even more messy
- * and less readable, so decided to use a table almost
- * as is from BSpec itself - that way it is at least easier
- * to compare, change and check.
- */
-static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
-/* Autogenerated with igt/tools/intel_dbuf_map tool: */
-{
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {}
-};
-
-static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {}
-};
-
-static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
- /*
- * Keep the join_mbus cases first so check_mbus_joined()
- * will prefer them over the !join_mbus cases.
- */
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = true,
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = true,
- },
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- .join_mbus = false,
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = false,
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {}
-
-};
-
-static bool check_mbus_joined(u8 active_pipes,
- const struct dbuf_slice_conf_entry *dbuf_slices)
-{
- int i;
-
- for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
- return dbuf_slices[i].join_mbus;
- }
- return false;
-}
-
-static bool adlp_check_mbus_joined(u8 active_pipes)
-{
- return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
-}
-
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
- const struct dbuf_slice_conf_entry *dbuf_slices)
-{
- int i;
-
- for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes &&
- dbuf_slices[i].join_mbus == join_mbus)
- return dbuf_slices[i].dbuf_mask[pipe];
- }
- return 0;
-}
-
-/*
- * This function finds an entry with same enabled pipe configuration and
- * returns correspondent DBuf slice mask as stated in BSpec for particular
- * platform.
- */
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- /*
- * FIXME: For ICL this is still a bit unclear as prev BSpec revision
- * required calculating "pipe ratio" in order to determine
- * if one or two slices can be used for single pipe configurations
- * as additional constraint to the existing table.
- * However based on recent info, it should be not "pipe ratio"
- * but rather ratio between pixel_rate and cdclk with additional
- * constants, so for now we are using only table until this is
- * clarified. Also this is the reason why crtc_state param is
- * still here - we will need it once those additional constraints
- * pop up.
- */
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- icl_allowed_dbufs);
-}
-
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- tgl_allowed_dbufs);
-}
-
-static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- adlp_allowed_dbufs);
-}
-
-static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- dg2_allowed_dbufs);
-}
-
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- /*
- * For anything else just return one slice yet.
- * Should be extended for other platforms.
- */
- return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
-}
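
Note: as the comments above the icl/tgl/adlp/dg2 tables say, the DBuf slice assignment is deliberately kept as a near-verbatim BSpec table, and compute_dbuf_slices() is just a lookup keyed on the set of active pipes. A standalone sketch of that lookup follows; the two-pipe table below is an invented example, not one of the BSpec tables.

/* Sketch: table lookup from active-pipe mask to per-pipe slice mask. */
#include <stdint.h>
#include <stdio.h>

#define PIPE_A 0
#define PIPE_B 1
#define MAX_PIPES 2

struct dbuf_entry {
	uint8_t active_pipes;
	uint8_t dbuf_mask[MAX_PIPES];
};

static const struct dbuf_entry example_dbufs[] = {
	{ .active_pipes = 1 << PIPE_A, .dbuf_mask = { [PIPE_A] = 0x3 } },
	{ .active_pipes = (1 << PIPE_A) | (1 << PIPE_B),
	  .dbuf_mask = { [PIPE_A] = 0x1, [PIPE_B] = 0x2 } },
	{}
};

static uint8_t lookup_slices(int pipe, uint8_t active_pipes)
{
	for (int i = 0; example_dbufs[i].active_pipes; i++)
		if (example_dbufs[i].active_pipes == active_pipes)
			return example_dbufs[i].dbuf_mask[pipe];

	return 0;
}

int main(void)
{
	printf("pipe A alone: 0x%x\n", lookup_slices(PIPE_A, 1 << PIPE_A));
	printf("pipe B (A+B): 0x%x\n",
	       lookup_slices(PIPE_B, (1 << PIPE_A) | (1 << PIPE_B)));
	return 0;
}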
-
-static bool
-use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
-static u64
-skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum plane_id plane_id;
- u64 data_rate = 0;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->rel_data_rate[plane_id];
-
- if (DISPLAY_VER(i915) < 11)
- data_rate += crtc_state->rel_data_rate_y[plane_id];
- }
-
- return data_rate;
-}
-
-static const struct skl_wm_level *
-skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- int level)
-{
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- if (level == 0 && pipe_wm->use_sagv_wm)
- return &wm->sagv.wm0;
-
- return &wm->wm[level];
-}
-
-static const struct skl_wm_level *
-skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id)
-{
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- if (pipe_wm->use_sagv_wm)
- return &wm->sagv.trans_wm;
-
- return &wm->trans_wm;
-}
-
-/*
- * We only disable the watermarks for each plane if
- * they exceed the ddb allocation of said plane. This
- * is done so that we don't end up touching cursor
- * watermarks needlessly when some other plane reduces
- * our max possible watermark level.
- *
- * Bspec has this to say about the PLANE_WM enable bit:
- * "All the watermarks at this level for all enabled
- * planes must be enabled before the level will be used."
- * So this is actually safe to do.
- */
-static void
-skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
-{
- if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
- memset(wm, 0, sizeof(*wm));
-}
-
-static void
-skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
- const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
-{
- if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
- uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- memset(wm, 0, sizeof(*wm));
- memset(uv_wm, 0, sizeof(*uv_wm));
- }
-}
-
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
-{
- /*
- * Wa_1408961008:icl, ehl
- * Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
- */
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
-}
-
-struct skl_plane_ddb_iter {
- u64 data_rate;
- u16 start, size;
-};
-
-static void
-skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
- struct skl_ddb_entry *ddb,
- const struct skl_wm_level *wm,
- u64 data_rate)
-{
- u16 size, extra = 0;
-
- if (data_rate) {
- extra = min_t(u16, iter->size,
- DIV64_U64_ROUND_UP(iter->size * data_rate,
- iter->data_rate));
- iter->size -= extra;
- iter->data_rate -= data_rate;
- }
-
- /*
- * Keep ddb entry of all disabled planes explicitly zeroed
- * to avoid skl_ddb_add_affected_planes() adding them to
- * the state when other planes change their allocations.
- */
- size = wm->min_ddb_alloc + extra;
- if (size)
- iter->start = skl_ddb_entry_init(ddb, iter->start,
- iter->start + size);
-}
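
Note: skl_allocate_plane_ddb() above grants each plane its minimum block requirement plus a share of the leftover blocks proportional to its relative data rate. A simplified standalone model of that distribution follows; the struct and numbers are stand-ins, and iter.size here represents only the leftover pool after the per-plane minimums have already been accounted for.

/* Sketch: minimum allocation plus a data-rate-proportional extra share. */
#include <stdint.h>
#include <stdio.h>

struct ddb_iter {
	uint64_t data_rate;	/* remaining total relative data rate */
	uint16_t size;		/* leftover blocks beyond the minimums */
};

static uint16_t alloc_plane(struct ddb_iter *iter, uint16_t min_alloc,
			    uint64_t data_rate)
{
	uint16_t extra = 0;

	if (data_rate) {
		uint64_t share = (iter->size * data_rate + iter->data_rate - 1) /
				 iter->data_rate;	/* round up */

		extra = share < iter->size ? (uint16_t)share : iter->size;
		iter->size -= extra;
		iter->data_rate -= data_rate;
	}

	return min_alloc + extra;
}

int main(void)
{
	struct ddb_iter iter = { .data_rate = 300, .size = 100 };

	printf("plane 0: %u blocks\n", alloc_plane(&iter, 40, 200));
	printf("plane 1: %u blocks\n", alloc_plane(&iter, 20, 100));
	return 0;
}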
-
-static int
-skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
- int num_active = hweight8(dbuf_state->active_pipes);
- struct skl_plane_ddb_iter iter;
- enum plane_id plane_id;
- u16 cursor_size;
- u32 blocks;
- int level;
-
- /* Clear the partitioning for disabled planes. */
- memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
- memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
-
- if (!crtc_state->hw.active)
- return 0;
-
- iter.start = alloc->start;
- iter.size = skl_ddb_entry_size(alloc);
- if (iter.size == 0)
- return 0;
-
- /* Allocate fixed number of blocks for cursor. */
- cursor_size = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= cursor_size;
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - cursor_size, alloc->end);
-
- iter.data_rate = skl_total_relative_data_rate(crtc_state);
-
- /*
- * Find the highest watermark level for which we can satisfy the block
- * requirement of active planes.
- */
- for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
- blocks = 0;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (plane_id == PLANE_CURSOR) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
-
- if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&dev_priv->drm,
- wm->wm[level].min_ddb_alloc != U16_MAX);
- blocks = U32_MAX;
- break;
- }
- continue;
- }
-
- blocks += wm->wm[level].min_ddb_alloc;
- blocks += wm->uv_wm[level].min_ddb_alloc;
- }
-
- if (blocks <= iter.size) {
- iter.size -= blocks;
- break;
- }
- }
-
- if (level < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
- blocks, iter.size);
- return -EINVAL;
- }
-
- /* avoid the WARN later when we don't allocate any extra DDB */
- if (iter.data_rate == 0)
- iter.size = 0;
-
- /*
- * Grant each plane the blocks it requires at the highest achievable
- * watermark level, plus an extra share of the leftover blocks
- * proportional to its relative data rate.
- */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
- crtc_state->rel_data_rate_y[plane_id]);
- skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
- crtc_state->rel_data_rate[plane_id]);
- } else {
- skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
- crtc_state->rel_data_rate[plane_id]);
- }
- }
- drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
-
- /*
- * When we calculated watermark values we didn't know how high
- * of a level we'd actually be able to hit, so we just marked
- * all levels as "enabled." Go back now and disable the ones
- * that aren't actually possible.
- */
- for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id))
- skl_check_nv12_wm_level(&wm->wm[level],
- &wm->uv_wm[level],
- ddb_y, ddb);
- else
- skl_check_wm_level(&wm->wm[level], ddb);
-
- if (icl_need_wm1_wa(dev_priv, plane_id) &&
- level == 1 && wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
- }
- }
- }
-
- /*
- * Go back and disable the transition and SAGV watermarks
- * if it turns out we don't have enough DDB blocks for them.
- */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- skl_check_wm_level(&wm->trans_wm, ddb_y);
- } else {
- WARN_ON(skl_ddb_entry_size(ddb_y));
-
- skl_check_wm_level(&wm->trans_wm, ddb);
- }
-
- skl_check_wm_level(&wm->sagv.wm0, ddb);
- skl_check_wm_level(&wm->sagv.trans_wm, ddb);
- }
-
- return 0;
-}
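
Note: the first loop in skl_crtc_allocate_plane_ddb() above walks the watermark levels from highest to lowest and keeps the first level whose summed per-plane minimum allocations fit in the pipe's DDB; levels above that are later disabled. A standalone model of that selection loop, with example numbers:

/* Sketch: pick the highest wm level whose block demand fits the DDB. */
#include <stdio.h>

#define MAX_LEVEL  7
#define NUM_PLANES 2

int main(void)
{
	/* min_ddb_alloc[plane][level], growing with level (higher latency) */
	unsigned int min_ddb[NUM_PLANES][MAX_LEVEL + 1] = {
		{ 20, 24, 28, 40, 64, 96, 160, 300 },
		{ 10, 12, 16, 24, 40, 80, 120, 200 },
	};
	unsigned int ddb_size = 128;
	int level;

	for (level = MAX_LEVEL; level >= 0; level--) {
		unsigned int blocks = 0;

		for (int p = 0; p < NUM_PLANES; p++)
			blocks += min_ddb[p][level];

		if (blocks <= ddb_size) {
			printf("highest usable level: %d (%u/%u blocks)\n",
			       level, blocks, ddb_size);
			break;
		}
	}

	if (level < 0)
		printf("configuration exceeds DDB limits\n");

	return 0;
}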
-
-/*
- * The max latency should be 257 (max the punit can code is 255 and we add 2us
- * for the read latency) and cpp should always be <= 8, so that
- * should allow pixel_rate up to ~2 GHz which seems sufficient since max
- * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
-*/
-static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
- u8 cpp, u32 latency, u32 dbuf_block_size)
-{
- u32 wm_intermediate_val;
- uint_fixed_16_16_t ret;
-
- if (latency == 0)
- return FP_16_16_MAX;
-
- wm_intermediate_val = latency * pixel_rate * cpp;
- ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- ret = add_fixed16_u32(ret, 1);
-
- return ret;
-}
-
-static uint_fixed_16_16_t
-skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
- uint_fixed_16_16_t plane_blocks_per_line)
-{
- u32 wm_intermediate_val;
- uint_fixed_16_16_t ret;
-
- if (latency == 0)
- return FP_16_16_MAX;
-
- wm_intermediate_val = latency * pixel_rate;
- wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
- pipe_htotal * 1000);
- ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
- return ret;
-}
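
Note: the comment above skl_wm_method1() explains why the fixed-point math cannot overflow (latency capped near 257 us, cpp <= 8, pixel rate bounded by 2xcdclk). The sketch below restates both formulas in plain doubles rather than the kernel's 16.16 fixed-point helpers, just to show the arithmetic; the input numbers are examples only.

/* Sketch of the method1/method2 watermark arithmetic in floating point. */
#include <stdio.h>

static double wm_method1(double pixel_rate_khz, double cpp, double latency_us,
			 double dbuf_block_size)
{
	/* blocks needed to cover the latency at the plane's byte rate */
	return latency_us * pixel_rate_khz * cpp / (1000.0 * dbuf_block_size);
}

static double wm_method2(double pixel_rate_khz, double htotal, double latency_us,
			 double plane_blocks_per_line)
{
	/* lines scanned during the latency, times blocks per line */
	return latency_us * pixel_rate_khz / (htotal * 1000.0) *
	       plane_blocks_per_line;
}

int main(void)
{
	printf("method1: %.2f blocks\n", wm_method1(594000, 4, 10, 512));
	printf("method2: %.2f blocks\n", wm_method2(594000, 4400, 10, 15.0));
	return 0;
}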
-
-static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 pixel_rate;
- u32 crtc_htotal;
- uint_fixed_16_16_t linetime_us;
-
- if (!crtc_state->hw.active)
- return u32_to_fixed16(0);
-
- pixel_rate = crtc_state->pixel_rate;
-
- if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
- return u32_to_fixed16(0);
-
- crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
- linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
-
- return linetime_us;
-}
-
-static int
-skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
- int width, const struct drm_format_info *format,
- u64 modifier, unsigned int rotation,
- u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 interm_pbpl;
-
- /* only planar format has two planes */
- if (color_plane == 1 &&
- !intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&dev_priv->drm,
- 			    "Non-planar formats have a single plane\n");
- return -EINVAL;
- }
-
- wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
- modifier == I915_FORMAT_MOD_4_TILED ||
- modifier == I915_FORMAT_MOD_Yf_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
-
- wp->width = width;
- if (color_plane == 1 && wp->is_planar)
- wp->width /= 2;
-
- wp->cpp = format->cpp[color_plane];
- wp->plane_pixel_rate = plane_pixel_rate;
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
- wp->dbuf_block_size = 256;
- else
- wp->dbuf_block_size = 512;
-
- if (drm_rotation_90_or_270(rotation)) {
- switch (wp->cpp) {
- case 1:
- wp->y_min_scanlines = 16;
- break;
- case 2:
- wp->y_min_scanlines = 8;
- break;
- case 4:
- wp->y_min_scanlines = 4;
- break;
- default:
- MISSING_CASE(wp->cpp);
- return -EINVAL;
- }
- } else {
- wp->y_min_scanlines = 4;
- }
-
- if (skl_needs_memory_bw_wa(dev_priv))
- wp->y_min_scanlines *= 2;
-
- wp->plane_bytes_per_line = wp->width * wp->cpp;
- if (wp->y_tiled) {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
- wp->y_min_scanlines,
- wp->dbuf_block_size);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- interm_pbpl++;
-
- wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
- wp->y_min_scanlines);
- } else {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size);
-
- if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
- interm_pbpl++;
-
- wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
- }
-
- wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
- wp->plane_blocks_per_line);
-
- wp->linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(crtc_state));
-
- return 0;
-}
-
-static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct skl_wm_params *wp, int color_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- return skl_compute_wm_params(crtc_state, width,
- fb->format, fb->modifier,
- plane_state->hw.rotation,
- intel_plane_pixel_rate(crtc_state, plane_state),
- wp, color_plane);
-}
-
-static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
-{
- if (DISPLAY_VER(dev_priv) >= 10)
- return true;
-
- 	/* The number of lines is ignored for the level 0 watermark. */
- return level > 0;
-}
-
-static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 13)
- return 255;
- else
- return 31;
-}
-
-static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- int level,
- unsigned int latency,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- uint_fixed_16_16_t method1, method2;
- uint_fixed_16_16_t selected_result;
- u32 blocks, lines, min_ddb_alloc = 0;
-
- if (latency == 0 ||
- (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
- /* reject it */
- result->min_ddb_alloc = U16_MAX;
- return;
- }
-
- /*
- * WaIncreaseLatencyIPCEnabled: kbl,cfl
- * Display WA #1141: kbl,cfl
- */
- if ((IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
- latency += 4;
-
- if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
- latency += 15;
-
- method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
- wp->cpp, latency, wp->dbuf_block_size);
- method2 = skl_wm_method2(wp->plane_pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- latency,
- wp->plane_blocks_per_line);
-
- if (wp->y_tiled) {
- selected_result = max_fixed16(method2, wp->y_tile_minimum);
- } else {
- if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
- wp->dbuf_block_size < 1) &&
- (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
- selected_result = method2;
- } else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(dev_priv) == 9)
- selected_result = min_fixed16(method1, method2);
- else
- selected_result = method2;
- } else {
- selected_result = method1;
- }
- }
-
- blocks = fixed16_to_u32_round_up(selected_result) + 1;
- /*
- 	 * Let's have blocks at minimum equivalent to plane_blocks_per_line,
- 	 * as there will be at least one line in the lines configuration. This
- 	 * is a workaround for FIFO underruns observed with resolutions like
- 	 * 4k 60 Hz in single-channel DRAM configurations.
- *
- * As per the Bspec 49325, if the ddb allocation can hold at least
- * one plane_blocks_per_line, we should have selected method2 in
- * the above logic. Assuming that modern versions have enough dbuf
- * and method2 guarantees blocks equivalent to at least 1 line,
- * select the blocks as plane_blocks_per_line.
- *
- 	 * TODO: Revisit the logic when we have a better understanding of the
- 	 * DRAM channels' impact on the level 0 memory latency and the relevant
- * wm calculations.
- */
- if (skl_wm_has_lines(dev_priv, level))
- blocks = max(blocks,
- fixed16_to_u32_round_up(wp->plane_blocks_per_line));
- lines = div_round_up_fixed16(selected_result,
- wp->plane_blocks_per_line);
-
- if (DISPLAY_VER(dev_priv) == 9) {
- /* Display WA #1125: skl,bxt,kbl */
- if (level == 0 && wp->rc_surface)
- blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
-
- /* Display WA #1126: skl,bxt,kbl */
- if (level >= 1 && level <= 7) {
- if (wp->y_tiled) {
- blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
- lines += wp->y_min_scanlines;
- } else {
- blocks++;
- }
-
- /*
- * Make sure result blocks for higher latency levels are
- 			 * at least as high as the level below the current level.
- * Assumption in DDB algorithm optimization for special
- * cases. Also covers Display WA #1125 for RC.
- */
- if (result_prev->blocks > blocks)
- blocks = result_prev->blocks;
- }
- }
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- if (wp->y_tiled) {
- int extra_lines;
-
- if (lines % wp->y_min_scanlines == 0)
- extra_lines = wp->y_min_scanlines;
- else
- extra_lines = wp->y_min_scanlines * 2 -
- lines % wp->y_min_scanlines;
-
- min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
- wp->plane_blocks_per_line);
- } else {
- min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
- }
- }
-
- if (!skl_wm_has_lines(dev_priv, level))
- lines = 0;
-
- if (lines > skl_wm_max_lines(dev_priv)) {
- /* reject it */
- result->min_ddb_alloc = U16_MAX;
- return;
- }
-
- /*
- * If lines is valid, assume we can use this watermark level
- * for now. We'll come back and disable it after we calculate the
- * DDB allocation if it turns out we don't actually have enough
- * blocks to satisfy it.
- */
- result->blocks = blocks;
- result->lines = lines;
- /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
- result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
- result->enable = true;
-
- if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us)
- result->can_sagv = latency >= dev_priv->sagv_block_time_us;
-}
-
-static void
-skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- const struct skl_wm_params *wm_params,
- struct skl_wm_level *levels)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct skl_wm_level *result_prev = &levels[0];
-
- for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = &levels[level];
- unsigned int latency = dev_priv->wm.skl_latency[level];
-
- skl_compute_plane_wm(crtc_state, plane, level, latency,
- wm_params, result_prev, result);
-
- result_prev = result;
- }
-}
-
-static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- const struct skl_wm_params *wm_params,
- struct skl_plane_wm *plane_wm)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
- struct skl_wm_level *levels = plane_wm->wm;
- unsigned int latency = 0;
-
- if (dev_priv->sagv_block_time_us)
- latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0];
-
- skl_compute_plane_wm(crtc_state, plane, 0, latency,
- wm_params, &levels[0],
- sagv_wm);
-}
-
-static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
- struct skl_wm_level *trans_wm,
- const struct skl_wm_level *wm0,
- const struct skl_wm_params *wp)
-{
- u16 trans_min, trans_amount, trans_y_tile_min;
- u16 wm0_blocks, trans_offset, blocks;
-
- 	/* Transition WMs don't make any sense if IPC is disabled */
- if (!dev_priv->ipc_enabled)
- return;
-
- /*
- * WaDisableTWM:skl,kbl,cfl,bxt
- 	 * Transition WMs are not recommended by the HW team for GEN9
- */
- if (DISPLAY_VER(dev_priv) == 9)
- return;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- trans_min = 4;
- else
- trans_min = 14;
-
- /* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(dev_priv) == 10)
- trans_amount = 0;
- else
- 		trans_amount = 10; /* This is a configurable amount */
-
- trans_offset = trans_min + trans_amount;
-
- /*
- * The spec asks for Selected Result Blocks for wm0 (the real value),
- * not Result Blocks (the integer value). Pay attention to the capital
- 	 * letters. The value wm0->blocks is actually Result Blocks, but
- * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
- * and since we later will have to get the ceiling of the sum in the
- * transition watermarks calculation, we can just pretend Selected
- * Result Blocks is Result Blocks minus 1 and it should work for the
- * current platforms.
- */
- wm0_blocks = wm0->blocks - 1;
-
- if (wp->y_tiled) {
- trans_y_tile_min =
- (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
- blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
- } else {
- blocks = wm0_blocks + trans_offset;
- }
- blocks++;
-
- /*
- * Just assume we can enable the transition watermark. After
- * computing the DDB we'll come back and disable it if that
- * assumption turns out to be false.
- */
- trans_wm->blocks = blocks;
- trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
- trans_wm->enable = true;
-}
-
-static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct intel_plane *plane, int color_plane)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
- struct skl_wm_params wm_params;
- int ret;
-
- ret = skl_compute_plane_wm_params(crtc_state, plane_state,
- &wm_params, color_plane);
- if (ret)
- return ret;
-
- skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
-
- skl_compute_transition_wm(dev_priv, &wm->trans_wm,
- &wm->wm[0], &wm_params);
-
- if (DISPLAY_VER(dev_priv) >= 12) {
- tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
-
- skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
- &wm->sagv.wm0, &wm_params);
- }
-
- return 0;
-}
-
-static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct intel_plane *plane)
-{
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
- struct skl_wm_params wm_params;
- int ret;
-
- wm->is_planar = true;
-
- /* uv plane watermarks must also be validated for NV12/Planar */
- ret = skl_compute_plane_wm_params(crtc_state, plane_state,
- &wm_params, 1);
- if (ret)
- return ret;
-
- skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
-
- return 0;
-}
-
-static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
-
- memset(wm, 0, sizeof(*wm));
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 0);
- if (ret)
- return ret;
-
- if (fb->format->is_yuv && fb->format->num_planes > 1) {
- ret = skl_build_plane_wm_uv(crtc_state, plane_state,
- plane);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
- int ret;
-
- /* Watermarks calculated in master */
- if (plane_state->planar_slave)
- return 0;
-
- memset(wm, 0, sizeof(*wm));
-
- if (plane_state->planar_linked_plane) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- drm_WARN_ON(&dev_priv->drm,
- !intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
- fb->format->num_planes == 1);
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane_state->planar_linked_plane, 0);
- if (ret)
- return ret;
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 1);
- if (ret)
- return ret;
- } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int skl_build_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- int ret, i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- /*
- * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
- * instead but we don't populate that correctly for NV12 Y
- * planes so for now hack this.
- */
- if (plane->pipe != crtc->pipe)
- continue;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- ret = icl_build_plane_wm(crtc_state, plane_state);
- else
- ret = skl_build_plane_wm(crtc_state, plane_state);
- if (ret)
- return ret;
- }
-
- crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
-
- return 0;
-}
-
-static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const struct skl_ddb_entry *entry)
-{
- if (entry->end)
- intel_de_write_fw(dev_priv, reg,
- PLANE_BUF_END(entry->end - 1) |
- PLANE_BUF_START(entry->start));
- else
- intel_de_write_fw(dev_priv, reg, 0);
-}
-
-static void skl_write_wm_level(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const struct skl_wm_level *level)
-{
- u32 val = 0;
-
- if (level->enable)
- val |= PLANE_WM_EN;
- if (level->ignore_lines)
- val |= PLANE_WM_IGNORE_LINES;
- val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
- val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
-
- intel_de_write_fw(dev_priv, reg, val);
-}
-
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- for (level = 0; level <= max_level; level++)
- skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
- &wm->sagv.wm0);
- skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb);
-
- if (DISPLAY_VER(dev_priv) < 11)
- skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
-}
-
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
-
- for (level = 0; level <= max_level; level++)
- skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
- &wm->sagv.wm0);
- skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
-}
-
-static bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2)
-{
- return l1->enable == l2->enable &&
- l1->ignore_lines == l2->ignore_lines &&
- l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
-}
-
-static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
- const struct skl_plane_wm *wm1,
- const struct skl_plane_wm *wm2)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- /*
- * We don't check uv_wm as the hardware doesn't actually
- * use it. It only gets used for calculating the required
- * ddb allocation.
- */
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
- return false;
- }
-
- return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
- skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
- skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
-}
-
-static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
-{
- return a->start < b->end && b->start < a->end;
-}
-
-static void skl_ddb_entry_union(struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
-{
- if (a->end && b->end) {
- a->start = min(a->start, b->start);
- a->end = max(a->end, b->end);
- } else if (b->end) {
- a->start = b->start;
- a->end = b->end;
- }
-}
-
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry *entries,
- int num_entries, int ignore_idx)
-{
- int i;
-
- for (i = 0; i < num_entries; i++) {
- if (i != ignore_idx &&
- skl_ddb_entries_overlap(ddb, &entries[i]))
- return true;
- }
-
- return false;
-}
-
-static int
-skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
- enum plane_id plane_id = plane->id;
-
- if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
- &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
- skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- new_crtc_state->update_planes |= BIT(plane_id);
- }
-
- return 0;
-}
-
-static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
-{
- struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
- u8 enabled_slices;
- enum pipe pipe;
-
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- enabled_slices = BIT(DBUF_S1);
-
- for_each_pipe(dev_priv, pipe)
- enabled_slices |= dbuf_state->slices[pipe];
-
- return enabled_slices;
-}
-
-static int
-skl_compute_ddb(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *old_dbuf_state;
- struct intel_dbuf_state *new_dbuf_state = NULL;
- const struct intel_crtc_state *old_crtc_state;
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int ret, i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- break;
- }
-
- if (!new_dbuf_state)
- return 0;
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- if (HAS_MBUS_JOINING(dev_priv))
- new_dbuf_state->joined_mbus =
- adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum pipe pipe = crtc->pipe;
-
- new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
- new_dbuf_state->joined_mbus);
-
- if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
- continue;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
- old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes(state);
- if (ret)
- return ret;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
- old_dbuf_state->enabled_slices,
- new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
- str_yes_no(old_dbuf_state->joined_mbus),
- str_yes_no(new_dbuf_state->joined_mbus));
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- enum pipe pipe = crtc->pipe;
-
- new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
-
- if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
- continue;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- ret = skl_crtc_allocate_ddb(state, crtc);
- if (ret)
- return ret;
- }
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_crtc_allocate_plane_ddb(state, crtc);
- if (ret)
- return ret;
-
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static char enast(bool enable)
-{
- return enable ? '*' : ' ';
-}
-
-static void
-skl_print_wm_changes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_crtc_state *old_crtc_state;
- const struct intel_crtc_state *new_crtc_state;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- int i;
-
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
-
- old_pipe_wm = &old_crtc_state->wm.skl.optimal;
- new_pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- enum plane_id plane_id = plane->id;
- const struct skl_ddb_entry *old, *new;
-
- old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
- new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
-
- if (skl_ddb_entry_equal(old, new))
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
- plane->base.base.id, plane->base.name,
- old->start, old->end, new->start, new->end,
- skl_ddb_entry_size(old), skl_ddb_entry_size(new));
- }
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- enum plane_id plane_id = plane->id;
- const struct skl_plane_wm *old_wm, *new_wm;
-
- old_wm = &old_pipe_wm->planes[plane_id];
- new_wm = &new_pipe_wm->planes[plane_id];
-
- if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
- enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
- enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
- enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
- enast(old_wm->trans_wm.enable),
- enast(old_wm->sagv.wm0.enable),
- enast(old_wm->sagv.trans_wm.enable),
- enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
- enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
- enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
- enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
- enast(new_wm->trans_wm.enable),
- enast(new_wm->sagv.wm0.enable),
- enast(new_wm->sagv.trans_wm.enable));
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
- enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
- enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
- enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
- enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].blocks, old_wm->wm[1].blocks,
- old_wm->wm[2].blocks, old_wm->wm[3].blocks,
- old_wm->wm[4].blocks, old_wm->wm[5].blocks,
- old_wm->wm[6].blocks, old_wm->wm[7].blocks,
- old_wm->trans_wm.blocks,
- old_wm->sagv.wm0.blocks,
- old_wm->sagv.trans_wm.blocks,
- new_wm->wm[0].blocks, new_wm->wm[1].blocks,
- new_wm->wm[2].blocks, new_wm->wm[3].blocks,
- new_wm->wm[4].blocks, new_wm->wm[5].blocks,
- new_wm->wm[6].blocks, new_wm->wm[7].blocks,
- new_wm->trans_wm.blocks,
- new_wm->sagv.wm0.blocks,
- new_wm->sagv.trans_wm.blocks);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv.wm0.min_ddb_alloc,
- old_wm->sagv.trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv.wm0.min_ddb_alloc,
- new_wm->sagv.trans_wm.min_ddb_alloc);
- }
- }
-}
-
-static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
- const struct skl_pipe_wm *old_pipe_wm,
- const struct skl_pipe_wm *new_pipe_wm)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
-
- for (level = 0; level <= max_level; level++) {
- /*
- * We don't check uv_wm as the hardware doesn't actually
- * use it. It only gets used for calculating the required
- * ddb allocation.
- */
- if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
- skl_plane_wm_level(new_pipe_wm, plane->id, level)))
- return false;
- }
-
- if (HAS_HW_SAGV_WM(i915)) {
- const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
- const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
-
- if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
- !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
- return false;
- }
-
- return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
- skl_plane_trans_wm(new_pipe_wm, plane->id));
-}
-
-/*
- * To make sure the cursor watermark registers are always consistent
- * with our computed state the following scenario needs special
- * treatment:
- *
- * 1. enable cursor
- * 2. move cursor entirely offscreen
- * 3. disable cursor
- *
- * Step 2. does call .disable_plane() but does not zero the watermarks
- * (since we consider an offscreen cursor still active for the purposes
- * of watermarks). Step 3. would not normally call .disable_plane()
- * because the actual plane visibility isn't changing, and we don't
- * deallocate the cursor ddb until the pipe gets disabled. So we must
- * force step 3. to call .disable_plane() to update the watermark
- * registers properly.
- *
- * Other planes do not suffer from this issue as their watermarks are
- * calculated based on the actual plane visibility. The only time this
- * can trigger for the other planes is during the initial readout as the
- * default value of the watermarks registers is not zero.
- */
-static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
- enum plane_id plane_id = plane->id;
-
- /*
- * Force a full wm update for every plane on modeset.
- * Required because the reset value of the wm registers
- * is non-zero, whereas we want all disabled planes to
- * have zero watermarks. So if we turn off the relevant
- * power well the hardware state will go out of sync
- * with the software state.
- */
- if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
- skl_plane_selected_wm_equals(plane,
- &old_crtc_state->wm.skl.optimal,
- &new_crtc_state->wm.skl.optimal))
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- new_crtc_state->update_planes |= BIT(plane_id);
- }
-
- return 0;
-}
-
-static int
-skl_compute_wm(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- int ret, i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = skl_build_pipe_wm(state, crtc);
- if (ret)
- return ret;
- }
-
- ret = skl_compute_ddb(state);
- if (ret)
- return ret;
-
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
- /*
- * skl_compute_ddb() will have adjusted the final watermarks
- * based on how much ddb is available. Now we can actually
- * check if the final watermarks changed.
- */
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
-
- skl_print_wm_changes(state);
-
- return 0;
-}
-
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
@@ -6459,10 +3583,10 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
@@ -6475,210 +3599,17 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
-{
- level->enable = val & PLANE_WM_EN;
- level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
- level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
- level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
-}
-
-static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int level, max_level;
- enum plane_id plane_id;
- u32 val;
-
- max_level = ilk_wm_max_level(dev_priv);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_plane_wm *wm = &out->planes[plane_id];
-
- for (level = 0; level <= max_level; level++) {
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
- else
- val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
-
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
- }
-
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore,
- PLANE_WM_SAGV(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore,
- CUR_WM_SAGV(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
-
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore,
- PLANE_WM_SAGV_TRANS(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore,
- CUR_WM_SAGV_TRANS(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- wm->sagv.wm0 = wm->wm[0];
- wm->sagv.trans_wm = wm->trans_wm;
- }
- }
-}
-
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- struct intel_crtc *crtc;
-
- if (HAS_MBUS_JOINING(dev_priv))
- dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- enum pipe pipe = crtc->pipe;
- unsigned int mbus_offset;
- enum plane_id plane_id;
- u8 slices;
-
- skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
- crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
-
- memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
- plane_id, ddb, ddb_y);
-
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
- }
-
- dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
-
- /*
- * Used for checking overlaps, so we need absolute
- * offsets instead of MBUS relative offsets.
- */
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(dev_priv, slices);
- crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
- crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
-
- /* The slices actually used by the planes on the pipe */
- dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
- crtc->base.base.id, crtc->base.name,
- dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
- dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
- str_yes_no(dbuf_state->joined_mbus));
- }
-
- dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
- * transition between the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
@@ -6826,7 +3757,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
struct intel_crtc *crtc;
g4x_read_wm_values(dev_priv, wm);
@@ -6920,7 +3851,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
@@ -6968,12 +3899,12 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -7007,7 +3938,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -7076,7 +4007,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
@@ -7117,7 +4048,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/*
@@ -7138,7 +4069,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc *crtc;
ilk_init_lp_watermarks(dev_priv);
@@ -7167,168 +4098,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
-void intel_wm_state_verify(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct intel_plane *plane;
- u8 hw_enabled_slices;
-
- if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->dbuf.enabled_slices)
- drm_err(&dev_priv->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
-}
-
-void intel_enable_ipc(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!HAS_IPC(dev_priv))
- return;
-
- val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
-
- if (dev_priv->ipc_enabled)
- val |= DISP_IPC_ENABLE;
- else
- val &= ~DISP_IPC_ENABLE;
-
- intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
-}
-
-static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
-{
- /* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv))
- return false;
-
- /* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- return dev_priv->dram_info.symmetric_memory;
-
- return true;
-}
-
-void intel_init_ipc(struct drm_i915_private *dev_priv)
-{
- if (!HAS_IPC(dev_priv))
- return;
-
- dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
-
- intel_enable_ipc(dev_priv);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -7436,7 +4205,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (dev_priv->vbt.fdi_rx_polarity_inverted)
+ if (dev_priv->display.vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
@@ -7587,9 +4356,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
- if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
+ /* Wa_1409120013 */
+ if (DISPLAY_VER(dev_priv) == 12)
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
@@ -7966,7 +4734,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7977,7 +4745,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0);
intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
@@ -8169,18 +4937,14 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
}
-static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
-};
-
-static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
+static const struct intel_wm_funcs ilk_wm_funcs = {
.compute_pipe_wm = ilk_compute_pipe_wm,
.compute_intermediate_wm = ilk_compute_intermediate_wm,
.initial_watermarks = ilk_initial_watermarks,
.optimize_watermarks = ilk_optimize_watermarks,
};
-static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
+static const struct intel_wm_funcs vlv_wm_funcs = {
.compute_pipe_wm = vlv_compute_pipe_wm,
.compute_intermediate_wm = vlv_compute_intermediate_wm,
.initial_watermarks = vlv_initial_watermarks,
@@ -8188,67 +4952,67 @@ static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
.atomic_update_watermarks = vlv_atomic_update_fifo,
};
-static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
+static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_pipe_wm = g4x_compute_pipe_wm,
.compute_intermediate_wm = g4x_compute_intermediate_wm,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
};
-static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
+static const struct intel_wm_funcs pnv_wm_funcs = {
.update_wm = pnv_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
+static const struct intel_wm_funcs i965_wm_funcs = {
.update_wm = i965_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
+static const struct intel_wm_funcs i9xx_wm_funcs = {
.update_wm = i9xx_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
+static const struct intel_wm_funcs i845_wm_funcs = {
.update_wm = i845_update_wm,
};
-static const struct drm_i915_wm_disp_funcs nop_funcs = {
+static const struct intel_wm_funcs nop_funcs = {
};
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ skl_wm_init(dev_priv);
+ return;
+ }
+
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
pnv_get_mem_freq(dev_priv);
else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
- intel_sagv_init(dev_priv);
-
/* For FIFO watermark updates */
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &skl_wm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
- dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->wm_disp = &ilk_wm_funcs;
+ if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
+ dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
+ (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
+ dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
} else {
drm_dbg_kms(&dev_priv->drm,
"Failed to read display plane latency. "
"Disable CxSR\n");
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &vlv_wm_funcs;
+ dev_priv->display.funcs.wm = &vlv_wm_funcs;
} else if (IS_G4X(dev_priv)) {
g4x_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &g4x_wm_funcs;
+ dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
@@ -8262,22 +5026,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
} else
- dev_priv->wm_disp = &pnv_wm_funcs;
+ dev_priv->display.funcs.wm = &pnv_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->wm_disp = &i965_wm_funcs;
+ dev_priv->display.funcs.wm = &i965_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->wm_disp = &i9xx_wm_funcs;
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 2) {
if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->wm_disp = &i845_wm_funcs;
+ dev_priv->display.funcs.wm = &i845_wm_funcs;
else
- dev_priv->wm_disp = &i9xx_wm_funcs;
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
} else {
drm_err(&dev_priv->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
}
}
@@ -8286,183 +5050,3 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
-{
- struct intel_dbuf_state *dbuf_state;
-
- dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
- if (!dbuf_state)
- return NULL;
-
- return &dbuf_state->base;
-}
-
-static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
- struct intel_global_state *state)
-{
- kfree(state);
-}
-
-static const struct intel_global_state_funcs intel_dbuf_funcs = {
- .atomic_duplicate_state = intel_dbuf_duplicate_state,
- .atomic_destroy_state = intel_dbuf_destroy_state,
-};
-
-struct intel_dbuf_state *
-intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_global_state *dbuf_state;
-
- dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
- if (IS_ERR(dbuf_state))
- return ERR_CAST(dbuf_state);
-
- return to_intel_dbuf_state(dbuf_state);
-}
-
-int intel_dbuf_init(struct drm_i915_private *dev_priv)
-{
- struct intel_dbuf_state *dbuf_state;
-
- dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
- if (!dbuf_state)
- return -ENOMEM;
-
- intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
- &dbuf_state->base, &intel_dbuf_funcs);
-
- return 0;
-}
-
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
- enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- if (!HAS_MBUS_JOINING(dev_priv))
- return;
-
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
- } else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
- }
-
- intel_de_rmw(dev_priv, MBUS_CTL,
- MBUS_HASHING_MODE_MASK | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
- for_each_dbuf_slice(dev_priv, slice)
- intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
- && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(dev_priv,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
- && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- gen9_dbuf_slices_update(dev_priv,
- new_dbuf_state->enabled_slices);
-}
-
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
- const struct intel_crtc *crtc;
- u32 val = 0;
- int i;
-
- if (DISPLAY_VER(i915) < 11)
- return;
-
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
- return;
-
- if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
- val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
- val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
- }
-
- /* Wa_22010947358:adl-p */
- if (IS_ALDERLAKE_P(i915))
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
- else
- val |= MBUS_DBOX_A_CREDIT(2);
-
- if (IS_ALDERLAKE_P(i915)) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(12);
- } else {
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.active ||
- !intel_crtc_needs_modeset(new_crtc_state))
- continue;
-
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val);
- }
-}
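The dbuf global-state code removed above (it is being relocated out of intel_pm.c, not dropped) follows the usual atomic global-object pattern: the duplicate hook clones the committed state with kmemdup so the copy can be mutated inside the new atomic state, and the destroy hook simply frees it. A stand-alone sketch of that duplicate/destroy pairing, using malloc/memcpy/free in place of the kernel helpers; struct my_state and the function names are illustrative only.

#include <stdlib.h>
#include <string.h>

struct my_state {
	unsigned char enabled_slices;
	int joined_mbus;
};

/* Clone the committed state; the atomic commit mutates only the copy. */
static struct my_state *my_duplicate_state(const struct my_state *old)
{
	struct my_state *new_state = malloc(sizeof(*new_state));

	if (!new_state)
		return NULL;
	memcpy(new_state, old, sizeof(*new_state));	/* kmemdup() in the kernel */
	return new_state;
}

static void my_destroy_state(struct my_state *state)
{
	free(state);
}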
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 945503ae493e..c09b872d65c8 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,22 +8,9 @@
#include <linux/types.h>
-#include "display/intel_display.h"
-#include "display/intel_global_state.h"
-
-#include "i915_drv.h"
-
-struct drm_device;
struct drm_i915_private;
-struct i915_request;
-struct intel_atomic_state;
-struct intel_bw_state;
-struct intel_crtc;
struct intel_crtc_state;
-struct intel_plane;
-struct skl_ddb_entry;
-struct skl_pipe_wm;
-struct skl_wm_level;
+struct intel_plane_state;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
@@ -34,56 +21,14 @@ void intel_pm_setup(struct drm_i915_private *dev_priv);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void intel_wm_state_verify(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state);
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry *entry);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-void skl_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
- const struct intel_bw_state *bw_state);
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
-void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry *entries,
- int num_entries, int ignore_idx);
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[]);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
-struct intel_dbuf_state {
- struct intel_global_state base;
-
- struct skl_ddb_entry ddb[I915_MAX_PIPES];
- unsigned int weight[I915_MAX_PIPES];
- u8 slices[I915_MAX_PIPES];
- u8 enabled_slices;
- u8 active_pipes;
- bool joined_mbus;
-};
-
-struct intel_dbuf_state *
-intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
-#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
-#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
-
-int intel_dbuf_init(struct drm_i915_private *dev_priv);
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
-
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a852c471d1b3..5cd423c7b646 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
#include "gt/intel_engine_regs.h"
@@ -44,29 +45,47 @@ fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
}
void
-intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{
- spin_lock_init(&mmio_debug->lock);
- mmio_debug->unclaimed_mmio_check = 1;
+ spin_lock_init(&i915->mmio_debug.lock);
+ i915->mmio_debug.unclaimed_mmio_check = 1;
+
+ i915->uncore.debug = &i915->mmio_debug;
}
-static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+static void mmio_debug_suspend(struct intel_uncore *uncore)
{
- lockdep_assert_held(&mmio_debug->lock);
+ if (!uncore->debug)
+ return;
+
+ spin_lock(&uncore->debug->lock);
/* Save and disable mmio debugging for the user bypass */
- if (!mmio_debug->suspend_count++) {
- mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
- mmio_debug->unclaimed_mmio_check = 0;
+ if (!uncore->debug->suspend_count++) {
+ uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
+ uncore->debug->unclaimed_mmio_check = 0;
}
+
+ spin_unlock(&uncore->debug->lock);
}
-static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
+
+static void mmio_debug_resume(struct intel_uncore *uncore)
{
- lockdep_assert_held(&mmio_debug->lock);
+ if (!uncore->debug)
+ return;
+
+ spin_lock(&uncore->debug->lock);
+
+ if (!--uncore->debug->suspend_count)
+ uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
+
+ if (check_for_unclaimed_mmio(uncore))
+ drm_info(&uncore->i915->drm,
+ "Invalid mmio detected during user access\n");
- if (!--mmio_debug->suspend_count)
- mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+ spin_unlock(&uncore->debug->lock);
}
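mmio_debug_suspend() and mmio_debug_resume() now take the uncore, bail out early when no debug structure is attached, and do their own locking; the unclaimed-MMIO check also moved into resume. The core mechanism is a nesting counter: only the outermost suspend saves and clears the check flag, and only the matching final resume restores it. A small self-contained sketch of that save/restore counting; field and function names are illustrative and locking is omitted.

struct mmio_dbg {
	int suspend_count;
	int unclaimed_check;
	int saved_check;
};

static void dbg_suspend(struct mmio_dbg *d)
{
	/* Only the first (outermost) suspend saves and disables the check. */
	if (!d->suspend_count++) {
		d->saved_check = d->unclaimed_check;
		d->unclaimed_check = 0;
	}
}

static void dbg_resume(struct mmio_dbg *d)
{
	/* Only the matching final resume restores the saved value. */
	if (!--d->suspend_count)
		d->unclaimed_check = d->saved_check;
}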
static const char * const forcewake_domain_names[] = {
@@ -112,8 +131,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
* trying to reset here does exist at this point (engines could be fused
* off in ICL+), so no waiting for acks
*/
- /* WaRsClearFWBitsAtReset:bdw,skl */
- fw_clear(d, 0xffff);
+ /* WaRsClearFWBitsAtReset */
+ if (GRAPHICS_VER(d->uncore->i915) >= 12)
+ fw_clear(d, 0xefff);
+ else
+ fw_clear(d, 0xffff);
}
static inline void
@@ -674,9 +696,7 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
spin_lock_irq(&uncore->lock);
if (!uncore->user_forcewake_count++) {
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
- spin_lock(&uncore->debug->lock);
- mmio_debug_suspend(uncore->debug);
- spin_unlock(&uncore->debug->lock);
+ mmio_debug_suspend(uncore);
}
spin_unlock_irq(&uncore->lock);
}
@@ -692,14 +712,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
if (!--uncore->user_forcewake_count) {
- spin_lock(&uncore->debug->lock);
- mmio_debug_resume(uncore->debug);
-
- if (check_for_unclaimed_mmio(uncore))
- drm_info(&uncore->i915->drm,
- "Invalid mmio detected during user access\n");
- spin_unlock(&uncore->debug->lock);
-
+ mmio_debug_resume(uncore);
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
spin_unlock_irq(&uncore->lock);
@@ -915,6 +928,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
const struct intel_forcewake_range *entry;
+ if (IS_GSI_REG(offset))
+ offset += uncore->gsi_offset;
+
entry = BSEARCH(offset,
uncore->fw_domains_table,
uncore->fw_domains_table_entries,
@@ -1130,6 +1146,9 @@ static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
return false;
+ if (IS_GSI_REG(offset))
+ offset += uncore->gsi_offset;
+
return BSEARCH(offset,
uncore->shadowed_reg_table,
uncore->shadowed_reg_table_entries,
@@ -1701,7 +1720,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (likely(!uncore->i915->params.mmio_debug))
+ if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
return;
/* interrupts are disabled and re-enabled around uncore->lock usage */
@@ -1982,8 +2001,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->uncore = uncore;
d->wake_count = 0;
- d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
- d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
d->id = domain_id;
@@ -2067,7 +2086,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
if (GRAPHICS_VER(i915) >= 11) {
/* we'll prune the domains of missing engines later */
- intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
+ intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask;
int i;
uncore->fw_get_funcs = &uncore_get_fallback;
@@ -2220,6 +2239,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
+{
+ iounmap(regs);
+}
+
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
struct drm_i915_private *i915 = uncore->i915;
@@ -2232,14 +2256,15 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
- * For dgfx chips register range is expanded to 4MB.
+ * For dgfx chips the register range is expanded to 4MB, and this larger
+ * range is also used for integrated GPUs beginning with Meteor Lake.
*/
- if (GRAPHICS_VER(i915) < 5)
- mmio_size = 512 * 1024;
- else if (IS_DGFX(i915))
+ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
mmio_size = 4 * 1024 * 1024;
- else
+ else if (GRAPHICS_VER(i915) >= 5)
mmio_size = 2 * 1024 * 1024;
+ else
+ mmio_size = 512 * 1024;
uncore->regs = ioremap(phys_addr, mmio_size);
if (uncore->regs == NULL) {
@@ -2247,12 +2272,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
return -EIO;
}
- return 0;
-}
-
-void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
-{
- iounmap(uncore->regs);
+ return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
}
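With the hunk above, intel_uncore_setup_mmio() registers the iounmap as a drm-managed action instead of relying on an explicit intel_uncore_cleanup_mmio() call, so the mapping is released automatically on device teardown, and drmm_add_action_or_reset() runs the action immediately if registration itself fails. A hedged in-tree-style sketch of that shape; everything except the drmm_*, ioremap, and iounmap calls is a placeholder name.

#include <drm/drm_managed.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Invoked automatically when the drm_device is torn down. */
static void demo_unmap_mmio(struct drm_device *drm, void *regs)
{
	iounmap(regs);
}

static int demo_setup_mmio(struct drm_device *drm, phys_addr_t phys, size_t size)
{
	void __iomem *regs = ioremap(phys, size);

	if (!regs)
		return -EIO;

	/* On registration failure the action fires right away, so regs never leaks. */
	return drmm_add_action_or_reset(drm, demo_unmap_mmio, regs);
}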
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2262,7 +2282,6 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
uncore->i915 = gt->i915;
uncore->gt = gt;
uncore->rpm = &gt->i915->runtime_pm;
- uncore->debug = &gt->i915->mmio_debug;
}
static void uncore_raw_init(struct intel_uncore *uncore)
@@ -2442,8 +2461,11 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
}
}
-void intel_uncore_fini_mmio(struct intel_uncore *uncore)
+/* Called via drm-managed action */
+void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
+ struct intel_uncore *uncore = data;
+
if (intel_uncore_has_forcewake(uncore)) {
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
@@ -2573,6 +2595,9 @@ bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret;
+ if (!uncore->debug)
+ return false;
+
spin_lock_irq(&uncore->debug->lock);
ret = check_for_unclaimed_mmio(uncore);
spin_unlock_irq(&uncore->debug->lock);
@@ -2585,6 +2610,9 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
+ if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
+ return false;
+
spin_lock_irq(&uncore->debug->lock);
if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b1fa912a65e7..5022bac80b67 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -33,6 +33,7 @@
#include "i915_reg_defs.h"
+struct drm_device;
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
@@ -135,6 +136,16 @@ struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ /*
+ * Do we need to apply an additional offset to reach the beginning
+ * of the basic non-engine GT registers (referred to as "GSI" on
+ * newer platforms, or "GT block" on older platforms)? If so, we'll
+ * track that here and apply it transparently to registers in the
+ * appropriate range to maintain compatibility with our existing
+ * register definitions and GT code.
+ */
+ u32 gsi_offset;
+
unsigned int flags;
#define UNCORE_HAS_FORCEWAKE BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
@@ -210,8 +221,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-void
-intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
+void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
@@ -221,7 +231,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
-void intel_uncore_fini_mmio(struct intel_uncore *uncore);
+void intel_uncore_fini_mmio(struct drm_device *dev, void *data);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);
@@ -294,19 +304,27 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
2, timeout_ms, NULL);
}
+#define IS_GSI_REG(reg) ((reg) < 0x40000)
+
/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
i915_reg_t reg) \
{ \
- return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+ u32 offset = i915_mmio_reg_offset(reg); \
+ if (IS_GSI_REG(offset)) \
+ offset += uncore->gsi_offset; \
+ return read##s__(uncore->regs + offset); \
}
#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
i915_reg_t reg, u##x__ val) \
{ \
- write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+ u32 offset = i915_mmio_reg_offset(reg); \
+ if (IS_GSI_REG(offset)) \
+ offset += uncore->gsi_offset; \
+ write##s__(val, uncore->regs + offset); \
}
__raw_read(8, b)
__raw_read(16, w)
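The __raw_uncore_read/write changes above add a conditional fix-up: register offsets in the GSI range (below 0x40000) get uncore->gsi_offset added before the MMIO access, while the raw_reg_* interrupt-path macros in the following hunk deliberately skip that check and leave the adjustment to the caller, as the new comment there explains. A small illustration of the same idea outside the macro machinery; the struct layout, limit constant, and helper name are stand-ins, not the i915 definitions.

#include <stdint.h>

#define GSI_REG_LIMIT 0x40000u	/* offsets below this belong to the GSI/GT block */

struct mmio {
	volatile uint32_t *base;	/* mapped register window */
	uint32_t gsi_offset;		/* extra displacement of the GSI block, 0 on older parts */
};

static inline uint32_t mmio_read32(const struct mmio *m, uint32_t offset)
{
	if (offset < GSI_REG_LIMIT)
		offset += m->gsi_offset;	/* transparent fix-up before the access */
	return m->base[offset / sizeof(uint32_t)];
}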
@@ -447,6 +465,18 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
+/*
+ * The raw_reg_{read,write} macros are intended as a micro-optimization for
+ * interrupt handlers so that the pointer indirection on uncore->regs can
+ * be computed once (and presumably cached in a register) instead of generating
+ * extra load instructions for each MMIO access.
+ *
+ * Given that these macros are only intended for non-GSI interrupt registers
+ * (and the goal is to avoid extra instructions generated by the compiler),
+ * these macros do not account for uncore->gsi_offset. Any caller that needs
+ * to use these macros on a GSI register is responsible for adding the
+ * appropriate GSI offset to the 'base' parameter.
+ */
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 15311eaed848..69cdaaddc4a9 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -169,11 +169,23 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
* We want to get the same effect as if we received a termination
* interrupt, so just pretend that we did.
*/
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST;
queue_work(system_unbound_wq, &pxp->session_work);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static bool pxp_component_bound(struct intel_pxp *pxp)
+{
+ bool bound = false;
+
+ mutex_lock(&pxp->tee_mutex);
+ if (pxp->pxp_component)
+ bound = true;
+ mutex_unlock(&pxp->tee_mutex);
+
+ return bound;
}
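intel_pxp_start() now refuses to run until the mei/tee component has actually bound, polling pxp_component_bound() for up to 250 ms through the driver's wait_for() helper and returning -ENXIO on timeout. A user-space flavoured sketch of the same bounded polling loop; the 1 ms back-off, clock source, and is_bound() stand-in are illustrative choices, not what i915 does internally.

#include <stdbool.h>
#include <time.h>

static int poll_count;

static bool is_bound(void)
{
	return ++poll_count > 3;	/* stand-in condition: becomes true after a few polls */
}

/* Poll a condition until it holds or timeout_ms expires; true on success. */
static bool wait_for_bound(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (is_bound())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			return false;
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);	/* 1 ms back-off */
	}
}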
/*
@@ -187,6 +199,9 @@ int intel_pxp_start(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return -ENODEV;
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return -ENXIO;
+
mutex_lock(&pxp->arb_mutex);
if (pxp->arb_is_valid)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index e888b5124a07..4359e8be4101 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -47,9 +47,9 @@ static int pxp_terminate_set(void *data, u64 val)
return -ENODEV;
/* simulate a termination interrupt */
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
if (!wait_for_completion_timeout(&pxp->termination,
msecs_to_jiffies(100)))
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 04745f914407..c28be430718a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
return;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
if (unlikely(!iir))
return;
@@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
static inline void pxp_irq_reset(struct intel_gt *gt)
{
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void intel_pxp_irq_enable(struct intel_pxp *pxp)
{
struct intel_gt *gt = pxp_to_gt(pxp);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
if (!pxp->irq_enabled)
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
@@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp)
__pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
pxp->irq_enabled = true;
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void intel_pxp_irq_disable(struct intel_pxp *pxp)
@@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp)
*/
GEM_WARN_ON(intel_pxp_is_active(pxp));
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
pxp->irq_enabled = false;
__pxp_set_interrupts(gt, 0);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
pxp_irq_reset(gt);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 92b00b4de240..1bb5b5249157 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work)
intel_wakeref_t wakeref;
u32 events = 0;
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
events = fetch_and_zero(&pxp->session_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
if (!events)
return;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ab9f17fc85bc..e050a2de5fd1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1080,7 +1080,7 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL;
- obj = i915_gem_object_create_region(mr, size, 0, 0);
+ obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
if (IS_ERR(obj)) {
/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
if (PTR_ERR(obj) == -ENODEV && is_stolen)
@@ -2324,5 +2324,5 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index bdd290f2bf3c..aaf8a380e5c7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -49,5 +49,6 @@ selftest(perf, i915_perf_live_selftests)
selftest(slpc, intel_slpc_live_selftests)
selftest(guc, intel_guc_live_selftests)
selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
+selftest(guc_hang, intel_guc_hang_check)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 88db2e3d81d0..429c6d73b159 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -431,7 +431,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = i915_subtests(tests, i915);
+ err = i915_live_subtests(tests, i915);
destroy_empty_config(&i915->perf);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index c56a0c2cd2f7..818a4909c1f3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -971,7 +971,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- /* Force the wait wait now to avoid including it in the benchmark */
+ /* Force the wait now to avoid including it in the benchmark */
err = i915_vma_sync(vma);
if (err)
goto err_pin;
@@ -1821,7 +1821,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
static int switch_to_kernel_sync(struct intel_context *ce, int err)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 6921ba128015..71b52d5efef4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -51,9 +51,9 @@ static bool assert_vma(struct i915_vma *vma,
ok = false;
}
- if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
pr_err("VMA created with wrong type [%d]\n",
- vma->ggtt_view.type);
+ vma->gtt_view.type);
ok = false;
}
@@ -63,7 +63,7 @@ static bool assert_vma(struct i915_vma *vma,
static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *vma;
bool ok = true;
@@ -91,7 +91,7 @@ checked_vma_instance(struct drm_i915_gem_object *obj,
}
if (i915_vma_compare(vma, vma->vm,
- i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
+ i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {
pr_err("i915_vma_compare failed with itself\n");
return ERR_PTR(-EINVAL);
}
@@ -530,12 +530,12 @@ assert_remapped(struct drm_i915_gem_object *obj,
return sg;
}
-static unsigned int remapped_size(enum i915_ggtt_view_type view_type,
+static unsigned int remapped_size(enum i915_gtt_view_type view_type,
const struct intel_remapped_plane_info *a,
const struct intel_remapped_plane_info *b)
{
- if (view_type == I915_GGTT_VIEW_ROTATED)
+ if (view_type == I915_GTT_VIEW_ROTATED)
return a->dst_stride * a->width + b->dst_stride * b->width;
else
return a->dst_stride * a->height + b->dst_stride * b->height;
@@ -569,9 +569,9 @@ static int igt_vma_rotate_remap(void *arg)
{ }
}, *a, *b;
- enum i915_ggtt_view_type types[] = {
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_REMAPPED,
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
0,
}, *t;
const unsigned int max_pages = 64;
@@ -588,7 +588,7 @@ static int igt_vma_rotate_remap(void *arg)
for (t = types; *t; t++) {
for (a = planes; a->width; a++) {
for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
- struct i915_ggtt_view view = {
+ struct i915_gtt_view view = {
.type = *t,
.remapped.plane[0] = *a,
.remapped.plane[1] = *b,
@@ -602,11 +602,11 @@ static int igt_vma_rotate_remap(void *arg)
max_offset = max_pages - max_offset;
if (!plane_info[0].dst_stride)
- plane_info[0].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
plane_info[0].height :
plane_info[0].width;
if (!plane_info[1].dst_stride)
- plane_info[1].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[1].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
plane_info[1].height :
plane_info[1].width;
@@ -630,7 +630,7 @@ static int igt_vma_rotate_remap(void *arg)
expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]);
- if (view.type == I915_GGTT_VIEW_ROTATED &&
+ if (view.type == I915_GTT_VIEW_ROTATED &&
vma->size != expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * expected_pages, vma->size);
@@ -638,7 +638,7 @@ static int igt_vma_rotate_remap(void *arg)
goto out_object;
}
- if (view.type == I915_GGTT_VIEW_REMAPPED &&
+ if (view.type == I915_GTT_VIEW_REMAPPED &&
vma->size > expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * expected_pages, vma->size);
@@ -668,13 +668,13 @@ static int igt_vma_rotate_remap(void *arg)
sg = vma->pages->sgl;
for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
- if (view.type == I915_GGTT_VIEW_ROTATED)
+ if (view.type == I915_GTT_VIEW_ROTATED)
sg = assert_rotated(obj, &view.rotated, n, sg);
else
sg = assert_remapped(obj, &view.remapped, n, sg);
if (IS_ERR(sg)) {
pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n",
- view.type == I915_GGTT_VIEW_ROTATED ?
+ view.type == I915_GTT_VIEW_ROTATED ?
"rotated" : "remapped", n,
plane_info[0].width,
plane_info[0].height,
@@ -741,7 +741,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
}
static bool assert_pin(struct i915_vma *vma,
- struct i915_ggtt_view *view,
+ struct i915_gtt_view *view,
u64 size,
const char *name)
{
@@ -759,8 +759,8 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
- if (view && view->type != I915_GGTT_VIEW_NORMAL) {
- if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ if (memcmp(&vma->gtt_view, view, sizeof(*view))) {
pr_err("(%s) VMA mismatch upon creation!\n",
name);
ok = false;
@@ -772,9 +772,9 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
} else {
- if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
pr_err("Not the normal ggtt view! Found %d\n",
- vma->ggtt_view.type);
+ vma->gtt_view.type);
ok = false;
}
@@ -818,14 +818,14 @@ static int igt_vma_partial(void *arg)
nvma = 0;
for_each_prime_number_from(sz, 1, npages) {
for_each_prime_number_from(offset, 0, npages - sz) {
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
view.partial.offset = offset;
view.partial.size = sz;
if (sz == npages)
- view.type = I915_GGTT_VIEW_NORMAL;
+ view.type = I915_GTT_VIEW_NORMAL;
vma = checked_vma_instance(obj, vm, &view);
if (IS_ERR(vma)) {
@@ -976,9 +976,9 @@ static int igt_vma_remapped_gtt(void *arg)
{ }
}, *p;
- enum i915_ggtt_view_type types[] = {
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_REMAPPED,
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
0,
}, *t;
struct drm_i915_gem_object *obj;
@@ -996,7 +996,7 @@ static int igt_vma_remapped_gtt(void *arg)
for (t = types; *t; t++) {
for (p = planes; p->width; p++) {
- struct i915_ggtt_view view = {
+ struct i915_gtt_view view = {
.type = *t,
.rotated.plane[0] = *p,
};
@@ -1012,7 +1012,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
if (!plane_info[0].dst_stride)
- plane_info[0].dst_stride = *t == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].dst_stride = *t == I915_GTT_VIEW_ROTATED ?
p->height : p->width;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
@@ -1021,7 +1021,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- GEM_BUG_ON(vma->ggtt_view.type != *t);
+ GEM_BUG_ON(vma->gtt_view.type != *t);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -1035,7 +1035,7 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int offset;
u32 val = y << 16 | x;
- if (*t == I915_GGTT_VIEW_ROTATED)
+ if (*t == I915_GTT_VIEW_ROTATED)
offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
else
offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
@@ -1052,7 +1052,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);
+ GEM_BUG_ON(vma->gtt_view.type != I915_GTT_VIEW_NORMAL);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -1067,7 +1067,7 @@ static int igt_vma_remapped_gtt(void *arg)
u32 exp = y << 16 | x;
u32 val;
- if (*t == I915_GGTT_VIEW_ROTATED)
+ if (*t == I915_GTT_VIEW_ROTATED)
src_idx = rotated_index(&view.rotated, 0, x, y);
else
src_idx = remapped_index(&view.remapped, 0, x, y);
@@ -1076,7 +1076,7 @@ static int igt_vma_remapped_gtt(void *arg)
val = ioread32(&map[offset / sizeof(*map)]);
if (val != exp) {
pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
- *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
+ *t == I915_GTT_VIEW_ROTATED ? "Rotated" : "Remapped",
exp, val);
i915_vma_unpin_iomap(vma);
err = -EINVAL;
@@ -1103,5 +1103,5 @@ int i915_vma_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_vma_remapped_gtt),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9c31a16f8380..fff11c90f1fa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -115,6 +115,7 @@ static struct dev_pm_domain pm_domain = {
static void mock_gt_probe(struct drm_i915_private *i915)
{
i915->gt[0] = &i915->gt0;
+ i915->gt[0]->name = "Mock GT";
}
struct drm_i915_private *mock_gem_device(void)
@@ -172,14 +173,14 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- mkwrite_device_info(i915)->graphics.ver = -1;
+ RUNTIME_INFO(i915)->graphics.ip.ver = -1;
- mkwrite_device_info(i915)->page_sizes =
+ RUNTIME_INFO(i915)->page_sizes =
I915_GTT_PAGE_SIZE_4K |
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
intel_memory_regions_hw_probe(i915);
spin_lock_init(&i915->gpu_error.lock);
@@ -209,7 +210,7 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(to_gt(i915));
to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
- mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+ RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
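mock_gem_device() now writes RUNTIME_INFO() fields instead of going through mkwrite_device_info(): values that probing, fusing, or the mock setup may change live in a per-device runtime structure, while the static platform description stays const. A rough sketch of that split; every name below is illustrative, not the actual i915 layout.

#include <stdint.h>

/* Immutable per-platform description, shared and ideally const/rodata. */
struct device_static_info {
	uint8_t platform_id;
};

/* Per-device copy of the fields that may legitimately change at runtime. */
struct device_runtime_info {
	uint8_t graphics_ver;
	uint32_t platform_engine_mask;
	uint32_t memory_regions;
};

struct device_priv {
	const struct device_static_info *info;	/* never written after init */
	struct device_runtime_info runtime;	/* what a RUNTIME_INFO()-style macro returns */
};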
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index bb9738c7c825..975de4ff7313 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -3,7 +3,7 @@ config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 5c2b2277afbf..3ffc061d392b 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,7 +2,7 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 9b84df34a6a1..b4f82ebca532 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -8,7 +8,7 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -17,7 +17,7 @@
#include "dcss-dev.h"
#include "dcss-kms.h"
-DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
+DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops);
static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
@@ -28,7 +28,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
static const struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
drm_kms_helper_poll_init(drm);
- drm_bridge_connector_enable_hpd(kms->connector);
-
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index c29f343f33e5..ab6d32bad756 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -6,10 +6,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
@@ -147,7 +147,7 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = new_plane_state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_crtc_state *crtc_state;
int hdisplay, vdisplay;
int min, max;
@@ -156,8 +156,8 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
if (!fb || !new_plane_state->crtc)
return 0;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- WARN_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ WARN_ON(!dma_obj);
crtc_state = drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
@@ -218,26 +218,26 @@ static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
unsigned long p1_ba = 0, p2_ba = 0;
if (!format->is_yuv ||
format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
- p1_ba = cma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
format->char_per_block[0] * (state->src.x1 >> 16);
else if (format->format == DRM_FORMAT_UYVY ||
format->format == DRM_FORMAT_VYUY ||
format->format == DRM_FORMAT_YUYV ||
format->format == DRM_FORMAT_YVYU)
- p1_ba = cma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
2 * format->char_per_block[0] * (state->src.x1 >> 17);
if (format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
- p2_ba = cma_obj->paddr + fb->offsets[1] +
+ p2_ba = dma_obj->dma_addr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
(state->src.x1 >> 17)) << 1);
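The dcss changes (and the imx/ipuv3 hunks below) are the mechanical CMA-to-DMA GEM helper rename: drm_fb_cma_get_gem_obj()/cma_obj->paddr become drm_fb_dma_get_gem_obj()/dma_obj->dma_addr with no behavioural change, and the per-plane scanout address is derived the same way as before. A hedged sketch of that computation with the renamed helper; the wrapper function itself is hypothetical, only the DRM calls and fields come from the diff.

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

/* Scanout address of plane 0: GEM DMA address + fb offset + x/y displacement. */
static dma_addr_t demo_plane0_base(struct drm_framebuffer *fb, int x, int y)
{
	struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);

	return dma_obj->dma_addr + fb->offsets[0] +
	       fb->pitches[0] * y + fb->format->cpp[0] * x;
}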
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index a57812ec36b1..8dd8b0f912af 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -16,13 +16,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -34,7 +32,7 @@
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
-DEFINE_DRM_GEM_CMA_FOPS(imx_drm_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops);
void imx_drm_connector_destroy(struct drm_connector *connector)
{
@@ -154,7 +152,7 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
args->width = ALIGN(width, 8);
- ret = drm_gem_cma_dumb_create(file_priv, drm, args);
+ ret = drm_gem_dma_dumb_create(file_priv, drm, args);
if (ret)
return ret;
@@ -164,7 +162,7 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
static const struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
.ioctls = imx_drm_ioctls,
.num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
.fops = &imx_drm_driver_fops,
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index c3e1a3f14d30..e721bebda2bd 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -32,7 +32,7 @@ extern struct platform_driver ipu_drm_driver;
void imx_drm_mode_config_init(struct drm_device *drm);
-struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
+struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index f7863d6dea80..5f26090b0c98 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -18,8 +18,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ea5f594955df..dba4f7d81d69 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -8,13 +8,12 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <video/imx-ipu-v3.h>
@@ -126,14 +125,14 @@ static inline unsigned long
drm_plane_state_to_eba(struct drm_plane_state *state, int plane)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, plane);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, plane);
+ BUG_ON(!dma_obj);
- return cma_obj->paddr + fb->offsets[plane] + fb->pitches[plane] * y +
+ return dma_obj->dma_addr + fb->offsets[plane] + fb->pitches[plane] * y +
fb->format->cpp[plane] * x;
}
@@ -141,18 +140,18 @@ static inline unsigned long
drm_plane_state_to_ubo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
unsigned long eba = drm_plane_state_to_eba(state, 0);
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 1);
+ BUG_ON(!dma_obj);
x /= fb->format->hsub;
y /= fb->format->vsub;
- return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ return dma_obj->dma_addr + fb->offsets[1] + fb->pitches[1] * y +
fb->format->cpp[1] * x - eba;
}
@@ -160,18 +159,18 @@ static inline unsigned long
drm_plane_state_to_vbo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
unsigned long eba = drm_plane_state_to_eba(state, 0);
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 2);
+ BUG_ON(!dma_obj);
x /= fb->format->hsub;
y /= fb->format->vsub;
- return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ return dma_obj->dma_addr + fb->offsets[2] + fb->pitches[2] * y +
fb->format->cpp[2] * x - eba;
}
@@ -393,8 +392,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
can_position, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 090830bcbde7..a53f475d33df 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -8,7 +8,7 @@ config DRM_INGENIC
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Ingenic SoCs.
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index eb8208bfe5ab..ab0515d2c420 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -30,8 +30,8 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
@@ -41,7 +41,6 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -482,8 +481,8 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(priv_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
priv->soc_info->has_osd,
true);
if (ret)
@@ -670,12 +669,12 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
if (newstate && newstate->fb) {
if (priv->soc_info->map_noncoherent)
- drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate);
+ drm_fb_dma_sync_non_coherent(&priv->drm, oldstate, newstate);
crtc_state = newstate->crtc->state;
plane_id = !!(priv->soc_info->has_osd && plane != &priv->f0);
- addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
+ addr = drm_fb_dma_get_gem_addr(newstate->fb, newstate, 0);
width = newstate->src_w >> 16;
height = newstate->src_h >> 16;
cpp = newstate->fb->format->cpp[0];
@@ -915,7 +914,7 @@ static struct drm_gem_object *
ingenic_drm_gem_create_object(struct drm_device *drm, size_t size)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
@@ -948,7 +947,7 @@ static void ingenic_drm_destroy_state(struct drm_private_obj *obj,
kfree(priv_state);
}
-DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -961,7 +960,7 @@ static const struct drm_driver ingenic_drm_driver_data = {
.fops = &ingenic_drm_fops,
.gem_create_object = ingenic_drm_gem_create_object,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -1464,21 +1463,22 @@ static int ingenic_drm_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused ingenic_drm_suspend(struct device *dev)
+static int ingenic_drm_suspend(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(&priv->drm);
}
-static int __maybe_unused ingenic_drm_resume(struct device *dev)
+static int ingenic_drm_resume(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(&priv->drm);
}
-static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops,
+ ingenic_drm_suspend, ingenic_drm_resume);
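Dropping __maybe_unused works because DEFINE_SIMPLE_DEV_PM_OPS() keeps the suspend/resume callbacks referenced so the compiler can silently discard them when CONFIG_PM_SLEEP is off, and pm_sleep_ptr() (used further down in this file's hunks) NULLs the .pm pointer in that case, all without ifdefs. A minimal driver-side sketch of that pairing; the demo_* names are placeholders.

#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)  { return 0; }

/* No __maybe_unused needed: the macro keeps the callbacks referenced. */
static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = pm_sleep_ptr(&demo_pm_ops),	/* NULL when CONFIG_PM_SLEEP=n */
	},
};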
static const u32 jz4740_formats[] = {
DRM_FORMAT_XRGB1555,
@@ -1541,6 +1541,32 @@ static const struct jz_soc_info jz4725b_soc_info = {
.num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0),
};
+static const struct jz_soc_info jz4760_soc_info = {
+ .needs_dev_clk = false,
+ .has_osd = true,
+ .map_noncoherent = false,
+ .max_width = 1280,
+ .max_height = 720,
+ .max_burst = JZ_LCD_CTRL_BURST_32,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
+};
+
+static const struct jz_soc_info jz4760b_soc_info = {
+ .needs_dev_clk = false,
+ .has_osd = true,
+ .map_noncoherent = false,
+ .max_width = 1280,
+ .max_height = 720,
+ .max_burst = JZ_LCD_CTRL_BURST_64,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
+};
+
static const struct jz_soc_info jz4770_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
@@ -1572,6 +1598,8 @@ static const struct jz_soc_info jz4780_soc_info = {
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4760-lcd", .data = &jz4760_soc_info },
+ { .compatible = "ingenic,jz4760b-lcd", .data = &jz4760b_soc_info },
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ .compatible = "ingenic,jz4780-lcd", .data = &jz4780_soc_info },
{ /* sentinel */ },
@@ -1581,7 +1609,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
.name = "ingenic-drm",
- .pm = pm_ptr(&ingenic_drm_pm_ops),
+ .pm = pm_sleep_ptr(&ingenic_drm_pm_ops),
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
@@ -1616,4 +1644,4 @@ module_exit(ingenic_drm_exit);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("DRM driver for the Ingenic SoCs\n");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 32a50935aa6d..7a43505011a5 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -22,14 +22,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_property.h>
#include <drm/drm_vblank.h>
@@ -363,15 +362,15 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
}
if (ingenic_drm_map_noncoherent(ipu->master))
- drm_fb_cma_sync_non_coherent(ipu->drm, oldstate, newstate);
+ drm_fb_dma_sync_non_coherent(ipu->drm, oldstate, newstate);
/* New addresses will be committed in vblank handler... */
- ipu->addr_y = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
+ ipu->addr_y = drm_fb_dma_get_gem_addr(newstate->fb, newstate, 0);
if (finfo->num_planes > 1)
- ipu->addr_u = drm_fb_cma_get_gem_addr(newstate->fb, newstate,
+ ipu->addr_u = drm_fb_dma_get_gem_addr(newstate->fb, newstate,
1);
if (finfo->num_planes > 2)
- ipu->addr_v = drm_fb_cma_get_gem_addr(newstate->fb, newstate,
+ ipu->addr_v = drm_fb_dma_get_gem_addr(newstate->fb, newstate,
2);
if (!needs_modeset)
@@ -697,10 +696,12 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_crtc_state *crtc_state;
+ bool mode_changed;
if (property != ipu->sharpness_prop)
return -EINVAL;
+ mode_changed = val != ipu->sharpness;
ipu->sharpness = val;
if (state->crtc) {
@@ -708,7 +709,7 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- crtc_state->mode_changed = true;
+ crtc_state->mode_changed |= mode_changed;
}
return 0;
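The sharpness property handler above now only marks the CRTC state as needing a modeset when the value actually changed, and it ORs into mode_changed so flags already set elsewhere are preserved. A tiny self-contained sketch of that pattern; the structs and names are stand-ins for the DRM types.

#include <stdbool.h>

struct plane_cfg { unsigned int sharpness; };
struct crtc_state { bool mode_changed; };

/* Update the property, but only request a modeset if the value really changed. */
static void set_sharpness(struct plane_cfg *p, struct crtc_state *cs, unsigned int val)
{
	bool changed = (val != p->sharpness);

	p->sharpness = val;
	if (cs)
		cs->mode_changed |= changed;	/* |= keeps modesets requested by others */
}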
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index 5fdd43dad507..fd011367db1d 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -3,7 +3,7 @@ config DRM_KMB_DISPLAY
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have Intel's KeemBay SOC which integrates
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 76fef0880504..2382ccb3ee99 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -433,14 +433,14 @@ static void kmb_irq_uninstall(struct drm_device *drm)
free_irq(kmb->irq_lcd, drm);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
/* GEM Operations */
.fops = &fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 89d055a089a6..a42f63f6f957 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -8,13 +8,12 @@
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include "kmb_drv.h"
#include "kmb_plane.h"
@@ -136,8 +135,8 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
can_position, true);
}
@@ -404,7 +403,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
- addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
+ addr[Y_PLANE] = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id),
addr[Y_PLANE] + fb->offsets[0]);
val = get_pixel_format(fb->format->format);
@@ -416,7 +415,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
- addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state,
+ addr[U_PLANE] = drm_fb_dma_get_gem_addr(fb, new_plane_state,
U_PLANE);
/* check if Cb/Cr is swapped*/
if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER))
@@ -437,7 +436,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
LCD_LAYERn_DMA_CR_LINE_WIDTH(plane_id),
((width) * fb->format->cpp[0]));
- addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb,
+ addr[V_PLANE] = drm_fb_dma_get_gem_addr(fb,
new_plane_state,
V_PLANE);
diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
index 300b2be07385..fa7a88368809 100644
--- a/drivers/gpu/drm/logicvc/Kconfig
+++ b/drivers/gpu/drm/logicvc/Kconfig
@@ -3,7 +3,7 @@ config DRM_LOGICVC
depends on DRM
depends on OF || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
DRM display driver for the logiCVC programmable logic block from Xylon
diff --git a/drivers/gpu/drm/logicvc/logicvc_crtc.c b/drivers/gpu/drm/logicvc/logicvc_crtc.c
index c94bb9bb456b..43a675d03808 100644
--- a/drivers/gpu/drm/logicvc/logicvc_crtc.c
+++ b/drivers/gpu/drm/logicvc/logicvc_crtc.c
@@ -12,7 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 65a050176c33..cc9a4e965f77 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
@@ -29,9 +29,9 @@
#include "logicvc_of.h"
#include "logicvc_regs.h"
-DEFINE_DRM_GEM_CMA_FOPS(logicvc_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(logicvc_drm_fops);
-static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+static int logicvc_drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
{
@@ -40,7 +40,7 @@ static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
/* Stride is always fixed to its configuration value. */
args->pitch = logicvc->config.row_stride * DIV_ROUND_UP(args->bpp, 8);
- return drm_gem_cma_dumb_create_internal(file_priv, drm_dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm_dev, args);
}
static struct drm_driver logicvc_drm_driver = {
@@ -54,7 +54,7 @@ static struct drm_driver logicvc_drm_driver = {
.major = 1,
.minor = 0,
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create),
};
static struct regmap_config logicvc_drm_regmap_config = {
diff --git a/drivers/gpu/drm/logicvc/logicvc_interface.c b/drivers/gpu/drm/logicvc/logicvc_interface.c
index c73592f6c406..815cebb4c4ca 100644
--- a/drivers/gpu/drm/logicvc/logicvc_interface.c
+++ b/drivers/gpu/drm/logicvc/logicvc_interface.c
@@ -12,7 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c
index 441e3cfce4cf..464000aea765 100644
--- a/drivers/gpu/drm/logicvc/logicvc_layer.c
+++ b/drivers/gpu/drm/logicvc/logicvc_layer.c
@@ -10,11 +10,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
@@ -117,8 +116,8 @@ static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
}
}
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ min_scale = DRM_PLANE_NO_SCALING;
+ max_scale = DRM_PLANE_NO_SCALING;
can_position = (drm_plane->type == DRM_PLANE_TYPE_OVERLAY &&
layer->index != (logicvc->config.layers_count - 1) &&
@@ -158,7 +157,7 @@ static void logicvc_plane_atomic_update(struct drm_plane *drm_plane,
new_state->crtc_h - 1);
if (logicvc->caps->layer_address) {
- phys_addr_t fb_addr = drm_fb_cma_get_gem_addr(fb, new_state, 0);
+ phys_addr_t fb_addr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
regmap_write(logicvc->regmap, LOGICVC_LAYER_ADDRESS_REG(index),
fb_addr);
@@ -281,7 +280,7 @@ int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
return -ENOMEM;
}
- fb_addr = drm_fb_cma_get_gem_addr(fb, state, 0);
+ fb_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
if (fb_addr < logicvc->reserved_mem_base) {
drm_err(drm_dev,
"Framebuffer memory below reserved memory base!\n");
diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.c b/drivers/gpu/drm/logicvc/logicvc_mode.c
index 11940704f644..d8207ffda1af 100644
--- a/drivers/gpu/drm/logicvc/logicvc_mode.c
+++ b/drivers/gpu/drm/logicvc/logicvc_mode.c
@@ -10,9 +10,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index d0bf1bc8da3f..4f3d68e11bc1 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -10,7 +10,7 @@ config DRM_MCDE
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the ST-Ericsson MCDE
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 4df477540d07..52043a12a2e8 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -11,11 +11,11 @@
#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
@@ -165,7 +165,7 @@ static int mcde_display_check(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
- u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3) {
@@ -1424,7 +1424,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
* from the DRM core before the display is enabled.
*/
if (fb) {
- mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
+ mcde_set_extsrc(mcde, drm_fb_dma_get_gem_addr(fb, pstate, 0));
dev_info_once(mcde->dev, "first update of display contents\n");
/*
* Usually the flow is already active, unless we are in
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index e601baa87e55..1c4482ad507d 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -37,7 +37,7 @@
* (effectively using channels 0..3) for concurrent use.
*
* In the current DRM/KMS setup, we use one external source, one overlay,
- * one FIFO and one formatter which we connect to the simple CMA framebuffer
+ * one FIFO and one formatter which we connect to the simple DMA framebuffer
* helpers. We then provide a bridge to the DSI port, and on the DSI port
 * bridge we connect a panel bridge or other bridge. This may be subject
* to change as we exploit more of the hardware capabilities.
@@ -68,10 +68,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
@@ -198,7 +198,7 @@ static int mcde_modeset_init(struct drm_device *drm)
return 0;
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver mcde_drm_driver = {
.driver_features =
@@ -212,7 +212,7 @@ static const struct drm_driver mcde_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static int mcde_drm_bind(struct device *dev)
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 2976d21e9a34..369e495d0c3e 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -7,7 +7,7 @@ config DRM_MEDIATEK
depends on HAVE_ARM_SMCCC
depends on OF
depends on MTK_MMSYS
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
@@ -21,6 +21,15 @@ config DRM_MEDIATEK
This driver provides kernel mode setting and
buffer management to userspace.
+config DRM_MEDIATEK_DP
+ tristate "DRM DPTX Support for MediaTek SoCs"
+ depends on DRM_MEDIATEK
+ select PHY_MTK_DP
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ help
+	  DRM/KMS DisplayPort driver for MediaTek SoCs.
+
config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 6e604a933ed0..3517d1c65cd7 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -23,3 +23,5 @@ mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
+
+obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
new file mode 100644
index 000000000000..9d085c05c49c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -0,0 +1,2663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019-2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre
+ */
+
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+#include <sound/hdmi-codec.h>
+#include <video/videomode.h>
+
+#include "mtk_dp_reg.h"
+
+#define MTK_DP_SIP_CONTROL_AARCH32 MTK_SIP_SMC_CMD(0x523)
+#define MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE (BIT(0) | BIT(5))
+#define MTK_DP_SIP_ATF_VIDEO_UNMUTE BIT(5)
+
+#define MTK_DP_THREAD_CABLE_STATE_CHG BIT(0)
+#define MTK_DP_THREAD_HPD_EVENT BIT(1)
+
+#define MTK_DP_4P1T 4
+#define MTK_DP_HDE 2
+#define MTK_DP_PIX_PER_ADDR 2
+#define MTK_DP_AUX_WAIT_REPLY_COUNT 20
+#define MTK_DP_TBC_BUF_READ_START_ADDR 0x8
+#define MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY 5
+#define MTK_DP_TRAIN_DOWNSCALE_RETRY 10
+#define MTK_DP_VERSION 0x11
+#define MTK_DP_SDP_AUI 0x4
+
+enum {
+ MTK_DP_CAL_GLB_BIAS_TRIM = 0,
+ MTK_DP_CAL_CLKTX_IMPSE,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3,
+ MTK_DP_CAL_MAX,
+};
+
+struct mtk_dp_train_info {
+ bool sink_ssc;
+ bool cable_plugged_in;
+	/* link_rate is in multiples of 0.27 Gbps */
+ int link_rate;
+ int lane_count;
+ unsigned int channel_eq_pattern;
+};
+
+struct mtk_dp_audio_cfg {
+ bool detect_monitor;
+ int sad_count;
+ int sample_rate;
+ int word_length_bits;
+ int channels;
+};
+
+struct mtk_dp_info {
+ enum dp_pixelformat format;
+ struct videomode vm;
+ struct mtk_dp_audio_cfg audio_cur_cfg;
+};
+
+struct mtk_dp_efuse_fmt {
+ unsigned short idx;
+ unsigned short shift;
+ unsigned short mask;
+ unsigned short min_val;
+ unsigned short max_val;
+ unsigned short default_val;
+};
+
+struct mtk_dp {
+ bool enabled;
+ bool need_debounce;
+ u8 max_lanes;
+ u8 max_linkrate;
+ u8 rx_cap[DP_RECEIVER_CAP_SIZE];
+ u32 cal_data[MTK_DP_CAL_MAX];
+ u32 irq_thread_handle;
+ /* irq_thread_lock is used to protect irq_thread_handle */
+ spinlock_t irq_thread_lock;
+
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector *conn;
+ struct drm_device *drm_dev;
+ struct drm_dp_aux aux;
+
+ const struct mtk_dp_data *data;
+ struct mtk_dp_info info;
+ struct mtk_dp_train_info train_info;
+
+ struct platform_device *phy_dev;
+ struct phy *phy;
+ struct regmap *regs;
+ struct timer_list debounce_timer;
+
+ /* For audio */
+ bool audio_enable;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct platform_device *audio_pdev;
+
+ struct device *codec_dev;
+ /* protect the plugged_cb as it's used in both bridge ops and audio */
+ struct mutex update_plugged_status_lock;
+};
+
+struct mtk_dp_data {
+ int bridge_type;
+ unsigned int smc_cmd;
+ const struct mtk_dp_efuse_fmt *efuse_fmt;
+ bool audio_supported;
+};
+
+static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 3,
+ .shift = 27,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 9,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 2,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 2,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 2,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 2,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 2,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 2,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 2,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 2,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
+static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 0,
+ .shift = 27,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 13,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 1,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 1,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 1,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 1,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 1,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 1,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 1,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 1,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
+static struct regmap_config mtk_dp_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SEC_OFFSET + 0x90,
+ .name = "mtk-dp-registers",
+};
+
+static struct mtk_dp *mtk_dp_from_bridge(struct drm_bridge *b)
+{
+ return container_of(b, struct mtk_dp, bridge);
+}
+
+static u32 mtk_dp_read(struct mtk_dp *mtk_dp, u32 offset)
+{
+ u32 read_val;
+ int ret;
+
+ ret = regmap_read(mtk_dp->regs, offset, &read_val);
+ if (ret) {
+ dev_err(mtk_dp->dev, "Failed to read register 0x%x: %d\n",
+ offset, ret);
+ return 0;
+ }
+
+ return read_val;
+}
+
+static int mtk_dp_write(struct mtk_dp *mtk_dp, u32 offset, u32 val)
+{
+ int ret = regmap_write(mtk_dp->regs, offset, val);
+
+ if (ret)
+ dev_err(mtk_dp->dev,
+ "Failed to write register 0x%x with value 0x%x\n",
+ offset, val);
+ return ret;
+}
+
+static int mtk_dp_update_bits(struct mtk_dp *mtk_dp, u32 offset,
+ u32 val, u32 mask)
+{
+ int ret = regmap_update_bits(mtk_dp->regs, offset, mask, val);
+
+ if (ret)
+ dev_err(mtk_dp->dev,
+ "Failed to update register 0x%x with value 0x%x, mask 0x%x\n",
+ offset, val, mask);
+ return ret;
+}
+
+static void mtk_dp_bulk_16bit_write(struct mtk_dp *mtk_dp, u32 offset, u8 *buf,
+ size_t length)
+{
+ int i;
+
+ /* 2 bytes per register */
+ for (i = 0; i < length; i += 2) {
+ u32 val = buf[i] | (i + 1 < length ? buf[i + 1] << 8 : 0);
+
+ if (mtk_dp_write(mtk_dp, offset + i * 2, val))
+ return;
+ }
+}
+
+static void mtk_dp_msa_bypass_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ u32 mask = HTOTAL_SEL_DP_ENC0_P0 | VTOTAL_SEL_DP_ENC0_P0 |
+ HSTART_SEL_DP_ENC0_P0 | VSTART_SEL_DP_ENC0_P0 |
+ HWIDTH_SEL_DP_ENC0_P0 | VHEIGHT_SEL_DP_ENC0_P0 |
+ HSP_SEL_DP_ENC0_P0 | HSW_SEL_DP_ENC0_P0 |
+ VSP_SEL_DP_ENC0_P0 | VSW_SEL_DP_ENC0_P0;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030, enable ? 0 : mask, mask);
+}
+
+static void mtk_dp_set_msa(struct mtk_dp *mtk_dp)
+{
+ struct drm_display_mode mode;
+ struct videomode *vm = &mtk_dp->info.vm;
+
+ drm_display_mode_from_videomode(vm, &mode);
+
+ /* horizontal */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3010,
+ mode.htotal, HTOTAL_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3018,
+ vm->hsync_len + vm->hback_porch,
+ HSTART_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028,
+ vm->hsync_len, HSW_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028,
+ 0, HSP_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3020,
+ vm->hactive, HWIDTH_SW_DP_ENC0_P0_MASK);
+
+ /* vertical */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3014,
+ mode.vtotal, VTOTAL_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_301C,
+ vm->vsync_len + vm->vback_porch,
+ VSTART_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C,
+ vm->vsync_len, VSW_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C,
+ 0, VSP_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3024,
+ vm->vactive, VHEIGHT_SW_DP_ENC0_P0_MASK);
+
+ /* horizontal */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3064,
+ vm->hactive, HDE_NUM_LAST_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3154,
+ mode.htotal, PGEN_HTOTAL_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3158,
+ vm->hfront_porch,
+ PGEN_HSYNC_RISING_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_315C,
+ vm->hsync_len,
+ PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3160,
+ vm->hback_porch + vm->hsync_len,
+ PGEN_HFDE_START_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3164,
+ vm->hactive,
+ PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK);
+
+ /* vertical */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3168,
+ mode.vtotal,
+ PGEN_VTOTAL_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_316C,
+ vm->vfront_porch,
+ PGEN_VSYNC_RISING_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3170,
+ vm->vsync_len,
+ PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3174,
+ vm->vback_porch + vm->vsync_len,
+ PGEN_VFDE_START_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3178,
+ vm->vactive,
+ PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK);
+}
+
+static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
+ enum dp_pixelformat color_format)
+{
+ u32 val;
+
+ /* update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ color_format << DP_TEST_COLOR_FORMAT_SHIFT,
+ DP_TEST_COLOR_FORMAT_MASK);
+
+ switch (color_format) {
+ case DP_PIXELFORMAT_YUV422:
+ val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
+ break;
+ case DP_PIXELFORMAT_RGB:
+ val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
+ break;
+ default:
+ drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
+ color_format);
+ return -EINVAL;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
+ return 0;
+}
+
+static void mtk_dp_set_color_depth(struct mtk_dp *mtk_dp)
+{
+	/* Only 8-bit color depth is currently supported */
+ /* Update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ DP_MSA_MISC_8_BPC, DP_TEST_BIT_DEPTH_MASK);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT,
+ VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_config_mn_mode(struct mtk_dp *mtk_dp)
+{
+ /* 0: hw mode, 1: sw mode */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_set_sram_read_start(struct mtk_dp *mtk_dp, u32 val)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ val, SRAM_START_READ_THRD_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_setup_encoder(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ VIDEO_MN_GEN_EN_DP_ENC0_P0,
+ VIDEO_MN_GEN_EN_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040,
+ SDP_DOWN_CNT_DP_ENC0_P0_VAL,
+ SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364,
+ SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL,
+ SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3300,
+ VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL << 8,
+ VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364,
+ FIFO_READ_START_POINT_DP_ENC1_P0_VAL << 12,
+ FIFO_READ_START_POINT_DP_ENC1_P0_MASK);
+ mtk_dp_write(mtk_dp, MTK_DP_ENC1_P0_3368, DP_ENC1_P0_3368_VAL);
+}
+
+static void mtk_dp_pg_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3038,
+ enable ? VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK : 0,
+ VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31B0,
+ PGEN_PATTERN_SEL_VAL << 4, PGEN_PATTERN_SEL_MASK);
+}
+
+static void mtk_dp_audio_setup_channels(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ u32 channel_enable_bits;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3324,
+ AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX,
+ AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK);
+
+ /* audio channel count change reset */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4,
+ DP_ENC_DUMMY_RW_1, DP_ENC_DUMMY_RW_1);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3304,
+ AU_PRTY_REGEN_DP_ENC1_P0_MASK |
+ AU_CH_STS_REGEN_DP_ENC1_P0_MASK |
+ AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK,
+ AU_PRTY_REGEN_DP_ENC1_P0_MASK |
+ AU_CH_STS_REGEN_DP_ENC1_P0_MASK |
+ AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK);
+
+ switch (cfg->channels) {
+ case 2:
+ channel_enable_bits = AUDIO_2CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_2CH_EN_DP_ENC0_P0_MASK;
+ break;
+ case 8:
+ default:
+ channel_enable_bits = AUDIO_8CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_8CH_EN_DP_ENC0_P0_MASK;
+ break;
+ }
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088,
+ channel_enable_bits | AU_EN_DP_ENC0_P0,
+ AUDIO_2CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_2CH_EN_DP_ENC0_P0_MASK |
+ AUDIO_8CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_8CH_EN_DP_ENC0_P0_MASK |
+ AU_EN_DP_ENC0_P0);
+
+ /* audio channel count change reset */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4, 0, DP_ENC_DUMMY_RW_1);
+
+ /* enable audio reset */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4,
+ DP_ENC_DUMMY_RW_1_AUDIO_RST_EN,
+ DP_ENC_DUMMY_RW_1_AUDIO_RST_EN);
+}
+
+static void mtk_dp_audio_channel_status_set(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ struct snd_aes_iec958 iec = { 0 };
+
+ switch (cfg->sample_rate) {
+ case 32000:
+ iec.status[3] = IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ iec.status[3] = IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ iec.status[3] = IEC958_AES3_CON_FS_48000;
+ break;
+ case 88200:
+ iec.status[3] = IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ iec.status[3] = IEC958_AES3_CON_FS_96000;
+ break;
+ case 192000:
+ iec.status[3] = IEC958_AES3_CON_FS_192000;
+ break;
+ default:
+ iec.status[3] = IEC958_AES3_CON_FS_NOTID;
+ break;
+ }
+
+ switch (cfg->word_length_bits) {
+ case 16:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16;
+ break;
+ case 20:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16 |
+ IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ case 24:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_24_20 |
+ IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ default:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_NOTID;
+ }
+
+ /* IEC 60958 consumer channel status bits */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_308C,
+ 0, CH_STATUS_0_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3090,
+ iec.status[3] << 8, CH_STATUS_1_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3094,
+ iec.status[4], CH_STATUS_2_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_audio_sdp_asp_set_channels(struct mtk_dp *mtk_dp,
+ int channels)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_312C,
+ (min(8, channels) - 1) << 8,
+ ASP_HB2_DP_ENC0_P0_MASK | ASP_HB3_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_audio_set_divider(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30BC,
+ AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
+ AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_sdp_trigger_aui(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280,
+ MTK_DP_SDP_AUI, SDP_PACKET_TYPE_DP_ENC1_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280,
+ SDP_PACKET_W_DP_ENC1_P0, SDP_PACKET_W_DP_ENC1_P0);
+}
+
+static void mtk_dp_sdp_set_data(struct mtk_dp *mtk_dp, u8 *data_bytes)
+{
+ mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_ENC1_P0_3200,
+ data_bytes, 0x10);
+}
+
+static void mtk_dp_sdp_set_header_aui(struct mtk_dp *mtk_dp,
+ struct dp_sdp_header *header)
+{
+ u32 db_addr = MTK_DP_ENC0_P0_30D8 + (MTK_DP_SDP_AUI - 1) * 8;
+
+ mtk_dp_bulk_16bit_write(mtk_dp, db_addr, (u8 *)header, 4);
+}
+
+static void mtk_dp_disable_sdp_aui(struct mtk_dp *mtk_dp)
+{
+ /* Disable periodic send */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc, 0,
+ 0xff << ((MTK_DP_ENC0_P0_30A8 & 3) * 8));
+}
+
+static void mtk_dp_setup_sdp_aui(struct mtk_dp *mtk_dp,
+ struct dp_sdp *sdp)
+{
+ u32 shift;
+
+ mtk_dp_sdp_set_data(mtk_dp, sdp->db);
+ mtk_dp_sdp_set_header_aui(mtk_dp, &sdp->sdp_header);
+ mtk_dp_disable_sdp_aui(mtk_dp);
+
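+	/*
+	 * The periodic-send enable for the AUI SDP is a byte field inside a
+	 * 32-bit register: align the register address and shift to the
+	 * matching byte lane.
+	 */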
+ shift = (MTK_DP_ENC0_P0_30A8 & 3) * 8;
+
+ mtk_dp_sdp_trigger_aui(mtk_dp);
+ /* Enable periodic sending */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc,
+ 0x05 << shift, 0xff << shift);
+}
+
+static void mtk_dp_aux_irq_clear(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_write(mtk_dp, MTK_DP_AUX_P0_3640, DP_AUX_P0_3640_VAL);
+}
+
+static void mtk_dp_aux_set_cmd(struct mtk_dp *mtk_dp, u8 cmd, u32 addr)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3644,
+ cmd, MCU_REQUEST_COMMAND_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3648,
+ addr, MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_364C,
+ addr >> 16, MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK);
+}
+
+static void mtk_dp_aux_clear_fifo(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650,
+ MCU_ACK_TRAN_COMPLETE_AUX_TX_P0,
+ MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 |
+ PHY_FIFO_RST_AUX_TX_P0_MASK |
+ MCU_REQ_DATA_NUM_AUX_TX_P0_MASK);
+}
+
+static void mtk_dp_aux_request_ready(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3630,
+ AUX_TX_REQUEST_READY_AUX_TX_P0,
+ AUX_TX_REQUEST_READY_AUX_TX_P0);
+}
+
+static void mtk_dp_aux_fill_write_fifo(struct mtk_dp *mtk_dp, u8 *buf,
+ size_t length)
+{
+ mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_AUX_P0_3708, buf, length);
+}
+
+static void mtk_dp_aux_read_rx_fifo(struct mtk_dp *mtk_dp, u8 *buf,
+ size_t length, int read_delay)
+{
+ int read_pos;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620,
+ 0, AUX_RD_MODE_AUX_TX_P0_MASK);
+
+ for (read_pos = 0; read_pos < length; read_pos++) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620,
+ AUX_RX_FIFO_READ_PULSE_TX_P0,
+ AUX_RX_FIFO_READ_PULSE_TX_P0);
+
+ /* Hardware needs time to update the data */
+ usleep_range(read_delay, read_delay * 2);
+ buf[read_pos] = (u8)(mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3620) &
+ AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK);
+ }
+}
+
+static void mtk_dp_aux_set_length(struct mtk_dp *mtk_dp, size_t length)
+{
+ if (length > 0) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650,
+ (length - 1) << 12,
+ MCU_REQ_DATA_NUM_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
+ 0,
+ AUX_NO_LENGTH_AUX_TX_P0 |
+ AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
+ AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
+ } else {
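+		/*
+		 * A zero-length request is signalled with AUX_NO_LENGTH
+		 * instead of a byte count.
+		 */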
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
+ AUX_NO_LENGTH_AUX_TX_P0,
+ AUX_NO_LENGTH_AUX_TX_P0 |
+ AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
+ AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
+ }
+}
+
+static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read)
+{
+ int wait_reply = MTK_DP_AUX_WAIT_REPLY_COUNT;
+
+ while (--wait_reply) {
+ u32 aux_irq_status;
+
+ if (is_read) {
+ u32 fifo_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3618);
+
+ if (fifo_status &
+ (AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK |
+ AUX_RX_FIFO_FULL_AUX_TX_P0_MASK)) {
+ return 0;
+ }
+ }
+
+ aux_irq_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3640);
+ if (aux_irq_status & AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0)
+ return 0;
+
+ if (aux_irq_status & AUX_400US_TIMEOUT_IRQ_AUX_TX_P0)
+ return -ETIMEDOUT;
+
+ /* Give the hardware a chance to reach completion before retrying */
+ usleep_range(100, 500);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+ u32 addr, u8 *buf, size_t length)
+{
+ int ret;
+ u32 reply_cmd;
+
+ if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES ||
+ (cmd == DP_AUX_NATIVE_READ && !length)))
+ return -EINVAL;
+
+ if (!is_read)
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704,
+ AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0,
+ AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0);
+
+ /* We need to clear fifo and irq before sending commands to the sink device. */
+ mtk_dp_aux_clear_fifo(mtk_dp);
+ mtk_dp_aux_irq_clear(mtk_dp);
+
+ mtk_dp_aux_set_cmd(mtk_dp, cmd, addr);
+ mtk_dp_aux_set_length(mtk_dp, length);
+
+ if (!is_read) {
+ if (length)
+ mtk_dp_aux_fill_write_fifo(mtk_dp, buf, length);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704,
+ AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK,
+ AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK);
+ }
+
+ mtk_dp_aux_request_ready(mtk_dp);
+
+ /* Wait for feedback from sink device. */
+ ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read);
+
+ reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
+ AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
+
+ if (ret || reply_cmd) {
+ u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
+ AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
+ if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
+ drm_err(mtk_dp->drm_dev,
+ "AUX Rx Aux hang, need SW reset\n");
+ return -EIO;
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ if (!length) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
+ 0,
+ AUX_NO_LENGTH_AUX_TX_P0 |
+ AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
+ AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
+ } else if (is_read) {
+ int read_delay;
+
+ if (cmd == (DP_AUX_I2C_READ | DP_AUX_I2C_MOT) ||
+ cmd == DP_AUX_I2C_READ)
+ read_delay = 500;
+ else
+ read_delay = 100;
+
+ mtk_dp_aux_read_rx_fifo(mtk_dp, buf, length, read_delay);
+ }
+
+ return 0;
+}
+
+static void mtk_dp_set_swing_pre_emphasis(struct mtk_dp *mtk_dp, int lane_num,
+ int swing_val, int preemphasis)
+{
+ u32 lane_shift = lane_num * DP_TX1_VOLT_SWING_SHIFT;
+
+ dev_dbg(mtk_dp->dev,
+ "link training: swing_val = 0x%x, pre-emphasis = 0x%x\n",
+ swing_val, preemphasis);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
+ swing_val << (DP_TX0_VOLT_SWING_SHIFT + lane_shift),
+ DP_TX0_VOLT_SWING_MASK << lane_shift);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
+ preemphasis << (DP_TX0_PRE_EMPH_SHIFT + lane_shift),
+ DP_TX0_PRE_EMPH_MASK << lane_shift);
+}
+
+static void mtk_dp_reset_swing_pre_emphasis(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
+ 0,
+ DP_TX0_VOLT_SWING_MASK |
+ DP_TX1_VOLT_SWING_MASK |
+ DP_TX2_VOLT_SWING_MASK |
+ DP_TX3_VOLT_SWING_MASK |
+ DP_TX0_PRE_EMPH_MASK |
+ DP_TX1_PRE_EMPH_MASK |
+ DP_TX2_PRE_EMPH_MASK |
+ DP_TX3_PRE_EMPH_MASK);
+}
+
+static u32 mtk_dp_swirq_get_clear(struct mtk_dp *mtk_dp)
+{
+ u32 irq_status = mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_35D0) &
+ SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK;
+
+ if (irq_status) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8,
+ irq_status, SW_IRQ_CLR_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8,
+ 0, SW_IRQ_CLR_DP_TRANS_P0_MASK);
+ }
+
+ return irq_status;
+}
+
+static u32 mtk_dp_hwirq_get_clear(struct mtk_dp *mtk_dp)
+{
+ u32 irq_status = (mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3418) &
+ IRQ_STATUS_DP_TRANS_P0_MASK) >> 12;
+
+ if (irq_status) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
+ irq_status, IRQ_CLR_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
+ 0, IRQ_CLR_DP_TRANS_P0_MASK);
+ }
+
+ return irq_status;
+}
+
+static void mtk_dp_hwirq_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
+ enable ? 0 :
+ IRQ_MASK_DP_TRANS_P0_DISC_IRQ |
+ IRQ_MASK_DP_TRANS_P0_CONN_IRQ |
+ IRQ_MASK_DP_TRANS_P0_INT_IRQ,
+ IRQ_MASK_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_initialize_settings(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_342C,
+ XTAL_FREQ_DP_TRANS_P0_DEFAULT,
+ XTAL_FREQ_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3540,
+ FEC_CLOCK_EN_MODE_DP_TRANS_P0,
+ FEC_CLOCK_EN_MODE_DP_TRANS_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31EC,
+ AUDIO_CH_SRC_SEL_DP_ENC0_P0,
+ AUDIO_CH_SRC_SEL_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C,
+ 0, SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_IRQ_MASK,
+ IRQ_MASK_AUX_TOP_IRQ, IRQ_MASK_AUX_TOP_IRQ);
+}
+
+static void mtk_dp_initialize_hpd_detect_settings(struct mtk_dp *mtk_dp)
+{
+ u32 val;
+ /* Debounce threshold */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
+ 8, HPD_DEB_THD_DP_TRANS_P0_MASK);
+
+ val = (HPD_INT_THD_DP_TRANS_P0_LOWER_500US |
+ HPD_INT_THD_DP_TRANS_P0_UPPER_1100US) << 4;
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
+ val, HPD_INT_THD_DP_TRANS_P0_MASK);
+
+ /*
+ * Connect threshold 1.5ms + 5 x 0.1ms = 2ms
+ * Disconnect threshold 1.5ms + 5 x 0.1ms = 2ms
+ */
+ val = (5 << 8) | (5 << 12);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
+ val,
+ HPD_DISC_THD_DP_TRANS_P0_MASK |
+ HPD_CONN_THD_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3430,
+ HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT,
+ HPD_INT_THD_ECO_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_initialize_aux_settings(struct mtk_dp *mtk_dp)
+{
+ /* modify timeout threshold = 0x1595 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_360C,
+ AUX_TIMEOUT_THR_AUX_TX_P0_VAL,
+ AUX_TIMEOUT_THR_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3658,
+ 0, AUX_TX_OV_EN_AUX_TX_P0_MASK);
+ /* 25 for 26M */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3634,
+ AUX_TX_OVER_SAMPLE_RATE_FOR_26M << 8,
+ AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK);
+ /* 13 for 26M */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3614,
+ AUX_RX_UI_CNT_THR_AUX_FOR_26M,
+ AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_37C8,
+ MTK_ATOP_EN_AUX_TX_P0,
+ MTK_ATOP_EN_AUX_TX_P0);
+}
+
+static void mtk_dp_initialize_digital_settings(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C,
+ 0, VBID_VIDEO_MUTE_DP_ENC0_P0_MASK);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3368,
+ BS2BS_MODE_DP_ENC1_P0_VAL << 12,
+ BS2BS_MODE_DP_ENC1_P0_MASK);
+
+ /* dp tx encoder reset all sw */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0,
+ DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0);
+
+ /* Wait for sw reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0);
+}
+
+static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C,
+ DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0,
+ DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
+
+ /* Wait for sw reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C,
+ 0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
+}
+
+static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0,
+ lanes == 0 ? 0 : DP_TRANS_DUMMY_RW_0,
+ DP_TRANS_DUMMY_RW_0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
+ lanes, LANE_NUM_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_34A4,
+ lanes << 2, LANE_NUM_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp)
+{
+ const struct mtk_dp_efuse_fmt *fmt;
+ struct device *dev = mtk_dp->dev;
+ struct nvmem_cell *cell;
+ u32 *cal_data = mtk_dp->cal_data;
+ u32 *buf;
+ int i;
+ size_t len;
+
+ cell = nvmem_cell_get(dev, "dp_calibration_data");
+ if (IS_ERR(cell)) {
+ dev_warn(dev, "Failed to get nvmem cell dp_calibration_data\n");
+ goto use_default_val;
+ }
+
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf) || ((len / sizeof(u32)) != 4)) {
+		dev_warn(dev, "Failed to read nvmem cell dp_calibration_data\n");
+
+ if (!IS_ERR(buf))
+ kfree(buf);
+
+ goto use_default_val;
+ }
+
+ for (i = 0; i < MTK_DP_CAL_MAX; i++) {
+ fmt = &mtk_dp->data->efuse_fmt[i];
+ cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask;
+
+ if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) {
+ dev_warn(mtk_dp->dev, "Invalid efuse data, idx = %d\n", i);
+ kfree(buf);
+ goto use_default_val;
+ }
+ }
+ kfree(buf);
+
+ return;
+
+use_default_val:
+ dev_warn(mtk_dp->dev, "Use default calibration data\n");
+ for (i = 0; i < MTK_DP_CAL_MAX; i++)
+ cal_data[i] = mtk_dp->data->efuse_fmt[i].default_val;
+}
+
+static void mtk_dp_set_calibration_data(struct mtk_dp *mtk_dp)
+{
+ u32 *cal_data = mtk_dp->cal_data;
+
+ mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_DPAUX_TX,
+ cal_data[MTK_DP_CAL_CLKTX_IMPSE] << 20,
+ RG_CKM_PT0_CKTX_IMPSEL);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_BIAS_GEN_00,
+ cal_data[MTK_DP_CAL_GLB_BIAS_TRIM] << 16,
+ RG_XTP_GLB_BIAS_INTR_CTRL);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] << 12,
+ RG_XTP_LN0_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] << 16,
+ RG_XTP_LN0_TX_IMPSEL_NMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] << 12,
+ RG_XTP_LN1_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] << 16,
+ RG_XTP_LN1_TX_IMPSEL_NMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] << 12,
+ RG_XTP_LN2_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] << 16,
+ RG_XTP_LN2_TX_IMPSEL_NMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] << 12,
+ RG_XTP_LN3_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] << 16,
+ RG_XTP_LN3_TX_IMPSEL_NMOS);
+}
+
+static int mtk_dp_phy_configure(struct mtk_dp *mtk_dp,
+ u32 link_rate, int lane_count)
+{
+ int ret;
+ union phy_configure_opts phy_opts = {
+ .dp = {
+ .link_rate = drm_dp_bw_code_to_link_rate(link_rate) / 100,
+ .set_rate = 1,
+ .lanes = lane_count,
+ .set_lanes = 1,
+ .ssc = mtk_dp->train_info.sink_ssc,
+ }
+ };
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, DP_PWR_STATE_BANDGAP,
+ DP_PWR_STATE_MASK);
+
+ ret = phy_configure(mtk_dp->phy, &phy_opts);
+ if (ret)
+ return ret;
+
+ mtk_dp_set_calibration_data(mtk_dp);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE, DP_PWR_STATE_MASK);
+
+ return 0;
+}
+
+static void mtk_dp_set_idle_pattern(struct mtk_dp *mtk_dp, bool enable)
+{
+ u32 val = POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK |
+ POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK |
+ POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK |
+ POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3580,
+ enable ? val : 0, val);
+}
+
+static void mtk_dp_train_set_pattern(struct mtk_dp *mtk_dp, int pattern)
+{
+ /* TPS1 */
+ if (pattern == 1)
+ mtk_dp_set_idle_pattern(mtk_dp, false);
+
+ mtk_dp_update_bits(mtk_dp,
+ MTK_DP_TRANS_P0_3400,
+ pattern ? BIT(pattern - 1) << 12 : 0,
+ PATTERN1_EN_DP_TRANS_P0_MASK |
+ PATTERN2_EN_DP_TRANS_P0_MASK |
+ PATTERN3_EN_DP_TRANS_P0_MASK |
+ PATTERN4_EN_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_set_enhanced_frame_mode(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
+ ENHANCED_FRAME_EN_DP_ENC0_P0,
+ ENHANCED_FRAME_EN_DP_ENC0_P0);
+}
+
+static void mtk_dp_training_set_scramble(struct mtk_dp *mtk_dp, bool enable)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3404,
+ enable ? DP_SCR_EN_DP_TRANS_P0_MASK : 0,
+ DP_SCR_EN_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_video_mute(struct mtk_dp *mtk_dp, bool enable)
+{
+ struct arm_smccc_res res;
+ u32 val = VIDEO_MUTE_SEL_DP_ENC0_P0 |
+ (enable ? VIDEO_MUTE_SW_DP_ENC0_P0 : 0);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
+ val,
+ VIDEO_MUTE_SEL_DP_ENC0_P0 |
+ VIDEO_MUTE_SW_DP_ENC0_P0);
+
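+	/* Forward the mute state to secure firmware through a SiP SMC call */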
+ arm_smccc_smc(MTK_DP_SIP_CONTROL_AARCH32,
+ mtk_dp->data->smc_cmd, enable,
+ 0, 0, 0, 0, 0, &res);
+
+ dev_dbg(mtk_dp->dev, "smc cmd: 0x%x, p1: %s, ret: 0x%lx-0x%lx\n",
+ mtk_dp->data->smc_cmd, enable ? "enable" : "disable", res.a0, res.a1);
+}
+
+static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute)
+{
+ u32 val[3];
+
+ if (mute) {
+ val[0] = VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 |
+ VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0;
+ val[1] = 0;
+ val[2] = 0;
+ } else {
+ val[0] = 0;
+ val[1] = AU_EN_DP_ENC0_P0;
+ /* Send one every two frames */
+ val[2] = 0x0F;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030,
+ val[0],
+ VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 |
+ VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088,
+ val[1], AU_EN_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A4,
+ val[2], AU_TS_CFG_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_power_enable(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
+ 0, SW_RST_B_PHYD);
+
+ /* Wait for power enable */
+ usleep_range(10, 200);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
+ SW_RST_B_PHYD, SW_RST_B_PHYD);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL, DP_PWR_STATE_MASK);
+ mtk_dp_write(mtk_dp, MTK_DP_1040,
+ RG_DPAUX_RX_VALID_DEGLITCH_EN | RG_XTP_GLB_CKDET_EN |
+ RG_DPAUX_RX_EN);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_0034, 0, DA_CKM_CKTX0_EN_FORCE_EN);
+}
+
+static void mtk_dp_power_disable(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_write(mtk_dp, MTK_DP_TOP_PWR_STATE, 0);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_0034,
+ DA_CKM_CKTX0_EN_FORCE_EN, DA_CKM_CKTX0_EN_FORCE_EN);
+
+ /* Disable RX */
+ mtk_dp_write(mtk_dp, MTK_DP_1040, 0);
+ mtk_dp_write(mtk_dp, MTK_DP_TOP_MEM_PD,
+ 0x550 | FUSE_SEL | MEM_ISO_EN);
+}
+
+static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp)
+{
+ mtk_dp->train_info.link_rate = DP_LINK_BW_5_4;
+ mtk_dp->train_info.lane_count = mtk_dp->max_lanes;
+ mtk_dp->train_info.cable_plugged_in = false;
+
+ mtk_dp->info.format = DP_PIXELFORMAT_RGB;
+ memset(&mtk_dp->info.vm, 0, sizeof(struct videomode));
+ mtk_dp->audio_enable = false;
+}
+
+static void mtk_dp_sdp_set_down_cnt_init(struct mtk_dp *mtk_dp,
+ u32 sram_read_start)
+{
+ u32 sdp_down_cnt_init = 0;
+ struct drm_display_mode mode;
+ struct videomode *vm = &mtk_dp->info.vm;
+
+ drm_display_mode_from_videomode(vm, &mode);
+
+ if (mode.clock > 0)
+ sdp_down_cnt_init = sram_read_start *
+ mtk_dp->train_info.link_rate * 2700 * 8 /
+ (mode.clock * 4);
+
+ switch (mtk_dp->train_info.lane_count) {
+ case 1:
+ sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x1A);
+ break;
+ case 2:
+		/* Case for low resolution with a high audio sample rate */
+ sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x10);
+ sdp_down_cnt_init += mode.vtotal <= 525 ? 4 : 0;
+ break;
+ case 4:
+ default:
+ sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 6);
+ break;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040,
+ sdp_down_cnt_init,
+ SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_sdp_set_down_cnt_init_in_hblank(struct mtk_dp *mtk_dp)
+{
+ int pix_clk_mhz;
+ u32 dc_offset;
+ u32 spd_down_cnt_init = 0;
+ struct drm_display_mode mode;
+ struct videomode *vm = &mtk_dp->info.vm;
+
+ drm_display_mode_from_videomode(vm, &mode);
+
+ pix_clk_mhz = mtk_dp->info.format == DP_PIXELFORMAT_YUV420 ?
+ mode.clock / 2000 : mode.clock / 1000;
+
+ switch (mtk_dp->train_info.lane_count) {
+ case 1:
+ spd_down_cnt_init = 0x20;
+ break;
+ case 2:
+ dc_offset = (mode.vtotal <= 525) ? 0x14 : 0x00;
+ spd_down_cnt_init = 0x18 + dc_offset;
+ break;
+ case 4:
+ default:
+ dc_offset = (mode.vtotal <= 525) ? 0x08 : 0x00;
+ if (pix_clk_mhz > mtk_dp->train_info.link_rate * 27)
+ spd_down_cnt_init = 0x8;
+ else
+ spd_down_cnt_init = 0x10 + dc_offset;
+ break;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364, spd_down_cnt_init,
+ SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK);
+}
+
+static void mtk_dp_setup_tu(struct mtk_dp *mtk_dp)
+{
+ u32 sram_read_start = min_t(u32, MTK_DP_TBC_BUF_READ_START_ADDR,
+ mtk_dp->info.vm.hactive /
+ mtk_dp->train_info.lane_count /
+ MTK_DP_4P1T / MTK_DP_HDE /
+ MTK_DP_PIX_PER_ADDR);
+ mtk_dp_set_sram_read_start(mtk_dp, sram_read_start);
+ mtk_dp_setup_encoder(mtk_dp);
+ mtk_dp_sdp_set_down_cnt_init_in_hblank(mtk_dp);
+ mtk_dp_sdp_set_down_cnt_init(mtk_dp, sram_read_start);
+}
+
+static void mtk_dp_set_tx_out(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_setup_tu(mtk_dp);
+}
+
+static void mtk_dp_train_update_swing_pre(struct mtk_dp *mtk_dp, int lanes,
+ u8 dpcd_adjust_req[2])
+{
+ int lane;
+
+ for (lane = 0; lane < lanes; ++lane) {
+ u8 val;
+ u8 swing;
+ u8 preemphasis;
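+		/*
+		 * Each DPCD adjust-request byte carries two lanes: the even
+		 * lane in the low nibble and the odd lane in the high nibble.
+		 */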
+ int index = lane / 2;
+ int shift = lane % 2 ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 0;
+
+ swing = (dpcd_adjust_req[index] >> shift) &
+ DP_ADJUST_VOLTAGE_SWING_LANE0_MASK;
+ preemphasis = ((dpcd_adjust_req[index] >> shift) &
+ DP_ADJUST_PRE_EMPHASIS_LANE0_MASK) >>
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT;
+ val = swing << DP_TRAIN_VOLTAGE_SWING_SHIFT |
+ preemphasis << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (swing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ val |= DP_TRAIN_MAX_SWING_REACHED;
+ if (preemphasis == 3)
+ val |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ mtk_dp_set_swing_pre_emphasis(mtk_dp, lane, swing, preemphasis);
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_LANE0_SET + lane,
+ val);
+ }
+}
+
+static void mtk_dp_pattern(struct mtk_dp *mtk_dp, bool is_tps1)
+{
+ int pattern;
+ unsigned int aux_offset;
+
+ if (is_tps1) {
+ pattern = 1;
+ aux_offset = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1;
+ } else {
+ aux_offset = mtk_dp->train_info.channel_eq_pattern;
+
+ switch (mtk_dp->train_info.channel_eq_pattern) {
+ case DP_TRAINING_PATTERN_4:
+ pattern = 4;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ pattern = 3;
+ aux_offset |= DP_LINK_SCRAMBLING_DISABLE;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ default:
+ pattern = 2;
+ aux_offset |= DP_LINK_SCRAMBLING_DISABLE;
+ break;
+ }
+ }
+
+ mtk_dp_train_set_pattern(mtk_dp, pattern);
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, aux_offset);
+}
+
+static int mtk_dp_train_setting(struct mtk_dp *mtk_dp, u8 target_link_rate,
+ u8 target_lane_count)
+{
+ int ret;
+
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LINK_BW_SET, target_link_rate);
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LANE_COUNT_SET,
+ target_lane_count | DP_LANE_COUNT_ENHANCED_FRAME_EN);
+
+ if (mtk_dp->train_info.sink_ssc)
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+
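+	/* mtk_dp_set_lanes() takes lane_count / 2, i.e. 0/1/2 for 1/2/4 lanes */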
+ mtk_dp_set_lanes(mtk_dp, target_lane_count / 2);
+ ret = mtk_dp_phy_configure(mtk_dp, target_link_rate, target_lane_count);
+ if (ret)
+ return ret;
+
+ dev_dbg(mtk_dp->dev,
+ "Link train target_link_rate = 0x%x, target_lane_count = 0x%x\n",
+ target_link_rate, target_lane_count);
+
+ return 0;
+}
+
+static int mtk_dp_train_cr(struct mtk_dp *mtk_dp, u8 target_lane_count)
+{
+ u8 lane_adjust[2] = {};
+ u8 link_status[DP_LINK_STATUS_SIZE] = {};
+ u8 prev_lane_adjust = 0xff;
+ int train_retries = 0;
+ int voltage_retries = 0;
+
+ mtk_dp_pattern(mtk_dp, true);
+
+ /* In DP spec 1.4, the retry count of CR is defined as 10. */
+ do {
+ train_retries++;
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+ return -ENODEV;
+ }
+
+ drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+ lane_adjust, sizeof(lane_adjust));
+ mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count,
+ lane_adjust);
+
+ drm_dp_link_train_clock_recovery_delay(&mtk_dp->aux,
+ mtk_dp->rx_cap);
+
+ /* check link status from sink device */
+ drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status);
+ if (drm_dp_clock_recovery_ok(link_status,
+ target_lane_count)) {
+ dev_dbg(mtk_dp->dev, "Link train CR pass\n");
+ return 0;
+ }
+
+		/*
+		 * Per DP spec 1.4, if the current voltage level is the same
+		 * as the previous voltage level, retry up to 5 times.
+		 */
+ if (prev_lane_adjust == link_status[4]) {
+ voltage_retries++;
+			/*
+			 * CR fails when either:
+			 * 1. CR has not passed after using the same voltage
+			 *    level more than five times.
+			 * 2. CR has not passed while the current voltage
+			 *    level equals the previous level and the maximum
+			 *    voltage level (3) has been reached.
+			 */
+ if (voltage_retries > MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY ||
+ (prev_lane_adjust & DP_ADJUST_VOLTAGE_SWING_LANE0_MASK) == 3) {
+ dev_dbg(mtk_dp->dev, "Link train CR fail\n");
+ break;
+ }
+ } else {
+			/*
+			 * The voltage level has changed, so restart the
+			 * retry count for this level.
+			 */
+ voltage_retries = 0;
+ }
+ prev_lane_adjust = link_status[4];
+ } while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY);
+
+ /* Failed to train CR, and disable pattern. */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+
+ return -ETIMEDOUT;
+}
+
+static int mtk_dp_train_eq(struct mtk_dp *mtk_dp, u8 target_lane_count)
+{
+ u8 lane_adjust[2] = {};
+ u8 link_status[DP_LINK_STATUS_SIZE] = {};
+ int train_retries = 0;
+
+ mtk_dp_pattern(mtk_dp, false);
+
+ do {
+ train_retries++;
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+ return -ENODEV;
+ }
+
+ drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+ lane_adjust, sizeof(lane_adjust));
+ mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count,
+ lane_adjust);
+
+ drm_dp_link_train_channel_eq_delay(&mtk_dp->aux,
+ mtk_dp->rx_cap);
+
+ /* check link status from sink device */
+ drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status);
+ if (drm_dp_channel_eq_ok(link_status, target_lane_count)) {
+ dev_dbg(mtk_dp->dev, "Link train EQ pass\n");
+
+ /* Training done, and disable pattern. */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+ return 0;
+ }
+ dev_dbg(mtk_dp->dev, "Link train EQ fail\n");
+ } while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY);
+
+ /* Failed to train EQ, and disable pattern. */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+
+ return -ETIMEDOUT;
+}
+
+static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+{
+ u8 val;
+ ssize_t ret;
+
+ drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+
+ if (drm_dp_tps4_supported(mtk_dp->rx_cap))
+ mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
+ else if (drm_dp_tps3_supported(mtk_dp->rx_cap))
+ mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_3;
+ else
+ mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_2;
+
+ mtk_dp->train_info.sink_ssc = drm_dp_max_downspread(mtk_dp->rx_cap);
+
+ ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val);
+ if (ret < 1) {
+ drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n");
+ return ret == 0 ? -EIO : ret;
+ }
+
+ if (val & DP_MST_CAP) {
+ /* Clear DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
+ ret = drm_dp_dpcd_readb(&mtk_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ &val);
+ if (ret < 1) {
+ drm_err(mtk_dp->drm_dev, "Read irq vector failed\n");
+ return ret == 0 ? -EIO : ret;
+ }
+
+ if (val)
+ drm_dp_dpcd_writeb(&mtk_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ val);
+ }
+
+ return 0;
+}
+
+static bool mtk_dp_edid_parse_audio_capabilities(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ if (!mtk_dp->data->audio_supported)
+ return false;
+
+ if (mtk_dp->info.audio_cur_cfg.sad_count <= 0) {
+		drm_info(mtk_dp->drm_dev, "No audio SADs available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void mtk_dp_train_change_mode(struct mtk_dp *mtk_dp)
+{
+ phy_reset(mtk_dp->phy);
+ mtk_dp_reset_swing_pre_emphasis(mtk_dp);
+}
+
+static int mtk_dp_training(struct mtk_dp *mtk_dp)
+{
+ int ret;
+ u8 lane_count, link_rate, train_limit, max_link_rate;
+
+ link_rate = min_t(u8, mtk_dp->max_linkrate,
+ mtk_dp->rx_cap[DP_MAX_LINK_RATE]);
+ max_link_rate = link_rate;
+ lane_count = min_t(u8, mtk_dp->max_lanes,
+ drm_dp_max_lane_count(mtk_dp->rx_cap));
+
+	/*
+	 * TPS are generated by the hardware pattern generator. Scrambling
+	 * must be disabled in the hardware before the TPS pattern generator
+	 * is used.
+	 */
+ mtk_dp_training_set_scramble(mtk_dp, false);
+
+ for (train_limit = 6; train_limit > 0; train_limit--) {
+ mtk_dp_train_change_mode(mtk_dp);
+
+ ret = mtk_dp_train_setting(mtk_dp, link_rate, lane_count);
+ if (ret)
+ return ret;
+
+ ret = mtk_dp_train_cr(mtk_dp, lane_count);
+ if (ret == -ENODEV) {
+ return ret;
+ } else if (ret) {
+ /* reduce link rate */
+ switch (link_rate) {
+ case DP_LINK_BW_1_62:
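+				/*
+				 * Already at the lowest link rate: drop half
+				 * the lanes and retry from the maximum rate.
+				 */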
+ lane_count = lane_count / 2;
+ link_rate = max_link_rate;
+ if (lane_count == 0)
+ return -EIO;
+ break;
+ case DP_LINK_BW_2_7:
+ link_rate = DP_LINK_BW_1_62;
+ break;
+ case DP_LINK_BW_5_4:
+ link_rate = DP_LINK_BW_2_7;
+ break;
+ case DP_LINK_BW_8_1:
+ link_rate = DP_LINK_BW_5_4;
+ break;
+ default:
+ return -EINVAL;
+			}
+ continue;
+ }
+
+ ret = mtk_dp_train_eq(mtk_dp, lane_count);
+ if (ret == -ENODEV) {
+ return ret;
+ } else if (ret) {
+ /* reduce lane count */
+ if (lane_count == 0)
+ return -EIO;
+ lane_count /= 2;
+ continue;
+ }
+
+		/* If we reach this point, training is done. */
+ break;
+ }
+
+ if (train_limit == 0)
+ return -ETIMEDOUT;
+
+ mtk_dp->train_info.link_rate = link_rate;
+ mtk_dp->train_info.lane_count = lane_count;
+
+	/*
+	 * After training is done, we output a normal stream instead of TPS,
+	 * so scrambling must be re-enabled.
+	 */
+ mtk_dp_training_set_scramble(mtk_dp, true);
+ mtk_dp_set_enhanced_frame_mode(mtk_dp);
+
+ return 0;
+}
+
+static void mtk_dp_video_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ /* the mute sequence is different between enable and disable */
+ if (enable) {
+ mtk_dp_msa_bypass_enable(mtk_dp, false);
+ mtk_dp_pg_enable(mtk_dp, false);
+ mtk_dp_set_tx_out(mtk_dp);
+ mtk_dp_video_mute(mtk_dp, false);
+ } else {
+ mtk_dp_video_mute(mtk_dp, true);
+ mtk_dp_pg_enable(mtk_dp, true);
+ mtk_dp_msa_bypass_enable(mtk_dp, true);
+ }
+}
+
+static void mtk_dp_audio_sdp_setup(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ struct dp_sdp sdp;
+ struct hdmi_audio_infoframe frame;
+
+ hdmi_audio_infoframe_init(&frame);
+ frame.coding_type = HDMI_AUDIO_CODING_TYPE_PCM;
+ frame.channels = cfg->channels;
+ frame.sample_frequency = cfg->sample_rate;
+
+ switch (cfg->word_length_bits) {
+ case 16:
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ break;
+ case 20:
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_20;
+ break;
+ case 24:
+ default:
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_24;
+ break;
+ }
+
+ hdmi_audio_infoframe_pack_for_dp(&frame, &sdp, MTK_DP_VERSION);
+
+ mtk_dp_audio_sdp_asp_set_channels(mtk_dp, cfg->channels);
+ mtk_dp_setup_sdp_aui(mtk_dp, &sdp);
+}
+
+static void mtk_dp_audio_setup(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ mtk_dp_audio_sdp_setup(mtk_dp, cfg);
+ mtk_dp_audio_channel_status_set(mtk_dp, cfg);
+
+ mtk_dp_audio_setup_channels(mtk_dp, cfg);
+ mtk_dp_audio_set_divider(mtk_dp);
+}
+
+static int mtk_dp_video_config(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_config_mn_mode(mtk_dp);
+ mtk_dp_set_msa(mtk_dp);
+ mtk_dp_set_color_depth(mtk_dp);
+ return mtk_dp_set_color_format(mtk_dp, mtk_dp->info.format);
+}
+
+static void mtk_dp_init_port(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_set_idle_pattern(mtk_dp, true);
+ mtk_dp_initialize_priv_data(mtk_dp);
+
+ mtk_dp_initialize_settings(mtk_dp);
+ mtk_dp_initialize_aux_settings(mtk_dp);
+ mtk_dp_initialize_digital_settings(mtk_dp);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
+ mtk_dp_initialize_hpd_detect_settings(mtk_dp);
+
+ mtk_dp_digital_sw_reset(mtk_dp);
+}
+
+static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev)
+{
+ struct mtk_dp *mtk_dp = dev;
+ unsigned long flags;
+ u32 status;
+
+ if (mtk_dp->need_debounce && mtk_dp->train_info.cable_plugged_in)
+ msleep(100);
+
+ spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags);
+ status = mtk_dp->irq_thread_handle;
+ mtk_dp->irq_thread_handle = 0;
+ spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
+
+ if (status & MTK_DP_THREAD_CABLE_STATE_CHG) {
+ drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
+
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ mtk_dp_disable_sdp_aui(mtk_dp);
+ memset(&mtk_dp->info.audio_cur_cfg, 0,
+ sizeof(mtk_dp->info.audio_cur_cfg));
+
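+ /*
+ * Disable the extra 100 ms plug debounce until the debounce
+ * timer re-arms it, likely to avoid stacking delays when the
+ * cable is unplugged and quickly replugged.
+ */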
+ mtk_dp->need_debounce = false;
+ mod_timer(&mtk_dp->debounce_timer,
+ jiffies + msecs_to_jiffies(100) - 1);
+ }
+ }
+
+ if (status & MTK_DP_THREAD_HPD_EVENT)
+ dev_dbg(mtk_dp->dev, "Receive IRQ from sink devices\n");
+
+ return IRQ_HANDLED;
+}
+
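+/*
+ * Hard IRQ handler: read and clear the HPD interrupt sources, latch
+ * them into irq_thread_handle under irq_thread_lock and sample the
+ * current cable state; all further work (debouncing, hotplug
+ * notification) is deferred to mtk_dp_hpd_event_thread().
+ */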
+static irqreturn_t mtk_dp_hpd_event(int hpd, void *dev)
+{
+ struct mtk_dp *mtk_dp = dev;
+ bool cable_sta_chg = false;
+ unsigned long flags;
+ u32 irq_status = mtk_dp_swirq_get_clear(mtk_dp) |
+ mtk_dp_hwirq_get_clear(mtk_dp);
+
+ if (!irq_status)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags);
+
+ if (irq_status & MTK_DP_HPD_INTERRUPT)
+ mtk_dp->irq_thread_handle |= MTK_DP_THREAD_HPD_EVENT;
+
+ /* Cable state has changed. */
+ if (irq_status != MTK_DP_HPD_INTERRUPT) {
+ mtk_dp->irq_thread_handle |= MTK_DP_THREAD_CABLE_STATE_CHG;
+ cable_sta_chg = true;
+ }
+
+ spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
+
+ if (cable_sta_chg) {
+ if (!!(mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3414) &
+ HPD_DB_DP_TRANS_P0_MASK))
+ mtk_dp->train_info.cable_plugged_in = true;
+ else
+ mtk_dp->train_info.cable_plugged_in = false;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
+ struct platform_device *pdev)
+{
+ struct device_node *endpoint;
+ struct device *dev = &pdev->dev;
+ int ret;
+ void __iomem *base;
+ u32 linkrate;
+ int len;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mtk_dp->regs = devm_regmap_init_mmio(dev, base, &mtk_dp_regmap_config);
+ if (IS_ERR(mtk_dp->regs))
+ return PTR_ERR(mtk_dp->regs);
+
+ endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
+ len = of_property_count_elems_of_size(endpoint,
+ "data-lanes", sizeof(u32));
+ if (len < 0 || len > 4 || len == 3) {
+ dev_err(dev, "invalid data lane size: %d\n", len);
+ return -EINVAL;
+ }
+
+ mtk_dp->max_lanes = len;
+
+ ret = device_property_read_u32(dev, "max-linkrate-mhz", &linkrate);
+ if (ret) {
+ dev_err(dev, "failed to read max linkrate: %d\n", ret);
+ return ret;
+ }
+
+ mtk_dp->max_linkrate = drm_dp_link_rate_to_bw_code(linkrate * 100);
+
+ return 0;
+}
+
+static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
+{
+ mutex_lock(&mtk_dp->update_plugged_status_lock);
+ if (mtk_dp->plugged_cb && mtk_dp->codec_dev)
+ mtk_dp->plugged_cb(mtk_dp->codec_dev,
+ mtk_dp->enabled &
+ mtk_dp->info.audio_cur_cfg.detect_monitor);
+ mutex_unlock(&mtk_dp->update_plugged_status_lock);
+}
+
+static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ enum drm_connector_status ret = connector_status_disconnected;
+ bool enabled = mtk_dp->enabled;
+ u8 sink_count = 0;
+
+ if (!mtk_dp->train_info.cable_plugged_in)
+ return ret;
+
+ if (!enabled) {
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ /* power on panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ }
+ /*
+ * Some dongles still assert HPD even when they are not connected to
+ * any sink device. To avoid false positives, read the sink count to
+ * make sure a sink device is actually connected. After this detect
+ * function, checking the HPD connection is enough to know whether a
+ * sink device is connected.
+ */
+ drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
+ if (DP_GET_SINK_COUNT(sink_count))
+ ret = connector_status_connected;
+
+ if (!enabled) {
+ /* power off panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ }
+
+ return ret;
+}
+
+static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ bool enabled = mtk_dp->enabled;
+ struct edid *new_edid = NULL;
+ struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+ struct cea_sad *sads;
+
+ if (!enabled) {
+ drm_bridge_chain_pre_enable(bridge);
+
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ /* power on panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ }
+
+ new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
+
+ /*
+ * Parse the sink capabilities here so that atomic_get_input_bus_fmts
+ * and mode_valid can use them to calculate the sink bitrates.
+ */
+ if (mtk_dp_parse_capabilities(mtk_dp)) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+ new_edid = NULL;
+ }
+
+ if (new_edid) {
+ audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
+ audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ }
+
+ if (!enabled) {
+ /* power off panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+
+ drm_bridge_chain_post_disable(bridge);
+ }
+
+ return new_edid;
+}
+
+static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct mtk_dp *mtk_dp;
+ bool is_read;
+ u8 request;
+ size_t accessed_bytes = 0;
+ int ret;
+
+ mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (msg->request) {
+ case DP_AUX_I2C_MOT:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE | DP_AUX_I2C_MOT:
+ request = msg->request & ~DP_AUX_I2C_WRITE_STATUS_UPDATE;
+ is_read = false;
+ break;
+ case DP_AUX_I2C_READ:
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ | DP_AUX_I2C_MOT:
+ request = msg->request;
+ is_read = true;
+ break;
+ default:
+ drm_err(mtk_aux->drm_dev, "invalid aux cmd = %d\n",
+ msg->request);
+ ret = -EINVAL;
+ goto err;
+ }
+
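+ /*
+ * A single AUX transaction carries at most DP_AUX_MAX_PAYLOAD_BYTES
+ * (16 bytes) of payload, so split larger requests into chunks.
+ */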
+ do {
+ size_t to_access = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES,
+ msg->size - accessed_bytes);
+
+ ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request,
+ msg->address + accessed_bytes,
+ msg->buffer + accessed_bytes,
+ to_access);
+
+ if (ret) {
+ drm_info(mtk_dp->drm_dev,
+ "Failed to do AUX transfer: %d\n", ret);
+ goto err;
+ }
+ accessed_bytes += to_access;
+ } while (accessed_bytes < msg->size);
+
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK | DP_AUX_I2C_REPLY_ACK;
+ return msg->size;
+err:
+ msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK;
+ return ret;
+}
+
+static int mtk_dp_poweron(struct mtk_dp *mtk_dp)
+{
+ int ret;
+
+ ret = phy_init(mtk_dp->phy);
+ if (ret) {
+ dev_err(mtk_dp->dev, "Failed to initialize phy: %d\n", ret);
+ return ret;
+ }
+
+ mtk_dp_init_port(mtk_dp);
+ mtk_dp_power_enable(mtk_dp);
+
+ return 0;
+}
+
+static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_power_disable(mtk_dp);
+ phy_exit(mtk_dp->phy);
+}
+
+static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ dev_err(mtk_dp->dev, "Driver does not provide a connector!");
+ return -EINVAL;
+ }
+
+ mtk_dp->aux.drm_dev = bridge->dev;
+ ret = drm_dp_aux_register(&mtk_dp->aux);
+ if (ret) {
+ dev_err(mtk_dp->dev,
+ "failed to register DP AUX channel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_dp_poweron(mtk_dp);
+ if (ret)
+ goto err_aux_register;
+
+ if (mtk_dp->next_bridge) {
+ ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ &mtk_dp->bridge, flags);
+ if (ret) {
+ drm_warn(mtk_dp->drm_dev,
+ "Failed to attach external bridge: %d\n", ret);
+ goto err_bridge_attach;
+ }
+ }
+
+ mtk_dp->drm_dev = bridge->dev;
+
+ mtk_dp_hwirq_enable(mtk_dp, true);
+
+ return 0;
+
+err_bridge_attach:
+ mtk_dp_poweroff(mtk_dp);
+err_aux_register:
+ drm_dp_aux_unregister(&mtk_dp->aux);
+ return ret;
+}
+
+static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+
+ mtk_dp_hwirq_enable(mtk_dp, false);
+ mtk_dp->drm_dev = NULL;
+ mtk_dp_poweroff(mtk_dp);
+ drm_dp_aux_unregister(&mtk_dp->aux);
+}
+
+static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ int ret;
+
+ mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(old_state->base.state,
+ bridge->encoder);
+ if (!mtk_dp->conn) {
+ drm_err(mtk_dp->drm_dev,
+ "Can't enable bridge as connector is missing\n");
+ return;
+ }
+
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ if (mtk_dp->train_info.cable_plugged_in) {
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ }
+
+ /* Training */
+ ret = mtk_dp_training(mtk_dp);
+ if (ret) {
+ drm_err(mtk_dp->drm_dev, "Training failed, %d\n", ret);
+ goto power_off_aux;
+ }
+
+ ret = mtk_dp_video_config(mtk_dp);
+ if (ret)
+ goto power_off_aux;
+
+ mtk_dp_video_enable(mtk_dp, true);
+
+ mtk_dp->audio_enable =
+ mtk_dp_edid_parse_audio_capabilities(mtk_dp,
+ &mtk_dp->info.audio_cur_cfg);
+ if (mtk_dp->audio_enable) {
+ mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg);
+ mtk_dp_audio_mute(mtk_dp, false);
+ } else {
+ memset(&mtk_dp->info.audio_cur_cfg, 0,
+ sizeof(mtk_dp->info.audio_cur_cfg));
+ }
+
+ mtk_dp->enabled = true;
+ mtk_dp_update_plugged_status(mtk_dp);
+
+ return;
+power_off_aux:
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+}
+
+static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+
+ mtk_dp->enabled = false;
+ mtk_dp_update_plugged_status(mtk_dp);
+ mtk_dp_video_enable(mtk_dp, false);
+ mtk_dp_audio_mute(mtk_dp, true);
+
+ if (mtk_dp->train_info.cable_plugged_in) {
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+ }
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+
+ /* Ensure the sink is muted */
+ msleep(20);
+}
+
+static enum drm_mode_status
+mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
+ u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
+ drm_dp_max_lane_count(mtk_dp->rx_cap),
+ drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
+ mtk_dp->max_lanes);
+
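+ /*
+ * rate is the per-lane link rate in kHz times the lane count; with
+ * 8b/10b coding each link symbol carries one payload byte, so this
+ * is roughly the payload bandwidth in kB/s, compared below against
+ * the pixel data rate of mode->clock * bpp / 8.
+ */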
+ if (rate < mode->clock * bpp / 8)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static u32 *mtk_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+ *num_output_fmts = 1;
+ output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ return output_fmts;
+}
+
+static const u32 mt8195_input_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+};
+
+static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_info *display_info =
+ &conn_state->connector->display_info;
+ u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
+ drm_dp_max_lane_count(mtk_dp->rx_cap),
+ drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
+ mtk_dp->max_lanes);
+
+ *num_input_fmts = 0;
+
+ /*
+ * If the link rate is smaller than the data rate of RGB888 but larger
+ * than the data rate of YUV422, and the sink device supports YUV422,
+ * output YUV422. This way we can support more resolutions.
+ */
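+ /*
+ * For example, a 3840x2160@30 mode (~297000 kHz pixel clock) needs
+ * about 891000 kB/s as RGB888 but only about 594000 kB/s as YUV422,
+ * so on 4 lanes of RBR (4 * 162000 = 648000 kB/s) it fits only as
+ * YUV422.
+ */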
+ if ((rate < (mode->clock * 24 / 8)) &&
+ (rate > (mode->clock * 16 / 8)) &&
+ (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
+ input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+ *num_input_fmts = 1;
+ input_fmts[0] = MEDIA_BUS_FMT_YUYV8_1X16;
+ } else {
+ input_fmts = kcalloc(ARRAY_SIZE(mt8195_input_fmts),
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = ARRAY_SIZE(mt8195_input_fmts);
+ memcpy(input_fmts, mt8195_input_fmts, sizeof(mt8195_input_fmts));
+ }
+
+ return input_fmts;
+}
+
+static int mtk_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ struct drm_crtc *crtc = conn_state->crtc;
+ unsigned int input_bus_format;
+
+ input_bus_format = bridge_state->input_bus_cfg.format;
+
+ dev_dbg(mtk_dp->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ if (input_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
+ mtk_dp->info.format = DP_PIXELFORMAT_YUV422;
+ else
+ mtk_dp->info.format = DP_PIXELFORMAT_RGB;
+
+ if (!crtc) {
+ drm_err(mtk_dp->drm_dev,
+ "Can't enable bridge as connector state doesn't have a crtc\n");
+ return -EINVAL;
+ }
+
+ drm_display_mode_to_videomode(&crtc_state->adjusted_mode, &mtk_dp->info.vm);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
+ .atomic_check = mtk_dp_bridge_atomic_check,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_output_bus_fmts = mtk_dp_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = mtk_dp_bridge_atomic_get_input_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = mtk_dp_bridge_attach,
+ .detach = mtk_dp_bridge_detach,
+ .atomic_enable = mtk_dp_bridge_atomic_enable,
+ .atomic_disable = mtk_dp_bridge_atomic_disable,
+ .mode_valid = mtk_dp_bridge_mode_valid,
+ .get_edid = mtk_dp_get_edid,
+ .detect = mtk_dp_bdg_detect,
+};
+
+static void mtk_dp_debounce_timer(struct timer_list *t)
+{
+ struct mtk_dp *mtk_dp = from_timer(mtk_dp, t, debounce_timer);
+
+ mtk_dp->need_debounce = true;
+}
+
+/*
+ * HDMI audio codec callbacks
+ */
+static int mtk_dp_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ if (!mtk_dp->enabled) {
+ dev_err(mtk_dp->dev, "%s, DP is not ready!\n", __func__);
+ return -ENODEV;
+ }
+
+ mtk_dp->info.audio_cur_cfg.channels = params->cea.channels;
+ mtk_dp->info.audio_cur_cfg.sample_rate = params->sample_rate;
+
+ mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg);
+
+ return 0;
+}
+
+static int mtk_dp_audio_startup(struct device *dev, void *data)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ mtk_dp_audio_mute(mtk_dp, false);
+
+ return 0;
+}
+
+static void mtk_dp_audio_shutdown(struct device *dev, void *data)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ mtk_dp_audio_mute(mtk_dp, true);
+}
+
+static int mtk_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ if (mtk_dp->enabled)
+ memcpy(buf, mtk_dp->conn->eld, len);
+ else
+ memset(buf, 0, len);
+
+ return 0;
+}
+
+static int mtk_dp_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_dp *mtk_dp = data;
+
+ mutex_lock(&mtk_dp->update_plugged_status_lock);
+ mtk_dp->plugged_cb = fn;
+ mtk_dp->codec_dev = codec_dev;
+ mutex_unlock(&mtk_dp->update_plugged_status_lock);
+
+ mtk_dp_update_plugged_status(mtk_dp);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = {
+ .hw_params = mtk_dp_audio_hw_params,
+ .audio_startup = mtk_dp_audio_startup,
+ .audio_shutdown = mtk_dp_audio_shutdown,
+ .get_eld = mtk_dp_audio_get_eld,
+ .hook_plugged_cb = mtk_dp_audio_hook_plugged_cb,
+ .no_capture_mute = 1,
+};
+
+static int mtk_dp_register_audio_driver(struct device *dev)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &mtk_dp_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+ .data = mtk_dp,
+ };
+
+ mtk_dp->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev);
+}
+
+static int mtk_dp_probe(struct platform_device *pdev)
+{
+ struct mtk_dp *mtk_dp;
+ struct device *dev = &pdev->dev;
+ int ret, irq_num;
+
+ mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
+ if (!mtk_dp)
+ return -ENOMEM;
+
+ mtk_dp->dev = dev;
+ mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
+
+ irq_num = platform_get_irq(pdev, 0);
+ if (irq_num < 0)
+ return dev_err_probe(dev, irq_num,
+ "failed to request dp irq resource\n");
+
+ mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(mtk_dp->next_bridge) &&
+ PTR_ERR(mtk_dp->next_bridge) == -ENODEV)
+ mtk_dp->next_bridge = NULL;
+ else if (IS_ERR(mtk_dp->next_bridge))
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->next_bridge),
+ "Failed to get bridge\n");
+
+ ret = mtk_dp_dt_parse(mtk_dp, pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse dt\n");
+
+ drm_dp_aux_init(&mtk_dp->aux);
+ mtk_dp->aux.name = "aux_mtk_dp";
+ mtk_dp->aux.transfer = mtk_dp_aux_transfer;
+
+ spin_lock_init(&mtk_dp->irq_thread_lock);
+
+ ret = devm_request_threaded_irq(dev, irq_num, mtk_dp_hpd_event,
+ mtk_dp_hpd_event_thread,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
+ mtk_dp);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to request mediatek dptx irq\n");
+
+ mutex_init(&mtk_dp->update_plugged_status_lock);
+
+ platform_set_drvdata(pdev, mtk_dp);
+
+ if (mtk_dp->data->audio_supported) {
+ ret = mtk_dp_register_audio_driver(dev);
+ if (ret) {
+ dev_err(dev, "Failed to register audio driver: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
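+ /*
+ * Register the DP PHY as a child device and pass it a pointer to our
+ * regmap as platform data, presumably so the PHY driver can program
+ * the PHY registers that live in the same register space.
+ */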
+ mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
+ PLATFORM_DEVID_AUTO,
+ &mtk_dp->regs,
+ sizeof(struct regmap *));
+ if (IS_ERR(mtk_dp->phy_dev))
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
+ "Failed to create device mediatek-dp-phy\n");
+
+ mtk_dp_get_calibration_data(mtk_dp);
+
+ mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
+
+ if (IS_ERR(mtk_dp->phy)) {
+ platform_device_unregister(mtk_dp->phy_dev);
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy),
+ "Failed to get phy\n");
+ }
+
+ mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
+ mtk_dp->bridge.of_node = dev->of_node;
+
+ mtk_dp->bridge.ops =
+ DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ mtk_dp->bridge.type = mtk_dp->data->bridge_type;
+
+ drm_bridge_add(&mtk_dp->bridge);
+
+ mtk_dp->need_debounce = true;
+ timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+}
+
+static int mtk_dp_remove(struct platform_device *pdev)
+{
+ struct mtk_dp *mtk_dp = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ del_timer_sync(&mtk_dp->debounce_timer);
+ drm_bridge_remove(&mtk_dp->bridge);
+ platform_device_unregister(mtk_dp->phy_dev);
+ if (mtk_dp->audio_pdev)
+ platform_device_unregister(mtk_dp->audio_pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_dp_suspend(struct device *dev)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ mtk_dp_power_disable(mtk_dp);
+ mtk_dp_hwirq_enable(mtk_dp, false);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int mtk_dp_resume(struct device *dev)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ mtk_dp_init_port(mtk_dp);
+ mtk_dp_hwirq_enable(mtk_dp, true);
+ mtk_dp_power_enable(mtk_dp);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
+
+static const struct mtk_dp_data mt8195_edp_data = {
+ .bridge_type = DRM_MODE_CONNECTOR_eDP,
+ .smc_cmd = MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE,
+ .efuse_fmt = mt8195_edp_efuse_fmt,
+ .audio_supported = false,
+};
+
+static const struct mtk_dp_data mt8195_dp_data = {
+ .bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
+ .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
+ .efuse_fmt = mt8195_dp_efuse_fmt,
+ .audio_supported = true,
+};
+
+static const struct of_device_id mtk_dp_of_match[] = {
+ {
+ .compatible = "mediatek,mt8195-edp-tx",
+ .data = &mt8195_edp_data,
+ },
+ {
+ .compatible = "mediatek,mt8195-dp-tx",
+ .data = &mt8195_dp_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_dp_of_match);
+
+static struct platform_driver mtk_dp_driver = {
+ .probe = mtk_dp_probe,
+ .remove = mtk_dp_remove,
+ .driver = {
+ .name = "mediatek-drm-dp",
+ .of_match_table = mtk_dp_of_match,
+ .pm = &mtk_dp_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_dp_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>");
+MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
new file mode 100644
index 000000000000..84e38cef03c2
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019-2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre
+ */
+#ifndef _MTK_DP_REG_H_
+#define _MTK_DP_REG_H_
+
+#define SEC_OFFSET 0x4000
+
+#define MTK_DP_HPD_DISCONNECT BIT(1)
+#define MTK_DP_HPD_CONNECT BIT(2)
+#define MTK_DP_HPD_INTERRUPT BIT(3)
+
+/* offset: 0x0 */
+#define DP_PHY_GLB_BIAS_GEN_00 0x0
+#define RG_XTP_GLB_BIAS_INTR_CTRL GENMASK(20, 16)
+#define DP_PHY_GLB_DPAUX_TX 0x8
+#define RG_CKM_PT0_CKTX_IMPSEL GENMASK(23, 20)
+#define MTK_DP_0034 0x34
+#define DA_XTP_GLB_CKDET_EN_FORCE_VAL BIT(15)
+#define DA_XTP_GLB_CKDET_EN_FORCE_EN BIT(14)
+#define DA_CKM_INTCKTX_EN_FORCE_VAL BIT(13)
+#define DA_CKM_INTCKTX_EN_FORCE_EN BIT(12)
+#define DA_CKM_CKTX0_EN_FORCE_VAL BIT(11)
+#define DA_CKM_CKTX0_EN_FORCE_EN BIT(10)
+#define DA_CKM_XTAL_CK_FORCE_VAL BIT(9)
+#define DA_CKM_XTAL_CK_FORCE_EN BIT(8)
+#define DA_CKM_BIAS_LPF_EN_FORCE_VAL BIT(7)
+#define DA_CKM_BIAS_LPF_EN_FORCE_EN BIT(6)
+#define DA_CKM_BIAS_EN_FORCE_VAL BIT(5)
+#define DA_CKM_BIAS_EN_FORCE_EN BIT(4)
+#define DA_XTP_GLB_AVD10_ON_FORCE_VAL BIT(3)
+#define DA_XTP_GLB_AVD10_ON_FORCE BIT(2)
+#define DA_XTP_GLB_LDO_EN_FORCE_VAL BIT(1)
+#define DA_XTP_GLB_LDO_EN_FORCE_EN BIT(0)
+#define DP_PHY_LANE_TX_0 0x104
+#define RG_XTP_LN0_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN0_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define DP_PHY_LANE_TX_1 0x204
+#define RG_XTP_LN1_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN1_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define DP_PHY_LANE_TX_2 0x304
+#define RG_XTP_LN2_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN2_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define DP_PHY_LANE_TX_3 0x404
+#define RG_XTP_LN3_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN3_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define MTK_DP_1040 0x1040
+#define RG_DPAUX_RX_VALID_DEGLITCH_EN BIT(2)
+#define RG_XTP_GLB_CKDET_EN BIT(1)
+#define RG_DPAUX_RX_EN BIT(0)
+
+/* offset: TOP_OFFSET (0x2000) */
+#define MTK_DP_TOP_PWR_STATE 0x2000
+#define DP_PWR_STATE_MASK GENMASK(1, 0)
+#define DP_PWR_STATE_BANDGAP BIT(0)
+#define DP_PWR_STATE_BANDGAP_TPLL BIT(1)
+#define DP_PWR_STATE_BANDGAP_TPLL_LANE GENMASK(1, 0)
+#define MTK_DP_TOP_SWING_EMP 0x2004
+#define DP_TX0_VOLT_SWING_MASK GENMASK(1, 0)
+#define DP_TX0_VOLT_SWING_SHIFT 0
+#define DP_TX0_PRE_EMPH_MASK GENMASK(3, 2)
+#define DP_TX0_PRE_EMPH_SHIFT 2
+#define DP_TX1_VOLT_SWING_MASK GENMASK(9, 8)
+#define DP_TX1_VOLT_SWING_SHIFT 8
+#define DP_TX1_PRE_EMPH_MASK GENMASK(11, 10)
+#define DP_TX2_VOLT_SWING_MASK GENMASK(17, 16)
+#define DP_TX2_PRE_EMPH_MASK GENMASK(19, 18)
+#define DP_TX3_VOLT_SWING_MASK GENMASK(25, 24)
+#define DP_TX3_PRE_EMPH_MASK GENMASK(27, 26)
+#define MTK_DP_TOP_RESET_AND_PROBE 0x2020
+#define SW_RST_B_PHYD BIT(4)
+#define MTK_DP_TOP_IRQ_MASK 0x202c
+#define IRQ_MASK_AUX_TOP_IRQ BIT(2)
+#define MTK_DP_TOP_MEM_PD 0x2038
+#define MEM_ISO_EN BIT(0)
+#define FUSE_SEL BIT(2)
+
+/* offset: ENC0_OFFSET (0x3000) */
+#define MTK_DP_ENC0_P0_3000 0x3000
+#define LANE_NUM_DP_ENC0_P0_MASK GENMASK(1, 0)
+#define VIDEO_MUTE_SW_DP_ENC0_P0 BIT(2)
+#define VIDEO_MUTE_SEL_DP_ENC0_P0 BIT(3)
+#define ENHANCED_FRAME_EN_DP_ENC0_P0 BIT(4)
+#define MTK_DP_ENC0_P0_3004 0x3004
+#define VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK BIT(8)
+#define DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0 BIT(9)
+#define MTK_DP_ENC0_P0_3010 0x3010
+#define HTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3014 0x3014
+#define VTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3018 0x3018
+#define HSTART_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_301C 0x301c
+#define VSTART_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3020 0x3020
+#define HWIDTH_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3024 0x3024
+#define VHEIGHT_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3028 0x3028
+#define HSW_SW_DP_ENC0_P0_MASK GENMASK(14, 0)
+#define HSP_SW_DP_ENC0_P0_MASK BIT(15)
+#define MTK_DP_ENC0_P0_302C 0x302c
+#define VSW_SW_DP_ENC0_P0_MASK GENMASK(14, 0)
+#define VSP_SW_DP_ENC0_P0_MASK BIT(15)
+#define MTK_DP_ENC0_P0_3030 0x3030
+#define HTOTAL_SEL_DP_ENC0_P0 BIT(0)
+#define VTOTAL_SEL_DP_ENC0_P0 BIT(1)
+#define HSTART_SEL_DP_ENC0_P0 BIT(2)
+#define VSTART_SEL_DP_ENC0_P0 BIT(3)
+#define HWIDTH_SEL_DP_ENC0_P0 BIT(4)
+#define VHEIGHT_SEL_DP_ENC0_P0 BIT(5)
+#define HSP_SEL_DP_ENC0_P0 BIT(6)
+#define HSW_SEL_DP_ENC0_P0 BIT(7)
+#define VSP_SEL_DP_ENC0_P0 BIT(8)
+#define VSW_SEL_DP_ENC0_P0 BIT(9)
+#define VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 BIT(11)
+#define VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0 BIT(12)
+#define MTK_DP_ENC0_P0_3034 0x3034
+#define MTK_DP_ENC0_P0_3038 0x3038
+#define VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK BIT(11)
+#define MTK_DP_ENC0_P0_303C 0x303c
+#define SRAM_START_READ_THRD_DP_ENC0_P0_MASK GENMASK(5, 0)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK GENMASK(10, 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_16BIT (0 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_12BIT (1 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_10BIT (2 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT (3 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_6BIT (4 << 8)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK GENMASK(14, 12)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB (0 << 12)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422 (1 << 12)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR420 (2 << 12)
+#define VIDEO_MN_GEN_EN_DP_ENC0_P0 BIT(15)
+#define MTK_DP_ENC0_P0_3040 0x3040
+#define SDP_DOWN_CNT_DP_ENC0_P0_VAL 0x20
+#define SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK GENMASK(11, 0)
+#define MTK_DP_ENC0_P0_304C 0x304c
+#define VBID_VIDEO_MUTE_DP_ENC0_P0_MASK BIT(2)
+#define SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK BIT(8)
+#define MTK_DP_ENC0_P0_3064 0x3064
+#define HDE_NUM_LAST_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3088 0x3088
+#define AU_EN_DP_ENC0_P0 BIT(6)
+#define AUDIO_8CH_EN_DP_ENC0_P0_MASK BIT(7)
+#define AUDIO_8CH_SEL_DP_ENC0_P0_MASK BIT(8)
+#define AUDIO_2CH_EN_DP_ENC0_P0_MASK BIT(14)
+#define AUDIO_2CH_SEL_DP_ENC0_P0_MASK BIT(15)
+#define MTK_DP_ENC0_P0_308C 0x308c
+#define CH_STATUS_0_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3090 0x3090
+#define CH_STATUS_1_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3094 0x3094
+#define CH_STATUS_2_DP_ENC0_P0_MASK GENMASK(7, 0)
+#define MTK_DP_ENC0_P0_30A4 0x30a4
+#define AU_TS_CFG_DP_ENC0_P0_MASK GENMASK(7, 0)
+#define MTK_DP_ENC0_P0_30A8 0x30a8
+#define MTK_DP_ENC0_P0_30BC 0x30bc
+#define ISRC_CONT_DP_ENC0_P0 BIT(0)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK GENMASK(10, 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_2 (1 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_4 (2 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_8 (3 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2 (5 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_4 (6 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_8 (7 << 8)
+#define MTK_DP_ENC0_P0_30D8 0x30d8
+#define MTK_DP_ENC0_P0_312C 0x312c
+#define ASP_HB2_DP_ENC0_P0_MASK GENMASK(7, 0)
+#define ASP_HB3_DP_ENC0_P0_MASK GENMASK(15, 8)
+#define MTK_DP_ENC0_P0_3154 0x3154
+#define PGEN_HTOTAL_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3158 0x3158
+#define PGEN_HSYNC_RISING_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_315C 0x315c
+#define PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3160 0x3160
+#define PGEN_HFDE_START_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3164 0x3164
+#define PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3168 0x3168
+#define PGEN_VTOTAL_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_316C 0x316c
+#define PGEN_VSYNC_RISING_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_3170 0x3170
+#define PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_3174 0x3174
+#define PGEN_VFDE_START_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_3178 0x3178
+#define PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_31B0 0x31b0
+#define PGEN_PATTERN_SEL_VAL 4
+#define PGEN_PATTERN_SEL_MASK GENMASK(6, 4)
+#define MTK_DP_ENC0_P0_31EC 0x31ec
+#define AUDIO_CH_SRC_SEL_DP_ENC0_P0 BIT(4)
+#define ISRC1_HB3_DP_ENC0_P0_MASK GENMASK(15, 8)
+
+/* offset: ENC1_OFFSET (0x3200) */
+#define MTK_DP_ENC1_P0_3200 0x3200
+#define MTK_DP_ENC1_P0_3280 0x3280
+#define SDP_PACKET_TYPE_DP_ENC1_P0_MASK GENMASK(4, 0)
+#define SDP_PACKET_W_DP_ENC1_P0 BIT(5)
+#define SDP_PACKET_W_DP_ENC1_P0_MASK BIT(5)
+#define MTK_DP_ENC1_P0_3300 0x3300
+#define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL 2
+#define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK GENMASK(9, 8)
+#define MTK_DP_ENC1_P0_3304 0x3304
+#define AU_PRTY_REGEN_DP_ENC1_P0_MASK BIT(8)
+#define AU_CH_STS_REGEN_DP_ENC1_P0_MASK BIT(9)
+#define AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK BIT(12)
+#define MTK_DP_ENC1_P0_3324 0x3324
+#define AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK GENMASK(9, 8)
+#define AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX 0
+#define MTK_DP_ENC1_P0_3364 0x3364
+#define SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL 0x20
+#define SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK GENMASK(11, 0)
+#define FIFO_READ_START_POINT_DP_ENC1_P0_VAL 4
+#define FIFO_READ_START_POINT_DP_ENC1_P0_MASK GENMASK(15, 12)
+#define MTK_DP_ENC1_P0_3368 0x3368
+#define VIDEO_SRAM_FIFO_CNT_RESET_SEL_DP_ENC1_P0 BIT(0)
+#define VIDEO_STABLE_CNT_THRD_DP_ENC1_P0 BIT(4)
+#define SDP_DP13_EN_DP_ENC1_P0 BIT(8)
+#define BS2BS_MODE_DP_ENC1_P0 BIT(12)
+#define BS2BS_MODE_DP_ENC1_P0_MASK GENMASK(13, 12)
+#define BS2BS_MODE_DP_ENC1_P0_VAL 1
+#define DP_ENC1_P0_3368_VAL (VIDEO_SRAM_FIFO_CNT_RESET_SEL_DP_ENC1_P0 | \
+ VIDEO_STABLE_CNT_THRD_DP_ENC1_P0 | \
+ SDP_DP13_EN_DP_ENC1_P0 | \
+ BS2BS_MODE_DP_ENC1_P0)
+#define MTK_DP_ENC1_P0_33F4 0x33f4
+#define DP_ENC_DUMMY_RW_1_AUDIO_RST_EN BIT(0)
+#define DP_ENC_DUMMY_RW_1 BIT(9)
+
+/* offset: TRANS_OFFSET (0x3400) */
+#define MTK_DP_TRANS_P0_3400 0x3400
+#define PATTERN1_EN_DP_TRANS_P0_MASK BIT(12)
+#define PATTERN2_EN_DP_TRANS_P0_MASK BIT(13)
+#define PATTERN3_EN_DP_TRANS_P0_MASK BIT(14)
+#define PATTERN4_EN_DP_TRANS_P0_MASK BIT(15)
+#define MTK_DP_TRANS_P0_3404 0x3404
+#define DP_SCR_EN_DP_TRANS_P0_MASK BIT(0)
+#define MTK_DP_TRANS_P0_340C 0x340c
+#define DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0 BIT(13)
+#define MTK_DP_TRANS_P0_3410 0x3410
+#define HPD_DEB_THD_DP_TRANS_P0_MASK GENMASK(3, 0)
+#define HPD_INT_THD_DP_TRANS_P0_MASK GENMASK(7, 4)
+#define HPD_INT_THD_DP_TRANS_P0_LOWER_500US (2 << 4)
+#define HPD_INT_THD_DP_TRANS_P0_UPPER_1100US (2 << 6)
+#define HPD_DISC_THD_DP_TRANS_P0_MASK GENMASK(11, 8)
+#define HPD_CONN_THD_DP_TRANS_P0_MASK GENMASK(15, 12)
+#define MTK_DP_TRANS_P0_3414 0x3414
+#define HPD_DB_DP_TRANS_P0_MASK BIT(2)
+#define MTK_DP_TRANS_P0_3418 0x3418
+#define IRQ_CLR_DP_TRANS_P0_MASK GENMASK(3, 0)
+#define IRQ_MASK_DP_TRANS_P0_MASK GENMASK(7, 4)
+#define IRQ_MASK_DP_TRANS_P0_DISC_IRQ (BIT(1) << 4)
+#define IRQ_MASK_DP_TRANS_P0_CONN_IRQ (BIT(2) << 4)
+#define IRQ_MASK_DP_TRANS_P0_INT_IRQ (BIT(3) << 4)
+#define IRQ_STATUS_DP_TRANS_P0_MASK GENMASK(15, 12)
+#define MTK_DP_TRANS_P0_342C 0x342c
+#define XTAL_FREQ_DP_TRANS_P0_DEFAULT (BIT(0) | BIT(3) | BIT(5) | BIT(6))
+#define XTAL_FREQ_DP_TRANS_P0_MASK GENMASK(7, 0)
+#define MTK_DP_TRANS_P0_3430 0x3430
+#define HPD_INT_THD_ECO_DP_TRANS_P0_MASK GENMASK(1, 0)
+#define HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT BIT(1)
+#define MTK_DP_TRANS_P0_34A4 0x34a4
+#define LANE_NUM_DP_TRANS_P0_MASK GENMASK(3, 2)
+#define MTK_DP_TRANS_P0_3540 0x3540
+#define FEC_EN_DP_TRANS_P0_MASK BIT(0)
+#define FEC_CLOCK_EN_MODE_DP_TRANS_P0 BIT(3)
+#define MTK_DP_TRANS_P0_3580 0x3580
+#define POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK BIT(8)
+#define POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK BIT(9)
+#define POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK BIT(10)
+#define POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK BIT(11)
+#define MTK_DP_TRANS_P0_35C8 0x35c8
+#define SW_IRQ_CLR_DP_TRANS_P0_MASK GENMASK(15, 0)
+#define SW_IRQ_STATUS_DP_TRANS_P0_MASK GENMASK(15, 0)
+#define MTK_DP_TRANS_P0_35D0 0x35d0
+#define SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK GENMASK(15, 0)
+#define MTK_DP_TRANS_P0_35F0 0x35f0
+#define DP_TRANS_DUMMY_RW_0 BIT(3)
+#define DP_TRANS_DUMMY_RW_0_MASK GENMASK(3, 2)
+
+/* offset: AUX_OFFSET (0x3600) */
+#define MTK_DP_AUX_P0_360C 0x360c
+#define AUX_TIMEOUT_THR_AUX_TX_P0_MASK GENMASK(12, 0)
+#define AUX_TIMEOUT_THR_AUX_TX_P0_VAL 0x1595
+#define MTK_DP_AUX_P0_3614 0x3614
+#define AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK GENMASK(6, 0)
+#define AUX_RX_UI_CNT_THR_AUX_FOR_26M 13
+#define MTK_DP_AUX_P0_3618 0x3618
+#define AUX_RX_FIFO_FULL_AUX_TX_P0_MASK BIT(9)
+#define AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3620 0x3620
+#define AUX_RD_MODE_AUX_TX_P0_MASK BIT(9)
+#define AUX_RX_FIFO_READ_PULSE_TX_P0 BIT(8)
+#define AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK GENMASK(7, 0)
+#define MTK_DP_AUX_P0_3624 0x3624
+#define AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3628 0x3628
+#define AUX_RX_PHY_STATE_AUX_TX_P0_MASK GENMASK(9, 0)
+#define AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE BIT(0)
+#define MTK_DP_AUX_P0_362C 0x362c
+#define AUX_NO_LENGTH_AUX_TX_P0 BIT(0)
+#define AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK BIT(1)
+#define AUX_RESERVED_RW_0_AUX_TX_P0_MASK GENMASK(15, 2)
+#define MTK_DP_AUX_P0_3630 0x3630
+#define AUX_TX_REQUEST_READY_AUX_TX_P0 BIT(3)
+#define MTK_DP_AUX_P0_3634 0x3634
+#define AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK GENMASK(15, 8)
+#define AUX_TX_OVER_SAMPLE_RATE_FOR_26M 25
+#define MTK_DP_AUX_P0_3640 0x3640
+#define AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(6)
+#define AUX_RX_EDID_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(5)
+#define AUX_RX_MCCS_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(4)
+#define AUX_RX_CMD_RECV_IRQ_AUX_TX_P0 BIT(3)
+#define AUX_RX_ADDR_RECV_IRQ_AUX_TX_P0 BIT(2)
+#define AUX_RX_DATA_RECV_IRQ_AUX_TX_P0 BIT(1)
+#define AUX_400US_TIMEOUT_IRQ_AUX_TX_P0 BIT(0)
+#define DP_AUX_P0_3640_VAL (AUX_400US_TIMEOUT_IRQ_AUX_TX_P0 | \
+ AUX_RX_DATA_RECV_IRQ_AUX_TX_P0 | \
+ AUX_RX_ADDR_RECV_IRQ_AUX_TX_P0 | \
+ AUX_RX_CMD_RECV_IRQ_AUX_TX_P0 | \
+ AUX_RX_MCCS_RECV_COMPLETE_IRQ_AUX_TX_P0 | \
+ AUX_RX_EDID_RECV_COMPLETE_IRQ_AUX_TX_P0 | \
+ AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0)
+#define MTK_DP_AUX_P0_3644 0x3644
+#define MCU_REQUEST_COMMAND_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3648 0x3648
+#define MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK GENMASK(15, 0)
+#define MTK_DP_AUX_P0_364C 0x364c
+#define MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3650 0x3650
+#define MCU_REQ_DATA_NUM_AUX_TX_P0_MASK GENMASK(15, 12)
+#define PHY_FIFO_RST_AUX_TX_P0_MASK BIT(9)
+#define MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 BIT(8)
+#define MTK_DP_AUX_P0_3658 0x3658
+#define AUX_TX_OV_EN_AUX_TX_P0_MASK BIT(0)
+#define MTK_DP_AUX_P0_3690 0x3690
+#define RX_REPLY_COMPLETE_MODE_AUX_TX_P0 BIT(8)
+#define MTK_DP_AUX_P0_3704 0x3704
+#define AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK BIT(1)
+#define AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0 BIT(2)
+#define MTK_DP_AUX_P0_3708 0x3708
+#define MTK_DP_AUX_P0_37C8 0x37c8
+#define MTK_ATOP_EN_AUX_TX_P0 BIT(0)
+
+#endif /*_MTK_DP_REG_H_*/
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 630a4e301ef6..508a6d994e83 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -11,7 +11,6 @@
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 42cc7052b050..112615817dcb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -15,7 +15,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 2d72cc5ddaba..6b6d5335c834 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0e4c77724b05..91f58db5915f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -833,11 +833,8 @@ static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
- int ret;
-
- ret = drm_mode_config_helper_suspend(drm);
- return ret;
+ return drm_mode_config_helper_suspend(drm);
}
static void mtk_drm_sys_complete(struct device *dev)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 139d7724c6d0..47e96b0289f9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -8,7 +8,7 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
@@ -22,7 +22,7 @@ static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
.vmap = mtk_drm_gem_prime_vmap,
.vunmap = mtk_drm_gem_prime_vunmap,
.mmap = mtk_drm_gem_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 5c0d9ce69931..2f5e007dd380 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -11,7 +11,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -108,8 +107,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
crtc_state = new_plane_state->crtc->state;
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
@@ -202,8 +201,8 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9cc406e1eee1..3b7d13028fb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after dsi is fully set.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
dsi->enabled = false;
}
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 3196189429bc..4c80b6896dc3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -16,7 +16,6 @@
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 6c70fc3214af..823909da87db 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -4,7 +4,7 @@ config DRM_MESON
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
select REGMAP_MMIO
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 1b70938cfd2c..3b24a924b7b9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
@@ -87,16 +87,16 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
args->size = PAGE_ALIGN(args->pitch * args->height);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver meson_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- /* CMA Ops */
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
+ /* DMA Ops */
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
/* Misc */
.fops = &fops,
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
@@ -385,10 +388,14 @@ static void meson_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
- component_unbind_all(dev, drm);
free_irq(priv->vsync_irq, drm);
drm_dev_put(drm);
+ meson_encoder_hdmi_remove(priv);
+ meson_encoder_cvbs_remove(priv);
+
+ component_unbind_all(dev, drm);
+
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
}
@@ -490,6 +497,13 @@ static int meson_drv_probe(struct platform_device *pdev)
return 0;
};
+static int meson_drv_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &meson_drv_master_ops);
+
+ return 0;
+}
+
static struct meson_drm_match_data meson_drm_gxbb_data = {
.compat = VPU_COMPATIBLE_GXBB,
};
@@ -527,6 +541,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .remove = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 177dac3ca3be..c62ee358456f 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -25,6 +25,12 @@ enum vpu_compatible {
VPU_COMPATIBLE_G12A = 3,
};
+enum {
+ MESON_ENC_CVBS = 0,
+ MESON_ENC_HDMI,
+ MESON_ENC_LAST,
+};
+
struct meson_drm_match_data {
enum vpu_compatible compat;
struct meson_afbcd_ops *afbcd_ops;
@@ -51,6 +57,7 @@ struct meson_drm {
struct drm_crtc *crtc;
struct drm_plane *primary_plane;
struct drm_plane *overlay_plane;
+ void *encoders[MESON_ENC_LAST];
const struct meson_drm_soc_limits *limits;
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 8110a6e39320..5675bc2a92cf 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -281,5 +281,18 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
}
drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
+ priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
+
return 0;
}
+
+void meson_encoder_cvbs_remove(struct meson_drm *priv)
+{
+ struct meson_encoder_cvbs *meson_encoder_cvbs;
+
+ if (priv->encoders[MESON_ENC_CVBS]) {
+ meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
+ drm_bridge_remove(&meson_encoder_cvbs->bridge);
+ drm_bridge_remove(meson_encoder_cvbs->next_bridge);
+ }
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
index 61d9d183ce7f..09710fec3c66 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
@@ -25,5 +25,6 @@ struct meson_cvbs_mode {
extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
int meson_encoder_cvbs_init(struct meson_drm *priv);
+void meson_encoder_cvbs_remove(struct meson_drm *priv);
#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 2f616c55c271..53231bfdf7e2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -452,6 +452,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->cec_notifier = notifier;
}
+ priv->encoders[MESON_ENC_HDMI] = meson_encoder_hdmi;
+
dev_dbg(priv->dev, "HDMI encoder initialized\n");
return 0;
@@ -460,3 +462,14 @@ err_put_node:
of_node_put(remote);
return ret;
}
+
+void meson_encoder_hdmi_remove(struct meson_drm *priv)
+{
+ struct meson_encoder_hdmi *meson_encoder_hdmi;
+
+ if (priv->encoders[MESON_ENC_HDMI]) {
+ meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
+ drm_bridge_remove(&meson_encoder_hdmi->bridge);
+ drm_bridge_remove(meson_encoder_hdmi->next_bridge);
+ }
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
index ed19494f0956..a6cd38eb5f71 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
@@ -8,5 +8,6 @@
#define __MESON_ENCODER_HDMI_H
int meson_encoder_hdmi_init(struct meson_drm *priv);
+void meson_encoder_hdmi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_HDMI_H */
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index b4a0518c1028..7f98de38842b 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -11,12 +11,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "meson_overlay.h"
#include "meson_registers.h"
@@ -477,7 +476,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
plane);
struct drm_framebuffer *fb = new_state->fb;
struct meson_drm *priv = meson_overlay->priv;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned long flags;
bool interlace_mode;
@@ -651,8 +650,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
switch (priv->viu.vd1_planes) {
case 3:
- gem = drm_fb_cma_get_gem_obj(fb, 2);
- priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+ gem = drm_fb_dma_get_gem_obj(fb, 2);
+ priv->viu.vd1_addr2 = gem->dma_addr + fb->offsets[2];
priv->viu.vd1_stride2 = fb->pitches[2];
priv->viu.vd1_height2 =
drm_format_info_plane_height(fb->format,
@@ -663,8 +662,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_height2);
fallthrough;
case 2:
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
+ priv->viu.vd1_addr1 = gem->dma_addr + fb->offsets[1];
priv->viu.vd1_stride1 = fb->pitches[1];
priv->viu.vd1_height1 =
drm_format_info_plane_height(fb->format,
@@ -675,8 +674,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_height1);
fallthrough;
case 1:
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ priv->viu.vd1_addr0 = gem->dma_addr + fb->offsets[0];
priv->viu.vd1_stride0 = fb->pitches[0];
priv->viu.vd1_height0 =
drm_format_info_plane_height(fb->format,
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index b9ac932af8d0..815dfe30492b 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -15,12 +15,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "meson_plane.h"
#include "meson_registers.h"
@@ -95,7 +94,7 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
FRAC_16_16(1, 5),
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -140,7 +139,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
struct drm_rect dest = drm_plane_state_dest(new_state);
struct meson_drm *priv = meson_plane->priv;
struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned long flags;
int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
@@ -170,7 +169,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
@@ -366,9 +365,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
}
/* Update Canvas with buffer address */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
- priv->viu.osd1_addr = gem->paddr;
+ priv->viu.osd1_addr = gem->dma_addr;
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
priv->viu.osd1_width = fb->width;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index bb7e109534de..d4b907889a21 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 89558549c3af..182e224c460d 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
mgag200-y := \
+ mgag200_bmc.o \
mgag200_drv.o \
mgag200_g200.o \
mgag200_g200eh.o \
@@ -10,7 +11,6 @@ mgag200-y := \
mgag200_g200se.o \
mgag200_g200wb.o \
mgag200_i2c.o \
- mgag200_mode.o \
- mgag200_pll.o
+ mgag200_mode.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c
new file mode 100644
index 000000000000..2ba2e3c5086a
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/delay.h>
+
+#include "mgag200_drv.h"
+
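+/*
+ * Helpers for pausing and resuming the BMC (an external video-reset
+ * source) around display mode changes. They back the disable_vidrst
+ * and enable_vidrst hooks in struct mgag200_device_funcs.
+ */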
+void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
+{
+ u8 tmp;
+ int iter_max;
+
+ /*
+ * 1 - The first step is to inform the BMC of an upcoming mode
+ * change. We are switching the misc<0> line to output.
+ */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* Drive a 1 onto the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+ /*
+ * 2- Second step to mask any further scan request. This is
+ * done by asserting the remfreqmsk bit (XSPAREREG<7>)
+ */
+
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+ /*
+ * 3a- The third step is to verify if there is an active scan.
+ * We are waiting for a 0 on remhsyncsts (XSPAREREG<0>).
+ */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+ /*
+ * 3b- This step occurs only if the remote is actually
+ * scanning. We are waiting for the end of the frame, which is
+ * a 1 on remvsyncsts (XSPAREREG<1>).
+ */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
+{
+ u8 tmp;
+
+ /* Ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* Assert rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(10);
+
+ /* Deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* Remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* Put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 251a1bb648cc..ece6cd102dbb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -155,15 +155,16 @@ int mgag200_device_preinit(struct mga_device *mdev)
return 0;
}
-int mgag200_device_init(struct mga_device *mdev, enum mga_type type,
- const struct mgag200_device_info *info)
+int mgag200_device_init(struct mga_device *mdev,
+ const struct mgag200_device_info *info,
+ const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
u8 crtcext3, misc;
int ret;
mdev->info = info;
- mdev->type = type;
+ mdev->funcs = funcs;
ret = drmm_mutex_init(dev, &mdev->rmmio_lock);
if (ret)
@@ -226,29 +227,29 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (type) {
case G200_PCI:
case G200_AGP:
- mdev = mgag200_g200_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200_device_create(pdev, &mgag200_driver);
break;
case G200_SE_A:
case G200_SE_B:
mdev = mgag200_g200se_device_create(pdev, &mgag200_driver, type);
break;
case G200_WB:
- mdev = mgag200_g200wb_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200wb_device_create(pdev, &mgag200_driver);
break;
case G200_EV:
- mdev = mgag200_g200ev_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200ev_device_create(pdev, &mgag200_driver);
break;
case G200_EH:
- mdev = mgag200_g200eh_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200eh_device_create(pdev, &mgag200_driver);
break;
case G200_EH3:
- mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver);
break;
case G200_ER:
- mdev = mgag200_g200er_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200er_device_create(pdev, &mgag200_driver);
break;
case G200_EW3:
- mdev = mgag200_g200ew3_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200ew3_device_create(pdev, &mgag200_driver);
break;
default:
dev_err(&pdev->dev, "Device type %d is unsupported\n", type);
@@ -262,7 +263,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ /*
+ * FIXME: A 24-bit color depth does not work with 24 bpp on
+ * G200ER. Force 32 bpp.
+ */
+ drm_fbdev_generic_setup(dev, 32);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 301c4ab46539..f0c2349404b4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -15,11 +15,13 @@
#include <video/vga.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_plane.h>
#include "mgag200_reg.h"
@@ -123,11 +125,39 @@
#define MGA_MISC_OUT 0x1fc2
#define MGA_MISC_IN 0x1fcc
+/*
+ * TODO: This is a pretty large set of default values for all kinds of
+ * settings. It should be split and set in the various DRM helpers,
+ * such as the CRTC reset or atomic_enable helpers. The PLL values
+ * probably belong to each model's PLL code.
+ */
+#define MGAG200_DAC_DEFAULT(xvrefctrl, xpixclkctrl, xmiscctrl, xsyspllm, xsysplln, xsyspllp) \
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0, \
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 0x18: */ (xvrefctrl), \
+ /* 0x19: */ 0, \
+ /* 0x1a: */ (xpixclkctrl), \
+ /* 0x1b: */ 0xff, 0xbf, 0x20, \
+ /* 0x1e: */ (xmiscctrl), \
+ /* 0x1f: */ 0x20, \
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, \
+ /* 0x2c: */ (xsyspllm), \
+ /* 0x2d: */ (xsysplln), \
+ /* 0x2e: */ (xsyspllp), \
+ /* 0x2f: */ 0x40, \
+ /* 0x30: */ 0x00, 0xb0, 0x00, 0xc2, 0x34, 0x14, 0x02, 0x83, \
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3a, \
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0 \
+
+#define MGAG200_LUT_SIZE 256
+
#define MGAG200_MAX_FB_HEIGHT 4096
#define MGAG200_MAX_FB_WIDTH 4096
struct mga_device;
-struct mgag200_pll;
/*
* Stores parameters for programming the PLLs
@@ -146,20 +176,12 @@ struct mgag200_pll_values {
unsigned int s;
};
-struct mgag200_pll_funcs {
- int (*compute)(struct mgag200_pll *pll, long clock, struct mgag200_pll_values *pllc);
- void (*update)(struct mgag200_pll *pll, const struct mgag200_pll_values *pllc);
-};
-
-struct mgag200_pll {
- struct mga_device *mdev;
-
- const struct mgag200_pll_funcs *funcs;
-};
-
struct mgag200_crtc_state {
struct drm_crtc_state base;
+ /* Primary-plane format; required for modesetting and color mgmt. */
+ const struct drm_format_info *format;
+
struct mgag200_pll_values pixpllc;
};
@@ -188,8 +210,6 @@ enum mga_type {
G200_EW3,
};
-#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
-
struct mgag200_device_info {
u16 max_hdisplay;
u16 max_vdisplay;
@@ -230,10 +250,39 @@ struct mgag200_device_info {
.bug_no_startadd = (_bug_no_startadd), \
}
+struct mgag200_device_funcs {
+ /*
+ * Disables an external reset source (i.e., BMC) before programming
+ * a new display mode.
+ */
+ void (*disable_vidrst)(struct mga_device *mdev);
+
+ /*
+ * Enables an external reset source (i.e., BMC) after programming
+ * a new display mode.
+ */
+ void (*enable_vidrst)(struct mga_device *mdev);
+
+ /*
+ * Validate that the given state can be programmed into PIXPLLC. On
+ * success, the calculated parameters should be stored in the CRTC's
+ * state in struct @mgag200_crtc_state.pixpllc.
+ */
+ int (*pixpllc_atomic_check)(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+ /*
+ * Program PIXPLLC from the CRTC state. The parameters should have been
+ * stored in struct @mgag200_crtc_state.pixpllc by the corresponding
+ * implementation of @pixpllc_atomic_check.
+ */
+ void (*pixpllc_atomic_update)(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+};
+
struct mga_device {
struct drm_device base;
const struct mgag200_device_info *info;
+ const struct mgag200_device_funcs *funcs;
struct resource *rmmio_res;
void __iomem *rmmio;
@@ -243,12 +292,11 @@ struct mga_device {
void __iomem *vram;
resource_size_t vram_available;
- enum mga_type type;
-
- struct mgag200_pll pixpll;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct mga_i2c_chan i2c;
struct drm_connector connector;
- struct drm_simple_display_pipe display_pipe;
};
static inline struct mga_device *to_mga_device(struct drm_device *dev)
@@ -287,35 +335,113 @@ int mgag200_init_pci_options(struct pci_dev *pdev, u32 option, u32 option2);
resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size);
resource_size_t mgag200_device_probe_vram(struct mga_device *mdev);
int mgag200_device_preinit(struct mga_device *mdev);
-int mgag200_device_init(struct mga_device *mdev, enum mga_type type,
- const struct mgag200_device_info *info);
+int mgag200_device_init(struct mga_device *mdev,
+ const struct mgag200_device_info *info,
+ const struct mgag200_device_funcs *funcs);
/* mgag200_<device type>.c */
-struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
+struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv);
struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type);
-struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
+void mgag200_g200wb_init_registers(struct mga_device *mdev);
+void mgag200_g200wb_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv);
+struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv);
+void mgag200_g200eh_init_registers(struct mga_device *mdev);
+void mgag200_g200eh_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
+struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
+struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
+struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
- /* mgag200_mode.c */
-resource_size_t mgag200_device_probe_vram(struct mga_device *mdev);
-int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_fb_available);
+/*
+ * mgag200_mode.c
+ */
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_display_mode;
+struct drm_plane;
+struct drm_atomic_state;
+
+extern const uint32_t mgag200_primary_plane_formats[];
+extern const size_t mgag200_primary_plane_formats_size;
+extern const uint64_t mgag200_primary_plane_fmtmods[];
+
+int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state);
+void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *old_state);
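+/*
+ * The MGAG200_*_FUNCS macros below initialize the function tables that
+ * the per-model pipelines share. Models override single entries (such
+ * as the CRTC atomic_enable helper) where the hardware needs it.
+ */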
+#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = mgag200_primary_plane_helper_atomic_check, \
+ .atomic_update = mgag200_primary_plane_helper_atomic_update, \
+ .atomic_disable = mgag200_primary_plane_helper_atomic_disable
+
+#define MGAG200_PRIMARY_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ .destroy = drm_plane_cleanup, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
+enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+
+#define MGAG200_CRTC_HELPER_FUNCS \
+ .mode_valid = mgag200_crtc_helper_mode_valid, \
+ .atomic_check = mgag200_crtc_helper_atomic_check, \
+ .atomic_flush = mgag200_crtc_helper_atomic_flush, \
+ .atomic_enable = mgag200_crtc_helper_atomic_enable, \
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+
+void mgag200_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define MGAG200_CRTC_FUNCS \
+ .reset = mgag200_crtc_reset, \
+ .destroy = drm_crtc_cleanup, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
+
+#define MGAG200_DAC_ENCODER_FUNCS \
+ .destroy = drm_encoder_cleanup
+
+int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector);
+
+#define MGAG200_VGA_CONNECTOR_HELPER_FUNCS \
+ .get_modes = mgag200_vga_connector_helper_get_modes
+
+#define MGAG200_VGA_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .destroy = drm_connector_cleanup, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode);
+void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_enable_display(struct mga_device *mdev);
+void mgag200_init_registers(struct mga_device *mdev);
+int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available);
+
+ /* mgag200_bmc.c */
+void mgag200_bmc_disable_vidrst(struct mga_device *mdev);
+void mgag200_bmc_enable_vidrst(struct mga_device *mdev);
/* mgag200_i2c.c */
int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c);
- /* mgag200_pll.c */
-int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev);
-
#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index 674385921b7f..bf5d7fe525a3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -3,7 +3,11 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
@@ -30,6 +34,235 @@ static int mgag200_g200_init_pci_options(struct pci_dev *pdev)
return mgag200_init_pci_options(pdev, option, 0x00008000);
}
+static void mgag200_g200_init_registers(struct mgag200_g200_device *g200)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00, 0xc9, 0x1f,
+ 0x04, 0x2d, 0x19)
+ };
+
+ struct mga_device *mdev = &g200->base;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); ++i) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200_pixpllc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ static const int post_div_max = 7;
+ static const int in_div_min = 1;
+ static const int in_div_max = 6;
+ static const int feed_div_min = 7;
+ static const int feed_div_max = 127;
+
+ struct drm_device *dev = crtc->dev;
+ struct mgag200_g200_device *g200 = to_mgag200_g200_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ u8 testp, testm, testn;
+ u8 n = 0, m = 0, p, s;
+ long f_vco;
+ long computed;
+ long delta, tmp_delta;
+ long ref_clk = g200->ref_clk;
+ long p_clk_min = g200->pclk_min;
+ long p_clk_max = g200->pclk_max;
+
+ if (clock > p_clk_max) {
+ drm_err(dev, "Pixel Clock %ld too high\n", clock);
+ return -EINVAL;
+ }
+
+ if (clock < p_clk_min >> 3)
+ clock = p_clk_min >> 3;
+
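+ /*
+ * Pick the post divider: double the VCO target until it reaches the
+ * PLL's minimum output frequency.
+ */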
+ f_vco = clock;
+ for (testp = 0;
+ testp <= post_div_max && f_vco < p_clk_min;
+ testp = (testp << 1) + 1, f_vco <<= 1)
+ ;
+ p = testp + 1;
+
+ delta = clock;
+
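+ /* Search the input/feedback divider pairs for the closest match to the VCO target. */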
+ for (testm = in_div_min; testm <= in_div_max; testm++) {
+ for (testn = feed_div_min; testn <= feed_div_max; testn++) {
+ computed = ref_clk * (testn + 1) / (testm + 1);
+ if (computed < f_vco)
+ tmp_delta = f_vco - computed;
+ else
+ tmp_delta = computed - f_vco;
+ if (tmp_delta < delta) {
+ delta = tmp_delta;
+ m = testm + 1;
+ n = testn + 1;
+ }
+ }
+ }
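+ /* Recompute the resulting VCO frequency and select the matching range setting (s). */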
+ f_vco = ref_clk * n / m;
+ if (f_vco < 100000)
+ s = 0;
+ else if (f_vco < 140000)
+ s = 1;
+ else if (f_vco < 180000)
+ s = 2;
+ else
+ s = 3;
+
+ drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
+ clock, f_vco, m, n, p, s);
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM Device
*/
@@ -160,8 +393,12 @@ out:
pci_unmap_rom(pdev, rom);
}
-struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mgag200_g200_device *g200;
struct mga_device *mdev;
@@ -187,15 +424,24 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
mgag200_g200_init_refclk(g200);
- ret = mgag200_device_init(mdev, type, &mgag200_g200_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200_device_info,
+ &mgag200_g200_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200_init_registers(g200);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 1b9a22728744..fad62453a91d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -1,11 +1,267 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+void mgag200_g200eh_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00, 0xc9,
+ MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS,
+ 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)) ||
+ ((i >= 0x44) && (i <= 0x4e)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200eh_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 800000;
+ static const unsigned int vcomin = 400000;
+ static const unsigned int pllreffreq = 33333;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
+ for (testp = 16; testp > 0; testp >>= 1) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 17; testn < 257; testn++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn;
+ m = testm;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+void mgag200_g200eh_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
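+ /* Program the PLL and retry, up to 32 times, until it is observed to lock. */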
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, xpixpllcp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
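+ /* Consider the PLL locked once the vertical counter has advanced. */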
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200eh_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200eh_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200eh_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200eh_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200eh_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200eh_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200eh_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200eh_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200eh_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200eh_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200eh_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200eh_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200eh_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200eh_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200eh_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +269,12 @@
static const struct mgag200_device_info mgag200_g200eh_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 37500, false, 1, 0, false);
-struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200eh_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200eh_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -36,15 +296,24 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200eh_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200eh_device_info,
+ &mgag200_g200eh_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200eh_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200eh_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index 438cda1b14c9..0f7d8112cd49 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -2,20 +2,184 @@
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200eh3_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 3000000;
+ static const unsigned int vcomin = 1500000;
+ static const unsigned int pllreffreq = 25000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+ testp = 0;
+
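+ /* The post divider stays at 1; scan the m/n dividers for the closest match. */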
+ for (testm = 150; testm >= 6; testm--) {
+ if (clock * testm > vcomax)
+ continue;
+ if (clock * testm < vcomin)
+ continue;
+ for (testn = 120; testn >= 60; testn--) {
+ computed = (pllreffreq * testn) / testm;
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn + 1;
+ m = testm + 1;
+ p = testp + 1;
+ }
+ if (delta == 0)
+ break;
+ }
+ if (delta == 0)
+ break;
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200eh3_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200eh3_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200eh3_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200eh3_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200eh3_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200eh3_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200eh3_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200eh3_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200eh3_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200eh3_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200eh3_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200eh3_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200eh3_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200eh3_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200eh3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);
+static const struct mgag200_device_funcs mgag200_g200eh3_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200eh3_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update, // same as G200EH
+};
+
struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum mga_type type)
+ const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -37,15 +201,24 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200eh3_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200eh3_device_info,
+ &mgag200_g200eh3_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200eh_init_registers(mdev); // same as G200EH
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200eh3_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 0790d4e6463d..bce267e0f7de 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -1,11 +1,305 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+static void mgag200_g200er_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00, 0xc9, 0x1f, 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ WREG_DAC(0x90, 0); /* G200ER specific */
+
+ mgag200_init_registers(mdev);
+
+ WREG_ECRT(0x24, 0x5); /* G200ER specific */
+}
+
+static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev)
+{
+ static const uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */
+ u32 memctl;
+
+ memctl = RREG32(MGAREG_MEMCTL);
+
+ memctl |= RESET_FLAG;
+ WREG32(MGAREG_MEMCTL, memctl);
+
+ udelay(1000);
+
+ memctl &= ~RESET_FLAG;
+ WREG32(MGAREG_MEMCTL, memctl);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200er_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 1488000;
+ static const unsigned int vcomin = 1056000;
+ static const unsigned int pllreffreq = 48000;
+ static const unsigned int m_div_val[] = { 1, 2, 4, 8 };
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ int testr, testn, testm, testo;
+ unsigned int p, m, n, s;
+ unsigned int computed, vco;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
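+ /* Exhaustively scan the divider combinations, stopping early on an exact match. */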
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ vco = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (vco < vcomin)
+ continue;
+ if (vco > vcomax)
+ continue;
+ computed = vco / (m_div_val[testm] * (testo + 1));
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = (testm | (testo << 3)) + 1;
+ n = testn + 1;
+ p = testr + 1;
+ s = testr;
+ }
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200er_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, xpixpllcp);
+
+ udelay(50);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200er_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200er_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
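+ /*
+ * Mirrors mgag200_crtc_helper_atomic_enable(), plus the G200ER-specific
+ * tag-FIFO reset after the PLL update.
+ */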
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
+
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
+
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
+
+ mgag200_g200er_reset_tagfifo(mdev);
+
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
+
+static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
+ .mode_valid = mgag200_crtc_helper_mode_valid,
+ .atomic_check = mgag200_crtc_helper_atomic_check,
+ .atomic_flush = mgag200_crtc_helper_atomic_flush,
+ .atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+};
+
+static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200er_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200er_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200er_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200er_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200er_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200er_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200er_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200er_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200er_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200er_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200er_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +307,12 @@
static const struct mgag200_device_info mgag200_g200er_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 1, 0, false);
-struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200er_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200er_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200er_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -32,15 +330,24 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200er_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200er_device_info,
+ &mgag200_g200er_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200er_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200er_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 5353422d0eef..ac957f42abe1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -1,11 +1,306 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+static void mgag200_g200ev_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00,
+ MGA1064_PIX_CLK_CTL_SEL_PLL,
+ MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS,
+ 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)) ||
+ ((i >= 0x44) && (i <= 0x4e)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev)
+{
+ WREG_ECRT(0x06, 0x00);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200ev_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 550000;
+ static const unsigned int vcomin = 150000;
+ static const unsigned int pllreffreq = 50000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
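+ /* For each post divider that keeps the VCO in range, scan the n/m dividers for the closest match. */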
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn;
+ m = testm;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200ev_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG8(DAC_DATA, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, xpixpllcp);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG8(DAC_DATA, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200ev_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200ev_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
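+ /*
+ * Mirrors mgag200_crtc_helper_atomic_enable(), plus the G200EV-specific
+ * priority-level setup after the PLL update.
+ */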
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
+
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
+
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
+
+ mgag200_g200ev_set_hiprilvl(mdev);
+
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
+
+static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
+ .mode_valid = mgag200_crtc_helper_mode_valid,
+ .atomic_check = mgag200_crtc_helper_atomic_check,
+ .atomic_flush = mgag200_crtc_helper_atomic_flush,
+ .atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+};
+
+static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200ev_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200ev_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200ev_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200ev_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200ev_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200ev_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200ev_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200ev_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200ev_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200ev_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200ev_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +308,12 @@
static const struct mgag200_device_info mgag200_g200ev_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 32700, false, 0, 1, false);
-struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200ev_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200ev_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200ev_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -36,15 +335,24 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200ev_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200ev_device_info,
+ &mgag200_g200ev_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200ev_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200ev_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index 3bfc1324cf78..170934414d7d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -2,10 +2,179 @@
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+static void mgag200_g200ew3_init_registers(struct mga_device *mdev)
+{
+ mgag200_g200wb_init_registers(mdev); // same as G200WB
+
+ WREG_ECRT(0x34, 0x5); // G200EW3 specific
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200ew3_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 800000;
+ static const unsigned int vcomin = 400000;
+ static const unsigned int pllreffreq = 25000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn, testp2;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
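+ /* brute-force search over the P/P2/M/N divider values for the combination closest to the requested pixel clock */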
+ for (testp = 1; testp < 8; testp++) {
+ for (testp2 = 1; testp2 < 8; testp2++) {
+ if (testp < testp2)
+ continue;
+ if ((clock * testp * testp2) > vcomax)
+ continue;
+ if ((clock * testp * testp2) < vcomin)
+ continue;
+ for (testm = 1; testm < 26; testm++) {
+ for (testn = 32; testn < 2048 ; testn++) {
+ computed = (pllreffreq * testn) / (testm * testp * testp2);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm + 1;
+ n = testn + 1;
+ p = testp + 1;
+ s = testp2;
+ }
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200ew3_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200ew3_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200ew3_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200ew3_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200ew3_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200ew3_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200ew3_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200ew3_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200ew3_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200ew3_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200ew3_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200ew3_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200ew3_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200ew3_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,6 +182,13 @@
static const struct mgag200_device_info mgag200_g200ew3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
+static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = {
+ .disable_vidrst = mgag200_bmc_disable_vidrst,
+ .enable_vidrst = mgag200_bmc_enable_vidrst,
+ .pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB
+};
+
static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev)
{
resource_size_t vram_size = resource_size(mdev->vram_res);
@@ -23,8 +199,7 @@ static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev
}
struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum mga_type type)
+ const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -46,15 +221,24 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200ew3_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200ew3_device_info,
+ &mgag200_g200ew3_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200ew3_init_registers(mdev);
+
vram_available = mgag200_g200ew3_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200ew3_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 0a3e66695e22..be389ed91cbd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -1,8 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
@@ -28,6 +33,404 @@ static int mgag200_g200se_init_pci_options(struct pci_dev *pdev)
return mgag200_init_pci_options(pdev, option, 0x00008000);
}
+static void mgag200_g200se_init_registers(struct mgag200_g200se_device *g200se)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x03,
+ MGA1064_PIX_CLK_CTL_SEL_PLL,
+ MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS,
+ 0x00, 0x00, 0x00)
+ };
+
+ struct mga_device *mdev = &g200se->base;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
+ const struct drm_display_mode *mode,
+ const struct drm_format_info *format)
+{
+ struct mgag200_g200se_device *g200se = to_mgag200_g200se_device(&mdev->base);
+ unsigned int hiprilvl;
+ u8 crtcext6;
+
+ if (g200se->unique_rev_id >= 0x04) {
+ hiprilvl = 0;
+ } else if (g200se->unique_rev_id >= 0x02) {
+ unsigned int bpp;
+ unsigned long mb;
+
+ if (format->cpp[0] * 8 > 16)
+ bpp = 32;
+ else if (format->cpp[0] * 8 > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hiprilvl = 0;
+ else if (mb > 2600)
+ hiprilvl = 1;
+ else if (mb > 1900)
+ hiprilvl = 2;
+ else if (mb > 1160)
+ hiprilvl = 3;
+ else if (mb > 440)
+ hiprilvl = 4;
+ else
+ hiprilvl = 5;
+
+ } else if (g200se->unique_rev_id >= 0x01) {
+ hiprilvl = 3;
+ } else {
+ hiprilvl = 4;
+ }
+
+ crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */
+
+ WREG_ECRT(0x06, crtcext6);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200se_00_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 320000;
+ static const unsigned int vcomin = 160000;
+ static const unsigned int pllreffreq = 25000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
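+ /* scan the power-of-two post dividers and all M/N combinations for the closest match */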
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm;
+ n = testn;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ pr_warn("PLL delta too large\n");
+ return -EINVAL;
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200se_00_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+}
+
+static int mgag200_g200se_04_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 1600000;
+ static const unsigned int vcomin = 800000;
+ static const unsigned int pllreffreq = 25000;
+ static const unsigned int pvalues_e4[] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+ unsigned int fvv;
+ unsigned int i;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
+ if (clock < 25000)
+ clock = 25000;
+ clock = clock * 2;
+
+ /* Permitted delta is 0.5%, per the VESA specification */
+ permitteddelta = clock * 5 / 1000;
+
+ for (i = 0 ; i < ARRAY_SIZE(pvalues_e4); i++) {
+ testp = pvalues_e4[i];
+
+ if ((clock * testp) > vcomax)
+ continue;
+ if ((clock * testp) < vcomin)
+ continue;
+
+ for (testn = 50; testn <= 256; testn++) {
+ for (testm = 1; testm <= 32; testm++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm;
+ n = testn;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ fvv = pllreffreq * n / m;
+ fvv = (fvv - 800000) / 50000;
+ if (fvv > 15)
+ fvv = 15;
+ s = fvv << 1;
+
+ if (delta > permitteddelta) {
+ pr_warn("PLL delta too large\n");
+ return -EINVAL;
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+
+ WREG_DAC(0x1a, 0x09);
+ msleep(20);
+ WREG_DAC(0x1a, 0x01);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200se_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200se_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
+
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
+
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
+
+ mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
+
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
+
+static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
+ .mode_valid = mgag200_crtc_helper_mode_valid,
+ .atomic_check = mgag200_crtc_helper_atomic_check,
+ .atomic_flush = mgag200_crtc_helper_atomic_flush,
+ .atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+};
+
+static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200se_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200se_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200se_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200se_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200se_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200se_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200se_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200se_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200se_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200se_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200se_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -65,11 +468,22 @@ static int mgag200_g200se_init_unique_rev_id(struct mgag200_g200se_device *g200s
return 0;
}
+static const struct mgag200_device_funcs mgag200_g200se_00_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200se_00_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200se_00_pixpllc_atomic_update,
+};
+
+static const struct mgag200_device_funcs mgag200_g200se_04_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200se_04_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200se_04_pixpllc_atomic_update,
+};
+
struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mgag200_g200se_device *g200se;
const struct mgag200_device_info *info;
+ const struct mgag200_device_funcs *funcs;
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
@@ -116,15 +530,28 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
return ERR_PTR(-EINVAL);
}
- ret = mgag200_device_init(mdev, type, info);
+ if (g200se->unique_rev_id >= 0x04)
+ funcs = &mgag200_g200se_04_device_funcs;
+ else
+ funcs = &mgag200_g200se_00_device_funcs;
+
+ ret = mgag200_device_init(mdev, info, funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200se_init_registers(g200se);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200se_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index c8450ac8eaec..9baa727ac6f9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -1,11 +1,314 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+void mgag200_g200wb_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x07, 0xc9, 0x1f, 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)) ||
+ ((i >= 0x44) && (i <= 0x4e)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200wb_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 550000;
+ static const unsigned int vcomin = 150000;
+ static const unsigned int pllreffreq = 48000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn;
+ m = testm;
+ p = testp;
+ s = 0;
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+void mgag200_g200wb_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ bool pll_locked = false;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+ int i, j, tmpcount, vcount;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = ((pixpllcn & GENMASK(10, 9)) >> 3) | (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
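+ /* retry programming the PLL until it locks; a lock is detected when VCOUNT advances */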
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG8(DAC_DATA, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200wb_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200wb_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200wb_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200wb_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200wb_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200wb_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200wb_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200wb_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200wb_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200wb_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200wb_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200wb_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200wb_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200wb_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200wb_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +316,14 @@
static const struct mgag200_device_info mgag200_g200wb_device_info =
MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
-struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = {
+ .disable_vidrst = mgag200_bmc_disable_vidrst,
+ .enable_vidrst = mgag200_bmc_enable_vidrst,
+ .pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -36,15 +345,24 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200wb_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200wb_device_info,
+ &mgag200_g200wb_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200wb_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200wb_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 225cca2ed60e..bbab2549243a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -11,24 +11,19 @@
#include <linux/delay.h>
#include <linux/iosys-map.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include "mgag200_drv.h"
-#define MGAG200_LUT_SIZE 256
-
/*
* This file contains setup code for the CRTC.
*/
@@ -132,95 +127,6 @@ static inline void mga_wait_busy(struct mga_device *mdev)
} while ((status & 0x01) && time_before(jiffies, timeout));
}
-static void mgag200_g200wb_hold_bmc(struct mga_device *mdev)
-{
- u8 tmp;
- int iter_max;
-
- /* 1- The first step is to warn the BMC of an upcoming mode change.
- * We are putting the misc<0> to output.*/
-
- WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x10;
- WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
-
- /* we are putting a 1 on the misc<0> line */
- WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x10;
- WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
-
- /* 2- Second step to mask and further scan request
- * This will be done by asserting the remfreqmsk bit (XSPAREREG<7>)
- */
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x80;
- WREG_DAC(MGA1064_SPAREREG, tmp);
-
- /* 3a- the third step is to verifu if there is an active scan
- * We are searching for a 0 on remhsyncsts <XSPAREREG<0>)
- */
- iter_max = 300;
- while (!(tmp & 0x1) && iter_max) {
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- udelay(1000);
- iter_max--;
- }
-
- /* 3b- this step occurs only if the remove is actually scanning
- * we are waiting for the end of the frame which is a 1 on
- * remvsyncsts (XSPAREREG<1>)
- */
- if (iter_max) {
- iter_max = 300;
- while ((tmp & 0x2) && iter_max) {
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- udelay(1000);
- iter_max--;
- }
- }
-}
-
-static void mgag200_g200wb_release_bmc(struct mga_device *mdev)
-{
- u8 tmp;
-
- /* 1- The first step is to ensure that the vrsten and hrsten are set */
- WREG8(MGAREG_CRTCEXT_INDEX, 1);
- tmp = RREG8(MGAREG_CRTCEXT_DATA);
- WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
-
- /* 2- second step is to assert the rstlvl2 */
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x8;
- WREG8(DAC_DATA, tmp);
-
- /* wait 10 us */
- udelay(10);
-
- /* 3- deassert rstlvl2 */
- tmp &= ~0x08;
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
- WREG8(DAC_DATA, tmp);
-
- /* 4- remove mask of scan request */
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- tmp &= ~0x80;
- WREG8(DAC_DATA, tmp);
-
- /* 5- put back a 0 on the misc<0> line */
- WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
- tmp = RREG8(DAC_DATA);
- tmp &= ~0x10;
- WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
-}
-
/*
* This is how the framebuffer base address is stored in g200 cards:
* * Assume @offset is the gpu_addr variable of the framebuffer object
@@ -267,86 +173,10 @@ static void mgag200_set_startadd(struct mga_device *mdev,
WREG_ECRT(0x00, crtcext0);
}
-static void mgag200_set_dac_regs(struct mga_device *mdev)
-{
- size_t i;
- u8 dacvalue[] = {
- /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
- /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
- /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
- /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
- /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
- /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
- };
-
- switch (mdev->type) {
- case G200_PCI:
- case G200_AGP:
- dacvalue[MGA1064_SYS_PLL_M] = 0x04;
- dacvalue[MGA1064_SYS_PLL_N] = 0x2D;
- dacvalue[MGA1064_SYS_PLL_P] = 0x19;
- break;
- case G200_SE_A:
- case G200_SE_B:
- dacvalue[MGA1064_VREF_CTL] = 0x03;
- dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
- dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
- MGA1064_MISC_CTL_VGA8 |
- MGA1064_MISC_CTL_DAC_RAM_CS;
- break;
- case G200_WB:
- case G200_EW3:
- dacvalue[MGA1064_VREF_CTL] = 0x07;
- break;
- case G200_EV:
- dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
- dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
- MGA1064_MISC_CTL_DAC_RAM_CS;
- break;
- case G200_EH:
- case G200_EH3:
- dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
- MGA1064_MISC_CTL_DAC_RAM_CS;
- break;
- case G200_ER:
- break;
- }
-
- for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
- if ((i <= 0x17) ||
- (i == 0x1b) ||
- (i == 0x1c) ||
- ((i >= 0x1f) && (i <= 0x29)) ||
- ((i >= 0x30) && (i <= 0x37)))
- continue;
- if (IS_G200_SE(mdev) &&
- ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
- continue;
- if ((mdev->type == G200_EV ||
- mdev->type == G200_WB ||
- mdev->type == G200_EH ||
- mdev->type == G200_EW3 ||
- mdev->type == G200_EH3) &&
- (i >= 0x44) && (i <= 0x4e))
- continue;
-
- WREG_DAC(i, dacvalue[i]);
- }
-
- if (mdev->type == G200_ER)
- WREG_DAC(0x90, 0);
-}
-
-static void mgag200_init_regs(struct mga_device *mdev)
+void mgag200_init_registers(struct mga_device *mdev)
{
u8 crtc11, misc;
- mgag200_set_dac_regs(mdev);
-
WREG_SEQ(2, 0x0f);
WREG_SEQ(3, 0x00);
WREG_SEQ(4, 0x0e);
@@ -364,19 +194,12 @@ static void mgag200_init_regs(struct mga_device *mdev)
MGAREG_CRTC11_VINTCLR);
WREG_CRT(0x11, crtc11);
- if (mdev->type == G200_ER)
- WREG_ECRT(0x24, 0x5);
-
- if (mdev->type == G200_EW3)
- WREG_ECRT(0x34, 0x5);
-
misc = RREG8(MGA_MISC_IN);
misc |= MGAREG_MISC_IOADSEL;
WREG8(MGA_MISC_OUT, misc);
}
-static void mgag200_set_mode_regs(struct mga_device *mdev,
- const struct drm_display_mode *mode)
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode)
{
const struct mgag200_device_info *info = mdev->info;
unsigned int hdisplay, hsyncstart, hsyncend, htotal;
@@ -500,11 +323,9 @@ static void mgag200_set_offset(struct mga_device *mdev,
WREG_ECRT(0x00, crtcext0);
}
-static void mgag200_set_format_regs(struct mga_device *mdev,
- const struct drm_framebuffer *fb)
+void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format)
{
struct drm_device *dev = &mdev->base;
- const struct drm_format_info *format = fb->format;
unsigned int bpp, bppshift, scale;
u8 crtcext3, xmulctrl;
@@ -565,76 +386,9 @@ static void mgag200_set_format_regs(struct mga_device *mdev,
WREG_ECRT(3, crtcext3);
}
-static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev)
+void mgag200_enable_display(struct mga_device *mdev)
{
- static uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */
- u32 memctl;
-
- memctl = RREG32(MGAREG_MEMCTL);
-
- memctl |= RESET_FLAG;
- WREG32(MGAREG_MEMCTL, memctl);
-
- udelay(1000);
-
- memctl &= ~RESET_FLAG;
- WREG32(MGAREG_MEMCTL, memctl);
-}
-
-static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
- const struct drm_display_mode *mode,
- const struct drm_framebuffer *fb)
-{
- struct mgag200_g200se_device *g200se = to_mgag200_g200se_device(&mdev->base);
- unsigned int hiprilvl;
- u8 crtcext6;
-
- if (g200se->unique_rev_id >= 0x04) {
- hiprilvl = 0;
- } else if (g200se->unique_rev_id >= 0x02) {
- unsigned int bpp;
- unsigned long mb;
-
- if (fb->format->cpp[0] * 8 > 16)
- bpp = 32;
- else if (fb->format->cpp[0] * 8 > 8)
- bpp = 16;
- else
- bpp = 8;
-
- mb = (mode->clock * bpp) / 1000;
- if (mb > 3100)
- hiprilvl = 0;
- else if (mb > 2600)
- hiprilvl = 1;
- else if (mb > 1900)
- hiprilvl = 2;
- else if (mb > 1160)
- hiprilvl = 3;
- else if (mb > 440)
- hiprilvl = 4;
- else
- hiprilvl = 5;
-
- } else if (g200se->unique_rev_id >= 0x01) {
- hiprilvl = 3;
- } else {
- hiprilvl = 4;
- }
-
- crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */
-
- WREG_ECRT(0x06, crtcext6);
-}
-
-static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev)
-{
- WREG_ECRT(0x06, 0x00);
-}
-
-static void mgag200_enable_display(struct mga_device *mdev)
-{
- u8 seq0, seq1, crtcext1;
+ u8 seq0, crtcext1;
RREG_SEQ(0x00, seq0);
seq0 |= MGAREG_SEQ0_SYNCRST |
@@ -648,12 +402,6 @@ static void mgag200_enable_display(struct mga_device *mdev)
mga_wait_vsync(mdev);
mga_wait_busy(mdev);
- RREG_SEQ(0x01, seq1);
- seq1 &= ~MGAREG_SEQ1_SCROFF;
- WREG_SEQ(0x01, seq1);
-
- msleep(20);
-
RREG_ECRT(0x01, crtcext1);
crtcext1 &= ~MGAREG_CRTCEXT1_VSYNCOFF;
crtcext1 &= ~MGAREG_CRTCEXT1_HSYNCOFF;
@@ -662,7 +410,7 @@ static void mgag200_enable_display(struct mga_device *mdev)
static void mgag200_disable_display(struct mga_device *mdev)
{
- u8 seq0, seq1, crtcext1;
+ u8 seq0, crtcext1;
RREG_SEQ(0x00, seq0);
seq0 &= ~MGAREG_SEQ0_SYNCRST;
@@ -675,59 +423,127 @@ static void mgag200_disable_display(struct mga_device *mdev)
mga_wait_vsync(mdev);
mga_wait_busy(mdev);
- RREG_SEQ(0x01, seq1);
- seq1 |= MGAREG_SEQ1_SCROFF;
- WREG_SEQ(0x01, seq1);
-
- msleep(20);
-
RREG_ECRT(0x01, crtcext1);
crtcext1 |= MGAREG_CRTCEXT1_VSYNCOFF |
MGAREG_CRTCEXT1_HSYNCOFF;
WREG_ECRT(0x01, crtcext1);
}
+static void mgag200_handle_damage(struct mga_device *mdev, const struct iosys_map *vmap,
+ struct drm_framebuffer *fb, struct drm_rect *clip)
+{
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram);
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
+ drm_fb_memcpy(&dst, fb->pitches, vmap, fb, clip);
+}
+
/*
- * Connector
+ * Primary plane
*/
-static int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
+const uint32_t mgag200_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+};
+
+const size_t mgag200_primary_plane_formats_size = ARRAY_SIZE(mgag200_primary_plane_formats);
+
+const uint64_t mgag200_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
{
- struct mga_device *mdev = to_mga_device(connector->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct mgag200_crtc_state *new_mgag200_crtc_state;
int ret;
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&mdev->rmmio_lock);
- ret = drm_connector_helper_get_modes_from_ddc(connector);
- mutex_unlock(&mdev->rmmio_lock);
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_crtc);
- return ret;
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (plane->state)
+ fb = plane->state->fb;
+
+ if (!fb || (fb->format != new_fb->format))
+ new_crtc_state->mode_changed = true; /* update PLL settings */
+
+ new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ new_mgag200_crtc_state->format = new_fb->format;
+
+ return 0;
}
-static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
- .get_modes = mgag200_vga_connector_helper_get_modes,
-};
+void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ u8 seq1;
-static const struct drm_connector_funcs mga_vga_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
+ if (!fb)
+ return;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage);
+ }
+
+ /* Always scanout image at VRAM offset 0 */
+ mgag200_set_startadd(mdev, (u32)0);
+ mgag200_set_offset(mdev, fb);
+
+ if (!old_plane_state->crtc && plane_state->crtc) { // enabling
+ RREG_SEQ(0x01, seq1);
+ seq1 &= ~MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
+ msleep(20);
+ }
+}
+
+void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 seq1;
+
+ RREG_SEQ(0x01, seq1);
+ seq1 |= MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
+ msleep(20);
+}
/*
- * Simple Display Pipe
+ * CRTC
*/
-static enum drm_mode_status
-mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- struct mga_device *mdev = to_mga_device(pipe->crtc.dev);
+ struct mga_device *mdev = to_mga_device(crtc->dev);
const struct mgag200_device_info *info = mdev->info;
/*
@@ -754,167 +570,112 @@ mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
return MODE_OK;
}
-static void
-mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
- struct drm_rect *clip, const struct iosys_map *map)
-{
- void __iomem *dst = mdev->vram;
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
-
- dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip);
- drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip);
-}
-
-static void
-mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
{
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
- struct mgag200_pll *pixpll = &mdev->pixpll;
- struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
-
- /*
- * Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock.
- */
- mutex_lock(&mdev->rmmio_lock);
-
- if (mdev->type == G200_WB || mdev->type == G200_EW3)
- mgag200_g200wb_hold_bmc(mdev);
-
- mgag200_set_format_regs(mdev, fb);
- mgag200_set_mode_regs(mdev, adjusted_mode);
-
- pixpll->funcs->update(pixpll, &mgag200_crtc_state->pixpllc);
-
- if (mdev->type == G200_ER)
- mgag200_g200er_reset_tagfifo(mdev);
-
- if (IS_G200_SE(mdev))
- mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, fb);
- else if (mdev->type == G200_EV)
- mgag200_g200ev_set_hiprilvl(mdev);
-
- if (mdev->type == G200_WB || mdev->type == G200_EW3)
- mgag200_g200wb_release_bmc(mdev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct drm_property_blob *new_gamma_lut = new_crtc_state->gamma_lut;
+ int ret;
- if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, fb->format, crtc_state->gamma_lut->data);
- else
- mgag200_crtc_set_gamma_linear(mdev, fb->format);
+ ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
+ if (ret)
+ return ret;
- mgag200_enable_display(mdev);
+ if (!new_crtc_state->enable)
+ return 0;
- mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->data[0]);
+ if (new_crtc_state->mode_changed) {
+ if (funcs->pixpllc_atomic_check) {
+ ret = funcs->pixpllc_atomic_check(crtc, new_state);
+ if (ret)
+ return ret;
+ }
+ }
- /* Always scanout image at VRAM offset 0 */
- mgag200_set_startadd(mdev, (u32)0);
- mgag200_set_offset(mdev, fb);
+ if (new_crtc_state->color_mgmt_changed && new_gamma_lut) {
+ if (new_gamma_lut->length != MGAG200_LUT_SIZE * sizeof(struct drm_color_lut)) {
+ drm_dbg(dev, "Wrong size for gamma_lut %zu\n", new_gamma_lut->length);
+ return -EINVAL;
+ }
+ }
- mutex_unlock(&mdev->rmmio_lock);
+ return drm_atomic_add_affected_planes(new_state, crtc);
}
-static void
-mgag200_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
- struct drm_crtc *crtc = &pipe->crtc;
- struct mga_device *mdev = to_mga_device(crtc->dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
- mgag200_disable_display(mdev);
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+ }
}
-static int
-mgag200_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
- struct drm_plane *plane = plane_state->plane;
- struct drm_device *dev = plane->dev;
+ struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
- struct mgag200_pll *pixpll = &mdev->pixpll;
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
- struct drm_framebuffer *new_fb = plane_state->fb;
- struct drm_framebuffer *fb = NULL;
- int ret;
+ const struct drm_format_info *format = mgag200_crtc_state->format;
- if (!new_fb)
- return 0;
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
- if (plane->state)
- fb = plane->state->fb;
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
- if (!fb || (fb->format != new_fb->format))
- crtc_state->mode_changed = true; /* update PLL settings */
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
- if (crtc_state->mode_changed) {
- ret = pixpll->funcs->compute(pixpll, crtc_state->mode.clock,
- &mgag200_crtc_state->pixpllc);
- if (ret)
- return ret;
- }
+ mgag200_enable_display(mdev);
- if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
- if (crtc_state->gamma_lut->length !=
- MGAG200_LUT_SIZE * sizeof(struct drm_color_lut)) {
- drm_err(dev, "Wrong size for gamma_lut %zu\n",
- crtc_state->gamma_lut->length);
- return -EINVAL;
- }
- }
- return 0;
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
}
-static void
-mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
- struct drm_plane *plane = &pipe->plane;
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_device *dev = plane->dev;
- struct mga_device *mdev = to_mga_device(dev);
- struct drm_plane_state *state = plane->state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_rect damage;
- struct drm_atomic_helper_damage_iter iter;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
- if (!fb)
- return;
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
- mutex_lock(&mdev->rmmio_lock);
+ mgag200_disable_display(mdev);
- if (crtc->state->color_mgmt_changed && crtc->state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, fb->format, crtc->state->gamma_lut->data);
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
- drm_atomic_helper_damage_iter_init(&iter, old_state, state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->data[0]);
- }
- /* Always scanout image at VRAM offset 0 */
- mgag200_set_startadd(mdev, (u32)0);
- mgag200_set_offset(mdev, fb);
+void mgag200_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mgag200_crtc_state *mgag200_crtc_state;
- mutex_unlock(&mdev->rmmio_lock);
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ mgag200_crtc_state = kzalloc(sizeof(*mgag200_crtc_state), GFP_KERNEL);
+ if (mgag200_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &mgag200_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
-static struct drm_crtc_state *
-mgag200_simple_display_pipe_duplicate_crtc_state(struct drm_simple_display_pipe *pipe)
+struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_crtc_state *crtc_state = crtc->state;
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct mgag200_crtc_state *new_mgag200_crtc_state;
@@ -927,14 +688,14 @@ mgag200_simple_display_pipe_duplicate_crtc_state(struct drm_simple_display_pipe
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &new_mgag200_crtc_state->base);
+ new_mgag200_crtc_state->format = mgag200_crtc_state->format;
memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc,
sizeof(new_mgag200_crtc_state->pixpllc));
return &new_mgag200_crtc_state->base;
}
-static void mgag200_simple_display_pipe_destroy_crtc_state(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state)
+void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
{
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
@@ -942,50 +703,49 @@ static void mgag200_simple_display_pipe_destroy_crtc_state(struct drm_simple_dis
kfree(mgag200_crtc_state);
}
-static void mgag200_simple_display_pipe_reset_crtc(struct drm_simple_display_pipe *pipe)
+/*
+ * Connector
+ */
+
+int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
{
- struct drm_crtc *crtc = &pipe->crtc;
- struct mgag200_crtc_state *mgag200_crtc_state;
+ struct mga_device *mdev = to_mga_device(connector->dev);
+ int ret;
- if (crtc->state) {
- mgag200_simple_display_pipe_destroy_crtc_state(pipe, crtc->state);
- crtc->state = NULL; /* must be set to NULL here */
- }
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&mdev->rmmio_lock);
+ ret = drm_connector_helper_get_modes_from_ddc(connector);
+ mutex_unlock(&mdev->rmmio_lock);
- mgag200_crtc_state = kzalloc(sizeof(*mgag200_crtc_state), GFP_KERNEL);
- if (!mgag200_crtc_state)
- return;
- __drm_atomic_helper_crtc_reset(crtc, &mgag200_crtc_state->base);
+ return ret;
}
-static const struct drm_simple_display_pipe_funcs
-mgag200_simple_display_pipe_funcs = {
- .mode_valid = mgag200_simple_display_pipe_mode_valid,
- .enable = mgag200_simple_display_pipe_enable,
- .disable = mgag200_simple_display_pipe_disable,
- .check = mgag200_simple_display_pipe_check,
- .update = mgag200_simple_display_pipe_update,
- .reset_crtc = mgag200_simple_display_pipe_reset_crtc,
- .duplicate_crtc_state = mgag200_simple_display_pipe_duplicate_crtc_state,
- .destroy_crtc_state = mgag200_simple_display_pipe_destroy_crtc_state,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
-};
-
-static const uint32_t mgag200_simple_display_pipe_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
-};
-
-static const uint64_t mgag200_simple_display_pipe_fmtmods[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
/*
* Mode config
*/
+static void mgag200_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(state->dev);
+
+ /*
+ * Concurrent operations could possibly trigger a call to
+ * drm_connector_helper_funcs.get_modes by trying to read the
+ * display modes. Protect access to I/O registers by acquiring
+ * the I/O-register lock.
+ */
+ mutex_lock(&mdev->rmmio_lock);
+ drm_atomic_helper_commit_tail(state);
+ mutex_unlock(&mdev->rmmio_lock);
+}
+
+static const struct drm_mode_config_helper_funcs mgag200_mode_config_helper_funcs = {
+ .atomic_commit_tail = mgag200_mode_config_helper_atomic_commit_tail,
+};
+
/* Calculates a mode's required memory bandwidth (in KiB/sec). */
static uint32_t mgag200_calculate_mode_bandwidth(const struct drm_display_mode *mode,
unsigned int bits_per_pixel)
@@ -1048,23 +808,16 @@ static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_available)
+int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available)
{
struct drm_device *dev = &mdev->base;
- struct mga_i2c_chan *i2c = &mdev->i2c;
- struct drm_connector *connector = &mdev->connector;
- struct drm_simple_display_pipe *pipe = &mdev->display_pipe;
- size_t format_count = ARRAY_SIZE(mgag200_simple_display_pipe_formats);
int ret;
- mgag200_init_regs(mdev);
-
mdev->vram_available = vram_available;
ret = drmm_mode_config_init(dev);
if (ret) {
- drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
- ret);
+ drm_err(dev, "drmm_mode_config_init() failed: %d\n", ret);
return ret;
}
@@ -1073,48 +826,7 @@ int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_available
dev->mode_config.preferred_depth = 24;
dev->mode_config.fb_base = mdev->vram_res->start;
dev->mode_config.funcs = &mgag200_mode_config_funcs;
-
- ret = mgag200_i2c_init(mdev, i2c);
- if (ret) {
- drm_err(dev, "failed to add DDC bus: %d\n", ret);
- return ret;
- }
-
- ret = drm_connector_init_with_ddc(dev, connector,
- &mga_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &i2c->adapter);
- if (ret) {
- drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
- return ret;
- }
- drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
-
- ret = mgag200_pixpll_init(&mdev->pixpll, mdev);
- if (ret)
- return ret;
-
- ret = drm_simple_display_pipe_init(dev, pipe,
- &mgag200_simple_display_pipe_funcs,
- mgag200_simple_display_pipe_formats,
- format_count,
- mgag200_simple_display_pipe_fmtmods,
- connector);
- if (ret) {
- drm_err(dev,
- "drm_simple_display_pipe_init() failed, error %d\n",
- ret);
- return ret;
- }
-
- drm_plane_enable_fb_damage_clips(&pipe->plane);
-
- /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
- drm_mode_crtc_set_gamma_size(&pipe->crtc, MGAG200_LUT_SIZE);
-
- drm_crtc_enable_color_mgmt(&pipe->crtc, 0, false, MGAG200_LUT_SIZE);
-
- drm_mode_config_reset(dev);
+ dev->mode_config.helper_private = &mgag200_mode_config_helper_funcs;
return 0;
}
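Both mgag200 hunks above funnel all I/O-register access through mdev->rmmio_lock: the connector's .get_modes path holds it around the DDC transfer, and the driver's .atomic_commit_tail holds it for the whole commit, so EDID reads can never interleave with a modeset. A rough sketch of that pattern for a hypothetical foo driver (struct foo_device and its reg_lock are assumptions; the DRM helpers are the ones used above):

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_modeset_helper_vtables.h>
	#include <drm/drm_probe_helper.h>
	#include <linux/mutex.h>

	struct foo_device {
		struct drm_device base;
		struct mutex reg_lock;		/* serializes all MMIO access */
	};

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		struct foo_device *foo = container_of(connector->dev, struct foo_device, base);
		int ret;

		/* The EDID transfer touches the same registers as a modeset. */
		mutex_lock(&foo->reg_lock);
		ret = drm_connector_helper_get_modes_from_ddc(connector);
		mutex_unlock(&foo->reg_lock);

		return ret;
	}

	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
	{
		struct foo_device *foo = container_of(state->dev, struct foo_device, base);

		/* Block concurrent .get_modes calls for the duration of the commit. */
		mutex_lock(&foo->reg_lock);
		drm_atomic_helper_commit_tail(state);
		mutex_unlock(&foo->reg_lock);
	}

	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
		.atomic_commit_tail = foo_atomic_commit_tail,
	};

The helper struct is then hooked up exactly as above, through dev->mode_config.helper_private.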
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
deleted file mode 100644
index 8065ca5d8de9..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ /dev/null
@@ -1,997 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/delay.h>
-
-#include "mgag200_drv.h"
-
-/*
- * G200
- */
-
-static int mgag200_pixpll_compute_g200(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- struct mga_device *mdev = pixpll->mdev;
- struct drm_device *dev = &mdev->base;
- struct mgag200_g200_device *g200 = to_mgag200_g200_device(dev);
- const int post_div_max = 7;
- const int in_div_min = 1;
- const int in_div_max = 6;
- const int feed_div_min = 7;
- const int feed_div_max = 127;
- u8 testp, testm, testn;
- u8 n = 0, m = 0, p, s;
- long f_vco;
- long computed;
- long delta, tmp_delta;
- long ref_clk = g200->ref_clk;
- long p_clk_min = g200->pclk_min;
- long p_clk_max = g200->pclk_max;
-
- if (clock > p_clk_max) {
- drm_err(dev, "Pixel Clock %ld too high\n", clock);
- return -EINVAL;
- }
-
- if (clock < p_clk_min >> 3)
- clock = p_clk_min >> 3;
-
- f_vco = clock;
- for (testp = 0;
- testp <= post_div_max && f_vco < p_clk_min;
- testp = (testp << 1) + 1, f_vco <<= 1)
- ;
- p = testp + 1;
-
- delta = clock;
-
- for (testm = in_div_min; testm <= in_div_max; testm++) {
- for (testn = feed_div_min; testn <= feed_div_max; testn++) {
- computed = ref_clk * (testn + 1) / (testm + 1);
- if (computed < f_vco)
- tmp_delta = f_vco - computed;
- else
- tmp_delta = computed - f_vco;
- if (tmp_delta < delta) {
- delta = tmp_delta;
- m = testm + 1;
- n = testn + 1;
- }
- }
- }
- f_vco = ref_clk * n / m;
- if (f_vco < 100000)
- s = 0;
- else if (f_vco < 140000)
- s = 1;
- else if (f_vco < 180000)
- s = 2;
- else
- s = 3;
-
- drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
- clock, f_vco, m, n, p, s);
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- struct mga_device *mdev = pixpll->mdev;
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200 = {
- .compute = mgag200_pixpll_compute_g200,
- .update = mgag200_pixpll_update_g200,
-};
-
-/*
- * G200SE
- */
-
-static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 320000;
- static const unsigned int vcomin = 160000;
- static const unsigned int pllreffreq = 25000;
-
- unsigned int delta, tmpdelta, permitteddelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
-
- for (testp = 8; testp > 0; testp /= 2) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testn = 17; testn < 256; testn++) {
- for (testm = 1; testm < 32; testm++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm;
- n = testn;
- p = testp;
- }
- }
- }
- }
-
- if (delta > permitteddelta) {
- pr_warn("PLL delta too large\n");
- return -EINVAL;
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void mgag200_pixpll_update_g200se_00(struct mgag200_pll *pixpll,
- const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
-}
-
-static int mgag200_pixpll_compute_g200se_04(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 1600000;
- static const unsigned int vcomin = 800000;
- static const unsigned int pllreffreq = 25000;
- static const unsigned int pvalues_e4[] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
-
- unsigned int delta, tmpdelta, permitteddelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
- unsigned int fvv;
- unsigned int i;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- if (clock < 25000)
- clock = 25000;
- clock = clock * 2;
-
-	/* Permitted delta is 0.5% per the VESA specification */
- permitteddelta = clock * 5 / 1000;
-
- for (i = 0 ; i < ARRAY_SIZE(pvalues_e4); i++) {
- testp = pvalues_e4[i];
-
- if ((clock * testp) > vcomax)
- continue;
- if ((clock * testp) < vcomin)
- continue;
-
- for (testn = 50; testn <= 256; testn++) {
- for (testm = 1; testm <= 32; testm++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
-
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm;
- n = testn;
- p = testp;
- }
- }
- }
- }
-
- fvv = pllreffreq * n / m;
- fvv = (fvv - 800000) / 50000;
- if (fvv > 15)
- fvv = 15;
- s = fvv << 1;
-
- if (delta > permitteddelta) {
- pr_warn("PLL delta too large\n");
- return -EINVAL;
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void mgag200_pixpll_update_g200se_04(struct mgag200_pll *pixpll,
- const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
-
- WREG_DAC(0x1a, 0x09);
- msleep(20);
- WREG_DAC(0x1a, 0x01);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200se_00 = {
- .compute = mgag200_pixpll_compute_g200se_00,
- .update = mgag200_pixpll_update_g200se_00,
-};
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200se_04 = {
- .compute = mgag200_pixpll_compute_g200se_04,
- .update = mgag200_pixpll_update_g200se_04,
-};
-
-/*
- * G200WB
- */
-
-static int mgag200_pixpll_compute_g200wb(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 550000;
- static const unsigned int vcomin = 150000;
- static const unsigned int pllreffreq = 48000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 1; testp < 9; testp++) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testm = 1; testm < 17; testm++) {
- for (testn = 1; testn < 151; testn++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn;
- m = testm;
- p = testp;
- s = 0;
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- int i, j, tmpcount, vcount;
- struct mga_device *mdev = pixpll->mdev;
- bool pll_locked = false;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = ((pixpllcn & GENMASK(10, 9)) >> 3) | (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- for (i = 0; i <= 32 && pll_locked == false; i++) {
- if (i > 0) {
- WREG8(MGAREG_CRTC_INDEX, 0x1e);
- tmp = RREG8(MGAREG_CRTC_DATA);
- if (tmp < 0xff)
- WREG8(MGAREG_CRTC_DATA, tmp+1);
- }
-
- /* set pixclkdis to 1 */
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG8(DAC_DATA, tmp);
-
- /* select PLL Set C */
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= 0x3 << 2;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- /* reset the PLL */
- WREG8(DAC_INDEX, MGA1064_VREF_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~0x04;
- WREG8(DAC_DATA, tmp);
-
- udelay(50);
-
- /* program pixel pll register */
- WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);
-
- udelay(50);
-
- /* turn pll on */
- WREG8(DAC_INDEX, MGA1064_VREF_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x04;
- WREG_DAC(MGA1064_VREF_CTL, tmp);
-
- udelay(500);
-
- /* select the pixel pll */
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
- tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
- tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
- WREG8(DAC_DATA, tmp);
-
- /* reset dotclock rate bit */
- WREG8(MGAREG_SEQ_INDEX, 1);
- tmp = RREG8(MGAREG_SEQ_DATA);
- tmp &= ~0x8;
- WREG8(MGAREG_SEQ_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- vcount = RREG8(MGAREG_VCOUNT);
-
- for (j = 0; j < 30 && pll_locked == false; j++) {
- tmpcount = RREG8(MGAREG_VCOUNT);
- if (tmpcount < vcount)
- vcount = 0;
- if ((tmpcount - vcount) > 2)
- pll_locked = true;
- else
- udelay(5);
- }
- }
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200wb = {
- .compute = mgag200_pixpll_compute_g200wb,
- .update = mgag200_pixpll_update_g200wb,
-};
-
-/*
- * G200EV
- */
-
-static int mgag200_pixpll_compute_g200ev(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 550000;
- static const unsigned int vcomin = 150000;
- static const unsigned int pllreffreq = 50000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 16; testp > 0; testp--) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testn = 1; testn < 257; testn++) {
- for (testm = 1; testm < 17; testm++) {
- computed = (pllreffreq * testn) /
- (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn;
- m = testm;
- p = testp;
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200ev(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= 0x3 << 2;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
- tmp = RREG8(DAC_DATA);
- WREG8(DAC_DATA, tmp & ~0x40);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- WREG_DAC(MGA1064_EV_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_EV_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_EV_PIX_PLLC_P, xpixpllcp);
-
- udelay(50);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
- tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
- tmp = RREG8(DAC_DATA);
- WREG8(DAC_DATA, tmp | 0x40);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= (0x3 << 2);
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ev = {
- .compute = mgag200_pixpll_compute_g200ev,
- .update = mgag200_pixpll_update_g200ev,
-};
-
-/*
- * G200EH
- */
-
-static int mgag200_pixpll_compute_g200eh(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 800000;
- static const unsigned int vcomin = 400000;
- static const unsigned int pllreffreq = 33333;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 16; testp > 0; testp >>= 1) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testm = 1; testm < 33; testm++) {
- for (testn = 17; testn < 257; testn++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn;
- m = testm;
- p = testp;
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200eh(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- int i, j, tmpcount, vcount;
- struct mga_device *mdev = pixpll->mdev;
- bool pll_locked = false;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- for (i = 0; i <= 32 && pll_locked == false; i++) {
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= 0x3 << 2;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- WREG_DAC(MGA1064_EH_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_EH_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_EH_PIX_PLLC_P, xpixpllcp);
-
- udelay(500);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
- tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- vcount = RREG8(MGAREG_VCOUNT);
-
- for (j = 0; j < 30 && pll_locked == false; j++) {
- tmpcount = RREG8(MGAREG_VCOUNT);
- if (tmpcount < vcount)
- vcount = 0;
- if ((tmpcount - vcount) > 2)
- pll_locked = true;
- else
- udelay(5);
- }
- }
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200eh = {
- .compute = mgag200_pixpll_compute_g200eh,
- .update = mgag200_pixpll_update_g200eh,
-};
-
-/*
- * G200EH3
- */
-
-static int mgag200_pixpll_compute_g200eh3(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 3000000;
- static const unsigned int vcomin = 1500000;
- static const unsigned int pllreffreq = 25000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
- testp = 0;
-
- for (testm = 150; testm >= 6; testm--) {
- if (clock * testm > vcomax)
- continue;
- if (clock * testm < vcomin)
- continue;
- for (testn = 120; testn >= 60; testn--) {
- computed = (pllreffreq * testn) / testm;
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn + 1;
- m = testm + 1;
- p = testp + 1;
- }
- if (delta == 0)
- break;
- }
- if (delta == 0)
- break;
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200eh3 = {
- .compute = mgag200_pixpll_compute_g200eh3,
- .update = mgag200_pixpll_update_g200eh, // same as G200EH
-};
-
-/*
- * G200ER
- */
-
-static int mgag200_pixpll_compute_g200er(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 1488000;
- static const unsigned int vcomin = 1056000;
- static const unsigned int pllreffreq = 48000;
- static const unsigned int m_div_val[] = { 1, 2, 4, 8 };
-
- unsigned int delta, tmpdelta;
- int testr, testn, testm, testo;
- unsigned int p, m, n, s;
- unsigned int computed, vco;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testr = 0; testr < 4; testr++) {
- if (delta == 0)
- break;
- for (testn = 5; testn < 129; testn++) {
- if (delta == 0)
- break;
- for (testm = 3; testm >= 0; testm--) {
- if (delta == 0)
- break;
- for (testo = 5; testo < 33; testo++) {
- vco = pllreffreq * (testn + 1) /
- (testr + 1);
- if (vco < vcomin)
- continue;
- if (vco > vcomax)
- continue;
- computed = vco / (m_div_val[testm] * (testo + 1));
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = (testm | (testo << 3)) + 1;
- n = testn + 1;
- p = testr + 1;
- s = testr;
- }
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200er(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG8(DAC_DATA, tmp);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= (0x3<<2) | 0xc0;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- WREG_DAC(MGA1064_ER_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_ER_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_ER_PIX_PLLC_P, xpixpllcp);
-
- udelay(50);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200er = {
- .compute = mgag200_pixpll_compute_g200er,
- .update = mgag200_pixpll_update_g200er,
-};
-
-/*
- * G200EW3
- */
-
-static int mgag200_pixpll_compute_g200ew3(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 800000;
- static const unsigned int vcomin = 400000;
- static const unsigned int pllreffreq = 25000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn, testp2;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 1; testp < 8; testp++) {
- for (testp2 = 1; testp2 < 8; testp2++) {
- if (testp < testp2)
- continue;
- if ((clock * testp * testp2) > vcomax)
- continue;
- if ((clock * testp * testp2) < vcomin)
- continue;
- for (testm = 1; testm < 26; testm++) {
- for (testn = 32; testn < 2048 ; testn++) {
- computed = (pllreffreq * testn) / (testm * testp * testp2);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm + 1;
- n = testn + 1;
- p = testp + 1;
- s = testp2;
- }
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ew3 = {
- .compute = mgag200_pixpll_compute_g200ew3,
- .update = mgag200_pixpll_update_g200wb, // same as G200WB
-};
-
-/*
- * PLL initialization
- */
-
-int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev)
-{
- struct drm_device *dev = &mdev->base;
- struct mgag200_g200se_device *g200se;
-
- pixpll->mdev = mdev;
-
- switch (mdev->type) {
- case G200_PCI:
- case G200_AGP:
- pixpll->funcs = &mgag200_pixpll_funcs_g200;
- break;
- case G200_SE_A:
- case G200_SE_B:
- g200se = to_mgag200_g200se_device(dev);
-
- if (g200se->unique_rev_id >= 0x04)
- pixpll->funcs = &mgag200_pixpll_funcs_g200se_04;
- else
- pixpll->funcs = &mgag200_pixpll_funcs_g200se_00;
- break;
- case G200_WB:
- pixpll->funcs = &mgag200_pixpll_funcs_g200wb;
- break;
- case G200_EV:
- pixpll->funcs = &mgag200_pixpll_funcs_g200ev;
- break;
- case G200_EH:
- pixpll->funcs = &mgag200_pixpll_funcs_g200eh;
- break;
- case G200_EH3:
- pixpll->funcs = &mgag200_pixpll_funcs_g200eh3;
- break;
- case G200_ER:
- pixpll->funcs = &mgag200_pixpll_funcs_g200er;
- break;
- case G200_EW3:
- pixpll->funcs = &mgag200_pixpll_funcs_g200ew3;
- break;
- default:
- drm_err(dev, "unknown device type %d\n", mdev->type);
- return -ENODEV;
- }
-
- return 0;
-}
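Every compute helper in the file removed above uses the same brute-force strategy: sweep the allowed post, input and feedback divider ranges, derive the resulting pixel clock from the reference clock, and keep the combination with the smallest absolute error; several variants also constrain the VCO window and reject results outside a 0.5% tolerance (clock * 5 / 1000). A stand-alone, illustrative sketch of that search; the reference clock, divider ranges and names are made up and do not correspond to any particular G200 variant:

	#include <errno.h>
	#include <limits.h>

	struct pll_params { unsigned int m, n, p; };

	static int pll_search(long target_khz, struct pll_params *out)
	{
		const long ref_khz = 25000;			/* assumed reference clock */
		const long vco_min = 160000, vco_max = 320000;	/* assumed VCO window */
		long best_err = LONG_MAX;

		for (unsigned int p = 1; p <= 8; p <<= 1) {
			if (target_khz * p < vco_min || target_khz * p > vco_max)
				continue;	/* VCO out of range for this post divider */
			for (unsigned int n = 17; n < 256; n++) {
				for (unsigned int m = 1; m < 32; m++) {
					long f = ref_khz * n / (m * p);
					long err = f > target_khz ? f - target_khz : target_khz - f;

					if (err < best_err) {
						best_err = err;
						out->m = m;
						out->n = n;
						out->p = p;
					}
				}
			}
		}

		/* Reject anything outside the 0.5% tolerance the SE variants enforce. */
		if (best_err > target_khz * 5 / 1000)
			return -EINVAL;
		return 0;
	}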
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index 99a9ab7d9119..1019ffd6c260 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -96,7 +96,7 @@
#define MGAREG_SRCORG 0x2cb4
#define MGAREG_DSTORG 0x2cb8
-/* add or or this to one of the previous "power registers" to start
+/* add or this to one of the previous "power registers" to start
the drawing engine */
#define MGAREG_EXEC 0x0100
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0ab0e1dd8bbb..2c8b9899625b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -68,7 +68,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 0c6b2a6d0b4c..7cb8d9849c07 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -62,7 +62,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index b03e2c413ab1..beea4a7fc1df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1413,6 +1413,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
+
#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 310a317885a1..e033d6a67a20 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -873,9 +873,47 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
(val & 1), 100, 1000);
}
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
/* Flush all the queues */
a6xx_hfi_stop(gmu);
@@ -887,6 +925,15 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
+
+ /* Halt the gmu cm3 core */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
+
+ /* Reset GPU core blocks */
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+ udelay(100);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1014,36 +1061,6 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if (!a6xx_has_gbif(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
- 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
- return;
- }
-
- /* Halt new client requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
- /* Halt all AXI requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
- /* The GBIF halt needs to be explicitly cleared */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
@@ -1069,7 +1086,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
- a6xx_gmu_notify_slumber(gmu);
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
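With the hunks above, forcing the GMU off follows a strict order: halt the GMU's CM3 core, drain the GPU's bus interface by halting GBIF (or the VBIF XIN ports on pre-GBIF parts) and polling the corresponding ACK bits, then write the RBBM software reset command; the graceful shutdown path also falls back to this force-off when the slumber request fails. The halt-and-poll idiom itself is generic; a small illustrative sketch using the common iopoll helper, with made-up register offsets and timeouts rather than the driver's values:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	#define FOO_BUS_HALT		0x3c	/* assumed offsets, illustration only */
	#define FOO_BUS_HALT_ACK	0x40
	#define FOO_HALT_REQ		BIT(0)

	static int foo_halt_bus(void __iomem *base)
	{
		u32 ack;
		int ret;

		/* Ask the bus interface to stop accepting new requests... */
		writel(FOO_HALT_REQ, base + FOO_BUS_HALT);

		/* ...and wait for the ACK bit, i.e. in-flight traffic has drained. */
		ret = readl_poll_timeout(base + FOO_BUS_HALT_ACK, ack,
					 ack & FOO_HALT_REQ, 10, 100);

		/* The halt request must be cleared explicitly once quiesced. */
		writel(0, base + FOO_BUS_HALT);
		return ret;
	}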
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 4d501100b9e4..fdc578016e0b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
+#include <linux/reset.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -146,7 +147,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, 0x31);
+ OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
/*
@@ -987,6 +988,10 @@ static int hw_init(struct msm_gpu *gpu)
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ if (a6xx_has_gbif(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
/*
@@ -1261,7 +1266,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- int i;
+ int i, active_submits;
adreno_dump_info(gpu);
@@ -1272,14 +1277,46 @@ static void a6xx_recover(struct msm_gpu *gpu)
if (hang_debug)
a6xx_dump(gpu);
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
/*
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
- gpu->funcs->pm_suspend(gpu);
- gpu->funcs->pm_resume(gpu);
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+ /* active_submit won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ /* Call into gpucc driver to poll for cx gdsc collapse */
+ reset_control_reset(gpu->cx_collapse);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
}
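Recovery no longer round-trips through the driver's pm_suspend()/pm_resume() callbacks. Instead it halts the SQE, drops every runtime-PM reference (including the one held on behalf of in-flight submits, after temporarily zeroing active_submits to avoid a WARN in the suspend callback), lets the GPUCC driver poll for CX GDSC collapse via reset_control_reset(), and then re-takes the same references. A condensed sketch of that balanced put/get sequence; foo_hard_reset() and its parameters are illustrative only:

	#include <linux/pm_runtime.h>
	#include <linux/reset.h>

	static void foo_hard_reset(struct device *dev, struct reset_control *rst,
				   bool work_pending)
	{
		pm_runtime_dont_use_autosuspend(dev);

		/* Drop all references so the power domain can genuinely collapse. */
		if (work_pending)
			pm_runtime_put(dev);	/* reference held for in-flight work */
		pm_runtime_put_sync(dev);	/* reference held by this worker */

		/* Provider-side reset; returns once the domain has collapsed. */
		reset_control_reset(rst);

		pm_runtime_use_autosuspend(dev);

		/* Re-take the same references before resuming normal operation. */
		if (work_pending)
			pm_runtime_get(dev);
		pm_runtime_get_sync(dev);
	}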
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 781dcd3fb283..13ce321283ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -412,7 +412,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
@@ -420,6 +419,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
memset(fetch_active, 0, sizeof(fetch_active));
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum dpu_sspp sspp_idx;
+
state = plane->state;
if (!state)
continue;
@@ -430,14 +431,14 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
pstate = to_dpu_plane_state(state);
fb = state->fb;
- dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
- set_bit(dpu_plane_pipe(plane), fetch_active);
+ sspp_idx = dpu_plane_pipe(plane);
+ set_bit(sspp_idx, fetch_active);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
- dpu_plane_pipe(plane) - SSPP_VIG0,
+ sspp_idx - SSPP_VIG0,
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
@@ -447,13 +448,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
stage_idx = zpos_cnt[pstate->stage]++;
stage_cfg->stage[pstate->stage][stage_idx] =
- dpu_plane_pipe(plane);
+ sspp_idx;
stage_cfg->multirect_index[pstate->stage][stage_idx] =
pstate->multirect_index;
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, pstate, stage_idx,
- dpu_plane_pipe(plane) - SSPP_VIG0,
+ sspp_idx - SSPP_VIG0,
format->base.pixel_format,
fb ? fb->modifier : 0);
@@ -462,7 +463,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
pstate, format);
- mixer[lm_idx].flush_mask |= flush_mask;
+ mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
+ sspp_idx);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@@ -496,7 +498,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
- mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
@@ -513,17 +514,14 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
- mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
- mixer[i].hw_lm->idx);
-
/* stage config flush mask */
- ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+ ctl->ops.update_pending_flush_mixer(ctl,
+ mixer[i].hw_lm->idx);
- DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
- ctl->idx - CTL_0,
- mixer[i].flush_mask);
+ ctl->idx - CTL_0);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
@@ -767,16 +765,9 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
dspp->ops.setup_pcc(dspp, &cfg);
}
- mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
- mixer[i].hw_dspp->idx);
-
/* stage config flush mask */
- ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
-
- DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
- mixer[i].hw_lm->idx - DSPP_0,
- ctl->idx - CTL_0,
- mixer[i].flush_mask);
+ ctl->ops.update_pending_flush_dspp(ctl,
+ mixer[i].hw_dspp->idx);
}
}
@@ -1235,17 +1226,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
for (i = 1; i < SSPP_MAX; i++) {
- if (pipe_staged[i]) {
+ if (pipe_staged[i])
dpu_plane_clear_multirect(pipe_staged[i]);
-
- if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
- DPU_ERROR(
- "r1 only virt plane:%d not supported\n",
- pipe_staged[i]->plane->base.id);
- rc = -EINVAL;
- goto end;
- }
- }
}
z_pos = -1;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 9b67645c2574..539b68b1626a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -97,7 +97,6 @@ struct dpu_crtc_mixer {
struct dpu_hw_ctl *lm_ctl;
struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
- u32 flush_mask;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c682d4e02d1b..9c6817b5a194 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -162,7 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
- * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
+ * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -208,7 +208,7 @@ struct dpu_encoder_virt {
bool wide_bus_en;
/* DSC configuration */
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -1791,12 +1791,12 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
static u32
-dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
u32 enc_ip_width)
{
int ssm_delay, total_pixels, soft_slice_per_enc;
- soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+ soft_slice_per_enc = enc_ip_width / dsc->slice_width;
/*
* minimum number of initial line pixels is a sum of:
@@ -1808,16 +1808,16 @@ dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
* 5. 6 additional pixels as the output of the rate buffer is
* 48 bits wide
*/
- ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
- total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+ ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
if (soft_slice_per_enc > 1)
total_pixels += (ssm_delay * 3);
- return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+ return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
struct dpu_hw_pingpong *hw_pp,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 common_mode,
u32 initial_lines)
{
@@ -1835,7 +1835,7 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
}
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
- struct msm_display_dsc_config *dsc)
+ struct drm_dsc_config *dsc)
{
/* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
@@ -1858,14 +1858,15 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
- pic_width = dsc->drm->pic_width;
+ dsc_common_mode = 0;
+ pic_width = dsc->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
- this_frame_slices = pic_width / dsc->drm->slice_width;
- intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+ this_frame_slices = pic_width / dsc->slice_width;
+ intf_ip_w = this_frame_slices * dsc->slice_width;
/*
* dsc merge case: when using 2 encoders for the same stream,
@@ -1980,7 +1981,6 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_mixer_cfg mixer;
int i, num_lm;
- u32 flush_mask = 0;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_lm[2];
struct dpu_hw_mixer *hw_mixer[2];
@@ -1999,9 +1999,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
- if (phys_enc->hw_ctl->ops.update_pending_flush)
- phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
+ phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
if (phys_enc->hw_ctl->ops.setup_blendstage)
@@ -2061,6 +2060,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index d4d1ecd416e3..9e7236ef34e6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -36,7 +36,7 @@ struct msm_display_info {
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_cmd_mode;
bool is_te_using_watchdog_timer;
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0239a811d5ec..27f029fdc682 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1333,7 +1333,7 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
- .name = "vbif_0", .id = VBIF_0,
+ .name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.default_ot_rd_limit = 32,
.default_ot_wr_limit = 32,
@@ -1363,7 +1363,7 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = {
static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
- .name = "vbif_0", .id = VBIF_0,
+ .name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
@@ -1939,11 +1939,6 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
{
int i;
- struct dpu_mdss_cfg *dpu_cfg;
-
- dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
- if (!dpu_cfg)
- return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
if (cfg_handler[i].hw_rev == hw_rev)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 71fe4c505f5b..38aa38ab1568 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -76,7 +76,7 @@ enum {
/**
* MDP TOP BLOCK features
- * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
* @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
* @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
* @DPU_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index e12b7fa48a7b..a35ecb6676c8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -150,92 +150,84 @@ static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
-static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
- uint32_t flushbits = 0;
-
switch (sspp) {
case SSPP_VIG0:
- flushbits = BIT(0);
+ ctx->pending_flush_mask |= BIT(0);
break;
case SSPP_VIG1:
- flushbits = BIT(1);
+ ctx->pending_flush_mask |= BIT(1);
break;
case SSPP_VIG2:
- flushbits = BIT(2);
+ ctx->pending_flush_mask |= BIT(2);
break;
case SSPP_VIG3:
- flushbits = BIT(18);
+ ctx->pending_flush_mask |= BIT(18);
break;
case SSPP_RGB0:
- flushbits = BIT(3);
+ ctx->pending_flush_mask |= BIT(3);
break;
case SSPP_RGB1:
- flushbits = BIT(4);
+ ctx->pending_flush_mask |= BIT(4);
break;
case SSPP_RGB2:
- flushbits = BIT(5);
+ ctx->pending_flush_mask |= BIT(5);
break;
case SSPP_RGB3:
- flushbits = BIT(19);
+ ctx->pending_flush_mask |= BIT(19);
break;
case SSPP_DMA0:
- flushbits = BIT(11);
+ ctx->pending_flush_mask |= BIT(11);
break;
case SSPP_DMA1:
- flushbits = BIT(12);
+ ctx->pending_flush_mask |= BIT(12);
break;
case SSPP_DMA2:
- flushbits = BIT(24);
+ ctx->pending_flush_mask |= BIT(24);
break;
case SSPP_DMA3:
- flushbits = BIT(25);
+ ctx->pending_flush_mask |= BIT(25);
break;
case SSPP_CURSOR0:
- flushbits = BIT(22);
+ ctx->pending_flush_mask |= BIT(22);
break;
case SSPP_CURSOR1:
- flushbits = BIT(23);
+ ctx->pending_flush_mask |= BIT(23);
break;
default:
break;
}
-
- return flushbits;
}
-static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
- uint32_t flushbits = 0;
-
switch (lm) {
case LM_0:
- flushbits = BIT(6);
+ ctx->pending_flush_mask |= BIT(6);
break;
case LM_1:
- flushbits = BIT(7);
+ ctx->pending_flush_mask |= BIT(7);
break;
case LM_2:
- flushbits = BIT(8);
+ ctx->pending_flush_mask |= BIT(8);
break;
case LM_3:
- flushbits = BIT(9);
+ ctx->pending_flush_mask |= BIT(9);
break;
case LM_4:
- flushbits = BIT(10);
+ ctx->pending_flush_mask |= BIT(10);
break;
case LM_5:
- flushbits = BIT(20);
+ ctx->pending_flush_mask |= BIT(20);
break;
default:
- return -EINVAL;
+ break;
}
- flushbits |= CTL_FLUSH_MASK_CTL;
-
- return flushbits;
+ ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
@@ -294,29 +286,25 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
-static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp)
{
- uint32_t flushbits = 0;
-
switch (dspp) {
case DSPP_0:
- flushbits = BIT(13);
+ ctx->pending_flush_mask |= BIT(13);
break;
case DSPP_1:
- flushbits = BIT(14);
+ ctx->pending_flush_mask |= BIT(14);
break;
case DSPP_2:
- flushbits = BIT(15);
+ ctx->pending_flush_mask |= BIT(15);
break;
case DSPP_3:
- flushbits = BIT(21);
+ ctx->pending_flush_mask |= BIT(21);
break;
default:
- return 0;
+ break;
}
-
- return flushbits;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
@@ -685,9 +673,9 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
- ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
- ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
- ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
+ ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 7d9ad6a3f9f6..96c012ec8467 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -130,6 +130,32 @@ struct dpu_hw_ctl_ops {
enum dpu_merge_3d blk);
/**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : SSPP block index
+ */
+ void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : LM block index
+ */
+ void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : DSPP block index
+ */
+ void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -171,15 +197,6 @@ struct dpu_hw_ctl_ops {
*/
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
- uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
- enum dpu_sspp blk);
-
- uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
- enum dpu_lm blk);
-
- uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
- enum dpu_dspp blk);
-
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
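The CTL interface change above replaces the three get_bitmask_*() ops, which returned raw flush bits for the caller to accumulate, with update_pending_flush_{sspp,mixer,dspp}() ops that OR the bits straight into the cached pending_flush_mask; nothing reaches the hardware until trigger_flush() writes CTL_FLUSH. A sketch of how a caller changes, contrasting the removed and the new ops (illustrative only; it would not compile against a single tree, since the old ops no longer exist after this patch):

	static void foo_flush_lm0(struct dpu_hw_ctl *ctl)
	{
		/* Before: fetch the bits, accumulate them, hand the mask back. */
		u32 flush_mask = 0;

		flush_mask |= ctl->ops.get_bitmask_mixer(ctl, LM_0);
		flush_mask |= ctl->ops.get_bitmask_dspp(ctl, DSPP_0);
		ctl->ops.update_pending_flush(ctl, flush_mask);

		/* After: each block type updates the cached mask directly. */
		ctl->ops.update_pending_flush_mixer(ctl, LM_0);
		ctl->ops.update_pending_flush_dspp(ctl, DSPP_0);

		/* Either way, the mask only hits the CTL_FLUSH register here. */
		ctl->ops.trigger_flush(ctl);
	}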
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 411689ae6382..f2ddcfb6f7ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -37,7 +37,7 @@ static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
@@ -52,89 +52,89 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
if (is_cmd_mode)
initial_lines += 1;
- slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
+ slice_last_group_size = 3 - (dsc->slice_width % 3);
data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
- data |= dsc->drm->bits_per_pixel << 12;
- lsb = dsc->drm->bits_per_pixel % 4;
- bpp = dsc->drm->bits_per_pixel / 4;
+ data |= dsc->bits_per_pixel << 12;
+ lsb = dsc->bits_per_pixel % 4;
+ bpp = dsc->bits_per_pixel / 4;
bpp *= 4;
bpp <<= 4;
bpp |= lsb;
data |= bpp << 8;
- data |= (dsc->drm->block_pred_enable << 7);
- data |= (dsc->drm->line_buf_depth << 3);
- data |= (dsc->drm->simple_422 << 2);
- data |= (dsc->drm->convert_rgb << 1);
- data |= dsc->drm->bits_per_component;
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+ data |= dsc->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
- data = dsc->drm->pic_width << 16;
- data |= dsc->drm->pic_height;
+ data = dsc->pic_width << 16;
+ data |= dsc->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
- data = dsc->drm->slice_width << 16;
- data |= dsc->drm->slice_height;
+ data = dsc->slice_width << 16;
+ data |= dsc->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
- data = dsc->drm->slice_chunk_size << 16;
+ data = dsc->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
- data = dsc->drm->initial_dec_delay << 16;
- data |= dsc->drm->initial_xmit_delay;
+ data = dsc->initial_dec_delay << 16;
+ data |= dsc->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
- data = dsc->drm->initial_scale_value;
+ data = dsc->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
- data = dsc->drm->scale_decrement_interval;
+ data = dsc->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
- data = dsc->drm->scale_increment_interval;
+ data = dsc->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
- data = dsc->drm->first_line_bpg_offset;
+ data = dsc->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
- data = dsc->drm->nfl_bpg_offset << 16;
- data |= dsc->drm->slice_bpg_offset;
+ data = dsc->nfl_bpg_offset << 16;
+ data |= dsc->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
- data = dsc->drm->initial_offset << 16;
- data |= dsc->drm->final_offset;
+ data = dsc->initial_offset << 16;
+ data |= dsc->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
- det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
+ det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
data = det_thresh_flatness << 10;
- data |= dsc->drm->flatness_max_qp << 5;
- data |= dsc->drm->flatness_min_qp;
+ data |= dsc->flatness_max_qp << 5;
+ data |= dsc->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
- data = dsc->drm->rc_model_size;
+ data = dsc->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
- data = dsc->drm->rc_tgt_offset_low << 18;
- data |= dsc->drm->rc_tgt_offset_high << 14;
- data |= dsc->drm->rc_quant_incr_limit1 << 9;
- data |= dsc->drm->rc_quant_incr_limit0 << 4;
- data |= dsc->drm->rc_edge_factor;
+ data = dsc->rc_tgt_offset_low << 18;
+ data |= dsc->rc_tgt_offset_high << 14;
+ data |= dsc->rc_quant_incr_limit1 << 9;
+ data |= dsc->rc_quant_incr_limit0 << 4;
+ data |= dsc->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc)
+ struct drm_dsc_config *dsc)
{
- struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
+ struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
- DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
+ DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
off += 4;
}
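
The hunks above replace the msm-specific DSC wrapper with the common struct drm_dsc_config, dropping one level of indirection from every register-programming access. A minimal sketch of the before/after shape (the wrapper definition is an assumption reconstructed from the removed dsc->drm-> accesses, it is not shown in this diff):

	/* assumed shape of the old wrapper that this series removes */
	struct msm_display_dsc_config {
		struct drm_dsc_config *drm;
	};

	/* after the conversion the hardware block consumes drm_dsc_config
	 * directly, e.g. splitting the U6.4 fixed-point bits_per_pixel
	 * field into its integer and fractional parts */
	static inline void dpu_dsc_split_bpp(const struct drm_dsc_config *dsc,
					     u32 *int_part, u32 *frac_part)
	{
		*int_part = dsc->bits_per_pixel >> 4;
		*frac_part = dsc->bits_per_pixel & 0xf;
	}
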
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index 45e4118f1fa2..c0b77fe1a696 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -31,7 +31,7 @@ struct dpu_hw_dsc_ops {
* @initial_lines: amount of initial lines to be used
*/
void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines);
@@ -41,7 +41,7 @@ struct dpu_hw_dsc_ops {
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc);
+ struct drm_dsc_config *dsc);
};
struct dpu_hw_dsc {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 9f402be55fbf..d3b0ed0a9c6c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -273,11 +273,9 @@ enum dpu_wd_timer {
};
enum dpu_vbif {
- VBIF_0,
- VBIF_1,
+ VBIF_RT,
+ VBIF_NRT,
VBIF_MAX,
- VBIF_RT = VBIF_0,
- VBIF_NRT = VBIF_1
};
/**
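
Collapsing VBIF_0/VBIF_1 and their RT/NRT aliases into plain VBIF_RT and VBIF_NRT lets the enum double as a dense array index. A hedged sketch of the lookup this enables (mirroring the dpu_get_vbif() helper added to dpu_vbif.c further down in this diff):

	static struct dpu_hw_vbif *lookup_vbif(struct dpu_kms *dpu_kms,
					       enum dpu_vbif idx)
	{
		/* hw_vbif[] is sized VBIF_MAX and indexed by the enum value,
		 * so a bounds check plus a direct load replaces the old scan
		 * over the array looking for a matching ->idx */
		if (idx >= VBIF_MAX)
			return NULL;

		return dpu_kms->hw_vbif[idx];
	}
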
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 102c21bb4192..691c471b08c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -780,8 +780,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
}
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, const struct dpu_mdss_cfg *catalog,
- bool is_virtual_pipe)
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog)
{
struct dpu_hw_pipe *hw_pipe;
const struct dpu_sspp_cfg *cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 78b1bc9e004f..0c95b7e64f6c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -377,11 +377,9 @@ struct dpu_kms;
* @idx: Pipe index for which driver object is required
* @addr: Mapped register io address of MDP
* @catalog : Pointer to mdss catalog data
- * @is_virtual_pipe: is this pipe virtual pipe
*/
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, const struct dpu_mdss_cfg *catalog,
- bool is_virtual_pipe);
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 008e1420e6e5..5e6e2626151e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -384,12 +384,9 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
- struct device *mdss_dev = dpu_dev->parent;
- /* Interconnects are a part of MDSS device tree binding, not the
- * MDP/DPU device. */
- path0 = of_icc_get(mdss_dev, "mdp0-mem");
- path1 = of_icc_get(mdss_dev, "mdp1-mem");
+ path0 = msm_icc_get(dpu_dev, "mdp0-mem");
+ path1 = msm_icc_get(dpu_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
@@ -782,7 +779,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
- (1UL << max_crtc_count) - 1, 0);
+ (1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -826,12 +823,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
- for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
-
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
- dpu_kms->hw_vbif[vbif_idx] = NULL;
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
}
}
}
@@ -902,12 +897,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
int i;
struct dpu_kms *dpu_kms;
const struct dpu_mdss_cfg *cat;
- struct dpu_hw_mdp *top;
dpu_kms = to_dpu_kms(kms);
cat = dpu_kms->catalog;
- top = dpu_kms->hw_mdp;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -1113,12 +1106,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
- if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
- if (!dpu_kms->hw_vbif[vbif_idx])
- rc = -EINVAL;
DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
dpu_kms->hw_vbif[vbif_idx] = NULL;
goto power_error;
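
The init side now stores each block at hw_vbif[vbif_idx] (matching the teardown loop above) and drops the NULL special-case because dpu_hw_vbif_init() reports failure via ERR_PTR(), never NULL. A condensed sketch of the convention being relied on:

	struct dpu_hw_vbif *vbif;

	vbif = dpu_hw_vbif_init(vbif_idx, dpu_kms->vbif[vbif_idx],
				dpu_kms->catalog);
	if (IS_ERR(vbif)) {
		/* the errno is encoded in the pointer itself */
		rc = PTR_ERR(vbif);
		DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
		goto power_error;
	}
	dpu_kms->hw_vbif[vbif_idx] = vbif;
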
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index a617a3d8b1bc..658005f609f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -91,7 +91,7 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
- * @mplane_list: List of multirect planes of the same pipe
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
*/
@@ -106,8 +106,6 @@ struct dpu_plane {
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
- bool is_virtual;
- struct list_head mplane_list;
const struct dpu_mdss_cfg *catalog;
};
@@ -225,7 +223,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
- struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
u32 fixed_buff_size;
u32 total_fl;
@@ -239,19 +237,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
pstate = to_dpu_plane_state(plane->state);
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
- list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
- u32 tmp_width;
-
- if (!tmp->base.state->visible)
- continue;
- tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
- DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
- pdpu->base.base.id, tmp->base.base.id,
- src_width,
- tmp_width);
- src_width = max_t(u32, src_width,
- tmp_width);
- }
+ /* FIXME: in multirect case account for the src_width of all the planes */
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == DPU_CHROMA_420) {
@@ -854,13 +840,8 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
}
done:
- if (dpu_plane[R0]->is_virtual) {
- pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
- pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
- } else {
- pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
- pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
- }
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -869,18 +850,6 @@ done:
return 0;
}
-/**
- * dpu_plane_get_ctl_flush - get control flush for the given plane
- * @plane: Pointer to drm plane structure
- * @ctl: Pointer to hardware control driver
- * @flush_sspp: Pointer to sspp flush control word
- */
-void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
- u32 *flush_sspp)
-{
- *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
-}
-
static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -1266,19 +1235,13 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
- trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ trace_dpu_plane_disable(DRMID(plane), false,
pstate->multirect_mode);
pstate->pending = true;
-
- if (is_dpu_plane_virtual(plane) &&
- pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
- pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
- DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
}
static void dpu_plane_atomic_update(struct drm_plane *plane,
@@ -1493,22 +1456,16 @@ enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
}
-bool is_dpu_plane_virtual(struct drm_plane *plane)
-{
- return plane ? to_dpu_plane(plane)->is_virtual : false;
-}
-
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs, u32 master_plane_id)
+ unsigned long possible_crtcs)
{
- struct drm_plane *plane = NULL, *master_plane = NULL;
+ struct drm_plane *plane = NULL;
const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
- int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
uint32_t supported_rotations;
int ret = -EINVAL;
@@ -1524,18 +1481,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &pdpu->base;
pdpu->pipe = pipe;
- pdpu->is_virtual = (master_plane_id != 0);
- INIT_LIST_HEAD(&pdpu->mplane_list);
- master_plane = drm_plane_find(dev, NULL, master_plane_id);
- if (master_plane) {
- struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
-
- list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
- }
/* initialize underlying h/w driver */
- pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
- master_plane_id != 0);
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog);
if (IS_ERR(pdpu->pipe_hw)) {
DPU_ERROR("[%u]SSPP init failed\n", pipe);
ret = PTR_ERR(pdpu->pipe_hw);
@@ -1545,14 +1493,8 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (pdpu->is_virtual) {
- format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
- num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
- }
- else {
- format_list = pdpu->pipe_hw->cap->sblk->format_list;
- num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
- }
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
format_list, num_formats,
@@ -1562,14 +1504,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
pdpu->catalog = kms->catalog;
- if (kms->catalog->mixer_count &&
- kms->catalog->mixer[0].sblk->maxblendstages) {
- zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
- if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
- zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
- }
-
- ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
@@ -1594,15 +1529,14 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
mutex_init(&pdpu->lock);
- DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
- pipe, plane->base.id, master_plane_id);
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
return plane;
clean_sspp:
if (pdpu && pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
- list_del(&pdpu->mplane_list);
kfree(pdpu);
return ERR_PTR(ret);
}
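
With the virtual/multirect companion planes gone, dpu_plane_init() loses its master_plane_id parameter and every plane is a regular SSPP-backed plane. A minimal call-site sketch under the new signature (mirroring the dpu_kms.c hunk earlier in this diff):

	plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
			       (1UL << max_crtc_count) - 1);
	if (IS_ERR(plane)) {
		DPU_ERROR("dpu_plane_init failed\n");
		return PTR_ERR(plane);
	}
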
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index e1463107a6fc..b7b1b05199c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -65,23 +65,6 @@ struct dpu_multirect_plane_states {
enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
/**
- * is_dpu_plane_virtual - check for virtual plane
- * @plane: Pointer to DRM plane object
- * returns: true - if the plane is virtual
- * false - if the plane is primary
- */
-bool is_dpu_plane_virtual(struct drm_plane *plane);
-
-/**
- * dpu_plane_get_ctl_flush - get control flush mask
- * @plane: Pointer to DRM plane object
- * @ctl: Pointer to control hardware
- * @flush_sspp: Pointer to sspp flush control word
- */
-void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
- u32 *flush_sspp);
-
-/**
* dpu_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
*/
@@ -99,14 +82,11 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* @pipe: dpu hardware pipe identifier
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
- * a regular plane initialization. A non-zero primary plane
- * id will be passed for a virtual pipe initialization.
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs, u32 master_plane_id);
+ unsigned long possible_crtcs);
/**
* dpu_plane_validate_multirecti_v2 - validate the multirect planes
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 21d20373eb8b..1305e250b71e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -11,6 +11,26 @@
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
+static const char *dpu_vbif_name(enum dpu_vbif idx)
+{
+ switch (idx) {
+ case VBIF_RT:
+ return "VBIF_RT";
+ case VBIF_NRT:
+ return "VBIF_NRT";
+ default:
+ return "??";
+ }
+}
+
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
@@ -42,12 +62,12 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
if (!status) {
rc = -ETIMEDOUT;
- DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
- vbif->idx - VBIF_0, xin_id);
+ DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
+ dpu_vbif_name(vbif->idx), xin_id);
} else {
rc = 0;
- DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
- vbif->idx - VBIF_0, xin_id);
+ DRM_DEBUG_ATOMIC("%s client %d is halted\n",
+ dpu_vbif_name(vbif->idx), xin_id);
}
return rc;
@@ -87,8 +107,8 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
}
}
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
- vbif->idx - VBIF_0, params->xin_id,
+ DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ dpu_vbif_name(vbif->idx), params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
}
@@ -133,8 +153,8 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
}
exit:
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
- vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
+ dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
return ot_lim;
}
@@ -148,20 +168,15 @@ exit:
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
- int ret, i;
+ int ret;
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
- vbif = dpu_kms->hw_vbif[i];
- }
-
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
@@ -204,7 +219,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
@@ -216,13 +231,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
- vbif = dpu_kms->hw_vbif[i];
- break;
- }
- }
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
@@ -245,8 +254,8 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
- params->vbif_idx, params->xin_id, i,
+ DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
+ dpu_vbif_name(params->vbif_idx), params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
@@ -266,8 +275,8 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
if (vbif && vbif->ops.clear_errors) {
vbif->ops.clear_errors(vbif, &pnd, &src);
if (pnd || src) {
- DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
- vbif->idx - VBIF_0, pnd, src);
+ DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
+ dpu_vbif_name(vbif->idx), pnd, src);
}
}
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index d2a48caf9d27..b0d21838a134 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -902,12 +902,9 @@ fail:
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
- /* Interconnects are a part of MDSS device tree binding, not the
- * MDP5 device. */
- struct device *mdss_dev = pdev->dev.parent;
- struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
- struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
- struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
+ struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
+ struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
+ struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 7257515871a9..676279d0ca8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -431,7 +431,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
if (rate == link_rate_hbr3)
pixel_div = 6;
- else if (rate == 1620000 || rate == 270000)
+ else if (rate == 162000 || rate == 270000)
pixel_div = 2;
else if (rate == link_rate_hbr2)
pixel_div = 4;
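
The dp_catalog fix corrects the reduced-bit-rate comparison: link rates are carried in kHz in this driver, so RBR is 162000, not 1620000. For reference, the standard rates and the pixel clock divider chosen here (rate values are from the DisplayPort spec; the switch form is only an illustration, the driver keeps its if/else chain):

	switch (rate) {		/* link rate in kHz */
	case 162000:		/* RBR  (1.62 Gb/s per lane) */
	case 270000:		/* HBR  (2.70 Gb/s per lane) */
		pixel_div = 2;
		break;
	case 540000:		/* HBR2 (5.40 Gb/s per lane) */
		pixel_div = 4;
		break;
	case 810000:		/* HBR3 (8.10 Gb/s per lane) */
		pixel_div = 6;
		break;
	}
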
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ab6aa13b1639..3854c9f1f7e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
@@ -1238,8 +1238,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
-
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
@@ -1358,25 +1356,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
- drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
- ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
-
- return ret;
-}
-
-static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
-{
- int ret = 0;
-
- dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
- ctrl->dp_ctrl.pixel_rate * 1000);
-
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
- if (ret)
- DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
-
- drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
- ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+ drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
return ret;
}
@@ -1520,8 +1500,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
-
ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
@@ -1535,38 +1513,6 @@ end:
return ret;
}
-static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
-
-static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
-{
- int ret = 0;
-
- if (!ctrl->link->phy_params.phy_test_pattern_sel) {
- drm_dbg_dp(ctrl->drm_dev,
- "no test pattern selected by sink\n");
- return ret;
- }
-
- /*
- * The global reset will need DP link related clocks to be
- * running. Add the global reset just before disabling the
- * link clocks and core clocks.
- */
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
- if (ret) {
- DRM_ERROR("failed to disable DP controller\n");
- return ret;
- }
-
- ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
- if (!ret)
- ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
- else
- DRM_ERROR("failed to enable DP link controller\n");
-
- return ret;
-}
-
static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
{
bool success = false;
@@ -1619,6 +1565,48 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
return success;
}
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+ int ret;
+ unsigned long pixel_rate;
+
+ if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "no test pattern selected by sink\n");
+ return 0;
+ }
+
+ /*
+ * The global reset will need DP link related clocks to be
+ * running. Add the global reset just before disabling the
+ * link clocks and core clocks.
+ */
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to disable DP controller\n");
+ return ret;
+ }
+
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to enable DP link controller\n");
+ return ret;
+ }
+
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_ctrl_send_phy_test_pattern(ctrl);
+
+ return 0;
+}
+
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
@@ -1689,11 +1677,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
struct dp_ctrl_private *ctrl;
- u32 rate = 0;
+ u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
+ unsigned long pixel_rate;
if (!dp_ctrl)
return -EINVAL;
@@ -1701,25 +1690,24 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
rate = ctrl->panel->link_info.rate;
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
"using phy test link parameters\n");
- if (!ctrl->panel->dp_mode.drm_mode.clock)
- ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
+ if (!pixel_rate)
+ pixel_rate = phy_cts_pixel_clk_khz;
} else {
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
}
- drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
- ctrl->dp_ctrl.pixel_rate);
-
+ pixel_rate);
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
@@ -1816,31 +1804,12 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
-static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
-{
- int ret;
- struct dp_ctrl_private *ctrl;
-
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
-
- ret = dp_ctrl_enable_stream_clocks(ctrl);
- if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
- return ret;
- }
-
- dp_ctrl_send_phy_test_pattern(ctrl);
-
- return 0;
-}
-
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
+ unsigned long pixel_rate;
unsigned long pixel_rate_orig;
if (!dp_ctrl)
@@ -1848,15 +1817,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
- pixel_rate_orig = ctrl->dp_ctrl.pixel_rate;
if (dp_ctrl->wide_bus_en)
- ctrl->dp_ctrl.pixel_rate >>= 1;
+ pixel_rate >>= 1;
- drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate,
- ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+ ctrl->link->link_params.num_lanes, pixel_rate);
if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
@@ -1866,9 +1834,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
}
}
- ret = dp_ctrl_enable_stream_clocks(ctrl);
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
goto end;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b563e2e3bfe5..9f29734af81c 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -16,7 +16,6 @@
struct dp_ctrl {
bool orientation;
atomic_t aborted;
- u32 pixel_rate;
bool wide_bus_en;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 36f0af02749f..36bb6191d2f0 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -786,7 +786,7 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
link->request.test_lane_count);
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
@@ -965,8 +965,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
if (channel_eq_done && clock_recovery_done)
return -EINVAL;
-
- return 0;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1625328fa430..39bbabb5daf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -6,14 +6,6 @@
#include "dsi.h"
#include "dsi_cfg.h"
-struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
-{
- if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
- return NULL;
-
- return msm_dsi->encoder;
-}
-
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
@@ -21,7 +13,7 @@ bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
-struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
@@ -220,7 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv;
- struct drm_bridge *ext_bridge;
int ret;
if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
@@ -254,26 +245,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- /*
- * check if the dsi encoder output is connected to a panel or an
- * external bridge. We create a connector only if we're connected to a
- * drm_panel device. When we're connected to an external bridge, we
- * assume that the drm_bridge driver will create the connector itself.
- */
- ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
-
- if (ext_bridge)
- msm_dsi->connector =
- msm_dsi_manager_ext_bridge_init(msm_dsi->id);
- else
- msm_dsi->connector =
- msm_dsi_manager_connector_init(msm_dsi->id);
-
- if (IS_ERR(msm_dsi->connector)) {
- ret = PTR_ERR(msm_dsi->connector);
+ ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
- msm_dsi->connector = NULL;
goto fail;
}
@@ -287,12 +262,6 @@ fail:
msm_dsi->bridge = NULL;
}
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
-
- msm_dsi->connector = NULL;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 580a1e6358bf..2a96b4fe7839 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -12,7 +12,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "disp/msm_disp_snapshot.h"
@@ -30,27 +29,12 @@ enum msm_dsi_phy_usecase {
MSM_DSI_PHY_SLAVE,
};
-#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4
-/* Regulators for DSI devices */
-struct dsi_reg_entry {
- char name[32];
- int enable_load;
- int disable_load;
-};
-
-struct dsi_reg_config {
- int num;
- struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX];
-};
-
struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
- /* connector managed by us when we're connected to a drm_panel */
- struct drm_connector *connector;
/* internal dsi bridge attached to MDP interface */
struct drm_bridge *bridge;
@@ -58,10 +42,8 @@ struct msm_dsi {
struct msm_dsi_phy *phy;
/*
- * panel/external_bridge connected to dsi bridge output, only one of the
- * two can be valid at a time
+ * external_bridge connected to dsi bridge output
*/
- struct drm_panel *panel;
struct drm_bridge *external_bridge;
struct device *phy_dev;
@@ -76,8 +58,7 @@ struct msm_dsi {
/* dsi manager */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
-struct drm_connector *msm_dsi_manager_connector_init(u8 id);
-struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
+int msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
@@ -87,11 +68,9 @@ void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
{
- return msm_dsi->panel || msm_dsi->external_bridge;
+ return msm_dsi->external_bridge;
}
-struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
-
/* dsi host */
struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
@@ -116,9 +95,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
-struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
@@ -154,7 +131,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
-struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2c23324a2296..7e97c239ed48 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -9,16 +9,16 @@ static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss", "iface", "bus",
};
+static const struct regulator_bulk_data apq8064_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
- .reg_cfg = {
- .num = 3,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"avdd", 10000, 100}, /* 3.0 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = apq8064_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators),
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
.io_start = { 0x4700000, 0x5800000 },
@@ -29,16 +29,16 @@ static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
+static const struct regulator_bulk_data msm8974_apq8084_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 3,
- .regs = {
- {"vdd", 150000, 100}, /* 3.0 V */
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8974_apq8084_regulators,
+ .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd922800, 0xfd922b00 },
@@ -49,15 +49,15 @@ static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
+static const struct regulator_bulk_data msm8916_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8916_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8916_dsi_regulators),
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
.io_start = { 0x1a98000 },
@@ -68,34 +68,34 @@ static const char * const dsi_8976_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
+static const struct regulator_bulk_data msm8976_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8976_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8976_dsi_regulators),
.bus_clk_names = dsi_8976_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
.io_start = { 0x1a94000, 0x1a96000 },
.num_dsi = 2,
};
+static const struct regulator_bulk_data msm8994_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+ { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "lab_reg", .init_load_uA = -1 },
+ { .supply = "ibb_reg", .init_load_uA = -1 },
+};
+
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 6,
- .regs = {
- {"vdda", 100000, 100}, /* 1.25 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- {"vcca", 10000, 100}, /* 1.0 V */
- {"vdd", 100000, 100}, /* 1.8 V */
- {"lab_reg", -1, -1},
- {"ibb_reg", -1, -1},
- },
- },
+ .regulator_data = msm8994_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd998000, 0xfd9a0000 },
@@ -106,16 +106,16 @@ static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
+static const struct regulator_bulk_data msm8996_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */
+ { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 18160, 1 }, /* 1.25 V */
- {"vcca", 17000, 32 }, /* 0.925 V */
- {"vddio", 100000, 100 },/* 1.8 V */
- },
- },
+ .regulator_data = msm8996_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8996_dsi_regulators),
.bus_clk_names = dsi_8996_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
.io_start = { 0x994000, 0x996000 },
@@ -126,15 +126,15 @@ static const char * const dsi_msm8998_bus_clk_names[] = {
"iface", "bus", "core",
};
+static const struct regulator_bulk_data msm8998_dsi_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */
+ { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config msm8998_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdd", 367000, 16 }, /* 0.9 V */
- {"vdda", 62800, 2 }, /* 1.2 V */
- },
- },
+ .regulator_data = msm8998_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators),
.bus_clk_names = dsi_msm8998_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@@ -145,14 +145,14 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
"iface", "bus", "core", "core_mmss",
};
+static const struct regulator_bulk_data sdm660_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 12560, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sdm660_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators),
.bus_clk_names = dsi_sdm660_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@@ -167,28 +167,28 @@ static const char * const dsi_sc7180_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data sdm845_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sdm845_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm845_dsi_regulators),
.bus_clk_names = dsi_sdm845_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
.io_start = { 0xae94000, 0xae96000 },
.num_dsi = 2,
};
+static const struct regulator_bulk_data sc7180_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sc7180_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sc7180_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7180_dsi_regulators),
.bus_clk_names = dsi_sc7180_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
.io_start = { 0xae94000 },
@@ -199,14 +199,14 @@ static const char * const dsi_sc7280_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sc7280_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 8350, 0 }, /* 1.2 V */
- },
- },
+ .regulator_data = sc7280_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
.bus_clk_names = dsi_sc7280_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
.io_start = { 0xae94000 },
@@ -217,14 +217,14 @@ static const char * const dsi_qcm2290_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config qcm2290_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = qcm2290_dsi_cfg_regulators,
+ .num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators),
.bus_clk_names = dsi_qcm2290_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names),
.io_start = { 0x5e94000 },
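
All of the per-SoC tables above move from the private dsi_reg_config blob to the regulator core's bulk descriptors, with the enable load expressed as .init_load_uA. A hypothetical entry for a new SoC would follow the same pattern (names and values here are illustrative, not from the diff):

	static const struct regulator_bulk_data example_dsi_regulators[] = {
		{ .supply = "vdda", .init_load_uA = 21800 },	/* 1.2 V */
		{ .supply = "vddio", .init_load_uA = 100000 },	/* 1.8 V */
	};

	static const struct msm_dsi_config example_dsi_cfg = {
		.io_offset = DSI_6G_REG_SHIFT,
		.regulator_data = example_dsi_regulators,
		.num_regulators = ARRAY_SIZE(example_dsi_regulators),
		/* bus clocks, io_start and num_dsi as in the entries above */
	};
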
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index fe54a999968b..8f04e685a74e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -32,7 +32,8 @@
struct msm_dsi_config {
u32 io_offset;
- struct dsi_reg_config reg_cfg;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
const char * const *bus_clk_names;
const int num_bus_clks;
const resource_size_t io_start[DSI_MAX];
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a34078497af1..7fbf391c024f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -33,7 +33,7 @@
#define DSI_RESET_TOGGLE_DELAY_MS 20
-static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc);
+static int dsi_populate_dsc_params(struct drm_dsc_config *dsc);
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
@@ -108,7 +108,7 @@ struct msm_dsi_host {
void __iomem *ctrl_base;
phys_addr_t ctrl_size;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+ struct regulator_bulk_data *supplies;
int num_bus_clks;
struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
@@ -144,7 +144,6 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
- struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -161,10 +160,9 @@ struct msm_dsi_host {
struct regmap *sfpb;
struct drm_display_mode *mode;
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
/* connected device info */
- struct device_node *device_node;
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -205,9 +203,6 @@ static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
msm_writel(data, msm_host->ctrl_base + reg);
}
-static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
-static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
-
static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
@@ -258,76 +253,6 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
return container_of(host, struct msm_dsi_host, base);
}
-static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer,
- regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- pr_err("regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- pr_err("regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
-static int dsi_regulator_init(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
- if (ret < 0) {
- pr_err("%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -916,7 +841,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
{
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, intf_width, reg_ctrl, reg_ctrl2;
u32 slice_per_intf, total_bytes_per_intf;
u32 pkt_per_line;
@@ -927,24 +852,24 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
* compress mode registers
*/
intf_width = hdisplay;
- slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width);
+ slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
/* If slice_per_pkt is greater than slice_per_intf
* then default to 1. This can happen during partial
* update.
*/
- if (slice_per_intf > dsc->drm->slice_count)
- dsc->drm->slice_count = 1;
+ if (slice_per_intf > dsc->slice_count)
+ dsc->slice_count = 1;
- slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width);
- bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8);
+ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+ bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8);
- dsc->drm->slice_chunk_size = bytes_in_slice;
+ dsc->slice_chunk_size = bytes_in_slice;
total_bytes_per_intf = bytes_in_slice * slice_per_intf;
eol_byte_num = total_bytes_per_intf % 3;
- pkt_per_line = slice_per_intf / dsc->drm->slice_count;
+ pkt_per_line = slice_per_intf / dsc->slice_count;
if (is_cmd_mode) /* packet data type */
reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
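
The timing calculation keeps the same arithmetic, just on drm_dsc_config fields. As a small worked example (assumed values, not from the diff: a 1080 px wide interface, two 540 px slices, and bits_per_pixel holding the integer value 8 as this code treats it):

	/*
	 * slice_per_intf       = DIV_ROUND_UP(1080, 540)       = 2
	 * bytes_in_slice       = DIV_ROUND_UP(540 * 8, 8)      = 540
	 * slice_chunk_size     = bytes_in_slice                = 540
	 * total_bytes_per_intf = 540 * 2                       = 1080
	 * eol_byte_num         = 1080 % 3                      = 0
	 * pkt_per_line         = slice_per_intf / slice_count  = 2 / 2 = 1
	 */
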
@@ -1009,7 +934,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
}
if (msm_host->dsc) {
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@@ -1018,9 +943,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
return;
}
- dsc->drm->pic_width = mode->hdisplay;
- dsc->drm->pic_height = mode->vdisplay;
- DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height);
+ dsc->pic_width = mode->hdisplay;
+ dsc->pic_height = mode->vdisplay;
+ DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);
/* we do the calculations for dsc parameters here so that
* panel can use these parameters
@@ -1500,14 +1425,6 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
-static void dsi_hpd_worker(struct work_struct *work)
-{
- struct msm_dsi_host *msm_host =
- container_of(work, struct msm_dsi_host, hpd_work);
-
- drm_helper_hpd_irq_event(msm_host->dev);
-}
-
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1686,6 +1603,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
+ if (dsi->dsc)
+ msm_host->dsc = dsi->dsc;
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1697,8 +1616,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
return ret;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
- queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1710,11 +1627,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
dsi_dev_detach(msm_host->pdev);
- msm_host->device_node = NULL;
-
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
- queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1841,7 +1754,7 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = {
2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
-static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
+static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
{
int mux_words_size;
int groups_per_line, groups_total;
@@ -1854,98 +1767,98 @@ static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
int final_value, final_scale;
int i;
- dsc->drm->rc_model_size = 8192;
- dsc->drm->first_line_bpg_offset = 12;
- dsc->drm->rc_edge_factor = 6;
- dsc->drm->rc_tgt_offset_high = 3;
- dsc->drm->rc_tgt_offset_low = 3;
- dsc->drm->simple_422 = 0;
- dsc->drm->convert_rgb = 1;
- dsc->drm->vbr_enable = 0;
+ dsc->rc_model_size = 8192;
+ dsc->first_line_bpg_offset = 12;
+ dsc->rc_edge_factor = 6;
+ dsc->rc_tgt_offset_high = 3;
+ dsc->rc_tgt_offset_low = 3;
+ dsc->simple_422 = 0;
+ dsc->convert_rgb = 1;
+ dsc->vbr_enable = 0;
/* handle only bpp = bpc = 8 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
- dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
+ dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- dsc->drm->rc_range_params[i].range_min_qp = min_qp[i];
- dsc->drm->rc_range_params[i].range_max_qp = max_qp[i];
- dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i];
+ dsc->rc_range_params[i].range_min_qp = min_qp[i];
+ dsc->rc_range_params[i].range_max_qp = max_qp[i];
+ dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i];
}
- dsc->drm->initial_offset = 6144; /* Not bpp 12 */
- if (dsc->drm->bits_per_pixel != 8)
- dsc->drm->initial_offset = 2048; /* bpp = 12 */
+ dsc->initial_offset = 6144; /* Not bpp 12 */
+ if (dsc->bits_per_pixel != 8)
+ dsc->initial_offset = 2048; /* bpp = 12 */
mux_words_size = 48; /* bpc == 8/10 */
- if (dsc->drm->bits_per_component == 12)
+ if (dsc->bits_per_component == 12)
mux_words_size = 64;
- dsc->drm->initial_xmit_delay = 512;
- dsc->drm->initial_scale_value = 32;
- dsc->drm->first_line_bpg_offset = 12;
- dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1;
+ dsc->initial_xmit_delay = 512;
+ dsc->initial_scale_value = 32;
+ dsc->first_line_bpg_offset = 12;
+ dsc->line_buf_depth = dsc->bits_per_component + 1;
/* bpc 8 */
- dsc->drm->flatness_min_qp = 3;
- dsc->drm->flatness_max_qp = 12;
- dsc->drm->rc_quant_incr_limit0 = 11;
- dsc->drm->rc_quant_incr_limit1 = 11;
- dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ dsc->flatness_min_qp = 3;
+ dsc->flatness_max_qp = 12;
+ dsc->rc_quant_incr_limit0 = 11;
+ dsc->rc_quant_incr_limit1 = 11;
+ dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
/* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
* params are calculated
*/
- groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3);
- dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8;
- if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8)
- dsc->drm->slice_chunk_size++;
+ groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+ dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8;
+ if ((dsc->slice_width * dsc->bits_per_pixel) % 8)
+ dsc->slice_chunk_size++;
/* rbs-min */
- min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset +
- dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel +
- groups_per_line * dsc->drm->first_line_bpg_offset;
+ min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset +
+ dsc->initial_xmit_delay * dsc->bits_per_pixel +
+ groups_per_line * dsc->first_line_bpg_offset;
- hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel);
+ hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel);
- dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay;
+ dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
- dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size /
- (dsc->drm->rc_model_size - dsc->drm->initial_offset);
+ dsc->initial_scale_value = 8 * dsc->rc_model_size /
+ (dsc->rc_model_size - dsc->initial_offset);
- slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height;
+ slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height;
- groups_total = groups_per_line * dsc->drm->slice_height;
+ groups_total = groups_per_line * dsc->slice_height;
- data = dsc->drm->first_line_bpg_offset * 2048;
+ data = dsc->first_line_bpg_offset * 2048;
- dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1));
+ dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
- pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2);
+ pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2);
num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
- data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits);
- dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+ data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits);
+ dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
/* bpp * 16 + 0.5 */
- data = dsc->drm->bits_per_pixel * 16;
+ data = dsc->bits_per_pixel * 16;
data *= 2;
data++;
data /= 2;
target_bpp_x16 = data;
- data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16;
- final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits;
- dsc->drm->final_offset = final_value;
+ data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+ final_value = dsc->rc_model_size - data + num_extra_mux_bits;
+ dsc->final_offset = final_value;
- final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value);
+ final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value);
- data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset);
- dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data;
+ data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset);
+ dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
- dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8);
+ dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8);
return 0;
}
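
dsi_populate_dsc_params() still hard-codes the 8 bpc / 8 bpp rate-control constants, and the derived values follow directly from the formulas above. A worked pass with those constants (rc_model_size = 8192, initial_offset = 6144, initial_xmit_delay = 512, first_line_bpg_offset = 12) and an assumed 540 px slice width, shown as a comment-only sketch:

	/*
	 * groups_per_line      = DIV_ROUND_UP(540, 3)                = 180
	 * slice_chunk_size     = 540 * 8 / 8                         = 540 bytes
	 * min_rate_buffer_size = 8192 - 6144 + 512 * 8 + 180 * 12    = 8304
	 * hrd_delay            = DIV_ROUND_UP(8304, 8)               = 1038
	 * initial_dec_delay    = 1038 - 512                          = 526
	 * initial_scale_value  = 8 * 8192 / (8192 - 6144)            = 32
	 */
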
@@ -1954,7 +1867,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *endpoint, *device_node;
+ struct device_node *endpoint;
int ret = 0;
/*
@@ -1977,16 +1890,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
goto err;
}
- /* Get panel node from the output port's endpoint data */
- device_node = of_graph_get_remote_node(np, 1, 0);
- if (!device_node) {
- DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
- ret = -ENODEV;
- goto err;
- }
-
- msm_host->device_node = device_node;
-
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
@@ -1997,8 +1900,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
}
}
- of_node_put(device_node);
-
err:
of_node_put(endpoint);
@@ -2028,6 +1929,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
struct platform_device *pdev = msm_dsi->pdev;
+ const struct msm_dsi_config *cfg;
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
@@ -2060,6 +1962,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
pr_err("%s: get config failed\n", __func__);
goto fail;
}
+ cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
@@ -2069,13 +1972,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
/* fixup base address by io offset */
- msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+ msm_host->ctrl_base += cfg->io_offset;
- ret = dsi_regulator_init(msm_host);
- if (ret) {
- pr_err("%s: regulator init failed\n", __func__);
+ ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
+ cfg->regulator_data,
+ &msm_host->supplies);
+ if (ret)
goto fail;
- }
ret = dsi_clk_init(msm_host);
if (ret) {
@@ -2126,7 +2029,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
- INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->id = msm_host->id;
@@ -2159,23 +2061,9 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- struct drm_panel *panel;
int ret;
msm_host->dev = dev;
- panel = msm_dsi_host_get_panel(&msm_host->base);
-
- if (!IS_ERR(panel) && panel->dsc) {
- struct msm_display_dsc_config *dsc = msm_host->dsc;
-
- if (!dsc) {
- dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL);
- if (!dsc)
- return -ENOMEM;
- dsc->drm = panel->dsc;
- msm_host->dsc = dsc;
- }
- }
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
@@ -2556,7 +2444,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
msm_dsi_sfpb_config(msm_host, true);
- ret = dsi_host_regulator_enable(msm_host);
+ ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n",
__func__, ret);
@@ -2596,7 +2485,8 @@ fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
- dsi_host_regulator_disable(msm_host);
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
unlock_ret:
mutex_unlock(&msm_host->dev_mutex);
return ret;
@@ -2623,7 +2513,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
- dsi_host_regulator_disable(msm_host);
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
msm_dsi_sfpb_config(msm_host, false);
@@ -2659,45 +2550,33 @@ enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
int pic_width = mode->hdisplay;
int pic_height = mode->vdisplay;
if (!msm_host->dsc)
return MODE_OK;
- if (pic_width % dsc->drm->slice_width) {
+ if (pic_width % dsc->slice_width) {
pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
- pic_width, dsc->drm->slice_width);
+ pic_width, dsc->slice_width);
return MODE_H_ILLEGAL;
}
- if (pic_height % dsc->drm->slice_height) {
+ if (pic_height % dsc->slice_height) {
pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
- pic_height, dsc->drm->slice_height);
+ pic_height, dsc->slice_height);
return MODE_V_ILLEGAL;
}
return MODE_OK;
}
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
-{
- return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
-}
-
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
return to_msm_dsi_host(host)->mode_flags;
}
-struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
-{
- struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-
- return of_drm_find_bridge(msm_host->device_node);
-}
-
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -2771,7 +2650,7 @@ void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}
-struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
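The dsi_host.c hunks above replace the driver's hand-rolled regulator handling with the regulator bulk API: the per-SoC config carries a const regulator_bulk_data table (with init_load_uA load hints), devm_regulator_bulk_get_const() fetches the supplies once at init time, and power on/off reduce to regulator_bulk_enable()/regulator_bulk_disable(). A minimal sketch of that pattern follows; the supply names, loads and helper below are hypothetical, not taken from the patch.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer table; supply names and loads are illustrative. */
static const struct regulator_bulk_data my_host_supplies[] = {
	{ .supply = "vdda", .init_load_uA = 21800 },
	{ .supply = "refgen" },
};

static int my_host_get_and_enable(struct device *dev,
				  struct regulator_bulk_data **supplies)
{
	int ret;

	/* devm-managed: the core allocates and returns the live array */
	ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(my_host_supplies),
					    my_host_supplies, supplies);
	if (ret)
		return ret;

	/* the .init_load_uA hints were already applied by the regulator core */
	return regulator_bulk_enable(ARRAY_SIZE(my_host_supplies), *supplies);
}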
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index cb84d185d73a..3a1417397283 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -141,14 +141,11 @@ static int enable_phy(struct msm_dsi *msm_dsi,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
- int ret;
bool is_bonded_dsi = IS_BONDED_DSI();
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
- ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
-
- return ret;
+ return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
}
static int
@@ -214,39 +211,26 @@ static void dsi_mgr_phy_disable(int id)
}
}
-struct dsi_connector {
- struct drm_connector base;
- int id;
-};
-
struct dsi_bridge {
struct drm_bridge base;
int id;
};
-#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
-static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
-{
- struct dsi_connector *dsi_connector = to_dsi_connector(connector);
- return dsi_connector->id;
-}
-
static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
{
struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
return dsi_bridge->id;
}
-static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
+static void msm_dsi_manager_set_split_display(u8 id)
{
- struct msm_drm_private *priv = conn->dev->dev_private;
- struct msm_kms *kms = priv->kms;
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = msm_dsi->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct msm_dsi *master_dsi, *slave_dsi;
- struct drm_panel *panel;
if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
@@ -256,89 +240,18 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
slave_dsi = other_dsi;
}
- /*
- * There is only 1 panel in the global panel list for bonded DSI mode.
- * Therefore slave dsi should get the drm_panel instance from master
- * dsi.
- */
- panel = msm_dsi_host_get_panel(master_dsi->host);
- if (IS_ERR(panel)) {
- DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
- PTR_ERR(panel));
- return PTR_ERR(panel);
- }
-
- if (!panel || !IS_BONDED_DSI())
- goto out;
-
- drm_object_attach_property(&conn->base,
- conn->dev->mode_config.tile_property, 0);
+ if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
+ return;
/*
* Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
- if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
+ if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
msm_dsi_is_cmd_mode(msm_dsi));
}
-
-out:
- msm_dsi->panel = panel;
- return 0;
-}
-
-static enum drm_connector_status dsi_mgr_connector_detect(
- struct drm_connector *connector, bool force)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-
- return msm_dsi->panel ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static void dsi_mgr_connector_destroy(struct drm_connector *connector)
-{
- struct dsi_connector *dsi_connector = to_dsi_connector(connector);
-
- DBG("");
-
- drm_connector_cleanup(connector);
-
- kfree(dsi_connector);
-}
-
-static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- int num;
-
- if (!panel)
- return 0;
-
- /*
- * In bonded DSI mode, we have one connector that can be
- * attached to the drm_panel.
- */
- num = drm_panel_get_modes(panel, connector);
- if (!num)
- return 0;
-
- return num;
-}
-
-static struct drm_encoder *
-dsi_mgr_connector_best_encoder(struct drm_connector *connector)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-
- DBG("");
- return msm_dsi_get_encoder(msm_dsi);
}
static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
@@ -403,7 +316,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
- struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@@ -418,18 +330,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (!dsi_mgr_power_on_early(bridge))
dsi_mgr_bridge_power_on(bridge);
- /* Always call panel functions once, because even for dual panels,
- * there is only one drm_panel instance.
- */
- if (panel) {
- ret = drm_panel_prepare(panel);
- if (ret) {
- pr_err("%s: prepare panel %d failed, %d\n", __func__,
- id, ret);
- goto panel_prep_fail;
- }
- }
-
ret = msm_dsi_host_enable(host);
if (ret) {
pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
@@ -449,9 +349,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
- if (panel)
- drm_panel_unprepare(panel);
-panel_prep_fail:
return;
}
@@ -469,62 +366,12 @@ void msm_dsi_manager_tpg_enable(void)
}
}
-static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
-{
- int id = dsi_mgr_bridge_get_id(bridge);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- bool is_bonded_dsi = IS_BONDED_DSI();
- int ret;
-
- DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
- /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
- return;
-
- if (panel) {
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id,
- ret);
- }
- }
-}
-
-static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
-{
- int id = dsi_mgr_bridge_get_id(bridge);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- bool is_bonded_dsi = IS_BONDED_DSI();
- int ret;
-
- DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
- /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
- return;
-
- if (panel) {
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
- ret);
- }
-}
-
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
- struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@@ -551,13 +398,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
- if (panel) {
- ret = drm_panel_unprepare(panel);
- if (ret)
- pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
- id, ret);
- }
-
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);
@@ -614,76 +454,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
return msm_dsi_host_check_dsc(host, mode);
}
-static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
- .detect = dsi_mgr_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = dsi_mgr_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
- .get_modes = dsi_mgr_connector_get_modes,
- .best_encoder = dsi_mgr_connector_best_encoder,
-};
-
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.pre_enable = dsi_mgr_bridge_pre_enable,
- .enable = dsi_mgr_bridge_enable,
- .disable = dsi_mgr_bridge_disable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
.mode_valid = dsi_mgr_bridge_mode_valid,
};
-/* initialize connector when we're connected to a drm_panel */
-struct drm_connector *msm_dsi_manager_connector_init(u8 id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_connector *connector = NULL;
- struct dsi_connector *dsi_connector;
- int ret;
-
- dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
- if (!dsi_connector)
- return ERR_PTR(-ENOMEM);
-
- dsi_connector->id = id;
-
- connector = &dsi_connector->base;
-
- ret = drm_connector_init(msm_dsi->dev, connector,
- &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
-
- /* Enable HPD to let hpd event is handled
- * when panel is attached to the host.
- */
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- /* Display driver doesn't support interlace now. */
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, msm_dsi->encoder);
-
- ret = msm_dsi_manager_panel_init(connector, id);
- if (ret) {
- DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
- goto fail;
- }
-
- return connector;
-
-fail:
- connector->funcs->destroy(connector);
- return ERR_PTR(ret);
-}
-
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -722,18 +499,21 @@ fail:
return ERR_PTR(ret);
}
-struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+int msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
- struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
int ret;
int_bridge = msm_dsi->bridge;
- ext_bridge = msm_dsi->external_bridge =
- msm_dsi_host_get_bridge(msm_dsi->host);
+ ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
+ msm_dsi->pdev->dev.of_node, 1, 0);
+ if (IS_ERR(ext_bridge))
+ return PTR_ERR(ext_bridge);
+
+ msm_dsi->external_bridge = ext_bridge;
encoder = msm_dsi->encoder;
@@ -745,36 +525,32 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret == -EINVAL) {
- struct drm_connector *connector;
- struct list_head *connector_list;
-
- /* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
-
/*
- * we need the drm_connector created by the external bridge
- * driver (or someone else) to feed it to our driver's
- * priv->connector[] list, mainly for msm_fbdev_init()
+ * link the internal dsi bridge to the external bridge,
+ * connector is created by the next bridge.
*/
- connector_list = &dev->mode_config.connector_list;
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ struct drm_connector *connector;
- list_for_each_entry(connector, connector_list, head) {
- if (drm_connector_has_possible_encoder(connector, encoder))
- return connector;
+ /* We are in charge of the connector, create one now. */
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return PTR_ERR(connector);
}
- return ERR_PTR(-ENODEV);
- }
-
- connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- return ERR_CAST(connector);
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
}
- drm_connector_attach_encoder(connector, encoder);
+ /* The pipeline is ready, ping encoders if necessary */
+ msm_dsi_manager_set_split_display(id);
- return connector;
+ return 0;
}
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
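dsi_manager.c above removes the driver's own drm_connector implementation: the downstream bridge is looked up with devm_drm_of_get_bridge(), attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and only when the next bridge insists on creating its own connector (-EINVAL) does the code fall back to a plain attach; otherwise drm_bridge_connector_init() builds the connector from the bridge chain. A condensed sketch of that attach-with-fallback idiom; the function and variable names are made up for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_of.h>

static int my_attach_ext_bridge(struct drm_device *drm, struct device *dev,
				struct drm_encoder *encoder,
				struct drm_bridge *int_bridge)
{
	struct drm_bridge *ext_bridge;
	struct drm_connector *connector;
	int ret;

	/* port 1, endpoint 0: the output endpoint of this hypothetical binding */
	ext_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
	if (IS_ERR(ext_bridge))
		return PTR_ERR(ext_bridge);

	/* Preferred path: no bridge in the chain creates a connector */
	ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL)
		/* Bridge wants its own connector; attach the classic way */
		return drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
	if (ret)
		return ret;

	/* We own the connector: build it on top of the bridge chain */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}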
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index a39de3bdc7fa..7fc0975cb869 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
@@ -507,82 +507,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(dev, num, s);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER) {
- DRM_DEV_ERROR(dev,
- "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- }
-
- return ret;
- }
-
- return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- int num = phy->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
@@ -697,12 +621,9 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
- if (phy->id < 0) {
- ret = phy->id;
- DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
- __func__, ret);
- goto fail;
- }
+ if (phy->id < 0)
+ return dev_err_probe(dev, phy->id,
+ "Couldn't identify PHY index\n");
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
@@ -710,86 +631,71 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
- if (IS_ERR(phy->base)) {
- DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->base))
+ return dev_err_probe(dev, PTR_ERR(phy->base),
+ "Failed to map phy base\n");
phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
- if (IS_ERR(phy->pll_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->pll_base))
+ return dev_err_probe(dev, PTR_ERR(phy->pll_base),
+ "Failed to map pll base\n");
if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
- if (IS_ERR(phy->lane_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->lane_base))
+ return dev_err_probe(dev, PTR_ERR(phy->lane_base),
+ "Failed to map phy lane base\n");
}
if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
- if (IS_ERR(phy->reg_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->reg_base))
+ return dev_err_probe(dev, PTR_ERR(phy->reg_base),
+ "Failed to map phy regulator base\n");
}
if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
- goto fail;
+ return ret;
}
- ret = dsi_phy_regulator_init(phy);
+ ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
+ phy->cfg->regulator_data,
+ &phy->supplies);
if (ret)
- goto fail;
+ return ret;
phy->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(phy->ahb_clk)) {
- DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
- ret = PTR_ERR(phy->ahb_clk);
- goto fail;
- }
+ if (IS_ERR(phy->ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ "Unable to get ahb clk\n");
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
ret = dsi_phy_enable_resource(phy);
if (ret)
- goto fail;
+ return ret;
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
- if (ret) {
- DRM_DEV_INFO(dev,
- "%s: pll init failed: %d, need separate pll clk driver\n",
- __func__, ret);
- goto fail;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "PLL init failed; need separate clk driver\n");
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
phy->provided_clocks);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
- goto fail;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
dsi_phy_disable_resource(phy);
platform_set_drvdata(pdev, phy);
return 0;
-
-fail:
- return ret;
}
static struct platform_driver dsi_phy_platform_driver = {
@@ -829,7 +735,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto res_en_fail;
}
- ret = dsi_phy_regulator_enable(phy);
+ ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
if (ret) {
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
@@ -866,7 +772,7 @@ pll_restor_fail:
if (phy->cfg->ops.disable)
phy->cfg->ops.disable(phy);
phy_en_fail:
- dsi_phy_regulator_disable(phy);
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
dsi_phy_disable_resource(phy);
res_en_fail:
@@ -880,7 +786,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
- dsi_phy_regulator_disable(phy);
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
dsi_phy_disable_resource(phy);
}
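The dsi_phy.c probe path above is converted to dev_err_probe(): each failure now returns through a single call that logs the message at error level, stays quiet for -EPROBE_DEFER (the check the old dsi_phy_regulator_init() performed by hand) and records the deferral reason. A small sketch of the idiom, using a generic devm_clk_get() lookup as the stand-in resource:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *ahb_clk;

	ahb_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(ahb_clk))
		/*
		 * Prints "Unable to get ahb clk: ..." for real errors,
		 * logs only at debug level for -EPROBE_DEFER, and passes
		 * the error code straight back to the caller.
		 */
		return dev_err_probe(dev, PTR_ERR(ahb_clk),
				     "Unable to get ahb clk\n");

	return 0;
}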
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index dc91b43d5a38..60a99c6525b2 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -29,7 +29,8 @@ struct msm_dsi_phy_ops {
};
struct msm_dsi_phy_cfg {
- struct dsi_reg_config reg_cfg;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
struct msm_dsi_phy_ops ops;
unsigned long min_pll_rate;
@@ -98,7 +99,7 @@ struct msm_dsi_phy {
int id;
struct clk *ahb_clk;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+ struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 08b015ea1b1e..27b592c776a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -188,19 +188,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
- config->ssc_stepsize & 0xff);
+ config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
- config->ssc_stepsize >> 8);
+ config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
- config->ssc_div_per & 0xff);
+ config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
- config->ssc_div_per >> 8);
+ config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
- config->ssc_adj_per & 0xff);
+ config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
- config->ssc_adj_per >> 8);
+ config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
- SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@@ -215,16 +215,19 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
- 0xba);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ 0xba);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
+ 0x0c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
+ 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
+ 0xfa);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
- 0x4c);
+ 0x4c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
@@ -236,18 +239,18 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *conf
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
- config->decimal_div_start);
+ config->decimal_div_start);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
- config->frac_div_start & 0xff);
+ config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
- (config->frac_div_start & 0xff00) >> 8);
+ (config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
- (config->frac_div_start & 0x30000) >> 16);
+ (config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
- config->pll_clock_inverters);
+ config->pll_clock_inverters);
}
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -306,7 +309,7 @@ static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
- data & ~BIT(5));
+ data & ~BIT(5));
ndelay(250);
}
@@ -315,7 +318,7 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
- data | BIT(5));
+ data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
@@ -326,7 +329,7 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- data & ~BIT(5));
+ data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
@@ -335,7 +338,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- data | BIT(5));
+ data | BIT(5));
}
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
@@ -356,7 +359,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Start PLL */
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
- 0x01);
+ 0x01);
/*
* ensure all PLL configurations are written prior to checking
@@ -378,10 +381,10 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
dsi_pll_enable_global_clk(pll_10nm->slave);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
- 0x01);
+ 0x01);
if (pll_10nm->slave)
dsi_phy_write(pll_10nm->slave->phy->base +
- REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
error:
return rc;
@@ -486,7 +489,7 @@ static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
- REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
@@ -515,7 +518,7 @@ static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- cached->bit_clk_div | (cached->pix_clk_div << 4));
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@@ -571,64 +574,59 @@ static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
- char parent2[32], parent3[32], parent4[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *pclk_mux;
int ret;
DBG("DSI%d", pll_10nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
pll_10nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent, CLK_SET_RATE_PARENT,
- pll_10nm->phy->pll_base +
- REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- CLK_SET_RATE_PARENT,
- pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- 0, 4, CLK_DIVIDER_ONE_BASED,
- &pll_10nm->postdiv_lock);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -636,52 +634,45 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 2);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 4);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2, parent3, parent4
- }), 4, 0, pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- 0, 2, 0, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
+
+ pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ pll_out_div,
+ pll_post_out_div,
+ }), 4, 0, pll_10nm->phy->base +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
+ if (IS_ERR(pclk_mux)) {
+ ret = PTR_ERR(pclk_mux);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- 0, pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- 4, 4, CLK_DIVIDER_ONE_BASED,
- &pll_10nm->postdiv_lock);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
+ 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -1028,14 +1019,14 @@ static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
return 0;
}
+static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
@@ -1052,12 +1043,8 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
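pll_10nm_register() above stops threading parent relationships through snprintf'd clock-name strings: each registered clk_hw is held in a local variable and handed to the *_parent_hw()/_parent_hws() registration helpers, so parents are linked by pointer rather than by global name lookup. A trimmed sketch of the same pattern; the clock names, register offset and divider widths below are placeholders, not values from the patch.

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/spinlock.h>

/* vco_hw is assumed to be an already-registered VCO clk_hw. */
static int my_register_post_dividers(struct device *dev, struct clk_hw *vco_hw,
				     void __iomem *reg, spinlock_t *lock)
{
	struct clk_hw *out_div, *byte_clk;

	/* Divider parented directly on the VCO clk_hw, no name lookup */
	out_div = devm_clk_hw_register_divider_parent_hw(dev, "my_pll_out_div",
			vco_hw, CLK_SET_RATE_PARENT, reg, 0, 2,
			CLK_DIVIDER_POWER_OF_TWO, lock);
	if (IS_ERR(out_div))
		return PTR_ERR(out_div);

	/* Fixed /8 byte clock hanging off the divider registered above */
	byte_clk = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			"my_byte_clk", out_div, CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(byte_clk))
		return PTR_ERR(byte_clk);

	return 0;
}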
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 8199c53567f4..0f8f4ca46429 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -711,7 +711,7 @@ static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
+ "restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -764,14 +764,14 @@ static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
const char *name,
- const char *parent_name,
+ const struct clk_hw *parent_hw,
unsigned long flags,
u8 shift)
{
struct dsi_pll_14nm_postdiv *pll_postdiv;
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_init_data postdiv_init = {
- .parent_names = (const char *[]) { parent_name },
+ .parent_hws = (const struct clk_hw *[]) { parent_hw },
.num_parents = 1,
.name = name,
.flags = flags,
@@ -800,72 +800,70 @@ static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_14nm_vco,
};
struct device *dev = &pll_14nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
int ret;
DBG("DSI%d", pll_14nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
pll_14nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
- hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
- CLK_SET_RATE_PARENT, 0);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
+ &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(n1_postdiv))
+ return PTR_ERR(n1_postdiv);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
/* DSI Byte clock = VCO_CLK / N1 / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
/*
* Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider
* on the way. Don't let it set parent.
*/
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, n1_postdiv, 0, 1, 2);
+ if (IS_ERR(n1_postdivby2))
+ return PTR_ERR(n1_postdivby2);
- snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
* This is the output of N2 post-divider, bits 4-7 in
* REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
*/
- hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
+ 0, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
- provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
}
@@ -952,7 +950,8 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -1005,7 +1004,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
ret = dsi_14nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
@@ -1024,14 +1023,18 @@ static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 17000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 73400 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 17000, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@@ -1047,12 +1050,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 73400, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_73p4mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@@ -1068,12 +1067,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 17000, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
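pll_14nm_postdiv_register() above makes the same switch for the hand-rolled post-divider clocks by filling clk_init_data.parent_hws instead of parent_names. A minimal sketch of a custom clk_hw registered that way; the clock name is a placeholder, and the ops are passed in so the snippet stays self-contained rather than reproducing the driver's own.

#include <linux/clk-provider.h>
#include <linux/device.h>

static int my_register_postdiv(struct device *dev, struct clk_hw *hw,
			       const struct clk_ops *ops,
			       const struct clk_hw *parent_hw)
{
	struct clk_init_data init = {
		.name = "my_postdiv_clk",
		.ops = ops,
		/* parent given as a clk_hw pointer, not a global name */
		.parent_hws = (const struct clk_hw *[]) { parent_hw },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	};

	hw->init = &init;

	/* the clk core copies the init data during registration */
	return devm_clk_hw_register(dev, hw);
}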
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index ee7c418a1c29..c9752b991744 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -129,15 +129,15 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
dsi_20nm_phy_regulator_ctrl(phy, false);
}
+static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+};
+
const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- {"vcca", 10000, 100}, /* 1.0 V */
- },
- },
+ .regulator_data = dsi_phy_20nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 48eab80b548e..4c1bf55c5f38 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -104,7 +104,7 @@ static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
* reset bit off and back on.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
- DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+ DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}
@@ -201,9 +201,9 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
- DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
- DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
/* Add hardware recommended delay for correct PLL configuration */
@@ -316,12 +316,12 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
for (i = 0; i < 2; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
- 0x0c, 100);
+ 0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
/* poll for PLL ready status */
- locked = pll_28nm_poll_for_ready(pll_28nm,
- max_reads, timeout_us);
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
+ timeout_us);
if (locked)
break;
@@ -508,28 +508,28 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- cached_state->postdiv3);
+ cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
- cached_state->postdiv1);
+ cached_state->postdiv1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
- cached_state->byte_mux);
+ cached_state->byte_mux);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent1[32], parent2[32], vco_name[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
};
struct device *dev = &pll_28nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
int ret;
DBG("%d", pll_28nm->phy->id);
@@ -539,55 +539,49 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
else
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
pll_28nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT,
+ snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+ analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
pll_28nm->phy->pll_base +
- REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
0, 4, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent1, 0, pll_28nm->phy->pll_base +
+ if (IS_ERR(analog_postdiv))
+ return PTR_ERR(analog_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+ indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
+ if (IS_ERR(indirect_path_div2))
+ return PTR_ERR(indirect_path_div2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- 0, 8, 0, NULL);
+ 0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent1, parent2
+ snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
+ byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ &pll_28nm->clk_hw,
+ indirect_path_div2,
}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
- REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+ if (IS_ERR(byte_mux))
+ return PTR_ERR(byte_mux);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT, 1, 4);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ byte_mux, CLK_SET_RATE_PARENT, 1, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
@@ -627,31 +621,31 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
- DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
- DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
- DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
- DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
- DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
- DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
- DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
- DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
- DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
- DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
- DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
- DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
@@ -713,7 +707,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -769,14 +764,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100},
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@@ -792,12 +787,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100},
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@@ -813,12 +804,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index fc56cdcc9ad6..26c08047e20c 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -104,29 +104,29 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
- fb_divider & 0xff);
+ fb_divider & 0xff);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
- val);
+ val);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
- val);
+ val);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
- 0xf);
+ 0xf);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
- val);
+ val);
return 0;
}
@@ -206,7 +206,7 @@ static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
/* enable the PLL */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
- DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+ DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
@@ -367,23 +367,23 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
+ "restore vco rate failed. ret=%d\n", ret);
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
- cached_state->postdiv3);
+ cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
- cached_state->postdiv2);
+ cached_state->postdiv2);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
- cached_state->postdiv1);
+ cached_state->postdiv1);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
- char *clk_name, *parent_name, *vco_name;
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
@@ -404,20 +404,8 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
if (!bytediv)
return -ENOMEM;
- vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!vco_name)
- return -ENOMEM;
-
- parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!parent_name)
- return -ENOMEM;
-
- clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!clk_name)
- return -ENOMEM;
-
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- vco_init.name = vco_name;
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ vco_init.name = clk_name;
pll_28nm->clk_hw.init = &vco_init;
@@ -429,13 +417,14 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
bytediv->hw.init = &bytediv_init;
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
- snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
bytediv_init.flags = CLK_SET_RATE_PARENT;
- bytediv_init.parent_names = (const char * const *) &parent_name;
+ bytediv_init.parent_hws = (const struct clk_hw*[]){
+ &pll_28nm->clk_hw,
+ };
bytediv_init.num_parents = 1;
/* DIV2 */
@@ -444,12 +433,12 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent_name, 0, pll_28nm->phy->pll_base +
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
- 0, 8, 0, NULL);
+ 0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
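The pll_28nm_register() hunk above replaces devm-allocated parent name strings with clk_hw parent pointers. A condensed sketch of the divider form of that pattern, with placeholder names; the call signature follows the devm_clk_hw_register_divider_parent_hw() uses visible in this patch:

#include <linux/clk-provider.h>
#include <linux/err.h>

/* Hypothetical: "vco" is a clk_hw already registered by the same driver. */
static int example_register_div(struct device *dev, struct clk_hw *vco,
				void __iomem *reg)
{
	struct clk_hw *div;

	/* Parent is linked by clk_hw pointer, no parent-name string needed. */
	div = devm_clk_hw_register_divider_parent_hw(dev, "example_div",
						     vco, 0, reg,
						     0, 8, 0, NULL);
	return PTR_ERR_OR_ZERO(div);
}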
@@ -489,29 +478,29 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
- DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
- DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
- DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
- DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
- DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
- DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
- DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
- DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
- DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
- DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
- DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+ DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
@@ -523,7 +512,7 @@ static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
- 0x100);
+ 0x100);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
@@ -544,7 +533,7 @@ static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
- 0x3);
+ 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
@@ -577,11 +566,11 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
- 0x00);
+ 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
- 0x01);
+ 0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
- 0x66);
+ 0x66);
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
@@ -602,7 +591,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -648,14 +638,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = dsi_phy_28nm_8960_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 66ed1919a1db..9e7fa7d88ead 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -176,19 +176,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *c
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
- config->ssc_stepsize & 0xff);
+ config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
- config->ssc_stepsize >> 8);
+ config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
- config->ssc_div_per & 0xff);
+ config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
- config->ssc_div_per >> 8);
+ config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
- config->ssc_adj_per & 0xff);
+ config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
- config->ssc_adj_per >> 8);
+ config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
- SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@@ -208,7 +208,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
}
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
- analog_controls_five_1);
+ analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
@@ -245,17 +245,20 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
- config->frac_div_start & 0xff);
+ config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
- (config->frac_div_start & 0xff00) >> 8);
+ (config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
- (config->frac_div_start & 0x30000) >> 16);
+ (config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
+ pll->phy->cphy_mode ? 0x00 : 0x10);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -341,7 +344,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- data | BIT(5) | BIT(4));
+ data | BIT(5) | BIT(4));
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@@ -500,7 +503,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
- REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
@@ -529,7 +532,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- cached->bit_clk_div | (cached->pix_clk_div << 4));
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@@ -585,65 +588,60 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
- char parent2[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
};
struct device *dev = &pll_7nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
int ret;
DBG("DSI%d", pll_7nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
pll_7nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent, CLK_SET_RATE_PARENT,
- pll_7nm->phy->pll_base +
- REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- CLK_SET_RATE_PARENT,
- pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- 0, 4, CLK_DIVIDER_ONE_BASED,
- &pll_7nm->postdiv_lock);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1,
- pll_7nm->phy->cphy_mode ? 7 : 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -651,25 +649,25 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 2);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
if (pll_7nm->phy->cphy_mode)
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 2, 7);
else
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
goto fail;
}
@@ -682,34 +680,32 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
- snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+ phy_pll_out_dsi_parent = pll_post_out_div;
} else {
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2,
- }), 2, 0, pll_7nm->phy->base +
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ }), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 1, 0, NULL);
+ 0, 1, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+ phy_pll_out_dsi_parent = hw;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- 0, pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- 4, 4, CLK_DIVIDER_ONE_BASED,
- &pll_7nm->postdiv_lock);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ phy_pll_out_dsi_parent, 0,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -841,7 +837,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: PHY timing calculation failed\n", __func__);
+ "%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -960,10 +956,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
- timing->shared_timings.clk_pre);
+ timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
- timing->shared_timings.clk_post);
+ timing->shared_timings.clk_post);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
@@ -982,9 +978,9 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
- timing->shared_timings.clk_pre);
+ timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
- timing->shared_timings.clk_post);
+ timing->shared_timings.clk_post);
}
/* DSI lane settings */
@@ -1036,14 +1032,18 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
DBG("DSI%d PHY disabled", phy->id);
}
+static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 37550 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@@ -1065,12 +1065,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@@ -1087,12 +1083,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 37550, 0},
- },
- },
+ .regulator_data = dsi_phy_7nm_37750uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
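A similar sketch for the mux case used in the pll_7nm_register() conversion earlier in this file, again with placeholder names and the argument order taken from the devm_clk_hw_register_mux_parent_hws() call in this patch:

#include <linux/clk-provider.h>
#include <linux/err.h>

/* Hypothetical pclk mux whose two parents are in-driver clk_hw objects. */
static struct clk_hw *example_register_pclk_mux(struct device *dev,
						struct clk_hw *bit_clk,
						struct clk_hw *by_2_bit_clk,
						void __iomem *reg)
{
	return devm_clk_hw_register_mux_parent_hws(dev, "example_pclk_mux",
			((const struct clk_hw *[]){ bit_clk, by_2_bit_clk }),
			2, 0, reg, 0, 1, 0, NULL);
}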
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index b06d9d25a189..4dd055416620 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -691,15 +691,13 @@ static const struct clk_ops hdmi_8996_pll_ops = {
.is_enabled = hdmi_8996_pll_is_enabled,
};
-static const char * const hdmi_pll_parents[] = {
- "xo",
-};
-
static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
- .parent_names = hdmi_pll_parents,
- .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
};
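The parent is now described through clk_parent_data instead of a global clock name. A short sketch of what the two fields mean, with a placeholder ops pointer; the lookup behaviour described in the comment is the generic clk framework rule, not anything specific to this driver:

#include <linux/clk-provider.h>

static const struct clk_ops example_pll_ops;	/* placeholder */

/*
 * .fw_name is matched against this device's "clock-names" property in DT;
 * .name is only used as a legacy fallback when no DT mapping for "xo"
 * exists.
 */
static const struct clk_init_data example_pll_init = {
	.name = "example_pll",
	.ops = &example_pll_ops,
	.parent_data = (const struct clk_parent_data[]){
		{ .fw_name = "xo", .name = "xo_board" },
	},
	.num_parents = 1,
};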
@@ -707,8 +705,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8996 *pll;
- struct clk *clk;
- int i;
+ int i, ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
@@ -735,10 +732,16 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
}
pll->clk_hw.init = &pll_init;
- clk = devm_clk_register(dev, &pll->clk_hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
- return -EINVAL;
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
+ return ret;
}
return 0;
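This hunk moves from the clk pointer API to the clk_hw API and also exposes the PLL to DT consumers. A minimal sketch of that pairing, with placeholder names; of_clk_hw_simple_get() simply returns the one clk_hw for any clock specifier:

#include <linux/clk-provider.h>

/* Sketch: register one clk_hw and make it available via #clock-cells. */
static int example_register_provider(struct device *dev, struct clk_hw *hw)
{
	int ret;

	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ret;

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}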
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7d2dab260f86..95f4374ae21c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -7,6 +7,7 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -326,6 +327,13 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
+ &fail_gem_alloc);
+ fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
+ &fail_gem_iova);
+#endif
}
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1ed4cd09dbf8..28034c21f6bc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -6,6 +6,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -78,6 +79,11 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(fail_gem_alloc);
+DECLARE_FAULT_ATTR(fail_gem_iova);
+#endif
+
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -355,7 +361,7 @@ static int msm_init_vram(struct drm_device *dev)
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire CMA chunk carved out in early startup in
+ * Grab the entire DMA chunk carved out in early startup in
* mach-msm:
*/
} else if (!msm_use_mmu(dev)) {
@@ -418,14 +424,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
- INIT_LIST_HEAD(&priv->inactive_willneed);
- INIT_LIST_HEAD(&priv->inactive_dontneed);
- INIT_LIST_HEAD(&priv->inactive_unpinned);
- mutex_init(&priv->mm_lock);
+ /*
+ * Initialize the LRUs:
+ */
+ mutex_init(&priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
- might_lock(&priv->mm_lock);
+ might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
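The per-list bookkeeping above is replaced by the drm_gem_lru helpers. A minimal sketch of their basic contract, assuming one mutex shared by all LRUs; the example_* names are placeholders:

#include <drm/drm_gem.h>
#include <linux/mutex.h>

struct example_lrus {
	struct mutex lock;
	struct drm_gem_lru unbacked;
	struct drm_gem_lru pinned;
};

static void example_lru_setup(struct example_lrus *l)
{
	mutex_init(&l->lock);
	/* All LRUs that objects migrate between share the same lock. */
	drm_gem_lru_init(&l->unbacked, &l->lock);
	drm_gem_lru_init(&l->pinned, &l->lock);
}

static void example_mark_pinned(struct example_lrus *l,
				struct drm_gem_object *obj)
{
	/* Moving an object to one LRU removes it from whichever it was on. */
	drm_gem_lru_move_tail(&l->pinned, obj);
}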
@@ -469,6 +479,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
}
}
+ drm_helper_move_panel_connectors_to_head(ddev);
+
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
@@ -697,6 +709,9 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
flags |= MSM_BO_WC;
}
+ if (should_fail(&fail_gem_alloc, args->size))
+ return -ENOMEM;
+
return msm_gem_new_handle(dev, file, args->size,
args->flags, &args->handle, NULL);
}
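A minimal sketch of the fault-injection pattern behind the should_fail() checks added here, with hypothetical names; the three calls come from <linux/fault-inject.h> and the debugfs knob needs CONFIG_FAULT_INJECTION_DEBUG_FS:

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fault-inject.h>

DECLARE_FAULT_ATTR(example_fail_alloc);

static int example_alloc(size_t size)
{
	/* Fails according to the probability/interval set through debugfs. */
	if (should_fail(&example_fail_alloc, size))
		return -ENOMEM;

	return 0;
}

static void example_debugfs_init(struct dentry *root)
{
	fault_create_debugfs_attr("fail_example_alloc", root,
				  &example_fail_alloc);
}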
@@ -758,6 +773,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
@@ -779,6 +797,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
if (priv->gpu->aspace == ctx->aspace)
return -EOPNOTSUPP;
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
@@ -883,13 +904,13 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
* retired, so if the fence is not found it means there is nothing
* to wait for
*/
- ret = mutex_lock_interruptible(&queue->lock);
+ ret = mutex_lock_interruptible(&queue->idr_lock);
if (ret)
return ret;
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
- mutex_unlock(&queue->lock);
+ mutex_unlock(&queue->idr_lock);
if (!fence)
return 0;
@@ -1242,10 +1263,15 @@ void msm_drv_shutdown(struct platform_device *pdev)
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *drm = priv ? priv->dev : NULL;
- if (!priv || !priv->kms)
- return;
-
- drm_atomic_helper_shutdown(drm);
+ /*
+ * Shut down the hw if we're far enough along that things might be on.
+ * If we run this too early, we'll end up panicking in a variety of
+ * places. Since we don't register the drm device until late in
+ * msm_drm_init, drm_dev->registered is used as an indicator that the
+ * shutdown will be successful.
+ */
+ if (drm && drm->registered)
+ drm_atomic_helper_shutdown(drm);
}
static struct platform_driver msm_platform_driver = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b3689a2d27d7..b2ea262296a4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -27,13 +27,19 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
+#ifdef CONFIG_FAULT_INJECTION
+extern struct fault_attr fail_gem_alloc;
+extern struct fault_attr fail_gem_iova;
+#else
+# define should_fail(attr, size) 0
+#endif
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@@ -96,11 +102,6 @@ struct msm_drm_thread {
struct kthread_worker *worker;
};
-/* DSC config */
-struct msm_display_dsc_config {
- struct drm_dsc_config *drm;
-};
-
struct msm_drm_private {
struct drm_device *dev;
@@ -142,28 +143,60 @@ struct msm_drm_private {
struct mutex obj_lock;
/**
- * LRUs of inactive GEM objects. Every bo is either in one of the
- * inactive lists (depending on whether or not it is shrinkable) or
- * gpu->active_list (for the gpu it is active on[1]), or transiently
- * on a temporary list as the shrinker is running.
+ * lru:
*
- * Note that inactive_willneed also contains pinned and vmap'd bos,
- * but the number of pinned-but-not-active objects is small (scanout
- * buffers, ringbuffer, etc).
+ * The various LRUs that a GEM object is in at various stages of
+ * its lifetime. Objects start out in the unbacked LRU. When
+ * pinned (for scanout or permanently mapped GPU buffers, like
+ * ringbuffer, memptr, fw, etc) it moves to the pinned LRU. When
+ * unpinned, it moves into willneed or dontneed LRU depending on
+ * madvise state. When backing pages are evicted (willneed) or
+ * purged (dontneed) it moves back into the unbacked LRU.
*
- * These lists are protected by mm_lock (which should be acquired
- * before per GEM object lock). One should *not* hold mm_lock in
- * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
- *
- * [1] if someone ever added support for the old 2d cores, there could be
- * more than one gpu object
+ * The dontneed LRU is considered by the shrinker for objects
+ * that are candidates for purging, and the willneed LRU is
+ * considered for objects that could be evicted.
*/
- struct list_head inactive_willneed; /* inactive + potentially unpin/evictable */
- struct list_head inactive_dontneed; /* inactive + shrinkable */
- struct list_head inactive_unpinned; /* inactive + purged or unpinned */
- long shrinkable_count; /* write access under mm_lock */
- long evictable_count; /* write access under mm_lock */
- struct mutex mm_lock;
+ struct {
+ /**
+ * unbacked:
+ *
+ * The LRU for GEM objects without backing pages allocated.
+ * This mostly exists so that objects are always in one
+ * LRU.
+ */
+ struct drm_gem_lru unbacked;
+
+ /**
+ * pinned:
+ *
+ * The LRU for pinned GEM objects
+ */
+ struct drm_gem_lru pinned;
+
+ /**
+ * willneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * WILLNEED state (ie. can be evicted)
+ */
+ struct drm_gem_lru willneed;
+
+ /**
+ * dontneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * DONTNEED state (ie. can be purged)
+ */
+ struct drm_gem_lru dontneed;
+
+ /**
+ * lock:
+ *
+ * Protects manipulation of all of the LRUs.
+ */
+ struct mutex lock;
+ } lru;
struct workqueue_struct *wq;
@@ -290,7 +323,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
-struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@@ -320,7 +353,7 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
return false;
}
-static inline struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return NULL;
}
@@ -433,6 +466,8 @@ void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
+struct icc_path *msm_icc_get(struct device *dev, const char *name);
+
#define msm_writel(data, addr) writel((data), (addr))
#define msm_readl(addr) readl((addr))
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8ddbd2e001d4..1dee0d18abbb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -19,7 +19,7 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
-static void update_inactive(struct msm_gem_object *msm_obj);
+static void update_lru(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
@@ -132,7 +132,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->pages;
@@ -174,40 +174,45 @@ static void put_pages(struct drm_gem_object *obj)
put_pages_vram(obj);
msm_obj->pages = NULL;
+ update_lru(obj);
}
}
-struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
-
if (!IS_ERR(p)) {
- msm_obj->pin_count++;
- update_inactive(msm_obj);
+ to_msm_bo(obj)->pin_count++;
+ update_lru(obj);
}
- msm_gem_unlock(obj);
return p;
}
-void msm_gem_put_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
msm_gem_lock(obj);
- msm_obj->pin_count--;
- GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ p = msm_gem_pin_pages_locked(obj);
+ msm_gem_unlock(obj);
+
+ return p;
+}
+
+void msm_gem_unpin_pages(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
@@ -273,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -302,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
@@ -321,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
@@ -352,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
@@ -370,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
@@ -383,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
{
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace);
@@ -423,19 +428,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
- pages = get_pages(obj);
+ pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
-
- if (!ret)
- msm_obj->pin_count++;
+ if (ret)
+ msm_gem_unpin_locked(obj);
return ret;
}
@@ -444,12 +448,12 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ update_lru(obj);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@@ -465,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma))
@@ -626,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (obj->import_attach)
return ERR_PTR(-ENODEV);
@@ -658,7 +662,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->vaddr;
@@ -699,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
@@ -729,8 +733,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
- if (msm_obj->active_count == 0)
- update_inactive(msm_obj);
+ update_lru(obj);
msm_gem_unlock(obj);
@@ -742,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
@@ -757,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
- update_inactive(msm_obj);
drm_gem_free_mmap_offset(obj);
@@ -780,10 +782,8 @@ void msm_gem_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
- GEM_WARN_ON(!msm_obj->evictable);
- GEM_WARN_ON(msm_obj->active_count);
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, false);
@@ -791,15 +791,13 @@ void msm_gem_evict(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
-
- update_inactive(msm_obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
@@ -808,66 +806,37 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
+static void update_lru(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
-
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
- GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
- GEM_WARN_ON(msm_obj->dontneed);
-
- if (msm_obj->active_count++ == 0) {
- mutex_lock(&priv->mm_lock);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
- list_move_tail(&msm_obj->mm_list, &gpu->active_list);
- mutex_unlock(&priv->mm_lock);
- }
-}
-
-void msm_gem_active_put(struct drm_gem_object *obj)
-{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(&msm_obj->base);
- if (--msm_obj->active_count == 0) {
- update_inactive(msm_obj);
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+ GEM_WARN_ON(msm_obj->vmap_count);
+
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+ } else if (msm_obj->pin_count || msm_obj->vmap_count) {
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
}
}
-static void update_inactive(struct msm_gem_object *msm_obj)
+bool msm_gem_active(struct drm_gem_object *obj)
{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(obj);
- if (msm_obj->active_count != 0)
- return;
-
- mutex_lock(&priv->mm_lock);
-
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
-
- list_del(&msm_obj->mm_list);
- if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
- mark_evictable(msm_obj);
- } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
- mark_purgeable(msm_obj);
- } else {
- GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- }
+ if (to_msm_bo(obj)->pin_count)
+ return true;
- mutex_unlock(&priv->mm_lock);
+ return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
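msm_gem_active() above replaces the old active_count with a query of the reservation object. A sketch of that check in isolation, assuming the object lock is held by the caller:

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

/* "Active" now means there are unsignalled fences on the object's resv. */
static bool example_gem_busy(struct drm_gem_object *obj)
{
	/* dma_resv_usage_rw(true) covers both reader and writer fences. */
	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}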
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -910,7 +879,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
stats->all.count++;
stats->all.size += obj->size;
- if (is_active(msm_obj)) {
+ if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
@@ -938,7 +907,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
- msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
@@ -1015,15 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- mutex_lock(&priv->mm_lock);
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- list_del(&msm_obj->mm_list);
- mutex_unlock(&priv->mm_lock);
-
- /* object should not be on active list: */
- GEM_WARN_ON(is_active(msm_obj));
-
put_iova_spaces(obj, true);
if (obj->import_attach) {
@@ -1183,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
to_msm_bo(obj)->vram_node = &vma->node;
- /* Call chain get_pages() -> update_inactive() tries to
- * access msm_obj->mm_list, but it is not initialized yet.
- * To avoid NULL pointer dereference error, initialize
- * mm_list to be empty.
- */
- INIT_LIST_HEAD(&msm_obj->mm_list);
-
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
@@ -1212,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
@@ -1270,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 432032ad4aed..c4844cf3a585 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -94,16 +94,6 @@ struct msm_gem_object {
uint8_t madv;
/**
- * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
- */
- bool dontneed : 1;
-
- /**
- * Is object evictable (ie. counted in priv->evictable_count)?
- */
- bool evictable : 1;
-
- /**
* count of active vmap'ing
*/
uint8_t vmap_count;
@@ -114,17 +104,6 @@ struct msm_gem_object {
*/
struct list_head node;
- /**
- * An object is either:
- * inactive - on priv->inactive_dontneed or priv->inactive_willneed
- * (depending on purgeability status)
- * active - on one one of the gpu's active_list.. well, at
- * least for now we don't have (I don't think) hw sync between
- * 2d and 3d one devices which have both, meaning we need to
- * block on submit if a bo is already on other ring
- */
- struct list_head mm_list;
-
struct page **pages;
struct sg_table *sgt;
void *vaddr;
@@ -138,7 +117,6 @@ struct msm_gem_object {
char name[32]; /* Identifier to print for the debugfs files */
- int active_count;
int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -159,8 +137,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
-struct page **msm_gem_get_pages(struct drm_gem_object *obj);
-void msm_gem_put_pages(struct drm_gem_object *obj);
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
+void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -171,8 +149,7 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
-void msm_gem_active_put(struct drm_gem_object *obj);
+bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -208,12 +185,6 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
-static inline bool __must_check
-msm_gem_trylock(struct drm_gem_object *obj)
-{
- return dma_resv_trylock(obj->resv);
-}
-
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@@ -226,8 +197,8 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-static inline bool
-msm_gem_is_locked(struct drm_gem_object *obj)
+static inline void
+msm_gem_assert_locked(struct drm_gem_object *obj)
{
/*
* Destroying the object is a special case.. msm_gem_free_object()
@@ -241,13 +212,10 @@ msm_gem_is_locked(struct drm_gem_object *obj)
* Unfortunately lockdep is not aware of this detail. So when the
* refcount drops to zero, we pretend it is already locked.
*/
- return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
-}
-
-static inline bool is_active(struct msm_gem_object *msm_obj)
-{
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
- return msm_obj->active_count;
+ lockdep_assert_once(
+ (kref_read(&obj->refcount) == 0) ||
+ (lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)
+ );
}
/* imported/exported objects are not purgeable: */
@@ -264,81 +232,15 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(&msm_obj->base);
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
-static inline void mark_purgeable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unpurgeable(msm_obj))
- return;
-
- if (GEM_WARN_ON(msm_obj->dontneed))
- return;
-
- priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
- msm_obj->dontneed = true;
-}
-
-static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unpurgeable(msm_obj))
- return;
-
- if (GEM_WARN_ON(!msm_obj->dontneed))
- return;
-
- priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
- GEM_WARN_ON(priv->shrinkable_count < 0);
- msm_obj->dontneed = false;
-}
-
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}
-static inline void mark_evictable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unevictable(msm_obj))
- return;
-
- if (WARN_ON(msm_obj->evictable))
- return;
-
- priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
- msm_obj->evictable = true;
-}
-
-static inline void mark_unevictable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unevictable(msm_obj))
- return;
-
- if (WARN_ON(!msm_obj->evictable))
- return;
-
- priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
- WARN_ON(priv->evictable_count < 0);
- msm_obj->evictable = false;
-}
-
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
@@ -390,9 +292,8 @@ struct msm_gem_submit {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_ACTIVE 0x2000 /* active refcnt is held */
-#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
-#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
+#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */
uint32_t flags;
union {
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dcc8a573bc76..c1d91863df05 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -63,12 +63,12 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
- msm_gem_get_pages(obj);
+ msm_gem_pin_pages(obj);
return 0;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
- msm_gem_put_pages(obj);
+ msm_gem_unpin_pages(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 0317055e3253..1de14e67f96b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -24,103 +24,77 @@ static bool can_swap(void)
return enable_eviction && get_nr_swap_pages() > 0;
}
+static bool can_block(struct shrink_control *sc)
+{
+ if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
+ return false;
+ return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
+}
+
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned count = priv->shrinkable_count;
+ unsigned count = priv->lru.dontneed.count;
if (can_swap())
- count += priv->evictable_count;
+ count += priv->lru.willneed.count;
return count;
}
static bool
-purge(struct msm_gem_object *msm_obj)
+purge(struct drm_gem_object *obj)
{
- if (!is_purgeable(msm_obj))
+ if (!is_purgeable(to_msm_bo(obj)))
return false;
- /*
- * This will move the obj out of still_in_list to
- * the purged list
- */
- msm_gem_purge(&msm_obj->base);
+ if (msm_gem_active(obj))
+ return false;
+
+ msm_gem_purge(obj);
return true;
}
static bool
-evict(struct msm_gem_object *msm_obj)
+evict(struct drm_gem_object *obj)
{
- if (is_unevictable(msm_obj))
+ if (is_unevictable(to_msm_bo(obj)))
+ return false;
+
+ if (msm_gem_active(obj))
return false;
- msm_gem_evict(&msm_obj->base);
+ msm_gem_evict(obj);
return true;
}
-static unsigned long
-scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
- bool (*shrink)(struct msm_gem_object *msm_obj))
+static bool
+wait_for_idle(struct drm_gem_object *obj)
{
- unsigned freed = 0;
- struct list_head still_in_list;
-
- INIT_LIST_HEAD(&still_in_list);
-
- mutex_lock(&priv->mm_lock);
-
- while (freed < nr_to_scan) {
- struct msm_gem_object *msm_obj = list_first_entry_or_null(
- list, typeof(*msm_obj), mm_list);
-
- if (!msm_obj)
- break;
-
- list_move_tail(&msm_obj->mm_list, &still_in_list);
-
- /*
- * If it is in the process of being freed, msm_gem_free_object
- * can be blocked on mm_lock waiting to remove it. So just
- * skip it.
- */
- if (!kref_get_unless_zero(&msm_obj->base.refcount))
- continue;
-
- /*
- * Now that we own a reference, we can drop mm_lock for the
- * rest of the loop body, to reduce contention with the
- * retire_submit path (which could make more objects purgeable)
- */
-
- mutex_unlock(&priv->mm_lock);
-
- /*
- * Note that this still needs to be trylock, since we can
- * hit shrinker in response to trying to get backing pages
- * for this obj (ie. while it's lock is already held)
- */
- if (!msm_gem_trylock(&msm_obj->base))
- goto tail;
-
- if (shrink(msm_obj))
- freed += msm_obj->base.size >> PAGE_SHIFT;
+ enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+}
- msm_gem_unlock(&msm_obj->base);
+static bool
+active_purge(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
-tail:
- drm_gem_object_put(&msm_obj->base);
- mutex_lock(&priv->mm_lock);
- }
+ return purge(obj);
+}
- list_splice_tail(&still_in_list, list);
- mutex_unlock(&priv->mm_lock);
+static bool
+active_evict(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
- return freed;
+ return evict(obj);
}
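The active_purge()/active_evict() paths above first give a busy object a bounded chance to go idle. A sketch of just that wait, with a placeholder name; the timeout argument of dma_resv_wait_timeout() is in jiffies:

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

static bool example_wait_for_idle(struct drm_gem_object *obj)
{
	/* Non-interruptible wait, bounded to 1000 jiffies; > 0 means idle. */
	return dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(true),
				     false, 1000) > 0;
}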
static unsigned long
@@ -128,21 +102,34 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned long freed;
-
- freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
-
- if (freed > 0)
- trace_msm_gem_purge(freed << PAGE_SHIFT);
-
- if (can_swap() && freed < sc->nr_to_scan) {
- int evicted = scan(priv, sc->nr_to_scan - freed,
- &priv->inactive_willneed, evict);
+ struct {
+ struct drm_gem_lru *lru;
+ bool (*shrink)(struct drm_gem_object *obj);
+ bool cond;
+ unsigned long freed;
+ } stages[] = {
+ /* Stages of progressively more aggressive/expensive reclaim: */
+ { &priv->lru.dontneed, purge, true },
+ { &priv->lru.willneed, evict, can_swap() },
+ { &priv->lru.dontneed, active_purge, can_block(sc) },
+ { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
+ };
+ long nr = sc->nr_to_scan;
+ unsigned long freed = 0;
- if (evicted > 0)
- trace_msm_gem_evict(evicted << PAGE_SHIFT);
+ for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ if (!stages[i].cond)
+ continue;
+ stages[i].freed =
+ drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+ nr -= stages[i].freed;
+ freed += stages[i].freed;
+ }
- freed += evicted;
+ if (freed) {
+ trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
+ stages[1].freed, stages[2].freed,
+ stages[3].freed);
}
return (freed > 0) ? freed : SHRINK_STOP;
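The scan path above becomes a table of progressively more expensive stages over the new LRUs. A trimmed sketch of that shape, with hypothetical callbacks; drm_gem_lru_scan() is used in the three-argument form that appears in this series:

#include <drm/drm_gem.h>
#include <linux/kernel.h>

static unsigned long example_scan(struct drm_gem_lru *dontneed,
				  struct drm_gem_lru *willneed,
				  bool (*purge)(struct drm_gem_object *obj),
				  bool (*evict)(struct drm_gem_object *obj),
				  long nr)
{
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
	} stages[] = {
		{ dontneed, purge },	/* cheap: drop purgeable pages */
		{ willneed, evict },	/* costlier: swap out evictable pages */
	};
	unsigned long freed = 0;

	for (unsigned int i = 0; nr > 0 && i < ARRAY_SIZE(stages); i++) {
		unsigned long f = drm_gem_lru_scan(stages[i].lru, nr,
						   stages[i].shrink);

		nr -= f;
		freed += f;
	}

	return freed;
}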
@@ -173,12 +160,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct msm_gem_object *msm_obj)
+vmap_shrink(struct drm_gem_object *obj)
{
- if (!is_vunmapable(msm_obj))
+ if (!is_vunmapable(to_msm_bo(obj)))
return false;
- msm_gem_vunmap(&msm_obj->base);
+ msm_gem_vunmap(obj);
return true;
}
@@ -188,17 +175,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
- struct list_head *mm_lists[] = {
- &priv->inactive_dontneed,
- &priv->inactive_willneed,
- priv->gpu ? &priv->gpu->active_list : NULL,
+ struct drm_gem_lru *lrus[] = {
+ &priv->lru.dontneed,
+ &priv->lru.willneed,
+ &priv->lru.pinned,
NULL,
};
unsigned idx, unmapped = 0;
- for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
- unmapped += scan(priv, vmap_shrink_limit - unmapped,
- mm_lists[idx], vmap_shrink);
+ for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ unmapped += drm_gem_lru_scan(lrus[idx],
+ vmap_shrink_limit - unmapped,
+ vmap_shrink);
}
*(unsigned long *)ptr += unmapped;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c9e4aeb14f4a..5599d93ec0d2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -26,6 +26,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
uint64_t sz;
int ret;
@@ -36,7 +37,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
- submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kzalloc(sz, GFP_KERNEL);
if (!submit)
return ERR_PTR(-ENOMEM);
@@ -52,9 +53,13 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
+ submit->pid = get_pid(task_pid(current));
submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
+ /* Get a unique identifier for the submission for logging purposes */
+ submit->ident = atomic_inc_return(&ident) - 1;
+
INIT_LIST_HEAD(&submit->node);
return submit;
@@ -67,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
unsigned i;
if (submit->fence_id) {
- mutex_lock(&submit->queue->lock);
+ mutex_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
- mutex_unlock(&submit->queue->lock);
+ mutex_unlock(&submit->queue->idr_lock);
}
dma_fence_put(submit->user_fence);
@@ -238,17 +243,13 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
if (flags & BO_OBJ_PINNED)
msm_gem_unpin_locked(obj);
- if (flags & BO_ACTIVE)
- msm_gem_active_put(obj);
-
if (flags & BO_LOCKED)
dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
- unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
- BO_ACTIVE | BO_LOCKED;
+ unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
@@ -353,18 +354,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
submit->valid = true;
- /*
- * Increment active_count first, so if under memory pressure, we
- * don't inadvertently evict a bo needed by the submit in order
- * to pin an earlier bo in the same submit.
- */
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
-
- msm_gem_active_get(obj, submit->gpu);
- submit->bos[i].flags |= BO_ACTIVE;
- }
-
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
struct msm_gem_vma *vma;
@@ -512,11 +501,11 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
- unsigned cleanup_flags = BO_LOCKED;
+ unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
unsigned i;
if (error)
- cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
+ cleanup_flags |= BO_VMA_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -533,10 +522,6 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
- msm_gem_lock(obj);
- /* Note, VMA already fence-unpinned before submit: */
- submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
- msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
@@ -718,7 +703,6 @@ static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
- static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
@@ -729,10 +713,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
- struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
unsigned i;
- int ret, submitid;
+ int ret;
if (!gpu)
return -ENXIO;
@@ -764,35 +747,26 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
- /* Get a unique identifier for the submission for logging purposes */
- submitid = atomic_inc_return(&ident) - 1;
-
ring = gpu->rb[queue->ring_nr];
- trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
- args->nr_bos, args->nr_cmds);
-
- ret = mutex_lock_interruptible(&queue->lock);
- if (ret)
- goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
- goto out_unlock;
+ return ret;
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos,
- args->nr_cmds);
- if (IS_ERR(submit)) {
- ret = PTR_ERR(submit);
- submit = NULL;
- goto out_unlock;
- }
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ if (IS_ERR(submit))
+ return PTR_ERR(submit);
+
+ trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
+ args->nr_bos, args->nr_cmds);
- submit->pid = pid;
- submit->ident = submitid;
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@@ -887,6 +861,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ mutex_lock(&queue->idr_lock);
+
/*
* If using userspace provided seqno fence, validate that the id
* is available before arming sched job. Since access to fence_idr
@@ -895,6 +871,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
idr_find(&queue->fence_idr, args->fence)) {
+ mutex_unlock(&queue->idr_lock);
ret = -EINVAL;
goto out;
}
@@ -927,6 +904,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->user_fence, 1,
INT_MAX, GFP_KERNEL);
}
+
+ mutex_unlock(&queue->idr_lock);
+
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
@@ -965,9 +945,9 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&queue->lock);
+out_post_unlock:
if (submit)
msm_gem_submit_put(submit);
-out_post_unlock:
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);
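
Editor's note: the queue->idr_lock introduced above exists so that fence-id allocation and removal only serialize against each other, not against the heavier queue->lock that is held across a whole submit. A minimal sketch of that pattern, using illustrative my_* names rather than the driver's structures:

	#include <linux/idr.h>
	#include <linux/limits.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct my_queue {
		struct idr fence_idr;
		struct mutex idr_lock;		/* protects fence_idr only */
	};

	/* Allocate a userspace-visible fence id for @fence. */
	static int my_fence_id_alloc(struct my_queue *q, void *fence)
	{
		int id;

		mutex_lock(&q->idr_lock);
		id = idr_alloc_cyclic(&q->fence_idr, fence, 1, INT_MAX, GFP_KERNEL);
		mutex_unlock(&q->idr_lock);

		return id;	/* negative errno on failure */
	}

	/* Drop the id again, e.g. from the submit destructor. */
	static void my_fence_id_remove(struct my_queue *q, int id)
	{
		mutex_lock(&q->idr_lock);
		idr_remove(&q->fence_idr, id);
		mutex_unlock(&q->idr_lock);
	}
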
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c2bfcf3f1f40..0098ee8438aa 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -16,6 +16,7 @@
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
+#include <linux/reset.h>
#include <linux/sched/task.h>
/*
@@ -394,7 +395,6 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
- pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
@@ -423,9 +423,7 @@ static void recover_worker(struct kthread_work *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
- pm_runtime_put_sync(&gpu->pdev->dev);
/*
* Replay all remaining submits starting with highest priority
@@ -442,6 +440,8 @@ static void recover_worker(struct kthread_work *work)
}
}
+ pm_runtime_put(&gpu->pdev->dev);
+
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
@@ -664,11 +664,12 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
mutex_lock(&gpu->active_lock);
gpu->active_submits--;
WARN_ON(gpu->active_submits < 0);
- if (!gpu->active_submits)
+ if (!gpu->active_submits) {
msm_devfreq_idle(gpu);
- mutex_unlock(&gpu->active_lock);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ }
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
@@ -757,14 +758,17 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Update devfreq on transition from idle->active: */
mutex_lock(&gpu->active_lock);
- if (!gpu->active_submits)
+ if (!gpu->active_submits) {
+ pm_runtime_get(&gpu->pdev->dev);
msm_devfreq_active(gpu);
+ }
gpu->active_submits++;
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
}
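
Editor's note: the hunks above move runtime-PM references from per-job get/put pairs to the idle<->active transitions of the submit counter, so the device stays powered for as long as any submit is outstanding. A sketch of that pattern with placeholder names (not the msm_gpu structures):

	#include <linux/device.h>
	#include <linux/mutex.h>
	#include <linux/pm_runtime.h>

	struct my_gpu {
		struct device *dev;
		struct mutex active_lock;
		int active_submits;		/* protected by active_lock */
	};

	static void my_gpu_submit_begin(struct my_gpu *gpu)
	{
		mutex_lock(&gpu->active_lock);
		if (!gpu->active_submits)
			pm_runtime_get(gpu->dev);	/* first submit: leave runtime suspend */
		gpu->active_submits++;
		mutex_unlock(&gpu->active_lock);
	}

	static void my_gpu_submit_end(struct my_gpu *gpu)
	{
		mutex_lock(&gpu->active_lock);
		if (!--gpu->active_submits)
			pm_runtime_put_autosuspend(gpu->dev);	/* last submit retired */
		mutex_unlock(&gpu->active_lock);
	}
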
@@ -846,7 +850,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
sched_set_fifo_low(gpu->worker->task);
- INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
init_waitqueue_head(&gpu->retire_event);
@@ -901,6 +904,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
+ gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "cx_collapse");
+
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
@@ -974,8 +980,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- WARN_ON(!list_empty(&gpu->active_list));
-
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4d935fedd2ac..ff911e7305ce 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -13,6 +13,7 @@
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -187,12 +188,6 @@ struct msm_gpu {
*/
int cur_ctx_seqno;
- /*
- * List of GEM active objects on this gpu. Protected by
- * msm_drm_private::mm_lock
- */
- struct list_head active_list;
-
/**
* lock:
*
@@ -277,6 +272,9 @@ struct msm_gpu {
bool hw_apriv;
struct thermal_cooling_device *cooling;
+
+ /* To poll for cx gdsc collapse during gpu recovery */
+ struct reset_control *cx_collapse;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
@@ -466,7 +464,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @node: node in the context's list of submitqueues
* @fence_idr: maps fence-id to dma_fence for userspace visible fence
* seqno, protected by submitqueue lock
- * @lock: submitqueue lock
+ * @idr_lock: for serializing access to fence_idr
+ * @lock: submitqueue lock for serializing submits on a queue
* @ref: reference count
* @entity: the submit job-queue
*/
@@ -479,6 +478,7 @@ struct msm_gpu_submitqueue {
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
+ struct mutex idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d1f70426f554..85c443a37e4e 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_qos_remove_request(&df->idle_freq);
+ dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index ca0b08d7875b..ac40d857bc45 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -115,29 +115,27 @@ TRACE_EVENT(msm_gmu_freq_change,
);
-TRACE_EVENT(msm_gem_purge,
- TP_PROTO(u32 bytes),
- TP_ARGS(bytes),
+TRACE_EVENT(msm_gem_shrink,
+ TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
+ u32 active_purged, u32 active_evicted),
+ TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
TP_STRUCT__entry(
- __field(u32, bytes)
+ __field(u32, nr_to_scan)
+ __field(u32, purged)
+ __field(u32, evicted)
+ __field(u32, active_purged)
+ __field(u32, active_evicted)
),
TP_fast_assign(
- __entry->bytes = bytes;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->purged = purged;
+ __entry->evicted = evicted;
+ __entry->active_purged = active_purged;
+ __entry->active_evicted = active_evicted;
),
- TP_printk("Purging %u bytes", __entry->bytes)
-);
-
-
-TRACE_EVENT(msm_gem_evict,
- TP_PROTO(u32 bytes),
- TP_ARGS(bytes),
- TP_STRUCT__entry(
- __field(u32, bytes)
- ),
- TP_fast_assign(
- __entry->bytes = bytes;
- ),
- TP_printk("Evicting %u bytes", __entry->bytes)
+ TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
+ __entry->nr_to_scan, __entry->purged, __entry->evicted,
+ __entry->active_purged, __entry->active_evicted)
);
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index 7b504617833a..d02cd29ce829 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -5,6 +5,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/interconnect.h>
+
#include "msm_drv.h"
/*
@@ -124,3 +126,23 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
work->worker = worker;
kthread_init_work(&work->work, fn);
}
+
+struct icc_path *msm_icc_get(struct device *dev, const char *name)
+{
+ struct device *mdss_dev = dev->parent;
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (path)
+ return path;
+
+ /*
+ * If there are no interconnects attached to the corresponding device
+ * node, of_icc_get() will return NULL.
+ *
+ * If the MDP5/DPU device node doesn't have interconnects, lookup the
+ * path in the parent (MDSS) device.
+ */
+ return of_icc_get(mdss_dev, name);
+
+}
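
Editor's note: a possible caller of the helper added above, shown only as an illustration (the path name and function are made up for the example): look the path up via msm_icc_get(), which falls back to the parent MDSS node when the child has no interconnects property, then program a bandwidth vote.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/interconnect.h>

	static int example_request_mdp_bw(struct device *dev, u32 avg_kBps, u32 peak_kBps)
	{
		struct icc_path *path = msm_icc_get(dev, "mdp0-mem");

		/* NULL means no interconnect to vote on; an ERR_PTR is a real failure. */
		if (IS_ERR_OR_NULL(path))
			return path ? PTR_ERR(path) : 0;

		return icc_set_bw(path, avg_kBps, peak_kBps);
	}
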
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a54ed354578b..5577cea7c009 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,7 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
};
@@ -29,23 +30,84 @@ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
return container_of(mmu, struct msm_iommu_pagetable, base);
}
+/* based on iommu_pgsize() in iommu.c: */
+static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, size_t *count)
+{
+ unsigned int pgsize_idx, pgsize_idx_next;
+ unsigned long pgsizes;
+ size_t offset, pgsize, pgsize_next;
+ unsigned long addr_merge = paddr | iova;
+
+ /* Page sizes supported by the hardware and small enough for @size */
+ pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
+
+ /* Constrain the page sizes further based on the maximum alignment */
+ if (likely(addr_merge))
+ pgsizes &= GENMASK(__ffs(addr_merge), 0);
+
+ /* Make sure we have at least one suitable page size */
+ BUG_ON(!pgsizes);
+
+ /* Pick the biggest page size remaining */
+ pgsize_idx = __fls(pgsizes);
+ pgsize = BIT(pgsize_idx);
+ if (!count)
+ return pgsize;
+
+ /* Find the next biggest supported page size, if it exists */
+ pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+ if (!pgsizes)
+ goto out_set_count;
+
+ pgsize_idx_next = __ffs(pgsizes);
+ pgsize_next = BIT(pgsize_idx_next);
+
+ /*
+ * There's no point trying a bigger page size unless the virtual
+ * and physical addresses are similarly offset within the larger page.
+ */
+ if ((iova ^ paddr) & (pgsize_next - 1))
+ goto out_set_count;
+
+ /* Calculate the offset to the next page size alignment boundary */
+ offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+ /*
+ * If size is big enough to accommodate the larger page, reduce
+ * the number of smaller pages.
+ */
+ if (offset + pgsize_next <= size)
+ size = offset;
+
+out_set_count:
+ *count = size >> pgsize_idx;
+ return pgsize;
+}
+
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
size_t size)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
- size_t unmapped = 0;
- /* Unmap the block one page at a time */
while (size) {
- unmapped += ops->unmap(ops, iova, 4096, NULL);
- iova += 4096;
- size -= 4096;
+ size_t unmapped, pgsize, count;
+
+ pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
+
+ unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
+ if (!unmapped)
+ break;
+
+ iova += unmapped;
+ size -= unmapped;
}
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (unmapped == size) ? 0 : -EINVAL;
+ return (size == 0) ? 0 : -EINVAL;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
@@ -54,7 +116,6 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
struct scatterlist *sg;
- size_t mapped = 0;
u64 addr = iova;
unsigned int i;
@@ -62,17 +123,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
- /* Map the block one page at a time */
while (size) {
- if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
- msm_iommu_pagetable_unmap(mmu, iova, mapped);
+ size_t pgsize, count, mapped = 0;
+ int ret;
+
+ pgsize = calc_pgsize(pagetable, addr, phys, size, &count);
+
+ ret = ops->map_pages(ops, addr, phys, pgsize, count,
+ prot, GFP_KERNEL, &mapped);
+
+ /* map_pages could fail after mapping some of the pages,
+ * so update the counters before error handling.
+ */
+ phys += mapped;
+ addr += mapped;
+ size -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
return -EINVAL;
}
-
- phys += 4096;
- addr += 4096;
- size -= 4096;
- mapped += 4096;
}
}
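
Editor's note: calc_pgsize() picks the largest granule that is both supported by the page-table format and compatible with the remaining size and the combined iova/paddr alignment. A stand-alone arithmetic sketch of that selection (64-bit values, assuming 4K/2M/1G granules and at least one candidate, which the driver otherwise BUG_ONs):

	#include <stdio.h>

	static unsigned long long pick_pgsize(unsigned long long bitmap,
					      unsigned long long iova,
					      unsigned long long paddr,
					      unsigned long long size)
	{
		unsigned long long addr_merge = paddr | iova;
		/* sizes supported by the hardware and small enough for @size */
		unsigned long long pgsizes =
			bitmap & ((2ULL << (63 - __builtin_clzll(size))) - 1);

		/* constrain further by the combined address alignment */
		if (addr_merge)
			pgsizes &= (2ULL << __builtin_ctzll(addr_merge)) - 1;

		/* biggest candidate that survived both constraints */
		return 1ULL << (63 - __builtin_clzll(pgsizes));
	}

	int main(void)
	{
		unsigned long long bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);

		/* 2M-aligned addresses and a 4M range -> 2M granule */
		printf("%#llx\n", pick_pgsize(bitmap, 0x40200000ULL, 0x80200000ULL, 0x400000ULL));
		/* only 4K-aligned -> falls back to 4K */
		printf("%#llx\n", pick_pgsize(bitmap, 0x40201000ULL, 0x80200000ULL, 0x400000ULL));
		return 0;
	}
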
@@ -207,6 +277,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/*
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index a92ffde53f0b..db2f847c8535 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 56eecb4a72dc..cad4c3525f0b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -29,8 +29,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_unlock(obj);
}
- pm_runtime_get_sync(&gpu->pdev->dev);
-
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
@@ -38,8 +36,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&gpu->lock);
- pm_runtime_put(&gpu->pdev->dev);
-
return dma_fence_get(submit->hw_fence);
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index f486a3cd4e55..c6929e205b51 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -200,6 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
*id = queue->id;
idr_init(&queue->fence_idr);
+ mutex_init(&queue->idr_lock);
mutex_init(&queue->lock);
list_add_tail(&queue->node, &ctx->submitqueues);
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 873551b4552f..116f8168bda4 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,7 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
@@ -26,7 +26,7 @@ config DRM_IMX_LCDIF
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index befad33dcb95..075002ed6fb0 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -8,7 +8,6 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -16,11 +15,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
@@ -45,23 +42,11 @@ static int lcdif_attach_bridge(struct lcdif_drm_private *lcdif)
{
struct drm_device *drm = lcdif->drm;
struct drm_bridge *bridge;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel,
- &bridge);
- if (ret)
- return ret;
-
- if (panel) {
- bridge = devm_drm_panel_bridge_add_typed(drm->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
- }
-
- if (!bridge)
- return -ENODEV;
+ bridge = devm_drm_of_get_bridge(drm->dev, drm->dev->of_node, 0, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
ret = drm_bridge_attach(&lcdif->encoder, bridge, NULL, 0);
if (ret)
@@ -199,11 +184,11 @@ static void lcdif_unload(struct drm_device *drm)
drm->dev_private = NULL;
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver lcdif_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.h b/drivers/gpu/drm/mxsfb/lcdif_drv.h
index cb916341e845..6cdba6e20c02 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.h
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.h
@@ -8,6 +8,7 @@
#ifndef __LCDIF_DRV_H__
#define __LCDIF_DRV_H__
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index 1bec1279c8b5..b1092aab1423 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -17,13 +17,12 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "lcdif_drv.h"
@@ -123,8 +122,8 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
writel(ctrl, lcdif->base + LCDC_V8_CTRL);
- writel(DISP_SIZE_DELTA_Y(m->crtc_vdisplay) |
- DISP_SIZE_DELTA_X(m->crtc_hdisplay),
+ writel(DISP_SIZE_DELTA_Y(m->vdisplay) |
+ DISP_SIZE_DELTA_X(m->hdisplay),
lcdif->base + LCDC_V8_DISP_SIZE);
writel(HSYN_PARA_BP_H(m->htotal - m->hsync_end) |
@@ -139,8 +138,8 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
VSYN_HSYN_WIDTH_PW_H(m->hsync_end - m->hsync_start),
lcdif->base + LCDC_V8_VSYN_HSYN_WIDTH);
- writel(CTRLDESCL0_1_HEIGHT(m->crtc_vdisplay) |
- CTRLDESCL0_1_WIDTH(m->crtc_hdisplay),
+ writel(CTRLDESCL0_1_HEIGHT(m->vdisplay) |
+ CTRLDESCL0_1_WIDTH(m->hdisplay),
lcdif->base + LCDC_V8_CTRLDESCL0_1);
writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
@@ -204,7 +203,7 @@ static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
m->crtc_clock,
(int)(clk_get_rate(lcdif->clk) / 1000));
- DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n",
bus_flags);
DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
@@ -297,7 +296,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
lcdif_crtc_mode_set_nofb(lcdif, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
if (paddr) {
writel(lower_32_bits(paddr),
lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
@@ -393,8 +392,8 @@ static int lcdif_plane_atomic_check(struct drm_plane *plane,
&lcdif->crtc);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -406,7 +405,7 @@ static void lcdif_plane_primary_atomic_update(struct drm_plane *plane,
plane);
dma_addr_t paddr;
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
if (paddr) {
writel(lower_32_bits(paddr),
lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 55aad92e08ba..b29b332ed381 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -22,7 +22,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
@@ -324,11 +324,11 @@ static void mxsfb_unload(struct drm_device *drm)
pm_runtime_disable(drm->dev);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver mxsfb_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index e38ce5737a5f..3bcc9c0f2019 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -20,13 +20,12 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
@@ -353,7 +352,7 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_bridge_state *bridge_state = NULL;
struct drm_device *drm = mxsfb->drm;
u32 bus_format = 0;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
pm_runtime_get_sync(drm->dev);
mxsfb_enable_axi_clk(mxsfb);
@@ -389,10 +388,10 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
mxsfb_crtc_mode_set_nofb(mxsfb, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (paddr) {
- writel(paddr, mxsfb->base + mxsfb->devdata->cur_buf);
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (dma_addr) {
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->cur_buf);
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
}
mxsfb_enable_controller(mxsfb);
@@ -531,8 +530,8 @@ static int mxsfb_plane_atomic_check(struct drm_plane *plane,
&mxsfb->crtc);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -542,11 +541,11 @@ static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
plane);
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (paddr)
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (dma_addr)
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
}
static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
@@ -557,11 +556,11 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
plane);
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
u32 ctrl;
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (!paddr) {
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (!dma_addr) {
writel(0, mxsfb->base + LCDC_AS_CTRL);
return;
}
@@ -572,16 +571,16 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
* is understood, live with the 16 initial invalid pixels on the first
* line and start 64 bytes within the framebuffer.
*/
- paddr += 64;
+ dma_addr += 64;
- writel(paddr, mxsfb->base + LCDC_AS_NEXT_BUF);
+ writel(dma_addr, mxsfb->base + LCDC_AS_NEXT_BUF);
/*
* If the plane was previously disabled, write LCDC_AS_BUF as well to
* provide the first buffer.
*/
if (!old_pstate->fb)
- writel(paddr, mxsfb->base + LCDC_AS_BUF);
+ writel(dma_addr, mxsfb->base + LCDC_AS_BUF);
ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255);
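
Editor's note: the CMA->DMA helper rename above is mechanical, but the common usage pattern is worth a sketch: fetch the new plane state, resolve the scanout DMA address through the GEM DMA helper, and program it. This is an illustrative helper (the register pointer is passed in; it is not the actual atomic_update callback signature):

	#include <drm/drm_atomic.h>
	#include <drm/drm_fb_dma_helper.h>
	#include <drm/drm_plane.h>
	#include <linux/io.h>
	#include <linux/kernel.h>

	static void example_program_scanout(struct drm_plane *plane,
					    struct drm_atomic_state *state,
					    void __iomem *next_buf_reg)
	{
		struct drm_plane_state *new_state =
			drm_atomic_get_new_plane_state(state, plane);
		dma_addr_t dma_addr;

		dma_addr = drm_fb_dma_get_gem_addr(new_state->fb, new_state, 0);
		if (dma_addr)
			writel(lower_32_bits(dma_addr), next_buf_reg);
	}
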
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index f9e962fd94d0..ee92d576d277 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1275,31 +1275,9 @@ static const uint32_t modeset_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static struct drm_plane *
-create_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- /* possible_crtc's will be filled in later by crtc_init */
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- modeset_formats,
- ARRAY_SIZE(modeset_formats), NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
+static const struct drm_plane_funcs nv04_primary_plane_funcs = {
+ DRM_PLANE_NON_ATOMIC_FUNCS,
+};
static int nv04_crtc_vblank_handler(struct nvif_notify *notify)
{
@@ -1315,6 +1293,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
+ struct drm_plane *primary;
int ret;
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
@@ -1329,8 +1308,18 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nv_crtc->save = nv_crtc_save;
nv_crtc->restore = nv_crtc_restore;
- drm_crtc_init_with_planes(dev, &nv_crtc->base,
- create_primary_plane(dev), NULL,
+ primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0,
+ &nv04_primary_plane_funcs,
+ modeset_formats,
+ ARRAY_SIZE(modeset_formats), NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ kfree(nv_crtc);
+ return ret;
+ }
+
+ drm_crtc_init_with_planes(dev, &nv_crtc->base, primary, NULL,
&nv04_crtc_funcs, NULL);
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 37e63e98cd08..33f29736024a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -296,9 +296,10 @@ nv10_overlay_init(struct drm_device *device)
break;
}
- ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
- &nv10_plane_funcs,
- formats, num_formats, false);
+ ret = drm_universal_plane_init(device, &plane->base, 3 /* both crtc's */,
+ &nv10_plane_funcs,
+ formats, num_formats, NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
goto err;
@@ -475,9 +476,9 @@ nv04_overlay_init(struct drm_device *device)
if (!plane)
return;
- ret = drm_plane_init(device, &plane->base, 1 /* single crtc */,
- &nv04_plane_funcs,
- formats, 2, false);
+ ret = drm_universal_plane_init(device, &plane->base, 1 /* single crtc */,
+ &nv04_plane_funcs, formats, 2, NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index cad5a646983a..70c62b861276 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "nouveau_bo.h"
@@ -237,8 +236,8 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index cd2c79e4b7af..78ee32da01c8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -29,7 +29,6 @@
#include <nvhw/class/cl507a.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
bool
curs507a_space(struct nv50_wndw *wndw)
@@ -103,8 +102,8 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
asyh->curs.visible = asyw->state.visible;
if (ret || !asyh->curs.visible)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a53d685a77eb..33c97d510999 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -39,7 +39,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -933,6 +932,7 @@ struct nv50_msto {
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
+ bool enabled;
};
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
@@ -948,57 +948,37 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
return msto->mstc->mstm->outp;
}
-static struct drm_dp_payload *
-nv50_msto_payload(struct nv50_msto *msto)
-{
- struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
- struct nv50_mstc *mstc = msto->mstc;
- struct nv50_mstm *mstm = mstc->mstm;
- int vcpi = mstc->port->vcpi.vcpi, i;
-
- WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
-
- NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
- for (i = 0; i < mstm->mgr.max_payloads; i++) {
- struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
- NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
- mstm->outp->base.base.name, i, payload->vcpi,
- payload->start_slot, payload->num_slots);
- }
-
- for (i = 0; i < mstm->mgr.max_payloads; i++) {
- struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
- if (payload->vcpi == vcpi)
- return payload;
- }
-
- return NULL;
-}
-
static void
-nv50_msto_cleanup(struct nv50_msto *msto)
+nv50_msto_cleanup(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
- struct nv50_mstc *mstc = msto->mstc;
- struct nv50_mstm *mstm = mstc->mstm;
-
- if (!msto->disabled)
- return;
+ struct drm_dp_mst_atomic_payload *payload =
+ drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
- drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
-
- msto->mstc = NULL;
- msto->disabled = false;
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->disabled = false;
+ } else if (msto->enabled) {
+ drm_dp_add_payload_part2(mgr, state, payload);
+ msto->enabled = false;
+ }
}
static void
-nv50_msto_prepare(struct nv50_msto *msto)
+nv50_msto_prepare(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
+ struct drm_dp_mst_atomic_payload *payload;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
@@ -1010,17 +990,21 @@ nv50_msto_prepare(struct nv50_msto *msto)
(0x0100 << msto->head->base.index),
};
- mutex_lock(&mstm->mgr.payload_lock);
-
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
- if (mstc->port->vcpi.vcpi > 0) {
- struct drm_dp_payload *payload = nv50_msto_payload(msto);
- if (payload) {
- args.vcpi.start_slot = payload->start_slot;
- args.vcpi.num_slots = payload->num_slots;
- args.vcpi.pbn = mstc->port->vcpi.pbn;
- args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
- }
+
+ payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+
+ // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
+ if (msto->disabled) {
+ drm_dp_remove_payload(mgr, mst_state, payload);
+ } else {
+ if (msto->enabled)
+ drm_dp_add_payload_part1(mgr, mst_state, payload);
+
+ args.vcpi.start_slot = payload->vc_start_slot;
+ args.vcpi.num_slots = payload->time_slots;
+ args.vcpi.pbn = payload->pbn;
+ args.vcpi.aligned_pbn = payload->time_slots * mst_state->pbn_div;
}
NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
@@ -1029,7 +1013,6 @@ nv50_msto_prepare(struct nv50_msto *msto)
args.vcpi.pbn, args.vcpi.aligned_pbn);
nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
- mutex_unlock(&mstm->mgr.payload_lock);
}
static int
@@ -1039,6 +1022,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_connector *connector = conn_state->connector;
+ struct drm_dp_mst_topology_state *mst_state;
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nv50_mstm *mstm = mstc->mstm;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
@@ -1050,7 +1034,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (ret)
return ret;
- if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
/*
@@ -1066,8 +1050,18 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
false);
}
- slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
- asyh->dp.pbn, 0);
+ mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ if (!mst_state->pbn_div) {
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
+ outp->dp.link_bw, outp->dp.link_nr);
+ }
+
+ slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
if (slots < 0)
return slots;
@@ -1099,7 +1093,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 proto;
- bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
@@ -1114,10 +1107,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
if (WARN_ON(!mstc))
return;
- r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
- if (!r)
- DRM_DEBUG_KMS("Failed to allocate VCPI\n");
-
if (!mstm->links++)
nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
@@ -1130,6 +1119,7 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
nv50_dp_bpc_to_depth(asyh->or.bpc));
msto->mstc = mstc;
+ msto->enabled = true;
mstm->modified = true;
}
@@ -1140,8 +1130,6 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
- drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
-
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
if (!--mstm->links)
@@ -1256,29 +1244,8 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
- struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *new_crtc = new_conn_state->crtc;
-
- if (!old_conn_state->crtc)
- return 0;
-
- /* We only want to free VCPI if this state disables the CRTC on this
- * connector
- */
- if (new_crtc) {
- crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(crtc_state) ||
- crtc_state->enable)
- return 0;
- }
- return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
+ return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
}
static int
@@ -1382,7 +1349,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
}
static void
-nv50_mstm_cleanup(struct nv50_mstm *mstm)
+nv50_mstm_cleanup(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
@@ -1390,14 +1359,12 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm)
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
drm_dp_check_act_status(&mstm->mgr);
- drm_dp_update_payload_part2(&mstm->mgr);
-
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
- nv50_msto_cleanup(msto);
+ nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
}
}
@@ -1405,20 +1372,34 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_prepare(struct nv50_mstm *mstm)
+nv50_mstm_prepare(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
- drm_dp_update_payload_part1(&mstm->mgr, 1);
+ /* Disable payloads first */
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
- if (mstc && mstc->mstm == mstm)
- nv50_msto_prepare(msto);
+ if (mstc && mstc->mstm == mstm && msto->disabled)
+ nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
+ }
+ }
+
+ /* Add payloads for new heads, while also updating the start slots of any unmodified (but
+ * active) heads that may have had their VC slots shifted left after the previous step
+ */
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm && !msto->disabled)
+ nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
}
}
@@ -1615,9 +1596,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
- max_payloads, outp->dcb->dpconf.link_nr,
- drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw),
- conn_base_id);
+ max_payloads, conn_base_id);
if (ret)
return ret;
@@ -1835,7 +1814,7 @@ nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
-static bool nv50_has_mst(struct nouveau_drm *drm)
+bool nv50_has_mst(struct nouveau_drm *drm)
{
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
u32 data;
@@ -2069,20 +2048,20 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
- struct drm_encoder *encoder;
+ int i;
NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
- drm_for_each_encoder(encoder, drm->dev) {
- if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
- mstm = nouveau_encoder(encoder)->dp.mstm;
- if (mstm && mstm->modified)
- nv50_mstm_prepare(mstm);
- }
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ mstm = nv50_mstm(mgr);
+ if (mstm->modified)
+ nv50_mstm_prepare(state, mst_state, mstm);
}
core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
@@ -2091,12 +2070,10 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
disp->core->chan.base.device))
NV_ERROR(drm, "core notifier timeout\n");
- drm_for_each_encoder(encoder, drm->dev) {
- if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
- mstm = nouveau_encoder(encoder)->dp.mstm;
- if (mstm && mstm->modified)
- nv50_mstm_cleanup(mstm);
- }
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ mstm = nv50_mstm(mgr);
+ if (mstm->modified)
+ nv50_mstm_cleanup(state, mst_state, mstm);
}
}
@@ -2137,6 +2114,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_crc_atomic_stop_reporting(state);
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
@@ -2617,6 +2595,11 @@ nv50_disp_func = {
.atomic_state_free = nv50_disp_atomic_state_free,
};
+static const struct drm_mode_config_helper_funcs
+nv50_disp_helper_func = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
/******************************************************************************
* Init
*****************************************************************************/
@@ -2700,6 +2683,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
+ dev->mode_config.helper_private = &nv50_disp_helper_func;
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
dev->mode_config.normalize_zpos = true;
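
Editor's note: the conversion above moves nouveau onto the atomic MST payload API. The allocation side boils down to fetching the MST topology state, seeding pbn_div once, and reserving time slots for the mode's PBN. A minimal sketch of that check path (illustrative callback, not nouveau's; it only uses the calls visible in the hunks above):

	#include <drm/display/drm_dp_mst_helper.h>
	#include <drm/drm_atomic.h>
	#include <linux/err.h>

	static int example_mst_encoder_check(struct drm_atomic_state *state,
					     struct drm_dp_mst_topology_mgr *mgr,
					     struct drm_dp_mst_port *port,
					     int pbn, int link_bw, int link_nr)
	{
		struct drm_dp_mst_topology_state *mst_state;
		int slots;

		mst_state = drm_atomic_get_mst_topology_state(state, mgr);
		if (IS_ERR(mst_state))
			return PTR_ERR(mst_state);

		/* pbn_div is derived from the link once per topology state */
		if (!mst_state->pbn_div)
			mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr, link_bw, link_nr);

		slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
		return slots < 0 ? slots : 0;
	}
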
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 38dec11e7dda..9d66c9c726c3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -106,6 +106,8 @@ void nv50_dmac_destroy(struct nv50_dmac *);
*/
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
+bool nv50_has_mst(struct nouveau_drm *drm);
+
u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index d4af69e903ad..797c1e4e0eaa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -24,7 +24,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <nvif/if0014.h>
#include <nvif/push507c.h>
@@ -106,8 +105,8 @@ ovly507e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 082a66d59506..b3deea5aca58 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -23,7 +23,6 @@
#include "atom.h"
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <nouveau_bo.h>
#include <nvif/if0014.h>
@@ -297,8 +296,8 @@ wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
return drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 31167c398708..1d214a4b960a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -23,7 +23,6 @@
#include "atom.h"
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <nouveau_bo.h>
#include <nvif/pushc37b.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 6140db756d06..8cf096f841a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -386,3 +386,13 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
}
+
+bool nouveau_acpi_video_backlight_use_native(void)
+{
+ return acpi_video_backlight_use_native();
+}
+
+void nouveau_acpi_video_register_backlight(void)
+{
+ acpi_video_register_backlight();
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 330f9b837066..e39dd8b94b8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -11,6 +11,8 @@ void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
+bool nouveau_acpi_video_backlight_use_native(void);
+void nouveau_acpi_video_register_backlight(void);
#else
static inline bool nouveau_is_optimus(void) { return false; };
static inline bool nouveau_is_v1_dsm(void) { return false; };
@@ -18,6 +20,8 @@ static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
+static inline bool nouveau_acpi_video_backlight_use_native(void) { return true; }
+static inline void nouveau_acpi_video_register_backlight(void) {}
#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index a2141d3d9b1d..a614582779ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -38,6 +38,7 @@
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+#include "nouveau_acpi.h"
static struct ida bl_ida;
#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
@@ -405,6 +406,11 @@ nouveau_backlight_init(struct drm_connector *connector)
goto fail_alloc;
}
+ if (!nouveau_acpi_video_backlight_use_native()) {
+ NV_INFO(drm, "Skipping nv_backlight registration\n");
+ goto fail_alloc;
+ }
+
if (!nouveau_get_backlight_name(backlight_name, bl)) {
NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
goto fail_alloc;
@@ -430,6 +436,13 @@ nouveau_backlight_init(struct drm_connector *connector)
fail_alloc:
kfree(bl);
+ /*
+ * If we get here we have an internal panel, but no nv_backlight,
+ * try registering an ACPI video backlight device instead.
+ */
+ if (ret == 0)
+ nouveau_acpi_video_register_backlight();
+
return ret;
}
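
Editor's note: the backlight change above defers to the firmware's preference: only register a native backlight when ACPI says the native interface should be used, otherwise hand control to the ACPI video driver. A small hedged sketch of that decision (helper name is illustrative):

	#include <acpi/video.h>
	#include <linux/types.h>

	/* Returns true when the driver should register its own backlight device;
	 * otherwise the ACPI video backlight is registered instead.
	 */
	static bool example_want_native_backlight(void)
	{
		if (acpi_video_backlight_use_native())
			return true;

		acpi_video_register_backlight();
		return false;
	}
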
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 05076e530e7d..126b3c6e12f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -281,8 +281,10 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
break;
}
- if (WARN_ON(pi < 0))
+ if (WARN_ON(pi < 0)) {
+ kfree(nvbo);
return ERR_PTR(-EINVAL);
+ }
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
@@ -307,9 +309,9 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
- ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
- &nvbo->placement, align >> PAGE_SHIFT, false, sg,
- robj, nouveau_bo_del_ttm);
+ ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
+ &nvbo->placement, align >> PAGE_SHIFT, false,
+ sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
@@ -820,6 +822,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+ * Without this the operation can timeout and we'll fallback to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
@@ -1006,7 +1017,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
}
/* Fake bo copy. */
- if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+ if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM &&
+ !bo->ttm)) {
ttm_bo_move_null(bo, new_reg);
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 43a9d1e1cf71..1991bbb1d05c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -504,7 +504,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed =
nv_encoder->caps.dp_interlace;
else
- connector->interlace_allowed = true;
+ connector->interlace_allowed =
+ drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1105,11 +1106,25 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
+static int
+nouveau_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state)
+{
+ struct nouveau_connector *nv_conn = nouveau_connector(connector);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+
+ if (!nv_conn->dp_encoder || !nv50_has_mst(nouveau_drm(connector->dev)))
+ return 0;
+
+ return drm_dp_mst_root_conn_atomic_check(conn_state, &nv_conn->dp_encoder->dp.mstm->mgr);
+}
+
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
.mode_valid = nouveau_connector_mode_valid,
.best_encoder = nouveau_connector_best_encoder,
+ .atomic_check = nouveau_connector_atomic_check,
};
static const struct drm_connector_funcs
@@ -1367,7 +1382,7 @@ nouveau_connector_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
drm_dp_aux_init(&nv_connector->aux);
- fallthrough;
+ break;
default:
funcs = &nouveau_connector_funcs;
break;
@@ -1430,6 +1445,8 @@ nouveau_connector_create(struct drm_device *dev,
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
+ nv_connector->dp_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
+ fallthrough;
case DRM_MODE_CONNECTOR_eDP:
drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 4bf0c703eee7..f4e17ff68bf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -128,6 +128,9 @@ struct nouveau_connector {
struct drm_dp_aux aux;
+ /* The fixed DP encoder for this connector, if there is one */
+ struct nouveau_encoder *dp_encoder;
+
int dithering_mode;
int scaling_mode;
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 1c3104d20571..a7db7c31064b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -211,75 +211,24 @@ static const struct attribute_group temp1_auto_point_sensor_group = {
#define N_ATTR_GROUPS 3
-static const u32 nouveau_config_chip[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const u32 nouveau_config_in[] = {
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_LABEL,
- 0
-};
-
-static const u32 nouveau_config_temp[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
- HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_EMERGENCY |
- HWMON_T_EMERGENCY_HYST,
- 0
-};
-
-static const u32 nouveau_config_fan[] = {
- HWMON_F_INPUT,
- 0
-};
-
-static const u32 nouveau_config_pwm[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const u32 nouveau_config_power[] = {
- HWMON_P_INPUT | HWMON_P_CAP_MAX | HWMON_P_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nouveau_chip = {
- .type = hwmon_chip,
- .config = nouveau_config_chip,
-};
-
-static const struct hwmon_channel_info nouveau_temp = {
- .type = hwmon_temp,
- .config = nouveau_config_temp,
-};
-
-static const struct hwmon_channel_info nouveau_fan = {
- .type = hwmon_fan,
- .config = nouveau_config_fan,
-};
-
-static const struct hwmon_channel_info nouveau_in = {
- .type = hwmon_in,
- .config = nouveau_config_in,
-};
-
-static const struct hwmon_channel_info nouveau_pwm = {
- .type = hwmon_pwm,
- .config = nouveau_config_pwm,
-};
-
-static const struct hwmon_channel_info nouveau_power = {
- .type = hwmon_power,
- .config = nouveau_config_power,
-};
-
static const struct hwmon_channel_info *nouveau_info[] = {
- &nouveau_chip,
- &nouveau_temp,
- &nouveau_fan,
- &nouveau_in,
- &nouveau_pwm,
- &nouveau_power,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_EMERGENCY | HWMON_T_EMERGENCY_HYST),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT |
+ HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CAP_MAX | HWMON_P_CRIT),
NULL
};
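/*
 * For reference (a sketch, not part of the patch): the HWMON_CHANNEL_INFO()
 * helper used above comes from <linux/hwmon.h> and expands to roughly the
 * compound literal below, which is why the hand-rolled nouveau_config_* /
 * nouveau_* channel-info pairs can be dropped wholesale.
 */
#define DEMO_HWMON_CHANNEL_INFO(stype, ...)			\
	(&(struct hwmon_channel_info) {				\
		.type = hwmon_##stype,				\
		.config = (u32 []) {				\
			__VA_ARGS__, 0	/* implicit terminator */ \
		}						\
	})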
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2e517cdc24c9..76f8edefa637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -187,3 +187,32 @@ nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
*res = &mem->base;
return 0;
}
+
+bool
+nouveau_mem_intersects(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ u32 num_pages = PFN_UP(size);
+
+ /* Don't evict BOs outside of the requested placement range */
+ if (place->fpfn >= (res->start + num_pages) ||
+ (place->lpfn && place->lpfn <= res->start))
+ return false;
+
+ return true;
+}
+
+bool
+nouveau_mem_compatible(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ u32 num_pages = PFN_UP(size);
+
+ if (res->start < place->fpfn ||
+ (place->lpfn && (res->start + num_pages) > place->lpfn))
+ return false;
+
+ return true;
+}
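/*
 * A worked example (illustration only, not part of the patch) of the two
 * helpers above, assuming 4 KiB pages: a resource at page 100 covering
 * 12 KiB occupies pages 100..102, since PFN_UP(12 KiB) == 3. Struct types
 * come from <drm/ttm/ttm_resource.h> and <drm/ttm/ttm_placement.h>.
 */
static inline void demo_mem_checks(void)
{
	struct ttm_resource res = { .start = 100 };
	struct ttm_place place = { .fpfn = 102, .lpfn = 200 };
	size_t size = 12 << 10;

	/* Overlaps the requested range [102, 200), so it may be evicted. */
	WARN_ON(!nouveau_mem_intersects(&res, &place, size));

	/* But it starts below fpfn, so it is not already compatible. */
	WARN_ON(nouveau_mem_compatible(&res, &place, size));
}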
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 325551eba5cd..1ee6cdb9ad9b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -25,6 +25,12 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource_manager *man,
struct ttm_resource *);
+bool nouveau_mem_intersects(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool nouveau_mem_compatible(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 347488685f74..9608121e49b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,7 +71,6 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
sg, robj);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
goto unlock;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 85f1f5a0fe5d..9602c30928f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -42,6 +42,24 @@ nouveau_manager_del(struct ttm_resource_manager *man,
nouveau_mem_del(man, reg);
}
+static bool
+nouveau_manager_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return nouveau_mem_intersects(res, place, size);
+}
+
+static bool
+nouveau_manager_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return nouveau_mem_compatible(res, place, size);
+}
+
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
@@ -73,6 +91,8 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
const struct ttm_resource_manager_func nouveau_vram_manager = {
.alloc = nouveau_vram_manager_new,
.free = nouveau_manager_del,
+ .intersects = nouveau_manager_intersects,
+ .compatible = nouveau_manager_compatible,
};
static int
@@ -97,6 +117,8 @@ nouveau_gart_manager_new(struct ttm_resource_manager *man,
const struct ttm_resource_manager_func nouveau_gart_manager = {
.alloc = nouveau_gart_manager_new,
.free = nouveau_manager_del,
+ .intersects = nouveau_manager_intersects,
+ .compatible = nouveau_manager_compatible,
};
static int
@@ -130,6 +152,8 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
const struct ttm_resource_manager_func nv04_gart_manager = {
.alloc = nv04_gart_manager_new,
.free = nouveau_manager_del,
+ .intersects = nouveau_manager_intersects,
+ .compatible = nouveau_manager_compatible,
};
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 568182e68dd7..d8cf71fb0512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2605,6 +2605,27 @@ nv172_chipset = {
};
static const struct nvkm_device_chip
+nv173_chipset = {
+ .name = "GA103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = { 0x00000001, tu102_bar_new },
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index b4a308f3cf7b..49e2664a734c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -64,12 +64,9 @@ nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_pmuE *info)
{
u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
- memset(info, 0x00, sizeof(*info));
- switch (!!data * *ver) {
- default:
+ if (data) {
info->type = nvbios_rd08(bios, data + 0x00);
info->data = nvbios_rd32(bios, data + 0x02);
- break;
}
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index a139dafffe06..7c33542f651b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -581,7 +581,7 @@ gm20b_clk_prog(struct nvkm_clk *base)
/*
* Interim step for changing DVFS detection settings: low enough
- * frequency to be safe at at DVFS coeff = 0.
+ * frequency to be safe at DVFS coeff = 0.
*
* 1. If voltage is increasing:
* - safe frequency target matches the lowest - old - frequency
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c4de142cc85b..0ee344ebcd1c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -2451,7 +2451,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
*decim_x = DIV_ROUND_UP(width, in_width_max);
- *decim_x = *decim_x > decim_x_min ? *decim_x : decim_x_min;
+ *decim_x = max(*decim_x, decim_x_min);
if (*decim_x > *x_predecim)
return -EINVAL;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 0399f3390a0a..c4febb861910 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1176,6 +1176,7 @@ static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
default:
break;
}
+ of_node_put(port);
}
}
@@ -1208,11 +1209,13 @@ static int dss_init_ports(struct dss_device *dss)
default:
break;
}
+ of_node_put(port);
}
return 0;
error:
+ of_node_put(port);
__dss_uninit_ports(dss, i);
return r;
}
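/*
 * The two hunks above fix a device-node reference leak: every node handed
 * back by the OF graph API carries a reference that must be dropped on
 * every exit path. A minimal sketch of the rule (the loop body and helper
 * are illustrative, not the driver's actual code):
 */
static int demo_init_one_port(struct device_node *port);	/* hypothetical */

static int demo_init_ports(struct device_node *parent, unsigned int num_ports)
{
	unsigned int i;
	int r;

	for (i = 0; i < num_ports; i++) {
		struct device_node *port = of_graph_get_port_by_id(parent, i);

		if (!port)
			continue;

		r = demo_init_one_port(port);
		if (r) {
			of_node_put(port);	/* drop the ref on error ... */
			return r;
		}
		of_node_put(port);		/* ... and on the normal path */
	}
	return 0;
}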
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 06a719c104f4..63ddc5127f7b 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index ac869acf80ea..61a27dd7392e 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -813,10 +813,8 @@ static int omap_dmm_probe(struct platform_device *dev)
}
omap_dmm->irq = platform_get_irq(dev, 0);
- if (omap_dmm->irq < 0) {
- dev_err(&dev->dev, "failed to get IRQ resource\n");
+ if (omap_dmm->irq < 0)
goto fail;
- }
omap_dmm->dev = &dev->dev;
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c
index b0bc9ad2ef73..fb97c74386f2 100644
--- a/drivers/gpu/drm/omapdrm/omap_overlay.c
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.c
@@ -6,7 +6,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index b6cb537f7689..24a2ded08b45 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a9043eacce97..a582ddd583c2 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -165,8 +165,8 @@ config DRM_PANEL_ILITEK_IL9322
config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels"
depends on OF && SPI
- depends on DRM_KMS_HELPER
- depends on DRM_GEM_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI
help
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index 174ff434bd71..b3235781e6ba 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -321,7 +321,7 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+static void tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
{
struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -332,8 +332,6 @@ static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
"Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id tm5p5_nt35596_of_match[] = {
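/*
 * The same conversion repeats for every DSI panel driver below: the
 * mipi_dsi_driver ->remove() callback now returns void, so drivers only
 * log detach failures instead of propagating an error nothing acts on.
 * A minimal sketch of the resulting pattern (driver and struct names are
 * illustrative, not from any one driver):
 */
struct demo_panel {
	struct drm_panel panel;
};

static void demo_panel_remove(struct mipi_dsi_device *dsi)
{
	struct demo_panel *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);	/* nothing left to return */
}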
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index ef00cd67dc40..ad58840eda41 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -410,7 +410,7 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
+static void boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
{
struct boe_bf060y8m_aj0 *boe = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -420,8 +420,6 @@ static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&boe->panel);
-
- return 0;
}
static const struct of_device_id boe_bf060y8m_aj0_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index 42854bd37fd5..d879b3b14c48 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -919,7 +919,7 @@ static int panel_probe(struct mipi_dsi_device *dsi)
return err;
}
-static int panel_remove(struct mipi_dsi_device *dsi)
+static void panel_remove(struct mipi_dsi_device *dsi)
{
struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
int err;
@@ -937,8 +937,6 @@ static int panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
drm_panel_remove(&pinfo->base);
-
- return 0;
}
static void panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 07f722f33fc5..857a2f0420d7 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1622,7 +1622,7 @@ static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
drm_panel_unprepare(&boe->base);
}
-static int boe_panel_remove(struct mipi_dsi_device *dsi)
+static void boe_panel_remove(struct mipi_dsi_device *dsi)
{
struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -1635,8 +1635,6 @@ static int boe_panel_remove(struct mipi_dsi_device *dsi)
if (boe->base.dev)
drm_panel_remove(&boe->base);
-
- return 0;
}
static const struct of_device_id boe_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index b0213a518f9d..ba17bcc4461c 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -579,7 +579,7 @@ err_bl:
return r;
}
-static int dsicm_remove(struct mipi_dsi_device *dsi)
+static void dsicm_remove(struct mipi_dsi_device *dsi)
{
struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
@@ -593,8 +593,6 @@ static int dsicm_remove(struct mipi_dsi_device *dsi)
if (ddata->extbldev)
put_device(&ddata->extbldev->dev);
-
- return 0;
}
static const struct dsic_panel_data taal_data = {
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 386f8321b930..e85d63a176d0 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -250,7 +250,7 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
+static void ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
{
struct ebbg_ft8719 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -260,8 +260,6 @@ static int ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ebbg_ft8719_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index cdb154c8b866..4b39d1dd9140 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -53,7 +53,7 @@ struct panel_delay {
* before the HPD signal is reliable. Ideally this is 0 but some panels,
* board designs, or bad pulldown configs can cause a glitch here.
*
- * NOTE: on some old panel data this number appers to be much too big.
+ * NOTE: on some old panel data this number appears to be much too big.
* Presumably some old panels simply didn't have HPD hooked up and put
* the hpd_absent here because this field predates the
* hpd_absent. While that works, it's non-ideal.
@@ -403,17 +403,10 @@ static int panel_edp_unprepare(struct drm_panel *panel)
static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
{
- int err;
-
p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
- if (IS_ERR(p->hpd_gpio)) {
- err = PTR_ERR(p->hpd_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(p->hpd_gpio))
+ return dev_err_probe(dev, PTR_ERR(p->hpd_gpio),
+ "failed to get 'hpd' GPIO\n");
return 0;
}
@@ -832,12 +825,9 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio)) {
- err = PTR_ERR(panel->enable_gpio);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
@@ -1295,7 +1285,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
},
.delay = {
.hpd_absent = 200,
- .prepare_to_enable = 80,
+ .enable = 80,
+ .disable = 50,
.unprepare = 500,
},
};
@@ -1854,6 +1845,12 @@ static const struct panel_delay delay_100_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1870,6 +1867,9 @@ static const struct panel_delay delay_100_500_e200 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
@@ -1877,10 +1877,19 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "M133NW4J-R3"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
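/*
 * The shared delay tables used above encode per-panel timing in
 * milliseconds in their names; delay_200_500_e200, defined earlier in this
 * hunk, means hpd_absent = 200, unprepare = 500, enable = 200, and the
 * other delay_* tables follow the same scheme. A hypothetical new entry
 * (vendor characters, product ID and name invented purely for
 * illustration) would simply reuse one of them:
 *
 *	EDP_PANEL_ENTRY('X', 'Y', 'Z', 0x1234, &delay_200_500_e200, "EXAMPLE-PANEL"),
 */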
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index 01dd555a7f26..eee714cf3f49 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -321,7 +321,7 @@ static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int kd35t133_remove(struct mipi_dsi_device *dsi)
+static void kd35t133_remove(struct mipi_dsi_device *dsi)
{
struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -333,8 +333,6 @@ static int kd35t133_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id kd35t133_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index cb0bb3076099..76572c922983 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -486,14 +486,12 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+static void k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
{
struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id k101_im2ba02_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index ee61d60eceae..df493da50afe 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -233,14 +233,12 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int feiyang_dsi_remove(struct mipi_dsi_device *dsi)
+static void feiyang_dsi_remove(struct mipi_dsi_device *dsi)
{
struct feiyang *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id feiyang_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 6826f4d4826a..39dc40cf681f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -32,7 +32,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -576,6 +576,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = ili9341_dbi_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -586,12 +587,12 @@ static const struct drm_display_mode ili9341_dbi_mode = {
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9341_dbi_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops);
static struct drm_driver ili9341_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_dbi_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 596861269774..cbb68caa36f2 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -923,14 +923,12 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return mipi_dsi_attach(dsi);
}
-static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+static void ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
{
struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct ili9881c_desc lhr050h41_desc = {
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index f194b62e290c..9992d0d4c0e5 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -506,7 +506,7 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int innolux_panel_remove(struct mipi_dsi_device *dsi)
+static void innolux_panel_remove(struct mipi_dsi_device *dsi)
{
struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
int err;
@@ -524,8 +524,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
innolux_panel_del(innolux);
-
- return 0;
}
static void innolux_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 31eafbc38ec0..d8765b2294fb 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -288,7 +288,7 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
+static void jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
{
struct jdi_fhd_r63452 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -298,8 +298,6 @@ static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id jdi_fhd_r63452_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 3c86ad262d5e..8f4f137a2af6 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -482,7 +482,7 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int jdi_panel_remove(struct mipi_dsi_device *dsi)
+static void jdi_panel_remove(struct mipi_dsi_device *dsi)
{
struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -497,8 +497,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
ret);
jdi_panel_del(jdi);
-
- return 0;
}
static void jdi_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index a3ec4cbdbf7a..1ab1ebe30882 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -830,7 +830,7 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
return err;
}
-static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
+static void khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
{
struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
int err;
@@ -842,8 +842,6 @@ static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&khadas_ts050->base);
drm_panel_disable(&khadas_ts050->base);
drm_panel_unprepare(&khadas_ts050->base);
-
- return 0;
}
static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index daccb1fd5fda..17f8d80cf2b3 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -415,7 +415,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
+static void kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
{
struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
int err;
@@ -433,8 +433,6 @@ static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
kingdisplay_panel_del(kingdisplay);
-
- return 0;
}
static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index a5a414920430..5619f186d28c 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -628,7 +628,7 @@ static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+static void ltk050h3146w_remove(struct mipi_dsi_device *dsi)
{
struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -640,8 +640,6 @@ static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ltk050h3146w_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 21e48923836d..39e408c9f762 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -477,7 +477,7 @@ static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
+static void ltk500hd1829_remove(struct mipi_dsi_device *dsi)
{
struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -489,8 +489,6 @@ static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ltk500hd1829_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 31daae1da9c9..772e3b6acece 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -336,7 +336,7 @@ static void mantix_shutdown(struct mipi_dsi_device *dsi)
drm_panel_disable(&ctx->panel);
}
-static int mantix_remove(struct mipi_dsi_device *dsi)
+static void mantix_remove(struct mipi_dsi_device *dsi)
{
struct mantix *ctx = mipi_dsi_get_drvdata(dsi);
@@ -344,8 +344,6 @@ static int mantix_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id mantix_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 40ea41b0a5dd..493c3c23f0d6 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -231,7 +231,7 @@ struct nt35510_config {
* bits 0..2 in the lower nibble controls HCK, the booster clock
* frequency, the values are the same as for PCK in @bt1ctr.
* bits 4..5 in the upper nibble controls BTH, the boosting
- * amplification for the the step-up circuit.
+ * amplification for the step-up circuit.
* 0 = AVDD + VDDB
* 1 = AVDD - AVEE
* 2 = AVDD - AVEE + VDDB
@@ -966,7 +966,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt35510_remove(struct mipi_dsi_device *dsi)
+static void nt35510_remove(struct mipi_dsi_device *dsi)
{
struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -974,9 +974,10 @@ static int nt35510_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
/* Power off */
ret = nt35510_power_off(nt);
- drm_panel_remove(&nt->panel);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to power off\n");
- return ret;
+ drm_panel_remove(&nt->panel);
}
/*
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 1b6042321ea1..cc7f96d70826 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -523,14 +523,12 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt35560_remove(struct mipi_dsi_device *dsi)
+static void nt35560_remove(struct mipi_dsi_device *dsi)
{
struct nt35560 *nt = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&nt->panel);
-
- return 0;
}
static const struct of_device_id nt35560_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 288c7fa83ecc..3a844917da07 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -620,7 +620,7 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt35950_remove(struct mipi_dsi_device *dsi)
+static void nt35950_remove(struct mipi_dsi_device *dsi)
{
struct nt35950 *nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -639,8 +639,6 @@ static int nt35950_remove(struct mipi_dsi_device *dsi)
}
drm_panel_remove(&nt->panel);
-
- return 0;
}
static const struct nt35950_panel_mode sharp_ls055d1sx04_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 6d6ce42787e2..73bcffa1e0c1 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -669,7 +669,7 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
+static void nt36672a_panel_remove(struct mipi_dsi_device *dsi)
{
struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
int err;
@@ -687,8 +687,6 @@ static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
drm_panel_remove(&pinfo->base);
-
- return 0;
}
static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index cb5cb27462df..36a46cb7fe1c 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,7 +288,7 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
return 0;
}
-static int lcd_olinuxino_remove(struct i2c_client *client)
+static void lcd_olinuxino_remove(struct i2c_client *client)
{
struct lcd_olinuxino *panel = i2c_get_clientdata(client);
@@ -296,8 +296,6 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
drm_panel_disable(&panel->panel);
drm_panel_unprepare(&panel->panel);
-
- return 0;
}
static const struct of_device_id lcd_olinuxino_of_ids[] = {
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index dfb43b1374e7..b4729a94c34a 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -497,14 +497,12 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int otm8009a_remove(struct mipi_dsi_device *dsi)
+static void otm8009a_remove(struct mipi_dsi_device *dsi)
{
struct otm8009a *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id orisetech_otm8009a_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index 198493a6eb6a..493e0504f6f7 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -206,7 +206,7 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
+static void osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
{
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -221,8 +221,6 @@ static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
-
- return ret;
}
static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3991f5d950af..8ba6d8287938 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -250,7 +250,7 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
+static void wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
{
struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -264,8 +264,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
wuxga_nt_panel_del(wuxga_nt);
-
- return 0;
}
static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index a6dc5ab182fa..79f852465a84 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -446,7 +446,7 @@ error:
return -ENODEV;
}
-static int rpi_touchscreen_remove(struct i2c_client *i2c)
+static void rpi_touchscreen_remove(struct i2c_client *i2c)
{
struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
@@ -455,8 +455,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
-
- return 0;
}
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 4e021a572211..dbb1ed4efbed 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -616,7 +616,7 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int rad_panel_remove(struct mipi_dsi_device *dsi)
+static void rad_panel_remove(struct mipi_dsi_device *dsi)
{
struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
struct device *dev = &dsi->dev;
@@ -627,8 +627,6 @@ static int rad_panel_remove(struct mipi_dsi_device *dsi)
dev_err(dev, "Failed to detach from host (%d)\n", ret);
drm_panel_remove(&rad->panel);
-
- return 0;
}
static void rad_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 412c0dbcb2b6..5f9b340588fb 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -412,14 +412,12 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int rm68200_remove(struct mipi_dsi_device *dsi)
+static void rm68200_remove(struct mipi_dsi_device *dsi)
{
struct rm68200 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id raydium_rm68200_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 1fb579a574d9..a8a98c91b13c 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -208,14 +208,12 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
+static void rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
{
struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id rb070d30_panel_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 70560cac53a9..008e2b0d6652 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -212,14 +212,12 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int s6d16d0_remove(struct mipi_dsi_device *dsi)
+static void s6d16d0_remove(struct mipi_dsi_device *dsi)
{
struct s6d16d0 *s6 = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&s6->panel);
-
- return 0;
}
static const struct of_device_id s6d16d0_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 0ab1b7ec84cd..5c621b15e84c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -747,15 +747,13 @@ remove_panel:
return ret;
}
-static int s6e3ha2_remove(struct mipi_dsi_device *dsi)
+static void s6e3ha2_remove(struct mipi_dsi_device *dsi)
{
struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
backlight_device_unregister(ctx->bl_dev);
-
- return 0;
}
static const struct of_device_id s6e3ha2_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index e38262b67ff7..e06fd35de814 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -488,7 +488,7 @@ remove_panel:
return ret;
}
-static int s6e63j0x03_remove(struct mipi_dsi_device *dsi)
+static void s6e63j0x03_remove(struct mipi_dsi_device *dsi)
{
struct s6e63j0x03 *ctx = mipi_dsi_get_drvdata(dsi);
@@ -496,8 +496,6 @@ static int s6e63j0x03_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
backlight_device_unregister(ctx->bl_dev);
-
- return 0;
}
static const struct of_device_id s6e63j0x03_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
index e0f773678168..ed3895e4ca5e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
@@ -113,11 +113,10 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi)
+static void s6e63m0_dsi_remove(struct mipi_dsi_device *dsi)
{
mipi_dsi_detach(dsi);
s6e63m0_remove(&dsi->dev);
- return 0;
}
static const struct of_device_id s6e63m0_dsi_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 29fde3823212..97ff7a18545c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -254,7 +254,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+static void s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
{
struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -264,8 +264,6 @@ static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 9b3599d6d2de..54213beafaf5 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1028,14 +1028,12 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
+static void s6e8aa0_remove(struct mipi_dsi_device *dsi)
{
struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id s6e8aa0_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 1fb37fda4ba9..1a0d24595faa 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -305,7 +305,7 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sofef00_panel_remove(struct mipi_dsi_device *dsi)
+static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
{
struct sofef00_panel *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -315,8 +315,6 @@ static int sofef00_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id sofef00_panel_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index f8cd2a42ed13..14851408a5e1 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -391,7 +391,7 @@ static int sharp_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sharp_panel_remove(struct mipi_dsi_device *dsi)
+static void sharp_panel_remove(struct mipi_dsi_device *dsi)
{
struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
int err;
@@ -399,7 +399,7 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
/* only detach from host for the DSI-LINK2 interface */
if (!sharp) {
mipi_dsi_detach(dsi);
- return 0;
+ return;
}
err = drm_panel_disable(&sharp->base);
@@ -411,8 +411,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
sharp_panel_del(sharp);
-
- return 0;
}
static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 25829a0a8e80..d1ec80a3e3c7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -305,7 +305,7 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
+static void sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
{
struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -319,8 +319,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
sharp_nt_panel_del(sharp_nt);
-
- return 0;
}
static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index e12570561629..8a4e0c1fe73f 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -298,7 +298,7 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sharp_ls060_remove(struct mipi_dsi_device *dsi)
+static void sharp_ls060_remove(struct mipi_dsi_device *dsi)
{
struct sharp_ls060 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -308,8 +308,6 @@ static int sharp_ls060_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id sharp_ls060t1sx01_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ff5e1a44c43a..2944228a8e2c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -575,12 +575,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio)) {
- err = PTR_ERR(panel->enable_gpio);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
@@ -696,7 +693,7 @@ free_ddc:
return err;
}
-static int panel_simple_remove(struct device *dev)
+static void panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
@@ -708,8 +705,6 @@ static int panel_simple_remove(struct device *dev)
pm_runtime_disable(dev);
if (panel->ddc)
put_device(&panel->ddc->dev);
-
- return 0;
}
static void panel_simple_shutdown(struct device *dev)
@@ -2257,7 +2252,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -2703,6 +2698,36 @@ static const struct panel_desc multi_inno_mi0700s4t_6 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing multi_inno_mi0800ft_9_timing = {
+ .pixelclock = { 32000000, 40000000, 50000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 6, 26, 45 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 12, 77 },
+ .vback_porch = { 3, 13, 22 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc multi_inno_mi0800ft_9 = {
+ .timings = &multi_inno_mi0800ft_9_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 162,
+ .height = 122,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct display_timing multi_inno_mi1010ait_1cp_timing = {
.pixelclock = { 68900000, 70000000, 73400000 },
.hactive = { 1280, 1280, 1280 },
@@ -3220,6 +3245,37 @@ static const struct panel_desc rocktech_rk101ii01d_ct = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing samsung_ltl101al01_timing = {
+ .pixelclock = { 66663000, 66663000, 66663000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 18, 18 },
+ .hback_porch = { 36, 36, 36 },
+ .hsync_len = { 16, 16, 16 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 16, 16, 16 },
+ .vsync_len = { 3, 3, 3 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc samsung_ltl101al01 = {
+ .timings = &samsung_ltl101al01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 135,
+ },
+ .delay = {
+ .prepare = 40,
+ .enable = 300,
+ .disable = 200,
+ .unprepare = 600,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -4104,6 +4160,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
}, {
+ .compatible = "multi-inno,mi0800ft-9",
+ .data = &multi_inno_mi0800ft_9,
+ }, {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
@@ -4164,6 +4223,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk101ii01d-ct",
.data = &rocktech_rk101ii01d_ct,
}, {
+ .compatible = "samsung,ltl101al01",
+ .data = &samsung_ltl101al01,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
@@ -4273,7 +4335,9 @@ static int panel_simple_platform_probe(struct platform_device *pdev)
static int panel_simple_platform_remove(struct platform_device *pdev)
{
- return panel_simple_remove(&pdev->dev);
+ panel_simple_remove(&pdev->dev);
+
+ return 0;
}
static void panel_simple_platform_shutdown(struct platform_device *pdev)
@@ -4566,7 +4630,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
return err;
}
-static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+static void panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
{
int err;
@@ -4574,7 +4638,7 @@ static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
- return panel_simple_remove(&dsi->dev);
+ panel_simple_remove(&dsi->dev);
}
static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
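/*
 * For the panel_desc entries added above: each display_timing field is a
 * { min, typ, max } triple (struct timing_entry from
 * <video/display_timing.h>), so the mi0800ft-9 pixel clock declared above
 * allows 32-50 MHz with 40 MHz typical. A compact sketch of the
 * convention (values copied from that entry, name illustrative):
 */
static const struct display_timing demo_timing = {
	.pixelclock = { 32000000, 40000000, 50000000 },	/* min, typ, max in Hz */
	.hactive    = {      800,      800,      800 },	/* fixed resolution */
	.vactive    = {      600,      600,      600 },
};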
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 320a2a8fd459..c481daa4bbce 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -8,6 +8,7 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -41,59 +42,101 @@
/*
* Command2 with BK function selection.
*
- * BIT[4, 0]: [CN2, BKXSEL]
- * 10 = CMD2BK0, Command2 BK0
- * 11 = CMD2BK1, Command2 BK1
- * 00 = Command2 disable
+ * BIT[4].....CN2
+ * BIT[1:0]...BKXSEL
+ * 1:00 = CMD2BK0, Command2 BK0
+ * 1:01 = CMD2BK1, Command2 BK1
+ * 1:11 = CMD2BK3, Command2 BK3
+ * 0:00 = Command2 disable
*/
-#define DSI_CMD2BK1_SEL 0x11
#define DSI_CMD2BK0_SEL 0x10
+#define DSI_CMD2BK1_SEL 0x11
+#define DSI_CMD2BK3_SEL 0x13
#define DSI_CMD2BKX_SEL_NONE 0x00
/* Command2, BK0 bytes */
-#define DSI_LINESET_LINE 0x69
-#define DSI_LINESET_LDE_EN BIT(7)
-#define DSI_LINESET_LINEDELTA GENMASK(1, 0)
-#define DSI_CMD2_BK0_LNESET_B1 DSI_LINESET_LINEDELTA
-#define DSI_CMD2_BK0_LNESET_B0 (DSI_LINESET_LDE_EN | DSI_LINESET_LINE)
-#define DSI_INVSEL_DEFAULT GENMASK(5, 4)
-#define DSI_INVSEL_NLINV GENMASK(2, 0)
-#define DSI_INVSEL_RTNI GENMASK(2, 1)
-#define DSI_CMD2_BK0_INVSEL_B1 DSI_INVSEL_RTNI
-#define DSI_CMD2_BK0_INVSEL_B0 (DSI_INVSEL_DEFAULT | DSI_INVSEL_NLINV)
-#define DSI_CMD2_BK0_PORCTRL_B0(m) ((m)->vtotal - (m)->vsync_end)
-#define DSI_CMD2_BK0_PORCTRL_B1(m) ((m)->vsync_start - (m)->vdisplay)
+#define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
+#define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
+#define DSI_CMD2_BK0_LNESET_LDE_EN BIT(7)
+#define DSI_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
+#define DSI_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
+#define DSI_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
+#define DSI_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
/* Command2, BK1 bytes */
-#define DSI_CMD2_BK1_VRHA_SET 0x45
-#define DSI_CMD2_BK1_VCOM_SET 0x13
-#define DSI_CMD2_BK1_VGHSS_SET GENMASK(2, 0)
+#define DSI_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
#define DSI_CMD2_BK1_TESTCMD_VAL BIT(7)
-#define DSI_VGLS_DEFAULT BIT(6)
-#define DSI_VGLS_SEL GENMASK(2, 0)
-#define DSI_CMD2_BK1_VGLS_SET (DSI_VGLS_DEFAULT | DSI_VGLS_SEL)
-#define DSI_PWCTLR1_AP BIT(7) /* Gamma OP bias, max */
-#define DSI_PWCTLR1_APIS BIT(2) /* Source OP input bias, min */
-#define DSI_PWCTLR1_APOS BIT(0) /* Source OP output bias, min */
-#define DSI_CMD2_BK1_PWCTLR1_SET (DSI_PWCTLR1_AP | DSI_PWCTLR1_APIS | \
- DSI_PWCTLR1_APOS)
-#define DSI_PWCTLR2_AVDD BIT(5) /* AVDD 6.6v */
-#define DSI_PWCTLR2_AVCL 0x0 /* AVCL -4.4v */
-#define DSI_CMD2_BK1_PWCTLR2_SET (DSI_PWCTLR2_AVDD | DSI_PWCTLR2_AVCL)
-#define DSI_SPD1_T2D BIT(3)
-#define DSI_CMD2_BK1_SPD1_SET (GENMASK(6, 4) | DSI_SPD1_T2D)
-#define DSI_CMD2_BK1_SPD2_SET DSI_CMD2_BK1_SPD1_SET
-#define DSI_MIPISET1_EOT_EN BIT(3)
-#define DSI_CMD2_BK1_MIPISET1_SET (BIT(7) | DSI_MIPISET1_EOT_EN)
+#define DSI_CMD2_BK1_VGLS_ONES BIT(6)
+#define DSI_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
+#define DSI_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
+#define DSI_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
+#define DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
+#define DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
+#define DSI_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
+#define DSI_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
+#define DSI_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK1_MIPISET1_ONES BIT(7)
+#define DSI_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
+
+#define CFIELD_PREP(_mask, _val) \
+ (((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))
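An editorial illustration (not part of the patch) of why CFIELD_PREP() is added instead of reusing FIELD_PREP(): it stays a compile-time constant, so it can seed the const gamma tables below.
/*
 * Illustration: DSI_CMD2_BK0_GAMCTRL_AJ_MASK is GENMASK(7, 6), so the shift is
 * __builtin_ffsll(0xc0) - 1 = 6 and CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2)
 * evaluates to 0x80. OR'ed with CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15)
 * this rebuilds the 0x95 byte previously hardcoded in the NVGAMCTRL sequence,
 * while remaining usable in a static const array initializer.
 */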
+
+enum op_bias {
+ OP_BIAS_OFF = 0,
+ OP_BIAS_MIN,
+ OP_BIAS_MIDDLE,
+ OP_BIAS_MAX
+};
+
+struct st7701;
struct st7701_panel_desc {
const struct drm_display_mode *mode;
unsigned int lanes;
- unsigned long flags;
enum mipi_dsi_pixel_format format;
- const char *const *supply_names;
- unsigned int num_supplies;
unsigned int panel_sleep_delay;
+
+ /* TFT matrix driver configuration, panel specific. */
+ const u8 pv_gamma[16]; /* Positive voltage gamma control */
+ const u8 nv_gamma[16]; /* Negative voltage gamma control */
+ const u8 nlinv; /* Inversion selection */
+ const u32 vop_uv; /* Vop in uV */
+ const u32 vcom_uv; /* Vcom in uV */
+ const u16 vgh_mv; /* Vgh in mV */
+ const s16 vgl_mv; /* Vgl in mV */
+ const u16 avdd_mv; /* Avdd in mV */
+ const s16 avcl_mv; /* Avcl in mV */
+ const enum op_bias gamma_op_bias;
+ const enum op_bias input_op_bias;
+ const enum op_bias output_op_bias;
+ const u16 t2d_ns; /* T2D in ns */
+ const u16 t3d_ns; /* T3D in ns */
+ const bool eot_en;
+
+ /* GIP sequence, fully custom and undocumented. */
+ void (*gip_sequence)(struct st7701 *st7701);
};
struct st7701 {
@@ -101,7 +144,7 @@ struct st7701 {
struct mipi_dsi_device *dsi;
const struct st7701_panel_desc *desc;
- struct regulator_bulk_data *supplies;
+ struct regulator_bulk_data supplies[2];
struct gpio_desc *reset;
unsigned int sleep_delay;
};
@@ -123,9 +166,37 @@ static inline int st7701_dsi_write(struct st7701 *st7701, const void *seq,
st7701_dsi_write(st7701, d, ARRAY_SIZE(d)); \
}
+static u8 st7701_vgls_map(struct st7701 *st7701)
+{
+ const struct st7701_panel_desc *desc = st7701->desc;
+ struct {
+ s32 vgl;
+ u8 val;
+ } map[16] = {
+ { -7060, 0x0 }, { -7470, 0x1 },
+ { -7910, 0x2 }, { -8140, 0x3 },
+ { -8650, 0x4 }, { -8920, 0x5 },
+ { -9210, 0x6 }, { -9510, 0x7 },
+ { -9830, 0x8 }, { -10170, 0x9 },
+ { -10530, 0xa }, { -10910, 0xb },
+ { -11310, 0xc }, { -11730, 0xd },
+ { -12200, 0xe }, { -12690, 0xf }
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (desc->vgl_mv == map[i].vgl)
+ return map[i].val;
+
+ return 0;
+}
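For illustration (editor's note), the lookup reproduces the value the driver used to hardcode.
/*
 * e.g. vgl_mv = -9510 (TS8550B descriptor below) maps to 0x7; combined with
 * DSI_CMD2_BK1_VGLS_ONES this gives 0x47, i.e. the old DSI_CMD2_BK1_VGLS_SET
 * (BIT(6) | GENMASK(2, 0)).
 */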
+
static void st7701_init_sequence(struct st7701 *st7701)
{
- const struct drm_display_mode *mode = st7701->desc->mode;
+ const struct st7701_panel_desc *desc = st7701->desc;
+ const struct drm_display_mode *mode = desc->mode;
+ const u8 linecount8 = mode->vdisplay / 8;
+ const u8 linecountrem2 = (mode->vdisplay % 8) / 2;
ST7701_DSI(st7701, MIPI_DCS_SOFT_RESET, 0x00);
@@ -139,34 +210,105 @@ static void st7701_init_sequence(struct st7701 *st7701)
/* Command2, BK0 */
ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
0x77, 0x01, 0x00, 0x00, DSI_CMD2BK0_SEL);
- ST7701_DSI(st7701, DSI_CMD2_BK0_PVGAMCTRL, 0x00, 0x0E, 0x15, 0x0F,
- 0x11, 0x08, 0x08, 0x08, 0x08, 0x23, 0x04, 0x13, 0x12,
- 0x2B, 0x34, 0x1F);
- ST7701_DSI(st7701, DSI_CMD2_BK0_NVGAMCTRL, 0x00, 0x0E, 0x95, 0x0F,
- 0x13, 0x07, 0x09, 0x08, 0x08, 0x22, 0x04, 0x10, 0x0E,
- 0x2C, 0x34, 0x1F);
+ mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
+ desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
+ mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,
+ desc->nv_gamma, ARRAY_SIZE(desc->nv_gamma));
+ /*
+ * Vertical line count configuration:
+ * Line[6:0]: select number of vertical lines of the TFT matrix in
+ * multiples of 8 lines
+ * LDE_EN: enable sub-8-line granularity line count
+ * Line_delta[1:0]: add 0/2/4/6 extra lines to line count selected
+ * using Line[6:0]
+ *
+ * Total number of vertical lines:
+ * LN = ((Line[6:0] + 1) * 8) + (LDE_EN ? Line_delta[1:0] * 2 : 0)
+ */
ST7701_DSI(st7701, DSI_CMD2_BK0_LNESET,
- DSI_CMD2_BK0_LNESET_B0, DSI_CMD2_BK0_LNESET_B1);
+ FIELD_PREP(DSI_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
+ (linecountrem2 ? DSI_CMD2_BK0_LNESET_LDE_EN : 0),
+ FIELD_PREP(DSI_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
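A worked example (editorial, using the dmt028vghmcmi_1a mode added below):
/*
 * vdisplay = 640: linecount8 = 80 and linecountrem2 = 0, so Line[6:0] = 79,
 * LDE_EN stays clear and LN = (79 + 1) * 8 = 640 lines, as expected.
 */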
ST7701_DSI(st7701, DSI_CMD2_BK0_PORCTRL,
- DSI_CMD2_BK0_PORCTRL_B0(mode),
- DSI_CMD2_BK0_PORCTRL_B1(mode));
+ FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VBP_MASK,
+ mode->vtotal - mode->vsync_end),
+ FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VFP_MASK,
+ mode->vsync_start - mode->vdisplay));
+ /*
+ * Horizontal pixel count configuration:
+ * PCLK = 512 + (RTNI[4:0] * 16)
+ * PCLK is the number of pixel clocks per line, which matches the
+ * mode htotal. The minimum is 512 PCLK.
+ */
ST7701_DSI(st7701, DSI_CMD2_BK0_INVSEL,
- DSI_CMD2_BK0_INVSEL_B0, DSI_CMD2_BK0_INVSEL_B1);
+ DSI_CMD2_BK0_INVSEL_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
+ FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
+ DIV_ROUND_UP(mode->htotal, 16)));
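Editorial illustration of the documented formula:
/* e.g. RTNI = 2 selects 512 + 2 * 16 = 544 pixel clocks per line */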
/* Command2, BK1 */
ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
0x77, 0x01, 0x00, 0x00, DSI_CMD2BK1_SEL);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS, DSI_CMD2_BK1_VRHA_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM, DSI_CMD2_BK1_VCOM_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS, DSI_CMD2_BK1_VGHSS_SET);
+
+ /* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,
+ FIELD_PREP(DSI_CMD2_BK1_VRHA_MASK,
+ DIV_ROUND_CLOSEST(desc->vop_uv - 3537500, 12500)));
+
+ /* Vcom = 0.1V + (VCOM[7:0] * 0.0125V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM,
+ FIELD_PREP(DSI_CMD2_BK1_VCOM_MASK,
+ DIV_ROUND_CLOSEST(desc->vcom_uv - 100000, 12500)));
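Worked example (editor's note), using the TS8550B descriptor values added below:
/*
 * vop_uv = 4400000: (4400000 - 3537500) / 12500 = 69 = 0x45, the value
 * previously hardcoded as DSI_CMD2_BK1_VRHA_SET.
 * vcom_uv = 337500: (337500 - 100000) / 12500 = 19 = 0x13, matching the old
 * DSI_CMD2_BK1_VCOM_SET.
 */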
+
+ /* Vgh = 11.5V + (VGHSS[7:0] * 0.5V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS,
+ FIELD_PREP(DSI_CMD2_BK1_VGHSS_MASK,
+ DIV_ROUND_CLOSEST(clamp(desc->vgh_mv,
+ (u16)11500,
+ (u16)17000) - 11500,
+ 500)));
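Another worked example (editorial):
/* e.g. vgh_mv = 15000: (15000 - 11500) / 500 = 7, the old DSI_CMD2_BK1_VGHSS_SET value */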
+
ST7701_DSI(st7701, DSI_CMD2_BK1_TESTCMD, DSI_CMD2_BK1_TESTCMD_VAL);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS, DSI_CMD2_BK1_VGLS_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1, DSI_CMD2_BK1_PWCTLR1_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2, DSI_CMD2_BK1_PWCTLR2_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1, DSI_CMD2_BK1_SPD1_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2, DSI_CMD2_BK1_SPD2_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1, DSI_CMD2_BK1_MIPISET1_SET);
+ /* Vgl is non-linear */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS,
+ DSI_CMD2_BK1_VGLS_ONES |
+ FIELD_PREP(DSI_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));
+
+ ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1,
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_AP_MASK,
+ desc->gamma_op_bias) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APIS_MASK,
+ desc->input_op_bias) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APOS_MASK,
+ desc->output_op_bias));
+
+ /* Avdd = 6.2V + (AVDD[1:0] * 0.2V), Avcl = -4.4V - (AVCL[1:0] * 0.2V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2,
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
+ DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
+ DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));
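Worked example (editor's note, assuming the AVCL term is derived from the formula in the comment above):
/*
 * e.g. avdd_mv = 6600: (6600 - 6200) / 200 = 2, encoded as 0x20 in bits [5:4]
 * (the old DSI_PWCTLR2_AVDD BIT(5)); avcl_mv = -4400: (-4400 - (-4400)) / 200 = 0,
 * matching the old DSI_PWCTLR2_AVCL value of 0x0.
 */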
+
+ /* T2D = 0.2us * T2D[3:0] */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
+ DSI_CMD2_BK1_SPD1_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK1_SPD1_T2D_MASK,
+ DIV_ROUND_CLOSEST(desc->t2d_ns, 200)));
+
+ /* T3D = 4us + (0.8us * T3D[3:0]) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2,
+ DSI_CMD2_BK1_SPD2_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK1_SPD2_T3D_MASK,
+ DIV_ROUND_CLOSEST(desc->t3d_ns - 4000, 800)));
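Worked example (editorial) for the source delay timings:
/*
 * e.g. t2d_ns = 1600: 1600 / 200 = 8; t3d_ns = 10400: (10400 - 4000) / 800 = 8.
 * Both combine with the ONES field (GENMASK(6, 4)) to 0x78, the old
 * DSI_CMD2_BK1_SPD1_SET / SPD2_SET value.
 */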
+
+ ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1,
+ DSI_CMD2_BK1_MIPISET1_ONES |
+ (desc->eot_en ? DSI_CMD2_BK1_MIPISET1_EOT_EN : 0));
+}
+
+static void ts8550b_gip_sequence(struct st7701 *st7701)
+{
/**
* ST7701_SPEC_V1.2 is unable to provide enough information about this
* specific command sequence, so it is taken from the vendor BSP driver.
@@ -188,10 +330,78 @@ static void st7701_init_sequence(struct st7701 *st7701)
ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
ST7701_DSI(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
0xFF, 0xFF, 0xFF, 0xF3, 0x27, 0x65, 0x40, 0x1F, 0xFF);
+}
+
+static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
+{
+ ST7701_DSI(st7701, 0xEE, 0x42);
+ ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+
+ ST7701_DSI(st7701, 0xE1,
+ 0x04, 0xA0, 0x06, 0xA0,
+ 0x05, 0xA0, 0x07, 0xA0,
+ 0x00, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE2,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE3,
+ 0x00, 0x00, 0x22, 0x22);
+ ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE5,
+ 0x0C, 0x90, 0xA0, 0xA0,
+ 0x0E, 0x92, 0xA0, 0xA0,
+ 0x08, 0x8C, 0xA0, 0xA0,
+ 0x0A, 0x8E, 0xA0, 0xA0);
+ ST7701_DSI(st7701, 0xE6,
+ 0x00, 0x00, 0x22, 0x22);
+ ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE8,
+ 0x0D, 0x91, 0xA0, 0xA0,
+ 0x0F, 0x93, 0xA0, 0xA0,
+ 0x09, 0x8D, 0xA0, 0xA0,
+ 0x0B, 0x8F, 0xA0, 0xA0);
+ ST7701_DSI(st7701, 0xEB,
+ 0x00, 0x00, 0xE4, 0xE4,
+ 0x44, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xED,
+ 0xFF, 0xF5, 0x47, 0x6F,
+ 0x0B, 0xA1, 0xAB, 0xFF,
+ 0xFF, 0xBA, 0x1A, 0xB0,
+ 0xF6, 0x74, 0x5F, 0xFF);
+ ST7701_DSI(st7701, 0xEF,
+ 0x08, 0x08, 0x08, 0x40,
+ 0x3F, 0x64);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+ ST7701_DSI(st7701, 0xE6, 0x7C);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ ST7701_DSI(st7701, 0x11);
+ msleep(120);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
+ msleep(10);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ ST7701_DSI(st7701, 0x11);
+ msleep(120);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
- /* disable Command2 */
ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+
+ ST7701_DSI(st7701, 0x3A, 0x70);
}
static int st7701_prepare(struct drm_panel *panel)
@@ -201,7 +411,7 @@ static int st7701_prepare(struct drm_panel *panel)
gpiod_set_value(st7701->reset, 0);
- ret = regulator_bulk_enable(st7701->desc->num_supplies,
+ ret = regulator_bulk_enable(ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
@@ -212,6 +422,13 @@ static int st7701_prepare(struct drm_panel *panel)
st7701_init_sequence(st7701);
+ if (st7701->desc->gip_sequence)
+ st7701->desc->gip_sequence(st7701);
+
+ /* Disable Command2 */
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+
return 0;
}
@@ -254,7 +471,7 @@ static int st7701_unprepare(struct drm_panel *panel)
*/
msleep(st7701->sleep_delay);
- regulator_bulk_disable(st7701->desc->num_supplies, st7701->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(st7701->supplies), st7701->supplies);
return 0;
}
@@ -310,46 +527,207 @@ static const struct drm_display_mode ts8550b_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
-static const char * const ts8550b_supply_names[] = {
- "VCC",
- "IOVCC",
-};
-
static const struct st7701_panel_desc ts8550b_desc = {
.mode = &ts8550b_mode,
.lanes = 2,
- .flags = MIPI_DSI_MODE_VIDEO,
.format = MIPI_DSI_FMT_RGB888,
- .supply_names = ts8550b_supply_names,
- .num_supplies = ARRAY_SIZE(ts8550b_supply_names),
.panel_sleep_delay = 80, /* panel needs extra 80ms for sleep out cmd */
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nlinv = 7,
+ .vop_uv = 4400000,
+ .vcom_uv = 337500,
+ .vgh_mv = 15000,
+ .vgl_mv = -9510,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MAX,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = ts8550b_gip_sequence,
+};
+
+static const struct drm_display_mode dmt028vghmcmi_1a_mode = {
+ .clock = 22325,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 4,
+ .htotal = 480 + 40 + 4 + 20,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 2,
+ .vsync_end = 640 + 2 + 40,
+ .vtotal = 640 + 2 + 40 + 16,
+
+ .width_mm = 56,
+ .height_mm = 78,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc dmt028vghmcmi_1a_desc = {
+ .mode = &dmt028vghmcmi_1a_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 5, /* panel needs extra 5ms for sleep out cmd */
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nlinv = 1,
+ .vop_uv = 4800000,
+ .vcom_uv = 1650000,
+ .vgh_mv = 15000,
+ .vgl_mv = -10170,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = dmt028vghmcmi_1a_gip_sequence,
};
static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct st7701_panel_desc *desc;
struct st7701 *st7701;
- int ret, i;
+ int ret;
st7701 = devm_kzalloc(&dsi->dev, sizeof(*st7701), GFP_KERNEL);
if (!st7701)
return -ENOMEM;
desc = of_device_get_match_data(&dsi->dev);
- dsi->mode_flags = desc->flags;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- st7701->supplies = devm_kcalloc(&dsi->dev, desc->num_supplies,
- sizeof(*st7701->supplies),
- GFP_KERNEL);
- if (!st7701->supplies)
- return -ENOMEM;
-
- for (i = 0; i < desc->num_supplies; i++)
- st7701->supplies[i].supply = desc->supply_names[i];
+ st7701->supplies[0].supply = "VCC";
+ st7701->supplies[1].supply = "IOVCC";
- ret = devm_regulator_bulk_get(&dsi->dev, desc->num_supplies,
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
@@ -387,17 +765,16 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
return mipi_dsi_attach(dsi);
}
-static int st7701_dsi_remove(struct mipi_dsi_device *dsi)
+static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
{
struct st7701 *st7701 = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&st7701->panel);
-
- return 0;
}
static const struct of_device_id st7701_of_match[] = {
+ { .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
{ }
};
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 73f69c929a75..86a472b01360 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -598,7 +598,7 @@ static void st7703_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int st7703_remove(struct mipi_dsi_device *dsi)
+static void st7703_remove(struct mipi_dsi_device *dsi)
{
struct st7703 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -612,8 +612,6 @@ static int st7703_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
st7703_debugfs_remove(ctx);
-
- return 0;
}
static const struct of_device_id st7703_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index 69f07b15fca4..fa9be3c299c0 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -517,7 +517,7 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int truly_nt35521_remove(struct mipi_dsi_device *dsi)
+static void truly_nt35521_remove(struct mipi_dsi_device *dsi)
{
struct truly_nt35521 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -527,8 +527,6 @@ static int truly_nt35521_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id truly_nt35521_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
index 820731be7147..d8487bc6d611 100644
--- a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
+++ b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
@@ -210,7 +210,7 @@ static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi)
return mipi_dsi_attach(dsi);
}
-static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
+static void tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
{
struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
int err;
@@ -222,8 +222,6 @@ static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&tdo_tl070wsh30->base);
drm_panel_disable(&tdo_tl070wsh30->base);
drm_panel_unprepare(&tdo_tl070wsh30->base);
-
- return 0;
}
static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 9ca5c7ff41d6..b31cffb660a7 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -616,7 +616,7 @@ err_panel_add:
return ret;
}
-static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
+static void truly_nt35597_remove(struct mipi_dsi_device *dsi)
{
struct truly_nt35597 *ctx = mipi_dsi_get_drvdata(dsi);
@@ -628,7 +628,6 @@ static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
}
drm_panel_remove(&ctx->panel);
- return 0;
}
static const struct of_device_id truly_nt35597_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index db2443ac81d3..ec228c269146 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -256,7 +256,7 @@ err_dsi_attach:
return ret;
}
-static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
{
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
@@ -264,7 +264,6 @@ static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
mipi_dsi_device_unregister(ctx->dsi);
drm_panel_remove(&ctx->panel);
- return 0;
}
static const struct of_device_id visionox_rm69299_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 8177f5a360fb..2c54733ee241 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -339,7 +339,7 @@ static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int xpp055c272_remove(struct mipi_dsi_device *dsi)
+static void xpp055c272_remove(struct mipi_dsi_device *dsi)
{
struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -351,8 +351,6 @@ static int xpp055c272_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id xpp055c272_of_match[] = {
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
index 86cdc0ce79e6..079600328be1 100644
--- a/drivers/gpu/drm/panfrost/Kconfig
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -11,6 +11,7 @@ config DRM_PANFROST
select DRM_GEM_SHMEM_HELPER
select PM_DEVFREQ
select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select WANT_DEV_COREDUMP
help
DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
Bifrost (G3x, G5x, G7x) GPUs.
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index b71935862417..7da2b3f02ed9 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -9,6 +9,7 @@ panfrost-y := \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o \
- panfrost_perfcnt.o
+ panfrost_perfcnt.o \
+ panfrost_dump.o
obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5110cd9b2425..fe5f12f16a63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -131,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
+
+ /*
+ * Set the recommended OPP; this will enable and configure the regulator,
+ * if any, and will avoid a switch off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ return ret;
+ }
+
dev_pm_opp_put(opp);
/*
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
new file mode 100644
index 000000000000..89056a1aac7d
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021 Collabora ltd. */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/devcoredump.h>
+#include <linux/moduleparam.h>
+#include <linux/iosys-map.h>
+#include <drm/panfrost_drm.h>
+#include <drm/drm_device.h>
+
+#include "panfrost_job.h"
+#include "panfrost_gem.h"
+#include "panfrost_regs.h"
+#include "panfrost_dump.h"
+#include "panfrost_device.h"
+
+static bool panfrost_dump_core = true;
+module_param_named(dump_core, panfrost_dump_core, bool, 0600);
+
+struct panfrost_dump_iterator {
+ void *start;
+ struct panfrost_dump_object_header *hdr;
+ void *data;
+};
+
+static const unsigned short panfrost_dump_registers[] = {
+ SHADER_READY_LO,
+ SHADER_READY_HI,
+ TILER_READY_LO,
+ TILER_READY_HI,
+ L2_READY_LO,
+ L2_READY_HI,
+ JOB_INT_MASK,
+ JOB_INT_STAT,
+ JS_HEAD_LO(0),
+ JS_HEAD_HI(0),
+ JS_TAIL_LO(0),
+ JS_TAIL_HI(0),
+ JS_AFFINITY_LO(0),
+ JS_AFFINITY_HI(0),
+ JS_CONFIG(0),
+ JS_STATUS(0),
+ JS_HEAD_NEXT_LO(0),
+ JS_HEAD_NEXT_HI(0),
+ JS_AFFINITY_NEXT_LO(0),
+ JS_AFFINITY_NEXT_HI(0),
+ JS_CONFIG_NEXT(0),
+ MMU_INT_MASK,
+ MMU_INT_STAT,
+ AS_TRANSTAB_LO(0),
+ AS_TRANSTAB_HI(0),
+ AS_MEMATTR_LO(0),
+ AS_MEMATTR_HI(0),
+ AS_FAULTSTATUS(0),
+ AS_FAULTADDRESS_LO(0),
+ AS_FAULTADDRESS_HI(0),
+ AS_STATUS(0),
+};
+
+static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
+ u32 type, void *data_end)
+{
+ struct panfrost_dump_object_header *hdr = iter->hdr;
+
+ hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
+ hdr->type = cpu_to_le32(type);
+ hdr->file_offset = cpu_to_le32(iter->data - iter->start);
+ hdr->file_size = cpu_to_le32(data_end - iter->data);
+
+ iter->hdr++;
+ iter->data += le32_to_cpu(hdr->file_size);
+}
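A sketch (editor's illustration) of the devcoredump layout this iterator builds:
/*
 * iter.start -> [hdr 0][hdr 1]...[hdr n-1][payload 0][payload 1]...
 * Each panfrost_core_dump_header() call records the offset and size of the
 * payload just written at iter->data, then advances both iter->hdr and
 * iter->data, so headers and payloads stay in matching order.
 */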
+
+static void
+panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
+ struct panfrost_device *pfdev,
+ u32 as_nr, int slot)
+{
+ struct panfrost_dump_registers *dumpreg = iter->data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(panfrost_dump_registers); i++, dumpreg++) {
+ unsigned int js_as_offset = 0;
+ unsigned int reg;
+
+ if (panfrost_dump_registers[i] >= JS_BASE &&
+ panfrost_dump_registers[i] <= JS_BASE + JS_SLOT_STRIDE)
+ js_as_offset = slot * JS_SLOT_STRIDE;
+ else if (panfrost_dump_registers[i] >= MMU_BASE &&
+ panfrost_dump_registers[i] <= MMU_BASE + MMU_AS_STRIDE)
+ js_as_offset = (as_nr << MMU_AS_SHIFT);
+
+ reg = panfrost_dump_registers[i] + js_as_offset;
+
+ dumpreg->reg = cpu_to_le32(reg);
+ dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+ }
+
+ panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
+}
+
+void panfrost_core_dump(struct panfrost_job *job)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ struct panfrost_dump_iterator iter;
+ struct drm_gem_object *dbo;
+ unsigned int n_obj, n_bomap_pages;
+ __le64 *bomap, *bomap_start;
+ size_t file_size;
+ u32 as_nr;
+ int slot;
+ int ret, i;
+
+ as_nr = job->mmu->as;
+ slot = panfrost_job_get_slot(job);
+
+ /* Only catch the first event, or when manually re-armed */
+ if (!panfrost_dump_core)
+ return;
+ panfrost_dump_core = false;
+
+ /* At a minimum, we dump the registers and an end marker */
+ n_obj = 2;
+ n_bomap_pages = 0;
+ file_size = ARRAY_SIZE(panfrost_dump_registers) *
+ sizeof(struct panfrost_dump_registers);
+
+ /* Add in the active buffer objects */
+ for (i = 0; i < job->bo_count; i++) {
+ /*
+ * Even though the CPU could be configured to use 16K or 64K pages, this
+ * is a very unusual situation for most kernel setups on SoCs that have
+ * a Panfrost device. Also many places across the driver make the somewhat
+ * arbitrary assumption that Panfrost's MMU page size is the same as the CPU's,
+ * so let's have a sanity check to ensure that's always the case
+ */
+ dbo = job->bos[i];
+ WARN_ON(!IS_ALIGNED(dbo->size, PAGE_SIZE));
+
+ file_size += dbo->size;
+ n_bomap_pages += dbo->size >> PAGE_SHIFT;
+ n_obj++;
+ }
+
+ /* If we have any buffer objects, add a bomap object */
+ if (n_bomap_pages) {
+ file_size += n_bomap_pages * sizeof(*bomap);
+ n_obj++;
+ }
+
+ /* Add the size of the headers */
+ file_size += sizeof(*iter.hdr) * n_obj;
+
+ /*
+ * Allocate the file in vmalloc memory; it's likely to be big.
+ * The reason behind these GFP flags is that we don't want to trigger the
+ * OOM killer in the event that not enough memory could be found for our
+ * dump file. We also don't want the allocator to do any error reporting,
+ * as the right behaviour is failing gracefully if a big enough buffer
+ * could not be allocated.
+ */
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
+ if (!iter.start) {
+ dev_warn(pfdev->dev, "failed to allocate devcoredump file\n");
+ return;
+ }
+
+ /* Point the data member after the headers */
+ iter.hdr = iter.start;
+ iter.data = &iter.hdr[n_obj];
+
+ memset(iter.hdr, 0, iter.data - iter.start);
+
+ /*
+ * For now, we write the job identifier in the register dump header,
+ * so that we can decode the entire dump later with pandecode
+ */
+ iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
+ iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
+ iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
+ iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
+ iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+
+ panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
+
+ /* Reserve space for the bomap */
+ if (job->bo_count) {
+ bomap_start = bomap = iter.data;
+ memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
+ panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BOMAP,
+ bomap + n_bomap_pages);
+ }
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct iosys_map map;
+ struct panfrost_gem_mapping *mapping;
+ struct panfrost_gem_object *bo;
+ struct sg_page_iter page_iter;
+ void *vaddr;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = job->mappings[i];
+
+ if (!bo->base.sgt) {
+ dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
+ iter.hdr->bomap.valid = 0;
+ goto dump_header;
+ }
+
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+ if (ret) {
+ dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
+ iter.hdr->bomap.valid = 0;
+ goto dump_header;
+ }
+
+ WARN_ON(!mapping->active);
+
+ iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+
+ for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
+ struct page *page = sg_page_iter_page(&page_iter);
+
+ if (!IS_ERR(page)) {
+ *bomap++ = cpu_to_le64(page_to_phys(page));
+ } else {
+ dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
+ *bomap++ = ~cpu_to_le64(0);
+ }
+ }
+
+ iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+
+ vaddr = map.vaddr;
+ memcpy(iter.data, vaddr, bo->base.base.size);
+
+ drm_gem_shmem_vunmap(&bo->base, &map);
+
+ iter.hdr->bomap.valid = cpu_to_le32(1);
+
+dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
+ bo->base.base.size);
+ }
+ panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
+
+ dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.h b/drivers/gpu/drm/panfrost/panfrost_dump.h
new file mode 100644
index 000000000000..7d9bcefa5346
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Collabora ltd.
+ */
+
+#ifndef PANFROST_DUMP_H
+#define PANFROST_DUMP_H
+
+struct panfrost_job;
+void panfrost_core_dump(struct panfrost_job *job);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 7c4208476fbd..dbc597ab46fb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -20,6 +20,7 @@
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"
+#include "panfrost_dump.h"
#define JOB_TIMEOUT_MS 500
@@ -727,6 +728,8 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
+ panfrost_core_dump(job);
+
atomic_set(&pfdev->reset.pending, 1);
panfrost_reset(pfdev, sched_job);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b285a8001b1d..e246d914e7f6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -248,11 +248,15 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
mmu_write(pfdev, MMU_INT_MASK, ~0);
}
-static size_t get_pgsize(u64 addr, size_t size)
+static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
- if (addr & (SZ_2M - 1) || size < SZ_2M)
- return SZ_4K;
+ size_t blk_offset = -addr % SZ_2M;
+ if (blk_offset || size < SZ_2M) {
+ *count = min_not_zero(blk_offset, size) / SZ_4K;
+ return SZ_4K;
+ }
+ *count = size / SZ_2M;
return SZ_2M;
}
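A worked example (editorial) of the new page-size and count selection:
/*
 * e.g. addr = 0x201000, size = 0x400000: blk_offset = 0x1ff000 (distance to
 * the next 2 MiB boundary), so this call returns SZ_4K with *count = 511;
 * the next call then starts 2 MiB aligned and can map the bulk of the
 * remainder with SZ_2M entries.
 */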
@@ -287,12 +291,16 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
while (len) {
- size_t pgsize = get_pgsize(iova | paddr, len);
-
- ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
- iova += pgsize;
- paddr += pgsize;
- len -= pgsize;
+ size_t pgcount, mapped = 0;
+ size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
+
+ ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+ GFP_KERNEL, &mapped);
+ /* Don't get stuck if things have gone wrong */
+ mapped = max(mapped, pgsize);
+ iova += mapped;
+ paddr += mapped;
+ len -= mapped;
}
}
@@ -344,15 +352,17 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
mapping->mmu->as, iova, len);
while (unmapped_len < len) {
- size_t unmapped_page;
- size_t pgsize = get_pgsize(iova, len - unmapped_len);
-
- if (ops->iova_to_phys(ops, iova)) {
- unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
- WARN_ON(unmapped_page != pgsize);
+ size_t unmapped_page, pgcount;
+ size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+ if (bo->is_heap)
+ pgcount = 1;
+ if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
+ unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+ WARN_ON(unmapped_page != pgsize * pgcount);
}
- iova += pgsize;
- unmapped_len += pgsize;
+ iova += pgsize * pgcount;
+ unmapped_len += pgsize * pgcount;
}
panfrost_mmu_flush_range(pfdev, mapping->mmu,
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index accb4fa3adb8..919f44ac853d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -226,23 +226,25 @@
#define JOB_INT_MASK_DONE(j) BIT(j)
#define JS_BASE 0x1800
-#define JS_HEAD_LO(n) (JS_BASE + ((n) * 0x80) + 0x00)
-#define JS_HEAD_HI(n) (JS_BASE + ((n) * 0x80) + 0x04)
-#define JS_TAIL_LO(n) (JS_BASE + ((n) * 0x80) + 0x08)
-#define JS_TAIL_HI(n) (JS_BASE + ((n) * 0x80) + 0x0c)
-#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * 0x80) + 0x10)
-#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * 0x80) + 0x14)
-#define JS_CONFIG(n) (JS_BASE + ((n) * 0x80) + 0x18)
-#define JS_XAFFINITY(n) (JS_BASE + ((n) * 0x80) + 0x1c)
-#define JS_COMMAND(n) (JS_BASE + ((n) * 0x80) + 0x20)
-#define JS_STATUS(n) (JS_BASE + ((n) * 0x80) + 0x24)
-#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x40)
-#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x44)
-#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x50)
-#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x54)
-#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x58)
-#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x60)
-#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x70)
+#define JS_SLOT_STRIDE 0x80
+
+#define JS_HEAD_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x00)
+#define JS_HEAD_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x04)
+#define JS_TAIL_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x08)
+#define JS_TAIL_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x0c)
+#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x10)
+#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x14)
+#define JS_CONFIG(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x18)
+#define JS_XAFFINITY(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x1c)
+#define JS_COMMAND(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x20)
+#define JS_STATUS(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x24)
+#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x40)
+#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x44)
+#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x50)
+#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x54)
+#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x58)
+#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x60)
+#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x70)
/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
#define JS_CONFIG_START_FLUSH_CLEAN BIT(8)
@@ -281,7 +283,9 @@
#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1s cache then
flush all L2 caches then issue a flush region command to all MMUs */
-#define MMU_AS(as) (0x2400 + ((as) << 6))
+#define MMU_BASE 0x2400
+#define MMU_AS_SHIFT 0x06
+#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
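Editorial illustration of the address-space register window:
/* e.g. MMU_AS(2) = 0x2400 + (2 << 6) = 0x2480; each AS register window spans 64 bytes */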
#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x00) /* (RW) Translation Table Base Address for address space n, low word */
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
@@ -300,6 +304,8 @@
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
+#define MMU_AS_STRIDE (1 << MMU_AS_SHIFT)
+
/*
* Begin LPAE MMU TRANSTAB register values
*/
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 91ee05b01303..ad24cdf1d992 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -6,7 +6,7 @@ config DRM_PL111
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 6263346f24c6..6afdf260a4e2 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -15,11 +15,11 @@
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "pl111_drm.h"
@@ -94,7 +94,7 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe,
return -EINVAL;
if (fb) {
- u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3)
@@ -398,7 +398,7 @@ static void pl111_display_update(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
- u32 addr = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 addr = drm_fb_dma_get_gem_addr(fb, pstate, 0);
writel(addr, priv->regs + CLCD_UBAS);
}
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 19a4324bd356..eb25eedb5ee0 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -48,10 +48,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -208,10 +207,10 @@ pl111_gem_import_sg_table(struct drm_device *dev,
if (priv->use_device_memory)
return ERR_PTR(-EINVAL);
- return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver pl111_drm_driver = {
.driver_features =
@@ -224,7 +223,7 @@ static const struct drm_driver pl111_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = drm_gem_dma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index efb01a554574..1b436b75fd39 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -404,6 +404,7 @@ static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
if (of_device_is_compatible(child, "arm,pl111")) {
has_coretile_clcd = true;
ct_clcd = child;
+ of_node_put(child);
break;
}
if (of_device_is_compatible(child, "arm,hdlcd")) {
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 7b00c955cd82..63aa96a69752 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -53,17 +53,11 @@ void qxl_ring_free(struct qxl_ring *ring)
kfree(ring);
}
-void qxl_ring_init_hdr(struct qxl_ring *ring)
-{
- ring->ring->header.notify_on_prod = ring->n_elements;
-}
-
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
- bool set_prod_notify,
wait_queue_head_t *push_event)
{
struct qxl_ring *ring;
@@ -77,8 +71,6 @@ qxl_ring_create(struct qxl_ring_header *header,
ring->n_elements = n_elements;
ring->prod_notify = prod_notify;
ring->push_event = push_event;
- if (set_prod_notify)
- qxl_ring_init_hdr(ring);
spin_lock_init(&ring->lock);
return ring;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 2e8949863d6b..a152a7c6db21 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -902,7 +902,7 @@ static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
static const struct drm_plane_funcs qxl_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -924,7 +924,7 @@ static const struct drm_plane_helper_funcs primary_helper_funcs = {
static const struct drm_plane_funcs qxl_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1cb6f0c224bb..3044ca948ce2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -194,7 +194,6 @@ static int qxl_drm_resume(struct drm_device *dev, bool thaw)
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (!thaw) {
qxl_reinit_memslots(qdev);
- qxl_ring_init_hdr(qdev->release_ring);
}
qxl_create_monitors_object(qdev);
@@ -220,6 +219,7 @@ static int qxl_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct qxl_device *qdev = to_qxl(drm_dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -227,6 +227,7 @@ static int qxl_pm_resume(struct device *dev)
return -EIO;
}
+ qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 47c169673088..432758ad39a3 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -277,10 +277,8 @@ struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
- bool set_prod_notify,
wait_queue_head_t *push_event);
void qxl_ring_free(struct qxl_ring *ring);
-void qxl_ring_init_hdr(struct qxl_ring *ring);
int qxl_check_idle(struct qxl_ring *ring);
static inline uint64_t
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9bf6d4cc98d4..dc3828db1991 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -194,7 +194,6 @@ int qxl_device_init(struct qxl_device *qdev,
sizeof(struct qxl_command),
QXL_COMMAND_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CMD,
- false,
&qdev->display_event);
if (!qdev->command_ring) {
DRM_ERROR("Unable to create command ring\n");
@@ -207,7 +206,6 @@ int qxl_device_init(struct qxl_device *qdev,
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CURSOR,
- false,
&qdev->cursor_event);
if (!qdev->cursor_ring) {
@@ -219,7 +217,7 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
- QXL_RELEASE_RING_SIZE, 0, true,
+ QXL_RELEASE_RING_SIZE, 0,
NULL);
if (!qdev->release_ring) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b42a657e4c2f..695d9308d1f0 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -141,7 +141,7 @@ int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
qxl_ttm_placement_from_domain(bo, domain);
bo->tbo.priority = priority;
- r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
&bo->placement, 0, &ctx, NULL, NULL,
&qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index e3ab3aca1396..bb4e56f2f170 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \
- radeon_sync.o radeon_audio.o radeon_dp_auxch.o radeon_dp_mst.o
+ radeon_sync.o radeon_audio.o radeon_dp_auxch.o
radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 69f1bc073902..d28d3acb3ba1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -617,13 +617,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
- if (radeon_encoder->is_mst_encoder) {
- struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- }
-
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_crtc->ss_enabled) {
@@ -972,9 +965,7 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
- if (radeon_encoder->is_mst_encoder) {
- radeon_dp_mst_prepare_pll(crtc, mode);
- } else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c93040e60d04..c841c273222e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -32,6 +32,8 @@
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>
+#include <acpi/video.h>
+
#include "atom.h"
#include "radeon_atombios.h"
#include "radeon.h"
@@ -209,6 +211,11 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping radeon atom DIG backlight registration\n");
+ return;
+ }
+
pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
@@ -667,15 +674,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- struct radeon_encoder_atom_dig *dig_enc;
- if (radeon_encoder_is_digital(encoder)) {
- dig_enc = radeon_encoder->enc_priv;
- if (dig_enc->active_mst_links)
- return ATOM_ENCODER_MODE_DP_MST;
- }
- if (radeon_encoder->is_mst_encoder || radeon_encoder->offset)
- return ATOM_ENCODER_MODE_DP_MST;
/* dp bridges are always DP */
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
@@ -1723,10 +1722,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* don't power off encoders with active MST links */
- if (dig->active_mst_links)
- return;
-
if (ASIC_IS_DCE4(rdev)) {
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1992,53 +1987,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-void
-atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
- uint8_t frev, crev;
- union crtc_source_param args;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- if (frev != 1 && crev != 2)
- DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev);
-
- args.v2.ucCRTC = radeon_crtc->crtc_id;
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST;
-
- switch (fe) {
- case 0:
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- break;
- case 1:
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
- case 2:
- args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
- break;
- case 3:
- args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
- break;
- case 4:
- args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
- break;
- case 5:
- args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
- break;
- case 6:
- args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
- break;
- }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ac006bed4743..8ef25ab305ae 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2056,7 +2056,7 @@ static void ci_clear_vc(struct radeon_device *rdev)
static int ci_upload_firmware(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- int i, ret;
+ int i;
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
@@ -2067,9 +2067,7 @@ static int ci_upload_firmware(struct radeon_device *rdev)
ci_stop_smc_clock(rdev);
ci_reset_smc(rdev);
- ret = ci_load_smc_ucode(rdev, pi->sram_end);
-
- return ret;
+ return ci_load_smc_ucode(rdev, pi->sram_end);
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 8be4799a98ef..638f861af80f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -34,8 +34,6 @@
#include "r600_reg_safe.h"
static int r600_nomm;
-extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
-
struct r600_cs_track {
/* configuration we mirror so that we use same code btw kms/ums */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 08f83bf2c330..166c18d62f6d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -116,7 +116,6 @@ extern int radeon_use_pflipirq;
extern int radeon_bapm;
extern int radeon_backlight;
extern int radeon_auxch;
-extern int radeon_mst;
extern int radeon_uvd;
extern int radeon_vce;
extern int radeon_si_support;
@@ -2950,8 +2949,6 @@ struct radeon_hdmi_acr {
};
-extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
-
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 28c4413f4dc8..204127bad89c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -826,8 +826,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
radeon_link_encoder_connector(dev);
-
- radeon_setup_mst_connector(dev);
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 58db79921cd3..f7431d224604 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -37,33 +37,12 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-static int radeon_dp_handle_hpd(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_dp_mst_check_status(radeon_connector);
- if (ret == -EINVAL)
- return 1;
- return 0;
-}
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- if (radeon_connector->is_mst_connector)
- return;
- if (dig_connector->is_mst) {
- radeon_dp_handle_hpd(connector);
- return;
- }
- }
/* bail if the connector does not have hpd pin, e.g.,
* VGA, TV, etc.
*/
@@ -1664,9 +1643,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int r;
- if (radeon_dig_connector->is_mst)
- return connector_status_disconnected;
-
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
@@ -1729,21 +1705,12 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
radeon_dp_getdpcd(radeon_connector);
- r = radeon_dp_mst_probe(radeon_connector);
- if (r == 1)
- ret = connector_status_disconnected;
- }
} else {
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- if (radeon_dp_getdpcd(radeon_connector)) {
- r = radeon_dp_mst_probe(radeon_connector);
- if (r == 1)
- ret = connector_status_disconnected;
- else
- ret = connector_status_connected;
- }
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
@@ -2561,25 +2528,3 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
-
-void radeon_setup_mst_connector(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
-
- if (!ASIC_IS_DCE5(rdev))
- return;
-
- if (radeon_mst == 0)
- return;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- radeon_connector = to_radeon_connector(connector);
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- radeon_dp_mst_init(radeon_connector);
- }
-}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2b12389f841a..a556b6be1137 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1438,7 +1438,6 @@ int radeon_device_init(struct radeon_device *rdev,
goto failed;
radeon_gem_debugfs_init(rdev);
- radeon_mst_debugfs_init(rdev);
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
@@ -1605,6 +1604,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f12675e3d261..ca5598ae8bfc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -38,7 +38,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
deleted file mode 100644
index 54ced1f4ff67..000000000000
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ /dev/null
@@ -1,778 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-#include <drm/display/drm_dp_mst_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_file.h>
-#include <drm/drm_probe_helper.h>
-
-#include "atom.h"
-#include "ni_reg.h"
-#include "radeon.h"
-
-static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector);
-
-static int radeon_atom_set_enc_offset(int id)
-{
- static const int offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- 0x13830 - 0x7030 };
-
- return offsets[id];
-}
-
-static int radeon_dp_mst_set_be_cntl(struct radeon_encoder *primary,
- struct radeon_encoder_mst *mst_enc,
- enum radeon_hpd_id hpd, bool enable)
-{
- struct drm_device *dev = primary->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t reg;
- int retries = 0;
- uint32_t temp;
-
- reg = RREG32(NI_DIG_BE_CNTL + primary->offset);
-
- /* set MST mode */
- reg &= ~NI_DIG_FE_DIG_MODE(7);
- reg |= NI_DIG_FE_DIG_MODE(NI_DIG_MODE_DP_MST);
-
- if (enable)
- reg |= NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
- else
- reg &= ~NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
-
- reg |= NI_DIG_HPD_SELECT(hpd);
- DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DIG_BE_CNTL + primary->offset, reg);
- WREG32(NI_DIG_BE_CNTL + primary->offset, reg);
-
- if (enable) {
- uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
-
- do {
- temp = RREG32(NI_DIG_FE_CNTL + offset);
- } while ((temp & NI_DIG_SYMCLK_FE_ON) && retries++ < 10000);
- if (retries == 10000)
- DRM_ERROR("timed out waiting for FE %d %d\n", primary->offset, mst_enc->fe);
- }
- return 0;
-}
-
-static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
- int stream_number,
- int fe,
- int slots)
-{
- struct drm_device *dev = primary->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- u32 temp, val;
- int retries = 0;
- int satreg, satidx;
-
- satreg = stream_number >> 1;
- satidx = stream_number & 1;
-
- temp = RREG32(NI_DP_MSE_SAT0 + satreg + primary->offset);
-
- val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe);
-
- val <<= (16 * satidx);
-
- temp &= ~(0xffff << (16 * satidx));
-
- temp |= val;
-
- DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
- WREG32(NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
-
- WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
-
- do {
- unsigned value1, value2;
- udelay(10);
- temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
-
- value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
- value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
-
- if (!value1 && !value2)
- break;
- } while (retries++ < 50);
-
- if (retries == 10000)
- DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
-
- /* MTP 16 ? */
- return 0;
-}
-
-static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn,
- struct radeon_encoder *primary)
-{
- struct drm_device *dev = mst_conn->base.dev;
- struct stream_attribs new_attribs[6];
- int i;
- int idx = 0;
- struct radeon_connector *radeon_connector;
- struct drm_connector *connector;
-
- memset(new_attribs, 0, sizeof(new_attribs));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_encoder *subenc;
- struct radeon_encoder_mst *mst_enc;
-
- radeon_connector = to_radeon_connector(connector);
- if (!radeon_connector->is_mst_connector)
- continue;
-
- if (radeon_connector->mst_port != mst_conn)
- continue;
-
- subenc = radeon_connector->mst_encoder;
- mst_enc = subenc->enc_priv;
-
- if (!mst_enc->enc_active)
- continue;
-
- new_attribs[idx].fe = mst_enc->fe;
- new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port);
- idx++;
- }
-
- for (i = 0; i < idx; i++) {
- if (new_attribs[i].fe != mst_conn->cur_stream_attribs[i].fe ||
- new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) {
- radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots);
- mst_conn->cur_stream_attribs[i].fe = new_attribs[i].fe;
- mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots;
- }
- }
-
- for (i = idx; i < mst_conn->enabled_attribs; i++) {
- radeon_dp_mst_set_stream_attrib(primary, i, 0, 0);
- mst_conn->cur_stream_attribs[i].fe = 0;
- mst_conn->cur_stream_attribs[i].slots = 0;
- }
- mst_conn->enabled_attribs = idx;
- return 0;
-}
-
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
-{
- struct drm_device *dev = mst->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_mst *mst_enc = mst->enc_priv;
- uint32_t val, temp;
- uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
- int retries = 0;
- uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
- uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
-
- val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
-
- WREG32(NI_DP_MSE_RATE_CNTL + offset, val);
-
- do {
- temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
- udelay(10);
- } while ((temp & 0x1) && (retries++ < 10000));
-
- if (retries >= 10000)
- DRM_ERROR("timed out wait for rate cntl %d\n", mst_enc->fe);
- return 0;
-}
-
-static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
- struct edid *edid;
- int ret = 0;
-
- edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, radeon_connector->port);
- radeon_connector->edid = edid;
- DRM_DEBUG_KMS("edid retrieved %p\n", edid);
- if (radeon_connector->edid) {
- drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
- ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- return ret;
- }
- drm_connector_update_edid_property(&radeon_connector->base, NULL);
-
- return ret;
-}
-
-static int radeon_dp_mst_get_modes(struct drm_connector *connector)
-{
- return radeon_dp_mst_get_ddc_modes(connector);
-}
-
-static enum drm_mode_status
-radeon_dp_mst_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TODO - validate mode against available PBN for link */
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- return MODE_H_ILLEGAL;
-
- return MODE_OK;
-}
-
-static struct
-drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- return &radeon_connector->mst_encoder->base;
-}
-
-static int
-radeon_dp_mst_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct radeon_connector *radeon_connector =
- to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
-
- return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
- radeon_connector->port);
-}
-
-static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
- .get_modes = radeon_dp_mst_get_modes,
- .mode_valid = radeon_dp_mst_mode_valid,
- .best_encoder = radeon_mst_best_encoder,
- .detect_ctx = radeon_dp_mst_detect,
-};
-
-static void
-radeon_dp_mst_connector_destroy(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_encoder *radeon_encoder = radeon_connector->mst_encoder;
-
- drm_encoder_cleanup(&radeon_encoder->base);
- kfree(radeon_encoder);
- drm_connector_cleanup(connector);
- kfree(radeon_connector);
-}
-
-static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = radeon_dp_mst_connector_destroy,
-};
-
-static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- const char *pathprop)
-{
- struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct radeon_connector *radeon_connector;
- struct drm_connector *connector;
-
- radeon_connector = kzalloc(sizeof(*radeon_connector), GFP_KERNEL);
- if (!radeon_connector)
- return NULL;
-
- radeon_connector->is_mst_connector = true;
- connector = &radeon_connector->base;
- radeon_connector->port = port;
- radeon_connector->mst_port = master;
- DRM_DEBUG_KMS("\n");
-
- drm_connector_init(dev, connector, &radeon_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
- drm_connector_helper_add(connector, &radeon_dp_mst_connector_helper_funcs);
- radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
-
- drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
- drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_connector_set_path_property(connector, pathprop);
-
- return connector;
-}
-
-static const struct drm_dp_mst_topology_cbs mst_cbs = {
- .add_connector = radeon_dp_add_mst_connector,
-};
-
-static struct
-radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (!connector->encoder)
- continue;
- if (!radeon_connector->is_mst_connector)
- continue;
-
- DRM_DEBUG_KMS("checking %p vs %p\n", connector->encoder, encoder);
- if (connector->encoder == encoder)
- return radeon_connector;
- }
- return NULL;
-}
-
-void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder);
- struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
- struct radeon_connector *radeon_connector = radeon_mst_find_connector(&radeon_encoder->base);
- int dp_clock;
- struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
-
- if (radeon_connector) {
- radeon_connector->pixelclock_for_modeset = mode->clock;
- if (radeon_connector->base.display_info.bpc)
- radeon_crtc->bpc = radeon_connector->base.display_info.bpc;
- else
- radeon_crtc->bpc = 8;
- }
-
- DRM_DEBUG_KMS("dp_clock %p %d\n", dig_connector, dig_connector->dp_clock);
- dp_clock = dig_connector->dp_clock;
- radeon_crtc->ss_enabled =
- radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
- ASIC_INTERNAL_SS_ON_DP,
- dp_clock);
-}
-
-static void
-radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder, *primary;
- struct radeon_encoder_mst *mst_enc;
- struct radeon_encoder_atom_dig *dig_enc;
- struct radeon_connector *radeon_connector;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
- int slots;
- s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
- if (!ASIC_IS_DCE5(rdev)) {
- DRM_ERROR("got mst dpms on non-DCE5\n");
- return;
- }
-
- radeon_connector = radeon_mst_find_connector(encoder);
- if (!radeon_connector)
- return;
-
- radeon_encoder = to_radeon_encoder(encoder);
-
- mst_enc = radeon_encoder->enc_priv;
-
- primary = mst_enc->primary;
-
- dig_enc = primary->enc_priv;
-
- crtc = encoder->crtc;
- DRM_DEBUG_KMS("got connector %d\n", dig_enc->active_mst_links);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- dig_enc->active_mst_links++;
-
- radeon_crtc = to_radeon_crtc(crtc);
-
- if (dig_enc->active_mst_links == 1) {
- mst_enc->fe = dig_enc->dig_encoder;
- mst_enc->fe_from_be = true;
- atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
-
- atombios_dig_encoder_setup(&primary->base, ATOM_ENCODER_CMD_SETUP, 0);
- atombios_dig_transmitter_setup2(&primary->base, ATOM_TRANSMITTER_ACTION_ENABLE,
- 0, 0, dig_enc->dig_encoder);
-
- if (radeon_dp_needs_link_train(mst_enc->connector) ||
- dig_enc->active_mst_links == 1) {
- radeon_dp_link_train(&primary->base, &mst_enc->connector->base);
- }
-
- } else {
- mst_enc->fe = radeon_atom_pick_dig_encoder(encoder, radeon_crtc->crtc_id);
- if (mst_enc->fe == -1)
- DRM_ERROR("failed to get frontend for dig encoder\n");
- mst_enc->fe_from_be = false;
- atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
- }
-
- DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder,
- dig_enc->linkb, radeon_crtc->crtc_id);
-
- slots = drm_dp_find_vcpi_slots(&radeon_connector->mst_port->mst_mgr,
- mst_enc->pbn);
- drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
- radeon_connector->port,
- mst_enc->pbn, slots);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
-
- radeon_dp_mst_set_be_cntl(primary, mst_enc,
- radeon_connector->mst_port->hpd.hpd, true);
-
- mst_enc->enc_active = true;
- radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-
- fixed_pbn = drm_int2fixp(mst_enc->pbn);
- fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
- avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
- radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
-
- atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
- mst_enc->fe);
- drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
-
- drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
-
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- DRM_ERROR("DPMS OFF %d\n", dig_enc->active_mst_links);
-
- if (!mst_enc->enc_active)
- return;
-
- drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
-
- drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
- /* and this can also fail */
- drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
-
- drm_dp_mst_deallocate_vcpi(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
-
- mst_enc->enc_active = false;
- radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-
- radeon_dp_mst_set_be_cntl(primary, mst_enc,
- radeon_connector->mst_port->hpd.hpd, false);
- atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0,
- mst_enc->fe);
-
- if (!mst_enc->fe_from_be)
- radeon_atom_release_dig_encoder(rdev, mst_enc->fe);
-
- mst_enc->fe_from_be = false;
- dig_enc->active_mst_links--;
- if (dig_enc->active_mst_links == 0) {
- /* drop link */
- }
-
- break;
- }
-
-}
-
-static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder_mst *mst_enc;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector_atom_dig *dig_connector;
- int bpp = 24;
-
- mst_enc = radeon_encoder->enc_priv;
-
- mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
-
- mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
- DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
- mst_enc->primary->active_device, mst_enc->primary->devices,
- mst_enc->connector->devices, mst_enc->primary->base.encoder_type);
-
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- dig_connector = mst_enc->connector->con_priv;
- dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
- dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
- DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
- dig_connector->dp_lane_count, dig_connector->dp_clock);
- return true;
-}
-
-static void radeon_mst_encoder_prepare(struct drm_encoder *encoder)
-{
- struct radeon_connector *radeon_connector;
- struct radeon_encoder *radeon_encoder, *primary;
- struct radeon_encoder_mst *mst_enc;
- struct radeon_encoder_atom_dig *dig_enc;
-
- radeon_connector = radeon_mst_find_connector(encoder);
- if (!radeon_connector) {
- DRM_DEBUG_KMS("failed to find connector %p\n", encoder);
- return;
- }
- radeon_encoder = to_radeon_encoder(encoder);
-
- radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- mst_enc = radeon_encoder->enc_priv;
-
- primary = mst_enc->primary;
-
- dig_enc = primary->enc_priv;
-
- mst_enc->port = radeon_connector->port;
-
- if (dig_enc->dig_encoder == -1) {
- dig_enc->dig_encoder = radeon_atom_pick_dig_encoder(&primary->base, -1);
- primary->offset = radeon_atom_set_enc_offset(dig_enc->dig_encoder);
- atombios_set_mst_encoder_crtc_source(encoder, dig_enc->dig_encoder);
-
-
- }
- DRM_DEBUG_KMS("%d %d\n", dig_enc->dig_encoder, primary->offset);
-}
-
-static void
-radeon_mst_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-static void radeon_mst_encoder_commit(struct drm_encoder *encoder)
-{
- radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- DRM_DEBUG_KMS("\n");
-}
-
-static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
- .dpms = radeon_mst_encoder_dpms,
- .mode_fixup = radeon_mst_mode_fixup,
- .prepare = radeon_mst_encoder_prepare,
- .mode_set = radeon_mst_encoder_mode_set,
- .commit = radeon_mst_encoder_commit,
-};
-
-static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_funcs radeon_dp_mst_enc_funcs = {
- .destroy = radeon_dp_mst_encoder_destroy,
-};
-
-static struct radeon_encoder *
-radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
-{
- struct drm_device *dev = connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder;
- struct radeon_encoder_mst *mst_enc;
- struct drm_encoder *encoder;
- const struct drm_connector_helper_funcs *connector_funcs = connector->base.helper_private;
- struct drm_encoder *enc_master = connector_funcs->best_encoder(&connector->base);
-
- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
- radeon_encoder = kzalloc(sizeof(*radeon_encoder), GFP_KERNEL);
- if (!radeon_encoder)
- return NULL;
-
- radeon_encoder->enc_priv = kzalloc(sizeof(*mst_enc), GFP_KERNEL);
- if (!radeon_encoder->enc_priv) {
- kfree(radeon_encoder);
- return NULL;
- }
- encoder = &radeon_encoder->base;
- switch (rdev->num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST, NULL);
- drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
-
- mst_enc = radeon_encoder->enc_priv;
- mst_enc->connector = connector;
- mst_enc->primary = to_radeon_encoder(enc_master);
- radeon_encoder->is_mst_encoder = true;
- return radeon_encoder;
-}
-
-int
-radeon_dp_mst_init(struct radeon_connector *radeon_connector)
-{
- struct drm_device *dev = radeon_connector->base.dev;
- int max_link_rate;
-
- if (!radeon_connector->ddc_bus->has_aux)
- return 0;
-
- if (radeon_connector_is_dp12_capable(&radeon_connector->base))
- max_link_rate = 0x14;
- else
- max_link_rate = 0x0a;
-
- radeon_connector->mst_mgr.cbs = &mst_cbs;
- return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
- &radeon_connector->ddc_bus->aux, 16, 6,
- 4, drm_dp_bw_code_to_link_rate(max_link_rate),
- radeon_connector->base.base.id);
-}
-
-int
-radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
-{
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret;
- u8 msg[1];
-
- if (!radeon_mst)
- return 0;
-
- if (!ASIC_IS_DCE5(rdev))
- return 0;
-
- if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
- return 0;
-
- ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_MSTM_CAP, msg,
- 1);
- if (ret) {
- if (msg[0] & DP_MST_CAP) {
- DRM_DEBUG_KMS("Sink is MST capable\n");
- dig_connector->is_mst = true;
- } else {
- DRM_DEBUG_KMS("Sink is not MST capable\n");
- dig_connector->is_mst = false;
- }
-
- }
- drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
- dig_connector->is_mst);
- return dig_connector->is_mst;
-}
-
-int
-radeon_dp_mst_check_status(struct radeon_connector *radeon_connector)
-{
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- int retry;
-
- if (dig_connector->is_mst) {
- u8 esi[16] = { 0 };
- int dret;
- int ret = 0;
- bool handled;
-
- dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
- DP_SINK_COUNT_ESI, esi, 8);
-go_again:
- if (dret == 8) {
- DRM_DEBUG_KMS("got esi %3ph\n", esi);
- ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled);
-
- if (handled) {
- for (retry = 0; retry < 3; retry++) {
- int wret;
- wret = drm_dp_dpcd_write(&radeon_connector->ddc_bus->aux,
- DP_SINK_COUNT_ESI + 1, &esi[1], 3);
- if (wret == 3)
- break;
- }
-
- dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
- DP_SINK_COUNT_ESI, esi, 8);
- if (dret == 8) {
- DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
- goto go_again;
- }
- } else
- ret = 0;
-
- return ret;
- } else {
- DRM_DEBUG_KMS("failed to get ESI - device may have failed %d\n", ret);
- dig_connector->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
- dig_connector->is_mst);
- /* send a hotplug event */
- }
- }
- return -EINVAL;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static int radeon_debugfs_mst_info_show(struct seq_file *m, void *unused)
-{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
- struct drm_device *dev = rdev->ddev;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- int i;
-
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- radeon_connector = to_radeon_connector(connector);
- dig_connector = radeon_connector->con_priv;
- if (radeon_connector->is_mst_connector)
- continue;
- if (!dig_connector->is_mst)
- continue;
- drm_dp_mst_dump_topology(m, &radeon_connector->mst_mgr);
-
- for (i = 0; i < radeon_connector->enabled_attribs; i++)
- seq_printf(m, "attrib %d: %d %d\n", i,
- radeon_connector->cur_stream_attribs[i].fe,
- radeon_connector->cur_stream_attribs[i].slots);
- }
- drm_modeset_unlock_all(dev);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_mst_info);
-#endif
-
-void radeon_mst_debugfs_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
-
- debugfs_create_file("radeon_mst_info", 0444, root, rdev,
- &radeon_debugfs_mst_info_fops);
-
-#endif
-}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 956c72b5aa33..6cbe1ab81aba 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -172,7 +172,6 @@ int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
int radeon_backlight = -1;
int radeon_auxch = -1;
-int radeon_mst = 0;
int radeon_uvd = 1;
int radeon_vce = 1;
@@ -263,9 +262,6 @@ module_param_named(backlight, radeon_backlight, int, 0444);
MODULE_PARM_DESC(auxch, "Use native auxch experimental support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(auxch, radeon_auxch, int, 0444);
-MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
-module_param_named(mst, radeon_mst, int, 0444);
-
MODULE_PARM_DESC(uvd, "uvd enable/disable uvd support (1 = enable, 0 = disable)");
module_param_named(uvd, radeon_uvd, int, 0444);
@@ -516,14 +512,11 @@ long radeon_drm_ioctl(struct file *filp,
static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = radeon_drm_ioctl(filp, cmd, arg);
-
- return ret;
+ return radeon_drm_ioctl(filp, cmd, arg);
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 46549d5179ee..fbc0a2182318 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -30,6 +30,8 @@
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
+#include <acpi/video.h>
+
#include "radeon.h"
#include "radeon_atombios.h"
#include "radeon_legacy_encoders.h"
@@ -167,7 +169,7 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
return;
if (radeon_backlight == 0) {
- return;
+ use_bl = false;
} else if (radeon_backlight == 1) {
use_bl = true;
} else if (radeon_backlight == -1) {
@@ -193,6 +195,13 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
else
radeon_legacy_backlight_init(radeon_encoder, connector);
}
+
+ /*
+ * If there is no native backlight device (which may happen even when
+ * use_bl==true) try registering an ACPI video backlight device instead.
+ */
+ if (!rdev->mode_info.bl_encoder)
+ acpi_video_register_backlight();
}
void
@@ -244,16 +253,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->is_mst_encoder) {
- struct radeon_encoder_mst *mst_enc;
-
- if (!radeon_connector->is_mst_connector)
- continue;
-
- mst_enc = radeon_encoder->enc_priv;
- if (mst_enc->connector == radeon_connector->mst_port)
- return connector;
- } else if (radeon_encoder->active_device & radeon_connector->devices)
+ if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -399,9 +399,6 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_DisplayPort:
- if (radeon_connector->is_mst_connector)
- return false;
-
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 3907785d0798..da2173435edd 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -100,16 +100,8 @@ static void radeon_hotplug_work_func(struct work_struct *work)
static void radeon_dp_work_func(struct work_struct *work)
{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- dp_work);
- struct drm_device *dev = rdev->ddev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
-
- /* this should take a mutex */
- list_for_each_entry(connector, &mode_config->connector_list, head)
- radeon_connector_hotplug(connector);
}
+
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 1a66fb969ee7..0cd32c65456c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -33,6 +33,8 @@
#include <drm/drm_util.h>
#include <drm/radeon_drm.h>
+#include <acpi/video.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_legacy_encoders.h"
@@ -387,6 +389,11 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
return;
#endif
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping radeon legacy LVDS backlight registration\n");
+ return;
+ }
+
pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index b34cffc162e2..9f5be416454f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -31,7 +31,6 @@
#define RADEON_MODE_H
#include <drm/display/drm_dp_helper.h>
-#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -436,24 +435,12 @@ struct radeon_encoder_atom_dig {
int panel_mode;
struct radeon_afmt *afmt;
struct r600_audio_pin *pin;
- int active_mst_links;
};
struct radeon_encoder_atom_dac {
enum radeon_tv_std tv_std;
};
-struct radeon_encoder_mst {
- int crtc;
- struct radeon_encoder *primary;
- struct radeon_connector *connector;
- struct drm_dp_mst_port *port;
- int pbn;
- int fe;
- bool fe_from_be;
- bool enc_active;
-};
-
struct radeon_encoder {
struct drm_encoder base;
uint32_t encoder_enum;
@@ -475,8 +462,6 @@ struct radeon_encoder {
enum radeon_output_csc output_csc;
bool can_mst;
uint32_t offset;
- bool is_mst_encoder;
- /* front end for this mst encoder */
};
struct radeon_connector_atom_dig {
@@ -487,7 +472,6 @@ struct radeon_connector_atom_dig {
int dp_clock;
int dp_lane_count;
bool edp_on;
- bool is_mst;
};
struct radeon_gpio_rec {
@@ -531,11 +515,6 @@ enum radeon_connector_dither {
RADEON_FMT_DITHER_ENABLE = 1,
};
-struct stream_attribs {
- uint16_t fe;
- uint16_t slots;
-};
-
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -558,14 +537,6 @@ struct radeon_connector {
enum radeon_connector_audio audio;
enum radeon_connector_dither dither;
int pixelclock_for_modeset;
- bool is_mst_connector;
- struct radeon_connector *mst_port;
- struct drm_dp_mst_port *port;
- struct drm_dp_mst_topology_mgr mst_mgr;
-
- struct radeon_encoder *mst_encoder;
- struct stream_attribs cur_stream_attribs[6];
- int enabled_attribs;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
@@ -767,8 +738,6 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set, int fe);
-extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder,
- int fe);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
@@ -905,7 +874,6 @@ extern struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_lvds *
radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
-extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
@@ -986,15 +954,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
-/* mst */
-int radeon_dp_mst_init(struct radeon_connector *radeon_connector);
-int radeon_dp_mst_probe(struct radeon_connector *radeon_connector);
-int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector);
-void radeon_mst_debugfs_init(struct radeon_device *rdev);
-void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode);
-
-void radeon_setup_mst_connector(struct drm_device *dev);
-
int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx);
void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c4a6802ca96..00c33b24d5d3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -202,9 +202,9 @@ int radeon_bo_create(struct radeon_device *rdev,
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
- r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, sg, resv,
- &radeon_ttm_bo_destroy);
+ r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
+ &bo->placement, page_align, !kernel, sg, resv,
+ &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e765abcb3b01..04c693ca419a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1899,7 +1899,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
* to false since we want to wait for vbl to avoid flicker.
*/
if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
- jiffies > rdev->pm.dynpm_action_timeout) {
+ time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
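The radeon_pm.c hunk above replaces a raw `jiffies > timeout` test with time_after(). jiffies is an unsigned counter that wraps, so a plain greater-than comparison misfires once the counter wraps past the stored deadline, while time_after() compares via signed subtraction and stays correct across the wrap. A small illustration (the helper name is ours, not the driver's):

#include <linux/jiffies.h>

/* True once 'deadline' (a jiffies value) has passed, even after wraparound. */
static bool example_deadline_expired(unsigned long deadline)
{
	return time_after(jiffies, deadline);
}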
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index f6e6a6d5d987..c959e8c6be7d 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -5,7 +5,7 @@ config DRM_RCAR_DU
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index e7275b5e7ec8..6f132325c8b7 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -14,10 +14,3 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
-
-# 'remote-endpoint' is fixed up at run-time
-DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 621bbccb95d4..3619e1ddeb62 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -17,9 +17,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "rcar_cmm.h"
@@ -31,6 +29,7 @@
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_lvds.h"
+#include "rcar_mipi_dsi.h"
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
@@ -746,7 +745,19 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- rcar_lvds_clk_enable(bridge, mode->clock * 1000);
+ rcar_lvds_pclk_enable(bridge, mode->clock * 1000);
+ }
+
+ /*
+ * Similarly to LVDS, on V3U the dot clock is provided by the DSI
+ * encoder, and we need to enable the DSI clocks before enabling the CRTC.
+ */
+ if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
+ (rstate->outputs &
+ (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
+ struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
+
+ rcar_mipi_dsi_pclk_enable(bridge, state);
}
rcar_du_crtc_start(rcrtc);
@@ -779,7 +790,20 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- rcar_lvds_clk_disable(bridge);
+ rcar_lvds_pclk_disable(bridge);
+ }
+
+ if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
+ (rstate->outputs &
+ (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
+ struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
+
+ /*
+ * Disable the DSI clock output, see
+ * rcar_du_crtc_atomic_enable().
+ */
+
+ rcar_mipi_dsi_pclk_disable(bridge);
}
spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 70d85610d720..a2776f1d6f2c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -20,15 +20,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
-#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
* Device Information
@@ -508,7 +506,8 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A779A0 has two MIPI DSI outputs. */
@@ -579,7 +578,7 @@ const char *rcar_du_output_name(enum rcar_du_output output)
* DRM operations
*/
-DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
+DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index bfad7775d9a1..5cfa2bb7ad93 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -31,6 +31,7 @@ struct rcar_du_device;
#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */
#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */
#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */
+#define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
@@ -91,6 +92,7 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_VSPS 4
#define RCAR_DU_MAX_LVDS 2
+#define RCAR_DU_MAX_DSI 2
struct rcar_du_device {
struct device *dev;
@@ -107,6 +109,7 @@ struct rcar_du_device {
struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
+ struct drm_bridge *dsi[RCAR_DU_MAX_DSI];
struct {
struct drm_property *colorkey;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 60d6be78323b..b1787be31e92 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,18 +9,13 @@
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/slab.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_managed.h>
-#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_kms.h"
#include "rcar_lvds.h"
/* -----------------------------------------------------------------------------
@@ -84,6 +79,10 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
if (output == RCAR_DU_OUTPUT_LVDS0 ||
output == RCAR_DU_OUTPUT_LVDS1)
rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
+
+ if (output == RCAR_DU_OUTPUT_DSI0 ||
+ output == RCAR_DU_OUTPUT_DSI1)
+ rcdu->dsi[output - RCAR_DU_OUTPUT_DSI0] = bridge;
}
/*
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 761451ee5263..8c2719efda2a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -11,9 +11,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -328,12 +327,12 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
*/
static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
- .free = drm_gem_cma_object_free,
- .print_info = drm_gem_cma_object_print_info,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
- .mmap = drm_gem_cma_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .free = drm_gem_dma_object_free,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = drm_gem_dma_object_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
@@ -341,33 +340,33 @@ struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
- return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
- /* Create a CMA GEM buffer. */
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
+ /* Create a DMA GEM buffer. */
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
return ERR_PTR(-ENOMEM);
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
gem_obj->funcs = &rcar_du_gem_funcs;
drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size);
- cma_obj->map_noncoherent = false;
+ dma_obj->map_noncoherent = false;
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret) {
drm_gem_object_release(gem_obj);
- kfree(cma_obj);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
- cma_obj->paddr = 0;
- cma_obj->sgt = sgt;
+ dma_obj->dma_addr = 0;
+ dma_obj->sgt = sgt;
return gem_obj;
}
@@ -390,7 +389,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = roundup(min_pitch, align);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static struct drm_framebuffer *
@@ -406,8 +405,8 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
- dev_dbg(dev->dev, "unsupported pixel format %08x\n",
- mode_cmd->pixel_format);
+ dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
+ &mode_cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 501d79367e3e..d759e0192181 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -12,11 +12,10 @@
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_group.h"
@@ -342,7 +341,7 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
if (state->source == RCAR_DU_PLANE_MEMORY) {
struct drm_framebuffer *fb = state->state.fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int i;
if (state->format->planes == 2)
@@ -351,8 +350,8 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
pitch = fb->pitches[0] * 8 / state->format->bpp;
for (i = 0; i < state->format->planes; ++i) {
- gem = drm_fb_cma_get_gem_obj(fb, i);
- dma[i] = gem->paddr + fb->offsets[i];
+ gem = drm_fb_dma_get_gem_obj(fb, i);
+ dma[i] = gem->dma_addr + fb->offsets[i];
}
} else {
pitch = drm_rect_width(&state->state.src) >> 16;
@@ -507,8 +506,15 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
- rcar_du_plane_write(rgrp, index, PnMR,
- PnMR_SPIM_TP_OFF | state->format->pnmr);
+ struct rcar_du_device *rcdu = rgrp->dev;
+ u32 pnmr = state->format->pnmr | PnMR_SPIM_TP_OFF;
+
+ if (rcdu->info->features & RCAR_DU_FEATURE_NO_BLENDING) {
+ /* No blending. ALP and EOR are not supported. */
+ pnmr &= ~(PnMR_SPIM_ALP | PnMR_SPIM_EOR);
+ }
+
+ rcar_du_plane_write(rgrp, index, PnMR, pnmr);
rcar_du_plane_write(rgrp, index, PnDDCR4,
state->format->edf | PnDDCR4_CODE);
@@ -522,7 +528,6 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
* register to 0 to avoid this.
*/
- /* TODO: Check if alpha-blending should be disabled in PnMR. */
rcar_du_plane_write(rgrp, index, PnALPHAR, 0);
}
@@ -607,8 +612,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret < 0)
return ret;
@@ -620,8 +625,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
*format = rcar_du_format_info(state->fb->format->format);
if (*format == NULL) {
- dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
- state->fb->format->format);
+ dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
+ &state->fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index dbc68cdabcff..e465aef41585 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -11,13 +11,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include <linux/bitops.h>
@@ -153,6 +152,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
.alpha = state->state.alpha >> 8,
.zpos = state->state.zpos,
};
+ u32 fourcc = state->format->fourcc;
unsigned int i;
cfg.src.left = state->state.src.x1 >> 16;
@@ -169,9 +169,27 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- format = rcar_du_format_info(state->format->fourcc);
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+ }
+ }
+
+ format = rcar_du_format_info(fourcc);
cfg.pixelformat = format->v4l2;
+ cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
+
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
@@ -184,7 +202,7 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
if (gem->sgt) {
@@ -213,7 +231,7 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
}
} else {
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
- gem->paddr, gem->base.size);
+ gem->dma_addr, gem->base.size);
if (ret)
goto fail;
}
@@ -437,6 +455,11 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_create_zpos_property(&plane->plane, i, 0,
num_planes - 1);
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
vsp->num_planes++;
}
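The rcar_du_vsp.c change above handles the "None" pixel blend mode by handing the VSP the alpha-less X variant of each ARGB format, so per-pixel alpha stored in the framebuffer is ignored rather than blended. The same remap, pulled into a stand-alone helper purely for illustration (not part of the patch):

#include <drm/drm_fourcc.h>

static u32 example_drop_per_pixel_alpha(u32 fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_ARGB1555:
		return DRM_FORMAT_XRGB1555;
	case DRM_FORMAT_ARGB4444:
		return DRM_FORMAT_XRGB4444;
	case DRM_FORMAT_ARGB8888:
		return DRM_FORMAT_XRGB8888;
	default:
		return fourcc;
	}
}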
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 25f50a297c11..8cd37d7b8ae2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -166,8 +166,8 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
wb_state->format = rcar_du_format_info(fb->format->format);
if (wb_state->format == NULL) {
- dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
- fb->format->format);
+ dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
+ &fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index d85aa4bc7f84..81a060c2fe3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -306,7 +306,7 @@ static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
* Clock - D3/E3 only
*/
-int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
int ret;
@@ -324,9 +324,9 @@ int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
return 0;
}
-EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable);
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
-void rcar_lvds_clk_disable(struct drm_bridge *bridge)
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -339,7 +339,7 @@ void rcar_lvds_clk_disable(struct drm_bridge *bridge)
clk_disable_unprepare(lvds->clocks.mod);
}
-EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
/* -----------------------------------------------------------------------------
* Bridge
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index 3097bf749bec..bee7033b60d6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -13,17 +13,17 @@
struct drm_bridge;
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
-int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
-void rcar_lvds_clk_disable(struct drm_bridge *bridge);
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq);
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge);
bool rcar_lvds_dual_link(struct drm_bridge *bridge);
bool rcar_lvds_is_connected(struct drm_bridge *bridge);
#else
-static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
- unsigned long freq)
+static inline int rcar_lvds_pclk_enable(struct drm_bridge *bridge,
+ unsigned long freq)
{
return -ENOSYS;
}
-static inline void rcar_lvds_clk_disable(struct drm_bridge *bridge) { }
+static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { }
static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
return false;
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
index 62f7eb84ab01..a7f2b7f66a17 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -25,6 +25,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include "rcar_mipi_dsi.h"
#include "rcar_mipi_dsi_regs.h"
struct rcar_mipi_dsi {
@@ -414,7 +415,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
/* Enable DOT clock */
vclkset = VCLKSET_CKEN;
- rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+ rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
if (dsi_format == 24)
vclkset |= VCLKSET_BPP_24;
@@ -429,7 +430,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div)
| VCLKSET_LANE(dsi->lanes - 1);
- rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+ rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
/* After setting VCLKSET register, enable VCLKEN */
rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN);
@@ -441,9 +442,21 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi)
{
+ /* Disable VCLKEN */
+ rcar_mipi_dsi_write(dsi, VCLKSET, 0);
+
+ /* Disable DOT clock */
+ rcar_mipi_dsi_write(dsi, VCLKSET, 0);
+
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+ /* CFGCLK disable */
+ rcar_mipi_dsi_clr(dsi, CFGCLKSET, CFGCLKSET_CKEN);
+
+ /* LPCLK disable */
+ rcar_mipi_dsi_clr(dsi, LPCLKSET, LPCLKSET_CKEN);
+
dev_dbg(dsi->dev, "DSI device is shutdown\n");
}
@@ -542,6 +555,34 @@ static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi)
return 0;
}
+static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
+{
+ u32 status;
+ int ret;
+
+ /* Disable transmission in video mode. */
+ rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_EN_VIDEO);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & TXVMSR_ACT),
+ 2000, 100000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to disable video transmission\n");
+ return;
+ }
+
+ /* Assert video FIFO clear. */
+ rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_VFCLR);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & TXVMSR_VFRDY),
+ 2000, 100000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to assert video FIFO clear\n");
+ return;
+ }
+}
+
/* -----------------------------------------------------------------------------
* Bridge
*/
@@ -558,7 +599,22 @@ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_start_video(dsi);
+}
+
+static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_stop_video(dsi);
+}
+
+void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
@@ -586,8 +642,6 @@ static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
if (ret < 0)
goto err_dsi_start_hs;
- rcar_mipi_dsi_start_video(dsi);
-
return;
err_dsi_start_hs:
@@ -595,15 +649,16 @@ err_dsi_start_hs:
err_dsi_startup:
rcar_mipi_dsi_clk_disable(dsi);
}
+EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_enable);
-static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
rcar_mipi_dsi_shutdown(dsi);
rcar_mipi_dsi_clk_disable(dsi);
}
+EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_disable);
static enum drm_mode_status
rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h
new file mode 100644
index 000000000000..528a196e6edd
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car DSI Encoder
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ *
+ * Contact: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#ifndef __RCAR_MIPI_DSI_H__
+#define __RCAR_MIPI_DSI_H__
+
+struct drm_atomic_state;
+struct drm_bridge;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_MIPI_DSI)
+void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge);
+#else
+static inline void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+}
+
+static inline void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
+{
+}
+#endif /* CONFIG_DRM_RCAR_MIPI_DSI */
+
+#endif /* __RCAR_MIPI_DSI_H__ */
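A hedged sketch of how a caller is expected to drive the new interface (illustrative names only; the stubs above keep the calls safe when CONFIG_DRM_RCAR_MIPI_DSI is disabled):

	#include <drm/drm_atomic.h>
	#include <drm/drm_bridge.h>
	#include "rcar_mipi_dsi.h"

	/* Sketch only: bring the DSI pixel/PHY clocks up from the caller's
	 * atomic enable path and tear them down again on disable. */
	static void example_encoder_enable(struct drm_bridge *dsi_bridge,
					   struct drm_atomic_state *state)
	{
		rcar_mipi_dsi_pclk_enable(dsi_bridge, state);
	}

	static void example_encoder_disable(struct drm_bridge *dsi_bridge)
	{
		rcar_mipi_dsi_pclk_disable(dsi_bridge);
	}
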
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 53c2d9980d48..1bf3e2829cd0 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,7 +2,7 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c204e9b95c1f..518ee13b1d6f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 110e83aad9bb..bf6948125b84 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -179,6 +179,23 @@
#define RK3399_TXRX_SRC_SEL_ISP0 BIT(4)
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0)
+#define RK3568_GRF_VO_CON2 0x0368
+#define RK3568_DSI0_SKEWCALHS (0x1f << 11)
+#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI0_TURNDISABLE BIT(2)
+#define RK3568_DSI0_FORCERXMODE BIT(0)
+
+/*
+ * Note that these registers do not appear in the datasheet; they are,
+ * however, present in the BSP driver, which is where these values
+ * come from. The name GRF_VO_CON3 is assumed.
+ */
+#define RK3568_GRF_VO_CON3 0x36c
+#define RK3568_DSI1_SKEWCALHS (0x1f << 11)
+#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI1_TURNDISABLE BIT(2)
+#define RK3568_DSI1_FORCERXMODE BIT(0)
+
#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
enum {
@@ -735,8 +752,9 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
int mux)
{
- regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
- mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+ if (dsi->cdata->lcdsel_grf_reg >= 0)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
}
static int
@@ -963,6 +981,8 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
goto out_pll_clk;
}
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dsi->encoder,
+ dev->of_node, 0, 0);
ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder.encoder);
if (ret) {
@@ -1612,6 +1632,30 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
+ {
+ .reg = 0xfe060000,
+ .lcdsel_grf_reg = -1,
+ .lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
+ RK3568_DSI0_FORCETXSTOPMODE |
+ RK3568_DSI0_TURNDISABLE |
+ RK3568_DSI0_FORCERXMODE),
+ .max_data_lanes = 4,
+ },
+ {
+ .reg = 0xfe070000,
+ .lcdsel_grf_reg = -1,
+ .lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
+ RK3568_DSI1_FORCETXSTOPMODE |
+ RK3568_DSI1_TURNDISABLE |
+ RK3568_DSI1_FORCERXMODE),
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
{
.compatible = "rockchip,px30-mipi-dsi",
@@ -1622,6 +1666,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
}, {
.compatible = "rockchip,rk3399-mipi-dsi",
.data = &rk3399_chip_data,
+ }, {
+ .compatible = "rockchip,rk3568-mipi-dsi",
+ .data = &rk3568_chip_data,
},
{ /* sentinel */ }
};
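For reference, the RK3568 lane configuration above relies on the GRF hiword-mask convention encoded by HIWORD_UPDATE(): the upper 16 bits of the written word select which of the lower 16 bits take effect. A worked illustration (grf here stands for the GRF regmap handle; the values follow from the masks defined above):

	/*
	 * mask = RK3568_DSI0_SKEWCALHS | RK3568_DSI0_FORCETXSTOPMODE |
	 *        RK3568_DSI0_TURNDISABLE | RK3568_DSI0_FORCERXMODE
	 *      = (0x1f << 11) | (0xf << 4) | BIT(2) | BIT(0) = 0xf8f5
	 * HIWORD_UPDATE(0, 0xf8f5) = 0 | (0xf8f5 << 16) = 0xf8f50000
	 * so the write clears all four fields and leaves every other bit of
	 * GRF_VO_CON2 untouched.
	 */
	regmap_write(grf, RK3568_GRF_VO_CON2,
		     HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
				      RK3568_DSI0_FORCETXSTOPMODE |
				      RK3568_DSI0_TURNDISABLE |
				      RK3568_DSI0_FORCERXMODE));
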
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 13ed33e74457..813f9f8c8698 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 985584147da1..614e97aaac80 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -10,7 +10,7 @@
#include <drm/drm.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
@@ -279,7 +279,7 @@ static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
.vmap = rockchip_gem_prime_vmap,
.vunmap = rockchip_gem_prime_vunmap,
.mmap = rockchip_drm_gem_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
static struct rockchip_gem_object *
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ad3958b6f8bf..c356de5dd220 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -27,7 +28,6 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -68,6 +68,9 @@
#define VOP_REG_SET(vop, group, name, v) \
vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)
+#define VOP_HAS_REG(vop, group, name) \
+ (!!(vop->data->group->name.mask))
+
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
do { \
int i, reg = 0, mask = 0; \
@@ -185,12 +188,6 @@ struct vop {
struct vop_win win[];
};
-static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
-{
- writel(v, vop->regs + offset);
- vop->regsbak[offset >> 2] = v;
-}
-
static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
return readl(vop->regs + offset);
@@ -809,9 +806,9 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
const struct vop_win_data *win = vop_win->data;
int ret;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
if (!crtc || WARN_ON(!fb))
return 0;
@@ -1060,9 +1057,9 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
struct drm_crtc_state *crtc_state;
if (plane != new_plane_state->crtc->cursor)
@@ -1189,7 +1186,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
*
* Key points:
*
- * - DRM works in in kHz.
+ * - DRM works in kHz.
* - Clock framework works in Hz.
* - Rockchip's clock driver picks the clock rate that is the
* same _OR LOWER_ than the one requested.
@@ -1224,17 +1221,22 @@ static bool vop_dsp_lut_is_enabled(struct vop *vop)
return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
}
+static u32 vop_lut_buffer_index(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->lut_buffer_index);
+}
+
static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
{
struct drm_color_lut *lut = crtc->state->gamma_lut->data;
- unsigned int i;
+ unsigned int i, bpc = ilog2(vop->data->lut_size);
for (i = 0; i < crtc->gamma_size; i++) {
u32 word;
- word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
- (drm_color_lut_extract(lut[i].green, 10) << 10) |
- drm_color_lut_extract(lut[i].blue, 10);
+ word = (drm_color_lut_extract(lut[i].red, bpc) << (2 * bpc)) |
+ (drm_color_lut_extract(lut[i].green, bpc) << bpc) |
+ drm_color_lut_extract(lut[i].blue, bpc);
writel(word, vop->lut_regs + i * 4);
}
}
@@ -1244,38 +1246,66 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
{
struct drm_crtc_state *state = crtc->state;
unsigned int idle;
+ u32 lut_idx, old_idx;
int ret;
if (!vop->lut_regs)
return;
- /*
- * To disable gamma (gamma_lut is null) or to write
- * an update to the LUT, clear dsp_lut_en.
- */
- spin_lock(&vop->reg_lock);
- VOP_REG_SET(vop, common, dsp_lut_en, 0);
- vop_cfg_done(vop);
- spin_unlock(&vop->reg_lock);
- /*
- * In order to write the LUT to the internal memory,
- * we need to first make sure the dsp_lut_en bit is cleared.
- */
- ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
- idle, !idle, 5, 30 * 1000);
- if (ret) {
- DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
- return;
- }
+ if (!state->gamma_lut || !VOP_HAS_REG(vop, common, update_gamma_lut)) {
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
- if (!state->gamma_lut)
- return;
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+ } else {
+ /*
+ * On RK3399 the gamma LUT can be updated without clearing dsp_lut_en,
+ * by setting update_gamma_lut and then waiting for lut_buffer_index to change.
+ */
+ old_idx = vop_lut_buffer_index(vop);
+ }
spin_lock(&vop->reg_lock);
vop_crtc_write_gamma_lut(vop, crtc);
VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ VOP_REG_SET(vop, common, update_gamma_lut, 1);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
+
+ if (VOP_HAS_REG(vop, common, update_gamma_lut)) {
+ ret = readx_poll_timeout(vop_lut_buffer_index, vop,
+ lut_idx, lut_idx != old_idx, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "gamma LUT update timeout!\n");
+ return;
+ }
+
+ /*
+ * update_gamma_lut is auto cleared by HW, but write 0 to clear the bit
+ * in our backup of the regs.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, update_gamma_lut, 0);
+ spin_unlock(&vop->reg_lock);
+ }
}
static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -1325,14 +1355,6 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
- /*
- * If we have a GAMMA LUT in the state, then let's make sure
- * it's updated. We might be coming out of suspend,
- * which means the LUT internal memory needs to be re-written.
- */
- if (crtc->state->gamma_lut)
- vop_crtc_gamma_set(vop, crtc, old_state);
-
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
@@ -1423,6 +1445,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
VOP_REG_SET(vop, common, standby, 0);
mutex_unlock(&vop->vop_lock);
+
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
}
static bool vop_fs_irq_is_pending(struct vop *vop)
@@ -2148,8 +2178,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
- if (!vop_data->lut_size) {
- DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ if (vop_data->lut_size != 1024 && vop_data->lut_size != 256) {
+ DRM_DEV_ERROR(dev, "unsupported gamma LUT size %d\n", vop_data->lut_size);
return -EINVAL;
}
vop->lut_regs = devm_ioremap_resource(dev, res);
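The gamma LUT packing above now depends on the per-SoC LUT size rather than hard-coding 10 bits per component. A hedged illustration of the arithmetic (example_pack_lut_word() is illustrative only, not part of the driver):

	#include <linux/log2.h>
	#include <drm/drm_color_mgmt.h>

	/* One LUT word: 10-bit components shifted by 20/10/0 for a 1024-entry
	 * LUT (bpc = ilog2(1024) = 10), 8-bit components shifted by 16/8/0 for
	 * a 256-entry LUT (bpc = 8). */
	static u32 example_pack_lut_word(const struct drm_color_lut *e,
					 unsigned int lut_size)
	{
		unsigned int bpc = ilog2(lut_size);

		return (drm_color_lut_extract(e->red, bpc) << (2 * bpc)) |
		       (drm_color_lut_extract(e->green, bpc) << bpc) |
		       drm_color_lut_extract(e->blue, bpc);
	}
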
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index ba88addc1a75..8502849833d9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -113,6 +113,8 @@ struct vop_common {
struct vop_reg dither_down_en;
struct vop_reg dither_up;
struct vop_reg dsp_lut_en;
+ struct vop_reg update_gamma_lut;
+ struct vop_reg lut_buffer_index;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index e4631f515ba4..aac20be5ac08 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -29,7 +29,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -1439,11 +1438,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_EDP0:
die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_EDP |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_MIPI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d03dd0402923..014f99e8928e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -875,6 +875,24 @@ static const struct vop_output rk3399_output = {
.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
};
+static const struct vop_common rk3399_common = {
+ .standby = VOP_REG_SYNC(RK3399_SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+ .mmu_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 20),
+ .dither_down_sel = VOP_REG(RK3399_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3399_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 2),
+ .pre_dither_down = VOP_REG(RK3399_DSP_CTRL1, 0x1, 1),
+ .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 0),
+ .update_gamma_lut = VOP_REG(RK3399_DSP_CTRL1, 0x1, 7),
+ .lut_buffer_index = VOP_REG(RK3399_DBG_POST_REG1, 0x1, 1),
+ .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
+ .dsp_blank = VOP_REG(RK3399_DSP_CTRL0, 0x3, 18),
+ .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG_SYNC(RK3399_REG_CFG_DONE, 0x1, 0),
+};
+
static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = {
.y2r_coefficients = {
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0),
@@ -957,7 +975,7 @@ static const struct vop_data rk3399_vop_big = {
.version = VOP_VERSION(3, 5),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3366_vop_intr,
- .common = &rk3288_common,
+ .common = &rk3399_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.afbc = &rk3399_vop_afbc,
@@ -965,6 +983,7 @@ static const struct vop_data rk3399_vop_big = {
.win = rk3399_vop_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
+ .lut_size = 1024,
};
static const struct vop_win_data rk3399_vop_lit_win_data[] = {
@@ -983,13 +1002,14 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = {
static const struct vop_data rk3399_vop_lit = {
.version = VOP_VERSION(3, 6),
.intr = &rk3366_vop_intr,
- .common = &rk3288_common,
+ .common = &rk3399_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.misc = &rk3368_misc,
.win = rk3399_vop_lit_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
.win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
+ .lut_size = 256,
};
static const struct vop_win_data rk3228_vop_win_data[] = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index 0b3cd65ba5c1..406e981c75bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -628,6 +628,7 @@
#define RK3399_YUV2YUV_WIN 0x02c0
#define RK3399_YUV2YUV_POST 0x02c4
#define RK3399_AUTO_GATING_EN 0x02cc
+#define RK3399_DBG_POST_REG1 0x036c
#define RK3399_WIN0_CSC_COE 0x03a0
#define RK3399_WIN1_CSC_COE 0x03c0
#define RK3399_WIN2_CSC_COE 0x03e0
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 68317d3a7a27..e5a4ecde0063 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -198,7 +198,7 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_dependency_optimized
+ * drm_sched_dependency_optimized - test if the dependency can be optimized
*
* @fence: the dependency fence
* @entity: the entity which depends on the above fence
@@ -592,7 +592,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
- drm_sched_entity_select_rq(entity);
if (!entity->rq)
return -ENOENT;
@@ -628,7 +627,7 @@ void drm_sched_job_arm(struct drm_sched_job *job)
struct drm_sched_entity *entity = job->entity;
BUG_ON(!entity);
-
+ drm_sched_entity_select_rq(entity);
sched = entity->rq->sched;
job->sched = sched;
@@ -994,6 +993,7 @@ static int drm_sched_main(void *param)
* used
* @score: optional score atomic shared with other schedulers
* @name: name used for debugging
+ * @dev: target &struct device
*
* Return 0 on success, otherwise error code.
*/
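The scheduler change above defers run-queue selection from job initialisation to job arming; a driver's submission path still follows the usual sequence (sketch only, with hypothetical function and variable names):

	#include <drm/gpu_scheduler.h>

	/* Sketch only: run-queue selection now happens in drm_sched_job_arm(),
	 * so the entity's priority/rq may still change between init and arm. */
	static int example_submit(struct drm_sched_job *job,
				  struct drm_sched_entity *entity, void *owner)
	{
		int ret;

		ret = drm_sched_job_init(job, entity, owner);
		if (ret)
			return ret;

		/* ... add dependencies, reserve fence slots, etc. ... */

		drm_sched_job_arm(job);
		drm_sched_entity_push_job(job);
		return 0;
	}
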
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
deleted file mode 100644
index 5ba5f9138c95..000000000000
--- a/drivers/gpu/drm/selftests/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
- test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o test-drm_dp_mst_helper.o \
- test-drm_rect.o
-
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o \
- test-drm_buddy.o
diff --git a/drivers/gpu/drm/selftests/drm_buddy_selftests.h b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
deleted file mode 100644
index 455b756c4ae5..000000000000
--- a/drivers/gpu/drm/selftests/drm_buddy_selftests.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_buddy
- */
-selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
-selftest(buddy_alloc_limit, igt_buddy_alloc_limit)
-selftest(buddy_alloc_range, igt_buddy_alloc_range)
-selftest(buddy_alloc_optimistic, igt_buddy_alloc_optimistic)
-selftest(buddy_alloc_pessimistic, igt_buddy_alloc_pessimistic)
-selftest(buddy_alloc_smoke, igt_buddy_alloc_smoke)
-selftest(buddy_alloc_pathological, igt_buddy_alloc_pathological)
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
deleted file mode 100644
index 29e367db6118..000000000000
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_mm
- */
-
-#define cmdline_test(test) selftest(test, test)
-
-cmdline_test(drm_cmdline_test_force_d_only)
-cmdline_test(drm_cmdline_test_force_D_only_dvi)
-cmdline_test(drm_cmdline_test_force_D_only_hdmi)
-cmdline_test(drm_cmdline_test_force_D_only_not_digital)
-cmdline_test(drm_cmdline_test_force_e_only)
-cmdline_test(drm_cmdline_test_margin_only)
-cmdline_test(drm_cmdline_test_interlace_only)
-cmdline_test(drm_cmdline_test_res)
-cmdline_test(drm_cmdline_test_res_missing_x)
-cmdline_test(drm_cmdline_test_res_missing_y)
-cmdline_test(drm_cmdline_test_res_bad_y)
-cmdline_test(drm_cmdline_test_res_missing_y_bpp)
-cmdline_test(drm_cmdline_test_res_vesa)
-cmdline_test(drm_cmdline_test_res_vesa_rblank)
-cmdline_test(drm_cmdline_test_res_rblank)
-cmdline_test(drm_cmdline_test_res_bpp)
-cmdline_test(drm_cmdline_test_res_bad_bpp)
-cmdline_test(drm_cmdline_test_res_refresh)
-cmdline_test(drm_cmdline_test_res_bad_refresh)
-cmdline_test(drm_cmdline_test_res_bpp_refresh)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_interlaced)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_margins)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_off)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on_off)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on_analog)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on_digital)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on)
-cmdline_test(drm_cmdline_test_res_margins_force_on)
-cmdline_test(drm_cmdline_test_res_vesa_margins)
-cmdline_test(drm_cmdline_test_res_invalid_mode)
-cmdline_test(drm_cmdline_test_res_bpp_wrong_place_mode)
-cmdline_test(drm_cmdline_test_name)
-cmdline_test(drm_cmdline_test_name_bpp)
-cmdline_test(drm_cmdline_test_name_refresh)
-cmdline_test(drm_cmdline_test_name_bpp_refresh)
-cmdline_test(drm_cmdline_test_name_refresh_wrong_mode)
-cmdline_test(drm_cmdline_test_name_refresh_invalid_mode)
-cmdline_test(drm_cmdline_test_name_option)
-cmdline_test(drm_cmdline_test_name_bpp_option)
-cmdline_test(drm_cmdline_test_rotate_0)
-cmdline_test(drm_cmdline_test_rotate_90)
-cmdline_test(drm_cmdline_test_rotate_180)
-cmdline_test(drm_cmdline_test_rotate_270)
-cmdline_test(drm_cmdline_test_rotate_multiple)
-cmdline_test(drm_cmdline_test_rotate_invalid_val)
-cmdline_test(drm_cmdline_test_rotate_truncated)
-cmdline_test(drm_cmdline_test_hmirror)
-cmdline_test(drm_cmdline_test_vmirror)
-cmdline_test(drm_cmdline_test_margin_options)
-cmdline_test(drm_cmdline_test_multiple_options)
-cmdline_test(drm_cmdline_test_invalid_option)
-cmdline_test(drm_cmdline_test_bpp_extra_and_option)
-cmdline_test(drm_cmdline_test_extra_and_option)
-cmdline_test(drm_cmdline_test_freestanding_options)
-cmdline_test(drm_cmdline_test_freestanding_force_e_and_options)
-cmdline_test(drm_cmdline_test_panel_orientation)
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
deleted file mode 100644
index 8c87c964176b..000000000000
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_mm
- */
-selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
-selftest(init, igt_init)
-selftest(debug, igt_debug)
-selftest(reserve, igt_reserve)
-selftest(insert, igt_insert)
-selftest(replace, igt_replace)
-selftest(insert_range, igt_insert_range)
-selftest(align, igt_align)
-selftest(frag, igt_frag)
-selftest(align32, igt_align32)
-selftest(align64, igt_align64)
-selftest(evict, igt_evict)
-selftest(evict_range, igt_evict_range)
-selftest(bottomup, igt_bottomup)
-selftest(lowest, igt_lowest)
-selftest(topdown, igt_topdown)
-selftest(highest, igt_highest)
-selftest(color, igt_color)
-selftest(color_evict, igt_color_evict)
-selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
deleted file mode 100644
index 782e285ca383..000000000000
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_selftests_helper
- */
-selftest(drm_rect_clip_scaled_div_by_zero, igt_drm_rect_clip_scaled_div_by_zero)
-selftest(drm_rect_clip_scaled_not_clipped, igt_drm_rect_clip_scaled_not_clipped)
-selftest(drm_rect_clip_scaled_clipped, igt_drm_rect_clip_scaled_clipped)
-selftest(drm_rect_clip_scaled_signed_vs_unsigned, igt_drm_rect_clip_scaled_signed_vs_unsigned)
-selftest(check_plane_state, igt_check_plane_state)
-selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
-selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
-selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
-selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
-selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
-selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
-selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
-selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
-selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
-selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
-selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
-selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
-selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
-selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
-selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
-selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
-selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
-selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
-selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
-selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
-selftest(damage_iter_damage, igt_damage_iter_damage)
-selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
-selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
-selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
-selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
-selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode)
-selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode)
diff --git a/drivers/gpu/drm/selftests/drm_selftest.c b/drivers/gpu/drm/selftests/drm_selftest.c
deleted file mode 100644
index e29ed9faef5b..000000000000
--- a/drivers/gpu/drm/selftests/drm_selftest.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/compiler.h>
-
-#define selftest(name, func) __idx_##name,
-enum {
-#include TESTS
-};
-#undef selftest
-
-#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
-static struct drm_selftest {
- bool enabled;
- const char *name;
- int (*func)(void *);
-} selftests[] = {
-#include TESTS
-};
-#undef selftest
-
-/* Embed the line number into the parameter name so that we can order tests */
-#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
-#define selftest_0(n, func, id) \
-module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
-#define selftest(n, func) selftest_0(n, func, param(n))
-#include TESTS
-#undef selftest
-
-static void set_default_test_all(struct drm_selftest *st, unsigned long count)
-{
- unsigned long i;
-
- for (i = 0; i < count; i++)
- if (st[i].enabled)
- return;
-
- for (i = 0; i < count; i++)
- st[i].enabled = true;
-}
-
-static int run_selftests(struct drm_selftest *st,
- unsigned long count,
- void *data)
-{
- int err = 0;
-
- set_default_test_all(st, count);
-
- /* Tests are listed in natural order in drm_*_selftests.h */
- for (; count--; st++) {
- if (!st->enabled)
- continue;
-
- pr_debug("drm: Running %s\n", st->name);
- err = st->func(data);
- if (err)
- break;
- }
-
- if (WARN(err > 0 || err == -ENOTTY,
- "%s returned %d, conflicting with selftest's magic values!\n",
- st->name, err))
- err = -1;
-
- rcu_barrier();
- return err;
-}
-
-static int __maybe_unused
-__drm_subtests(const char *caller,
- const struct drm_subtest *st,
- int count,
- void *data)
-{
- int err;
-
- for (; count--; st++) {
- pr_debug("Running %s/%s\n", caller, st->name);
- err = st->func(data);
- if (err) {
- pr_err("%s: %s failed with error %d\n",
- caller, st->name, err);
- return err;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/selftests/drm_selftest.h b/drivers/gpu/drm/selftests/drm_selftest.h
deleted file mode 100644
index c784ec02ff53..000000000000
--- a/drivers/gpu/drm/selftests/drm_selftest.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __DRM_SELFTEST_H__
-#define __DRM_SELFTEST_H__
-
-struct drm_subtest {
- int (*func)(void *data);
- const char *name;
-};
-
-static int __drm_subtests(const char *caller,
- const struct drm_subtest *st,
- int count,
- void *data);
-#define drm_subtests(T, data) \
- __drm_subtests(__func__, T, ARRAY_SIZE(T), data)
-
-#define SUBTEST(x) { x, #x }
-
-#endif /* __DRM_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/selftests/test-drm_buddy.c b/drivers/gpu/drm/selftests/test-drm_buddy.c
deleted file mode 100644
index aca0c491040f..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_buddy.c
+++ /dev/null
@@ -1,994 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#define pr_fmt(fmt) "drm_buddy: " fmt
-
-#include <linux/module.h>
-#include <linux/prime_numbers.h>
-#include <linux/sched/signal.h>
-
-#include <drm/drm_buddy.h>
-
-#include "../lib/drm_random.h"
-
-#define TESTS "drm_buddy_selftests.h"
-#include "drm_selftest.h"
-
-#define IGT_TIMEOUT(name__) \
- unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
-
-static unsigned int random_seed;
-
-static inline u64 get_size(int order, u64 chunk_size)
-{
- return (1 << order) * chunk_size;
-}
-
-__printf(2, 3)
-static bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
-{
- va_list va;
-
- if (!signal_pending(current)) {
- cond_resched();
- if (time_before(jiffies, timeout))
- return false;
- }
-
- if (fmt) {
- va_start(va, fmt);
- vprintk(fmt, va);
- va_end(va);
- }
-
- return true;
-}
-
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static void __igt_dump_block(struct drm_buddy *mm,
- struct drm_buddy_block *block,
- bool buddy)
-{
- pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
- block->header,
- drm_buddy_block_state(block),
- drm_buddy_block_order(block),
- drm_buddy_block_offset(block),
- drm_buddy_block_size(mm, block),
- yesno(!block->parent),
- yesno(buddy));
-}
-
-static void igt_dump_block(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
-
- __igt_dump_block(mm, block, false);
-
- buddy = drm_get_buddy(block);
- if (buddy)
- __igt_dump_block(mm, buddy, true);
-}
-
-static int igt_check_block(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
- unsigned int block_state;
- u64 block_size;
- u64 offset;
- int err = 0;
-
- block_state = drm_buddy_block_state(block);
-
- if (block_state != DRM_BUDDY_ALLOCATED &&
- block_state != DRM_BUDDY_FREE &&
- block_state != DRM_BUDDY_SPLIT) {
- pr_err("block state mismatch\n");
- err = -EINVAL;
- }
-
- block_size = drm_buddy_block_size(mm, block);
- offset = drm_buddy_block_offset(block);
-
- if (block_size < mm->chunk_size) {
- pr_err("block size smaller than min size\n");
- err = -EINVAL;
- }
-
- if (!is_power_of_2(block_size)) {
- pr_err("block size not power of two\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(block_size, mm->chunk_size)) {
- pr_err("block size not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, mm->chunk_size)) {
- pr_err("block offset not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, block_size)) {
- pr_err("block offset not aligned to block size\n");
- err = -EINVAL;
- }
-
- buddy = drm_get_buddy(block);
-
- if (!buddy && block->parent) {
- pr_err("buddy has gone fishing\n");
- err = -EINVAL;
- }
-
- if (buddy) {
- if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
- pr_err("buddy has wrong offset\n");
- err = -EINVAL;
- }
-
- if (drm_buddy_block_size(mm, buddy) != block_size) {
- pr_err("buddy size mismatch\n");
- err = -EINVAL;
- }
-
- if (drm_buddy_block_state(buddy) == block_state &&
- block_state == DRM_BUDDY_FREE) {
- pr_err("block and its buddy are free\n");
- err = -EINVAL;
- }
- }
-
- return err;
-}
-
-static int igt_check_blocks(struct drm_buddy *mm,
- struct list_head *blocks,
- u64 expected_size,
- bool is_contiguous)
-{
- struct drm_buddy_block *block;
- struct drm_buddy_block *prev;
- u64 total;
- int err = 0;
-
- block = NULL;
- prev = NULL;
- total = 0;
-
- list_for_each_entry(block, blocks, link) {
- err = igt_check_block(mm, block);
-
- if (!drm_buddy_block_is_allocated(block)) {
- pr_err("block not allocated\n"),
- err = -EINVAL;
- }
-
- if (is_contiguous && prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(block);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("block offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- if (err)
- break;
-
- total += drm_buddy_block_size(mm, block);
- prev = block;
- }
-
- if (!err) {
- if (total != expected_size) {
- pr_err("size mismatch, expected=%llx, found=%llx\n",
- expected_size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev block, dump:\n");
- igt_dump_block(mm, prev);
- }
-
- pr_err("bad block, dump:\n");
- igt_dump_block(mm, block);
-
- return err;
-}
-
-static int igt_check_mm(struct drm_buddy *mm)
-{
- struct drm_buddy_block *root;
- struct drm_buddy_block *prev;
- unsigned int i;
- u64 total;
- int err = 0;
-
- if (!mm->n_roots) {
- pr_err("n_roots is zero\n");
- return -EINVAL;
- }
-
- if (mm->n_roots != hweight64(mm->size)) {
- pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
- mm->n_roots, hweight64(mm->size));
- return -EINVAL;
- }
-
- root = NULL;
- prev = NULL;
- total = 0;
-
- for (i = 0; i < mm->n_roots; ++i) {
- struct drm_buddy_block *block;
- unsigned int order;
-
- root = mm->roots[i];
- if (!root) {
- pr_err("root(%u) is NULL\n", i);
- err = -EINVAL;
- break;
- }
-
- err = igt_check_block(mm, root);
-
- if (!drm_buddy_block_is_free(root)) {
- pr_err("root not free\n");
- err = -EINVAL;
- }
-
- order = drm_buddy_block_order(root);
-
- if (!i) {
- if (order != mm->max_order) {
- pr_err("max order root missing\n");
- err = -EINVAL;
- }
- }
-
- if (prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(root);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("root offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- block = list_first_entry_or_null(&mm->free_list[order],
- struct drm_buddy_block,
- link);
- if (block != root) {
- pr_err("root mismatch at order=%u\n", order);
- err = -EINVAL;
- }
-
- if (err)
- break;
-
- prev = root;
- total += drm_buddy_block_size(mm, root);
- }
-
- if (!err) {
- if (total != mm->size) {
- pr_err("expected mm size=%llx, found=%llx\n", mm->size,
- total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev root(%u), dump:\n", i - 1);
- igt_dump_block(mm, prev);
- }
-
- if (root) {
- pr_err("bad root(%u), dump:\n", i);
- igt_dump_block(mm, root);
- }
-
- return err;
-}
-
-static void igt_mm_config(u64 *size, u64 *chunk_size)
-{
- DRM_RND_STATE(prng, random_seed);
- u32 s, ms;
-
- /* Nothing fancy, just try to get an interesting bit pattern */
-
- prandom_seed_state(&prng, random_seed);
-
- /* Let size be a random number of pages up to 8 GB (2M pages) */
- s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
- /* Let the chunk size be a random power of 2 less than size */
- ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
- /* Round size down to the chunk size */
- s &= -ms;
-
- /* Convert from pages to bytes */
- *chunk_size = (u64)ms << 12;
- *size = (u64)s << 12;
-}
-
-static int igt_buddy_alloc_pathological(void *arg)
-{
- u64 mm_size, size, min_page_size, start = 0;
- struct drm_buddy_block *block;
- const int max_order = 3;
- unsigned long flags = 0;
- int order, top, err;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
- LIST_HEAD(holes);
- LIST_HEAD(tmp);
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left. Free the largest block, then whittle down again.
- * Eventually we will have a fully 50% fragmented mm.
- */
-
- mm_size = PAGE_SIZE << max_order;
- err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- BUG_ON(mm.max_order != max_order);
-
- for (top = max_order; top; top--) {
- /* Make room by freeing the largest allocated block */
- block = list_first_entry_or_null(&blocks, typeof(*block), link);
- if (block) {
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
- }
-
- for (order = top; order--; ) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
- min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
- order, top);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- }
-
- /* There should be one final page for this sub-allocation */
- size = min_page_size = get_size(0, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM for hole\n");
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &holes);
-
- size = min_page_size = get_size(top, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
- top, max_order);
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- drm_buddy_free_list(&mm, &holes);
-
- /* Nothing larger than blocks of chunk_size now available */
- for (order = 1; order <= max_order; order++) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- if (err)
- err = 0;
-
-err:
- list_splice_tail(&holes, &blocks);
- drm_buddy_free_list(&mm, &blocks);
- drm_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_smoke(void *arg)
-{
- u64 mm_size, min_page_size, chunk_size, start = 0;
- unsigned long flags = 0;
- struct drm_buddy mm;
- int *order;
- int err, i;
-
- DRM_RND_STATE(prng, random_seed);
- IGT_TIMEOUT(end_time);
-
- igt_mm_config(&mm_size, &chunk_size);
-
- err = drm_buddy_init(&mm, mm_size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- order = drm_random_order(mm.max_order + 1, &prng);
- if (!order) {
- err = -ENOMEM;
- goto out_fini;
- }
-
- for (i = 0; i <= mm.max_order; ++i) {
- struct drm_buddy_block *block;
- int max_order = order[i];
- bool timeout = false;
- LIST_HEAD(blocks);
- u64 total, size;
- LIST_HEAD(tmp);
- int order;
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort\n");
- break;
- }
-
- order = max_order;
- total = 0;
-
- do {
-retry:
- size = min_page_size = get_size(order, chunk_size);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
- min_page_size, &tmp, flags);
- if (err) {
- if (err == -ENOMEM) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- } else {
- if (order--) {
- err = 0;
- goto retry;
- }
-
- pr_err("buddy_alloc with order=%d failed(%d)\n",
- order, err);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- break;
- }
-
- list_move_tail(&block->link, &blocks);
-
- if (drm_buddy_block_order(block) != order) {
- pr_err("buddy_alloc order mismatch\n");
- err = -EINVAL;
- break;
- }
-
- total += drm_buddy_block_size(&mm, block);
-
- if (__igt_timeout(end_time, NULL)) {
- timeout = true;
- break;
- }
- } while (total < mm.size);
-
- if (!err)
- err = igt_check_blocks(&mm, &blocks, total, false);
-
- drm_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
- if (err || timeout)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- kfree(order);
-out_fini:
- drm_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_pessimistic(void *arg)
-{
- u64 mm_size, size, min_page_size, start = 0;
- struct drm_buddy_block *block, *bn;
- const unsigned int max_order = 16;
- unsigned long flags = 0;
- struct drm_buddy mm;
- unsigned int order;
- LIST_HEAD(blocks);
- LIST_HEAD(tmp);
- int err;
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left.
- */
-
- mm_size = PAGE_SIZE << max_order;
- err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order < max_order; order++) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- }
-
- /* And now the last remaining block available */
- size = min_page_size = get_size(0, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
-
- /* Should be completely full! */
- for (order = max_order; order--; ) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- block = list_last_entry(&blocks, typeof(*block), link);
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
-
- /* As we free in increasing size, we make available larger blocks */
- order = 1;
- list_for_each_entry_safe(block, bn, &blocks, link) {
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
-
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
- order++;
- }
-
- /* To confirm, now the whole mm should be available */
- size = min_page_size = get_size(max_order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- max_order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
-
-err:
- drm_buddy_free_list(&mm, &blocks);
- drm_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_optimistic(void *arg)
-{
- u64 mm_size, size, min_page_size, start = 0;
- struct drm_buddy_block *block;
- unsigned long flags = 0;
- const int max_order = 16;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
- LIST_HEAD(tmp);
- int order, err;
-
- /*
- * Create a mm with one block of each order available, and
- * try to allocate them all.
- */
-
- mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
- err = drm_buddy_init(&mm,
- mm_size,
- PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order <= max_order; order++) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- }
-
- /* Should be completely full! */
- size = min_page_size = get_size(0, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- } else {
- err = 0;
- }
-
-err:
- drm_buddy_free_list(&mm, &blocks);
- drm_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_range(void *arg)
-{
- unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
- u64 offset, size, rem, chunk_size, end;
- unsigned long page_num;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
- int err;
-
- igt_mm_config(&size, &chunk_size);
-
- err = drm_buddy_init(&mm, size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort, abort, abort!\n");
- goto err_fini;
- }
-
- rem = mm.size;
- offset = 0;
-
- for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
- struct drm_buddy_block *block;
- LIST_HEAD(tmp);
-
- size = min(page_num * mm.chunk_size, rem);
- end = offset + size;
-
- err = drm_buddy_alloc_blocks(&mm, offset, end, size, mm.chunk_size, &tmp, flags);
- if (err) {
- if (err == -ENOMEM) {
- pr_info("alloc_range hit -ENOMEM with size=%llx\n",
- size);
- } else {
- pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
- offset, size, err);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_range has no blocks\n");
- err = -EINVAL;
- break;
- }
-
- if (drm_buddy_block_offset(block) != offset) {
- pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
- drm_buddy_block_offset(block), offset);
- err = -EINVAL;
- }
-
- if (!err)
- err = igt_check_blocks(&mm, &tmp, size, true);
-
- list_splice_tail(&tmp, &blocks);
-
- if (err)
- break;
-
- offset += size;
-
- rem -= size;
- if (!rem)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- drm_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
-err_fini:
- drm_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_limit(void *arg)
-{
- u64 size = U64_MAX, start = 0;
- struct drm_buddy_block *block;
- unsigned long flags = 0;
- LIST_HEAD(allocated);
- struct drm_buddy mm;
- int err;
-
- err = drm_buddy_init(&mm, size, PAGE_SIZE);
- if (err)
- return err;
-
- if (mm.max_order != DRM_BUDDY_MAX_ORDER) {
- pr_err("mm.max_order(%d) != %d\n",
- mm.max_order, DRM_BUDDY_MAX_ORDER);
- err = -EINVAL;
- goto out_fini;
- }
-
- size = mm.chunk_size << mm.max_order;
- err = drm_buddy_alloc_blocks(&mm, start, size, size,
- PAGE_SIZE, &allocated, flags);
-
- if (unlikely(err))
- goto out_free;
-
- block = list_first_entry_or_null(&allocated,
- struct drm_buddy_block,
- link);
-
- if (!block) {
- err = -EINVAL;
- goto out_fini;
- }
-
- if (drm_buddy_block_order(block) != mm.max_order) {
- pr_err("block order(%d) != %d\n",
- drm_buddy_block_order(block), mm.max_order);
- err = -EINVAL;
- goto out_free;
- }
-
- if (drm_buddy_block_size(&mm, block) !=
- BIT_ULL(mm.max_order) * PAGE_SIZE) {
- pr_err("block size(%llu) != %llu\n",
- drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
- err = -EINVAL;
- goto out_free;
- }
-
-out_free:
- drm_buddy_free_list(&mm, &allocated);
-out_fini:
- drm_buddy_fini(&mm);
- return err;
-}
-
-static int igt_sanitycheck(void *ignored)
-{
- pr_info("%s - ok!\n", __func__);
- return 0;
-}
-
-#include "drm_selftest.c"
-
-static int __init test_drm_buddy_init(void)
-{
- int err;
-
- while (!random_seed)
- random_seed = get_random_int();
-
- pr_info("Testing DRM buddy manager (struct drm_buddy), with random_seed=0x%x\n",
- random_seed);
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-static void __exit test_drm_buddy_exit(void)
-{
-}
-
-module_init(test_drm_buddy_init);
-module_exit(test_drm_buddy_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
deleted file mode 100644
index d96cd890def6..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ /dev/null
@@ -1,1141 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Bootlin
- */
-
-#define pr_fmt(fmt) "drm_cmdline: " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <drm/drm_connector.h>
-#include <drm/drm_modes.h>
-
-#define TESTS "drm_cmdline_selftests.h"
-#include "drm_selftest.h"
-#include "test-drm_modeset_common.h"
-
-static const struct drm_connector no_connector = {};
-
-static int drm_cmdline_test_force_e_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static const struct drm_connector connector_hdmi = {
- .connector_type = DRM_MODE_CONNECTOR_HDMIB,
-};
-
-static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
- &connector_hdmi,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
-
- return 0;
-}
-
-static const struct drm_connector connector_dvi = {
- .connector_type = DRM_MODE_CONNECTOR_DVII,
-};
-
-static int drm_cmdline_test_force_D_only_dvi(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
- &connector_dvi,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
-
- return 0;
-}
-
-static int drm_cmdline_test_force_d_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_OFF);
-
- return 0;
-}
-
-static int drm_cmdline_test_margin_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("m",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_interlace_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("i",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_missing_x(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("x480",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_missing_y(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("1024x",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bad_y(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("1024xtest",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_missing_y_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("1024x-24",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_vesa(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480M",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(!mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_vesa_rblank(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480MR",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(!mode.rb);
- FAIL_ON(!mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_rblank(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480R",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(!mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bad_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480-test",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480@60",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bad_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480@refresh",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_interlaced(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60i",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(!mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_margins(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60m",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_off(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60d",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_OFF);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on_off(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480-24@60de",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60e",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on_analog(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60D",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on_digital(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
- static const struct drm_connector connector = {
- .connector_type = DRM_MODE_CONNECTOR_DVII,
- };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60D",
- &connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60ime",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(!mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_margins_force_on(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480me",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_vesa_margins(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480Mm",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(!mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_invalid_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480f",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_wrong_place_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480e-24",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC",
- &no_connector,
- &mode));
- FAIL_ON(strcmp(mode.name, "NTSC"));
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- return 0;
-}
-
-static int drm_cmdline_test_name_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC-24",
- &no_connector,
- &mode));
- FAIL_ON(strcmp(mode.name, "NTSC"));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- return 0;
-}
-
-static int drm_cmdline_test_name_bpp_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC-24@60",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_refresh_wrong_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60m",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_refresh_invalid_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60f",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(strcmp(mode.name, "NTSC"));
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- return 0;
-}
-
-static int drm_cmdline_test_name_bpp_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC-24,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(strcmp(mode.name, "NTSC"));
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_0(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=0",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_0);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_90(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=90",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_90);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_180(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_270(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=270",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_270);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_multiple(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_invalid_val(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=42",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_truncated(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_hmirror(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,reflect_x",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_vmirror(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,reflect_y",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_margin_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.tv_margins.right != 14);
- FAIL_ON(mode.tv_margins.left != 24);
- FAIL_ON(mode.tv_margins.bottom != 36);
- FAIL_ON(mode.tv_margins.top != 42);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_multiple_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=270,reflect_x",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_invalid_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,test=42",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_bpp_extra_and_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24e,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_extra_and_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480e,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_freestanding_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.tv_margins.right != 14);
- FAIL_ON(mode.tv_margins.left != 24);
- FAIL_ON(mode.tv_margins.bottom != 36);
- FAIL_ON(mode.tv_margins.top != 42);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_freestanding_force_e_and_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.tv_margins.right != 14);
- FAIL_ON(mode.tv_margins.left != 24);
- FAIL_ON(mode.tv_margins.bottom != 36);
- FAIL_ON(mode.tv_margins.top != 42);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_panel_orientation(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("panel_orientation=upside_down",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.panel_orientation != DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-#include "drm_selftest.c"
-
-static int __init test_drm_cmdline_init(void)
-{
- int err;
-
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-module_init(test_drm_cmdline_init);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
deleted file mode 100644
index 816e1464a98f..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ /dev/null
@@ -1,668 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test case for drm_damage_helper functions
- */
-
-#define pr_fmt(fmt) "drm_damage_helper: " fmt
-
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane.h>
-#include <drm/drm_drv.h>
-
-#include "test-drm_modeset_common.h"
-
-struct drm_driver mock_driver;
-static struct drm_device mock_device;
-static struct drm_object_properties mock_obj_props;
-static struct drm_plane mock_plane;
-static struct drm_property mock_prop;
-
-static void mock_setup(struct drm_plane_state *state)
-{
- static bool setup_done = false;
-
- state->plane = &mock_plane;
-
- if (setup_done)
- return;
-
- /* just enough so that drm_plane_enable_fb_damage_clips() works */
- mock_device.driver = &mock_driver;
- mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
- mock_plane.dev = &mock_device;
- mock_obj_props.count = 0;
- mock_plane.base.properties = &mock_obj_props;
- mock_prop.base.id = 1; /* 0 is an invalid id */
- mock_prop.dev = &mock_device;
-
- drm_plane_enable_fb_damage_clips(&mock_plane);
-}
-
-static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
- int y2)
-{
- state->src.x1 = x1;
- state->src.y1 = y1;
- state->src.x2 = x2;
- state->src.y2 = y2;
-}
-
-static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
- int y2)
-{
- r->x1 = x1;
- r->y1 = y1;
- r->x2 = x2;
- r->y2 = y2;
-}
-
-static void set_damage_blob(struct drm_property_blob *damage_blob,
- struct drm_mode_rect *r, uint32_t size)
-{
- damage_blob->length = size;
- damage_blob->data = r;
-}
-
-static void set_plane_damage(struct drm_plane_state *state,
- struct drm_property_blob *damage_blob)
-{
- state->fb_damage_clips = damage_blob;
-}
-
-static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
- int x1, int y1, int x2, int y2)
-{
- /*
- * Round down x1/y1 and round up x2/y2. This is because damage is not in
- * 16.16 fixed point so to catch all pixels.
- */
- int src_x1 = state->src.x1 >> 16;
- int src_y1 = state->src.y1 >> 16;
- int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
- int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
-
- if (x1 >= x2 || y1 >= y2) {
- pr_err("Cannot have damage clip with no dimension.\n");
- return false;
- }
-
- if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
- pr_err("Damage cannot be outside rounded plane src.\n");
- return false;
- }
-
- if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
- pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
- return false;
- }
-
- return true;
-}
-
-const struct drm_framebuffer fb = {
- .width = 2048,
- .height = 2048
-};
-
-/* common mocked structs many tests need */
-#define MOCK_VARIABLES() \
- struct drm_plane_state old_state; \
- struct drm_plane_state state = { \
- .crtc = ZERO_SIZE_PTR, \
- .fb = (struct drm_framebuffer *) &fb, \
- .visible = true, \
- }; \
- mock_setup(&old_state); \
- mock_setup(&state);
-
-int igt_damage_iter_no_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src same as fb size. */
- set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
- set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- set_plane_src(&state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src moved since old plane state. */
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 10 << 16, 10 << 16,
- (10 + 1024) << 16, (10 + 768) << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part and it moved since old plane state. */
- set_plane_src(&old_state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_not_visible(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- state.visible = false;
-
- mock_setup(&old_state);
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_no_crtc(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- state.crtc = NULL;
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_no_fb(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_plane_state old_state;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- struct drm_plane_state state = {
- .crtc = ZERO_SIZE_PTR,
- .fb = 0,
- };
-
- mock_setup(&old_state);
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_simple_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* Damage set to plane src */
- set_damage_clip(&damage, 0, 0, 1024, 768);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- set_damage_clip(&damage, 256, 192, 768, 576);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_intersect_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* Damage intersect with plane src. */
- set_damage_clip(&damage, 256, 192, 1360, 768);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage clipped to src.");
- FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_outside_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* Damage clip outside plane src */
- set_damage_clip(&damage, 1360, 1360, 1380, 1380);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_damage_clip(&damage, 10, 10, 256, 330);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* Damage intersect with plane src. */
- set_damage_clip(&damage, 10, 1, 1360, 330);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* Damage clip outside plane src */
- set_damage_clip(&damage, 1360, 1360, 1380, 1380);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src moved since old plane state. */
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 10 << 16, 10 << 16,
- (10 + 1024) << 16, (10 + 768) << 16);
- set_damage_clip(&damage, 20, 30, 256, 256);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src with fractional part moved since old plane state. */
- set_plane_src(&old_state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* Damage intersect with plane src. */
- set_damage_clip(&damage, 20, 30, 1360, 256);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
-
- return 0;
-}
-
-int igt_damage_iter_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* 2 damage clips. */
- set_damage_clip(&damage[0], 20, 30, 200, 180);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (num_hits == 0)
- FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
- if (num_hits == 1)
- FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
- num_hits++;
- }
-
- FAIL(num_hits != 2, "Should return damage when set.");
-
- return 0;
-}
-
-int igt_damage_iter_damage_one_intersect(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* 2 damage clips, one intersect plane src. */
- set_damage_clip(&damage[0], 20, 30, 200, 180);
- set_damage_clip(&damage[1], 2, 2, 1360, 1360);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (num_hits == 0)
- FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
- if (num_hits == 1)
- FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
- num_hits++;
- }
-
- FAIL(num_hits != 2, "Should return damage when set.");
-
- return 0;
-}
-
-int igt_damage_iter_damage_one_outside(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* 2 damage clips, one outside plane src. */
- set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
-
- return 0;
-}
-
-int igt_damage_iter_damage_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- /* 2 damage clips, one outside plane src. */
- set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return round off plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
-
- return 0;
-}
-
-int igt_damage_iter_damage_not_visible(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- state.visible = false;
-
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- /* 2 damage clips, one outside plane src. */
- set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should not return any damage.");
-
- return 0;
-}
diff --git a/drivers/gpu/drm/selftests/test-drm_format.c b/drivers/gpu/drm/selftests/test-drm_format.c
deleted file mode 100644
index c5e212afa27a..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_format.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test cases for the drm_format functions
- */
-
-#define pr_fmt(fmt) "drm_format: " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include <drm/drm_fourcc.h>
-
-#include "test-drm_modeset_common.h"
-
-int igt_check_drm_format_block_width(void *ignored)
-{
- const struct drm_format_info *info = NULL;
-
- /* Test invalid arguments */
- FAIL_ON(drm_format_info_block_width(info, 0) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
- FAIL_ON(drm_format_info_block_width(info, 1) != 0);
-
- /* Test 1 plane format */
- info = drm_format_info(DRM_FORMAT_XRGB4444);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 1);
- FAIL_ON(drm_format_info_block_width(info, 1) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- /* Test 2 planes format */
- info = drm_format_info(DRM_FORMAT_NV12);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 1);
- FAIL_ON(drm_format_info_block_width(info, 1) != 1);
- FAIL_ON(drm_format_info_block_width(info, 2) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- /* Test 3 planes format */
- info = drm_format_info(DRM_FORMAT_YUV422);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 1);
- FAIL_ON(drm_format_info_block_width(info, 1) != 1);
- FAIL_ON(drm_format_info_block_width(info, 2) != 1);
- FAIL_ON(drm_format_info_block_width(info, 3) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- /* Test a tiled format */
- info = drm_format_info(DRM_FORMAT_X0L0);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 2);
- FAIL_ON(drm_format_info_block_width(info, 1) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- return 0;
-}
-
-int igt_check_drm_format_block_height(void *ignored)
-{
- const struct drm_format_info *info = NULL;
-
- /* Test invalid arguments */
- FAIL_ON(drm_format_info_block_height(info, 0) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
- FAIL_ON(drm_format_info_block_height(info, 1) != 0);
-
- /* Test 1 plane format */
- info = drm_format_info(DRM_FORMAT_XRGB4444);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 1);
- FAIL_ON(drm_format_info_block_height(info, 1) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- /* Test 2 planes format */
- info = drm_format_info(DRM_FORMAT_NV12);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 1);
- FAIL_ON(drm_format_info_block_height(info, 1) != 1);
- FAIL_ON(drm_format_info_block_height(info, 2) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- /* Test 3 planes format */
- info = drm_format_info(DRM_FORMAT_YUV422);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 1);
- FAIL_ON(drm_format_info_block_height(info, 1) != 1);
- FAIL_ON(drm_format_info_block_height(info, 2) != 1);
- FAIL_ON(drm_format_info_block_height(info, 3) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- /* Test a tiled format */
- info = drm_format_info(DRM_FORMAT_X0L0);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 2);
- FAIL_ON(drm_format_info_block_height(info, 1) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- return 0;
-}
-
-int igt_check_drm_format_min_pitch(void *ignored)
-{
- const struct drm_format_info *info = NULL;
-
- /* Test invalid arguments */
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- /* Test 1 plane 8 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_RGB332);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
- (uint64_t)(UINT_MAX - 1));
-
- /* Test 1 plane 16 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_XRGB4444);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
- (uint64_t)(UINT_MAX - 1) * 2);
-
- /* Test 1 plane 24 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_RGB888);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 3);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 6);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 3072);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 5760);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 12288);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2013);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 3);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
- (uint64_t)(UINT_MAX - 1) * 3);
-
- /* Test 1 plane 32 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_ABGR8888);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 8);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 2560);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 7680);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 16384);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2684);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
- (uint64_t)(UINT_MAX - 1) * 4);
-
- /* Test 2 planes format */
- info = drm_format_info(DRM_FORMAT_NV12);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 672);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX);
- FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
- (uint64_t)UINT_MAX + 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
- (uint64_t)(UINT_MAX - 1));
- FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1));
-
- /* Test 3 planes 8 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_YUV422);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 3, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 320);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 320) != 320);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 512);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 512) != 512);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 960);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 960) != 960);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 2048) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 336);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 336) != 336);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX);
- FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
- (uint64_t)UINT_MAX / 2 + 1);
- FAIL_ON(drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1) !=
- (uint64_t)UINT_MAX / 2 + 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1) / 2);
- FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1) / 2);
- FAIL_ON(drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1) / 2);
-
- /* Test tiled format */
- info = drm_format_info(DRM_FORMAT_X0L2);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
- (uint64_t)(UINT_MAX - 1) * 2);
-
- return 0;
-}
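
Note: the checks deleted above exercise drm_format_info_min_pitch(), which returns the smallest legal pitch for a plane, i.e. width times bytes-per-pixel rounded up; for NV12 the chroma plane covers half the width at two bytes per Cb/Cr sample, so its minimum pitch equals the luma pitch. As a minimal sketch of how equivalent assertions look under the KUnit framework (test and suite names here are illustrative, not taken from this patch):

#include <kunit/test.h>
#include <drm/drm_fourcc.h>

static void drm_format_min_pitch_nv12_sketch(struct kunit *test)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, info);
	/* Plane 0 (luma): one byte per pixel. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920ull);
	/* Plane 1 (chroma): half the width, two bytes per sample pair. */
	KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 1920ull);
}

static struct kunit_case drm_format_sketch_cases[] = {
	KUNIT_CASE(drm_format_min_pitch_nv12_sketch),
	{ }
};

static struct kunit_suite drm_format_sketch_suite = {
	.name = "drm_format_min_pitch_sketch",
	.test_cases = drm_format_sketch_cases,
};
kunit_test_suite(drm_format_sketch_suite);
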
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.c b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
deleted file mode 100644
index 2a7f93774006..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common file for modeset selftests.
- */
-
-#include <linux/module.h>
-
-#include "test-drm_modeset_common.h"
-
-#define TESTS "drm_modeset_selftests.h"
-#include "drm_selftest.h"
-
-#include "drm_selftest.c"
-
-static int __init test_drm_modeset_init(void)
-{
- int err;
-
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-static void __exit test_drm_modeset_exit(void)
-{
-}
-
-module_init(test_drm_modeset_init);
-module_exit(test_drm_modeset_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
deleted file mode 100644
index cfb51d8da2bc..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __TEST_DRM_MODESET_COMMON_H__
-#define __TEST_DRM_MODESET_COMMON_H__
-
-#include <linux/errno.h>
-#include <linux/printk.h>
-
-#define FAIL(test, msg, ...) \
- do { \
- if (test) { \
- pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
- return -EINVAL; \
- } \
- } while (0)
-
-#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
-
-int igt_drm_rect_clip_scaled_div_by_zero(void *ignored);
-int igt_drm_rect_clip_scaled_not_clipped(void *ignored);
-int igt_drm_rect_clip_scaled_clipped(void *ignored);
-int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored);
-int igt_check_plane_state(void *ignored);
-int igt_check_drm_format_block_width(void *ignored);
-int igt_check_drm_format_block_height(void *ignored);
-int igt_check_drm_format_min_pitch(void *ignored);
-int igt_check_drm_framebuffer_create(void *ignored);
-int igt_damage_iter_no_damage(void *ignored);
-int igt_damage_iter_no_damage_fractional_src(void *ignored);
-int igt_damage_iter_no_damage_src_moved(void *ignored);
-int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
-int igt_damage_iter_no_damage_not_visible(void *ignored);
-int igt_damage_iter_no_damage_no_crtc(void *ignored);
-int igt_damage_iter_no_damage_no_fb(void *ignored);
-int igt_damage_iter_simple_damage(void *ignored);
-int igt_damage_iter_single_damage(void *ignored);
-int igt_damage_iter_single_damage_intersect_src(void *ignored);
-int igt_damage_iter_single_damage_outside_src(void *ignored);
-int igt_damage_iter_single_damage_fractional_src(void *ignored);
-int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
-int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
-int igt_damage_iter_single_damage_src_moved(void *ignored);
-int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
-int igt_damage_iter_damage(void *ignored);
-int igt_damage_iter_damage_one_intersect(void *ignored);
-int igt_damage_iter_damage_one_outside(void *ignored);
-int igt_damage_iter_damage_src_moved(void *ignored);
-int igt_damage_iter_damage_not_visible(void *ignored);
-int igt_dp_mst_calc_pbn_mode(void *ignored);
-int igt_dp_mst_sideband_msg_req_decode(void *ignored);
-
-#endif
diff --git a/drivers/gpu/drm/selftests/test-drm_rect.c b/drivers/gpu/drm/selftests/test-drm_rect.c
deleted file mode 100644
index 3a5ff38321f4..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_rect.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test cases for the drm_rect functions
- */
-
-#define pr_fmt(fmt) "drm_rect: " fmt
-
-#include <linux/limits.h>
-
-#include <drm/drm_rect.h>
-
-#include "test-drm_modeset_common.h"
-
-int igt_drm_rect_clip_scaled_div_by_zero(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /*
- * Make sure we don't divide by zero when dst
- * width/height is zero and dst and clip do not intersect.
- */
- drm_rect_init(&src, 0, 0, 0, 0);
- drm_rect_init(&dst, 0, 0, 0, 0);
- drm_rect_init(&clip, 1, 1, 1, 1);
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
- FAIL(visible, "Destination not be visible\n");
- FAIL(drm_rect_visible(&src), "Source should not be visible\n");
-
- drm_rect_init(&src, 0, 0, 0, 0);
- drm_rect_init(&dst, 3, 3, 0, 0);
- drm_rect_init(&clip, 1, 1, 1, 1);
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
- FAIL(visible, "Destination not be visible\n");
- FAIL(drm_rect_visible(&src), "Source should not be visible\n");
-
- return 0;
-}
-
-int igt_drm_rect_clip_scaled_not_clipped(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /* 1:1 scaling */
- drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
- drm_rect_init(&dst, 0, 0, 1, 1);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 2:1 scaling */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 1, 1);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
- src.y1 != 0 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:2 scaling */
- drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 0, 0, 2, 2);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 2 ||
- dst.y1 != 0 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- return 0;
-}
-
-int igt_drm_rect_clip_scaled_clipped(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /* 1:1 scaling top/left clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:1 scaling bottom/right clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 1, 1, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
- src.y1 != 1 << 16 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 1 || dst.x2 != 2 ||
- dst.y1 != 1 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 2:1 scaling top/left clip */
- drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
- src.y1 != 0 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 2:1 scaling bottom/right clip */
- drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 1, 1, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
- src.y1 != 2 << 16 || src.y2 != 4 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 1 || dst.x2 != 2 ||
- dst.y1 != 1 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:2 scaling top/left clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 4, 4);
- drm_rect_init(&clip, 0, 0, 2, 2);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 2 ||
- dst.y1 != 0 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:2 scaling bottom/right clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 4, 4);
- drm_rect_init(&clip, 2, 2, 2, 2);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
- src.y1 != 1 << 16 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 2 || dst.x2 != 4 ||
- dst.y1 != 2 || dst.y2 != 4,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- return 0;
-}
-
-int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /*
- * 'clip.x2 - dst.x1 >= dst width' could result a negative
- * src rectangle width which is no longer expected by the
- * code as it's using unsigned types. This could lead to
- * the clipped source rectangle appering visible when it
- * should have been fully clipped. Make sure both rectangles
- * end up invisible.
- */
- drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 3, 3, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(visible, "Destination should not be visible\n");
- FAIL(drm_rect_visible(&src), "Source should not be visible\n");
-
- return 0;
-}
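
Note: the rectangle tests removed here mainly guard drm_rect_clip_scaled() against dividing by a zero-sized destination and against signed/unsigned confusion on huge rectangles. A minimal, hedged usage sketch of the API under test (all values illustrative):

#include <drm/drm_rect.h>

static bool clip_example(void)
{
	struct drm_rect src, dst, clip;

	/* Source is in 16.16 fixed point; destination and clip are in pixels. */
	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
	drm_rect_init(&dst, 0, 0, 1, 1);
	drm_rect_init(&clip, 0, 0, 1, 1);

	/* Clips both rectangles and returns whether anything stays visible;
	 * a degenerate dst that misses the clip window must yield false. */
	return drm_rect_clip_scaled(&src, &dst, &clip);
}
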
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 288b838a904a..4ec5dc74a6b0 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -5,7 +5,7 @@ config DRM_SHMOBILE
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
Choose this option if you have an SH Mobile chipset.
If M is selected the module will be called shmob-drm.
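
Note: the shmobile, sprd, sti, stm and sun4i hunks that follow are mechanical fallout of the GEM CMA helpers having been renamed to DMA helpers; behaviour is unchanged, only identifiers move (drm_gem_cma_* to drm_gem_dma_*, drm_fb_cma_* to drm_fb_dma_*, and the GEM object's paddr field to dma_addr). A condensed sketch of the new-style boilerplate, with a placeholder driver name:

#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem_dma_helper.h>

DEFINE_DRM_GEM_DMA_FOPS(example_fops);		/* was DEFINE_DRM_GEM_CMA_FOPS */

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,			/* was DRM_GEM_CMA_DRIVER_OPS */
	.fops = &example_fops,
};

/* Scanout address of plane 0: the GEM object's paddr is now dma_addr. */
static dma_addr_t example_scanout_addr(struct drm_framebuffer *fb)
{
	struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, 0);

	return gem->dma_addr + fb->offsets[0];
}
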
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 071a929e9fe3..4624c0aff51f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -12,11 +12,10 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -289,18 +288,18 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
{
struct drm_crtc *crtc = &scrtc->crtc;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- scrtc->dma[0] = gem->paddr + fb->offsets[0]
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (scrtc->format->yuv) {
bpp = scrtc->format->bpp - 8;
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- scrtc->dma[1] = gem->paddr + fb->offsets[1]
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
+ scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 731cbad7520f..3d511fa38913 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -126,11 +126,11 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 68d21be784aa..60a2c8d8a0d9 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -9,9 +9,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
index 6ec2b732bb94..0347b1fd2338 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
-struct drm_gem_cma_object;
+struct drm_gem_dma_object;
struct shmob_drm_device;
struct shmob_drm_format_info {
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 4763ea8e1af0..6c5f0cbe7d95 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -9,10 +9,10 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
@@ -41,18 +41,18 @@ static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = splane->format->yuv ? 8 : splane->format->bpp;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- splane->dma[0] = gem->paddr + fb->offsets[0]
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ splane->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (splane->format->yuv) {
bpp = splane->format->bpp - 8;
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- splane->dma[1] = gem->paddr + fb->offsets[1]
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
+ splane->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
@@ -252,9 +252,10 @@ int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
splane->index = index;
splane->alpha = 255;
- ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
- &shmob_drm_plane_funcs, formats,
- ARRAY_SIZE(formats), false);
+ ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1,
+ &shmob_drm_plane_funcs,
+ formats, ARRAY_SIZE(formats), NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
return ret;
}
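
Note: drm_plane_init() is long deprecated in favour of drm_universal_plane_init(), which additionally takes an optional format-modifier list, an explicit plane type and a printf-style name. A hedged sketch of the call pattern used above (driver-independent, names illustrative):

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

static const u32 example_formats[] = { DRM_FORMAT_XRGB8888 };

static int example_overlay_init(struct drm_device *drm, struct drm_plane *plane,
				const struct drm_plane_funcs *funcs)
{
	/* BIT(0): the plane may only feed the first CRTC. NULL modifiers
	 * leaves modifier support unadvertised; NULL name lets the core
	 * generate "plane-N". */
	return drm_universal_plane_init(drm, plane, BIT(0), funcs,
					example_formats,
					ARRAY_SIZE(example_formats),
					NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
}
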
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e35e719cf315..6173020a9bf5 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -50,7 +50,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
- idr_init(&dev_priv->object_idr);
+ idr_init_base(&dev_priv->object_idr, 1);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
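
Note: idr_init_base() replaces idr_init() when ID 0 is never handed out: the IDR is told that IDs below the base are unused, so it does not track them, and 0 can keep meaning "no object". A small hedged sketch of the pattern:

#include <linux/idr.h>

struct example_private {
	struct idr object_idr;
};

static void example_idr_setup(struct example_private *priv)
{
	/* IDs start at 1; 0 stays reserved as an "invalid handle" value. */
	idr_init_base(&priv->object_idr, 1);
}

static int example_idr_store(struct example_private *priv, void *object)
{
	/* start = 1, end = 0 (no upper limit). */
	return idr_alloc(&priv->object_idr, object, 1, 0, GFP_KERNEL);
}
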
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index 1e0fcec7be47..ddfa0bb5d9c9 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -39,13 +39,11 @@ static int ssd130x_i2c_probe(struct i2c_client *client)
return 0;
}
-static int ssd130x_i2c_remove(struct i2c_client *client)
+static void ssd130x_i2c_remove(struct i2c_client *client)
{
struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
ssd130x_remove(ssd130x);
-
- return 0;
}
static void ssd130x_i2c_shutdown(struct i2c_client *client)
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 07802907e39a..19ab4942cb33 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -18,11 +18,6 @@ struct ssd130x_spi_transport {
struct gpio_desc *dc;
};
-static const struct regmap_config ssd130x_spi_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
/*
* The regmap bus .write handler, it is just a wrapper around spi_write()
* but toggling the Data/Command control pin (D/C#). Since for 4-wire SPI
@@ -56,17 +51,12 @@ static int ssd130x_spi_read(void *context, const void *reg, size_t reg_size,
return -EOPNOTSUPP;
}
-/*
- * A custom bus is needed due the special write that toggles a D/C# pin,
- * another option could be to just have a .reg_write() callback but that
- * will prevent to do data writes in bulk.
- *
- * Once the regmap API is extended to support defining a bulk write handler
- * in the struct regmap_config, this can be simplified and the bus dropped.
- */
-static struct regmap_bus regmap_ssd130x_spi_bus = {
+static const struct regmap_config ssd130x_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
.write = ssd130x_spi_write,
.read = ssd130x_spi_read,
+ .can_multi_write = true,
};
static int ssd130x_spi_probe(struct spi_device *spi)
@@ -90,8 +80,7 @@ static int ssd130x_spi_probe(struct spi_device *spi)
t->spi = spi;
t->dc = dc;
- regmap = devm_regmap_init(dev, &regmap_ssd130x_spi_bus, t,
- &ssd130x_spi_regmap_config);
+ regmap = devm_regmap_init(dev, NULL, t, &ssd130x_spi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
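
Note: this hunk relies on the regmap core change elsewhere in this series that lets a regmap_config carry its own .read/.write callbacks (plus can_multi_write), so a one-off regmap_bus is no longer needed for the D/C# toggling write. A condensed, hedged sketch of the resulting shape (callback bodies are placeholders):

#include <linux/errno.h>
#include <linux/regmap.h>

static int example_write(void *context, const void *data, size_t count)
{
	return 0;	/* push @data to the device, e.g. via spi_write() */
}

static int example_read(void *context, const void *reg, size_t reg_size,
			void *val, size_t val_size)
{
	return -EOPNOTSUPP;	/* write-only device */
}

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.write = example_write,
	.read = example_read,
	.can_multi_write = true,	/* bulk register writes stay possible */
};

/* With bus == NULL the regmap core wraps the config callbacks itself:
 *	regmap = devm_regmap_init(dev, NULL, context, &example_regmap_config);
 */
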
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 5a3e3b78cd9e..bc41a5ae810a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -18,10 +18,10 @@
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
@@ -537,11 +537,11 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
kfree(buf);
}
-static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *map,
+static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ struct iosys_map dst;
unsigned int dst_pitch;
int ret = 0;
u8 *buf = NULL;
@@ -555,127 +555,174 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_m
if (!buf)
return -ENOMEM;
- drm_fb_xrgb8888_to_mono(buf, dst_pitch, vmap, fb, rect);
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ iosys_map_set_vaddr(&dst, buf);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
ssd130x_update_rect(ssd130x, buf, rect);
+out_free:
kfree(buf);
return ret;
}
-static int ssd130x_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_device *drm = plane->dev;
+ struct drm_rect src_clip, dst_clip;
+ int idx;
- if (mode->hdisplay != ssd130x->mode.hdisplay &&
- mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_SIZE;
+ if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
+ return;
- if (mode->hdisplay != ssd130x->mode.hdisplay)
- return MODE_ONE_WIDTH;
+ dst_clip = plane_state->dst;
+ if (!drm_rect_intersect(&dst_clip, &src_clip))
+ return;
- if (mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_HEIGHT;
+ if (!drm_dev_enter(drm, &idx))
+ return;
- return MODE_OK;
+ ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+
+ drm_dev_exit(idx);
}
-static void ssd130x_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_device *drm = &ssd130x->drm;
- int idx, ret;
+ struct drm_device *drm = plane->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ int idx;
- ret = ssd130x_power_on(ssd130x);
- if (ret)
+ if (!drm_dev_enter(drm, &idx))
return;
- ret = ssd130x_init(ssd130x);
- if (ret)
- goto out_power_off;
+ ssd130x_clear_screen(ssd130x);
- if (!drm_dev_enter(drm, &idx))
- goto out_power_off;
+ drm_dev_exit(idx);
+}
- ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &plane_state->dst);
+static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_update = ssd130x_primary_plane_helper_atomic_update,
+ .atomic_disable = ssd130x_primary_plane_helper_atomic_disable,
+};
- ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
+static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
- backlight_enable(ssd130x->bl_dev);
+static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev);
- drm_dev_exit(idx);
+ if (mode->hdisplay != ssd130x->mode.hdisplay &&
+ mode->vdisplay != ssd130x->mode.vdisplay)
+ return MODE_ONE_SIZE;
+ else if (mode->hdisplay != ssd130x->mode.hdisplay)
+ return MODE_ONE_WIDTH;
+ else if (mode->vdisplay != ssd130x->mode.vdisplay)
+ return MODE_ONE_HEIGHT;
- return;
-out_power_off:
- ssd130x_power_off(ssd130x);
+ return MODE_OK;
}
-static void ssd130x_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
- struct drm_device *drm = &ssd130x->drm;
- int idx;
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
- if (!drm_dev_enter(drm, &idx))
- return;
+ ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
+ if (ret)
+ return ret;
- ssd130x_clear_screen(ssd130x);
+ return drm_atomic_add_affected_planes(new_state, crtc);
+}
- backlight_disable(ssd130x->bl_dev);
+/*
+ * The CRTC is always enabled. Screen updates are performed by
+ * the primary plane's atomic_update function. Disabling clears
+ * the screen in the primary plane's atomic_disable function.
+ */
+static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
+ .mode_valid = ssd130x_crtc_helper_mode_valid,
+ .atomic_check = ssd130x_crtc_helper_atomic_check,
+};
- ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF);
+static void ssd130x_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *drm = crtc->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
- ssd130x_power_off(ssd130x);
+ ssd130x_init(ssd130x);
- drm_dev_exit(idx);
+ drm_atomic_helper_crtc_reset(crtc);
}
-static void ssd130x_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_plane_state)
+static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
+ .reset = ssd130x_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
- struct drm_plane_state *plane_state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *drm = &ssd130x->drm;
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct drm_device *drm = encoder->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ int ret;
- if (!fb)
+ ret = ssd130x_power_on(ssd130x);
+ if (ret)
return;
- if (!pipe->crtc.state->active)
- return;
+ ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
- return;
+ backlight_enable(ssd130x->bl_dev);
+}
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
- if (!drm_dev_enter(drm, &idx))
- return;
+ backlight_disable(ssd130x->bl_dev);
- ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+ ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF);
- drm_dev_exit(idx);
+ ssd130x_power_off(ssd130x);
}
-static const struct drm_simple_display_pipe_funcs ssd130x_pipe_funcs = {
- .mode_valid = ssd130x_display_pipe_mode_valid,
- .enable = ssd130x_display_pipe_enable,
- .disable = ssd130x_display_pipe_disable,
- .update = ssd130x_display_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static const struct drm_encoder_helper_funcs ssd130x_encoder_helper_funcs = {
+ .atomic_enable = ssd130x_encoder_helper_atomic_enable,
+ .atomic_disable = ssd130x_encoder_helper_atomic_disable,
+};
+
+static const struct drm_encoder_funcs ssd130x_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
-static int ssd130x_connector_get_modes(struct drm_connector *connector)
+static int ssd130x_connector_helper_get_modes(struct drm_connector *connector)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
struct drm_display_mode *mode;
@@ -695,7 +742,7 @@ static int ssd130x_connector_get_modes(struct drm_connector *connector)
}
static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
- .get_modes = ssd130x_connector_get_modes,
+ .get_modes = ssd130x_connector_helper_get_modes,
};
static const struct drm_connector_funcs ssd130x_connector_funcs = {
@@ -806,8 +853,16 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
struct device *dev = ssd130x->dev;
struct drm_device *drm = &ssd130x->drm;
unsigned long max_width, max_height;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
int ret;
+ /*
+ * Modesetting
+ */
+
ret = drmm_mode_config_init(drm);
if (ret) {
dev_err(dev, "DRM mode config init failed: %d\n", ret);
@@ -833,25 +888,65 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
drm->mode_config.preferred_depth = 32;
drm->mode_config.funcs = &ssd130x_mode_config_funcs;
- ret = drm_connector_init(drm, &ssd130x->connector, &ssd130x_connector_funcs,
+ /* Primary plane */
+
+ primary_plane = &ssd130x->primary_plane;
+ ret = drm_universal_plane_init(drm, primary_plane, 0, &ssd130x_primary_plane_funcs,
+ ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ dev_err(dev, "DRM primary plane init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_plane_helper_add(primary_plane, &ssd130x_primary_plane_helper_funcs);
+
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &ssd130x->crtc;
+ ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ &ssd130x_crtc_funcs, NULL);
+ if (ret) {
+ dev_err(dev, "DRM crtc init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &ssd130x_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &ssd130x->encoder;
+ ret = drm_encoder_init(drm, encoder, &ssd130x_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(dev, "DRM encoder init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &ssd130x_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &ssd130x->connector;
+ ret = drm_connector_init(drm, connector, &ssd130x_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
if (ret) {
dev_err(dev, "DRM connector init failed: %d\n", ret);
return ret;
}
- drm_connector_helper_add(&ssd130x->connector, &ssd130x_connector_helper_funcs);
+ drm_connector_helper_add(connector, &ssd130x_connector_helper_funcs);
- ret = drm_simple_display_pipe_init(drm, &ssd130x->pipe, &ssd130x_pipe_funcs,
- ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
- NULL, &ssd130x->connector);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
- dev_err(dev, "DRM simple display pipeline init failed: %d\n", ret);
+ dev_err(dev, "DRM attach connector to encoder failed: %d\n", ret);
return ret;
}
- drm_plane_enable_fb_damage_clips(&ssd130x->pipe.plane);
-
drm_mode_config_reset(drm);
return 0;
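
Note: two patterns in the converted ssd130x code above are worth calling out. The primary plane uses the shadow-plane helpers, so atomic_update gets a CPU-visible vmap of the framebuffer in shadow_plane_state->data[] and only blits the damage-merged sub-rectangle; and the blit brackets the copy with drm_gem_fb_begin/end_cpu_access() so imported dma-buf backed buffers are coherent before the format conversion. A generic, hedged sketch of that update path (device-specific blit elided):

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_rect.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new = drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old = drm_atomic_get_old_plane_state(state, plane);
	struct drm_shadow_plane_state *sps = to_drm_shadow_plane_state(new);
	struct drm_rect damage;

	/* Merge all damage clips since the last update into one rectangle. */
	if (!drm_atomic_helper_damage_merged(old, new, &damage))
		return;

	/* sps->data[0] is the vmapped CPU view of the framebuffer, provided
	 * by DRM_GEM_SHADOW_PLANE_HELPER_FUNCS; hand it to the device blit:
	 * example_blit(new->fb, &sps->data[0], &damage);
	 */
}
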
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index 4c4a84e962e7..03038c1b6476 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -13,8 +13,11 @@
#ifndef __SSD1307X_H__
#define __SSD1307X_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_plane_helper.h>
#include <linux/regmap.h>
@@ -42,8 +45,10 @@ struct ssd130x_deviceinfo {
struct ssd130x_device {
struct drm_device drm;
struct device *dev;
- struct drm_simple_display_pipe pipe;
struct drm_display_mode mode;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
struct i2c_client *client;
diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig
index 9a9c7ebfc716..e22b780fe822 100644
--- a/drivers/gpu/drm/sprd/Kconfig
+++ b/drivers/gpu/drm/sprd/Kconfig
@@ -2,7 +2,7 @@ config DRM_SPRD
tristate "DRM Support for Unisoc SoCs Platform"
depends on ARCH_SPRD || COMPILE_TEST
depends on DRM && OF
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 3664089b6983..88f4259680f1 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -19,11 +19,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include "sprd_drm.h"
#include "sprd_dpu.h"
@@ -324,7 +323,7 @@ static u32 drm_blend_to_dpu(struct drm_plane_state *state)
static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
{
struct dpu_context *ctx = &dpu->ctx;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb = state->fb;
u32 addr, size, offset, pitch, blend, format, rotation;
u32 src_x = state->src_x >> 16;
@@ -341,8 +340,8 @@ static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
size = (src_w & 0xffff) | (src_h << 16);
for (i = 0; i < fb->format->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(fb, i);
- addr = cma_obj->paddr + fb->offsets[i];
+ dma_obj = drm_fb_dma_get_gem_obj(fb, i);
+ addr = dma_obj->dma_addr + fb->offsets[i];
if (i == 0)
layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index);
@@ -524,8 +523,8 @@ static int sprd_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index b8fc1c6a0cb8..9d42f17a5734 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -13,7 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -48,14 +48,14 @@ static void sprd_drm_mode_config_init(struct drm_device *drm)
drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
}
-DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sprd_drm_fops);
static struct drm_driver sprd_drm_drv = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &sprd_drm_fops,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 246a94afbe74..f2a880c48485 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -4,7 +4,7 @@ config DRM_STI
depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
select RESET_CONTROLLER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 409795786f03..3c7154f2d5f3 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 1e9bd4241f10..db0a1eb53532 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -11,9 +11,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
@@ -243,8 +243,8 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
}
}
- if (!drm_fb_cma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ if (!drm_fb_dma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
@@ -267,7 +267,7 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
u32 y, x;
u32 val;
@@ -278,10 +278,10 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
dst_x = newstate->crtc_x;
dst_y = newstate->crtc_y;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Convert ARGB8888 to CLUT8 */
- sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
+ sti_cursor_argb8888_to_clut8(cursor, (u32 *)dma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index d858209cf8de..7abf010a3293 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -14,9 +14,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -128,12 +127,12 @@ static void sti_mode_config_init(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
}
-DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sti_driver_fops);
static const struct drm_driver sti_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &sti_driver_fops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.debugfs_init = sti_drm_dbg_init,
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index af783f599306..43c72c2604a0 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -12,10 +12,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
@@ -658,8 +658,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (!drm_fb_cma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ if (!drm_fb_dma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
@@ -714,7 +714,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
struct sti_gdp_node *top_field, *btm_field;
@@ -778,15 +778,15 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
- (unsigned long)cma_obj->paddr);
+ (unsigned long) dma_obj->dma_addr);
/* pixel memory location */
bpp = fb->format->cpp[0];
- top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+ top_field->gam_gdp_pml = (u32) dma_obj->dma_addr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * bpp;
top_field->gam_gdp_pml += src_y * fb->pitches[0];
@@ -831,7 +831,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
dev_dbg(gdp->dev, "Current NVN:0x%X\n",
readl(gdp->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
- (unsigned long)cma_obj->paddr,
+ (unsigned long) dma_obj->dma_addr,
readl(gdp->regs + GAM_GDP_PML_OFFSET));
if (!curr_list) {
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 271982080437..02b77279f6e4 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -16,10 +16,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
@@ -1055,8 +1055,8 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (!drm_fb_cma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ if (!drm_fb_dma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
@@ -1124,7 +1124,7 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sti_hqvdp_cmd *cmd;
int scale_h, scale_v;
int cmd_offset;
@@ -1178,15 +1178,15 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
- (unsigned long)cma_obj->paddr);
+ (unsigned long) dma_obj->dma_addr);
/* Buffer planes address */
- cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
- cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+ cmd->top.current_luma = (u32) dma_obj->dma_addr + fb->offsets[0];
+ cmd->top.current_chroma = (u32) dma_obj->dma_addr + fb->offsets[1];
/* Pitches */
cmd->top.luma_processed_pitch = fb->pitches[0];
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index c74b524663ab..29e669ccec5b 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -9,10 +9,9 @@
#include <linux/types.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index 8e33e629d9b0..2c0156bede9c 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -8,7 +8,6 @@
#define _STI_PLANE_H_
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index e0379488cd0d..ded72f879482 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -3,7 +3,7 @@ config DRM_STM
tristate "DRM Support for STMicroelectronics SoC Series"
depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index c63945dc2260..d7914f5122df 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -18,9 +18,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -37,7 +36,7 @@ static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int stm_gem_cma_dumb_create(struct drm_file *file,
+static int stm_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -50,10 +49,10 @@ static int stm_gem_cma_dumb_create(struct drm_file *file,
args->pitch = roundup(min_pitch, 128);
args->height = roundup(args->height, 4);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drv_driver_fops);
static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -64,7 +63,7 @@ static const struct drm_driver drv_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create),
};
static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index da7a0a183b27..03c6becda795 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -28,13 +28,12 @@
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -1347,7 +1346,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
}
/* Sets the FB address */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 0);
+ paddr = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 0);
if (newstate->rotation & DRM_MODE_REFLECT_X)
paddr += (fb->format->cpp[0] * (x1 - x0 + 1)) - 1;
@@ -1381,7 +1380,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
/* Configure the auxiliary frame buffer address 0 */
- paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
if (newstate->rotation & DRM_MODE_REFLECT_X)
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
@@ -1393,8 +1392,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
break;
case DRM_FORMAT_YUV420:
/* Configure the auxiliary frame buffer address 0 & 1 */
- paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- paddr2 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
+ paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
+ paddr2 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 2);
if (newstate->rotation & DRM_MODE_REFLECT_X) {
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
@@ -1411,8 +1410,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
break;
case DRM_FORMAT_YVU420:
/* Configure the auxiliary frame buffer address 0 & 1 */
- paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
- paddr2 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 2);
+ paddr2 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
if (newstate->rotation & DRM_MODE_REFLECT_X) {
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 3a43c436c74a..4741d9f6544c 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -3,7 +3,7 @@ config DRM_SUN4I
tristate "DRM Support for Allwinner A10 Display Engine"
depends on DRM && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
@@ -16,23 +16,25 @@ config DRM_SUN4I
if DRM_SUN4I
config DRM_SUN4I_HDMI
- tristate "Allwinner A10 HDMI Controller Support"
+ tristate "Allwinner A10/A10s/A20/A31 HDMI Controller Support"
+ depends on ARM || COMPILE_TEST
default DRM_SUN4I
help
- Choose this option if you have an Allwinner SoC with an HDMI
- controller.
+ Choose this option if you have an Allwinner A10/A10s/A20/A31
+ SoC with an HDMI controller.
config DRM_SUN4I_HDMI_CEC
- bool "Allwinner A10 HDMI CEC Support"
+ bool "Allwinner A10/A10s/A20/A31 HDMI CEC Support"
depends on DRM_SUN4I_HDMI
select CEC_CORE
select CEC_PIN
help
- Choose this option if you have an Allwinner SoC with an HDMI
- controller and want to use CEC.
+ Choose this option if you have an Allwinner A10/A10s/A20/A31
+ SoC with an HDMI controller and want to use CEC.
config DRM_SUN4I_BACKEND
tristate "Support for Allwinner A10 Display Engine Backend"
+ depends on ARM || COMPILE_TEST
default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with the
@@ -41,8 +43,8 @@ config DRM_SUN4I_BACKEND
selected the module will be called sun4i-backend.
config DRM_SUN6I_DSI
- tristate "Allwinner A31 MIPI-DSI Controller Support"
- default MACH_SUN8I
+ tristate "Allwinner A31/A64 MIPI-DSI Controller Support"
+ default DRM_SUN4I
select CRC_CCITT
select DRM_MIPI_DSI
select RESET_CONTROLLER
@@ -55,15 +57,17 @@ config DRM_SUN6I_DSI
config DRM_SUN8I_DW_HDMI
tristate "Support for Allwinner version of DesignWare HDMI"
depends on DRM_SUN4I
+ default DRM_SUN4I
select DRM_DW_HDMI
help
Choose this option if you have an Allwinner SoC with the
- DesignWare HDMI controller with custom HDMI PHY. If M is
+ DesignWare HDMI controller. SoCs that support HDMI and
+ have a Display Engine 2.0 contain this controller. If M is
selected the module will be called sun8i_dw_hdmi.
config DRM_SUN8I_MIXER
tristate "Support for Allwinner Display Engine 2.0 Mixer"
- default MACH_SUN8I
+ default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with the
Allwinner Display Engine 2.0, which has a mixer to do some
@@ -75,6 +79,6 @@ config DRM_SUN8I_TCON_TOP
default DRM_SUN4I if DRM_SUN8I_MIXER!=n
help
TCON TOP is responsible for configuring display pipeline for
- HTMI, TVE and LCD.
+ HDMI, TVE and LCD.
endif
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 287e8c4bbaea..38070fc261f3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -19,11 +19,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_backend.h"
@@ -330,7 +329,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
u32 lo_paddr, hi_paddr;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
@@ -339,21 +338,21 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
fb->pitches[0] * 8);
/* Get the start of the displayed memory */
- paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
if (fb->format->is_yuv)
- return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
+ return sun4i_backend_update_yuv_buffer(backend, fb, dma_addr);
/* Write the 32 lower bits of the address (in bits) */
- lo_paddr = paddr << 3;
+ lo_paddr = dma_addr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
regmap_write(backend->engine.regs,
SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
lo_paddr);
/* And the upper bits */
- hi_paddr = paddr >> 29;
+ hi_paddr = dma_addr >> 29;
DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 6eb1aabdb161..d06ffd99d86e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -17,9 +17,8 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -38,10 +37,10 @@ static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
/* The hardware only allows even pitches for YUV buffers. */
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sun4i_drv_fops);
static const struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -55,7 +54,7 @@ static const struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
};
static int sun4i_drv_bind(struct device *dev)
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 462fae73eae9..799ab7460ae5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -14,10 +14,10 @@
#include <linux/reset.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include "sun4i_drv.h"
@@ -160,7 +160,7 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
struct drm_framebuffer *fb = state->fb;
unsigned int strides[3] = {};
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
bool swap;
if (fb->modifier == DRM_FORMAT_MOD_ALLWINNER_TILED) {
@@ -221,22 +221,24 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
swap = sun4i_frontend_format_chroma_requires_swap(fb->format->format);
/* Set the physical address of the buffer in memory */
- paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
- DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
- regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
+ DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &dma_addr);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, dma_addr);
if (fb->format->num_planes > 1) {
- paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
- DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 2 : 1);
+ DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n",
+ &dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
- paddr);
+ dma_addr);
}
if (fb->format->num_planes > 2) {
- paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
- DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 1 : 2);
+ DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n",
+ &dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
- paddr);
+ dma_addr);
}
}
EXPORT_SYMBOL(sun4i_frontend_update_buffer);
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 648dd0b5b116..98f3176366c0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "sun4i_backend.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 94883abe0dfd..c65f0a89b6b0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -275,13 +276,6 @@ drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
encoder);
}
-static inline struct sun4i_tv *
-drm_connector_to_sun4i_tv(struct drm_connector *connector)
-{
- return container_of(connector, struct sun4i_tv,
- connector);
-}
-
/*
* FIXME: If only the drm_display_mode private field was usable, this
* could go away...
@@ -339,7 +333,8 @@ static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
}
-static void sun4i_tv_disable(struct drm_encoder *encoder)
+static void sun4i_tv_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
@@ -353,27 +348,18 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
sunxi_engine_disable_color_correction(crtc->engine);
}
-static void sun4i_tv_enable(struct drm_encoder *encoder)
+static void sun4i_tv_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, encoder->crtc);
+ struct drm_display_mode *mode = &crtc_state->mode;
+ const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
- sunxi_engine_apply_color_correction(crtc->engine);
-
- regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
- SUN4I_TVE_EN_ENABLE,
- SUN4I_TVE_EN_ENABLE);
-}
-
-static void sun4i_tv_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
-
/* Enable and map the DAC to the output */
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_DAC_MAP_MASK,
@@ -466,12 +452,17 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
SUN4I_TVE_RESYNC_FIELD : 0));
regmap_write(tv->regs, SUN4I_TVE_SLAVE_REG, 0);
+
+ sunxi_engine_apply_color_correction(crtc->engine);
+
+ regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+ SUN4I_TVE_EN_ENABLE,
+ SUN4I_TVE_EN_ENABLE);
}
static const struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
- .disable = sun4i_tv_disable,
- .enable = sun4i_tv_enable,
- .mode_set = sun4i_tv_mode_set,
+ .atomic_disable = sun4i_tv_disable,
+ .atomic_enable = sun4i_tv_enable,
};
static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
@@ -497,27 +488,13 @@ static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
return i;
}
-static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TODO */
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
.get_modes = sun4i_tv_comp_get_modes,
- .mode_valid = sun4i_tv_comp_mode_valid,
};
-static void
-sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = sun4i_tv_comp_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -604,7 +581,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
if (ret) {
dev_err(dev,
"Couldn't initialise the Composite connector\n");
- goto err_cleanup_connector;
+ goto err_cleanup_encoder;
}
tv->connector.interlace_allowed = true;
@@ -612,7 +589,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
return 0;
-err_cleanup_connector:
+err_cleanup_encoder:
drm_encoder_cleanup(&tv->encoder);
err_disable_clk:
clk_disable_unprepare(tv->clk);
@@ -629,6 +606,7 @@ static void sun4i_tv_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&tv->connector);
drm_encoder_cleanup(&tv->encoder);
clk_disable_unprepare(tv->clk);
+ reset_control_assert(tv->reset);
}
static const struct component_ops sun4i_tv_ops = {
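With the switch to the atomic encoder hooks above, the old .mode_set step disappears and the mode is read from the new CRTC state carried by the atomic commit. The pattern, reduced to its core (hypothetical "foo" encoder, not this driver):

    #include <drm/drm_atomic.h>
    #include <drm/drm_modeset_helper_vtables.h>

    static void foo_encoder_atomic_enable(struct drm_encoder *encoder,
                                          struct drm_atomic_state *state)
    {
            /* The CRTC state committed together with this enable */
            struct drm_crtc_state *crtc_state =
                    drm_atomic_get_new_crtc_state(state, encoder->crtc);
            const struct drm_display_mode *mode = &crtc_state->mode;

            /* A real driver programs its timings from *mode* here. */
            (void)mode;
    }

    static void foo_encoder_atomic_disable(struct drm_encoder *encoder,
                                           struct drm_atomic_state *state)
    {
            /* Power down / stop scanout here. */
    }

    static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
            .atomic_enable  = foo_encoder_atomic_enable,
            .atomic_disable = foo_encoder_atomic_disable,
    };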
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index b4dfa166eccd..34234a144e87 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
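Turning Bpp into a signed int here is more than a style change. The blanking widths come from the mode, so "(width) * Bpp - OVERHEAD" can go negative; with Bpp unsigned the whole expression is unsigned, the subtraction wraps around, and max() then keeps the huge wrapped value instead of clamping to the overhead minimum. It also lets the "(unsigned int)" casts go, since the kernel's max() insists on matching argument types. A tiny user-space illustration of the difference (not kernel code):

    #include <stdio.h>

    #define HSA_PACKET_OVERHEAD 10
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned int uBpp = 3;  /* old type */
            int Bpp = 3;            /* new type */
            int sync_width = 2;     /* hsync_end - hsync_start */

            /* Unsigned: 2 * 3 - 10 wraps to a huge value, max() keeps it. */
            printf("unsigned: %u\n", MAX((unsigned int)HSA_PACKET_OVERHEAD,
                                         sync_width * uBpp - HSA_PACKET_OVERHEAD));

            /* Signed: 2 * 3 - 10 = -4, max() correctly clamps to 10. */
            printf("signed:   %d\n", MAX(HSA_PACKET_OVERHEAD,
                                         sync_width * Bpp - HSA_PACKET_OVERHEAD));
            return 0;
    }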
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 648b38a73066..bafee05f6b24 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -16,10 +16,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_drv.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 36da962de394..ca75ca0835a6 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -13,12 +13,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_mixer.h"
@@ -193,25 +192,25 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
- dma_addr_t paddr;
+ struct drm_gem_dma_object *gem;
+ dma_addr_t dma_addr;
u32 ch_base;
int bpp;
ch_base = sun8i_channel_base(mixer, channel);
/* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);
/* Compute the start of the displayed memory */
bpp = fb->format->cpp[0];
- paddr = gem->paddr + fb->offsets[0];
+ dma_addr = gem->dma_addr + fb->offsets[0];
/* Fixup framebuffer address for src coordinates */
- paddr += (state->src.x1 >> 16) * bpp;
- paddr += (state->src.y1 >> 16) * fb->pitches[0];
+ dma_addr += (state->src.x1 >> 16) * bpp;
+ dma_addr += (state->src.y1 >> 16) * fb->pitches[0];
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
@@ -219,11 +218,11 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
fb->pitches[0]);
- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
- lower_32_bits(paddr));
+ lower_32_bits(dma_addr));
return 0;
}
@@ -246,8 +245,8 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ min_scale = DRM_PLANE_NO_SCALING;
+ max_scale = DRM_PLANE_NO_SCALING;
if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_UI_SCALER_SCALE_MIN;
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 1fee6499bdd3..f9c0a56d3a14 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -7,11 +7,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_csc.h"
@@ -309,9 +308,9 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 dx, dy, src_x, src_y;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
u32 ch_base;
int i;
@@ -323,12 +322,12 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
for (i = 0; i < format->num_planes; i++) {
/* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, i);
+ gem = drm_fb_dma_get_gem_obj(fb, i);
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);
/* Compute the start of the displayed memory */
- paddr = gem->paddr + fb->offsets[i];
+ dma_addr = gem->dma_addr + fb->offsets[i];
dx = src_x;
dy = src_y;
@@ -339,8 +338,8 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
}
/* Fixup framebuffer address for src coordinates */
- paddr += dx * format->cpp[i];
- paddr += dy * fb->pitches[i];
+ dma_addr += dx * format->cpp[i];
+ dma_addr += dy * fb->pitches[i];
/* Set the line width */
DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
@@ -351,12 +350,12 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
fb->pitches[i]);
DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
- i + 1, &paddr);
+ i + 1, &dma_addr);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
overlay, i),
- lower_32_bits(paddr));
+ lower_32_bits(dma_addr));
}
return 0;
@@ -380,8 +379,8 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ min_scale = DRM_PLANE_NO_SCALING;
+ max_scale = DRM_PLANE_NO_SCALING;
if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_VI_SCALER_SCALE_MIN;
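DRM_PLANE_HELPER_NO_SCALING was renamed to DRM_PLANE_NO_SCALING and now lives in drm_plane.h, which is why drm_plane_helper.h can be dropped from the includes above. As in both mixer layers here, it is normally passed to drm_atomic_helper_check_plane_state() as the scaling bounds; a stripped-down atomic_check doing only that (hypothetical names, a sketch rather than this driver's code):

    #include <drm/drm_atomic.h>
    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_plane.h>

    static int foo_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);
            struct drm_crtc_state *crtc_state;

            if (!new_state->crtc)
                    return 0;

            crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
            if (WARN_ON(!crtc_state))
                    return -EINVAL;

            /* 16.16 fixed point: identical min/max means "no scaling allowed". */
            return drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                       DRM_PLANE_NO_SCALING,
                                                       DRM_PLANE_NO_SCALING,
                                                       true, true);
    }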
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 747abafb6a5c..bd0f60704467 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -26,7 +26,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "dc.h"
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ed828de5ac01..9291209154a7 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2013 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
- * Based on the KMS/FB CMA helpers
+ * Based on the KMS/FB DMA helpers
* Copyright (C) 2012 Analog Devices Inc.
*/
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index ca9f03e3675b..10090116895f 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -12,7 +12,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "dc.h"
#include "plane.h"
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 2c8273796d9d..91b70f7d2769 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o
+obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o drm_damage_helper_test.o \
+ drm_cmdline_parser_test.o drm_rect_test.o drm_format_test.o drm_plane_helper_test.o \
+ drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o drm_mm_test.o
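All of these suites are gated on CONFIG_DRM_KUNIT_TEST. A minimal .kunitconfig to build and run them through the usual KUnit wrapper, assuming the standard tools/testing/kunit/kunit.py flow, would be something like:

    # .kunitconfig (sketch)
    CONFIG_KUNIT=y
    CONFIG_DRM=y
    CONFIG_DRM_KUNIT_TEST=y
    #
    # then: ./tools/testing/kunit/kunit.py run --kunitconfig=<path to this file>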
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
new file mode 100644
index 000000000000..7a2b2d6bc3fe
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <linux/prime_numbers.h>
+#include <linux/sched/signal.h>
+
+#include <drm/drm_buddy.h>
+
+#include "../lib/drm_random.h"
+
+#define TIMEOUT(name__) \
+ unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
+
+static unsigned int random_seed;
+
+static inline u64 get_size(int order, u64 chunk_size)
+{
+ return (1 << order) * chunk_size;
+}
+
+__printf(2, 3)
+static bool __timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
+
+static void __dump_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block, bool buddy)
+{
+ kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
+ block->header, drm_buddy_block_state(block),
+ drm_buddy_block_order(block), drm_buddy_block_offset(block),
+ drm_buddy_block_size(mm, block), !block->parent, buddy);
+}
+
+static void dump_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+
+ __dump_block(test, mm, block, false);
+
+ buddy = drm_get_buddy(block);
+ if (buddy)
+ __dump_block(test, mm, buddy, true);
+}
+
+static int check_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = drm_buddy_block_state(block);
+
+ if (block_state != DRM_BUDDY_ALLOCATED &&
+ block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
+ kunit_err(test, "block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = drm_buddy_block_size(mm, block);
+ offset = drm_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ kunit_err(test, "block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ kunit_err(test, "block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ kunit_err(test, "block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ kunit_err(test, "block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ kunit_err(test, "block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = drm_get_buddy(block);
+
+ if (!buddy && block->parent) {
+ kunit_err(test, "buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ kunit_err(test, "buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_size(mm, buddy) != block_size) {
+ kunit_err(test, "buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_state(buddy) == block_state &&
+ block_state == DRM_BUDDY_FREE) {
+ kunit_err(test, "block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static int check_blocks(struct kunit *test, struct drm_buddy *mm,
+ struct list_head *blocks, u64 expected_size, bool is_contiguous)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = check_block(test, mm, block);
+
+ if (!drm_buddy_block_is_allocated(block)) {
+ kunit_err(test, "block not allocated\n");
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ kunit_err(test, "block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += drm_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ kunit_err(test, "prev block, dump:\n");
+ dump_block(test, mm, prev);
+ }
+
+ kunit_err(test, "bad block, dump:\n");
+ dump_block(test, mm, block);
+
+ return err;
+}
+
+static int check_mm(struct kunit *test, struct drm_buddy *mm)
+{
+ struct drm_buddy_block *root;
+ struct drm_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ kunit_err(test, "n_roots is zero\n");
+ return -EINVAL;
+ }
+
+ if (mm->n_roots != hweight64(mm->size)) {
+ kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct drm_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ kunit_err(test, "root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = check_block(test, mm, root);
+
+ if (!drm_buddy_block_is_free(root)) {
+ kunit_err(test, "root not free\n");
+ err = -EINVAL;
+ }
+
+ order = drm_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ kunit_err(test, "max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ kunit_err(test, "root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct drm_buddy_block, link);
+ if (block != root) {
+ kunit_err(test, "root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += drm_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ kunit_err(test, "expected mm size=%llx, found=%llx\n",
+ mm->size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ kunit_err(test, "prev root(%u), dump:\n", i - 1);
+ dump_block(test, mm, prev);
+ }
+
+ if (root) {
+ kunit_err(test, "bad root(%u), dump:\n", i);
+ dump_block(test, mm, root);
+ }
+
+ return err;
+}
+
+static void mm_config(u64 *size, u64 *chunk_size)
+{
+ DRM_RND_STATE(prng, random_seed);
+ u32 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, random_seed);
+
+ /* Let size be a random number of pages up to 8 GB (2M pages) */
+ s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+ /* Let the chunk size be a random power of 2 less than size */
+ ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
+ /* Round size down to the chunk size */
+ s &= -ms;
+
+ /* Convert from pages to bytes */
+ *chunk_size = (u64)ms << 12;
+ *size = (u64)s << 12;
+}
+
+static void drm_test_buddy_alloc_pathological(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block;
+ const int max_order = 3;
+ unsigned long flags = 0;
+ int order, top;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ }
+
+ for (order = top; order--;) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
+ mm_size, size, size,
+ &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM for hole\n");
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &holes);
+
+ size = get_size(top, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+ top, max_order);
+ }
+
+ drm_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ }
+
+ list_splice_tail(&holes, &blocks);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_smoke(struct kunit *test)
+{
+ u64 mm_size, chunk_size, start = 0;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ int *order;
+ int i;
+
+ DRM_RND_STATE(prng, random_seed);
+ TIMEOUT(end_time);
+
+ mm_config(&mm_size, &chunk_size);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
+ "buddy_init failed\n");
+
+ order = drm_random_order(mm.max_order + 1, &prng);
+ KUNIT_ASSERT_TRUE(test, order);
+
+ for (i = 0; i <= mm.max_order; ++i) {
+ struct drm_buddy_block *block;
+ int max_order = order[i];
+ bool timeout = false;
+ LIST_HEAD(blocks);
+ u64 total, size;
+ LIST_HEAD(tmp);
+ int order, err;
+
+ KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
+ "pre-mm check failed, abort\n");
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ size = get_size(order, chunk_size);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
+ if (err) {
+ if (err == -ENOMEM) {
+ KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
+ order);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
+ "buddy_alloc order mismatch\n");
+
+ total += drm_buddy_block_size(&mm, block);
+
+ if (__timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
+ } while (total < mm.size);
+
+ if (!err)
+ err = check_blocks(test, &mm, &blocks, total, false);
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
+ "post-mm check failed\n");
+ }
+
+ if (err || timeout)
+ break;
+
+ cond_resched();
+ }
+
+ kfree(order);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block, *bn;
+ const unsigned int max_order = 16;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (order = 0; order < max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM on final alloc\n");
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--;) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded, it should be full!");
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ size = get_size(max_order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_optimistic(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ const int max_order = 16;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+ int order;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
+ */
+
+ mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded, it should be full!");
+
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_range(struct kunit *test)
+{
+ unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
+ u64 offset, size, rem, chunk_size, end;
+ unsigned long page_num;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+
+ mm_config(&size, &chunk_size);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
+ "buddy_init failed");
+
+ KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
+ "pre-mm check failed, abort!");
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct drm_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+ end = offset + size;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
+ size, mm.chunk_size,
+ &tmp, flags),
+ "alloc_range with offset=%llx, size=%llx failed\n", offset, size);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");
+
+ KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
+ "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ drm_buddy_block_offset(block), offset);
+
+ KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));
+
+ list_splice_tail(&tmp, &blocks);
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+
+ cond_resched();
+ }
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");
+
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_limit(struct kunit *test)
+{
+ u64 size = U64_MAX, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ LIST_HEAD(allocated);
+ struct drm_buddy mm;
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+
+ KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
+ "mm.max_order(%d) != %d\n", mm.max_order,
+ DRM_BUDDY_MAX_ORDER);
+
+ size = mm.chunk_size << mm.max_order;
+ KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
+ PAGE_SIZE, &allocated, flags));
+
+ block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
+ KUNIT_EXPECT_TRUE(test, block);
+
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
+ "block order(%d) != %d\n",
+ drm_buddy_block_order(block), mm.max_order);
+
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE,
+ "block size(%llu) != %llu\n",
+ drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
+static int drm_buddy_init_test(struct kunit *test)
+{
+ while (!random_seed)
+ random_seed = get_random_int();
+
+ return 0;
+}
+
+static struct kunit_case drm_buddy_tests[] = {
+ KUNIT_CASE(drm_test_buddy_alloc_limit),
+ KUNIT_CASE(drm_test_buddy_alloc_range),
+ KUNIT_CASE(drm_test_buddy_alloc_optimistic),
+ KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
+ KUNIT_CASE(drm_test_buddy_alloc_smoke),
+ KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ {}
+};
+
+static struct kunit_suite drm_buddy_test_suite = {
+ .name = "drm_buddy",
+ .init = drm_buddy_init_test,
+ .test_cases = drm_buddy_tests,
+};
+
+kunit_test_suite(drm_buddy_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
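The suite above drives the drm_buddy allocator purely through its public API; the lifecycle every test follows (init, allocate into a caller-owned list, free the list, tear down) boils down to something like the sketch below (illustrative sizes, not part of the tests):

    #include <linux/list.h>
    #include <linux/sizes.h>

    #include <drm/drm_buddy.h>

    static int foo_buddy_demo(void)
    {
            struct drm_buddy mm;
            unsigned long flags = 0;
            LIST_HEAD(blocks);
            int err;

            /* Manage a 16 MiB range in 4 KiB minimum chunks. */
            err = drm_buddy_init(&mm, 16 * SZ_1M, SZ_4K);
            if (err)
                    return err;

            /* Ask for 1 MiB anywhere in [0, mm.size); blocks land on the list. */
            err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_1M, SZ_4K,
                                         &blocks, flags);
            if (!err)
                    drm_buddy_free_list(&mm, &blocks);

            drm_buddy_fini(&mm);
            return err;
    }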
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
new file mode 100644
index 000000000000..34790e7a3760
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -0,0 +1,991 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Bootlin
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+
+static const struct drm_connector no_connector = {};
+
+static void drm_test_cmdline_force_e_only(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "e";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_force_D_only_not_digital(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static const struct drm_connector connector_hdmi = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static void drm_test_cmdline_force_D_only_hdmi(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector_hdmi, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static const struct drm_connector connector_dvi = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static void drm_test_cmdline_force_D_only_dvi(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector_dvi, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static void drm_test_cmdline_force_d_only(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "d";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
+}
+
+static void drm_test_cmdline_res(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_vesa(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480M";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_vesa_rblank(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480MR";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_rblank(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480R";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_refresh(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480@60";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_interlaced(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60i";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_TRUE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_margins(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60m";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_off(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60d";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60e";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on_analog(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on_digital(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ static const struct drm_connector connector = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+ };
+ const char *cmdline = "720x480-24@60D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60ime";
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_TRUE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_margins_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480me";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_vesa_margins(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480Mm";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_name(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+}
+
+static void drm_test_cmdline_name_bpp(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC-24";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+}
+
+static void drm_test_cmdline_name_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+}
+
+static void drm_test_cmdline_name_bpp_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC-24,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+}
+
+static void drm_test_cmdline_rotate_0(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=0";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_0);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_90(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=90";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_90);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_180(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_270(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=270";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_270);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_hmirror(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,reflect_x";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_vmirror(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,reflect_y";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_margin_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline =
+ "720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_multiple_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=270,reflect_x";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_bpp_extra_and_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24e,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_extra_and_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480e,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_freestanding_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_freestanding_force_e_and_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_panel_orientation(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "panel_orientation=upside_down";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.panel_orientation, DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
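+/*
+ * Table-driven negative tests: each entry in drm_cmdline_invalid_tests below
+ * is fed to drm_test_cmdline_invalid() via KUNIT_ARRAY_PARAM(), and the
+ * parser is expected to reject the whole command line.
+ */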
+struct drm_cmdline_invalid_test {
+ const char *name;
+ const char *cmdline;
+};
+
+static void drm_test_cmdline_invalid(struct kunit *test)
+{
+ const struct drm_cmdline_invalid_test *params = test->param_value;
+ struct drm_cmdline_mode mode = { };
+
+ KUNIT_EXPECT_FALSE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
+ &no_connector,
+ &mode));
+}
+
+static const struct drm_cmdline_invalid_test drm_cmdline_invalid_tests[] = {
+ {
+ .name = "margin_only",
+ .cmdline = "m",
+ },
+ {
+ .name = "interlace_only",
+ .cmdline = "i",
+ },
+ {
+ .name = "res_missing_x",
+ .cmdline = "x480",
+ },
+ {
+ .name = "res_missing_y",
+ .cmdline = "1024x",
+ },
+ {
+ .name = "res_bad_y",
+ .cmdline = "1024xtest",
+ },
+ {
+ .name = "res_missing_y_bpp",
+ .cmdline = "1024x-24",
+ },
+ {
+ .name = "res_bad_bpp",
+ .cmdline = "720x480-test",
+ },
+ {
+ .name = "res_bad_refresh",
+ .cmdline = "720x480@refresh",
+ },
+ {
+ .name = "res_bpp_refresh_force_on_off",
+ .cmdline = "720x480-24@60de",
+ },
+ {
+ .name = "res_invalid_mode",
+ .cmdline = "720x480f",
+ },
+ {
+ .name = "res_bpp_wrong_place_mode",
+ .cmdline = "720x480e-24",
+ },
+ {
+ .name = "name_bpp_refresh",
+ .cmdline = "NTSC-24@60",
+ },
+ {
+ .name = "name_refresh",
+ .cmdline = "NTSC@60",
+ },
+ {
+ .name = "name_refresh_wrong_mode",
+ .cmdline = "NTSC@60m",
+ },
+ {
+ .name = "name_refresh_invalid_mode",
+ .cmdline = "NTSC@60f",
+ },
+ {
+ .name = "rotate_multiple",
+ .cmdline = "720x480,rotate=0,rotate=90",
+ },
+ {
+ .name = "rotate_invalid_val",
+ .cmdline = "720x480,rotate=42",
+ },
+ {
+ .name = "rotate_truncated",
+ .cmdline = "720x480,rotate=",
+ },
+ {
+ .name = "invalid_option",
+ .cmdline = "720x480,test=42",
+ },
+};
+
+static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc);
+
+static struct kunit_case drm_cmdline_parser_tests[] = {
+ KUNIT_CASE(drm_test_cmdline_force_d_only),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_dvi),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_hdmi),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_not_digital),
+ KUNIT_CASE(drm_test_cmdline_force_e_only),
+ KUNIT_CASE(drm_test_cmdline_res),
+ KUNIT_CASE(drm_test_cmdline_res_vesa),
+ KUNIT_CASE(drm_test_cmdline_res_vesa_rblank),
+ KUNIT_CASE(drm_test_cmdline_res_rblank),
+ KUNIT_CASE(drm_test_cmdline_res_bpp),
+ KUNIT_CASE(drm_test_cmdline_res_refresh),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_margins),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_off),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_analog),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_digital),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_margins_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_vesa_margins),
+ KUNIT_CASE(drm_test_cmdline_name),
+ KUNIT_CASE(drm_test_cmdline_name_bpp),
+ KUNIT_CASE(drm_test_cmdline_name_option),
+ KUNIT_CASE(drm_test_cmdline_name_bpp_option),
+ KUNIT_CASE(drm_test_cmdline_rotate_0),
+ KUNIT_CASE(drm_test_cmdline_rotate_90),
+ KUNIT_CASE(drm_test_cmdline_rotate_180),
+ KUNIT_CASE(drm_test_cmdline_rotate_270),
+ KUNIT_CASE(drm_test_cmdline_hmirror),
+ KUNIT_CASE(drm_test_cmdline_vmirror),
+ KUNIT_CASE(drm_test_cmdline_margin_options),
+ KUNIT_CASE(drm_test_cmdline_multiple_options),
+ KUNIT_CASE(drm_test_cmdline_bpp_extra_and_option),
+ KUNIT_CASE(drm_test_cmdline_extra_and_option),
+ KUNIT_CASE(drm_test_cmdline_freestanding_options),
+ KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options),
+ KUNIT_CASE(drm_test_cmdline_panel_orientation),
+ KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_cmdline_parser_test_suite = {
+ .name = "drm_cmdline_parser",
+ .test_cases = drm_cmdline_parser_tests
+};
+
+kunit_test_suite(drm_cmdline_parser_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_damage_helper_test.c b/drivers/gpu/drm/tests/drm_damage_helper_test.c
new file mode 100644
index 000000000000..115034fc3421
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_damage_helper_test.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_drv.h>
+
+struct drm_damage_mock {
+ struct drm_driver driver;
+ struct drm_device device;
+ struct drm_object_properties obj_props;
+ struct drm_plane plane;
+ struct drm_property prop;
+ struct drm_framebuffer fb;
+ struct drm_plane_state state;
+ struct drm_plane_state old_state;
+};
+
+static int drm_damage_helper_init(struct kunit *test)
+{
+ struct drm_damage_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+
+ mock->fb.width = 2048;
+ mock->fb.height = 2048;
+
+ mock->state.crtc = ZERO_SIZE_PTR;
+ mock->state.fb = &mock->fb;
+ mock->state.visible = true;
+
+ mock->old_state.plane = &mock->plane;
+ mock->state.plane = &mock->plane;
+
+ /* just enough so that drm_plane_enable_fb_damage_clips() works */
+ mock->device.driver = &mock->driver;
+ mock->device.mode_config.prop_fb_damage_clips = &mock->prop;
+ mock->plane.dev = &mock->device;
+ mock->obj_props.count = 0;
+ mock->plane.base.properties = &mock->obj_props;
+ mock->prop.base.id = 1; /* 0 is an invalid id */
+ mock->prop.dev = &mock->device;
+
+ drm_plane_enable_fb_damage_clips(&mock->plane);
+
+ test->priv = mock;
+
+ return 0;
+}
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src_x = x1;
+ state->src_y = y1;
+ state->src_w = x2 - x1;
+ state->src_h = y2 - y1;
+
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
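+/*
+ * Fill a property blob by hand: the damage iterator only looks at the blob's
+ * data and length here, so a caller-provided struct is enough and no
+ * drm_property_create_blob() allocation is needed for these tests.
+ */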
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, u32 size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static void check_damage_clip(struct kunit *test, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_plane_state state = mock->state;
+
+ /*
+ * Round down x1/y1 and round up x2/y2: damage clips are not in 16.16 fixed
+ * point, so rounding outwards is needed to catch every affected pixel.
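+ * For example, a src.x1 of 0x3fffe (just below 4.0 in 16.16 fixed point)
+ * rounds down to 3, while a src.x2 of 0x40002 + (1024 << 16) (just above
+ * 1028.0) rounds up to 1029, matching the fractional-src cases below.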
+ */
+ int src_x1 = state.src.x1 >> 16;
+ int src_y1 = state.src.y1 >> 16;
+ int src_x2 = (state.src.x2 >> 16) + !!(state.src.x2 & 0xFFFF);
+ int src_y2 = (state.src.y2 >> 16) + !!(state.src.y2 & 0xFFFF);
+
+ if (x1 >= x2 || y1 >= y2)
+ KUNIT_FAIL(test, "Cannot have damage clip with no dimension.");
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2)
+ KUNIT_FAIL(test, "Damage cannot be outside rounded plane src.");
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2)
+ KUNIT_FAIL(test, "Damage = %d %d %d %d, want = %d %d %d %d",
+ r->x1, r->y1, r->x2, r->y2, x1, y1, x2, y2);
+}
+
+static void drm_test_damage_iter_no_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src same as fb size. */
+ set_plane_src(&mock->old_state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
+ set_plane_src(&mock->state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 0, 0, 2048, 2048);
+}
+
+static void drm_test_damage_iter_no_damage_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return rounded off plane src as damage.");
+ check_damage_clip(test, &clip, 3, 3, 1028, 772);
+}
+
+static void drm_test_damage_iter_no_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 10, 10, 1034, 778);
+}
+
+static void drm_test_damage_iter_no_damage_fractional_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+}
+
+static void drm_test_damage_iter_no_damage_not_visible(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.visible = false;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_no_damage_no_crtc(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.crtc = NULL;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_no_damage_no_fb(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.fb = NULL;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_simple_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage set to plane src */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 0, 0, 1024, 768);
+}
+
+static void drm_test_damage_iter_single_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 256, 192, 768, 576);
+}
+
+static void drm_test_damage_iter_single_damage_intersect_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage intersects with plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage clipped to src.");
+ check_damage_clip(test, &clip, 256, 192, 1024, 768);
+}
+
+static void drm_test_damage_iter_single_damage_outside_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_single_damage_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 10, 10, 256, 330);
+}
+
+static void drm_test_damage_iter_single_damage_intersect_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersects with plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return damage clipped to rounded off src.");
+ check_damage_clip(test, &clip, 10, 4, 1029, 330);
+}
+
+static void drm_test_damage_iter_single_damage_outside_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_single_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 10, 10, 1034, 778);
+}
+
+static void drm_test_damage_iter_single_damage_fractional_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersects with plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return rounded off plane as damage.");
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+}
+
+static void drm_test_damage_iter_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ check_damage_clip(test, &clip, 20, 30, 200, 180);
+ if (num_hits == 1)
+ check_damage_clip(test, &clip, 240, 200, 280, 250);
+ num_hits++;
+ }
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
+}
+
+static void drm_test_damage_iter_damage_one_intersect(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* 2 damage clips, one intersects plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ check_damage_clip(test, &clip, 20, 30, 200, 180);
+ if (num_hits == 1)
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+ num_hits++;
+ }
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
+}
+
+static void drm_test_damage_iter_damage_one_outside(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 240, 200, 280, 250);
+}
+
+static void drm_test_damage_iter_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return round off plane src as damage.");
+ check_damage_clip(test, &clip, 3, 3, 1028, 772);
+}
+
+static void drm_test_damage_iter_damage_not_visible(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.visible = false;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should not return any damage.");
+}
+
+static struct kunit_case drm_damage_helper_tests[] = {
+ KUNIT_CASE(drm_test_damage_iter_no_damage),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_not_visible),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_no_crtc),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_no_fb),
+ KUNIT_CASE(drm_test_damage_iter_simple_damage),
+ KUNIT_CASE(drm_test_damage_iter_single_damage),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_outside_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_outside_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_damage),
+ KUNIT_CASE(drm_test_damage_iter_damage_one_intersect),
+ KUNIT_CASE(drm_test_damage_iter_damage_one_outside),
+ KUNIT_CASE(drm_test_damage_iter_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_damage_not_visible),
+ { }
+};
+
+static struct kunit_suite drm_damage_helper_test_suite = {
+ .name = "drm_damage_helper",
+ .init = drm_damage_helper_init,
+ .test_cases = drm_damage_helper_tests,
+};
+
+kunit_test_suite(drm_damage_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 4caa9be900ac..65c9d225b558 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -1,19 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for the DRM DP MST helpers
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
#define PREFIX_STR "[drm_dp_mst_helper]"
+#include <kunit/test.h>
+
#include <linux/random.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "../display/drm_dp_mst_topology_internal.h"
-#include "test-drm_modeset_common.h"
-int igt_dp_mst_calc_pbn_mode(void *ignored)
+static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
int pbn, i;
const struct {
@@ -33,13 +36,11 @@ int igt_dp_mst_calc_pbn_mode(void *ignored)
pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
test_params[i].bpp,
test_params[i].dsc);
- FAIL(pbn != test_params[i].expected,
- "Expected PBN %d for clock %d bpp %d, got %d\n",
+ KUNIT_EXPECT_EQ_MSG(test, pbn, test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
test_params[i].expected, test_params[i].rate,
test_params[i].bpp, pbn);
}
-
- return 0;
}
static bool
@@ -176,66 +177,64 @@ out:
return result;
}
-int igt_dp_mst_sideband_msg_req_decode(void *unused)
+static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
{
struct drm_dp_sideband_msg_req_body in = { 0 };
u8 data[] = { 0xff, 0x0, 0xdd };
int i;
-#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in))
-
in.req_type = DP_ENUM_PATH_RESOURCES;
in.u.port_num.port_number = 5;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_POWER_UP_PHY;
in.u.port_num.port_number = 5;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_POWER_DOWN_PHY;
in.u.port_num.port_number = 5;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_ALLOCATE_PAYLOAD;
in.u.allocate_payload.number_sdp_streams = 3;
for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.vcpi = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.pbn = U16_MAX;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_QUERY_PAYLOAD;
in.u.query_payload.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.query_payload.vcpi = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_DPCD_READ;
in.u.dpcd_read.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_read.dpcd_address = 0xfedcb;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_read.num_bytes = U8_MAX;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_DPCD_WRITE;
in.u.dpcd_write.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_write.dpcd_address = 0xfedcb;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
in.u.dpcd_write.bytes = data;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_I2C_READ;
in.u.i2c_read.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_read.read_i2c_device_id = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_read.num_transactions = 3;
in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
@@ -244,32 +243,44 @@ int igt_dp_mst_sideband_msg_req_decode(void *unused)
in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
}
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_I2C_WRITE;
in.u.i2c_write.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_write.write_i2c_device_id = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
in.u.i2c_write.bytes = data;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_QUERY_STREAM_ENC_STATUS;
in.u.enc_status.stream_id = 1;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
get_random_bytes(in.u.enc_status.client_id,
sizeof(in.u.enc_status.client_id));
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.stream_event = 3;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.valid_stream_event = 0;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.stream_behavior = 3;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.valid_stream_behavior = 1;
- DO_TEST();
-
-#undef DO_TEST
- return 0;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
}
+
+static struct kunit_case drm_dp_mst_helper_tests[] = {
+ KUNIT_CASE(drm_test_dp_mst_calc_pbn_mode),
+ KUNIT_CASE(drm_test_dp_mst_sideband_msg_req_decode),
+ { }
+};
+
+static struct kunit_suite drm_dp_mst_helper_test_suite = {
+ .name = "drm_dp_mst_helper",
+ .test_cases = drm_dp_mst_helper_tests,
+};
+
+kunit_test_suite(drm_dp_mst_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 98583bf56044..8d86c250c2ec 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -16,34 +16,101 @@
#define TEST_BUF_SIZE 50
-struct xrgb8888_to_rgb332_case {
+struct convert_to_gray8_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb332_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb565_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+ const u16 expected_swab[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb888_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_xrgb2101010_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
+struct convert_xrgb8888_case {
const char *name;
unsigned int pitch;
- unsigned int dst_pitch;
struct drm_rect clip;
const u32 xrgb8888[TEST_BUF_SIZE];
- const u8 expected[4 * TEST_BUF_SIZE];
+ struct convert_to_gray8_result gray8_result;
+ struct convert_to_rgb332_result rgb332_result;
+ struct convert_to_rgb565_result rgb565_result;
+ struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_xrgb2101010_result xrgb2101010_result;
};
-static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
+static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
{
.name = "single_pixel_source_buffer",
.pitch = 1 * 4,
- .dst_pitch = 0,
.clip = DRM_RECT_INIT(0, 0, 1, 1),
.xrgb8888 = { 0x01FF0000 },
- .expected = { 0xE0 },
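+ /*
+ * The gray8 expectations are consistent with a 3:6:1 weighted average of
+ * R, G and B (a BT.601-style luma approximation): pure red 0xFF gives
+ * (3 * 0xFF) / 10 = 0x4C.
+ */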
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = { 0xE0 },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF800 },
+ .expected_swab = { 0x00F8 },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
},
{
.name = "single_pixel_clip_rectangle",
.pitch = 2 * 4,
- .dst_pitch = 0,
.clip = DRM_RECT_INIT(1, 1, 1, 1),
.xrgb8888 = {
0x00000000, 0x00000000,
0x00000000, 0x10FF0000,
},
- .expected = { 0xE0 },
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = { 0xE0 },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF800 },
+ .expected_swab = { 0x00F8 },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
},
{
/* Well known colors: White, black, red, green, blue, magenta,
@@ -52,7 +119,6 @@ static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
*/
.name = "well_known_colors",
.pitch = 4 * 4,
- .dst_pitch = 0,
.clip = DRM_RECT_INIT(1, 1, 2, 4),
.xrgb8888 = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -61,28 +127,115 @@ static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000,
0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000,
},
- .expected = {
- 0xFF, 0x00,
- 0xE0, 0x1C,
- 0x03, 0xE3,
- 0xFC, 0x1F,
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0x4C, 0x99,
+ 0x19, 0x66,
+ 0xE5, 0xB2,
+ },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0xE0, 0x1C,
+ 0x03, 0xE3,
+ 0xFC, 0x1F,
+ },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x0000,
+ 0xF800, 0x07E0,
+ 0x001F, 0xF81F,
+ 0xFFE0, 0x07FF,
+ },
+ .expected_swab = {
+ 0xFFFF, 0x0000,
+ 0x00F8, 0xE007,
+ 0x1F00, 0x1FF8,
+ 0xE0FF, 0xFF07,
+ },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00,
+ 0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF,
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0x3FFFFFFF, 0x00000000,
+ 0x3FF00000, 0x000FFC00,
+ 0x000003FF, 0x3FF003FF,
+ 0x3FFFFC00, 0x000FFFFF,
+ },
},
},
{
/* Randomly picked colors. Full buffer within the clip area. */
.name = "destination_pitch",
.pitch = 3 * 4,
- .dst_pitch = 5,
.clip = DRM_RECT_INIT(0, 0, 3, 3),
.xrgb8888 = {
0xA10E449C, 0xB1114D05, 0xC1A80303,
0xD16C7073, 0xA20E449C, 0xB2114D05,
0xC2A80303, 0xD26C7073, 0xA30E449C,
},
- .expected = {
- 0x0A, 0x08, 0xA0, 0x00, 0x00,
- 0x6D, 0x0A, 0x08, 0x00, 0x00,
- 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ .gray8_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x3C, 0x33, 0x34, 0x00, 0x00,
+ 0x6F, 0x3C, 0x33, 0x00, 0x00,
+ 0x34, 0x6F, 0x3C, 0x00, 0x00,
+ },
+ },
+ .rgb332_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x0A, 0x08, 0xA0, 0x00, 0x00,
+ 0x6D, 0x0A, 0x08, 0x00, 0x00,
+ 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ },
+ },
+ .rgb565_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0A33, 0x1260, 0xA800, 0x0000, 0x0000,
+ 0x6B8E, 0x0A33, 0x1260, 0x0000, 0x0000,
+ 0xA800, 0x6B8E, 0x0A33, 0x0000, 0x0000,
+ },
+ .expected_swab = {
+ 0x330A, 0x6012, 0x00A8, 0x0000, 0x0000,
+ 0x8E6B, 0x330A, 0x6012, 0x0000, 0x0000,
+ 0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
+ },
+ },
+ .rgb888_result = {
+ .dst_pitch = 15,
+ .expected = {
+ 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000,
+ 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
+ 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
+ },
},
},
};
@@ -111,41 +264,190 @@ static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
return dst_pitch * drm_rect_height(clip);
}
-static void xrgb8888_to_rgb332_case_desc(struct xrgb8888_to_rgb332_case *t,
- char *desc)
+static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
+{
+ u32 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = le32_to_cpu((__force __le32)buf[n]);
+
+ return dst;
+}
+
+static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
+ char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
-KUNIT_ARRAY_PARAM(xrgb8888_to_rgb332, xrgb8888_to_rgb332_cases,
- xrgb8888_to_rgb332_case_desc);
+KUNIT_ARRAY_PARAM(convert_xrgb8888, convert_xrgb8888_cases,
+ convert_xrgb8888_case_desc);
-static void xrgb8888_to_rgb332_test(struct kunit *test)
+static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
{
- const struct xrgb8888_to_rgb332_case *params = test->param_value;
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_gray8_result *result = &params->gray8_result;
size_t dst_size;
- __u8 *dst = NULL;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
struct drm_framebuffer fb = {
.format = drm_format_info(DRM_FORMAT_XRGB8888),
.pitches = { params->pitch, 0, 0 },
};
- dst_size = conversion_buf_size(DRM_FORMAT_RGB332, params->dst_pitch,
+ dst_size = conversion_buf_size(DRM_FORMAT_R8, result->dst_pitch,
&params->clip);
KUNIT_ASSERT_GT(test, dst_size, 0);
- dst = kunit_kzalloc(test, dst_size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dst);
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_gray8(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb332_result *result = &params->rgb332_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB332, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb332(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb565_result *result = &params->rgb565_result;
+ size_t dst_size;
+ __u16 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB565, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, false);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected_swab, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb888_result *result = &params->rgb888_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB888, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_xrgb2101010_result *result = &params->xrgb2101010_result;
+ size_t dst_size;
+ __u32 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_XRGB2101010,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
- drm_fb_xrgb8888_to_rgb332(dst, params->dst_pitch, params->xrgb8888,
- &fb, &params->clip);
- KUNIT_EXPECT_EQ(test, memcmp(dst, params->expected, dst_size), 0);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ /* Convert the result back to CPU order; only dst_size bytes are valid. */
+ buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
}
static struct kunit_case drm_format_helper_test_cases[] = {
- KUNIT_CASE_PARAM(xrgb8888_to_rgb332_test,
- xrgb8888_to_rgb332_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_format_test.c b/drivers/gpu/drm/tests/drm_format_test.c
new file mode 100644
index 000000000000..ec6996ce819a
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_format_test.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_format functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+
+static void drm_test_format_block_width_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+}
+
+static void drm_test_format_block_width_one_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_three_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 3), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_height_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+}
+
+static void drm_test_format_block_height_one_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+}
+
+static void drm_test_format_block_height_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_block_height_three_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 3), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_block_height_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
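+ /* For drm_format_info_min_pitch(), a NULL info, an out-of-range plane or a
+ * zero width is expected to yield 0; valid inputs yield the width scaled by
+ * the plane's bytes per pixel (or bytes per block for tiled formats). */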
+static void drm_test_format_min_pitch_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+}
+
+static void drm_test_format_min_pitch_one_plane_8bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB332);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1));
+}
+
+static void drm_test_format_min_pitch_one_plane_16bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1) * 2);
+}
+
+static void drm_test_format_min_pitch_one_plane_24bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB888);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 3);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 6);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 3072);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 5760);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 12288);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2013);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 3);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 3);
+}
+
+static void drm_test_format_min_pitch_one_plane_32bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ABGR8888);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 8);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 2560);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 7680);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 16384);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2684);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 4);
+}
+
+static void drm_test_format_min_pitch_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 672);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1));
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1));
+}
+
+static void drm_test_format_min_pitch_three_plane_8bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 3, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 320);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 320), 320);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 512);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 512), 512);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 960);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 960), 960);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2048), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 336);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 336), 336);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX / 2 + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX / 2 + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+}
+
+static void drm_test_format_min_pitch_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L2);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 2);
+}
+
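+ /* One KUNIT_CASE() entry per test function above; the empty initializer terminates the table. */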
+static struct kunit_case drm_format_tests[] = {
+ KUNIT_CASE(drm_test_format_block_width_invalid),
+ KUNIT_CASE(drm_test_format_block_width_one_plane),
+ KUNIT_CASE(drm_test_format_block_width_two_plane),
+ KUNIT_CASE(drm_test_format_block_width_three_plane),
+ KUNIT_CASE(drm_test_format_block_width_tiled),
+ KUNIT_CASE(drm_test_format_block_height_invalid),
+ KUNIT_CASE(drm_test_format_block_height_one_plane),
+ KUNIT_CASE(drm_test_format_block_height_two_plane),
+ KUNIT_CASE(drm_test_format_block_height_three_plane),
+ KUNIT_CASE(drm_test_format_block_height_tiled),
+ KUNIT_CASE(drm_test_format_min_pitch_invalid),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_8bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_16bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_24bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_32bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_two_plane),
+ KUNIT_CASE(drm_test_format_min_pitch_three_plane_8bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_tiled),
+ {}
+};
+
+static struct kunit_suite drm_format_test_suite = {
+ .name = "drm_format",
+ .test_cases = drm_format_tests,
+};
+
+kunit_test_suite(drm_format_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index f6d66285c5fc..df235b7fdaa5 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_framebuffer functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
-#include <linux/kernel.h>
+#include <kunit/test.h>
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
@@ -12,8 +14,6 @@
#include "../drm_crtc_internal.h"
-#include "test-drm_modeset_common.h"
-
#define MIN_WIDTH 4
#define MAX_WIDTH 4096
#define MIN_HEIGHT 4
@@ -25,7 +25,7 @@ struct drm_framebuffer_test {
const char *name;
};
-static struct drm_framebuffer_test createbuffer_tests[] = {
+static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
@@ -73,12 +73,14 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
- .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
- .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
@@ -89,11 +91,13 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
- .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
- .flags = DRM_MODE_FB_MODIFIERS, .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
}
},
-{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
+{ .buffer_created = 0,
+ .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
@@ -143,7 +147,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
- .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
@@ -164,7 +169,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
- .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
@@ -203,24 +209,29 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
- .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH + MAX_WIDTH * MAX_HEIGHT,
- MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
- .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1, DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH +
+ MAX_WIDTH * MAX_HEIGHT, MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
}
},
-{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
-{ .buffer_created = 0, .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
- .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .handles = { 1, 1, 1 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
-{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
@@ -230,7 +241,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
{ .buffer_created = 1, .name = "YVU420 Valid modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
- .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
@@ -245,8 +257,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
- .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
- AFBC_FORMAT_MOD_SPARSE },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
@@ -276,7 +288,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
}
},
-{ .buffer_created = 1, .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
+{ .buffer_created = 1,
+ .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
@@ -317,34 +330,53 @@ static struct drm_mode_config_funcs mock_config_funcs = {
.fb_create = fb_create_mock,
};
-static struct drm_device mock_drm_device = {
- .mode_config = {
- .min_width = MIN_WIDTH,
- .max_width = MAX_WIDTH,
- .min_height = MIN_HEIGHT,
- .max_height = MAX_HEIGHT,
- .funcs = &mock_config_funcs,
- },
-};
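+ /* Runs before each test case; kunit_kzalloc() ties the mock device to the
+ * test lifetime, so it is freed automatically when the case finishes. */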
+static int drm_framebuffer_test_init(struct kunit *test)
+{
+ struct drm_device *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+
+ mock->mode_config.min_width = MIN_WIDTH;
+ mock->mode_config.max_width = MAX_WIDTH;
+ mock->mode_config.min_height = MIN_HEIGHT;
+ mock->mode_config.max_height = MAX_HEIGHT;
+ mock->mode_config.funcs = &mock_config_funcs;
-static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r)
+ test->priv = mock;
+ return 0;
+}
+
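+ /* Parameterized case: each entry of drm_framebuffer_create_cases[] is passed in via test->param_value. */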
+static void drm_test_framebuffer_create(struct kunit *test)
{
+ const struct drm_framebuffer_test *params = test->param_value;
+ struct drm_device *mock = test->priv;
int buffer_created = 0;
- mock_drm_device.dev_private = &buffer_created;
- drm_internal_framebuffer_create(&mock_drm_device, r, NULL);
- return buffer_created;
+ mock->dev_private = &buffer_created;
+ drm_internal_framebuffer_create(mock, &params->cmd, NULL);
+ KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created);
}
-int igt_check_drm_framebuffer_create(void *ignored)
+static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc)
{
- int i = 0;
+ strcpy(desc, t->name);
+}
- for (i = 0; i < ARRAY_SIZE(createbuffer_tests); i++) {
- FAIL(createbuffer_tests[i].buffer_created !=
- execute_drm_mode_fb_cmd2(&createbuffer_tests[i].cmd),
- "Test %d: \"%s\" failed\n", i, createbuffer_tests[i].name);
- }
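+ /* KUNIT_ARRAY_PARAM() generates drm_framebuffer_create_gen_params() from the
+ * case table; drm_framebuffer_test_to_desc() names each case in the output. */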
+KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases,
+ drm_framebuffer_test_to_desc);
- return 0;
-}
+static struct kunit_case drm_framebuffer_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params),
+ { }
+};
+
+static struct kunit_suite drm_framebuffer_test_suite = {
+ .name = "drm_framebuffer",
+ .init = drm_framebuffer_test_init,
+ .test_cases = drm_framebuffer_tests,
+};
+
+kunit_test_suite(drm_framebuffer_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/tests/drm_mm_test.c
index b768b53c4aee..659d1af4dca7 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for the drm_mm range manager
+ *
+ * Copyright (c) 2022 Arthur Grillo <arthur.grillo@usp.br>
*/
-#define pr_fmt(fmt) "drm_mm: " fmt
+#include <kunit/test.h>
-#include <linux/module.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
#include <linux/random.h>
@@ -16,9 +17,6 @@
#include "../lib/drm_random.h"
-#define TESTS "drm_mm_selftests.h"
-#include "drm_selftest.h"
-
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;
@@ -45,13 +43,7 @@ static const struct insert_mode {
{}
};
-static int igt_sanitycheck(void *ignored)
-{
- pr_info("%s - ok!\n", __func__);
- return 0;
-}
-
-static bool assert_no_holes(const struct drm_mm *mm)
+static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
{
struct drm_mm_node *hole;
u64 hole_start, __always_unused hole_end;
@@ -61,13 +53,14 @@ static bool assert_no_holes(const struct drm_mm *mm)
drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
count++;
if (count) {
- pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
+ KUNIT_FAIL(test,
+ "Expected to find no holes (after reserve), found %lu instead\n", count);
return false;
}
drm_mm_for_each_node(hole, mm) {
if (drm_mm_hole_follows(hole)) {
- pr_err("Hole follows node, expected none!\n");
+ KUNIT_FAIL(test, "Hole follows node, expected none!\n");
return false;
}
}
@@ -75,7 +68,7 @@ static bool assert_no_holes(const struct drm_mm *mm)
return true;
}
-static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
+static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
{
struct drm_mm_node *hole;
u64 hole_start, hole_end;
@@ -89,62 +82,62 @@ static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (start != hole_start || end != hole_end) {
if (ok)
- pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
- hole_start, hole_end,
- start, end);
+ KUNIT_FAIL(test,
+ "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
+ hole_start, hole_end, start, end);
ok = false;
}
count++;
}
if (count != 1) {
- pr_err("Expected to find one hole, found %lu instead\n", count);
+ KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count);
ok = false;
}
return ok;
}
-static bool assert_continuous(const struct drm_mm *mm, u64 size)
+static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
{
struct drm_mm_node *node, *check, *found;
unsigned long n;
u64 addr;
- if (!assert_no_holes(mm))
+ if (!assert_no_holes(test, mm))
return false;
n = 0;
addr = 0;
drm_mm_for_each_node(node, mm) {
if (node->start != addr) {
- pr_err("node[%ld] list out of order, expected %llx found %llx\n",
- n, addr, node->start);
+ KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
+ n, addr, node->start);
return false;
}
if (node->size != size) {
- pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
- n, size, node->size);
+ KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
+ n, size, node->size);
return false;
}
if (drm_mm_hole_follows(node)) {
- pr_err("node[%ld] is followed by a hole!\n", n);
+ KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
return false;
}
found = NULL;
drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
if (node != check) {
- pr_err("lookup return wrong node, expected start %llx, found %llx\n",
- node->start, check->start);
+ KUNIT_FAIL(test,
+ "lookup return wrong node, expected start %llx, found %llx\n",
+ node->start, check->start);
return false;
}
found = check;
}
if (!found) {
- pr_err("lookup failed for node %llx + %llx\n",
- addr, size);
+ KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
return false;
}
@@ -166,107 +159,96 @@ static u64 misalignment(struct drm_mm_node *node, u64 alignment)
return rem;
}
-static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
+static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
u64 size, u64 alignment, unsigned long color)
{
bool ok = true;
if (!drm_mm_node_allocated(node) || node->mm != mm) {
- pr_err("node not allocated\n");
+ KUNIT_FAIL(test, "node not allocated\n");
ok = false;
}
if (node->size != size) {
- pr_err("node has wrong size, found %llu, expected %llu\n",
- node->size, size);
+ KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n",
+ node->size, size);
ok = false;
}
if (misalignment(node, alignment)) {
- pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
- node->start, misalignment(node, alignment), alignment);
+ KUNIT_FAIL(test,
+ "node is misaligned, start %llx rem %llu, expected alignment %llu\n",
+ node->start, misalignment(node, alignment), alignment);
ok = false;
}
if (node->color != color) {
- pr_err("node has wrong color, found %lu, expected %lu\n",
- node->color, color);
+ KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n",
+ node->color, color);
ok = false;
}
return ok;
}
-#define show_mm(mm) do { \
- struct drm_printer __p = drm_debug_printer(__func__); \
- drm_mm_print((mm), &__p); } while (0)
-
-static int igt_init(void *ignored)
+static void drm_test_mm_init(struct kunit *test)
{
const unsigned int size = 4096;
struct drm_mm mm;
struct drm_mm_node tmp;
- int ret = -EINVAL;
/* Start with some simple checks on initialising the struct drm_mm */
memset(&mm, 0, sizeof(mm));
- if (drm_mm_initialized(&mm)) {
- pr_err("zeroed mm claims to be initialized\n");
- return ret;
- }
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm),
+ "zeroed mm claims to be initialized\n");
memset(&mm, 0xff, sizeof(mm));
drm_mm_init(&mm, 0, size);
if (!drm_mm_initialized(&mm)) {
- pr_err("mm claims not to be initialized\n");
+ KUNIT_FAIL(test, "mm claims not to be initialized\n");
goto out;
}
if (!drm_mm_clean(&mm)) {
- pr_err("mm not empty on creation\n");
+ KUNIT_FAIL(test, "mm not empty on creation\n");
goto out;
}
/* After creation, it should all be one massive hole */
- if (!assert_one_hole(&mm, 0, size)) {
- ret = -EINVAL;
+ if (!assert_one_hole(test, &mm, 0, size)) {
+ KUNIT_FAIL(test, "");
goto out;
}
memset(&tmp, 0, sizeof(tmp));
tmp.start = 0;
tmp.size = size;
- ret = drm_mm_reserve_node(&mm, &tmp);
- if (ret) {
- pr_err("failed to reserve whole drm_mm\n");
+ if (drm_mm_reserve_node(&mm, &tmp)) {
+ KUNIT_FAIL(test, "failed to reserve whole drm_mm\n");
goto out;
}
/* After filling the range entirely, there should be no holes */
- if (!assert_no_holes(&mm)) {
- ret = -EINVAL;
+ if (!assert_no_holes(test, &mm)) {
+ KUNIT_FAIL(test, "");
goto out;
}
/* And then after emptying it again, the massive hole should be back */
drm_mm_remove_node(&tmp);
- if (!assert_one_hole(&mm, 0, size)) {
- ret = -EINVAL;
+ if (!assert_one_hole(test, &mm, 0, size)) {
+ KUNIT_FAIL(test, "");
goto out;
}
out:
- if (ret)
- show_mm(&mm);
drm_mm_takedown(&mm);
- return ret;
}
-static int igt_debug(void *ignored)
+static void drm_test_mm_debug(struct kunit *test)
{
struct drm_mm mm;
struct drm_mm_node nodes[2];
- int ret;
/* Create a small drm_mm with a couple of nodes and a few holes, and
* check that the debug iterator doesn't explode over a trivial drm_mm.
@@ -277,24 +259,15 @@ static int igt_debug(void *ignored)
memset(nodes, 0, sizeof(nodes));
nodes[0].start = 512;
nodes[0].size = 1024;
- ret = drm_mm_reserve_node(&mm, &nodes[0]);
- if (ret) {
- pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
- nodes[0].start, nodes[0].size);
- return ret;
- }
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
+ "failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
nodes[1].size = 1024;
nodes[1].start = 4096 - 512 - nodes[1].size;
- ret = drm_mm_reserve_node(&mm, &nodes[1]);
- if (ret) {
- pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
- nodes[1].start, nodes[1].size);
- return ret;
- }
-
- show_mm(&mm);
- return 0;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
+ "failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
}
static struct drm_mm_node *set_node(struct drm_mm_node *node,
@@ -305,7 +278,7 @@ static struct drm_mm_node *set_node(struct drm_mm_node *node,
return node;
}
-static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
+static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
{
int err;
@@ -314,17 +287,18 @@ static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
return true;
if (!err) {
- pr_err("impossible reserve succeeded, node %llu + %llu\n",
- node->start, node->size);
+ KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
+ node->start, node->size);
drm_mm_remove_node(node);
} else {
- pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
+ KUNIT_FAIL(test,
+ "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
err, -ENOSPC, node->start, node->size);
}
return false;
}
-static bool check_reserve_boundaries(struct drm_mm *mm,
+static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
unsigned int count,
u64 size)
{
@@ -339,29 +313,27 @@ static bool check_reserve_boundaries(struct drm_mm *mm,
B(size * count, 0),
B(-size, size),
B(-size, -size),
- B(-size, 2*size),
+ B(-size, 2 * size),
B(0, -size),
B(size, -size),
- B(count*size, size),
- B(count*size, -size),
- B(count*size, count*size),
- B(count*size, -count*size),
- B(count*size, -(count+1)*size),
- B((count+1)*size, size),
- B((count+1)*size, -size),
- B((count+1)*size, -2*size),
+ B(count * size, size),
+ B(count * size, -size),
+ B(count * size, count * size),
+ B(count * size, -count * size),
+ B(count * size, -(count + 1) * size),
+ B((count + 1) * size, size),
+ B((count + 1) * size, -size),
+ B((count + 1) * size, -2 * size),
#undef B
};
struct drm_mm_node tmp = {};
int n;
for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
- if (!expect_reserve_fail(mm,
- set_node(&tmp,
- boundaries[n].start,
- boundaries[n].size))) {
- pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
- n, boundaries[n].name, count, size);
+ if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
+ boundaries[n].size))) {
+ KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
+ n, boundaries[n].name, count, size);
return false;
}
}
@@ -369,7 +341,7 @@ static bool check_reserve_boundaries(struct drm_mm *mm,
return true;
}
-static int __igt_reserve(unsigned int count, u64 size)
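+ /* Returns 0 on success so callers can wrap it in KUNIT_ASSERT_FALSE(). */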
+static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
{
DRM_RND_STATE(prng, random_seed);
struct drm_mm mm;
@@ -392,13 +364,12 @@ static int __igt_reserve(unsigned int count, u64 size)
goto err;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err_order;
+ KUNIT_ASSERT_TRUE(test, nodes);
ret = -EINVAL;
drm_mm_init(&mm, 0, count * size);
- if (!check_reserve_boundaries(&mm, count, size))
+ if (!check_reserve_boundaries(test, &mm, count, size))
goto out;
for (n = 0; n < count; n++) {
@@ -407,57 +378,53 @@ static int __igt_reserve(unsigned int count, u64 size)
err = drm_mm_reserve_node(&mm, &nodes[n]);
if (err) {
- pr_err("reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
+ KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
ret = err;
goto out;
}
if (!drm_mm_node_allocated(&nodes[n])) {
- pr_err("reserved node not allocated! step %d, start %llu\n",
- n, nodes[n].start);
+ KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
+ n, nodes[n].start);
goto out;
}
- if (!expect_reserve_fail(&mm, &nodes[n]))
+ if (!expect_reserve_fail(test, &mm, &nodes[n]))
goto out;
}
/* After random insertion the nodes should be in order */
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
/* Repeated use should then fail */
drm_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(&mm,
- set_node(&tmp, order[n] * size, 1)))
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
goto out;
/* Remove and reinsert should work */
drm_mm_remove_node(&nodes[order[n]]);
err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
if (err) {
- pr_err("reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
+ KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
ret = err;
goto out;
}
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
/* Overlapping use should then fail */
for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
goto out;
}
for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(&mm,
- set_node(&tmp,
- size * n,
- size * (count - n))))
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
goto out;
}
@@ -472,8 +439,8 @@ static int __igt_reserve(unsigned int count, u64 size)
node = &nodes[order[(o + m) % count]];
err = drm_mm_reserve_node(&mm, node);
if (err) {
- pr_err("reserve failed, step %d/%d, start %llu\n",
- m, n, node->start);
+ KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
+ m, n, node->start);
ret = err;
goto out;
}
@@ -481,7 +448,7 @@ static int __igt_reserve(unsigned int count, u64 size)
o += n;
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
}
@@ -491,41 +458,30 @@ out:
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-err_order:
kfree(order);
err:
return ret;
}
-static int igt_reserve(void *ignored)
+static void drm_test_mm_reserve(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- int n, ret;
+ int n;
for_each_prime_number_from(n, 1, 54) {
u64 size = BIT_ULL(n);
- ret = __igt_reserve(count, size - 1);
- if (ret)
- return ret;
-
- ret = __igt_reserve(count, size);
- if (ret)
- return ret;
-
- ret = __igt_reserve(count, size + 1);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
cond_resched();
}
-
- return 0;
}
-static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
- u64 size, u64 alignment, unsigned long color,
- const struct insert_mode *mode)
+static bool expect_insert(struct kunit *test, struct drm_mm *mm,
+ struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
+ const struct insert_mode *mode)
{
int err;
@@ -533,12 +489,13 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
size, alignment, color,
mode->mode);
if (err) {
- pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
- size, alignment, color, mode->name, err);
+ KUNIT_FAIL(test,
+ "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
+ size, alignment, color, mode->name, err);
return false;
}
- if (!assert_node(node, mm, size, alignment, color)) {
+ if (!assert_node(test, node, mm, size, alignment, color)) {
drm_mm_remove_node(node);
return false;
}
@@ -546,7 +503,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
return true;
}
-static bool expect_insert_fail(struct drm_mm *mm, u64 size)
+static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
{
struct drm_mm_node tmp = {};
int err;
@@ -556,17 +513,18 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
return true;
if (!err) {
- pr_err("impossible insert succeeded, node %llu + %llu\n",
- tmp.start, tmp.size);
+ KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
+ tmp.start, tmp.size);
drm_mm_remove_node(&tmp);
} else {
- pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
- err, -ENOSPC, size);
+ KUNIT_FAIL(test,
+ "impossible insert failed with wrong error %d [expected %d], size %llu\n",
+ err, -ENOSPC, size);
}
return false;
}
-static int __igt_insert(unsigned int count, u64 size, bool replace)
+static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
{
DRM_RND_STATE(prng, random_seed);
const struct insert_mode *mode;
@@ -582,8 +540,7 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
ret = -ENOMEM;
nodes = vmalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(count, &prng);
if (!order)
@@ -598,41 +555,43 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
node = replace ? &tmp : &nodes[n];
memset(node, 0, sizeof(*node));
- if (!expect_insert(&mm, node, size, 0, n, mode)) {
- pr_err("%s insert failed, size %llu step %d\n",
- mode->name, size, n);
+ if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
+ KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
+ mode->name, size, n);
goto out;
}
if (replace) {
drm_mm_replace_node(&tmp, &nodes[n]);
if (drm_mm_node_allocated(&tmp)) {
- pr_err("replaced old-node still allocated! step %d\n",
- n);
+ KUNIT_FAIL(test,
+ "replaced old-node still allocated! step %d\n",
+ n);
goto out;
}
- if (!assert_node(&nodes[n], &mm, size, 0, n)) {
- pr_err("replaced node did not inherit parameters, size %llu step %d\n",
- size, n);
+ if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
+ KUNIT_FAIL(test,
+ "replaced node did not inherit parameters, size %llu step %d\n",
+ size, n);
goto out;
}
if (tmp.start != nodes[n].start) {
- pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
- tmp.start, size,
- nodes[n].start, nodes[n].size);
+ KUNIT_FAIL(test,
+ "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
+ tmp.start, size, nodes[n].start, nodes[n].size);
goto out;
}
}
}
/* After random insertion the nodes should be in order */
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
/* Repeated use should then fail */
- if (!expect_insert_fail(&mm, size))
+ if (!expect_insert_fail(test, &mm, size))
goto out;
/* Remove one and reinsert, as the only hole it should refill itself */
@@ -640,19 +599,20 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
u64 addr = nodes[n].start;
drm_mm_remove_node(&nodes[n]);
- if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
- pr_err("%s reinsert failed, size %llu step %d\n",
- mode->name, size, n);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
+ KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
goto out;
}
if (nodes[n].start != addr) {
- pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
+ KUNIT_FAIL(test,
+ "%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
goto out;
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
}
@@ -665,19 +625,20 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
for (m = 0; m < n; m++) {
node = &nodes[order[(o + m) % count]];
- if (!expect_insert(&mm, node, size, 0, n, mode)) {
- pr_err("%s multiple reinsert failed, size %llu step %d\n",
- mode->name, size, n);
+ if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s multiple reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
goto out;
}
}
o += n;
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
- if (!expect_insert_fail(&mm, size))
+ if (!expect_insert_fail(test, &mm, size))
goto out;
}
@@ -696,44 +657,31 @@ out:
kfree(order);
err_nodes:
vfree(nodes);
-err:
return ret;
}
-static int igt_insert(void *ignored)
+static void drm_test_mm_insert(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
unsigned int n;
- int ret;
for_each_prime_number_from(n, 1, 54) {
u64 size = BIT_ULL(n);
- ret = __igt_insert(count, size - 1, false);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size, false);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size + 1, false);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
cond_resched();
}
-
- return 0;
}
-static int igt_replace(void *ignored)
+static void drm_test_mm_replace(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
unsigned int n;
- int ret;
- /* Reuse igt_insert to exercise replacement by inserting a dummy node,
+ /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
* then replacing it with the intended node. We want to check that
* the tree is intact and all the information we need is carried
* across to the target node.
@@ -742,28 +690,17 @@ static int igt_replace(void *ignored)
for_each_prime_number_from(n, 1, 54) {
u64 size = BIT_ULL(n);
- ret = __igt_insert(count, size - 1, true);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size, true);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size + 1, true);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
cond_resched();
}
-
- return 0;
}
-static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
- u64 range_start, u64 range_end,
- const struct insert_mode *mode)
+ u64 range_start, u64 range_end, const struct insert_mode *mode)
{
int err;
@@ -772,13 +709,14 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
range_start, range_end,
mode->mode);
if (err) {
- pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
- size, alignment, color, mode->name,
- range_start, range_end, err);
+ KUNIT_FAIL(test,
+ "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
+ size, alignment, color, mode->name,
+ range_start, range_end, err);
return false;
}
- if (!assert_node(node, mm, size, alignment, color)) {
+ if (!assert_node(test, node, mm, size, alignment, color)) {
drm_mm_remove_node(node);
return false;
}
@@ -786,67 +724,63 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
return true;
}
-static bool expect_insert_in_range_fail(struct drm_mm *mm,
- u64 size,
- u64 range_start,
- u64 range_end)
+static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
+ u64 size, u64 range_start, u64 range_end)
{
struct drm_mm_node tmp = {};
int err;
- err = drm_mm_insert_node_in_range(mm, &tmp,
- size, 0, 0,
- range_start, range_end,
+ err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
0);
if (likely(err == -ENOSPC))
return true;
if (!err) {
- pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
- tmp.start, tmp.size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
+ tmp.start, tmp.size, range_start, range_end);
drm_mm_remove_node(&tmp);
} else {
- pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
- err, -ENOSPC, size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
+ err, -ENOSPC, size, range_start, range_end);
}
return false;
}
-static bool assert_contiguous_in_range(struct drm_mm *mm,
- u64 size,
- u64 start,
- u64 end)
+static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
+ u64 size, u64 start, u64 end)
{
struct drm_mm_node *node;
unsigned int n;
- if (!expect_insert_in_range_fail(mm, size, start, end))
+ if (!expect_insert_in_range_fail(test, mm, size, start, end))
return false;
n = div64_u64(start + size - 1, size);
drm_mm_for_each_node(node, mm) {
if (node->start < start || node->start + node->size > end) {
- pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
- n, node->start, node->start + node->size, start, end);
+ KUNIT_FAIL(test,
+ "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
+ n, node->start, node->start + node->size, start, end);
return false;
}
if (node->start != n * size) {
- pr_err("node %d out of order, expected start %llx, found %llx\n",
- n, n * size, node->start);
+ KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
+ n, n * size, node->start);
return false;
}
if (node->size != size) {
- pr_err("node %d has wrong size, expected size %llx, found %llx\n",
- n, size, node->size);
+ KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
+ n, size, node->size);
return false;
}
- if (drm_mm_hole_follows(node) &&
- drm_mm_hole_node_end(node) < end) {
- pr_err("node %d is followed by a hole!\n", n);
+ if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
+ KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
return false;
}
@@ -856,8 +790,8 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
if (drm_mm_node_allocated(node)) {
- pr_err("node before start: node=%llx+%llu, start=%llx\n",
- node->start, node->size, start);
+ KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
+ node->start, node->size, start);
return false;
}
}
@@ -865,8 +799,8 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
if (drm_mm_node_allocated(node)) {
- pr_err("node after end: node=%llx+%llu, end=%llx\n",
- node->start, node->size, end);
+ KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
+ node->start, node->size, end);
return false;
}
}
@@ -874,7 +808,8 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
return true;
}
-static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
+static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
+ u64 start, u64 end)
{
const struct insert_mode *mode;
struct drm_mm mm;
@@ -886,14 +821,13 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
DRM_MM_BUG_ON(!size);
DRM_MM_BUG_ON(end <= start);
- /* Very similar to __igt_insert(), but now instead of populating the
+ /* Very similar to __drm_test_mm_insert(), but now instead of populating the
* full range of the drm_mm, we try to fill a small portion of it.
*/
ret = -ENOMEM;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
ret = -EINVAL;
drm_mm_init(&mm, 0, count * size);
@@ -903,20 +837,19 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
for (mode = insert_modes; mode->name; mode++) {
for (n = start_n; n <= end_n; n++) {
- if (!expect_insert_in_range(&mm, &nodes[n],
- size, size, n,
+ if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
start, end, mode)) {
- pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
- mode->name, size, n,
- start_n, end_n,
- start, end);
+ KUNIT_FAIL(test,
+ "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
+ mode->name, size, n, start_n, end_n, start, end);
goto out;
}
}
- if (!assert_contiguous_in_range(&mm, size, start, end)) {
- pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
- mode->name, start, end, size);
+ if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
+ KUNIT_FAIL(test,
+ "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
+ mode->name, start, end, size);
goto out;
}
@@ -925,23 +858,24 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
u64 addr = nodes[n].start;
drm_mm_remove_node(&nodes[n]);
- if (!expect_insert_in_range(&mm, &nodes[n],
- size, size, n,
+ if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
start, end, mode)) {
- pr_err("%s reinsert failed, step %d\n", mode->name, n);
+ KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
goto out;
}
if (nodes[n].start != addr) {
- pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
+ KUNIT_FAIL(test,
+ "%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
goto out;
}
}
- if (!assert_contiguous_in_range(&mm, size, start, end)) {
- pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
- mode->name, start, end, size);
+ if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
+ KUNIT_FAIL(test,
+ "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
+ mode->name, start, end, size);
goto out;
}
@@ -958,11 +892,10 @@ out:
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-err:
return ret;
}
-static int insert_outside_range(void)
+static int insert_outside_range(struct kunit *test)
{
struct drm_mm mm;
const unsigned int start = 1024;
@@ -971,81 +904,58 @@ static int insert_outside_range(void)
drm_mm_init(&mm, start, size);
- if (!expect_insert_in_range_fail(&mm, 1, 0, start))
+ if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
return -EINVAL;
- if (!expect_insert_in_range_fail(&mm, size,
- start - size/2, start + (size+1)/2))
+ if (!expect_insert_in_range_fail(test, &mm, size,
+ start - size / 2, start + (size + 1) / 2))
return -EINVAL;
- if (!expect_insert_in_range_fail(&mm, size,
- end - (size+1)/2, end + size/2))
+ if (!expect_insert_in_range_fail(test, &mm, size,
+ end - (size + 1) / 2, end + size / 2))
return -EINVAL;
- if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
+ if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
return -EINVAL;
drm_mm_takedown(&mm);
return 0;
}
-static int igt_insert_range(void *ignored)
+static void drm_test_mm_insert_range(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
unsigned int n;
- int ret;
/* Check that requests outside the bounds of drm_mm are rejected. */
- ret = insert_outside_range();
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
for_each_prime_number_from(n, 1, 50) {
const u64 size = BIT_ULL(n);
const u64 max = count * size;
- ret = __igt_insert_range(count, size, 0, max);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, 1, max);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, 0, max - 1);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, 0, max/2);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, max/2, max);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ max / 2, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ max / 4 + 1, 3 * max / 4 - 1));
cond_resched();
}
-
- return 0;
}
-static int prepare_igt_frag(struct drm_mm *mm,
- struct drm_mm_node *nodes,
- unsigned int num_insert,
- const struct insert_mode *mode)
+static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
+ unsigned int num_insert, const struct insert_mode *mode)
{
unsigned int size = 4096;
unsigned int i;
for (i = 0; i < num_insert; i++) {
- if (!expect_insert(mm, &nodes[i], size, 0, i,
- mode) != 0) {
- pr_err("%s insert failed\n", mode->name);
+ if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) {
+ KUNIT_FAIL(test, "%s insert failed\n", mode->name);
return -EINVAL;
}
}
@@ -1057,12 +967,10 @@ static int prepare_igt_frag(struct drm_mm *mm,
}
return 0;
-
}
-static u64 get_insert_time(struct drm_mm *mm,
- unsigned int num_insert,
- struct drm_mm_node *nodes,
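+ /* Returns the elapsed insertion time in nanoseconds, or 0 if an insert failed. */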
+static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
+ unsigned int num_insert, struct drm_mm_node *nodes,
const struct insert_mode *mode)
{
unsigned int size = 8192;
@@ -1071,8 +979,8 @@ static u64 get_insert_time(struct drm_mm *mm,
start = ktime_get();
for (i = 0; i < num_insert; i++) {
- if (!expect_insert(mm, &nodes[i], size, 0, i, mode) != 0) {
- pr_err("%s insert failed\n", mode->name);
+ if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) {
+ KUNIT_FAIL(test, "%s insert failed\n", mode->name);
return 0;
}
}
@@ -1080,28 +988,26 @@ static u64 get_insert_time(struct drm_mm *mm,
return ktime_to_ns(ktime_sub(ktime_get(), start));
}
-static int igt_frag(void *ignored)
+static void drm_test_mm_frag(struct kunit *test)
{
struct drm_mm mm;
const struct insert_mode *mode;
struct drm_mm_node *nodes, *node, *next;
unsigned int insert_size = 10000;
unsigned int scale_factor = 4;
- int ret = -EINVAL;
/* We need 4 * insert_size nodes to hold intermediate allocated
* drm_mm nodes.
- * 1 times for prepare_igt_frag()
+ * 1 times for prepare_frag()
* 1 times for get_insert_time()
* 2 times for get_insert_time()
*/
nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
- if (!nodes)
- return -ENOMEM;
+ KUNIT_ASSERT_TRUE(test, nodes);
/* For BOTTOMUP and TOPDOWN, we first fragment the
- * address space using prepare_igt_frag() and then try to verify
- * that that insertions scale quadratically from 10k to 20k insertions
+ * address space using prepare_frag() and then try to verify
+ * that insertions scale quadratically from 10k to 20k insertions
*/
drm_mm_init(&mm, 1, U64_MAX - 2);
for (mode = insert_modes; mode->name; mode++) {
@@ -1111,28 +1017,25 @@ static int igt_frag(void *ignored)
mode->mode != DRM_MM_INSERT_HIGH)
continue;
- ret = prepare_igt_frag(&mm, nodes, insert_size, mode);
- if (ret)
+ if (prepare_frag(test, &mm, nodes, insert_size, mode))
goto err;
- insert_time1 = get_insert_time(&mm, insert_size,
+ insert_time1 = get_insert_time(test, &mm, insert_size,
nodes + insert_size, mode);
if (insert_time1 == 0)
goto err;
- insert_time2 = get_insert_time(&mm, (insert_size * 2),
+ insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
nodes + insert_size * 2, mode);
if (insert_time2 == 0)
goto err;
- pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
- mode->name, insert_size, insert_size * 2,
- insert_time1, insert_time2);
+ kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
+ mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);
if (insert_time2 > (scale_factor * insert_time1)) {
- pr_err("%s fragmented insert took %llu nsecs more\n",
- mode->name,
- insert_time2 - (scale_factor * insert_time1));
+ KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
+ mode->name, insert_time2 - (scale_factor * insert_time1));
goto err;
}
@@ -1140,24 +1043,20 @@ static int igt_frag(void *ignored)
drm_mm_remove_node(node);
}
- ret = 0;
err:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-
- return ret;
}
-static int igt_align(void *ignored)
+static void drm_test_mm_align(struct kunit *test)
{
const struct insert_mode *mode;
const unsigned int max_count = min(8192u, max_prime);
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int prime;
- int ret = -EINVAL;
/* For each of the possible insertion modes, we pick a few
* arbitrary alignments and check that the inserted node
@@ -1165,8 +1064,7 @@ static int igt_align(void *ignored)
*/
nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
drm_mm_init(&mm, 1, U64_MAX - 2);
@@ -1176,11 +1074,9 @@ static int igt_align(void *ignored)
for_each_prime_number_from(prime, 1, max_count) {
u64 size = next_prime_number(prime);
- if (!expect_insert(&mm, &nodes[i],
- size, prime, i,
- mode)) {
- pr_err("%s insert failed with alignment=%d",
- mode->name, prime);
+ if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed with alignment=%d",
+ mode->name, prime);
goto out;
}
@@ -1194,22 +1090,18 @@ static int igt_align(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-err:
- return ret;
}
-static int igt_align_pot(int max)
+static void drm_test_mm_align_pot(struct kunit *test, int max)
{
struct drm_mm mm;
struct drm_mm_node *node, *next;
int bit;
- int ret = -EINVAL;
/* Check that we can align to the full u64 address space */
@@ -1220,51 +1112,45 @@ static int igt_align_pot(int max)
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
- ret = -ENOMEM;
+ KUNIT_FAIL(test, "failed to allocate node");
goto out;
}
align = BIT_ULL(bit);
- size = BIT_ULL(bit-1) + 1;
- if (!expect_insert(&mm, node,
- size, align, bit,
- &insert_modes[0])) {
- pr_err("insert failed with alignment=%llx [%d]",
- align, bit);
+ size = BIT_ULL(bit - 1) + 1;
+ if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
goto out;
}
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm) {
drm_mm_remove_node(node);
kfree(node);
}
drm_mm_takedown(&mm);
- return ret;
}
-static int igt_align32(void *ignored)
+static void drm_test_mm_align32(struct kunit *test)
{
- return igt_align_pot(32);
+ drm_test_mm_align_pot(test, 32);
}
-static int igt_align64(void *ignored)
+static void drm_test_mm_align64(struct kunit *test)
{
- return igt_align_pot(64);
+ drm_test_mm_align_pot(test, 64);
}
-static void show_scan(const struct drm_mm_scan *scan)
+static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
{
- pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
- scan->hit_start, scan->hit_end,
- scan->size, scan->alignment, scan->color);
+ kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
+ scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
}
-static void show_holes(const struct drm_mm *mm, int count)
+static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
{
u64 hole_start, hole_end;
struct drm_mm_node *hole;
@@ -1274,19 +1160,15 @@ static void show_holes(const struct drm_mm *mm, int count)
const char *node1 = NULL, *node2 = NULL;
if (drm_mm_node_allocated(hole))
- node1 = kasprintf(GFP_KERNEL,
- "[%llx + %lld, color=%ld], ",
+ node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
if (drm_mm_node_allocated(next))
- node2 = kasprintf(GFP_KERNEL,
- ", [%llx + %lld, color=%ld]",
+ node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
- pr_info("%sHole [%llx - %llx, size %lld]%s\n",
- node1,
- hole_start, hole_end, hole_end - hole_start,
- node2);
+ kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
+ hole_start, hole_end, hole_end - hole_start, node2);
kfree(node2);
kfree(node1);
@@ -1301,12 +1183,9 @@ struct evict_node {
struct list_head link;
};
-static bool evict_nodes(struct drm_mm_scan *scan,
- struct evict_node *nodes,
- unsigned int *order,
- unsigned int count,
- bool use_color,
- struct list_head *evict_list)
+static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
+ struct evict_node *nodes, unsigned int *order, unsigned int count,
+ bool use_color, struct list_head *evict_list)
{
struct evict_node *e, *en;
unsigned int i;
@@ -1322,8 +1201,9 @@ static bool evict_nodes(struct drm_mm_scan *scan,
list_del(&e->link);
}
if (list_empty(evict_list)) {
- pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
- scan->size, count, scan->alignment, scan->color);
+ KUNIT_FAIL(test,
+ "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
+ scan->size, count, scan->alignment, scan->color);
return false;
}
@@ -1340,7 +1220,8 @@ static bool evict_nodes(struct drm_mm_scan *scan,
}
} else {
if (drm_mm_scan_color_evict(scan)) {
- pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
+ KUNIT_FAIL(test,
+ "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
return false;
}
}
@@ -1348,9 +1229,8 @@ static bool evict_nodes(struct drm_mm_scan *scan,
return true;
}
-static bool evict_nothing(struct drm_mm *mm,
- unsigned int total_size,
- struct evict_node *nodes)
+static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
+ unsigned int total_size, struct evict_node *nodes)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -1371,7 +1251,7 @@ static bool evict_nothing(struct drm_mm *mm,
e = &nodes[n];
if (!drm_mm_node_allocated(&e->node)) {
- pr_err("node[%d] no longer allocated!\n", n);
+ KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
return false;
}
@@ -1387,17 +1267,16 @@ static bool evict_nothing(struct drm_mm *mm,
e = &nodes[n];
if (!e->link.next) {
- pr_err("node[%d] no longer connected!\n", n);
+ KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
return false;
}
}
- return assert_continuous(mm, nodes[0].node.size);
+ return assert_continuous(test, mm, nodes[0].node.size);
}
-static bool evict_everything(struct drm_mm *mm,
- unsigned int total_size,
- struct evict_node *nodes)
+static bool evict_everything(struct kunit *test, struct drm_mm *mm,
+ unsigned int total_size, struct evict_node *nodes)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -1417,8 +1296,8 @@ static bool evict_everything(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link) {
if (!drm_mm_scan_remove_block(&scan, &e->node)) {
if (!err) {
- pr_err("Node %lld not marked for eviction!\n",
- e->node.start);
+ KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
+ e->node.start);
err = -EINVAL;
}
}
@@ -1429,29 +1308,25 @@ static bool evict_everything(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link)
drm_mm_remove_node(&e->node);
- if (!assert_one_hole(mm, 0, total_size))
+ if (!assert_one_hole(test, mm, 0, total_size))
return false;
list_for_each_entry(e, &evict_list, link) {
err = drm_mm_reserve_node(mm, &e->node);
if (err) {
- pr_err("Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
return false;
}
}
- return assert_continuous(mm, nodes[0].node.size);
+ return assert_continuous(test, mm, nodes[0].node.size);
}
-static int evict_something(struct drm_mm *mm,
- u64 range_start, u64 range_end,
- struct evict_node *nodes,
- unsigned int *order,
- unsigned int count,
- unsigned int size,
- unsigned int alignment,
- const struct insert_mode *mode)
+static int evict_something(struct kunit *test, struct drm_mm *mm,
+ u64 range_start, u64 range_end, struct evict_node *nodes,
+ unsigned int *order, unsigned int count, unsigned int size,
+ unsigned int alignment, const struct insert_mode *mode)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -1459,38 +1334,35 @@ static int evict_something(struct drm_mm *mm,
struct drm_mm_node tmp;
int err;
- drm_mm_scan_init_with_range(&scan, mm,
- size, alignment, 0,
- range_start, range_end,
- mode->mode);
- if (!evict_nodes(&scan,
- nodes, order, count, false,
- &evict_list))
+ drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
+ range_end, mode->mode);
+ if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
return -EINVAL;
memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
DRM_MM_INSERT_EVICT);
if (err) {
- pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
- size, alignment);
- show_scan(&scan);
- show_holes(mm, 3);
+ KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
+ size, alignment);
+ show_scan(test, &scan);
+ show_holes(test, mm, 3);
return err;
}
if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
err = -EINVAL;
}
- if (!assert_node(&tmp, mm, size, alignment, 0) ||
+ if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
drm_mm_hole_follows(&tmp)) {
- pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
- tmp.size, size,
- alignment, misalignment(&tmp, alignment),
- tmp.start, drm_mm_hole_follows(&tmp));
+ KUNIT_FAIL(test,
+ "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
+ tmp.size, size, alignment, misalignment(&tmp, alignment),
+ tmp.start, drm_mm_hole_follows(&tmp));
err = -EINVAL;
}
@@ -1501,21 +1373,21 @@ static int evict_something(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link) {
err = drm_mm_reserve_node(mm, &e->node);
if (err) {
- pr_err("Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
return err;
}
}
- if (!assert_continuous(mm, nodes[0].node.size)) {
- pr_err("range is no longer continuous\n");
+ if (!assert_continuous(test, mm, nodes[0].node.size)) {
+ KUNIT_FAIL(test, "range is no longer continuous\n");
return -EINVAL;
}
return 0;
}
-static int igt_evict(void *ignored)
+static void drm_test_mm_evict(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int size = 8192;
@@ -1524,7 +1396,6 @@ static int igt_evict(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
/* Here we populate a full drm_mm and then try and insert a new node
* by evicting other nodes in a random order. The drm_mm_scan should
@@ -1533,61 +1404,49 @@ static int igt_evict(void *ignored)
* sizes to try and stress the hole finder.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
drm_mm_init(&mm, 0, size);
for (n = 0; n < size; n++) {
- err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
- if (err) {
- pr_err("insert failed, step %d\n", n);
- ret = err;
+ if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
/* First check that using the scanner doesn't break the mm */
- if (!evict_nothing(&mm, size, nodes)) {
- pr_err("evict_nothing() failed\n");
+ if (!evict_nothing(test, &mm, size, nodes)) {
+ KUNIT_FAIL(test, "evict_nothing() failed\n");
goto out;
}
- if (!evict_everything(&mm, size, nodes)) {
- pr_err("evict_everything() failed\n");
+ if (!evict_everything(test, &mm, size, nodes)) {
+ KUNIT_FAIL(test, "evict_everything() failed\n");
goto out;
}
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, 0, U64_MAX,
- nodes, order, size,
- n, 1,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u) failed\n",
- mode->name, n);
- ret = err;
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
+ mode)) {
+ KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
+ mode->name, n);
goto out;
}
}
for (n = 1; n < size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, 0, U64_MAX,
- nodes, order, size,
- size/2, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, size/2, n);
- ret = err;
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
+ size / 2, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, size / 2, n);
goto out;
}
}
@@ -1598,14 +1457,11 @@ static int igt_evict(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, 0, U64_MAX,
- nodes, order, size,
- nsize, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
- ret = err;
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
+ nsize, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
goto out;
}
}
@@ -1613,7 +1469,6 @@ static int igt_evict(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1621,11 +1476,9 @@ out:
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int igt_evict_range(void *ignored)
+static void drm_test_mm_evict_range(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int size = 8192;
@@ -1637,28 +1490,22 @@ static int igt_evict_range(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
- /* Like igt_evict() but now we are limiting the search to a
+ /* Like drm_test_mm_evict() but now we are limiting the search to a
* small portion of the full drm_mm.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
drm_mm_init(&mm, 0, size);
for (n = 0; n < size; n++) {
- err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
- if (err) {
- pr_err("insert failed, step %d\n", n);
- ret = err;
+ if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
@@ -1666,26 +1513,22 @@ static int igt_evict_range(void *ignored)
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= range_size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, range_start, range_end,
- nodes, order, size,
- n, 1,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
- mode->name, n, range_start, range_end);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, n, 1, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u) failed with range [%u, %u]\n",
+ mode->name, n, range_start, range_end);
goto out;
}
}
for (n = 1; n <= range_size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, range_start, range_end,
- nodes, order, size,
- range_size/2, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, range_size/2, n, range_start, range_end);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, range_size / 2, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, range_size / 2, n, range_start, range_end);
goto out;
}
}
@@ -1696,13 +1539,11 @@ static int igt_evict_range(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, range_start, range_end,
- nodes, order, size,
- nsize, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, nsize, n, range_start, range_end);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, nsize, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, nsize, n, range_start, range_end);
goto out;
}
}
@@ -1710,7 +1551,6 @@ static int igt_evict_range(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1718,8 +1558,6 @@ out:
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
static unsigned int node_index(const struct drm_mm_node *node)
@@ -1727,9 +1565,10 @@ static unsigned int node_index(const struct drm_mm_node *node)
return div64_u64(node->start, node->size);
}
-static int igt_topdown(void *ignored)
+static void drm_test_mm_topdown(struct kunit *test)
{
const struct insert_mode *topdown = &insert_modes[TOPDOWN];
+
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
@@ -1737,17 +1576,14 @@ static int igt_topdown(void *ignored)
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
- int ret;
/* When allocating top-down, we expect to be returned a node
* from a suitable hole at the top of the drm_mm. We check that
* the returned node does match the highest available slot.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
@@ -1757,28 +1593,26 @@ static int igt_topdown(void *ignored)
if (!order)
goto err_bitmap;
- ret = -EINVAL;
for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size*count);
+ drm_mm_init(&mm, 0, size * count);
for (n = 0; n < count; n++) {
- if (!expect_insert(&mm, &nodes[n],
- size, 0, n,
- topdown)) {
- pr_err("insert failed, size %u step %d\n", size, n);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
+ KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
goto out;
}
if (drm_mm_hole_follows(&nodes[n])) {
- pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
- n, nodes[n].start, size);
+ KUNIT_FAIL(test,
+ "hole after topdown insert %d, start=%llx\n, size=%u",
+ n, nodes[n].start, size);
goto out;
}
- if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
+ if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
goto out;
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
drm_random_reorder(order, count, &prng);
@@ -1793,23 +1627,23 @@ static int igt_topdown(void *ignored)
unsigned int last;
node = &nodes[order[(o + m) % count]];
- if (!expect_insert(&mm, node,
- size, 0, 0,
- topdown)) {
- pr_err("insert failed, step %d/%d\n", m, n);
+ if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
+ KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
goto out;
}
if (drm_mm_hole_follows(node)) {
- pr_err("hole after topdown insert %d/%d, start=%llx\n",
- m, n, node->start);
+ KUNIT_FAIL(test,
+ "hole after topdown insert %d/%d, start=%llx\n",
+ m, n, node->start);
goto out;
}
last = find_last_bit(bitmap, count);
if (node_index(node) != last) {
- pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
- m, n, size, last, node_index(node));
+ KUNIT_FAIL(test,
+ "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
+ m, n, size, last, node_index(node));
goto out;
}
@@ -1827,7 +1661,6 @@ static int igt_topdown(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1837,13 +1670,12 @@ err_bitmap:
bitmap_free(bitmap);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int igt_bottomup(void *ignored)
+static void drm_test_mm_bottomup(struct kunit *test)
{
const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
+
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
@@ -1851,16 +1683,13 @@ static int igt_bottomup(void *ignored)
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
- int ret;
- /* Like igt_topdown, but instead of searching for the last hole,
+ /* Like drm_test_mm_topdown, but instead of searching for the last hole,
* we search for the first.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
@@ -1870,22 +1699,20 @@ static int igt_bottomup(void *ignored)
if (!order)
goto err_bitmap;
- ret = -EINVAL;
for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size*count);
+ drm_mm_init(&mm, 0, size * count);
for (n = 0; n < count; n++) {
- if (!expect_insert(&mm, &nodes[n],
- size, 0, n,
- bottomup)) {
- pr_err("bottomup insert failed, size %u step %d\n", size, n);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
+ KUNIT_FAIL(test,
+ "bottomup insert failed, size %u step %d\n", size, n);
goto out;
}
- if (!assert_one_hole(&mm, size*(n + 1), size*count))
+ if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
goto out;
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
drm_random_reorder(order, count, &prng);
@@ -1900,17 +1727,16 @@ static int igt_bottomup(void *ignored)
unsigned int first;
node = &nodes[order[(o + m) % count]];
- if (!expect_insert(&mm, node,
- size, 0, 0,
- bottomup)) {
- pr_err("insert failed, step %d/%d\n", m, n);
+ if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
+ KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
goto out;
}
first = find_first_bit(bitmap, count);
if (node_index(node) != first) {
- pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
- m, n, first, node_index(node));
+ KUNIT_FAIL(test,
+ "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+ m, n, first, node_index(node));
goto out;
}
__clear_bit(first, bitmap);
@@ -1927,7 +1753,6 @@ static int igt_bottomup(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1937,47 +1762,39 @@ err_bitmap:
bitmap_free(bitmap);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int __igt_once(unsigned int mode)
+static void drm_test_mm_once(struct kunit *test, unsigned int mode)
{
struct drm_mm mm;
struct drm_mm_node rsvd_lo, rsvd_hi, node;
- int err;
drm_mm_init(&mm, 0, 7);
memset(&rsvd_lo, 0, sizeof(rsvd_lo));
rsvd_lo.start = 1;
rsvd_lo.size = 1;
- err = drm_mm_reserve_node(&mm, &rsvd_lo);
- if (err) {
- pr_err("Could not reserve low node\n");
+ if (drm_mm_reserve_node(&mm, &rsvd_lo)) {
+ KUNIT_FAIL(test, "Could not reserve low node\n");
goto err;
}
memset(&rsvd_hi, 0, sizeof(rsvd_hi));
rsvd_hi.start = 5;
rsvd_hi.size = 1;
- err = drm_mm_reserve_node(&mm, &rsvd_hi);
- if (err) {
- pr_err("Could not reserve low node\n");
+ if (drm_mm_reserve_node(&mm, &rsvd_hi)) {
+		KUNIT_FAIL(test, "Could not reserve high node\n");
goto err_lo;
}
if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
- pr_err("Expected a hole after lo and high nodes!\n");
- err = -EINVAL;
+ KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n");
goto err_hi;
}
memset(&node, 0, sizeof(node));
- err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
- if (err) {
- pr_err("Could not insert the node into the available hole!\n");
- err = -EINVAL;
+ if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
+ KUNIT_FAIL(test, "Could not insert the node into the available hole!\n");
goto err_hi;
}
@@ -1988,23 +1805,20 @@ err_lo:
drm_mm_remove_node(&rsvd_lo);
err:
drm_mm_takedown(&mm);
- return err;
}
-static int igt_lowest(void *ignored)
+static void drm_test_mm_lowest(struct kunit *test)
{
- return __igt_once(DRM_MM_INSERT_LOW);
+ drm_test_mm_once(test, DRM_MM_INSERT_LOW);
}
-static int igt_highest(void *ignored)
+static void drm_test_mm_highest(struct kunit *test)
{
- return __igt_once(DRM_MM_INSERT_HIGH);
+ drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
}
static void separate_adjacent_colors(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
+ unsigned long color, u64 *start, u64 *end)
{
if (drm_mm_node_allocated(node) && node->color != color)
++*start;
@@ -2014,12 +1828,12 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
--*end;
}
-static bool colors_abutt(const struct drm_mm_node *node)
+static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
drm_mm_node_allocated(list_next_entry(node, node_list))) {
- pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
- node->color, node->start, node->size,
+ KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
+ node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
list_next_entry(node, node_list)->start,
list_next_entry(node, node_list)->size);
@@ -2029,14 +1843,13 @@ static bool colors_abutt(const struct drm_mm_node *node)
return false;
}
-static int igt_color(void *ignored)
+static void drm_test_mm_color(struct kunit *test)
{
const unsigned int count = min(4096u, max_iterations);
const struct insert_mode *mode;
struct drm_mm mm;
struct drm_mm_node *node, *nn;
unsigned int n;
- int ret = -EINVAL, err;
/* Color adjustment complicates everything. First we just check
* that when we insert a node we apply any color_adjustment callback.
@@ -2049,15 +1862,11 @@ static int igt_color(void *ignored)
for (n = 1; n <= count; n++) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
- if (!expect_insert(&mm, node,
- n, 0, n,
- &insert_modes[0])) {
- pr_err("insert failed, step %d\n", n);
+ if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
kfree(node);
goto out;
}
@@ -2065,8 +1874,8 @@ static int igt_color(void *ignored)
drm_mm_for_each_node_safe(node, nn, &mm) {
if (node->color != node->size) {
- pr_err("invalid color stored: expected %lld, found %ld\n",
- node->size, node->color);
+ KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
+ node->size, node->color);
goto out;
}
@@ -2081,18 +1890,14 @@ static int igt_color(void *ignored)
u64 last;
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
- node->size = 1 + 2*count;
+ node->size = 1 + 2 * count;
node->color = node->size;
- err = drm_mm_reserve_node(&mm, node);
- if (err) {
- pr_err("initial reserve failed!\n");
- ret = err;
+ if (drm_mm_reserve_node(&mm, node)) {
+ KUNIT_FAIL(test, "initial reserve failed!\n");
goto out;
}
@@ -2102,19 +1907,15 @@ static int igt_color(void *ignored)
int rem;
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
node->start = last;
node->size = n + count;
node->color = node->size;
- err = drm_mm_reserve_node(&mm, node);
- if (err != -ENOSPC) {
- pr_err("reserve %d did not report color overlap! err=%d\n",
- n, err);
+ if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
+ KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
goto out;
}
@@ -2122,10 +1923,8 @@ static int igt_color(void *ignored)
rem = misalignment(node, n + count);
node->start += n + count - rem;
- err = drm_mm_reserve_node(&mm, node);
- if (err) {
- pr_err("reserve %d failed, err=%d\n", n, err);
- ret = err;
+ if (drm_mm_reserve_node(&mm, node)) {
+ KUNIT_FAIL(test, "reserve %d failed", n);
goto out;
}
@@ -2134,16 +1933,11 @@ static int igt_color(void *ignored)
for (n = 1; n <= count; n++) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
- if (!expect_insert(&mm, node,
- n, n, n,
- mode)) {
- pr_err("%s insert failed, step %d\n",
- mode->name, n);
+ if (!expect_insert(test, &mm, node, n, n, n, mode)) {
+ KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
kfree(node);
goto out;
}
@@ -2153,19 +1947,21 @@ static int igt_color(void *ignored)
u64 rem;
if (node->color != node->size) {
- pr_err("%s invalid color stored: expected %lld, found %ld\n",
- mode->name, node->size, node->color);
+ KUNIT_FAIL(test,
+ "%s invalid color stored: expected %lld, found %ld\n",
+ mode->name, node->size, node->color);
goto out;
}
- if (colors_abutt(node))
+ if (colors_abutt(test, node))
goto out;
div64_u64_rem(node->start, node->size, &rem);
if (rem) {
- pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
- mode->name, node->start, node->size, rem);
+ KUNIT_FAIL(test,
+ "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
+ mode->name, node->start, node->size, rem);
goto out;
}
@@ -2176,25 +1972,18 @@ static int igt_color(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, nn, &mm) {
drm_mm_remove_node(node);
kfree(node);
}
drm_mm_takedown(&mm);
- return ret;
}
-static int evict_color(struct drm_mm *mm,
- u64 range_start, u64 range_end,
- struct evict_node *nodes,
- unsigned int *order,
- unsigned int count,
- unsigned int size,
- unsigned int alignment,
- unsigned long color,
- const struct insert_mode *mode)
+static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
+ u64 range_end, struct evict_node *nodes, unsigned int *order,
+ unsigned int count, unsigned int size, unsigned int alignment,
+ unsigned long color, const struct insert_mode *mode)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -2202,39 +1991,37 @@ static int evict_color(struct drm_mm *mm,
struct drm_mm_node tmp;
int err;
- drm_mm_scan_init_with_range(&scan, mm,
- size, alignment, color,
- range_start, range_end,
- mode->mode);
- if (!evict_nodes(&scan,
- nodes, order, count, true,
- &evict_list))
+ drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
+ range_end, mode->mode);
+ if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
return -EINVAL;
memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
DRM_MM_INSERT_EVICT);
if (err) {
- pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
- size, alignment, color, err);
- show_scan(&scan);
- show_holes(mm, 3);
+ KUNIT_FAIL(test,
+ "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
+ size, alignment, color, err);
+ show_scan(test, &scan);
+ show_holes(test, mm, 3);
return err;
}
if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
err = -EINVAL;
}
- if (colors_abutt(&tmp))
+ if (colors_abutt(test, &tmp))
err = -EINVAL;
- if (!assert_node(&tmp, mm, size, alignment, color)) {
- pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
- tmp.size, size,
- alignment, misalignment(&tmp, alignment), tmp.start);
+ if (!assert_node(test, &tmp, mm, size, alignment, color)) {
+ KUNIT_FAIL(test,
+ "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
+ tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
err = -EINVAL;
}
@@ -2245,8 +2032,8 @@ static int evict_color(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link) {
err = drm_mm_reserve_node(mm, &e->node);
if (err) {
- pr_err("Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
return err;
}
}
@@ -2255,7 +2042,7 @@ static int evict_color(struct drm_mm *mm,
return 0;
}
-static int igt_color_evict(void *ignored)
+static void drm_test_mm_color_evict(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int total_size = min(8192u, max_iterations);
@@ -2265,7 +2052,6 @@ static int igt_color_evict(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
/* Check that the drm_mm_scan also honours color adjustment when
* choosing its victims to create a hole. Our color_adjust does not
@@ -2273,23 +2059,20 @@ static int igt_color_evict(void *ignored)
* enlarging the set of victims that must be evicted.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(total_size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
- drm_mm_init(&mm, 0, 2*total_size - 1);
+ drm_mm_init(&mm, 0, 2 * total_size - 1);
mm.color_adjust = separate_adjacent_colors;
for (n = 0; n < total_size; n++) {
- if (!expect_insert(&mm, &nodes[n].node,
+ if (!expect_insert(test, &mm, &nodes[n].node,
1, 0, color++,
&insert_modes[0])) {
- pr_err("insert failed, step %d\n", n);
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
@@ -2297,26 +2080,19 @@ static int igt_color_evict(void *ignored)
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= total_size; n <<= 1) {
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, 0, U64_MAX,
- nodes, order, total_size,
- n, 1, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u) failed\n",
- mode->name, n);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ n, 1, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
goto out;
}
}
for (n = 1; n < total_size; n <<= 1) {
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, 0, U64_MAX,
- nodes, order, total_size,
- total_size/2, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, total_size/2, n);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ total_size / 2, n, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, total_size / 2, n);
goto out;
}
}
@@ -2327,13 +2103,10 @@ static int igt_color_evict(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, 0, U64_MAX,
- nodes, order, total_size,
- nsize, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ nsize, n, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
goto out;
}
}
@@ -2341,21 +2114,16 @@ static int igt_color_evict(void *ignored)
cond_resched();
}
- ret = 0;
out:
- if (ret)
- show_mm(&mm);
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int igt_color_evict_range(void *ignored)
+static void drm_test_mm_color_evict_range(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int total_size = 8192;
@@ -2368,29 +2136,25 @@ static int igt_color_evict_range(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
- /* Like igt_color_evict(), but limited to small portion of the full
+ /* Like drm_test_mm_color_evict(), but limited to small portion of the full
* drm_mm range.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(total_size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
- drm_mm_init(&mm, 0, 2*total_size - 1);
+ drm_mm_init(&mm, 0, 2 * total_size - 1);
mm.color_adjust = separate_adjacent_colors;
for (n = 0; n < total_size; n++) {
- if (!expect_insert(&mm, &nodes[n].node,
+ if (!expect_insert(test, &mm, &nodes[n].node,
1, 0, color++,
&insert_modes[0])) {
- pr_err("insert failed, step %d\n", n);
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
@@ -2398,26 +2162,22 @@ static int igt_color_evict_range(void *ignored)
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= range_size; n <<= 1) {
drm_random_reorder(order, range_size, &prng);
- err = evict_color(&mm, range_start, range_end,
- nodes, order, total_size,
- n, 1, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
- mode->name, n, range_start, range_end);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, n, 1, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u) failed for range [%x, %x]\n",
+ mode->name, n, range_start, range_end);
goto out;
}
}
for (n = 1; n < range_size; n <<= 1) {
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, range_start, range_end,
- nodes, order, total_size,
- range_size/2, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, total_size/2, n, range_start, range_end);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, range_size / 2, n, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, total_size / 2, n, range_start, range_end);
goto out;
}
}
@@ -2428,13 +2188,11 @@ static int igt_color_evict_range(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, range_start, range_end,
- nodes, order, total_size,
- nsize, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, nsize, n, range_start, range_end);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, nsize, n, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, nsize, n, range_start, range_end);
goto out;
}
}
@@ -2442,46 +2200,57 @@ static int igt_color_evict_range(void *ignored)
cond_resched();
}
- ret = 0;
out:
- if (ret)
- show_mm(&mm);
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-#include "drm_selftest.c"
-
-static int __init test_drm_mm_init(void)
+static int drm_mm_init_test(struct kunit *test)
{
- int err;
-
while (!random_seed)
random_seed = get_random_int();
- pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
- random_seed, max_iterations, max_prime);
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-static void __exit test_drm_mm_exit(void)
-{
+ return 0;
}
-module_init(test_drm_mm_init);
-module_exit(test_drm_mm_exit);
-
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);
+static struct kunit_case drm_mm_tests[] = {
+ KUNIT_CASE(drm_test_mm_init),
+ KUNIT_CASE(drm_test_mm_debug),
+ KUNIT_CASE(drm_test_mm_reserve),
+ KUNIT_CASE(drm_test_mm_insert),
+ KUNIT_CASE(drm_test_mm_replace),
+ KUNIT_CASE(drm_test_mm_insert_range),
+ KUNIT_CASE(drm_test_mm_frag),
+ KUNIT_CASE(drm_test_mm_align),
+ KUNIT_CASE(drm_test_mm_align32),
+ KUNIT_CASE(drm_test_mm_align64),
+ KUNIT_CASE(drm_test_mm_evict),
+ KUNIT_CASE(drm_test_mm_evict_range),
+ KUNIT_CASE(drm_test_mm_topdown),
+ KUNIT_CASE(drm_test_mm_bottomup),
+ KUNIT_CASE(drm_test_mm_lowest),
+ KUNIT_CASE(drm_test_mm_highest),
+ KUNIT_CASE(drm_test_mm_color),
+ KUNIT_CASE(drm_test_mm_color_evict),
+ KUNIT_CASE(drm_test_mm_color_evict_range),
+ {}
+};
+
+static struct kunit_suite drm_mm_test_suite = {
+ .name = "drm_mm",
+ .init = drm_mm_init_test,
+ .test_cases = drm_mm_tests,
+};
+
+kunit_test_suite(drm_mm_test_suite);
+
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_plane_helper.c b/drivers/gpu/drm/tests/drm_plane_helper_test.c
index 64e8938ab194..ec71af791f1f 100644
--- a/drivers/gpu/drm/selftests/test-drm_plane_helper.c
+++ b/drivers/gpu/drm/tests/drm_plane_helper_test.c
@@ -1,20 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_plane_helper functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
-#define pr_fmt(fmt) "drm_plane_helper: " fmt
+#include <kunit/test.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_modes.h>
-#include "test-drm_modeset_common.h"
-
static void set_src(struct drm_plane_state *plane_state,
- unsigned src_x, unsigned src_y,
- unsigned src_w, unsigned src_h)
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h)
{
plane_state->src_x = src_x;
plane_state->src_y = src_y;
@@ -23,8 +22,8 @@ static void set_src(struct drm_plane_state *plane_state,
}
static bool check_src_eq(struct drm_plane_state *plane_state,
- unsigned src_x, unsigned src_y,
- unsigned src_w, unsigned src_h)
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h)
{
if (plane_state->src.x1 < 0) {
pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
@@ -50,7 +49,7 @@ static bool check_src_eq(struct drm_plane_state *plane_state,
static void set_crtc(struct drm_plane_state *plane_state,
int crtc_x, int crtc_y,
- unsigned crtc_w, unsigned crtc_h)
+ unsigned int crtc_w, unsigned int crtc_h)
{
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
@@ -60,7 +59,7 @@ static void set_crtc(struct drm_plane_state *plane_state,
static bool check_crtc_eq(struct drm_plane_state *plane_state,
int crtc_x, int crtc_y,
- unsigned crtc_w, unsigned crtc_h)
+ unsigned int crtc_w, unsigned int crtc_h)
{
if (plane_state->dst.x1 != crtc_x ||
plane_state->dst.y1 != crtc_y ||
@@ -74,7 +73,7 @@ static bool check_crtc_eq(struct drm_plane_state *plane_state,
return true;
}
-int igt_check_plane_state(void *ignored)
+static void drm_test_check_plane_state(struct kunit *test)
{
int ret;
@@ -83,9 +82,8 @@ int igt_check_plane_state(void *ignored)
.enable = true,
.active = true,
.mode = {
- DRM_MODE("1024x768", 0, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ DRM_MODE("1024x768", 0, 65000, 1024, 1048, 1184, 1344, 0, 768, 771,
+ 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
},
};
static struct drm_plane plane = {
@@ -106,119 +104,134 @@ int igt_check_plane_state(void *ignored)
set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
set_crtc(&plane_state, 0, 0, fb.width, fb.height);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Simple clipping check should pass\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple clipping check should pass\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
/* Rotated clipping + reflection, no scaling. */
plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Rotated clipping check should pass\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Rotated clipping check should pass\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
plane_state.rotation = DRM_MODE_ROTATE_0;
/* Check whether positioning works correctly. */
set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
set_crtc(&plane_state, 0, 0, 1023, 767);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(!ret, "Should not be able to position on the crtc with can_position=false\n");
+ KUNIT_EXPECT_TRUE_MSG(test, ret,
+ "Should not be able to position on the crtc with can_position=false\n");
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, false);
- FAIL(ret < 0, "Simple positioning should work\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1023, 767));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple positioning should work\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1023, 767));
/* Simple scaling tests. */
set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
set_crtc(&plane_state, 0, 0, 1024, 768);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0x8001,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(!ret, "Upscaling out of range should fail.\n");
+ KUNIT_EXPECT_TRUE_MSG(test, ret, "Upscaling out of range should fail.\n");
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0x8000,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Upscaling exactly 2x should work\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Upscaling exactly 2x should work\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x1ffff, false, false);
- FAIL(!ret, "Downscaling out of range should fail.\n");
+ KUNIT_EXPECT_TRUE_MSG(test, ret, "Downscaling out of range should fail.\n");
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x20000, false, false);
- FAIL(ret < 0, "Should succeed with exact scaling limit\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed with exact scaling limit\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
/* Testing rounding errors. */
set_src(&plane_state, 0, 0, 0x40001, 0x40001);
set_crtc(&plane_state, 1022, 766, 4, 4);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x10001,
true, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
set_crtc(&plane_state, -2, -2, 1028, 772);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x10001,
false, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0x40002, 0x40002, 1024 << 16, 768 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x40002, 0x40002,
+ 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
set_crtc(&plane_state, 1022, 766, 4, 4);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0xffff,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
/* Should not be rounded to 0x20001, which would be upscaling. */
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
set_crtc(&plane_state, -2, -2, 1028, 772);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0xffff,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
-
- return 0;
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x3fffe, 0x3fffe,
+ 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
}
+
+static struct kunit_case drm_plane_helper_test[] = {
+ KUNIT_CASE(drm_test_check_plane_state),
+ {}
+};
+
+static struct kunit_suite drm_plane_helper_test_suite = {
+ .name = "drm_plane_helper",
+ .test_cases = drm_plane_helper_test,
+};
+
+kunit_test_suite(drm_plane_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_rect_test.c b/drivers/gpu/drm/tests/drm_rect_test.c
new file mode 100644
index 000000000000..e9809ea32696
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_rect_test.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_rect functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_rect.h>
+
+static void drm_test_rect_clip_scaled_div_by_zero(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * Make sure we don't divide by zero when dst
+ * width/height is zero and dst and clip do not intersect.
+ */
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 0, 0, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 3, 3, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_not_clipped(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_clipped(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 || dst.y1 != 0 ||
+ dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
+ src.y1 != 2 << 16 || src.y2 != 4 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 || dst.y1 != 0 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 2, 2, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 2 || dst.x2 != 4 || dst.y1 != 2 ||
+ dst.y2 != 4, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_signed_vs_unsigned(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * 'clip.x2 - dst.x1 >= dst width' could result in a negative
+ * src rectangle width which is no longer expected by the
+ * code as it's using unsigned types. This could lead to
+ * the clipped source rectangle appearing visible when it
+ * should have been fully clipped. Make sure both rectangles
+ * end up invisible.
+ */
+ drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 3, 3, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+}
+
+static struct kunit_case drm_rect_tests[] = {
+ KUNIT_CASE(drm_test_rect_clip_scaled_div_by_zero),
+ KUNIT_CASE(drm_test_rect_clip_scaled_not_clipped),
+ KUNIT_CASE(drm_test_rect_clip_scaled_clipped),
+ KUNIT_CASE(drm_test_rect_clip_scaled_signed_vs_unsigned),
+ { }
+};
+
+static struct kunit_suite drm_rect_test_suite = {
+ .name = "drm_rect",
+ .test_cases = drm_rect_tests,
+};
+
+kunit_test_suite(drm_rect_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index bc4fa59b6fa9..378600806167 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -3,7 +3,7 @@ config DRM_TIDSS
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
Display SubSystem. There are currently three Keystone family
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 2218da3b3ca3..cd3c43a6c806 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -8,9 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "tidss_crtc.h"
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index dd3c6a606ae2..ad93acc9abd2 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -24,9 +24,9 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include "tidss_crtc.h"
@@ -1954,16 +1954,16 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
}
static
-dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+dma_addr_t dispc_plane_state_dma_addr(const struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 x = state->src_x >> 16;
u32 y = state->src_y >> 16;
- gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+ gem = drm_fb_dma_get_gem_obj(state->fb, 0);
- return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ return gem->dma_addr + fb->offsets[0] + x * fb->format->cpp[0] +
y * fb->pitches[0];
}
@@ -1971,16 +1971,16 @@ static
dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 x = state->src_x >> 16;
u32 y = state->src_y >> 16;
if (WARN_ON(state->fb->format->num_planes != 2))
return 0;
- gem = drm_fb_cma_get_gem_obj(fb, 1);
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
- return gem->paddr + fb->offsets[1] +
+ return gem->dma_addr + fb->offsets[1] +
(x * fb->format->cpp[1] / fb->format->hsub) +
(y * fb->pitches[1] / fb->format->vsub);
}
@@ -1993,17 +1993,17 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
u32 fourcc = state->fb->format->format;
u16 cpp = state->fb->format->cpp[0];
u32 fb_width = state->fb->pitches[0] / cpp;
- dma_addr_t paddr = dispc_plane_state_paddr(state);
+ dma_addr_t dma_addr = dispc_plane_state_dma_addr(state);
struct dispc_scaling_params scale;
dispc_vid_calc_scaling(dispc, state, &scale, lite);
dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, dma_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)dma_addr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, dma_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32);
dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
(scale.in_w - 1) | ((scale.in_h - 1) << 16));
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 04cfff89ee51..15cd9b91b7e2 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -15,7 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -101,13 +101,13 @@ static void tidss_release(struct drm_device *ddev)
drm_kms_helper_poll_fini(ddev);
}
-DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+DEFINE_DRM_GEM_DMA_FOPS(tidss_fops);
static const struct drm_driver tidss_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &tidss_fops,
.release = tidss_release,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "tidss",
.desc = "TI Keystone DSS",
.date = "20180215",
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 666e527a0acf..afb2879980c6 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
@@ -71,7 +70,7 @@ static int tidss_atomic_check(struct drm_device *ddev,
* changes. This is needed for updating the plane positions in
* tidss_crtc_position_planes() which is called from
* crtc_atomic_enable() and crtc_atomic_flush(). We have an
- * extra flag to to mark x,y-position changes and together
+ * extra flag to mark x,y-position changes and together
* with zpos_changed the condition recognizes all the above
* cases.
*/
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 68a85a94ffcb..42d50ec5526d 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -11,7 +11,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include "tidss_crtc.h"
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index e315591eb36b..d3bd2d7a181e 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -3,7 +3,7 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 509fbae8c9a6..b5f60b2b2d0e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -12,10 +12,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -64,13 +64,13 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
- start = gem->paddr + fb->offsets[0] +
+ start = gem->dma_addr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
crtc->x * fb->format->cpp[0];
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index eee3c447fbac..f72755b8ea14 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mm.h>
#include <drm/drm_probe_helper.h>
@@ -476,11 +476,11 @@ static void tilcdc_debugfs_init(struct drm_minor *minor)
}
#endif
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver tilcdc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 960136518814..cf77a8ce7398 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -5,7 +5,6 @@
*/
#include <drm/drm_atomic.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
@@ -106,11 +105,10 @@ int tilcdc_plane_init(struct drm_device *dev,
struct tilcdc_drm_private *priv = dev->dev_private;
int ret;
- ret = drm_plane_init(dev, plane, 1,
- &tilcdc_plane_funcs,
- priv->pixelformats,
- priv->num_pixelformats,
- true);
+ ret = drm_universal_plane_init(dev, plane, 1, &tilcdc_plane_funcs,
+ priv->pixelformats,
+ priv->num_pixelformats,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev->dev, "Failed to initialize plane: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 027cd87c3d0d..565957264875 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,7 +3,7 @@
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
@@ -55,7 +55,7 @@ config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
@@ -87,7 +87,7 @@ config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -100,7 +100,7 @@ config TINYDRM_ILI9163
tristate "DRM support for ILI9163 display panels"
depends on DRM && SPI
select BACKLIGHT_CLASS_DEVICE
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DBI
help
@@ -113,7 +113,7 @@ config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
@@ -125,7 +125,7 @@ config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -138,7 +138,7 @@ config TINYDRM_ILI9486
tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -152,7 +152,7 @@ config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -163,7 +163,7 @@ config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
@@ -177,7 +177,7 @@ config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
@@ -189,7 +189,7 @@ config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 7461cb401407..bb302a3fd6b5 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -11,11 +11,11 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
@@ -220,14 +220,14 @@ static void arc_pgu_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *state)
{
struct arcpgu_drm_private *arcpgu;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
if (!pipe->plane.state->fb)
return;
arcpgu = pipe_to_arcpgu_priv(pipe);
- gem = drm_fb_cma_get_gem_obj(pipe->plane.state->fb, 0);
- arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
+ gem = drm_fb_dma_get_gem_obj(pipe->plane.state->fb, 0);
+ arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->dma_addr);
}
static const struct drm_simple_display_pipe_funcs arc_pgu_pipe_funcs = {
@@ -243,7 +243,7 @@ static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
+DEFINE_DRM_GEM_DMA_FOPS(arcpgu_drm_ops);
static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
{
@@ -370,7 +370,7 @@ static const struct drm_driver arcpgu_drm_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 82364a0a7b18..a51262289aef 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -309,6 +309,8 @@ static void bochs_hw_fini(struct drm_device *dev)
static void bochs_hw_blank(struct bochs_device *bochs, bool blank)
{
DRM_DEBUG_DRIVER("hw_blank %d\n", blank);
+ /* enable color bit (so VGA_IS1_RC access works) */
+ bochs_vga_writeb(bochs, VGA_MIS_W, VGA_MIS_COLOR);
/* discard ar_flip_flop */
(void)bochs_vga_readb(bochs, VGA_IS1_RC);
/* blank or unblank; we need only update index and set 0x20 */
@@ -583,13 +585,17 @@ static int bochs_load(struct drm_device *dev)
ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size);
if (ret)
- return ret;
+ goto err_hw_fini;
ret = bochs_kms_init(bochs);
if (ret)
- return ret;
+ goto err_hw_fini;
return 0;
+
+err_hw_fini:
+ bochs_hw_fini(dev);
+ return ret;
}
DEFINE_DRM_GEM_FOPS(bochs_fops);
@@ -664,11 +670,13 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_free_dev;
+ goto err_hw_fini;
drm_fbdev_generic_setup(dev, 32);
return ret;
+err_hw_fini:
+ bochs_hw_fini(dev);
err_free_dev:
drm_dev_put(dev);
return ret;
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index c4f5beea1f90..354d5e854a6f 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -316,28 +316,29 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
}
static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
- const struct iosys_map *map,
+ const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct cirrus_device *cirrus = to_cirrus(fb->dev);
- void __iomem *dst = cirrus->vram;
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ struct iosys_map dst;
int idx;
if (!drm_dev_enter(&cirrus->dev, &idx))
return -ENODEV;
+ iosys_map_set_vaddr_iomem(&dst, cirrus->vram);
+
if (cirrus->cpp == fb->format->cpp[0]) {
- dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect);
- drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect);
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
+ drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
} else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) {
- dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect);
- drm_fb_xrgb8888_to_rgb565_toio(dst, cirrus->pitch, vmap, fb, rect, false);
+ iosys_map_incr(&dst, drm_fb_clip_offset(cirrus->pitch, fb->format, rect));
+ drm_fb_xrgb8888_to_rgb565(&dst, &cirrus->pitch, vmap, fb, rect, false);
} else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) {
- dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect);
- drm_fb_xrgb8888_to_rgb888_toio(dst, cirrus->pitch, vmap, fb, rect);
+ iosys_map_incr(&dst, drm_fb_clip_offset(cirrus->pitch, fb->format, rect));
+ drm_fb_xrgb8888_to_rgb888(&dst, &cirrus->pitch, vmap, fb, rect);
} else {
WARN_ON_ONCE("cpp mismatch");
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index ebb025543f8d..48c24aa8c28a 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -20,7 +20,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -181,6 +181,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -190,12 +191,12 @@ static const struct drm_display_mode yx350hv15_mode = {
DRM_SIMPLE_MODE(320, 480, 60, 75),
};
-DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
+DEFINE_DRM_GEM_DMA_FOPS(hx8357d_fops);
static const struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index fc8ed245b0bc..9a1a5943bee0 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -11,7 +11,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -100,6 +100,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -110,12 +111,12 @@ static const struct drm_display_mode yx240qv29_mode = {
DRM_SIMPLE_MODE(128, 160, 28, 35),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9163_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9163_fops);
static struct drm_driver ili9163_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9163_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 8d686eecd5f4..a79da2b4af64 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -19,12 +19,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -78,7 +78,7 @@ static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
@@ -104,7 +104,7 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (ret)
goto err_msg;
} else {
- tr = cma_obj->vaddr;
+ tr = dma_obj->vaddr;
}
switch (dbidev->rotation) {
@@ -335,12 +335,12 @@ static const struct drm_display_mode ili9225_mode = {
DRM_SIMPLE_MODE(176, 220, 35, 44),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9225_fops);
static const struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 5b8cc770ee7b..69b265e78096 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -19,7 +19,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -137,6 +137,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -146,12 +147,12 @@ static const struct drm_display_mode yx240qv29_mode = {
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9341_fops);
static const struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 6d655e18e0aa..c80028bb1d11 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -18,7 +18,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -150,6 +150,7 @@ static void waveshare_enable(struct drm_simple_display_pipe *pipe,
}
static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = waveshare_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -159,12 +160,12 @@ static const struct drm_display_mode waveshare_mode = {
DRM_SIMPLE_MODE(480, 320, 73, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9486_fops);
static const struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index 5e060f6910bb..bc522fb3d94d 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -17,7 +17,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -141,6 +141,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = mi0283qt_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -150,12 +151,12 @@ static const struct drm_display_mode mi0283qt_mode = {
DRM_SIMPLE_MODE(320, 240, 58, 43),
};
-DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
+DEFINE_DRM_GEM_DMA_FOPS(mi0283qt_fops);
static const struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index c759ff9c2c87..955a61d628e7 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -18,7 +18,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -212,17 +212,18 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs panel_mipi_dbi_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = panel_mipi_dbi_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
};
-DEFINE_DRM_GEM_CMA_FOPS(panel_mipi_dbi_fops);
+DEFINE_DRM_GEM_DMA_FOPS(panel_mipi_dbi_fops);
static const struct drm_driver panel_mipi_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &panel_mipi_dbi_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 013790c45d0a..e62f4d16b2c6 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -25,12 +25,12 @@
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
@@ -511,8 +511,10 @@ static void repaper_get_temperature(struct repaper_epd *epd)
static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
+ unsigned int dst_pitch = 0;
+ struct iosys_map dst, vmap;
struct drm_rect clip;
int idx, ret = 0;
u8 *buf = NULL;
@@ -541,7 +543,9 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
if (ret)
goto out_free;
- drm_fb_xrgb8888_to_mono(buf, 0, cma_obj->vaddr, fb, &clip);
+ iosys_map_set_vaddr(&dst, buf);
+ iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -617,6 +621,15 @@ static void power_off(struct repaper_epd *epd)
gpiod_set_value_cansleep(epd->discharge, 0);
}
+static enum drm_mode_status repaper_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct repaper_epd *epd = drm_to_epd(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, epd->mode);
+}
+
static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
@@ -827,6 +840,7 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
+ .mode_valid = repaper_pipe_mode_valid,
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
@@ -835,22 +849,8 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
static int repaper_connector_get_modes(struct drm_connector *connector)
{
struct repaper_epd *epd = drm_to_epd(connector->dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, epd->mode);
- if (!mode) {
- DRM_ERROR("Failed to duplicate mode\n");
- return 0;
- }
-
- drm_mode_set_name(mode);
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, epd->mode);
}
static const struct drm_connector_helper_funcs repaper_connector_hfuncs = {
@@ -903,12 +903,12 @@ static const struct drm_display_mode repaper_e2271cs021_mode = {
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
0xff, 0xfe, 0x00, 0x00 };
-DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
+DEFINE_DRM_GEM_DMA_FOPS(repaper_fops);
static const struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 5422363690e7..18489779fb8a 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -8,6 +8,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
@@ -20,8 +21,8 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
@@ -30,16 +31,6 @@
#define DRIVER_MINOR 0
/*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
-#define RES_MM(d) \
- (((d) * 254ul) / (96ul * 10ul))
-
-#define SIMPLEDRM_MODE(hd, vd) \
- DRM_SIMPLE_MODE(hd, vd, RES_MM(hd), RES_MM(vd))
-
-/*
* Helpers for simplefb
*/
@@ -198,7 +189,6 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
struct simpledrm_device {
struct drm_device dev;
- struct platform_device *pdev;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -217,14 +207,15 @@ struct simpledrm_device {
unsigned int pitch;
/* memory management */
- struct resource *mem;
void __iomem *screen_base;
/* modesetting */
uint32_t formats[8];
size_t nformats;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
- struct drm_simple_display_pipe pipe;
};
static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
@@ -272,7 +263,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
unsigned int i;
@@ -370,7 +361,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
struct regulator *regulator;
@@ -451,120 +442,6 @@ static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
#endif
/*
- * Simplefb settings
- */
-
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height)
-{
- struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
-
- mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
- drm_mode_set_name(&mode);
-
- return mode;
-}
-
-static int simpledrm_device_init_fb(struct simpledrm_device *sdev)
-{
- int width, height, stride;
- const struct drm_format_info *format;
- struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
- const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
- struct device_node *of_node = pdev->dev.of_node;
-
- if (pd) {
- width = simplefb_get_width_pd(dev, pd);
- if (width < 0)
- return width;
- height = simplefb_get_height_pd(dev, pd);
- if (height < 0)
- return height;
- stride = simplefb_get_stride_pd(dev, pd);
- if (stride < 0)
- return stride;
- format = simplefb_get_format_pd(dev, pd);
- if (IS_ERR(format))
- return PTR_ERR(format);
- } else if (of_node) {
- width = simplefb_get_width_of(dev, of_node);
- if (width < 0)
- return width;
- height = simplefb_get_height_of(dev, of_node);
- if (height < 0)
- return height;
- stride = simplefb_get_stride_of(dev, of_node);
- if (stride < 0)
- return stride;
- format = simplefb_get_format_of(dev, of_node);
- if (IS_ERR(format))
- return PTR_ERR(format);
- } else {
- drm_err(dev, "no simplefb configuration found\n");
- return -ENODEV;
- }
-
- sdev->mode = simpledrm_mode(width, height);
- sdev->format = format;
- sdev->pitch = stride;
-
- drm_dbg_kms(dev, "display mode={" DRM_MODE_FMT "}\n",
- DRM_MODE_ARG(&sdev->mode));
- drm_dbg_kms(dev,
- "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
- &format->format, width, height, stride);
-
- return 0;
-}
-
-/*
- * Memory management
- */
-
-static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
-{
- struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
- struct resource *res, *mem;
- void __iomem *screen_base;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
- if (ret) {
- drm_err(dev, "could not acquire memory range %pr: error %d\n",
- res, ret);
- return ret;
- }
-
- mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- sdev->dev.driver->name);
- if (!mem) {
- /*
- * We cannot make this fatal. Sometimes this comes from magic
- * spaces our resource handlers simply don't know about. Use
- * the I/O-memory resource as-is and try to map that instead.
- */
- drm_warn(dev, "could not acquire memory region %pr\n", res);
- mem = res;
- }
-
- screen_base = devm_ioremap_wc(&pdev->dev, mem->start,
- resource_size(mem));
- if (!screen_base)
- return -ENOMEM;
-
- sdev->mem = mem;
- sdev->screen_base = screen_base;
-
- return 0;
-}
-
-/*
* Modesetting
*/
@@ -576,7 +453,7 @@ static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
* TODO: Add blit helpers for remaining formats and uncomment
* constants.
*/
-static const uint32_t simpledrm_default_formats[] = {
+static const uint32_t simpledrm_primary_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
@@ -587,100 +464,54 @@ static const uint32_t simpledrm_default_formats[] = {
DRM_FORMAT_ARGB2101010,
};
-static const uint64_t simpledrm_format_modifiers[] = {
+static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &sdev->mode);
- if (!mode)
- return 0;
-
- if (mode->name[0] == '\0')
- drm_mode_set_name(mode);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- if (mode->width_mm)
- connector->display_info.width_mm = mode->width_mm;
- if (mode->height_mm)
- connector->display_info.height_mm = mode->height_mm;
-
- return 1;
-}
-
-static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
-};
-
-static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static enum drm_mode_status
-simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
-
- if (mode->hdisplay != sdev->mode.hdisplay &&
- mode->vdisplay != sdev->mode.vdisplay)
- return MODE_ONE_SIZE;
- else if (mode->hdisplay != sdev->mode.hdisplay)
- return MODE_ONE_WIDTH;
- else if (mode->vdisplay != sdev->mode.vdisplay)
- return MODE_ONE_HEIGHT;
-
- return MODE_OK;
-}
-
-static void
-simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */
- struct drm_device *dev = &sdev->dev;
- void __iomem *dst = sdev->screen_base;
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct drm_device *dev = plane->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
- if (!fb)
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
return;
- drm_rect_fp_to_int(&src_clip, &plane_state->src);
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base);
+ struct drm_rect dst_clip = plane_state->dst;
- if (!drm_dev_enter(dev, &idx))
- return;
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
- dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip);
- drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip);
+ iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
+ drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb,
+ &damage);
+ }
drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
-static void
-simpledrm_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = plane->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
int idx;
if (!drm_dev_enter(dev, &idx))
@@ -692,46 +523,81 @@ simpledrm_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_dev_exit(idx);
}
-static void
-simpledrm_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_plane_state)
+static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_update = simpledrm_primary_plane_helper_atomic_update,
+ .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
- struct drm_plane_state *plane_state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = &sdev->dev;
- void __iomem *dst = sdev->screen_base;
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
- if (!fb)
- return;
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
+}
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
- return;
+static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+ ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
+ if (ret)
+ return ret;
- if (!drm_dev_enter(dev, &idx))
- return;
+ return drm_atomic_add_affected_planes(new_state, crtc);
+}
- dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip);
- drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip);
+/*
+ * The CRTC is always enabled. Screen updates are performed by
+ * the primary plane's atomic_update function. Disabling clears
+ * the screen in the primary plane's atomic_disable function.
+ */
+static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
+ .mode_valid = simpledrm_crtc_helper_mode_valid,
+ .atomic_check = simpledrm_crtc_helper_atomic_check,
+};
- drm_dev_exit(idx);
+static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
+
+ return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
}
-static const struct drm_simple_display_pipe_funcs
-simpledrm_simple_display_pipe_funcs = {
- .mode_valid = simpledrm_simple_display_pipe_mode_valid,
- .enable = simpledrm_simple_display_pipe_enable,
- .disable = simpledrm_simple_display_pipe_disable,
- .update = simpledrm_simple_display_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
+ .get_modes = simpledrm_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs simpledrm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
@@ -740,127 +606,207 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const uint32_t *simpledrm_device_formats(struct simpledrm_device *sdev,
- size_t *nformats_out)
-{
- struct drm_device *dev = &sdev->dev;
- size_t i;
-
- if (sdev->nformats)
- goto out; /* don't rebuild list on recurring calls */
-
- /* native format goes first */
- sdev->formats[0] = sdev->format->format;
- sdev->nformats = 1;
-
- /* default formats go second */
- for (i = 0; i < ARRAY_SIZE(simpledrm_default_formats); ++i) {
- if (simpledrm_default_formats[i] == sdev->format->format)
- continue; /* native format already went first */
- sdev->formats[sdev->nformats] = simpledrm_default_formats[i];
- sdev->nformats++;
- }
+/*
+ * Init / Cleanup
+ */
+static struct drm_display_mode simpledrm_mode(unsigned int width,
+ unsigned int height)
+{
/*
- * TODO: The simpledrm driver converts framebuffers to the native
- * format when copying them to device memory. If there are more
- * formats listed than supported by the driver, the native format
- * is not supported by the conversion helpers. Therefore *only*
- * support the native format and add a conversion helper ASAP.
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
*/
- if (drm_WARN_ONCE(dev, i != sdev->nformats,
- "format conversion helpers required for %p4cc",
- &sdev->format->format)) {
- sdev->nformats = 1;
- }
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height,
+ DRM_MODE_RES_MM(width, 96ul),
+ DRM_MODE_RES_MM(height, 96ul))
+ };
-out:
- *nformats_out = sdev->nformats;
- return sdev->formats;
+ return mode;
}
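The 96 dpi assumption above mirrors the RES_MM() macro removed earlier in this patch: the physical size in millimetres is estimated as (pixels * 254) / (dpi * 10). A standalone sketch of that arithmetic, as plain userspace C:

#include <stdio.h>

static unsigned long res_mm(unsigned long pixels, unsigned long dpi)
{
	return (pixels * 254ul) / (dpi * 10ul);
}

int main(void)
{
	/* A 1024x768 simplefb is reported as roughly 270 mm x 203 mm. */
	printf("%lu mm x %lu mm\n", res_mm(1024, 96), res_mm(768, 96));
	return 0;
}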
-static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
+static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
{
- struct drm_device *dev = &sdev->dev;
- struct drm_display_mode *mode = &sdev->mode;
- struct drm_connector *connector = &sdev->connector;
- struct drm_simple_display_pipe *pipe = &sdev->pipe;
+ const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
+ struct device_node *of_node = pdev->dev.of_node;
+ struct simpledrm_device *sdev;
+ struct drm_device *dev;
+ int width, height, stride;
+ const struct drm_format_info *format;
+ struct resource *res, *mem;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
unsigned long max_width, max_height;
- const uint32_t *formats;
size_t nformats;
int ret;
- ret = drmm_mode_config_init(dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ if (IS_ERR(sdev))
+ return ERR_CAST(sdev);
+ dev = &sdev->dev;
+ platform_set_drvdata(pdev, sdev);
+
+ /*
+ * Hardware settings
+ */
+
+ ret = simpledrm_device_init_clocks(sdev);
if (ret)
- return ret;
+ return ERR_PTR(ret);
+ ret = simpledrm_device_init_regulators(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (pd) {
+ width = simplefb_get_width_pd(dev, pd);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = simplefb_get_height_pd(dev, pd);
+ if (height < 0)
+ return ERR_PTR(height);
+ stride = simplefb_get_stride_pd(dev, pd);
+ if (stride < 0)
+ return ERR_PTR(stride);
+ format = simplefb_get_format_pd(dev, pd);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ } else if (of_node) {
+ width = simplefb_get_width_of(dev, of_node);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = simplefb_get_height_of(dev, of_node);
+ if (height < 0)
+ return ERR_PTR(height);
+ stride = simplefb_get_stride_of(dev, of_node);
+ if (stride < 0)
+ return ERR_PTR(stride);
+ format = simplefb_get_format_of(dev, of_node);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ } else {
+ drm_err(dev, "no simplefb configuration found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ if (!stride) {
+ stride = drm_format_info_min_pitch(format, 0, width);
+ if (drm_WARN_ON(dev, !stride))
+ return ERR_PTR(-EINVAL);
+ }
- max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH);
- max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT);
+ sdev->mode = simpledrm_mode(width, height);
+ sdev->format = format;
+ sdev->pitch = stride;
- dev->mode_config.min_width = mode->hdisplay;
- dev->mode_config.max_width = max_width;
- dev->mode_config.min_height = mode->vdisplay;
- dev->mode_config.max_height = max_height;
- dev->mode_config.preferred_depth = sdev->format->cpp[0] * 8;
- dev->mode_config.funcs = &simpledrm_mode_config_funcs;
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
+ &format->format, width, height, stride);
- ret = drm_connector_init(dev, connector, &simpledrm_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (ret)
- return ret;
- drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
- drm_connector_set_panel_orientation_with_quirk(connector,
- DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
- mode->hdisplay, mode->vdisplay);
+ /*
+ * Memory management
+ */
- formats = simpledrm_device_formats(sdev, &nformats);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return ERR_PTR(-EINVAL);
- ret = drm_simple_display_pipe_init(dev, pipe, &simpledrm_simple_display_pipe_funcs,
- formats, nformats, simpledrm_format_modifiers,
- connector);
- if (ret)
- return ret;
+ ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: error %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
- drm_plane_enable_fb_damage_clips(&pipe->plane);
+ mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
- drm_mode_config_reset(dev);
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ sdev->screen_base = screen_base;
- return 0;
-}
+ /*
+ * Modesetting
+ */
-/*
- * Init / Cleanup
- */
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
-static struct simpledrm_device *
-simpledrm_device_create(struct drm_driver *drv, struct platform_device *pdev)
-{
- struct simpledrm_device *sdev;
- int ret;
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device,
- dev);
- if (IS_ERR(sdev))
- return ERR_CAST(sdev);
- sdev->pdev = pdev;
- platform_set_drvdata(pdev, sdev);
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->cpp[0] * 8;
+ dev->mode_config.funcs = &simpledrm_mode_config_funcs;
- ret = simpledrm_device_init_clocks(sdev);
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ simpledrm_primary_plane_formats,
+ ARRAY_SIZE(simpledrm_primary_plane_formats),
+ sdev->formats, ARRAY_SIZE(sdev->formats));
+
+ primary_plane = &sdev->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
+ sdev->formats, nformats,
+ simpledrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_regulators(sdev);
+ drm_plane_helper_add(primary_plane, &simpledrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &sdev->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &simpledrm_crtc_funcs, NULL);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_fb(sdev);
+ drm_crtc_helper_add(crtc, &simpledrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &sdev->encoder;
+ ret = drm_encoder_init(dev, encoder, &simpledrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_mm(sdev);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &sdev->connector;
+ ret = drm_connector_init(dev, connector, &simpledrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_modeset(sdev);
+ drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ERR_PTR(ret);
+ drm_mode_config_reset(dev);
+
return sdev;
}
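For context, the drm_fb_build_fourcc_list() call added above merges the one native format with the driver's table of emulated formats into sdev->formats and returns the number of entries written, which then feeds drm_universal_plane_init(). A hedged sketch of the general pattern (everything except the helper itself is illustrative):

/* Illustrative only: build a plane format list from a single native format. */
static size_t example_build_formats(struct drm_device *dev, u32 native_fourcc,
				    u32 *out, size_t out_len)
{
	static const u32 emulated[] = {
		DRM_FORMAT_XRGB8888,	/* formats reachable via format conversion */
	};

	/* Returns the number of entries written to @out. */
	return drm_fb_build_fourcc_list(dev, &native_fourcc, 1,
					emulated, ARRAY_SIZE(emulated),
					out, out_len);
}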
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 8eddb020c43e..b6f620b902e6 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -15,12 +15,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
@@ -69,12 +69,15 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
unsigned int x, y;
u8 *src, *buf, val;
+ struct iosys_map dst_map, vmap;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return;
- drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, clip);
+ iosys_map_set_vaddr(&dst_map, buf);
+ iosys_map_set_vaddr(&vmap, vaddr);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -92,8 +95,8 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- void *src = cma_obj->vaddr;
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ void *src = dma_obj->vaddr;
int ret = 0;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
@@ -269,12 +272,12 @@ static const struct drm_display_mode st7586_mode = {
DRM_SIMPLE_MODE(178, 128, 37, 27),
};
-DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
+DEFINE_DRM_GEM_DMA_FOPS(st7586_fops);
static const struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index e0f02d367d88..c36ba08acda1 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -20,7 +20,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
@@ -133,6 +133,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -151,12 +152,12 @@ static const struct st7735r_cfg rh128128t_cfg = {
.rgb = true,
};
-DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
+DEFINE_DRM_GEM_DMA_FOPS(st7735r_fops);
static const struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e210df65c30..7c8e8be774f1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -117,12 +117,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man, *new_man;
struct ttm_device *bdev = bo->bdev;
+ bool old_use_tt, new_use_tt;
int ret;
- old_man = ttm_manager_type(bdev, bo->resource->mem_type);
- new_man = ttm_manager_type(bdev, mem->mem_type);
+ old_use_tt = bo->resource &&
+ ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
+ new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
ttm_bo_unmap_virtual(bo);
@@ -130,11 +131,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
* Create and bind a ttm if required.
*/
- if (new_man->use_tt) {
+ if (new_use_tt) {
/* Zero init the new TTM structure if the old location should
* have used one as well.
*/
- ret = ttm_tt_create(bo, old_man->use_tt);
+ ret = ttm_tt_create(bo, old_use_tt);
if (ret)
goto out_err;
@@ -160,8 +161,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return 0;
out_err:
- new_man = ttm_manager_type(bdev, bo->resource->mem_type);
- if (!new_man->use_tt)
+ if (!old_use_tt)
ttm_bo_tt_destroy(bo);
return ret;
@@ -518,6 +518,9 @@ out:
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
+ struct ttm_resource *res = bo->resource;
+ struct ttm_device *bdev = bo->bdev;
+
dma_resv_assert_held(bo->base.resv);
if (bo->resource->mem_type == TTM_PL_SYSTEM)
return true;
@@ -525,11 +528,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
- (place->lpfn && place->lpfn <= bo->resource->start))
- return false;
-
- return true;
+ return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
@@ -904,7 +903,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- if (!ttm_resource_compat(bo->resource, placement)) {
+ if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -912,7 +911,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
@@ -921,36 +920,61 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
-int ttm_bo_init_reserved(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
- struct sg_table *sg,
- struct dma_resv *resv,
+/**
+ * ttm_bo_init_reserved
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @ctx: TTM operation context for memory allocation.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function, enables driver-specific objects
+ * derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
+ * and it is the caller's responsibility to call ttm_bo_unreserve.
+ *
+ * If a failure occurs, the function will call the @destroy function. Thus,
+ * after a failure, dereferencing @bo is illegal and will likely cause memory
+ * corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
- bool locked;
int ret;
- bo->destroy = destroy;
kref_init(&bo->kref);
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
- bo->page_alignment = page_alignment;
+ bo->page_alignment = alignment;
+ bo->destroy = destroy;
bo->pin_count = 0;
bo->sg = sg;
bo->bulk_move = NULL;
- if (resv) {
+ if (resv)
bo->base.resv = resv;
- dma_resv_assert_held(bo->base.resv);
- } else {
+ else
bo->base.resv = &bo->base._resv;
- }
atomic_inc(&ttm_glob.bo_count);
ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
@@ -963,50 +987,84 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg)
+ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
- bo->resource->num_pages);
+ PFN_UP(bo->base.size));
+ if (ret)
+ goto err_put;
+ }
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
*/
- if (!resv) {
- locked = dma_resv_trylock(bo->base.resv);
- WARN_ON(!locked);
- }
+ if (!resv)
+ WARN_ON(!dma_resv_trylock(bo->base.resv));
+ else
+ dma_resv_assert_held(resv);
- if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, ctx);
+ ret = ttm_bo_validate(bo, placement, ctx);
+ if (unlikely(ret))
+ goto err_unlock;
- if (unlikely(ret)) {
- if (!resv)
- ttm_bo_unreserve(bo);
+ return 0;
- ttm_bo_put(bo);
- return ret;
- }
+err_unlock:
+ if (!resv)
+ dma_resv_unlock(bo->base.resv);
+err_put:
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
-int ttm_bo_init(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct sg_table *sg,
- struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *))
+/**
+ * ttm_bo_init_validate
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ *
+ * If a failure occurs, the function will call the @destroy function. Thus,
+ * after a failure, dereferencing @bo is illegal and will likely cause memory
+ * corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
- ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
- page_alignment, &ctx, sg, resv, destroy);
+ ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
+ sg, resv, destroy);
if (ret)
return ret;
@@ -1015,7 +1073,7 @@ int ttm_bo_init(struct ttm_device *bdev,
return 0;
}
-EXPORT_SYMBOL(ttm_bo_init);
+EXPORT_SYMBOL(ttm_bo_init_validate);
/*
* buffer object vm functions.
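A hedged sketch of the caller side of the reworked init path: ttm_bo_init_validate() no longer takes a size argument, so the embedded GEM object's size must be set up front (TTM reads it from bo->base.size, as in the PFN_UP(bo->base.size) change above). The driver structure and helper below are illustrative assumptions, not part of the patch:

/* Illustrative only; assumes a driver object that embeds a TTM BO. */
struct my_bo {
	struct ttm_buffer_object tbo;	/* first member, so NULL destroy == kfree() */
};

static struct my_bo *my_bo_create(struct ttm_device *bdev, struct drm_device *drm,
				  size_t size, struct ttm_placement *placement)
{
	struct my_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* The BO size now comes from the embedded GEM object, not a parameter. */
	drm_gem_private_object_init(drm, &bo->tbo.base, size);

	/*
	 * Interruptible waits, default alignment, no sg table, TTM allocates the
	 * reservation object; on failure the BO has already been released.
	 */
	ret = ttm_bo_init_validate(bdev, &bo->tbo, ttm_bo_type_device, placement,
				   0, true, NULL, NULL, NULL);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}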
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1cbfb00c1d65..fa04e62202c1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -137,8 +137,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
struct ttm_resource *src_mem = bo->resource;
- struct ttm_resource_manager *src_man =
- ttm_manager_type(bdev, src_mem->mem_type);
+ struct ttm_resource_manager *src_man;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;
@@ -147,6 +146,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
+ if (!src_mem)
+ return 0;
+
+ src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
@@ -236,16 +239,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
- if (fbo->base.resource) {
- ttm_resource_set_bo(fbo->base.resource, &fbo->base);
- bo->resource = NULL;
- }
-
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);
@@ -399,6 +405,8 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
struct ttm_resource *mem = bo->resource;
int ret;
+ dma_resv_assert_held(bo->base.resv);
+
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (ret)
return ret;
@@ -457,6 +465,8 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
+ dma_resv_assert_held(bo->base.resv);
+
if (iosys_map_is_null(map))
return;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index d91666721dc6..4cfef2b3514d 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -113,6 +113,37 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
kfree(node);
}
+static bool ttm_range_man_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ u32 num_pages = PFN_UP(size);
+
+ /* Don't evict BOs outside of the requested placement range */
+ if (place->fpfn >= (node->start + num_pages) ||
+ (place->lpfn && place->lpfn <= node->start))
+ return false;
+
+ return true;
+}
+
+static bool ttm_range_man_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ u32 num_pages = PFN_UP(size);
+
+ if (node->start < place->fpfn ||
+ (place->lpfn && (node->start + num_pages) > place->lpfn))
+ return false;
+
+ return true;
+}
+
static void ttm_range_man_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
@@ -126,6 +157,8 @@ static void ttm_range_man_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func ttm_range_manager_func = {
.alloc = ttm_range_man_alloc,
.free = ttm_range_man_free,
+ .intersects = ttm_range_man_intersects,
+ .compatible = ttm_range_man_compatible,
.debug = ttm_range_man_debug
};
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 20f9adcc3235..a729c32a1e48 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -253,10 +253,71 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
+/**
+ * ttm_resource_intersects - test for intersection
+ *
+ * @bdev: TTM device structure
+ * @res: The resource to test
+ * @place: The placement to test
+ * @size: How many bytes the new allocation needs.
+ *
+ * Test if @res intersects with @place and @size. Used for testing if evictions
+ * are valuable or not.
+ *
+ * Returns true if the res placement intersects with @place and @size.
+ */
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct ttm_resource_manager *man;
+
+ if (!res)
+ return false;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (!place || !man->func->intersects)
+ return true;
+
+ return man->func->intersects(man, res, place, size);
+}
+
+/**
+ * ttm_resource_compatible - test for compatibility
+ *
+ * @bdev: TTM device structure
+ * @res: The resource to test
+ * @place: The placement to test
+ * @size: How many bytes the new allocation needs.
+ *
+ * Test if @res is compatible with @place and @size.
+ *
+ * Returns true if the res placement is compatible with @place and @size.
+ */
+bool ttm_resource_compatible(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct ttm_resource_manager *man;
+
+ if (!res || !place)
+ return false;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (!man->func->compatible)
+ return true;
+
+ return man->func->compatible(man, res, place, size);
+}
+
static bool ttm_resource_places_compat(struct ttm_resource *res,
const struct ttm_place *places,
unsigned num_placement)
{
+ struct ttm_buffer_object *bo = res->bo;
+ struct ttm_device *bdev = bo->bdev;
unsigned i;
if (res->placement & TTM_PL_FLAG_TEMPORARY)
@@ -265,8 +326,7 @@ static bool ttm_resource_places_compat(struct ttm_resource *res,
for (i = 0; i < num_placement; i++) {
const struct ttm_place *heap = &places[i];
- if (res->start < heap->fpfn || (heap->lpfn &&
- (res->start + res->num_pages) > heap->lpfn))
+ if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
continue;
if ((res->mem_type == heap->mem_type) &&
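A small worked example of the difference between the two new predicates, using the range-manager implementation above (the numbers are made up):

/*
 * Illustration only. Suppose a resource occupies pages [256, 512) and a
 * place requests fpfn = 384, lpfn = 768:
 *
 *   ttm_range_man_intersects():  384 < 512 and 768 > 256        -> true
 *   ttm_range_man_compatible():  256 < 384 (starts too early)   -> false
 *
 * So the resource is worth evicting to satisfy the place (it overlaps it),
 * but it is not already compatible with it (it is not fully contained).
 * Managers that leave .intersects/.compatible unset fall back to the
 * permissive defaults in ttm_resource_intersects()/ttm_resource_compatible().
 */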
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index 47a7dbe6c114..11e865be81c6 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -8,7 +8,7 @@ config DRM_TVE200
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Faraday TV Encoder
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 771bad881714..37bdd976ae59 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -15,11 +15,11 @@
#include <linux/of_graph.h>
#include <linux/delay.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>
@@ -90,7 +90,7 @@ static int tve200_display_check(struct drm_simple_display_pipe *pipe,
}
if (fb) {
- u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3) {
@@ -267,14 +267,14 @@ static void tve200_display_update(struct drm_simple_display_pipe *pipe,
if (fb) {
/* For RGB, the Y component is used as base address */
- writel(drm_fb_cma_get_gem_addr(fb, pstate, 0),
+ writel(drm_fb_dma_get_gem_addr(fb, pstate, 0),
priv->regs + TVE200_Y_FRAME_BASE_ADDR);
/* For three plane YUV we need two more addresses */
if (fb->format->format == DRM_FORMAT_YUV420) {
- writel(drm_fb_cma_get_gem_addr(fb, pstate, 1),
+ writel(drm_fb_dma_get_gem_addr(fb, pstate, 1),
priv->regs + TVE200_U_FRAME_BASE_ADDR);
- writel(drm_fb_cma_get_gem_addr(fb, pstate, 2),
+ writel(drm_fb_dma_get_gem_addr(fb, pstate, 2),
priv->regs + TVE200_V_FRAME_BASE_ADDR);
}
}
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 6d9d2921abf4..04db72e3fa9c 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -39,9 +39,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
@@ -65,7 +64,7 @@ static int tve200_modeset_init(struct drm_device *dev)
struct tve200_drm_dev_private *priv = dev->dev_private;
struct drm_panel *panel;
struct drm_bridge *bridge;
- int ret = 0;
+ int ret;
drm_mode_config_init(dev);
mode_config = &dev->mode_config;
@@ -93,6 +92,7 @@ static int tve200_modeset_init(struct drm_device *dev)
* method to get the connector out of the bridge.
*/
dev_err(dev->dev, "the bridge is not a panel\n");
+ ret = -EINVAL;
goto out_bridge;
}
@@ -135,7 +135,7 @@ finish:
return ret;
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver tve200_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -147,7 +147,7 @@ static const struct drm_driver tve200_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static int tve200_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5703277c6f52..91effdcefb6d 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -21,8 +21,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ int ret;
- return drm_mode_config_helper_suspend(dev);
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
+
+ udl_sync_pending_urbs(dev);
+ return 0;
}
static int udl_usb_resume(struct usb_interface *interface)
@@ -32,6 +38,16 @@ static int udl_usb_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
+static int udl_usb_reset_resume(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
+
+ udl_select_std_channel(udl);
+
+ return drm_mode_config_helper_resume(dev);
+}
+
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
@@ -140,6 +156,7 @@ static struct usb_driver udl_driver = {
.disconnect = udl_usb_disconnect,
.suspend = udl_usb_suspend,
.resume = udl_usb_resume,
+ .reset_resume = udl_usb_reset_resume,
.id_table = id_table,
};
module_usb_driver(udl_driver);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc16a13316e4..b4cc7cc568c7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -34,14 +34,13 @@ struct udl_device;
struct urb_node {
struct list_head entry;
struct udl_device *dev;
- struct delayed_work release_urb_work;
struct urb *urb;
};
struct urb_list {
struct list_head list;
spinlock_t lock;
- struct semaphore limit_sem;
+ wait_queue_head_t sleep;
int available;
int count;
size_t size;
@@ -78,6 +77,7 @@ struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct drm_device *dev);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
@@ -87,6 +87,7 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
int udl_drop_usb(struct drm_device *dev);
+int udl_select_std_channel(struct udl_device *udl);
#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 853f147036f6..061cb88c08a2 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -20,11 +20,10 @@
#define NR_USB_REQUEST_CHANNEL 0x12
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
-#define WRITES_IN_FLIGHT (4)
+#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
-#define GET_URB_TIMEOUT HZ
-#define FREE_URB_TIMEOUT (HZ*2)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
@@ -95,7 +94,7 @@ success:
/*
* Need to ensure a channel is selected before submitting URBs
*/
-static int udl_select_std_channel(struct udl_device *udl)
+int udl_select_std_channel(struct udl_device *udl)
{
static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
@@ -119,14 +118,6 @@ static int udl_select_std_channel(struct udl_device *udl)
return ret < 0 ? ret : 0;
}
-static void udl_release_urb_work(struct work_struct *work)
-{
- struct urb_node *unode = container_of(work, struct urb_node,
- release_urb_work.work);
-
- up(&unode->dev->urbs.limit_sem);
-}
-
void udl_urb_completion(struct urb *urb)
{
struct urb_node *unode = urb->context;
@@ -137,6 +128,7 @@ void udl_urb_completion(struct urb *urb)
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
+ urb->status == -EPROTO ||
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
@@ -150,49 +142,34 @@ void udl_urb_completion(struct urb *urb)
udl->urbs.available++;
spin_unlock_irqrestore(&udl->urbs.lock, flags);
-#if 0
- /*
- * When using fb_defio, we deadlock if up() is called
- * while another is waiting. So queue to another process.
- */
- if (fb_defio)
- schedule_delayed_work(&unode->release_urb_work, 0);
- else
-#endif
- up(&udl->urbs.limit_sem);
+ wake_up(&udl->urbs.sleep);
}
static void udl_free_urb_list(struct drm_device *dev)
{
struct udl_device *udl = to_udl(dev);
- int count = udl->urbs.count;
- struct list_head *node;
struct urb_node *unode;
struct urb *urb;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
- while (count--) {
- down(&udl->urbs.limit_sem);
-
+ while (udl->urbs.count) {
spin_lock_irq(&udl->urbs.lock);
-
- node = udl->urbs.list.next; /* have reserved one with sem */
- list_del_init(node);
-
+ urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
+ udl->urbs.count--;
spin_unlock_irq(&udl->urbs.lock);
-
- unode = list_entry(node, struct urb_node, entry);
- urb = unode->urb;
-
+ if (WARN_ON(!urb))
+ break;
+ unode = urb->context;
/* Free each separately allocated piece */
usb_free_coherent(urb->dev, udl->urbs.size,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
- kfree(node);
+ kfree(unode);
}
- udl->urbs.count = 0;
+
+ wake_up_all(&udl->urbs.sleep);
}
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
@@ -205,24 +182,20 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
struct usb_device *udev = udl_to_usb_device(udl);
spin_lock_init(&udl->urbs.lock);
-
-retry:
- udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
-
- sema_init(&udl->urbs.limit_sem, 0);
+ init_waitqueue_head(&udl->urbs.sleep);
udl->urbs.count = 0;
udl->urbs.available = 0;
+retry:
+ udl->urbs.size = size;
+
while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
unode->dev = udl;
- INIT_DELAYED_WORK(&unode->release_urb_work,
- udl_release_urb_work);
-
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
@@ -250,7 +223,6 @@ retry:
list_add_tail(&unode->entry, &udl->urbs.list);
- up(&udl->urbs.limit_sem);
udl->urbs.count++;
udl->urbs.available++;
}
@@ -260,35 +232,41 @@ retry:
return udl->urbs.count;
}
-struct urb *udl_get_urb(struct drm_device *dev)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
- struct udl_device *udl = to_udl(dev);
- int ret = 0;
- struct list_head *entry;
struct urb_node *unode;
- struct urb *urb = NULL;
+
+ assert_spin_locked(&udl->urbs.lock);
/* Wait for an in-flight buffer to complete and get re-queued */
- ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
- if (ret) {
- DRM_INFO("wait for urb interrupted: %x available: %d\n",
- ret, udl->urbs.available);
- goto error;
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ !udl->urbs.count ||
+ !list_empty(&udl->urbs.list),
+ udl->urbs.lock, timeout)) {
+ DRM_INFO("wait for urb interrupted: available: %d\n",
+ udl->urbs.available);
+ return NULL;
}
- spin_lock_irq(&udl->urbs.lock);
+ if (!udl->urbs.count)
+ return NULL;
- BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
- entry = udl->urbs.list.next;
- list_del_init(entry);
+ unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
+ list_del_init(&unode->entry);
udl->urbs.available--;
- spin_unlock_irq(&udl->urbs.lock);
+ return unode ? unode->urb : NULL;
+}
- unode = list_entry(entry, struct urb_node, entry);
- urb = unode->urb;
+#define GET_URB_TIMEOUT HZ
+struct urb *udl_get_urb(struct drm_device *dev)
+{
+ struct udl_device *udl = to_udl(dev);
+ struct urb *urb;
-error:
+ spin_lock_irq(&udl->urbs.lock);
+ urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
+ spin_unlock_irq(&udl->urbs.lock);
return urb;
}
@@ -297,10 +275,13 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
struct udl_device *udl = to_udl(dev);
int ret;
- BUG_ON(len > udl->urbs.size);
-
+ if (WARN_ON(len > udl->urbs.size)) {
+ ret = -EINVAL;
+ goto error;
+ }
urb->transfer_buffer_length = len; /* set to actual payload len */
ret = usb_submit_urb(urb, GFP_ATOMIC);
+ error:
if (ret) {
udl_urb_completion(urb); /* because no one else will */
DRM_ERROR("usb_submit_urb error %x\n", ret);
@@ -308,6 +289,21 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
+/* wait until all pending URBs have been processed */
+void udl_sync_pending_urbs(struct drm_device *dev)
+{
+ struct udl_device *udl = to_udl(dev);
+
+ spin_lock_irq(&udl->urbs.lock);
+ /* 2 seconds as a sane timeout */
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ udl->urbs.available == udl->urbs.count,
+ udl->urbs.lock,
+ msecs_to_jiffies(2000)))
+ drm_err(dev, "Timeout for syncing pending URBs\n");
+ spin_unlock_irq(&udl->urbs.lock);
+}
+
int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
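Roughly, the reworked URB pool is used as a simple produce/consume queue: udl_get_urb() sleeps on the new wait queue until a buffer is available, udl_submit_urb() sends it, and the completion handler puts it back on the list and wakes waiters; udl_usb_suspend() above then uses udl_sync_pending_urbs() to wait (up to two seconds) for the pool to drain before the interface is quiesced. A hedged sketch of a typical transfer path, with an illustrative helper name and no error handling beyond the essentials:

/* Illustrative flow only. */
static int example_send_block(struct drm_device *dev, const void *data, size_t len)
{
	struct urb *urb;

	urb = udl_get_urb(dev);			/* may sleep up to GET_URB_TIMEOUT */
	if (!urb)
		return -ENOMEM;

	/* len must not exceed udl->urbs.size; udl_submit_urb() WARNs otherwise. */
	memcpy(urb->transfer_buffer, data, len);
	return udl_submit_urb(dev, urb, len);	/* completion re-queues the URB */
}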
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index e67c40a48fb4..ec6876f449f3 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -242,38 +242,15 @@ static long udl_log_cpp(unsigned int cpp)
return __ffs(cpp);
}
-static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
- int width, int height)
-{
- int x1, x2;
-
- if (WARN_ON_ONCE(x < 0) ||
- WARN_ON_ONCE(y < 0) ||
- WARN_ON_ONCE(width < 0) ||
- WARN_ON_ONCE(height < 0))
- return -EINVAL;
-
- x1 = ALIGN_DOWN(x, sizeof(unsigned long));
- x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;
-
- clip->x1 = x1;
- clip->y1 = y;
- clip->x2 = x2;
- clip->y2 = y + height;
-
- return 0;
-}
-
static int udl_handle_damage(struct drm_framebuffer *fb,
const struct iosys_map *map,
- int x, int y, int width, int height)
+ const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
struct urb *urb;
- struct drm_rect clip;
int log_bpp;
ret = udl_log_cpp(fb->format->cpp[0]);
@@ -281,12 +258,6 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- ret = udl_aligned_damage_clip(&clip, x, y, width, height);
- if (ret)
- return ret;
- else if ((clip.x2 > fb->width) || (clip.y2 > fb->height))
- return -EINVAL;
-
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
@@ -298,11 +269,11 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
}
cmd = urb->transfer_buffer;
- for (i = clip.y1; i < clip.y2; i++) {
+ for (i = clip->y1; i < clip->y2; i++) {
const int line_offset = fb->pitches[0] * i;
- const int byte_offset = line_offset + (clip.x1 << log_bpp);
- const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp;
- const int byte_width = (clip.x2 - clip.x1) << log_bpp;
+ const int byte_offset = line_offset + (clip->x1 << log_bpp);
+ const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
+ const int byte_width = drm_rect_width(clip) << log_bpp;
ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
@@ -355,6 +326,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
char *buf;
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
@@ -380,10 +352,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
udl->mode_buf_len = wrptr - buf;
- udl_handle_damage(fb, &shadow_plane_state->data[0], 0, 0, fb->width, fb->height);
-
- if (!crtc_state->mode_changed)
- return;
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &clip);
/* enable display */
udl_crtc_write_mode_to_hw(crtc);
@@ -423,8 +392,7 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
- udl_handle_damage(fb, &shadow_plane_state->data[0], rect.x1, rect.y1,
- rect.x2 - rect.x1, rect.y2 - rect.y1);
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
}
static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
@@ -479,6 +447,7 @@ int udl_modeset_init(struct drm_device *dev)
format_count, NULL, connector);
if (ret)
return ret;
+ drm_plane_enable_fb_damage_clips(&udl->display_pipe.plane);
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 971927669d6b..b57844632dbd 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -25,46 +25,6 @@
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-/*
- * Trims identical data from front and back of line
- * Sets new front buffer address and width
- * And returns byte count of identical pixels
- * Assumes CPU natural alignment (unsigned long)
- * for back and front buffer ptrs and width
- */
-#if 0
-static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
-{
- int j, k;
- const unsigned long *back = (const unsigned long *) bback;
- const unsigned long *front = (const unsigned long *) *bfront;
- const int width = *width_bytes / sizeof(unsigned long);
- int identical = width;
- int start = width;
- int end = width;
-
- for (j = 0; j < width; j++) {
- if (back[j] != front[j]) {
- start = j;
- break;
- }
- }
-
- for (k = width - 1; k > j; k--) {
- if (back[k] != front[k]) {
- end = k+1;
- break;
- }
- }
-
- identical = start + (width - end);
- *bfront = (u8 *) &front[start];
- *width_bytes = (end - start) * sizeof(unsigned long);
-
- return identical * sizeof(unsigned long);
-}
-#endif
-
static inline u16 pixel32_to_be16(const uint32_t pixel)
{
return (((pixel >> 3) & 0x001f) |
@@ -220,7 +180,11 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(log_bpp == 1 || log_bpp == 2));
+ if (WARN_ON(!(log_bpp == 1 || log_bpp == 2))) {
+ /* need to finish URB at error from this function */
+ udl_urb_completion(urb);
+ return -EINVAL;
+ }
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 8c7f910daa28..e8c975b81585 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -22,7 +22,6 @@
#include <linux/reset.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 725a252e837b..b8980440d137 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -313,7 +313,7 @@ v3d_lookup_bos(struct drm_device *dev,
}
job->bo = kvmalloc_array(job->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!job->bo) {
DRM_DEBUG("Failed to allocate validated BO pointers\n");
@@ -1092,7 +1092,7 @@ v3d_gem_init(struct drm_device *dev)
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
- "Failed to allocate page tables. Please ensure you have CMA enabled.\n");
+ "Failed to allocate page tables. Please ensure you have DMA enabled.\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index f6a88abccc7d..48aaaa972c49 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -95,7 +95,7 @@ struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id)
void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
{
mutex_init(&v3d_priv->perfmon.lock);
- idr_init(&v3d_priv->perfmon.idr);
+ idr_init_base(&v3d_priv->perfmon.idr, 1);
}
static int v3d_perfmon_idr_del(int id, void *elem, void *data)
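The idr_init_base() change above follows the usual IDR idiom: when IDs handed out start at 1 (so 0 can mean "no perfmon"), declaring the base up front lets the IDR skip maintaining the unused slot 0. A minimal sketch of that pattern, with hypothetical names:

/* Hypothetical example of the idr_init_base() idiom. */
#include <linux/idr.h>

struct example_ctx {
	struct idr idr;
};

static void example_init(struct example_ctx *ctx)
{
	/* IDs exposed to userspace start at 1, so 0 can mean "none". */
	idr_init_base(&ctx->idr, 1);
}

static int example_register(struct example_ctx *ctx, void *obj)
{
	return idr_alloc(&ctx->idr, obj, 1, 0, GFP_KERNEL);	/* returns an ID >= 1 */
}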
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index fa0d73ce07bc..341edd982cb3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -269,8 +269,8 @@ static int vbox_primary_atomic_check(struct drm_plane *plane,
}
return drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -351,8 +351,8 @@ static int vbox_cursor_atomic_check(struct drm_plane *plane,
}
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
@@ -477,7 +477,7 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
@@ -496,7 +496,7 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index a5de40fe1a76..f60d82504da0 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -43,7 +43,7 @@
* VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
* from the host and issue commands to the host.
*
- * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the the
+ * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the
* following operations with the VBE data register can be performed:
*
* Operation Result
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 061be9a6619d..246305d17a52 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -8,10 +8,11 @@ config DRM_VC4
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
+ depends on PM
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
select SND_PCM_ELD
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 0846d56f74f2..231add8b8e12 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -8,10 +8,10 @@
*
* The VC4 GPU architecture (both scanout and rendering) has direct
* access to system memory with no MMU in between. To support it, we
- * use the GEM CMA helper functions to allocate contiguous ranges of
+ * use the GEM DMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
*
- * Since the CMA allocator is very slow, we keep a cache of recently
+ * Since the DMA allocator is very slow, we keep a cache of recently
* freed BOs around so that the kernel's allocation of objects for 3D
* rendering can return quickly.
*/
@@ -179,7 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- drm_gem_cma_free(&bo->base);
+ drm_gem_dma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -303,7 +303,7 @@ static void vc4_bo_purge(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
- dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+ dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
bo->base.vaddr = NULL;
bo->madv = __VC4_MADV_PURGED;
}
@@ -387,13 +387,14 @@ out:
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
- * This lets the CMA helpers allocate object structs for us, and keep
+ * This lets the DMA helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
@@ -404,7 +405,11 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
- mutex_init(&bo->madv_lock);
+
+ ret = drmm_mutex_init(dev, &bo->madv_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
@@ -421,7 +426,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -438,39 +443,39 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return bo;
}
- cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
+ dma_obj = drm_gem_dma_create(dev, size);
+ if (IS_ERR(dma_obj)) {
/*
- * If we've run out of CMA memory, kill the cache of
- * CMA allocations we've got laying around and try again.
+ * If we've run out of DMA memory, kill the cache of
+ * DMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
/*
- * Still not enough CMA memory, purge the userspace BO
+ * Still not enough DMA memory, purge the userspace BO
* cache and retry.
* This is sub-optimal since we purge the whole userspace
* BO cache which forces user that want to re-use the BO to
* restore its initial content.
* Ideally, we should purge entries one by one and retry
- * after each to see if CMA allocation succeeds. Or even
+ * after each to see if DMA allocation succeeds. Or even
* better, try to find an entry with at least the same
* size.
*/
vc4_bo_userspace_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from CMA:\n");
+ DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
- bo = to_vc4_bo(&cma_obj->base);
+ bo = to_vc4_bo(&dma_obj->base);
/* By default, BOs do not support the MADV ioctl. This will be enabled
* only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
@@ -479,7 +484,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
bo->madv = __VC4_MADV_NOTSUPP;
mutex_lock(&vc4->bo_lock);
- vc4_bo_set_label(&cma_obj->base, type);
+ vc4_bo_set_label(&dma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
return bo;
@@ -564,7 +569,7 @@ static void vc4_free_object(struct drm_gem_object *gem_bo)
goto out;
}
- /* If this object was partially constructed but CMA allocation
+ /* If this object was partially constructed but DMA allocation
* had failed, just free it. Can also happen when the BO has been
* purged.
*/
@@ -742,7 +747,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL;
}
- return drm_gem_cma_mmap(&bo->base, vma);
+ return drm_gem_dma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -754,8 +759,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
@@ -984,10 +989,28 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int vc4_bo_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ ret = vc4_debugfs_add_file(minor, "bo_stats",
+ vc4_bo_stats_debugfs, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
int i;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1007,9 +1030,11 @@ int vc4_bo_cache_init(struct drm_device *dev)
for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
vc4->bo_labels[i].name = bo_type_names[i];
- mutex_init(&vc4->bo_lock);
-
- vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
+ ret = drmm_mutex_init(dev, &vc4->bo_lock);
+ if (ret) {
+ kfree(vc4->bo_labels);
+ return ret;
+ }
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
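The mutex conversions above follow the DRM-managed ("drmm_") resource pattern: the lock is torn down automatically when the drm_device is released, so the driver needs no explicit cleanup path for it. A minimal sketch of the idiom, with hypothetical names:

/* Hypothetical driver state tied to the drm_device lifetime. */
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct example_dev {
	struct drm_device base;
	struct mutex lock;
};

static int example_init(struct example_dev *edev)
{
	/* Destroyed automatically with the drm_device; no *_fini() call needed. */
	return drmm_mutex_init(&edev->base, &edev->lock);
}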
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 029be98660b3..0108613e79d5 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -37,8 +37,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -206,11 +207,6 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
return ret;
}
-void vc4_crtc_destroy(struct drm_crtc *crtc)
-{
- drm_crtc_cleanup(crtc);
-}
-
static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
@@ -300,10 +296,17 @@ struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
/* The PV needs to be disabled before it can be flushed */
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
+
+ drm_dev_exit(idx);
}
static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder,
@@ -326,6 +329,10 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
bool debug_dump_regs = false;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
@@ -415,6 +422,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
drm_crtc_index(crtc));
drm_print_regset32(&p, &vc4_crtc->regset);
}
+
+ drm_dev_exit(idx);
}
static void require_hvs_enabled(struct drm_device *dev)
@@ -435,7 +444,10 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
+ int idx, ret;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
@@ -469,6 +481,8 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
vc4_encoder->post_crtc_powerdown(encoder, state);
+ drm_dev_exit(idx);
+
return 0;
}
@@ -544,6 +558,20 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
return 0;
}
+void vc4_crtc_send_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ if (!crtc->state || !crtc->state->event)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -567,14 +595,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
* Make sure we issue a vblank event after disabling the CRTC if
* someone was waiting it.
*/
- if (crtc->state->event) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+ vc4_crtc_send_vblank(crtc);
}
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -586,10 +607,14 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ int idx;
drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)",
crtc->name, crtc->base.id, encoder->name, encoder->base.id);
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
require_hvs_enabled(dev);
/* Enable vblank irq handling before crtc is started otherwise
@@ -617,6 +642,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
if (vc4_encoder->post_crtc_enable)
vc4_encoder->post_crtc_enable(encoder, state);
+
+ drm_dev_exit(idx);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -709,17 +736,31 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
static int vc4_enable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
+ drm_dev_exit(idx);
+
return 0;
}
static void vc4_disable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
CRTC_WRITE(PV_INTEN, 0);
+
+ drm_dev_exit(idx);
}
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
@@ -821,9 +862,9 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo =
- drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
- bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo =
+ drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
+ bo = to_vc4_bo(&dma_bo->base);
}
vc4_async_page_flip_complete(flip_state);
@@ -855,19 +896,19 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
struct vc4_async_flip_state *flip_state)
{
struct drm_framebuffer *fb = flip_state->fb;
- struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
if (!vc4->is_vc5) {
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
vc4_async_page_flip_seqno_complete);
}
- ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
+ ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
@@ -943,8 +984,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1050,9 +1091,23 @@ void vc4_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
}
+int vc4_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, crtc_data->debugfs_name,
+ &vc4_crtc->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = vc4_crtc_destroy,
.page_flip = vc4_page_flip,
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
@@ -1063,6 +1118,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .late_register = vc4_crtc_late_register,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1077,10 +1133,10 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
+ .debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
- .debugfs_name = "crtc0_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1091,10 +1147,10 @@ static const struct vc4_pv_data bcm2835_pv0_data = {
static const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
+ .debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
},
- .debugfs_name = "crtc1_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1105,10 +1161,10 @@ static const struct vc4_pv_data bcm2835_pv1_data = {
static const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
+ .debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
- .debugfs_name = "crtc2_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1119,10 +1175,10 @@ static const struct vc4_pv_data bcm2835_pv2_data = {
static const struct vc4_pv_data bcm2711_pv0_data = {
.base = {
+ .debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
- .debugfs_name = "crtc0_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1133,10 +1189,10 @@ static const struct vc4_pv_data bcm2711_pv0_data = {
static const struct vc4_pv_data bcm2711_pv1_data = {
.base = {
+ .debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 3,
},
- .debugfs_name = "crtc1_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1147,10 +1203,10 @@ static const struct vc4_pv_data bcm2711_pv1_data = {
static const struct vc4_pv_data bcm2711_pv2_data = {
.base = {
+ .debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 4,
},
- .debugfs_name = "crtc2_regs",
.fifo_depth = 256,
.pixels_per_clock = 2,
.encoder_types = {
@@ -1160,10 +1216,10 @@ static const struct vc4_pv_data bcm2711_pv2_data = {
static const struct vc4_pv_data bcm2711_pv3_data = {
.base = {
+ .debugfs_name = "crtc3_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
- .debugfs_name = "crtc3_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1173,10 +1229,10 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
static const struct vc4_pv_data bcm2711_pv4_data = {
.base = {
+ .debugfs_name = "crtc4_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 5,
},
- .debugfs_name = "crtc4_regs",
.fifo_depth = 64,
.pixels_per_clock = 2,
.encoder_types = {
@@ -1230,6 +1286,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_plane *primary_plane;
unsigned int i;
+ int ret;
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
@@ -1237,15 +1294,18 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
* requirement of the plane configuration, and reject ones
* that will take too much.
*/
- primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
+ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(primary_plane)) {
dev_err(drm->dev, "failed to construct primary plane\n");
return PTR_ERR(primary_plane);
}
spin_lock_init(&vc4_crtc->irq_lock);
- drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
- crtc_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
drm_crtc_helper_add(crtc, crtc_helper_funcs);
if (!vc4->is_vc5) {
@@ -1275,10 +1335,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
const struct vc4_pv_data *pv_data;
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
- struct drm_plane *destroy_plane, *temp;
int ret;
- vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
+ vc4_crtc = drmm_kzalloc(drm, sizeof(*vc4_crtc), GFP_KERNEL);
if (!vc4_crtc)
return -ENOMEM;
crtc = &vc4_crtc->base;
@@ -1310,23 +1369,11 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
IRQF_SHARED,
"vc4 crtc", vc4_crtc);
if (ret)
- goto err_destroy_planes;
+ return ret;
platform_set_drvdata(pdev, vc4_crtc);
- vc4_debugfs_add_regset32(drm, pv_data->debugfs_name,
- &vc4_crtc->regset);
-
return 0;
-
-err_destroy_planes:
- list_for_each_entry_safe(destroy_plane, temp,
- &drm->mode_config.plane_list, head) {
- if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
- destroy_plane->funcs->destroy(destroy_plane);
- }
-
- return ret;
}
static void vc4_crtc_unbind(struct device *dev, struct device *master,
@@ -1335,8 +1382,6 @@ static void vc4_crtc_unbind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
- vc4_crtc_destroy(&vc4_crtc->base);
-
CRTC_WRITE(PV_INTEN, 0);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index ba2d8ea562af..19cda4f91a82 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Broadcom
*/
+#include <drm/drm_drv.h>
+
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
@@ -12,11 +14,6 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-struct vc4_debugfs_info_entry {
- struct list_head link;
- struct drm_info_list info;
-};
-
/*
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
@@ -25,62 +22,59 @@ void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
- struct vc4_debugfs_info_entry *entry;
+ struct drm_device *drm = &vc4->base;
- if (!of_device_is_compatible(vc4->hvs->pdev->dev.of_node,
- "brcm,bcm2711-vc5"))
- debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
- minor->debugfs_root, &vc4->load_tracker_enabled);
+ drm_WARN_ON(drm, vc4_hvs_debugfs_init(minor));
- list_for_each_entry(entry, &vc4->debugfs_list, link) {
- drm_debugfs_create_files(&entry->info, 1,
- minor->debugfs_root, minor);
+ if (vc4->v3d) {
+ drm_WARN_ON(drm, vc4_bo_debugfs_init(minor));
+ drm_WARN_ON(drm, vc4_v3d_debugfs_init(minor));
}
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
struct debugfs_regset32 *regset = node->info_ent->data;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
drm_print_regset32(&p, regset);
+ drm_dev_exit(idx);
+
return 0;
}
-/*
- * Registers a debugfs file with a callback function for a vc4 component.
- *
- * This is like drm_debugfs_create_files(), but that can only be
- * called a given DRM minor, while the various VC4 components want to
- * register their debugfs files during the component bind process. We
- * track the request and delay it to be called on each minor during
- * vc4_debugfs_init().
- */
-void vc4_debugfs_add_file(struct drm_device *dev,
- const char *name,
- int (*show)(struct seq_file*, void*),
- void *data)
+int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_device *dev = minor->dev;
+ struct dentry *root = minor->debugfs_root;
+ struct drm_info_list *file;
- struct vc4_debugfs_info_entry *entry =
- devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL);
+ file = drmm_kzalloc(dev, sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
- if (!entry)
- return;
+ file->name = name;
+ file->show = show;
+ file->data = data;
- entry->info.name = name;
- entry->info.show = show;
- entry->info.data = data;
+ drm_debugfs_create_files(file, 1, root, minor);
- list_add(&entry->link, &vc4->debugfs_list);
+ return 0;
}
-void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *name,
- struct debugfs_regset32 *regset)
+int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *name,
+ struct debugfs_regset32 *regset)
{
- vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
+ return vc4_debugfs_add_file(minor, name, vc4_debugfs_regset32, regset);
}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index ef5e3921062c..1f8f44b7b5a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -84,9 +85,9 @@
/* General DPI hardware state. */
struct vc4_dpi {
- struct platform_device *pdev;
+ struct vc4_encoder encoder;
- struct drm_encoder *encoder;
+ struct platform_device *pdev;
void __iomem *regs;
@@ -96,21 +97,15 @@ struct vc4_dpi {
struct debugfs_regset32 regset;
};
-#define DPI_READ(offset) readl(dpi->regs + (offset))
-#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
-
-/* VC4 DPI encoder KMS struct */
-struct vc4_dpi_encoder {
- struct vc4_encoder base;
- struct vc4_dpi *dpi;
-};
-
-static inline struct vc4_dpi_encoder *
-to_vc4_dpi_encoder(struct drm_encoder *encoder)
+static inline struct vc4_dpi *
+to_vc4_dpi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_dpi_encoder, base.base);
+ return container_of(encoder, struct vc4_dpi, encoder.base);
}
+#define DPI_READ(offset) readl(dpi->regs + (offset))
+#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+
static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_C),
VC4_REG32(DPI_ID),
@@ -118,21 +113,27 @@ static const struct debugfs_reg32 dpi_regs[] = {
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
- struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
- struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
clk_disable_unprepare(dpi->pixel_clock);
+
+ drm_dev_exit(idx);
}
static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_display_mode *mode = &encoder->crtc->mode;
- struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
- struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector = NULL, *connector_scan;
u32 dpi_c = DPI_ENABLE;
+ int idx;
int ret;
/* Look up the connector attached to DPI so we can get the
@@ -212,6 +213,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
dpi_c |= DPI_VSYNC_DISABLE;
}
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
DPI_WRITE(DPI_C, dpi_c);
ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
@@ -221,6 +225,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+ drm_dev_exit(idx);
}
static enum drm_mode_status vc4_dpi_encoder_mode_valid(struct drm_encoder *encoder,
@@ -238,6 +244,23 @@ static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
.mode_valid = vc4_dpi_encoder_mode_valid,
};
+static int vc4_dpi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, "dpi_regs", &dpi->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
+ .late_register = vc4_dpi_late_register,
+};
+
static const struct of_device_id vc4_dpi_dt_match[] = {
{ .compatible = "brcm,bcm2835-dpi", .data = NULL },
{}
@@ -248,10 +271,11 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
*/
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
+ struct drm_device *drm = dpi->encoder.base.dev;
struct device *dev = &dpi->pdev->dev;
struct drm_bridge *bridge;
- bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
@@ -262,30 +286,28 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
return PTR_ERR(bridge);
}
- return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
+ return drm_bridge_attach(&dpi->encoder.base, bridge, NULL, 0);
+}
+
+static void vc4_dpi_disable_clock(void *ptr)
+{
+ struct vc4_dpi *dpi = ptr;
+
+ clk_disable_unprepare(dpi->core_clock);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dpi *dpi;
- struct vc4_dpi_encoder *vc4_dpi_encoder;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ dpi = drmm_kzalloc(drm, sizeof(*dpi), GFP_KERNEL);
if (!dpi)
return -ENOMEM;
- vc4_dpi_encoder = devm_kzalloc(dev, sizeof(*vc4_dpi_encoder),
- GFP_KERNEL);
- if (!vc4_dpi_encoder)
- return -ENOMEM;
- vc4_dpi_encoder->base.type = VC4_ENCODER_TYPE_DPI;
- vc4_dpi_encoder->dpi = dpi;
- dpi->encoder = &vc4_dpi_encoder->base.base;
-
+ dpi->encoder.type = VC4_ENCODER_TYPE_DPI;
dpi->pdev = pdev;
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
@@ -307,6 +329,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
DRM_ERROR("Failed to get core clock: %d\n", ret);
return ret;
}
+
dpi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clock)) {
ret = PTR_ERR(dpi->pixel_clock);
@@ -316,49 +339,35 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
}
ret = clk_prepare_enable(dpi->core_clock);
- if (ret)
+ if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ return ret;
+ }
- drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
- drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
+ ret = devm_add_action_or_reset(dev, vc4_dpi_disable_clock, dpi);
+ if (ret)
+ return ret;
- ret = vc4_dpi_init_bridge(dpi);
+ ret = drmm_encoder_init(drm, &dpi->encoder.base,
+ &vc4_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_DPI,
+ NULL);
if (ret)
- goto err_destroy_encoder;
+ return ret;
- dev_set_drvdata(dev, dpi);
+ drm_encoder_helper_add(&dpi->encoder.base, &vc4_dpi_encoder_helper_funcs);
- vc4->dpi = dpi;
+ ret = vc4_dpi_init_bridge(dpi);
+ if (ret)
+ return ret;
- vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
+ dev_set_drvdata(dev, dpi);
return 0;
-
-err_destroy_encoder:
- drm_encoder_cleanup(dpi->encoder);
- clk_disable_unprepare(dpi->core_clock);
- return ret;
-}
-
-static void vc4_dpi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_dpi *dpi = dev_get_drvdata(dev);
-
- drm_of_panel_bridge_remove(dev->of_node, 0, 0);
-
- drm_encoder_cleanup(dpi->encoder);
-
- clk_disable_unprepare(dpi->core_clock);
-
- vc4->dpi = NULL;
}
static const struct component_ops vc4_dpi_ops = {
.bind = vc4_dpi_bind,
- .unbind = vc4_dpi_unbind,
};
static int vc4_dpi_dev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 292d1b6a01b6..ffbbb454c9e8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,7 +33,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_vblank.h>
@@ -86,7 +85,7 @@ static int vc5_dumb_create(struct drm_file *file_priv,
if (ret)
return ret;
- return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, dev, args);
}
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
@@ -212,7 +211,7 @@ static const struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -235,7 +234,7 @@ static const struct drm_driver vc5_drm_driver = {
.debugfs_init = vc4_debugfs_init,
#endif
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
.fops = &vc4_drm_fops,
@@ -267,6 +266,13 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
+static void vc4_component_unbind_all(void *ptr)
+{
+ struct vc4_dev *vc4 = ptr;
+
+ component_unbind_all(vc4->dev, &vc4->base);
+}
+
static const struct of_device_id vc4_dma_range_matches[] = {
{ .compatible = "brcm,bcm2711-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
@@ -310,13 +316,16 @@ static int vc4_drm_bind(struct device *dev)
if (IS_ERR(vc4))
return PTR_ERR(vc4);
vc4->is_vc5 = is_vc5;
+ vc4->dev = dev;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
INIT_LIST_HEAD(&vc4->debugfs_list);
if (!is_vc5) {
- mutex_init(&vc4->bin_bo_lock);
+ ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
+ if (ret)
+ return ret;
ret = vc4_bo_cache_init(drm);
if (ret)
@@ -360,6 +369,10 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, vc4_component_unbind_all, vc4);
+ if (ret)
+ return ret;
+
ret = vc4_plane_create_additional_planes(drm);
if (ret)
goto unbind_all;
@@ -380,8 +393,6 @@ static int vc4_drm_bind(struct device *dev)
return 0;
unbind_all:
- component_unbind_all(dev, drm);
-
return ret;
}
@@ -389,8 +400,7 @@ static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- drm_dev_unregister(drm);
-
+ drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 1beb96b77b8c..418a8242691f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,7 +14,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -76,6 +76,7 @@ struct vc4_perfmon {
struct vc4_dev {
struct drm_device base;
+ struct device *dev;
bool is_vc5;
@@ -83,9 +84,6 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
- struct vc4_dpi *dpi;
- struct vc4_vec *vec;
- struct vc4_txp *txp;
struct vc4_hang_state *hang_state;
@@ -241,7 +239,7 @@ to_vc4_dev(struct drm_device *dev)
}
struct vc4_bo {
- struct drm_gem_cma_object base;
+ struct drm_gem_dma_object base;
/* seqno of the last job to render using this BO. */
uint64_t seqno;
@@ -290,7 +288,7 @@ struct vc4_bo {
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
+ return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
struct vc4_fence {
@@ -458,6 +456,8 @@ to_vc4_encoder(struct drm_encoder *encoder)
}
struct vc4_crtc_data {
+ const char *debugfs_name;
+
/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
unsigned int hvs_available_channels;
@@ -475,8 +475,6 @@ struct vc4_pv_data {
u8 pixels_per_clock;
enum vc4_encoder_type encoder_types[4];
- const char *debugfs_name;
-
};
struct vc4_crtc {
@@ -604,14 +602,14 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_cma_object **bo;
+ struct drm_gem_dma_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
* the binner temporary storage, this is all the BOs written
* by the job.
*/
- struct drm_gem_cma_object *rcl_write_bo[4];
+ struct drm_gem_dma_object *rcl_write_bo[4];
uint32_t rcl_write_bo_count;
/* Pointers for our position in vc4->job_list */
@@ -630,7 +628,7 @@ struct vc4_exec_info {
/* This is the BO where we store the validated command lists, shader
* records, and uniforms.
*/
- struct drm_gem_cma_object *exec_bo;
+ struct drm_gem_dma_object *exec_bo;
/**
* This tracks the per-shader-record state (packet 64) that
@@ -843,6 +841,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
+int vc4_bo_debugfs_init(struct drm_minor *minor);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -850,7 +849,6 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs);
-void vc4_crtc_destroy(struct drm_crtc *crtc);
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -861,6 +859,8 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_send_vblank(struct drm_crtc *crtc);
+int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);
@@ -868,25 +868,27 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
-void vc4_debugfs_add_file(struct drm_device *drm,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data);
-void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *filename,
- struct debugfs_regset32 *regset);
+int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data);
+int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *filename,
+ struct debugfs_regset32 *regset);
#else
-static inline void vc4_debugfs_add_file(struct drm_device *drm,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data)
+static inline int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data)
{
+ return 0;
}
-static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *filename,
- struct debugfs_regset32 *regset)
+static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *filename,
+ struct debugfs_regset32 *regset)
{
+ return 0;
}
#endif
@@ -952,13 +954,15 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
+int vc4_hvs_debugfs_init(struct drm_minor *minor);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);
/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
- enum drm_plane_type type);
+ enum drm_plane_type type,
+ uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
@@ -973,6 +977,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
+int vc4_v3d_debugfs_init(struct drm_minor *minor);
/* vc4_validate.c */
int
@@ -984,19 +989,19 @@ vc4_validate_bin_cl(struct drm_device *dev,
int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
-struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
uint32_t hindex);
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
bool vc4_check_tex_size(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *fbo,
+ struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj);
/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index b7b2c76770dc..878e05d79e81 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -549,10 +549,13 @@ struct vc4_dsi_variant {
/* General DSI hardware state. */
struct vc4_dsi {
+ struct vc4_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+
+ struct kref kref;
+
struct platform_device *pdev;
- struct mipi_dsi_host dsi_host;
- struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct list_head bridge_chain;
@@ -600,6 +603,12 @@ struct vc4_dsi {
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
+static inline struct vc4_dsi *
+to_vc4_dsi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_dsi, encoder.base);
+}
+
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
@@ -644,18 +653,6 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
-/* VC4 DSI encoder KMS struct */
-struct vc4_dsi_encoder {
- struct vc4_encoder base;
- struct vc4_dsi *dsi;
-};
-
-static inline struct vc4_dsi_encoder *
-to_vc4_dsi_encoder(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct vc4_dsi_encoder, base.base);
-}
-
static const struct debugfs_reg32 dsi0_regs[] = {
VC4_REG32(DSI0_CTRL),
VC4_REG32(DSI0_STAT),
@@ -795,8 +792,7 @@ dsi_esc_timing(u32 ns)
static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
{
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct device *dev = &dsi->pdev->dev;
struct drm_bridge *iter;
@@ -839,8 +835,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock);
unsigned long parent_rate = clk_get_rate(phy_parent);
unsigned long pixel_clock_hz = mode->clock * 1000;
@@ -875,8 +870,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct device *dev = &dsi->pdev->dev;
bool debug_dump_regs = false;
struct drm_bridge *iter;
@@ -1378,6 +1372,24 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static int vc4_dsi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, dsi->variant->debugfs_name,
+ &dsi->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
+ .late_register = vc4_dsi_late_register,
+};
+
static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
.port = 1,
.debugfs_name = "dsi1_regs",
@@ -1564,26 +1576,50 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
+static void vc4_dsi_release(struct kref *kref)
+{
+ struct vc4_dsi *dsi =
+ container_of(kref, struct vc4_dsi, kref);
+
+ kfree(dsi);
+}
+
+static void vc4_dsi_get(struct vc4_dsi *dsi)
+{
+ kref_get(&dsi->kref);
+}
+
+static void vc4_dsi_put(struct vc4_dsi *dsi)
+{
+ kref_put(&dsi->kref, &vc4_dsi_release);
+}
+
+static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
+{
+ struct vc4_dsi *dsi = ptr;
+
+ vc4_dsi_put(dsi);
+}
+
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- struct vc4_dsi_encoder *vc4_dsi_encoder;
+ struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- dsi->variant = of_device_get_match_data(dev);
+ vc4_dsi_get(dsi);
- vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
- GFP_KERNEL);
- if (!vc4_dsi_encoder)
- return -ENOMEM;
+ ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
+ if (ret)
+ return ret;
+
+ dsi->variant = of_device_get_match_data(dev);
INIT_LIST_HEAD(&dsi->bridge_chain);
- vc4_dsi_encoder->base.type = dsi->variant->port ?
- VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
- vc4_dsi_encoder->dsi = dsi;
- dsi->encoder = &vc4_dsi_encoder->base.base;
+ dsi->encoder.type = dsi->variant->port ?
+ VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
dsi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dsi->regs))
@@ -1687,7 +1723,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ dsi->bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
@@ -1702,10 +1738,20 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
- drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
+ ret = drmm_encoder_init(drm, encoder,
+ &vc4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI,
+ NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, dsi->bridge, NULL, 0);
if (ret)
return ret;
/* Disable the atomic helper calls into the bridge. We
@@ -1713,11 +1759,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
* from our driver, since we need to sequence them within the
* encoder's enable/disable paths.
*/
- list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
-
- vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
-
- pm_runtime_enable(dev);
+ list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
return 0;
}
@@ -1726,15 +1768,13 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
-
- pm_runtime_disable(dev);
+ struct drm_encoder *encoder = &dsi->encoder.base;
/*
* Restore the bridge_chain so the bridge detach procedure can happen
* normally.
*/
- list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
- drm_encoder_cleanup(dsi->encoder);
+ list_splice_init(&dsi->bridge_chain, &encoder->bridge_chain);
}
static const struct component_ops vc4_dsi_ops = {
@@ -1747,11 +1787,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dev_set_drvdata(dev, dsi);
+ kref_init(&dsi->kref);
dsi->pdev = pdev;
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
@@ -1766,6 +1807,8 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
+ vc4_dsi_put(dsi);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index fe10d9c3fff8..628d40ff3aa1 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -126,7 +126,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
goto err_delete_handle;
}
bo_state[i].handle = handle;
- bo_state[i].paddr = vc4_bo->base.paddr;
+ bo_state[i].paddr = vc4_bo->base.dma_addr;
bo_state[i].size = vc4_bo->base.base.size;
}
@@ -764,7 +764,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
@@ -797,7 +797,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_cma_object *)bo;
+ exec->bo[i] = (struct drm_gem_dma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
@@ -917,16 +917,16 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
&exec->unref_list);
- exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+ exec->ct0ca = exec->exec_bo->dma_addr + bin_offset;
exec->bin_u = bin;
exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
- exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+ exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset;
exec->shader_rec_size = args->shader_rec_size;
exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
- exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+ exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset;
exec->uniforms_size = args->uniforms_size;
ret = vc4_validate_bin_cl(dev,
@@ -1308,6 +1308,7 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
@@ -1325,10 +1326,15 @@ int vc4_gem_init(struct drm_device *dev)
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
- mutex_init(&vc4->power_lock);
+ ret = drmm_mutex_init(dev, &vc4->power_lock);
+ if (ret)
+ return ret;
INIT_LIST_HEAD(&vc4->purgeable.list);
- mutex_init(&vc4->purgeable.lock);
+
+ ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
+ if (ret)
+ return ret;
return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 592c3b5d03e6..64f9feabf43e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
@@ -41,7 +42,6 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/rational.h>
@@ -124,6 +124,23 @@ static unsigned long long
vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc, enum vc4_hdmi_output_format fmt);
+static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_info *display = &vc4_hdmi->connector.display_info;
+
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
+ if (!display->is_hdmi)
+ return false;
+
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ return true;
+}
+
static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
@@ -146,7 +163,12 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct vc4_hdmi *vc4_hdmi = node->info_ent->data;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
drm_print_regset32(&p, &vc4_hdmi->hd_regset);
@@ -157,12 +179,23 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
drm_print_regset32(&p, &vc4_hdmi->ram_regset);
drm_print_regset32(&p, &vc4_hdmi->rm_regset);
+ drm_dev_exit(idx);
+
return 0;
}
static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
+
+ /*
+ * We can be called by our bind callback, when the
+ * connector->dev pointer might not be initialised yet.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -179,11 +212,23 @@ static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
+
+ /*
+ * We can be called by our bind callback, when the
+ * connector->dev pointer might not be initialised yet.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
reset_control_reset(vc4_hdmi->reset);
@@ -195,15 +240,31 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
{
- unsigned long cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ unsigned long cec_rate;
unsigned long flags;
u16 clk_cnt;
u32 value;
+ int idx;
+
+ /*
+ * This function is called by our runtime_resume implementation
+ * and thus at bind time, when we haven't registered our
+ * connector yet and thus don't have a pointer to the DRM
+ * device.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
+
+ cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -219,58 +280,180 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
#else
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
-static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder);
+static int reset_pipe(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
-static enum drm_connector_status
-vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+static int vc4_hdmi_reset_link(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *drm = connector->dev;
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
- bool connected = false;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ bool scrambling_needed;
+ u8 config;
+ int ret;
- mutex_lock(&vc4_hdmi->mutex);
+ if (!connector)
+ return 0;
+
+ ret = drm_modeset_lock(&drm->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->state;
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = crtc->state;
+ if (!crtc_state->active)
+ return 0;
+
+ if (!vc4_hdmi_supports_scrambling(encoder))
+ return 0;
+
+ scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode,
+ vc4_hdmi->output_bpc,
+ vc4_hdmi->output_format);
+ if (!scrambling_needed)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ drm_err(drm, "Failed to read TMDS config: %d\n", ret);
+ return 0;
+ }
+
+ if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed)
+ return 0;
+
+ /*
+ * HDMI 2.0 says that one should not send scrambled data
+ * prior to configuring the sink scrambling, and that
+ * TMDS clock/data transmission should be suspended when
+ * changing the TMDS clock rate in the sink. So let's
+ * just do a full modeset here, even though some sinks
+ * would be perfectly happy if we were to just reconfigure
+ * the SCDC settings on the fly.
+ */
+ return reset_pipe(crtc, ctx);
+}
+
+static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_connector_status status)
+{
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct edid *edid;
+
+ /*
+ * NOTE: This function should really be called with
+ * vc4_hdmi->mutex held, but doing so results in reentrancy
+ * issues since cec_s_phys_addr_from_edid might call
+ * .adap_enable, which leads to that funtion being called with
+ * our mutex held.
+ *
+ * A similar situation occurs with
+ * drm_atomic_helper_connector_hdmi_reset_link() that will call
+ * into our KMS hooks if the scrambling was enabled.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
+
+ if (status == connector_status_disconnected) {
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ return;
+ }
+
+ edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ if (!edid)
+ return;
+
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ kfree(edid);
+
+ vc4_hdmi_reset_link(connector, ctx);
+}
+
+static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+ * doing so results in reentrancy issues since
+ * vc4_hdmi_handle_hotplug() can call into other functions that
+ * would take the mutex while it's held here.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
if (vc4_hdmi->hpd_gpio) {
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
- connected = true;
+ status = connector_status_connected;
} else {
if (vc4_hdmi->variant->hp_detect &&
vc4_hdmi->variant->hp_detect(vc4_hdmi))
- connected = true;
- }
-
- if (connected) {
- if (connector->status != connector_status_connected) {
- struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
-
- if (edid) {
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- kfree(edid);
- }
- }
-
- vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
- mutex_unlock(&vc4_hdmi->mutex);
- return connector_status_connected;
+ status = connector_status_connected;
}
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ vc4_hdmi_handle_hotplug(vc4_hdmi, ctx, status);
pm_runtime_put(&vc4_hdmi->pdev->dev);
- mutex_unlock(&vc4_hdmi->mutex);
- return connector_status_disconnected;
-}
-static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
+ return status;
}
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -279,14 +462,21 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
int ret = 0;
struct edid *edid;
- mutex_lock(&vc4_hdmi->mutex);
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+ * doing so results in reentrancy issues since
+ * cec_s_phys_addr_from_edid might call .adap_enable, which
+ * leads to that function being called with our mutex held.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- if (!edid) {
- ret = -ENODEV;
- goto out;
- }
+ if (!edid)
+ return -ENODEV;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -294,7 +484,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
if (vc4_hdmi->disable_4kp60) {
struct drm_device *drm = connector->dev;
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (vc4_hdmi_mode_needs_scrambling(mode, 8, VC4_HDMI_OUTPUT_RGB)) {
@@ -304,9 +494,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
}
}
-out:
- mutex_unlock(&vc4_hdmi->mutex);
-
return ret;
}
@@ -378,15 +565,14 @@ vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
- .detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
+ .detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
};
@@ -398,10 +584,13 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
int ret;
- drm_connector_init_with_ddc(dev, connector,
- &vc4_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- vc4_hdmi->ddc);
+ ret = drmm_connector_init(dev, connector,
+ &vc4_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ vc4_hdmi->ddc);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -444,25 +633,34 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = type - 0x80;
unsigned long flags;
+ int ret = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
- if (!poll)
- return 0;
+ if (poll) {
+ ret = wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
+ BIT(packet_id)), 100);
+ }
- return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
- BIT(packet_id)), 100);
+ drm_dev_exit(idx);
+ return ret;
}
static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = frame->any.type - 0x80;
const struct vc4_hdmi_register *ram_packet_start =
&vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START];
@@ -475,6 +673,10 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
unsigned long flags;
ssize_t len, i;
int ret;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE),
@@ -482,12 +684,12 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
if (len < 0)
- return;
+ goto out;
ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
- return;
+ goto out;
}
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -523,6 +725,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
BIT(packet_id)), 100);
if (ret)
DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+
+out:
+ drm_dev_exit(idx);
}
static void vc4_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
@@ -649,35 +854,19 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_hdr_infoframe(encoder);
}
-static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_info *display = &vc4_hdmi->connector.display_info;
-
- lockdep_assert_held(&vc4_hdmi->mutex);
-
- if (!display->is_hdmi)
- return false;
-
- if (!display->hdmi.scdc.supported ||
- !display->hdmi.scdc.scrambling.supported)
- return false;
-
- return true;
-}
-
#define SCRAMBLING_POLLING_DELAY_MS 1000
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long flags;
+ int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
- if (!vc4_hdmi_supports_scrambling(encoder, mode))
+ if (!vc4_hdmi_supports_scrambling(encoder))
return;
if (!vc4_hdmi_mode_needs_scrambling(mode,
@@ -685,6 +874,9 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
vc4_hdmi->output_format))
return;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
@@ -693,6 +885,8 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
VC5_HDMI_SCRAMBLER_CTL_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
vc4_hdmi->scdc_enabled = true;
queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
@@ -702,7 +896,9 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
@@ -714,6 +910,9 @@ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
@@ -721,6 +920,8 @@ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
drm_scdc_set_scrambling(vc4_hdmi->ddc, false);
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_scrambling_wq(struct work_struct *work)
@@ -743,12 +944,17 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
vc4_hdmi->packet_ram_enabled = false;
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
@@ -766,6 +972,9 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
vc4_hdmi_disable_scrambling(encoder);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -773,11 +982,16 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
@@ -793,6 +1007,9 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
if (ret < 0)
DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -800,8 +1017,13 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 csc_ctl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -836,6 +1058,8 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
/*
@@ -920,6 +1144,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
unsigned long flags;
@@ -928,6 +1153,10 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
u32 csc_chan_ctl = 0;
u32 csc_ctl = VC5_MT_CP_CSC_CTL_ENABLE | VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
VC5_MT_CP_CSC_CTL_MODE);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -970,12 +1199,15 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -995,6 +1227,10 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
u32 reg;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1027,12 +1263,15 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_MISC_CONTROL, reg);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
const struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1056,6 +1295,10 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
unsigned char gcp;
bool gcp_en;
u32 reg;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1132,13 +1375,20 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 drift;
int ret;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1167,25 +1417,32 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long tmds_char_rate = vc4_conn_state->tmds_char_rate;
unsigned long bvb_rate, hsm_rate;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
@@ -1206,13 +1463,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- goto out;
+ goto err_dev_exit;
}
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
- goto out;
+ goto err_dev_exit;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
@@ -1264,6 +1521,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
if (vc4_hdmi->variant->set_timings)
vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
+ drm_dev_exit(idx);
+
mutex_unlock(&vc4_hdmi->mutex);
return;
@@ -1272,6 +1531,8 @@ err_disable_pixel_clock:
clk_disable_unprepare(vc4_hdmi->pixel_clock);
err_put_runtime_pm:
pm_runtime_put(&vc4_hdmi->pdev->dev);
+err_dev_exit:
+ drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
return;
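
For functions that can fail after taking the mutex and entering the device, the patch adds a dedicated err_dev_exit label so every error path still drops the drm_dev_enter() reference. A hedged sketch of that unwind ordering, with struct foo and the failure condition invented for illustration:

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct foo {
        struct drm_device *drm;
        struct mutex lock;
        bool enabled;
};

/* Sketch only: mirrors the mutex -> drm_dev_enter() -> error-unwind order. */
static int foo_configure(struct foo *priv, bool fail)
{
        int idx, ret = 0;

        mutex_lock(&priv->lock);

        /* Nothing to do if the device is already gone. */
        if (!drm_dev_enter(priv->drm, &idx))
                goto out;

        if (fail) {                     /* stand-in for a failing hardware setup step */
                ret = -EINVAL;
                goto err_dev_exit;
        }

        priv->enabled = true;           /* stand-in for the actual register writes */

err_dev_exit:
        drm_dev_exit(idx);
out:
        mutex_unlock(&priv->lock);
        return ret;
}
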
@@ -1281,14 +1542,19 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
if (vc4_hdmi->variant->csc_setup)
vc4_hdmi->variant->csc_setup(vc4_hdmi, conn_state, mode);
@@ -1296,6 +1562,9 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1303,15 +1572,20 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
@@ -1370,6 +1644,9 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
vc4_hdmi_recenter_fifo(vc4_hdmi);
vc4_hdmi_enable_scrambling(encoder);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1692,6 +1969,26 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.mode_valid = vc4_hdmi_encoder_mode_valid,
};
+static int vc4_hdmi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ int ret;
+
+ ret = vc4_debugfs_add_file(drm->primary, variant->debugfs_name,
+ vc4_hdmi_debugfs_regs,
+ vc4_hdmi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
+ .late_register = vc4_hdmi_late_register,
+};
+
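
Moving the debugfs registration into a .late_register hook ties it to the encoder's registration with userspace instead of the bind callback. A hedged sketch of that shape follows; the foo_enabled knob and the encoder funcs are invented, and only debugfs_create_bool() plus the primary minor's debugfs_root are assumed kernel APIs:

#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <linux/debugfs.h>

static bool foo_enabled;        /* hypothetical debugfs knob */

static int foo_encoder_late_register(struct drm_encoder *encoder)
{
        /* Per-device debugfs files usually live under the primary minor. */
        debugfs_create_bool("foo_enabled", 0644,
                            encoder->dev->primary->debugfs_root,
                            &foo_enabled);

        return 0;
}

static const struct drm_encoder_funcs foo_encoder_funcs = {
        .late_register = foo_encoder_late_register,
};
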
static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
{
int i;
@@ -1718,13 +2015,20 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 hotplug;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return false;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
hotplug = HDMI_READ(HDMI_HOTPLUG);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
}
@@ -1732,10 +2036,16 @@ static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
{
- u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ u32 hsm_clock;
unsigned long flags;
unsigned long n, m;
+ int idx;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
rational_best_approximation(hsm_clock, samplerate,
VC4_HD_MAI_SMP_N_MASK >>
VC4_HD_MAI_SMP_N_SHIFT,
@@ -1748,6 +2058,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
@@ -1803,13 +2115,21 @@ static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int ret = 0;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- mutex_unlock(&vc4_hdmi->mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_dev_exit;
}
vc4_hdmi->audio.streaming = true;
@@ -1826,9 +2146,12 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data)
if (vc4_hdmi->variant->phy_rng_enable)
vc4_hdmi->variant->phy_rng_enable(vc4_hdmi);
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
mutex_unlock(&vc4_hdmi->mutex);
- return 0;
+ return ret;
}
static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
@@ -1857,10 +2180,15 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
@@ -1876,6 +2204,9 @@ static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
vc4_hdmi->audio.streaming = false;
vc4_hdmi_audio_reset(vc4_hdmi);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1923,6 +2254,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
struct hdmi_codec_params *params)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
@@ -1931,15 +2263,22 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
u32 channel_map;
u32 mai_audio_format;
u32 mai_sample_rate;
+ int ret = 0;
+ int idx;
dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- mutex_unlock(&vc4_hdmi->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_dev_exit;
}
vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate);
@@ -1996,9 +2335,12 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
memcpy(&vc4_hdmi->audio.infoframe, &params->cea, sizeof(params->cea));
vc4_hdmi_set_audio_infoframe(encoder);
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
mutex_unlock(&vc4_hdmi->mutex);
- return 0;
+ return ret;
}
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
@@ -2061,6 +2403,14 @@ static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.i2s = 1,
};
+static void vc4_hdmi_audio_codec_release(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+ vc4_hdmi->audio.codec_pdev = NULL;
+}
+
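
vc4_hdmi_audio_codec_release() replaces the open-coded cleanup in the removed vc4_hdmi_audio_exit(): the unregistration now runs automatically when the backing struct device is unbound. A minimal sketch of the devm_add_action_or_reset() pattern, with the codec name and helpers as placeholders:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void foo_codec_release(void *ptr)
{
        struct platform_device *codec = ptr;

        platform_device_unregister(codec);
}

static int foo_register_codec(struct device *dev)
{
        struct platform_device *codec;

        /* "foo-audio-codec" is a made-up device name for illustration. */
        codec = platform_device_register_data(dev, "foo-audio-codec",
                                              PLATFORM_DEVID_AUTO, NULL, 0);
        if (IS_ERR(codec))
                return PTR_ERR(codec);

        /*
         * Unregisters the codec when @dev is unbound; if adding the action
         * fails, the release callback is invoked immediately instead.
         */
        return devm_add_action_or_reset(dev, foo_codec_release, codec);
}
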
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2073,6 +2423,26 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
int index, len;
int ret;
+ /*
+ * ASoC makes it a bit hard to retrieve a pointer to the
+ * vc4_hdmi structure. Registering the card will overwrite our
+ * device drvdata with a pointer to the snd_soc_card structure,
+ * which can then be used to retrieve whatever drvdata we want
+ * to associate.
+ *
+ * However, that doesn't fly when we don't register an ASoC card
+ * (because of an old DT that is missing the dmas properties, for
+ * example): in that case the card isn't registered and the device
+ * drvdata isn't set.
+ *
+ * We can deal with both cases by making sure the snd_soc_card
+ * pointer and the vc4_hdmi structure point to the same memory
+ * address, so we can treat them interchangeably without any
+ * issue.
+ */
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
+
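
The two BUILD_BUG_ON() checks just above enforce the layout trick described in the comment: as long as the nested members sit at offset zero, a pointer to the card, to the audio state, or to the outer structure all refer to the same address. A self-contained toy example of the same trick, with all types and names invented:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct foo_audio {
        struct { int dummy; } card;     /* stand-in for struct snd_soc_card */
        bool streaming;
};

struct foo_device {
        struct foo_audio audio;         /* must remain the first member */
        int other_state;
};

static inline struct foo_device *foo_from_card(void *card)
{
        BUILD_BUG_ON(offsetof(struct foo_audio, card) != 0);
        BUILD_BUG_ON(offsetof(struct foo_device, audio) != 0);

        /* Same address, three different views of it. */
        return card;
}
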
if (!of_find_property(dev->of_node, "dmas", &len) || !len) {
dev_warn(dev,
"'dmas' DT property is missing or empty, no HDMI audio\n");
@@ -2102,6 +2472,30 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
vc4_hdmi->audio.dma_data.maxburst = 2;
+ /*
+ * NOTE: Strictly speaking, we should probably use a DRM-managed
+ * registration here so that the audio components are only removed
+ * once the DRM driver no longer has any users.
+ *
+ * However, the ASoC core uses a number of devm_kzalloc calls
+ * when registering, even when using non-device-managed
+ * functions (such as in snd_soc_register_component()).
+ *
+ * If we call snd_soc_unregister_component() in a DRM-managed
+ * action, the device-managed actions have already been executed
+ * and thus we would access memory that has been freed.
+ *
+ * Using device-managed hooks here probably leaves us open to a
+ * bunch of issues if userspace still has a handle on the ALSA
+ * device when the device is removed. However, this is mitigated
+ * by the use of drm_dev_enter()/drm_dev_exit() in the audio
+ * path to prevent access to the device resources once the
+ * device is gone.
+ *
+ * Finally, the vc4_hdmi structure is DRM-managed and thus only
+ * freed once the last user has closed the DRM device file. It
+ * should therefore outlive ALSA in most situations.
+ */
ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0);
if (ret) {
dev_err(dev, "Could not register PCM component: %d\n", ret);
@@ -2125,6 +2519,10 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
vc4_hdmi->audio.codec_pdev = codec_pdev;
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2163,12 +2561,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
-static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
-{
- platform_device_unregister(vc4_hdmi->audio.codec_pdev);
- vc4_hdmi->audio.codec_pdev = NULL;
-}
-
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -2191,21 +2583,19 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
unsigned int hpd_con = platform_get_irq_byname(pdev, "hpd-connected");
unsigned int hpd_rm = platform_get_irq_byname(pdev, "hpd-removed");
- ret = request_threaded_irq(hpd_con,
- NULL,
- vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
- "vc4 hdmi hpd connected", vc4_hdmi);
+ ret = devm_request_threaded_irq(&pdev->dev, hpd_con,
+ NULL,
+ vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
+ "vc4 hdmi hpd connected", vc4_hdmi);
if (ret)
return ret;
- ret = request_threaded_irq(hpd_rm,
- NULL,
- vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
- "vc4 hdmi hpd disconnected", vc4_hdmi);
- if (ret) {
- free_irq(hpd_con, vc4_hdmi);
+ ret = devm_request_threaded_irq(&pdev->dev, hpd_rm,
+ NULL,
+ vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
+ "vc4 hdmi hpd disconnected", vc4_hdmi);
+ if (ret)
return ret;
- }
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -2213,16 +2603,6 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static void vc4_hdmi_hotplug_exit(struct vc4_hdmi *vc4_hdmi)
-{
- struct platform_device *pdev = vc4_hdmi->pdev;
-
- if (vc4_hdmi->variant->external_irq_controller) {
- free_irq(platform_get_irq_byname(pdev, "hpd-connected"), vc4_hdmi);
- free_irq(platform_get_irq_byname(pdev, "hpd-removed"), vc4_hdmi);
- }
-}
-
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
{
@@ -2296,6 +2676,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi)
{
u32 cntrl1;
+ /*
+ * We don't need to protect the register access with
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we do
+ * is unregister the interrupt handler, and only then unregister
+ * the DRM device. drm_dev_enter() would thus always succeed if
+ * we got here.
+ */
+
lockdep_assert_held(&vc4_hdmi->hw_lock);
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
@@ -2324,6 +2715,17 @@ static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi)
lockdep_assert_held(&vc4_hdmi->hw_lock);
+ /*
+ * We don't need to protect the register access with
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we do
+ * is unregister the interrupt handler, and only then unregister
+ * the DRM device. drm_dev_enter() would thus always succeed if
+ * we got here.
+ */
+
vc4_hdmi->cec_rx_msg.len = 0;
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_cec_read_msg(vc4_hdmi, cntrl1);
@@ -2355,6 +2757,17 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
irqreturn_t ret;
u32 cntrl5;
+ /*
+ * We don't need to protect the register access with
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we do
+ * is unregister the interrupt handler, and only then unregister
+ * the DRM device. drm_dev_enter() would thus always succeed if
+ * we got here.
+ */
+
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
@@ -2375,26 +2788,29 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
unsigned long flags;
u32 val;
int ret;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
- if (ret)
+ if (ret) {
+ drm_dev_exit(idx);
return ret;
+ }
+
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -2430,24 +2846,28 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+ drm_dev_exit(idx);
+
return 0;
}
static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
+
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -2459,8 +2879,12 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+
pm_runtime_put(&vc4_hdmi->pdev->dev);
+ drm_dev_exit(idx);
+
return 0;
}
@@ -2475,24 +2899,27 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CNTRL_1,
(HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
(log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+
+ drm_dev_exit(idx);
return 0;
}
@@ -2505,23 +2932,19 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned long flags;
u32 val;
unsigned int i;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
if (msg->len > 16) {
drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
+ drm_dev_exit(idx);
return -ENOMEM;
}
+ mutex_lock(&vc4_hdmi->mutex);
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
for (i = 0; i < msg->len; i += 4)
@@ -2541,6 +2964,8 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+ drm_dev_exit(idx);
return 0;
}
@@ -2551,6 +2976,14 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
.adap_transmit = vc4_hdmi_cec_adap_transmit,
};
+static void vc4_hdmi_cec_release(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ cec_unregister_adapter(vc4_hdmi->cec_adap);
+ vc4_hdmi->cec_adap = NULL;
+}
+
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
struct cec_connector_info conn_info;
@@ -2575,73 +3008,82 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
if (vc4_hdmi->variant->external_irq_controller) {
- ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
- vc4_cec_irq_handler_rx_bare,
- vc4_cec_irq_handler_rx_thread, 0,
- "vc4 hdmi cec rx", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
- ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
- vc4_cec_irq_handler_tx_bare,
- vc4_cec_irq_handler_tx_thread, 0,
- "vc4 hdmi cec tx", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_remove_cec_rx_handler;
+ goto err_delete_cec_adap;
} else {
- ret = request_threaded_irq(platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
- goto err_remove_handlers;
-
- return 0;
+ goto err_delete_cec_adap;
-err_remove_handlers:
- if (vc4_hdmi->variant->external_irq_controller)
- free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
- else
- free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+ /*
+ * NOTE: Strictly speaking, we should probably use a DRM-managed
+ * registration here so that the CEC adapter is only removed once
+ * the DRM driver no longer has any users.
+ *
+ * However, the CEC framework already cleans up the CEC adapter
+ * only when the last user has closed its file descriptor, so we
+ * don't need to handle it in DRM.
+ *
+ * By the time the device-managed hook is executed, we will have
+ * given up our reference to the CEC adapter and therefore won't
+ * really care when it's actually freed.
+ *
+ * There's still one problematic sequence: we unregister the CEC
+ * adapter, but userspace keeps a handle on it while no longer
+ * holding the DRM device open, for some reason. In such a
+ * case, our vc4_hdmi structure will be freed, but the
+ * cec_adapter structure will have a dangling pointer to what
+ * used to be our HDMI controller. If we get a CEC call at that
+ * moment, we could end up with a use-after-free. Fortunately,
+ * the CEC framework already handles this too, by calling
+ * cec_is_registered() in cec_ioctl() and cec_poll().
+ */
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
+ if (ret)
+ return ret;
-err_remove_cec_rx_handler:
- if (vc4_hdmi->variant->external_irq_controller)
- free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+ return 0;
err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);
return ret;
}
-
-static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
-{
- struct platform_device *pdev = vc4_hdmi->pdev;
-
- if (vc4_hdmi->variant->external_irq_controller) {
- free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
- free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
- } else {
- free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
- }
-
- cec_unregister_adapter(vc4_hdmi->cec_adap);
-}
#else
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
-
-static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {};
#endif
-static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
+static void vc4_hdmi_free_regset(struct drm_device *drm, void *ptr)
+{
+ struct debugfs_reg32 *regs = ptr;
+
+ kfree(regs);
+}
+
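
vc4_hdmi_free_regset() turns the register-set allocations into DRM-managed ones: the memory is only released when the drm_device itself is, which is why the open-coded kfree() calls disappear from the unbind path below. A sketch of the drmm_add_action_or_reset() idiom, with all foo_* names invented:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static void foo_free_buffer(struct drm_device *drm, void *ptr)
{
        kfree(ptr);
}

static u32 *foo_alloc_buffer(struct drm_device *drm, size_t count)
{
        u32 *buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
        int ret;

        if (!buf)
                return ERR_PTR(-ENOMEM);

        /*
         * On failure the action runs right away and frees @buf, so there
         * is neither a leak nor a double free on this error path.
         */
        ret = drmm_add_action_or_reset(drm, foo_free_buffer, buf);
        if (ret)
                return ERR_PTR(ret);

        return buf;
}
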
+static int vc4_hdmi_build_regset(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi,
struct debugfs_regset32 *regset,
enum vc4_hdmi_regs reg)
{
@@ -2649,6 +3091,7 @@ static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
struct debugfs_reg32 *regs, *new_regs;
unsigned int count = 0;
unsigned int i;
+ int ret;
regs = kcalloc(variant->num_registers, sizeof(*regs),
GFP_KERNEL);
@@ -2674,10 +3117,15 @@ static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
regset->regs = new_regs;
regset->nregs = count;
+ ret = drmm_add_action_or_reset(drm, vc4_hdmi_free_regset, new_regs);
+ if (ret)
+ return ret;
+
return 0;
}
-static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_init_resources(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
@@ -2691,11 +3139,11 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
if (IS_ERR(vc4_hdmi->hd_regs))
return PTR_ERR(vc4_hdmi->hd_regs);
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
@@ -2718,7 +3166,8 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+static int vc5_hdmi_init_resources(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
@@ -2820,42 +3269,42 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->reset);
}
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
if (ret)
return ret;
return 0;
}
-static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
@@ -2898,6 +3347,13 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
return 0;
}
+static void vc4_hdmi_put_ddc_device(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ put_device(&vc4_hdmi->ddc->dev);
+}
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2908,10 +3364,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
struct device_node *ddc_node;
int ret;
- vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
+ vc4_hdmi = drmm_kzalloc(drm, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
- mutex_init(&vc4_hdmi->mutex);
+
+ ret = drmm_mutex_init(drm, &vc4_hdmi->mutex);
+ if (ret)
+ return ret;
+
spin_lock_init(&vc4_hdmi->hw_lock);
INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
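
The bind path now allocates its private structure with drmm_kzalloc() and initialises the mutex through drmm_mutex_init(), so both live as long as the drm_device rather than the binding struct device. A hedged sketch of that allocation pattern; struct foo and foo_create() are placeholders:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
        struct mutex lock;
        int state;
};

static struct foo *foo_create(struct drm_device *drm)
{
        struct foo *priv;
        int ret;

        /* Freed automatically when the drm_device is released. */
        priv = drmm_kzalloc(drm, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        ret = drmm_mutex_init(drm, &priv->lock);
        if (ret)
                return ERR_PTR(ret);

        return priv;
}
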
@@ -2935,7 +3395,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
vc4_hdmi->scdc_enabled = true;
- ret = variant->init_resources(vc4_hdmi);
+ ret = variant->init_resources(drm, vc4_hdmi);
if (ret)
return ret;
@@ -2952,13 +3412,16 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return -EPROBE_DEFER;
}
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_put_ddc_device, vc4_hdmi);
+ if (ret)
+ return ret;
+
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
if (IS_ERR(vc4_hdmi->hpd_gpio)) {
- ret = PTR_ERR(vc4_hdmi->hpd_gpio);
- goto err_put_ddc;
+ return PTR_ERR(vc4_hdmi->hpd_gpio);
}
vc4_hdmi->disable_wifi_frequencies =
@@ -2972,17 +3435,17 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
/*
- * We need to have the device powered up at this point to call
- * our reset hook and for the CEC init.
+ * We need to have the device powered up at this point to call
+ * our reset hook and for the CEC init.
*/
- ret = vc4_hdmi_runtime_resume(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_put_ddc;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ return ret;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -2992,92 +3455,43 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
}
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+ ret = drmm_encoder_init(drm, encoder,
+ &vc4_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+ if (ret)
+ goto err_put_runtime_pm;
+
drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
ret = vc4_hdmi_connector_init(drm, vc4_hdmi);
if (ret)
- goto err_destroy_encoder;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_hotplug_init(vc4_hdmi);
if (ret)
- goto err_destroy_conn;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_cec_init(vc4_hdmi);
if (ret)
- goto err_free_hotplug;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_audio_init(vc4_hdmi);
if (ret)
- goto err_free_cec;
-
- vc4_debugfs_add_file(drm, variant->debugfs_name,
- vc4_hdmi_debugfs_regs,
- vc4_hdmi);
+ goto err_put_runtime_pm;
pm_runtime_put_sync(dev);
return 0;
-err_free_cec:
- vc4_hdmi_cec_exit(vc4_hdmi);
-err_free_hotplug:
- vc4_hdmi_hotplug_exit(vc4_hdmi);
-err_destroy_conn:
- vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
-err_destroy_encoder:
- drm_encoder_cleanup(encoder);
+err_put_runtime_pm:
pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
-err_put_ddc:
- put_device(&vc4_hdmi->ddc->dev);
return ret;
}
-static void vc4_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct vc4_hdmi *vc4_hdmi;
-
- /*
- * ASoC makes it a bit hard to retrieve a pointer to the
- * vc4_hdmi structure. Registering the card will overwrite our
- * device drvdata with a pointer to the snd_soc_card structure,
- * which can then be used to retrieve whatever drvdata we want
- * to associate.
- *
- * However, that doesn't fly in the case where we wouldn't
- * register an ASoC card (because of an old DT that is missing
- * the dmas properties for example), then the card isn't
- * registered and the device drvdata wouldn't be set.
- *
- * We can deal with both cases by making sure a snd_soc_card
- * pointer and a vc4_hdmi structure are pointing to the same
- * memory address, so we can treat them indistinctly without any
- * issue.
- */
- BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
- BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
- vc4_hdmi = dev_get_drvdata(dev);
-
- kfree(vc4_hdmi->hdmi_regset.regs);
- kfree(vc4_hdmi->hd_regset.regs);
-
- vc4_hdmi_audio_exit(vc4_hdmi);
- vc4_hdmi_cec_exit(vc4_hdmi);
- vc4_hdmi_hotplug_exit(vc4_hdmi);
- vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
- drm_encoder_cleanup(&vc4_hdmi->encoder.base);
-
- pm_runtime_disable(dev);
-
- put_device(&vc4_hdmi->ddc->dev);
-}
-
static const struct component_ops vc4_hdmi_ops = {
.bind = vc4_hdmi_bind,
- .unbind = vc4_hdmi_unbind,
};
static int vc4_hdmi_dev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index c3ed2b07df23..db823efb2563 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -58,7 +58,8 @@ struct vc4_hdmi_variant {
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
- int (*init_resources)(struct vc4_hdmi *vc4_hdmi);
+ int (*init_resources)(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi);
/* Callback to reset the HDMI block */
void (*reset)(struct vc4_hdmi *vc4_hdmi);
@@ -71,7 +72,7 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
@@ -194,15 +195,7 @@ struct vc4_hdmi {
/**
* @mutex: Mutex protecting the driver access across multiple
- * frameworks (KMS, ALSA).
- *
- * NOTE: While supported, CEC has been left out since
- * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a
- * reentrancy issue between .get_modes (or .detect) and .adap_enable.
- * Since we don't share any state between the CEC hooks and KMS', it's
- * not a big deal. The only trouble might come from updating the CEC
- * clock divider which might be affected by a modeset, but CEC should
- * be resilient to that.
+ * frameworks (KMS, ALSA, CEC).
*/
struct mutex mutex;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index fbaa741dda5f..4ac9f5a2d5f9 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>
#include "vc4_drv.h"
@@ -66,8 +67,12 @@ static const struct debugfs_reg32 hvs_regs[] = {
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
+ struct drm_device *drm = &hvs->vc4->base;
struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
- int i;
+ int idx, i;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
drm_print_regset32(&p, &hvs->regset);
@@ -80,6 +85,8 @@ void vc4_hvs_dump_state(struct vc4_hvs *hvs)
readl((u32 __iomem *)hvs->dlist + i + 2),
readl((u32 __iomem *)hvs->dlist + i + 3));
}
+
+ drm_dev_exit(idx);
}
static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
@@ -175,6 +182,11 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
int ret, i;
u32 __iomem *dst_kernel;
+ /*
+ * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
+ * here since this function is only called from vc4_hvs_bind().
+ */
+
ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
if (ret) {
DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
@@ -199,10 +211,15 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
struct vc4_crtc *vc4_crtc)
{
+ struct drm_device *drm = &hvs->vc4->base;
struct drm_crtc *crtc = &vc4_crtc->base;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ int idx;
u32 i;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
/* The LUT memory is laid out with each HVS channel in order,
* each of which takes 256 writes for R, 256 for G, then 256
* for B.
@@ -217,6 +234,8 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
for (i = 0; i < crtc->gamma_size; i++)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
@@ -238,7 +257,12 @@ static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
+ struct drm_device *drm = &hvs->vc4->base;
u8 field = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return 0;
switch (fifo) {
case 0:
@@ -255,6 +279,7 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
break;
}
+ drm_dev_exit(idx);
return field;
}
@@ -267,6 +292,12 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
if (!vc4->is_vc5)
return output;
+ /*
+ * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
+ * here, but this function is only used during the DRM device
+ * initialization, so we should be fine.
+ */
+
switch (output) {
case 0:
return 0;
@@ -315,12 +346,17 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
struct drm_display_mode *mode, bool oneshot)
{
struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
unsigned int chan = vc4_crtc_state->assigned_channel;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 dispbkgndx;
u32 dispctrl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
@@ -362,14 +398,22 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
*/
vc4_hvs_lut_load(hvs, vc4_crtc);
+ drm_dev_exit(idx);
+
return 0;
}
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
- if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ struct drm_device *drm = &hvs->vc4->base;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
return;
+ if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ goto out;
+
HVS_WRITE(SCALER_DISPCTRLX(chan),
HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
HVS_WRITE(SCALER_DISPCTRLX(chan),
@@ -385,6 +429,9 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
(SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
SCALER_DISPSTATX_EMPTY);
+
+out:
+ drm_dev_exit(idx);
}
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -426,9 +473,15 @@ static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
@@ -513,6 +566,12 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx)) {
+ vc4_crtc_send_vblank(crtc);
+ return;
+ }
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
@@ -583,26 +642,44 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
vc4_hvs_dump_state(hvs);
}
+
+ drm_dev_exit(idx);
}
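
The atomic flush hook above still signals the vblank event when drm_dev_enter() fails, so a pending atomic commit does not wait forever on hardware that is no longer there. A hedged sketch of that branch using the generic helpers (roughly what vc4_crtc_send_vblank() is expected to do internally; foo_crtc_flush() is invented):

#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>
#include <linux/spinlock.h>

static void foo_crtc_flush(struct drm_crtc *crtc)
{
        int idx;

        if (!drm_dev_enter(crtc->dev, &idx)) {
                unsigned long flags;

                /* Device is gone: complete the event instead of programming it. */
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
                if (crtc->state->event) {
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                        crtc->state->event = NULL;
                }
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                return;
        }

        /* ... write the display list to the hardware here ... */

        drm_dev_exit(idx);
}
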
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
- u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+ struct drm_device *drm = &hvs->vc4->base;
+ u32 dispctrl;
+ int idx;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
+ drm_dev_exit(idx);
}
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
- u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+ struct drm_device *drm = &hvs->vc4->base;
+ u32 dispctrl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_report_underrun(struct drm_device *dev)
@@ -623,6 +700,17 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
u32 control;
u32 status;
+ /*
+ * NOTE: We don't need to protect the register access with
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we do
+ * is unregister the interrupt handler, and only then unregister
+ * the DRM device. drm_dev_enter() would thus always succeed if
+ * we got here.
+ */
+
status = HVS_READ(SCALER_DISPSTAT);
control = HVS_READ(SCALER_DISPCTRL);
@@ -645,6 +733,39 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
return irqret;
}
+int vc4_hvs_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ int ret;
+
+ if (!vc4->hvs)
+ return -ENODEV;
+
+ if (!vc4->is_vc5)
+ debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ &vc4->load_tracker_enabled);
+
+ ret = vc4_debugfs_add_file(minor, "hvs_dlists",
+ vc4_hvs_debugfs_dlist, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_file(minor, "hvs_underrun",
+ vc4_hvs_debugfs_underrun, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_regset32(minor, "hvs_regs",
+ &hvs->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -655,10 +776,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
u32 dispctrl;
u32 reg;
- hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
+ hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
return -ENOMEM;
-
hvs->vc4 = vc4;
hvs->pdev = pdev;
@@ -771,12 +891,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
- vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
- NULL);
- vc4_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist,
- NULL);
-
return 0;
}
@@ -786,11 +900,18 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_mm_node *node, *next;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
+ drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
+ drm_mm_remove_node(node);
+
drm_mm_takedown(&vc4->hvs->dlist_mm);
+
+ drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
+ drm_mm_remove_node(node);
drm_mm_takedown(&vc4->hvs->lbm_mm);
clk_disable_unprepare(hvs->core_clk);
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 2eacfb6773d2..1e6db0121ccd 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -105,7 +105,7 @@ vc4_overflow_mem_work(struct work_struct *work)
}
vc4->bin_alloc_overflow = BIT(bin_bo_slot);
- V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
+ V3D_WRITE(V3D_BPOA, bo->base.dma_addr + bin_bo_slot * vc4->bin_alloc_size);
V3D_WRITE(V3D_BPOS, bo->base.base.size);
V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
@@ -295,7 +295,7 @@ vc4_irq_disable(struct drm_device *dev)
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
/* Finish any interrupt handler still in flight. */
- disable_irq(vc4->irq);
+ synchronize_irq(vc4->irq);
cancel_work_sync(&vc4->overflow_mem_work);
}
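
Switching from disable_irq() to synchronize_irq() keeps the interrupt line enabled and merely waits for any handler still running, which is enough once the hardware sources have been masked. A small sketch of that quiesce ordering; foo_irq_quiesce() and the surrounding steps are placeholders:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static void foo_irq_quiesce(unsigned int irq, struct work_struct *work)
{
        /* 1. Mask the interrupt sources in hardware first (not shown). */

        /* 2. Wait for a handler already in flight without disabling the line. */
        synchronize_irq(irq);

        /* 3. Flush any work the handler may have queued. */
        cancel_work_sync(work);
}
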
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index b45dcdfd7306..4419e810103d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -18,7 +18,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 79a74184d732..c4ac2c946238 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -133,6 +133,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
mutex_unlock(&vc4file->perfmon.lock);
+ mutex_destroy(&vc4file->perfmon.lock);
}
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index f27e87a23df7..8b92a45a3c89 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -19,11 +19,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "uapi/drm/vc4_drm.h"
@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -360,7 +360,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
return ret;
for (i = 0; i < num_planes; i++)
- vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
+ vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
/*
* We don't support subpixel source positioning for scaling,
@@ -1220,6 +1220,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
int i;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ goto out;
vc4_state->hw_dlist = dlist;
@@ -1227,6 +1231,9 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
for (i = 0; i < vc4_state->dlist_count; i++)
writel(vc4_state->dlist[i], &dlist[i]);
+ drm_dev_exit(idx);
+
+out:
return vc4_state->dlist_count;
}
@@ -1244,14 +1251,18 @@ u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
uint32_t addr;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
/* We're skipping the address adjustment for negative origin,
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->paddr + fb->offsets[0];
+ addr = bo->dma_addr + fb->offsets[0];
/* Write the new address into the hardware immediately. The
* scanout will start from this address as soon as the FIFO
@@ -1264,6 +1275,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* also use our updated address.
*/
vc4_state->dlist[vc4_state->ptr0_offset] = addr;
+
+ drm_dev_exit(idx);
}
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
@@ -1272,6 +1285,10 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state, *new_vc4_state;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
swap(plane->state->fb, new_plane_state->fb);
plane->state->crtc_x = new_plane_state->crtc_x;
@@ -1334,6 +1351,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
&vc4_state->hw_dlist[vc4_state->pos2_offset]);
writel(vc4_state->dlist[vc4_state->ptr0_offset],
&vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+
+ drm_dev_exit(idx);
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
@@ -1388,7 +1407,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
if (!state->fb)
return 0;
- bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
drm_gem_plane_helper_prepare_fb(plane, state);
@@ -1406,7 +1425,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
if (plane->state->fb == state->fb || !state->fb)
return;
- bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
vc4_bo_dec_usecnt(bo);
}
@@ -1483,8 +1502,6 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .set_property = NULL,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
.atomic_destroy_state = vc4_plane_destroy_state,
@@ -1492,14 +1509,14 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
};
struct drm_plane *vc4_plane_init(struct drm_device *dev,
- enum drm_plane_type type)
+ enum drm_plane_type type,
+ uint32_t possible_crtcs)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
int num_formats = 0;
- int ret = 0;
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
@@ -1510,11 +1527,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_FORMAT_MOD_INVALID
};
- vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
- GFP_KERNEL);
- if (!vc4_plane)
- return ERR_PTR(-ENOMEM);
-
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
formats[num_formats] = hvs_formats[i].drm;
@@ -1522,13 +1534,14 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
}
}
+ vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
+ possible_crtcs,
+ &vc4_plane_funcs,
+ formats, num_formats,
+ modifiers, type, NULL);
+ if (IS_ERR(vc4_plane))
+ return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- ret = drm_universal_plane_init(dev, plane, 0,
- &vc4_plane_funcs,
- formats, num_formats,
- modifiers, type, NULL);
- if (ret)
- return ERR_PTR(ret);
if (vc4->is_vc5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
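
drmm_universal_plane_alloc() embeds the plane in a DRM-managed allocation, which is why the .destroy callback and the error unwinding disappear from vc4_plane_init(). A hedged, self-contained sketch of the same idiom; struct foo_plane, the format list and the helpers wired into foo_plane_funcs are illustrative choices, not the vc4 ones:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct foo_plane {
        struct drm_plane base;
};

static const struct drm_plane_funcs foo_plane_funcs = {
        .update_plane           = drm_atomic_helper_update_plane,
        .disable_plane          = drm_atomic_helper_disable_plane,
        .reset                  = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
};

static const u32 foo_formats[] = { DRM_FORMAT_XRGB8888 };
static const u64 foo_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID,
};

static struct drm_plane *foo_plane_create(struct drm_device *dev,
                                          u32 possible_crtcs)
{
        struct foo_plane *plane;

        /* No .destroy hook needed: the allocation is tied to @dev. */
        plane = drmm_universal_plane_alloc(dev, struct foo_plane, base,
                                           possible_crtcs, &foo_plane_funcs,
                                           foo_formats, ARRAY_SIZE(foo_formats),
                                           foo_modifiers,
                                           DRM_PLANE_TYPE_PRIMARY, NULL);
        if (IS_ERR(plane))
                return ERR_CAST(plane);

        return &plane->base;
}
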
@@ -1575,13 +1588,11 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
*/
for (i = 0; i < 16; i++) {
struct drm_plane *plane =
- vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
+ vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
+ GENMASK(drm->mode_config.num_crtc - 1, 0));
if (IS_ERR(plane))
continue;
-
- plane->possible_crtcs =
- GENMASK(drm->mode_config.num_crtc - 1, 0);
}
drm_for_each_crtc(crtc, drm) {
@@ -1589,9 +1600,9 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* since we overlay planes on the CRTC in the order they were
* initialized.
*/
- cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+ cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
+ drm_crtc_mask(crtc));
if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index f6b7dc3df08c..1bda5010f15a 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -40,14 +40,14 @@
#include "vc4_packet.h"
struct vc4_rcl_setup {
- struct drm_gem_cma_object *color_read;
- struct drm_gem_cma_object *color_write;
- struct drm_gem_cma_object *zs_read;
- struct drm_gem_cma_object *zs_write;
- struct drm_gem_cma_object *msaa_color_write;
- struct drm_gem_cma_object *msaa_zs_write;
-
- struct drm_gem_cma_object *rcl;
+ struct drm_gem_dma_object *color_read;
+ struct drm_gem_dma_object *color_write;
+ struct drm_gem_dma_object *zs_read;
+ struct drm_gem_dma_object *zs_write;
+ struct drm_gem_dma_object *msaa_color_write;
+ struct drm_gem_dma_object *msaa_zs_write;
+
+ struct drm_gem_dma_object *rcl;
u32 next_offset;
u32 next_write_bo_index;
@@ -97,11 +97,11 @@ static void vc4_store_before_load(struct vc4_rcl_setup *setup)
* coordinates packet, and instead just store to the address given.
*/
static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *bo,
+ struct drm_gem_dma_object *bo,
struct drm_vc4_submit_rcl_surface *surf,
uint8_t x, uint8_t y)
{
- return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
+ return bo->dma_addr + surf->offset + VC4_TILE_BUFFER_SIZE *
(DIV_ROUND_UP(exec->args->width, 32) * y + x);
}
@@ -142,7 +142,7 @@ static void emit_tile(struct vc4_exec_info *exec,
} else {
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->color_read.bits);
- rcl_u32(setup, setup->color_read->paddr +
+ rcl_u32(setup, setup->color_read->dma_addr +
args->color_read.offset);
}
}
@@ -164,7 +164,7 @@ static void emit_tile(struct vc4_exec_info *exec,
} else {
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->zs_read.bits);
- rcl_u32(setup, setup->zs_read->paddr +
+ rcl_u32(setup, setup->zs_read->dma_addr +
args->zs_read.offset);
}
}
@@ -232,7 +232,7 @@ static void emit_tile(struct vc4_exec_info *exec,
(last_tile_write ?
0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
rcl_u32(setup,
- (setup->zs_write->paddr + args->zs_write.offset) |
+ (setup->zs_write->dma_addr + args->zs_write.offset) |
((last && last_tile_write) ?
VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
}
@@ -355,7 +355,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
rcl_u32(setup,
- (setup->color_write ? (setup->color_write->paddr +
+ (setup->color_write ? (setup->color_write->dma_addr +
args->color_write.offset) :
0));
rcl_u16(setup, args->width);
@@ -374,14 +374,14 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
BUG_ON(setup->next_offset != size);
- exec->ct1ca = setup->rcl->paddr;
- exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+ exec->ct1ca = setup->rcl->dma_addr;
+ exec->ct1ea = setup->rcl->dma_addr + setup->next_offset;
return 0;
}
static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *obj,
+ struct drm_gem_dma_object *obj,
struct drm_vc4_submit_rcl_surface *surf)
{
struct drm_vc4_submit_cl *args = exec->args;
@@ -407,7 +407,7 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
}
static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
@@ -433,7 +433,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
}
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf,
bool is_write)
{
@@ -533,7 +533,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
static int
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
struct vc4_rcl_setup *setup,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index d20b0bc51a18..bd181b5a7b52 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -15,8 +15,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
@@ -155,7 +156,6 @@ struct vc4_txp {
struct drm_writeback_connector connector;
void __iomem *regs;
- struct debugfs_regset32 regset;
};
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
@@ -276,13 +276,15 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_atomic_state *state)
{
+ struct drm_device *drm = conn->dev;
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
u32 ctrl;
+ int idx;
int i;
if (WARN_ON(!conn_state->writeback_job))
@@ -312,8 +314,11 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
*/
ctrl |= TXP_ALPHA_INVERT;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
TXP_WRITE(TXP_DIM,
VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
@@ -322,6 +327,8 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
drm_writeback_queue_job(&txp->connector, conn_state);
+
+ drm_dev_exit(idx);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
@@ -337,16 +344,10 @@ vc4_txp_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static void vc4_txp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs vc4_txp_connector_funcs = {
.detect = vc4_txp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_txp_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -354,7 +355,12 @@ static const struct drm_connector_funcs vc4_txp_connector_funcs = {
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *drm = encoder->dev;
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
@@ -369,6 +375,8 @@ static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
}
TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+
+ drm_dev_exit(idx);
}
static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
@@ -384,13 +392,13 @@ static void vc4_txp_disable_vblank(struct drm_crtc *crtc) {}
static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = vc4_crtc_destroy,
.page_flip = vc4_page_flip,
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
+ .late_register = vc4_crtc_late_register,
};
static int vc4_txp_atomic_check(struct drm_crtc *crtc,
@@ -453,6 +461,16 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
struct vc4_txp *txp = data;
struct vc4_crtc *vc4_crtc = &txp->base;
+ /*
+ * We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device is gone, one of the first things we do
+ * is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus
+ * always succeed when we get here.
+ */
TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
vc4_crtc_handle_vblank(vc4_crtc);
drm_writeback_signal_completion(&txp->connector, 0);
@@ -461,6 +479,7 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
}
static const struct vc4_crtc_data vc4_txp_crtc_data = {
+ .debugfs_name = "txp_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
};
@@ -469,7 +488,6 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_crtc *vc4_crtc;
struct vc4_txp *txp;
struct drm_crtc *crtc;
@@ -480,7 +498,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
if (irq < 0)
return irq;
- txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+ txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
if (!txp)
return -ENOMEM;
vc4_crtc = &txp->base;
@@ -495,9 +513,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
- txp->regset.base = txp->regs;
- txp->regset.regs = txp_regs;
- txp->regset.nregs = ARRAY_SIZE(txp_regs);
+ vc4_crtc->regset.base = txp->regs;
+ vc4_crtc->regset.regs = txp_regs;
+ vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
@@ -523,9 +541,6 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
return ret;
dev_set_drvdata(dev, txp);
- vc4->txp = txp;
-
- vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset);
return 0;
}
@@ -533,13 +548,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
static void vc4_txp_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_txp *txp = dev_get_drvdata(dev);
- vc4_txp_connector_destroy(&txp->connector.base);
-
- vc4->txp = NULL;
+ drm_connector_cleanup(&txp->connector.base);
}
static const struct component_ops vc4_txp_ops = {
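Aside: a minimal sketch of the drm_dev_enter()/drm_dev_exit() guard pattern the vc4_txp.c hunks above apply around every register access, so the driver stops touching hardware once the underlying device is unplugged. my_encoder_disable(), my_writel() and MY_REG are made-up placeholders, not part of the patch:

#include <drm/drm_drv.h>

static void my_encoder_disable(struct drm_encoder *encoder)
{
	struct drm_device *drm = encoder->dev;
	int idx;

	/* Bail out if the device has already been removed. */
	if (!drm_dev_enter(drm, &idx))
		return;

	my_writel(0, MY_REG);	/* hardware access is only safe in here */

	drm_dev_exit(idx);
}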
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index cc714dcfe1f2..56abb0d6bc39 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -231,7 +231,7 @@ try_again:
* if it doesn't fit within the buffer that we allocated up front.
* However, it turns out that 16MB is "enough for anybody", and
* real-world applications run into allocation failures from the
- * overall CMA pool before they make scenes complicated enough to run
+ * overall DMA pool before they make scenes complicated enough to run
* out of bin space.
*/
static int bin_bo_alloc(struct vc4_dev *vc4)
@@ -261,15 +261,15 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
dev_err(&v3d->pdev->dev,
"Failed to allocate memory for tile binning: "
- "%d. You may need to enable CMA or give it "
+ "%d. You may need to enable DMA or give it "
"more memory.",
ret);
break;
}
/* Check if this BO won't trigger the addressing bug. */
- if ((bo->base.paddr & 0xf0000000) ==
- ((bo->base.paddr + bo->base.base.size - 1) & 0xf0000000)) {
+ if ((bo->base.dma_addr & 0xf0000000) ==
+ ((bo->base.dma_addr + bo->base.base.size - 1) & 0xf0000000)) {
vc4->bin_bo = bo;
/* Set up for allocating 512KB chunks of
@@ -393,14 +393,34 @@ static int vc4_v3d_runtime_resume(struct device *dev)
vc4_v3d_init_hw(&vc4->base);
- /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
- enable_irq(vc4->irq);
vc4_irq_enable(&vc4->base);
return 0;
}
#endif
+int vc4_v3d_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_v3d *v3d = vc4->v3d;
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ ret = vc4_debugfs_add_file(minor, "v3d_ident",
+ vc4_v3d_debugfs_ident, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_regset32(minor, "v3d_regs", &v3d->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -443,44 +463,47 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
}
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ vc4->irq = ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_runtime_pm;
}
- ret = clk_prepare_enable(v3d->clk);
- if (ret != 0)
- return ret;
-
/* Reset the binner overflow address/size at setup, to be sure
* we don't reuse an old one.
*/
V3D_WRITE(V3D_BPOA, 0);
V3D_WRITE(V3D_BPOS, 0);
- vc4_v3d_init_hw(drm);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- vc4->irq = ret;
-
ret = vc4_irq_install(drm, vc4->irq);
if (ret) {
DRM_ERROR("Failed to install IRQ handler\n");
- return ret;
+ goto err_put_runtime_pm;
}
- pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
- pm_runtime_enable(dev);
-
- vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
- vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
return 0;
+
+err_put_runtime_pm:
+ pm_runtime_put(dev);
+
+ return ret;
}
static void vc4_v3d_unbind(struct device *dev, struct device *master,
@@ -489,8 +512,6 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
- pm_runtime_disable(dev);
-
vc4_irq_uninstall(drm);
/* Disable the binner's overflow memory address, so the next
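Aside: the bin BO check kept in the vc4_v3d.c hunk above compares only bits 31:28 of the first and last byte, i.e. the whole buffer must sit inside a single 256MB window. A rough illustration with made-up addresses; bin_bo_in_one_window() is a hypothetical helper, not in the driver:

#include <linux/sizes.h>
#include <linux/types.h>

static bool bin_bo_in_one_window(dma_addr_t start, size_t size)
{
	/* Same top nibble for first and last byte: no 256MB crossing. */
	return (start & 0xf0000000) == ((start + size - 1) & 0xf0000000);
}

/*
 * bin_bo_in_one_window(0x20000000, SZ_16M) is true:  0x20000000..0x20ffffff
 * bin_bo_in_one_window(0x0ff00000, SZ_16M) is false: crosses 0x10000000
 */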
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 2feba55bcef7..520231af4df9 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -102,11 +102,11 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
height <= 4 * utile_height(cpp));
}
-struct drm_gem_cma_object *
+struct drm_gem_dma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
struct vc4_dev *vc4 = exec->dev;
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -129,7 +129,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
return obj;
}
-static struct drm_gem_cma_object *
+static struct drm_gem_dma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
@@ -160,7 +160,7 @@ gl_shader_rec_size(uint32_t pointer_bits)
}
bool
-vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
@@ -263,7 +263,7 @@ validate_increment_semaphore(VALIDATE_ARGS)
static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
- struct drm_gem_cma_object *ib;
+ struct drm_gem_dma_object *ib;
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t offset = *(uint32_t *)(untrusted + 5);
uint32_t max_index = *(uint32_t *)(untrusted + 9);
@@ -294,7 +294,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
return -EINVAL;
}
- *(uint32_t *)(validated + 5) = ib->paddr + offset;
+ *(uint32_t *)(validated + 5) = ib->dma_addr + offset;
return 0;
}
@@ -400,7 +400,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
* free when the job completes rendering.
*/
exec->bin_slots |= BIT(bin_slot);
- bin_addr = vc4->bin_bo->base.paddr + bin_slot * vc4->bin_alloc_size;
+ bin_addr = vc4->bin_bo->base.dma_addr + bin_slot * vc4->bin_alloc_size;
/* The tile state data array is 48 bytes per tile, and we put it at
* the start of a BO containing both it and the tile alloc.
@@ -575,7 +575,7 @@ reloc_tex(struct vc4_exec_info *exec,
struct vc4_texture_sample_info *sample,
uint32_t texture_handle_index, bool is_cs)
{
- struct drm_gem_cma_object *tex;
+ struct drm_gem_dma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
uint32_t p2 = (sample->p_offset[2] != ~0 ?
@@ -608,7 +608,7 @@ reloc_tex(struct vc4_exec_info *exec,
"outside of UBO\n");
goto fail;
}
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
return true;
}
@@ -736,7 +736,7 @@ reloc_tex(struct vc4_exec_info *exec,
offset -= level_size;
}
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
if (is_cs) {
exec->bin_dep_seqno = max(exec->bin_dep_seqno,
@@ -765,7 +765,7 @@ validate_gl_shader_rec(struct drm_device *dev,
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
+ struct drm_gem_dma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
@@ -840,7 +840,7 @@ validate_gl_shader_rec(struct drm_device *dev,
void *uniform_data_u;
uint32_t tex, uni;
- *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
+ *(uint32_t *)(pkt_v + o) = bo[i]->dma_addr + src_offset;
if (src_offset != 0) {
DRM_DEBUG("Shaders must be at offset 0 of "
@@ -896,7 +896,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
for (i = 0; i < nr_attributes; i++) {
- struct drm_gem_cma_object *vbo =
+ struct drm_gem_dma_object *vbo =
bo[ARRAY_SIZE(shader_reloc_offsets) + i];
uint32_t o = 36 + i * 8;
uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
@@ -928,7 +928,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
}
- *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
+ *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;
}
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index e315aeb5fef5..9745f8810eca 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -776,7 +776,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
}
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
{
struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 11fc3d6f66b1..0b3333865702 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -14,6 +14,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -160,48 +161,28 @@ struct vc4_vec_variant {
/* General VEC hardware state. */
struct vc4_vec {
+ struct vc4_encoder encoder;
+ struct drm_connector connector;
+
struct platform_device *pdev;
const struct vc4_vec_variant *variant;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
void __iomem *regs;
struct clk *clock;
- const struct vc4_vec_tv_mode *tv_mode;
-
struct debugfs_regset32 regset;
};
#define VEC_READ(offset) readl(vec->regs + (offset))
#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
-/* VC4 VEC encoder KMS struct */
-struct vc4_vec_encoder {
- struct vc4_encoder base;
- struct vc4_vec *vec;
-};
-
-static inline struct vc4_vec_encoder *
-to_vc4_vec_encoder(struct drm_encoder *encoder)
+static inline struct vc4_vec *
+encoder_to_vc4_vec(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_vec_encoder, base.base);
+ return container_of(encoder, struct vc4_vec, encoder.base);
}
-/* VC4 VEC connector KMS struct */
-struct vc4_vec_connector {
- struct drm_connector base;
- struct vc4_vec *vec;
-
- /* Since the connector is attached to just the one encoder,
- * this is the reference to it so we can do the best_encoder()
- * hook.
- */
- struct drm_encoder *encoder;
-};
-
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
@@ -211,7 +192,9 @@ enum vc4_vec_tv_mode_id {
struct vc4_vec_tv_mode {
const struct drm_display_mode *mode;
- void (*mode_set)(struct vc4_vec *vec);
+ u32 config0;
+ u32 config1;
+ u32 custom_freq;
};
static const struct debugfs_reg32 vec_regs[] = {
@@ -241,63 +224,41 @@ static const struct debugfs_reg32 vec_regs[] = {
VC4_REG32(VEC_DAC_MISC),
};
-static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
-static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
static const struct drm_display_mode ntsc_mode = {
DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ 480, 480 + 7, 480 + 7 + 6, 525, 0,
DRM_MODE_FLAG_INTERLACE)
};
-static void vc4_vec_pal_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
-static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
- VEC_WRITE(VEC_CONFIG1,
- VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ);
- VEC_WRITE(VEC_FREQ3_2, 0x223b);
- VEC_WRITE(VEC_FREQ1_0, 0x61d1);
-}
-
static const struct drm_display_mode pal_mode = {
DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ 576, 576 + 4, 576 + 4 + 6, 625, 0,
DRM_MODE_FLAG_INTERLACE)
};
static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
[VC4_VEC_TV_MODE_NTSC] = {
.mode = &ntsc_mode,
- .mode_set = vc4_vec_ntsc_mode_set,
+ .config0 = VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_NTSC_J] = {
.mode = &ntsc_mode,
- .mode_set = vc4_vec_ntsc_j_mode_set,
+ .config0 = VEC_CONFIG0_NTSC_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL] = {
.mode = &pal_mode,
- .mode_set = vc4_vec_pal_mode_set,
+ .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL_M] = {
.mode = &pal_mode,
- .mode_set = vc4_vec_pal_m_mode_set,
+ .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x223b61d1,
},
};
@@ -307,12 +268,6 @@ vc4_vec_connector_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
}
-static void vc4_vec_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static int vc4_vec_connector_get_modes(struct drm_connector *connector)
{
struct drm_connector_state *state = connector->state;
@@ -333,7 +288,6 @@ static int vc4_vec_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_funcs vc4_vec_connector_funcs = {
.detect = vc4_vec_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_vec_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -343,42 +297,38 @@ static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs =
.get_modes = vc4_vec_connector_get_modes,
};
-static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
- struct vc4_vec *vec)
+static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
{
- struct drm_connector *connector = NULL;
- struct vc4_vec_connector *vec_connector;
-
- vec_connector = devm_kzalloc(dev->dev, sizeof(*vec_connector),
- GFP_KERNEL);
- if (!vec_connector)
- return ERR_PTR(-ENOMEM);
+ struct drm_connector *connector = &vec->connector;
+ int ret;
- connector = &vec_connector->base;
connector->interlace_allowed = true;
- vec_connector->encoder = vec->encoder;
- vec_connector->vec = vec;
+ ret = drmm_connector_init(dev, connector, &vc4_vec_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite, NULL);
+ if (ret)
+ return ret;
- drm_connector_init(dev, connector, &vc4_vec_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
drm_connector_helper_add(connector, &vc4_vec_connector_helper_funcs);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_mode_property,
VC4_VEC_TV_MODE_NTSC);
- vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
- drm_connector_attach_encoder(connector, vec->encoder);
+ drm_connector_attach_encoder(connector, &vec->encoder.base);
- return connector;
+ return 0;
}
-static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
+static void vc4_vec_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
- int ret;
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ int idx, ret;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
VEC_WRITE(VEC_CFG, 0);
VEC_WRITE(VEC_DAC_MISC,
@@ -392,20 +342,35 @@ static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
ret = pm_runtime_put(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to release power domain: %d\n", ret);
- return;
+ goto err_dev_exit;
}
+
+ drm_dev_exit(idx);
+ return;
+
+err_dev_exit:
+ drm_dev_exit(idx);
}
-static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
+static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
- int ret;
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ struct drm_connector *connector = &vec->connector;
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ const struct vc4_vec_tv_mode *tv_mode =
+ &vc4_vec_tv_modes[conn_state->tv.mode];
+ int idx, ret;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
ret = pm_runtime_get_sync(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
- return;
+ goto err_dev_exit;
}
/*
@@ -418,13 +383,13 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(vec->clock, 108000000);
if (ret) {
DRM_ERROR("Failed to set clock rate: %d\n", ret);
- return;
+ goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vec->clock);
if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- return;
+ goto err_put_runtime_pm;
}
/* Reset the different blocks */
@@ -455,29 +420,27 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
/* Mask all interrupts. */
VEC_WRITE(VEC_MASK0, 0);
- vec->tv_mode->mode_set(vec);
+ VEC_WRITE(VEC_CONFIG0, tv_mode->config0);
+ VEC_WRITE(VEC_CONFIG1, tv_mode->config1);
+
+ if (tv_mode->custom_freq) {
+ VEC_WRITE(VEC_FREQ3_2,
+ (tv_mode->custom_freq >> 16) & 0xffff);
+ VEC_WRITE(VEC_FREQ1_0,
+ tv_mode->custom_freq & 0xffff);
+ }
VEC_WRITE(VEC_DAC_MISC,
VEC_DAC_MISC_VID_ACT | VEC_DAC_MISC_DAC_RST_N);
VEC_WRITE(VEC_CFG, VEC_CFG_VEC_EN);
-}
+ drm_dev_exit(idx);
+ return;
-static bool vc4_vec_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static void vc4_vec_encoder_atomic_mode_set(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
-
- vec->tv_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+err_put_runtime_pm:
+ pm_runtime_put(&vec->pdev->dev);
+err_dev_exit:
+ drm_dev_exit(idx);
}
static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
@@ -496,11 +459,27 @@ static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = {
- .disable = vc4_vec_encoder_disable,
- .enable = vc4_vec_encoder_enable,
- .mode_fixup = vc4_vec_encoder_mode_fixup,
.atomic_check = vc4_vec_encoder_atomic_check,
- .atomic_mode_set = vc4_vec_encoder_atomic_mode_set,
+ .atomic_disable = vc4_vec_encoder_disable,
+ .atomic_enable = vc4_vec_encoder_enable,
+};
+
+static int vc4_vec_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, "vec_regs",
+ &vec->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
+ .late_register = vc4_vec_late_register,
};
static const struct vc4_vec_variant bcm2835_vec_variant = {
@@ -532,9 +511,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_vec *vec;
- struct vc4_vec_encoder *vc4_vec_encoder;
int ret;
ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
@@ -542,18 +519,11 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vec = devm_kzalloc(dev, sizeof(*vec), GFP_KERNEL);
+ vec = drmm_kzalloc(drm, sizeof(*vec), GFP_KERNEL);
if (!vec)
return -ENOMEM;
- vc4_vec_encoder = devm_kzalloc(dev, sizeof(*vc4_vec_encoder),
- GFP_KERNEL);
- if (!vc4_vec_encoder)
- return -ENOMEM;
- vc4_vec_encoder->base.type = VC4_ENCODER_TYPE_VEC;
- vc4_vec_encoder->vec = vec;
- vec->encoder = &vc4_vec_encoder->base.base;
-
+ vec->encoder.type = VC4_ENCODER_TYPE_VEC;
vec->pdev = pdev;
vec->variant = (const struct vc4_vec_variant *)
of_device_get_match_data(dev);
@@ -572,49 +542,30 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pm_runtime_enable(dev);
-
- drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
- vec->connector = vc4_vec_connector_init(drm, vec);
- if (IS_ERR(vec->connector)) {
- ret = PTR_ERR(vec->connector);
- goto err_destroy_encoder;
- }
+ ret = drmm_encoder_init(drm, &vec->encoder.base,
+ &vc4_vec_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC,
+ NULL);
+ if (ret)
+ return ret;
- dev_set_drvdata(dev, vec);
+ drm_encoder_helper_add(&vec->encoder.base, &vc4_vec_encoder_helper_funcs);
- vc4->vec = vec;
+ ret = vc4_vec_connector_init(drm, vec);
+ if (ret)
+ return ret;
- vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
+ dev_set_drvdata(dev, vec);
return 0;
-
-err_destroy_encoder:
- drm_encoder_cleanup(vec->encoder);
- pm_runtime_disable(dev);
-
- return ret;
-}
-
-static void vc4_vec_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_vec *vec = dev_get_drvdata(dev);
-
- vc4_vec_connector_destroy(vec->connector);
- drm_encoder_cleanup(vec->encoder);
- pm_runtime_disable(dev);
-
- vc4->vec = NULL;
}
static const struct component_ops vc4_vec_ops = {
.bind = vc4_vec_bind,
- .unbind = vc4_vec_unbind,
};
static int vc4_vec_dev_probe(struct platform_device *pdev)
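Aside: in the vc4_vec.c hunks above, the per-mode callbacks are folded into the vc4_vec_tv_modes[] table, and the PAL-M entry packs the two 16-bit frequency registers into a single custom_freq word. A sketch of how the enable path splits it back out; split_custom_freq() is a hypothetical helper, and the 0x223b61d1 value comes straight from the table in the hunk:

/*
 * 0x223b61d1 splits into 0x223b (VEC_FREQ3_2) and 0x61d1 (VEC_FREQ1_0),
 * the same two values the removed vc4_vec_pal_m_mode_set() wrote by hand.
 */
static void split_custom_freq(u32 custom_freq, u16 *freq3_2, u16 *freq1_0)
{
	*freq3_2 = (custom_freq >> 16) & 0xffff;	/* 0x223b for PAL-M */
	*freq1_0 = custom_freq & 0xffff;		/* 0x61d1 for PAL-M */
}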
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
index 84db4eee7828..8b978dd51a25 100644
--- a/drivers/gpu/drm/via/Makefile
+++ b/drivers/gpu/drm/via/Makefile
@@ -3,6 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+via-y := via_dri1.o
obj-$(CONFIG_DRM_VIA) +=via.o
diff --git a/drivers/gpu/drm/via/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
index 462375d543b9..eb848508b12b 100644
--- a/drivers/gpu/drm/via/via_3d_reg.h
+++ b/drivers/gpu/drm/via/via_3d_reg.h
@@ -1,25 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Copyright 1998-2011 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2011 S3 Graphics, Inc. All Rights Reserved.
*/
#ifndef VIA_3D_REG_H
@@ -50,6 +32,7 @@
#define HC_ParaType_Palette 0x0003
#define HC_ParaType_PreCR 0x0010
#define HC_ParaType_Auto 0x00fe
+#define INV_ParaType_Dummy 0x00300000
/* Transmission Space
*/
@@ -173,10 +156,10 @@
#define HC_HSPXOS_SHIFT 12
#define HC_HSPYOS_MASK 0x00000fff
-/* Command
+/*
* Command A
*/
-#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
+#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
#define HC_HE3Fire_MASK 0x00100000
#define HC_HPMType_MASK 0x000f0000
#define HC_HEFlag_MASK 0x0000e000
@@ -236,6 +219,8 @@
/* Enable Setting
*/
#define HC_SubA_HEnable 0x0000
+#define HC_HenForce1P_MASK 0x00800000 /* [Force 1 Pipe] */
+#define HC_HenZDCheck_MASK 0x00400000 /* [Z dirty bit settings] */
#define HC_HenTXEnvMap_MASK 0x00200000
#define HC_HenVertexCNT_MASK 0x00100000
#define HC_HenCPUDAZ_MASK 0x00080000
@@ -684,6 +669,12 @@
/* Texture subtype definitions
*/
+#define HC_SubType_Samp0 0x00000020
+#define HC_SubType_Samp1 0x00000021
+
+
+/* Texture subtype definitions
+ */
#define HC_SubType_Tex0 0x00000000
#define HC_SubType_Tex1 0x00000001
#define HC_SubType_TexGeneral 0x000000fe
@@ -762,7 +753,13 @@
#define HC_SubA_HTXnBumpM10 0x0092
#define HC_SubA_HTXnBumpM11 0x0093
#define HC_SubA_HTXnLScale 0x0094
-#define HC_SubA_HTXSMD 0x0000
+
+#define HC_SubA_HTXSMD 0x0000
+#define HC_SubA_HTXYUV2RGB1 0x0001
+#define HC_SubA_HTXYUV2RGB2 0x0002
+#define HC_SubA_HTXYUV2RGB3 0x0003
+#define HTXYUV2RGB4BT601 (1<<23)
+#define HTXYUV2RGB4BT709 (1<<22)
/* HC_SubA_HTXnL012BasH 0x0020
*/
#define HC_HTXnL0BasH_MASK 0x000000ff
@@ -965,6 +962,7 @@
#define HC_HTXnFM_Lum 0x00100000
#define HC_HTXnFM_Alpha 0x00180000
#define HC_HTXnFM_DX 0x00280000
+#define HC_HTXnFM_YUV 0x00300000
#define HC_HTXnFM_ARGB16 0x00880000
#define HC_HTXnFM_ARGB32 0x00980000
#define HC_HTXnFM_ABGR16 0x00a80000
@@ -995,6 +993,12 @@
#define HC_HTXnFM_DX1 (HC_HTXnFM_DX | 0x00010000)
#define HC_HTXnFM_DX23 (HC_HTXnFM_DX | 0x00020000)
#define HC_HTXnFM_DX45 (HC_HTXnFM_DX | 0x00030000)
+/* YUV package mode */
+#define HC_HTXnFM_YUY2 (HC_HTXnFM_YUV | 0x00000000)
+/* YUV planner mode */
+#define HC_HTXnFM_YV12 (HC_HTXnFM_YUV | 0x00040000)
+/* YUV planner mode */
+#define HC_HTXnFM_IYUV (HC_HTXnFM_YUV | 0x00040000)
#define HC_HTXnFM_RGB555 (HC_HTXnFM_ARGB16 | 0x00000000)
#define HC_HTXnFM_RGB565 (HC_HTXnFM_ARGB16 | 0x00010000)
#define HC_HTXnFM_ARGB1555 (HC_HTXnFM_ARGB16 | 0x00020000)
@@ -1023,6 +1027,13 @@
#define HC_HTXnLoc_Local 0x00000000
#define HC_HTXnLoc_Sys 0x00000002
#define HC_HTXnLoc_AGP 0x00000003
+
+/* Video Texture */
+#define HC_HTXnYUV2RGBMode_RGB 0x00000000
+#define HC_HTXnYUV2RGBMode_SDTV 0x00000001
+#define HC_HTXnYUV2RGBMode_HDTV 0x00000002
+#define HC_HTXnYUV2RGBMode_TABLE 0x00000003
+
/* HC_SubA_HTXnTRAH 0x007f
*/
#define HC_HTXnTRAH_MASK 0x00ff0000
@@ -1330,9 +1341,9 @@
*/
#define HC_HFthRTXA_MASK 0x000000ff
-/******************************************************************************
-** Define the Halcyon Internal register access constants. For simulator only.
-******************************************************************************/
+/****************************************************************************
+ * Define the Halcyon Internal register access constants. For simulator only.
+ ***************************************************************************/
#define HC_SIMA_HAGPBstL 0x0000
#define HC_SIMA_HAGPBendL 0x0001
#define HC_SIMA_HAGPCMNT 0x0002
@@ -1477,80 +1488,80 @@
#define HC_SIMA_TX0TX1_OFF 0x0050
/*---- start of texture 1 setting ----
*/
-#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1Lc_11HE (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
/*---- end of texture 1 setting ---- 0xaf
*/
#define HC_SIMA_HTXSMD 0x00b0
@@ -1580,9 +1591,9 @@
#define HC_SIMA_HRErr 0x0445
#define HC_SIMA_FIFOstatus 0x0446
-/******************************************************************************
-** Define the AGP command header.
-******************************************************************************/
+/****************************************************************************
+ * Define the AGP command header.
+ ***************************************************************************/
#define HC_ACMD_MASK 0xfe000000
#define HC_ACMD_SUB_MASK 0x0c000000
#define HC_ACMD_HCmdA 0xee000000
@@ -1605,18 +1616,18 @@
#define HC_ACMD_H4COUNT_MASK 0x01fffe00
#define HC_ACMD_H4COUNT_SHIFT 9
-/********************************************************************************
-** Define Header
-********************************************************************************/
-#define HC_HEADER2 0xF210F110
+/*****************************************************************************
+ * Define Header
+ ****************************************************************************/
+#define HC_HEADER2 0xF210F110
-/********************************************************************************
-** Define Dummy Value
-********************************************************************************/
-#define HC_DUMMY 0xCCCCCCCC
-/********************************************************************************
-** Define for DMA use
-********************************************************************************/
+/*****************************************************************************
+ * Define Dummy Value
+ ****************************************************************************/
+#define HC_DUMMY 0xCCCCCCCC
+/*****************************************************************************
+ * Define for DMA use
+ ****************************************************************************/
#define HALCYON_HEADER2 0XF210F110
#define HALCYON_FIRECMD 0XEE100000
#define HALCYON_FIREMASK 0XFFF00000
@@ -1643,8 +1654,118 @@
#define HC_HAGPBpID_STOP 0x00000002
#define HC_HAGPBpH_MASK 0x00ffffff
+
#define VIA_VIDEO_HEADER5 0xFE040000
#define VIA_VIDEO_HEADER6 0xFE050000
#define VIA_VIDEO_HEADER7 0xFE060000
#define VIA_VIDEOMASK 0xFFFF0000
+
+/*****************************************************************************
+ * Define for H5 DMA use
+ ****************************************************************************/
+#define H5_HC_DUMMY 0xCC000000
+
+/* Command Header Type */
+#define INV_DUMMY_MASK 0xFF000000
+#define INV_AGPHeader0 0xFE000000
+#define INV_AGPHeader1 0xFE010000
+#define INV_AGPHeader2 0xFE020000
+#define INV_AGPHeader3 0xFE030000
+#define INV_AGPHeader4 0xFE040000
+#define INV_AGPHeader5 0xFE050000
+#define INV_AGPHeader6 0xFE060000
+#define INV_AGPHeader7 0xFE070000
+#define INV_AGPHeader9 0xFE090000
+#define INV_AGPHeaderA 0xFE0A0000
+#define INV_AGPHeader40 0xFE400000
+#define INV_AGPHeader41 0xFE410000
+#define INV_AGPHeader43 0xFE430000
+#define INV_AGPHeader45 0xFE450000
+#define INV_AGPHeader47 0xFE470000
+#define INV_AGPHeader4A 0xFE4A0000
+#define INV_AGPHeader82 0xFE820000
+#define INV_AGPHeader83 0xFE830000
+#define INV_AGPHeader_MASK 0xFFFF0000
+#define INV_AGPHeader2A 0xFE2A0000
+#define INV_AGPHeader25 0xFE250000
+#define INV_AGPHeader20 0xFE200000
+#define INV_AGPHeader23 0xFE230000
+#define INV_AGPHeaderE2 0xFEE20000
+#define INV_AGPHeaderE3 0xFEE30000
+
+/*Transmission IO Space*/
+#define INV_REG_CR_TRANS 0x041C
+#define INV_REG_CR_BEGIN 0x0420
+#define INV_REG_CR_END 0x0438
+
+#define INV_REG_3D_TRANS 0x043C
+#define INV_REG_3D_BEGIN 0x0440
+#define INV_REG_3D_END 0x06FC
+
+#define INV_ParaType_CmdVdata 0x0000
+
+/* H5 Enable Setting
+ */
+#define INV_HC_SubA_HEnable1 0x00
+
+#define INV_HC_HenAT4ALLRT_MASK 0x00100000
+#define INV_HC_HenATMRT3_MASK 0x00080000
+#define INV_HC_HenATMRT2_MASK 0x00040000
+#define INV_HC_HenATMRT1_MASK 0x00020000
+#define INV_HC_HenATMRT0_MASK 0x00010000
+#define INV_HC_HenSCMRT3_MASK 0x00008000
+#define INV_HC_HenSCMRT2_MASK 0x00004000
+#define INV_HC_HenSCMRT1_MASK 0x00002000
+#define INV_HC_HenSCMRT0_MASK 0x00001000
+#define INV_HC_HenFOGMRT3_MASK 0x00000800
+#define INV_HC_HenFOGMRT2_MASK 0x00000400
+#define INV_HC_HenFOGMRT1_MASK 0x00000200
+#define INV_HC_HenFOGMRT0_MASK 0x00000100
+#define INV_HC_HenABLMRT3_MASK 0x00000080
+#define INV_HC_HenABLMRT2_MASK 0x00000040
+#define INV_HC_HenABLMRT1_MASK 0x00000020
+#define INV_HC_HenABLMRT0_MASK 0x00000010
+#define INV_HC_HenDTMRT3_MASK 0x00000008
+#define INV_HC_HenDTMRT2_MASK 0x00000004
+#define INV_HC_HenDTMRT1_MASK 0x00000002
+#define INV_HC_HenDTMRT0_MASK 0x00000001
+
+#define INV_HC_SubA_HEnable2 0x01
+
+#define INV_HC_HenLUL2DR_MASK 0x00800000
+#define INV_HC_HenLDIAMOND_MASK 0x00400000
+#define INV_HC_HenPSPRITE_MASK 0x00200000
+#define INV_HC_HenC2S_MASK 0x00100000
+#define INV_HC_HenFOGPP_MASK 0x00080000
+#define INV_HC_HenSCPP_MASK 0x00040000
+#define INV_HC_HenCPP_MASK 0x00020000
+#define INV_HC_HenCZ_MASK 0x00002000
+#define INV_HC_HenVC_MASK 0x00001000
+#define INV_HC_HenCL_MASK 0x00000800
+#define INV_HC_HenPS_MASK 0x00000400
+#define INV_HC_HenWCZ_MASK 0x00000200
+#define INV_HC_HenTXCH_MASK 0x00000100
+#define INV_HC_HenBFCULL_MASK 0x00000080
+#define INV_HC_HenCW_MASK 0x00000040
+#define INV_HC_HenAA_MASK 0x00000020
+#define INV_HC_HenST_MASK 0x00000010
+#define INV_HC_HenZT_MASK 0x00000008
+#define INV_HC_HenZW_MASK 0x00000004
+#define INV_HC_HenSP_MASK 0x00000002
+#define INV_HC_HenLP_MASK 0x00000001
+
+/* H5 Miscellaneous Settings
+ */
+#define INV_HC_SubA_HCClipTL 0x0080
+#define INV_HC_SubA_HCClipBL 0x0081
+#define INV_HC_SubA_HSClipTL 0x0082
+#define INV_HC_SubA_HSClipBL 0x0083
+#define INV_HC_SubA_HSolidCL 0x0086
+#define INV_HC_SubA_HSolidCH 0x0087
+#define INV_HC_SubA_HGBClipGL 0x0088
+#define INV_HC_SubA_HGBClipGR 0x0089
+
+
+#define INV_HC_ParaType_Vetex 0x00040000
+
#endif
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
deleted file mode 100644
index 177b0499abf1..000000000000
--- a/drivers/gpu/drm/via/via_dma.c
+++ /dev/null
@@ -1,744 +0,0 @@
-/* via_dma.c -- DMA support for the VIA Unichrome/Pro
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
- * All Rights Reserved.
- *
- * Copyright 2004 The Unichrome project.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Tungsten Graphics,
- * Erdi Chen,
- * Thomas Hellstrom.
- */
-
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-#include "via_3d_reg.h"
-
-#define CMDBUF_ALIGNMENT_SIZE (0x100)
-#define CMDBUF_ALIGNMENT_MASK (0x0ff)
-
-/* defines for VIA 3D registers */
-#define VIA_REG_STATUS 0x400
-#define VIA_REG_TRANSET 0x43C
-#define VIA_REG_TRANSPACE 0x440
-
-/* VIA_REG_STATUS(0x400): Engine Status */
-#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
-#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
-#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
-#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
-
-#define SetReg2DAGP(nReg, nData) { \
- *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
- *((uint32_t *)(vb) + 1) = (nData); \
- vb = ((uint32_t *)vb) + 2; \
- dev_priv->dma_low += 8; \
-}
-
-#define via_flush_write_combine() mb()
-
-#define VIA_OUT_RING_QW(w1, w2) do { \
- *vb++ = (w1); \
- *vb++ = (w2); \
- dev_priv->dma_low += 8; \
-} while (0)
-
-static void via_cmdbuf_start(drm_via_private_t *dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
-static int via_wait_idle(drm_via_private_t *dev_priv);
-static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
-
-/*
- * Free space in command buffer.
- */
-
-static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-
- return ((hw_addr <= dev_priv->dma_low) ?
- (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
- (hw_addr - dev_priv->dma_low));
-}
-
-/*
- * How much does the command regulator lag behind?
- */
-
-static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-
- return ((hw_addr <= dev_priv->dma_low) ?
- (dev_priv->dma_low - hw_addr) :
- (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
-}
-
-/*
- * Check that the given size fits in the buffer, otherwise wait.
- */
-
-static inline int
-via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t cur_addr, hw_addr, next_addr;
- volatile uint32_t *hw_addr_ptr;
- uint32_t count;
- hw_addr_ptr = dev_priv->hw_addr_ptr;
- cur_addr = dev_priv->dma_low;
- next_addr = cur_addr + size + 512 * 1024;
- count = 1000000;
- do {
- hw_addr = *hw_addr_ptr - agp_base;
- if (count-- == 0) {
- DRM_ERROR
- ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
- hw_addr, cur_addr, next_addr);
- return -1;
- }
- if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
- msleep(1);
- } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
- return 0;
-}
-
-/*
- * Checks whether buffer head has reach the end. Rewind the ring buffer
- * when necessary.
- *
- * Returns virtual pointer to ring buffer.
- */
-
-static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
- unsigned int size)
-{
- if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
- dev_priv->dma_high) {
- via_cmdbuf_rewind(dev_priv);
- }
- if (via_cmdbuf_wait(dev_priv, size) != 0)
- return NULL;
-
- return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
-}
-
-int via_dma_cleanup(struct drm_device *dev)
-{
- if (dev->dev_private) {
- drm_via_private_t *dev_priv =
- (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start) {
- via_cmdbuf_reset(dev_priv);
-
- drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- }
-
- }
-
- return 0;
-}
-
-static int via_initialize(struct drm_device *dev,
- drm_via_private_t *dev_priv,
- drm_via_dma_init_t *init)
-{
- if (!dev_priv || !dev_priv->mmio) {
- DRM_ERROR("via_dma_init called before via_map_init\n");
- return -EFAULT;
- }
-
- if (dev_priv->ring.virtual_start != NULL) {
- DRM_ERROR("called again without calling cleanup\n");
- return -EFAULT;
- }
-
- if (!dev->agp || !dev->agp->base) {
- DRM_ERROR("called with no agp memory available\n");
- return -EFAULT;
- }
-
- if (dev_priv->chipset == VIA_DX9_0) {
- DRM_ERROR("AGP DMA is not supported on this chip\n");
- return -EINVAL;
- }
-
- dev_priv->ring.map.offset = dev->agp->base + init->offset;
- dev_priv->ring.map.size = init->size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_legacy_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- via_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->dma_ptr = dev_priv->ring.virtual_start;
- dev_priv->dma_low = 0;
- dev_priv->dma_high = init->size;
- dev_priv->dma_wrap = init->size;
- dev_priv->dma_offset = init->offset;
- dev_priv->last_pause_ptr = NULL;
- dev_priv->hw_addr_ptr =
- (volatile uint32_t *)((char *)dev_priv->mmio->handle +
- init->reg_pause_addr);
-
- via_cmdbuf_start(dev_priv);
-
- return 0;
-}
-
-static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_dma_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case VIA_INIT_DMA:
- if (!capable(CAP_SYS_ADMIN))
- retcode = -EPERM;
- else
- retcode = via_initialize(dev, dev_priv, init);
- break;
- case VIA_CLEANUP_DMA:
- if (!capable(CAP_SYS_ADMIN))
- retcode = -EPERM;
- else
- retcode = via_dma_cleanup(dev);
- break;
- case VIA_DMA_INITIALIZED:
- retcode = (dev_priv->ring.virtual_start != NULL) ?
- 0 : -EFAULT;
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
-{
- drm_via_private_t *dev_priv;
- uint32_t *vb;
- int ret;
-
- dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("called without initializing AGP ring buffer.\n");
- return -EFAULT;
- }
-
- if (cmd->size > VIA_PCI_BUF_SIZE)
- return -ENOMEM;
-
- if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
- return -EFAULT;
-
- /*
- * Running this function on AGP memory is dead slow. Therefore
- * we run it on a temporary cacheable system memory buffer and
- * copy it to AGP memory when ready.
- */
-
- if ((ret =
- via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
- cmd->size, dev, 1))) {
- return ret;
- }
-
- vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
- if (vb == NULL)
- return -EAGAIN;
-
- memcpy(vb, dev_priv->pci_buf, cmd->size);
-
- dev_priv->dma_low += cmd->size;
-
- /*
- * Small submissions somehow stall the CPU (AGP cache effects?),
- * so pad them to a greater size.
- */
-
- if (cmd->size < 0x100)
- via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
- via_cmdbuf_pause(dev_priv);
-
- return 0;
-}
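Editorial aside: the comment in via_dispatch_cmdbuffer above describes a staging-buffer pattern, i.e. validate the stream in cheap cached memory first and only then copy it to the slow destination in one burst. A minimal user-space sketch of that pattern follows; the verify_stream() rule, submit() helper and all sizes are made up for illustration and are not part of the driver or this patch.

/* Standalone sketch (hypothetical, not part of the patch): validate a command
 * stream in a cached staging buffer, then copy it to the slow destination. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define STAGING_SIZE 256

/* Hypothetical verifier: accept only even 32-bit words. */
static int verify_stream(const uint32_t *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		if (buf[i] & 1)
			return -1;
	return 0;
}

static int submit(uint32_t *dest, const uint32_t *src, size_t words)
{
	uint32_t staging[STAGING_SIZE];

	if (words > STAGING_SIZE)
		return -1;
	memcpy(staging, src, words * sizeof(*src));	/* cheap, cached copy */
	if (verify_stream(staging, words))
		return -1;				/* reject before touching dest */
	memcpy(dest, staging, words * sizeof(*src));	/* single burst to slow memory */
	return 0;
}

int main(void)
{
	uint32_t cmds[4] = { 2, 4, 6, 8 }, ring[4];

	printf("%d\n", submit(ring, cmds, 4));	/* prints 0: accepted */
	return 0;
}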
-
-int via_driver_dma_quiescent(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- if (!via_wait_idle(dev_priv))
- return -EBUSY;
- return 0;
-}
-
-static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- return via_driver_dma_quiescent(dev);
-}
-
-static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuffer_t *cmdbuf = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
- ret = via_dispatch_cmdbuffer(dev, cmdbuf);
- return ret;
-}
-
-static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
- drm_via_cmdbuffer_t *cmd)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (cmd->size > VIA_PCI_BUF_SIZE)
- return -ENOMEM;
- if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
- return -EFAULT;
-
- if ((ret =
- via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
- cmd->size, dev, 0))) {
- return ret;
- }
-
- ret =
- via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
- cmd->size);
- return ret;
-}
-
-static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuffer_t *cmdbuf = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
- ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
- return ret;
-}
-
-static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
- uint32_t * vb, int qw_count)
-{
- for (; qw_count > 0; --qw_count)
- VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
- return vb;
-}
-
-/*
- * This function is used internally by ring buffer management code.
- *
- * Returns virtual pointer to ring buffer.
- */
-static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
-{
- return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
-}
-
-/*
- * Hooks a segment of data into the tail of the ring-buffer by
- * modifying the pause address stored in the buffer itself. If
- * the regulator has already paused, restart it.
- */
-static int via_hook_segment(drm_via_private_t *dev_priv,
- uint32_t pause_addr_hi, uint32_t pause_addr_lo,
- int no_pci_fire)
-{
- int paused, count;
- volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
- uint32_t reader, ptr;
- uint32_t diff;
-
- paused = 0;
- via_flush_write_combine();
- (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
-
- *paused_at = pause_addr_lo;
- via_flush_write_combine();
- (void) *paused_at;
-
- reader = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
-
- /*
- * There is a possibility that the command reader will
- * miss the new pause address and pause on the old one.
- * In that case we need to program the new start address
- * using PCI.
- */
-
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- count = 10000000;
- while (diff == 0 && count--) {
- paused = (via_read(dev_priv, 0x41c) & 0x80000000);
- if (paused)
- break;
- reader = *(dev_priv->hw_addr_ptr);
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- }
-
- paused = via_read(dev_priv, 0x41c) & 0x80000000;
-
- if (paused && !no_pci_fire) {
- reader = *(dev_priv->hw_addr_ptr);
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- diff &= (dev_priv->dma_high - 1);
- if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
- DRM_ERROR("Paused at incorrect address. "
- "0x%08x, 0x%08x 0x%08x\n",
- ptr, reader, dev_priv->dma_diff);
- } else if (diff == 0) {
- /*
- * There is a concern that these writes may stall the PCI bus
- * if the GPU is not idle. However, idling the GPU first
- * doesn't make a difference.
- */
-
- via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
- via_read(dev_priv, VIA_REG_TRANSPACE);
- }
- }
- return paused;
-}
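Editorial aside: the distance test in via_hook_segment above relies on unsigned wrap-around arithmetic inside a power-of-two ring. A minimal standalone sketch of that arithmetic follows; the ring_distance() helper, the ring size and the offsets are hypothetical and are not part of the driver or this patch.

/* Standalone sketch (hypothetical, not part of the patch): wrap-safe distance
 * between a software "pause here" offset and the hardware read offset inside a
 * ring of ring_size bytes, where ring_size is a power of two. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ring_distance(uint32_t ptr, uint32_t reader, uint32_t ring_size)
{
	/* Unsigned subtraction wraps modulo 2^32; masking with
	 * ring_size - 1 folds the result into the ring. */
	return (ptr - reader) & (ring_size - 1);
}

int main(void)
{
	uint32_t ring_size = 0x10000;	/* hypothetical 64 KiB ring */

	/* Reader near the end of the ring, ptr already wrapped to the start. */
	printf("%u\n", ring_distance(0x0100, 0xFF80, ring_size));	/* 384 */
	/* Reader just behind ptr, no wrap involved. */
	printf("%u\n", ring_distance(0x2000, 0x1F00, ring_size));	/* 256 */
	return 0;
}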
-
-static int via_wait_idle(drm_via_private_t *dev_priv)
-{
- int count = 10000000;
-
- while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
- ;
-
- while (count && (via_read(dev_priv, VIA_REG_STATUS) &
- (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
- VIA_3D_ENG_BUSY)))
- --count;
- return count;
-}
-
-static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
- uint32_t addr, uint32_t *cmd_addr_hi,
- uint32_t *cmd_addr_lo, int skip_wait)
-{
- uint32_t agp_base;
- uint32_t cmd_addr, addr_lo, addr_hi;
- uint32_t *vb;
- uint32_t qw_pad_count;
-
- if (!skip_wait)
- via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
-
- vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
- (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
- agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
- ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
-
- cmd_addr = (addr) ? addr :
- agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
- addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
- (cmd_addr & HC_HAGPBpL_MASK));
- addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
-
- vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
- VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
- return vb;
-}
-
-static void via_cmdbuf_start(drm_via_private_t *dev_priv)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
- uint32_t start_addr, start_addr_lo;
- uint32_t end_addr, end_addr_lo;
- uint32_t command;
- uint32_t agp_base;
- uint32_t ptr;
- uint32_t reader;
- int count;
-
- dev_priv->dma_low = 0;
-
- agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- start_addr = agp_base;
- end_addr = agp_base + dev_priv->dma_high;
-
- start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
- end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
- command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
- ((end_addr & 0xff000000) >> 16));
-
- dev_priv->last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
- &pause_addr_hi, &pause_addr_lo, 1) - 1;
-
- via_flush_write_combine();
- (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
-
- via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- via_write(dev_priv, VIA_REG_TRANSPACE, command);
- via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
- via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
-
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
- wmb();
- via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
- via_read(dev_priv, VIA_REG_TRANSPACE);
-
- dev_priv->dma_diff = 0;
-
- count = 10000000;
- while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
-
- reader = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- /*
- * This is the difference between where we tell the
- * command reader to pause and where it actually pauses.
- * This differs between hw implementations, so we need to
- * detect it.
- */
-
- dev_priv->dma_diff = ptr - reader;
-}
-
-static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
-{
- uint32_t *vb;
-
- via_cmdbuf_wait(dev_priv, qwords + 2);
- vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
- via_align_buffer(dev_priv, vb, qwords);
-}
-
-static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
-{
- uint32_t *vb = via_get_dma(dev_priv);
- SetReg2DAGP(0x0C, (0 | (0 << 16)));
- SetReg2DAGP(0x10, 0 | (0 << 16));
- SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
-}
-
-static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
- uint32_t jump_addr_lo, jump_addr_hi;
- volatile uint32_t *last_pause_ptr;
- uint32_t dma_low_save1, dma_low_save2;
-
- via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
- &jump_addr_lo, 0);
-
- dev_priv->dma_wrap = dev_priv->dma_low;
-
- /*
- * Wrap command buffer to the beginning.
- */
-
- dev_priv->dma_low = 0;
- if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
- DRM_ERROR("via_cmdbuf_jump failed\n");
-
- via_dummy_bitblt(dev_priv);
- via_dummy_bitblt(dev_priv);
-
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
-
- *last_pause_ptr = pause_addr_lo;
- dma_low_save1 = dev_priv->dma_low;
-
- /*
- * Now, set a trap that will pause the regulator if it tries to rerun the old
- * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
- * and reissues the jump command over PCI, while the regulator has already taken the jump
- * and actually paused at the current buffer end).
- * There appears to be no other way to detect this condition, since the hw_addr_pointer
- * does not seem to get updated immediately when a jump occurs.
- */
-
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
- *last_pause_ptr = pause_addr_lo;
-
- dma_low_save2 = dev_priv->dma_low;
- dev_priv->dma_low = dma_low_save1;
- via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
- dev_priv->dma_low = dma_low_save2;
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-
-static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_jump(dev_priv);
-}
-
-static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
-
- via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
-}
-
-static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
- via_wait_idle(dev_priv);
-}
-
-/*
- * User interface to the space and lag functions.
- */
-
-static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuf_size_t *d_siz = data;
- int ret = 0;
- uint32_t tmp_size, count;
- drm_via_private_t *dev_priv;
-
- DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("called without initializing AGP ring buffer.\n");
- return -EFAULT;
- }
-
- count = 1000000;
- tmp_size = d_siz->size;
- switch (d_siz->func) {
- case VIA_CMDBUF_SPACE:
- while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
- && --count) {
- if (!d_siz->wait)
- break;
- }
- if (!count) {
- DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
- ret = -EAGAIN;
- }
- break;
- case VIA_CMDBUF_LAG:
- while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
- && --count) {
- if (!d_siz->wait)
- break;
- }
- if (!count) {
- DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
- ret = -EAGAIN;
- }
- break;
- default:
- ret = -EFAULT;
- }
- d_siz->size = tmp_size;
-
- return ret;
-}
-
-const struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
-};
-
-int via_max_ioctl = ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
deleted file mode 100644
index e016a4d62090..000000000000
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
- * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Hellstrom.
- * Partially based on code obtained from Digeo Inc.
- */
-
-
-/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
- */
-
-#include <linux/pagemap.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_device.h>
-#include <drm/via_drm.h>
-
-#include "via_dmablit.h"
-#include "via_drv.h"
-
-#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
-#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
-#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
-
-typedef struct _drm_via_descriptor {
- uint32_t mem_addr;
- uint32_t dev_addr;
- uint32_t size;
- uint32_t next;
-} drm_via_descriptor_t;
-
-
-/*
- * Unmap a DMA mapping.
- */
-
-
-
-static void
-via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
-{
- int num_desc = vsg->num_desc;
- unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
- unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
- descriptor_this_page;
- dma_addr_t next = vsg->chain_start;
-
- while (num_desc--) {
- if (descriptor_this_page-- == 0) {
- cur_descriptor_page--;
- descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
- descriptor_this_page;
- }
- dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
- dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
- next = (dma_addr_t) desc_ptr->next;
- desc_ptr--;
- }
-}
-
-/*
- * If mode = 0, count how many descriptors are needed.
- * If mode = 1, map the DMA pages for the device, build the descriptor chain and map it as well.
- * Descriptors are run in reverse order by the hardware because we are not allowed to update the
- * 'next' field without syncing calls when the descriptor is already mapped.
- */
-
-static void
-via_map_blit_for_device(struct pci_dev *pdev,
- const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
- int mode)
-{
- unsigned cur_descriptor_page = 0;
- unsigned num_descriptors_this_page = 0;
- unsigned char *mem_addr = xfer->mem_addr;
- unsigned char *cur_mem;
- unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
- uint32_t fb_addr = xfer->fb_addr;
- uint32_t cur_fb;
- unsigned long line_len;
- unsigned remaining_len;
- int num_desc = 0;
- int cur_line;
- dma_addr_t next = 0 | VIA_DMA_DPR_EC;
- drm_via_descriptor_t *desc_ptr = NULL;
-
- if (mode == 1)
- desc_ptr = vsg->desc_pages[cur_descriptor_page];
-
- for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
-
- line_len = xfer->line_length;
- cur_fb = fb_addr;
- cur_mem = mem_addr;
-
- while (line_len > 0) {
-
- remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
- line_len -= remaining_len;
-
- if (mode == 1) {
- desc_ptr->mem_addr =
- dma_map_page(&pdev->dev,
- vsg->pages[VIA_PFN(cur_mem) -
- VIA_PFN(first_addr)],
- VIA_PGOFF(cur_mem), remaining_len,
- vsg->direction);
- desc_ptr->dev_addr = cur_fb;
-
- desc_ptr->size = remaining_len;
- desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
- DMA_TO_DEVICE);
- desc_ptr++;
- if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
- num_descriptors_this_page = 0;
- desc_ptr = vsg->desc_pages[++cur_descriptor_page];
- }
- }
-
- num_desc++;
- cur_mem += remaining_len;
- cur_fb += remaining_len;
- }
-
- mem_addr += xfer->mem_stride;
- fb_addr += xfer->fb_stride;
- }
-
- if (mode == 1) {
- vsg->chain_start = next;
- vsg->state = dr_via_device_mapped;
- }
- vsg->num_desc = num_desc;
-}
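Editorial aside: the mode 0 / mode 1 scheme in via_map_blit_for_device above is a plain count-then-fill two-pass pattern, where the same routine is run once to size the descriptor storage and once to populate it. A minimal user-space sketch of that pattern follows; build_chunks(), the chunk size and the buffer sizes are hypothetical and do not reflect the driver's actual descriptor layout.

/* Standalone sketch (hypothetical, not part of the patch): count-then-fill. */
#include <stdio.h>
#include <stdlib.h>

struct desc { unsigned long addr; unsigned long len; };

/* mode == 0: only count chunks; mode == 1: also fill the descriptor array. */
static int build_chunks(unsigned long start, unsigned long len,
			unsigned long chunk, struct desc *out, int mode)
{
	int n = 0;

	while (len) {
		unsigned long this_len = len < chunk ? len : chunk;

		if (mode == 1) {
			out[n].addr = start;
			out[n].len = this_len;
		}
		start += this_len;
		len -= this_len;
		n++;
	}
	return n;
}

int main(void)
{
	int n = build_chunks(0x1000, 10000, 4096, NULL, 0);	/* pass 1: count */
	struct desc *d = calloc(n, sizeof(*d));

	build_chunks(0x1000, 10000, 4096, d, 1);		/* pass 2: fill */
	printf("%d descriptors, last len %lu\n", n, d[n - 1].len);	/* 3, 1808 */
	free(d);
	return 0;
}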
-
-/*
- * Function that frees up all resources for a blit. It is usable even if the
- * blit info has only been partially built as long as the status enum is consistent
- * with the actual status of the used resources.
- */
-
-
-static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
-{
- int i;
-
- switch (vsg->state) {
- case dr_via_device_mapped:
- via_unmap_blit_from_device(pdev, vsg);
- fallthrough;
- case dr_via_desc_pages_alloc:
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (vsg->desc_pages[i] != NULL)
- free_page((unsigned long)vsg->desc_pages[i]);
- }
- kfree(vsg->desc_pages);
- fallthrough;
- case dr_via_pages_locked:
- unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE));
- fallthrough;
- case dr_via_pages_alloc:
- vfree(vsg->pages);
- fallthrough;
- default:
- vsg->state = dr_via_sg_init;
- }
- vfree(vsg->bounce_buffer);
- vsg->bounce_buffer = NULL;
- vsg->free_on_sequence = 0;
-}
-
-/*
- * Fire a blit engine.
- */
-
-static void
-via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
- VIA_DMA_CSR_DE);
- via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
- via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
- wmb();
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
-}
-
-/*
- * Obtain a page pointer array and lock all pages into system memory. This fails
- * if the calling user does not have access to the submitted address.
- */
-
-static int
-via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
-{
- int ret;
- unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
- first_pfn + 1;
-
- vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
- if (NULL == vsg->pages)
- return -ENOMEM;
- ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
- vsg->num_pages,
- vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
- vsg->pages);
- if (ret != vsg->num_pages) {
- if (ret < 0)
- return ret;
- vsg->state = dr_via_pages_locked;
- return -EINVAL;
- }
- vsg->state = dr_via_pages_locked;
- DRM_DEBUG("DMA pages locked\n");
- return 0;
-}
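Editorial aside: the first_pfn / num_pages computation in via_lock_all_dma_pages above is standard page-span arithmetic. A small standalone sketch follows, assuming 4 KiB pages; the pages_spanned() helper and the example addresses are hypothetical and are not part of the driver.

/* Standalone sketch (hypothetical, not part of the patch): how many pages a
 * buffer of 'len' bytes starting at 'addr' spans. */
#include <stdio.h>

#define PAGE_SHIFT_EX 12	/* assume 4 KiB pages for the example */

static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	unsigned long first = addr >> PAGE_SHIFT_EX;
	unsigned long last = (addr + len - 1) >> PAGE_SHIFT_EX;

	return last - first + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(0x1ff0, 0x20));	/* 2: straddles two pages */
	printf("%lu\n", pages_spanned(0x2000, 0x1000));	/* 1: exactly one page */
	return 0;
}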
-
-/*
- * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
- * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
- * quite large for some blits, and pages don't need to be contiguous.
- */
-
-static int
-via_alloc_desc_pages(drm_via_sg_info_t *vsg)
-{
- int i;
-
- vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
- vsg->descriptors_per_page;
-
- if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
- return -ENOMEM;
-
- vsg->state = dr_via_desc_pages_alloc;
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
- (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
- }
- DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
- vsg->num_desc);
- return 0;
-}
-
-static void
-via_abort_dmablit(struct drm_device *dev, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
-}
-
-static void
-via_dmablit_engine_off(struct drm_device *dev, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
-}
-
-
-
-/*
- * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
- * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
- * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
- * the workqueue task takes care of processing associated with the old blit.
- */
-
-void
-via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
- int cur;
- int done_transfer;
- unsigned long irqsave = 0;
- uint32_t status = 0;
-
- DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
- engine, from_irq, (unsigned long) blitq);
-
- if (from_irq)
- spin_lock(&blitq->blit_lock);
- else
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- done_transfer = blitq->is_active &&
- ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
-
- cur = blitq->cur;
- if (done_transfer) {
-
- blitq->blits[cur]->aborted = blitq->aborting;
- blitq->done_blit_handle++;
- wake_up(blitq->blit_queue + cur);
-
- cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
- cur = 0;
- blitq->cur = cur;
-
- /*
- * Clear transfer done flag.
- */
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
-
- blitq->is_active = 0;
- blitq->aborting = 0;
- schedule_work(&blitq->wq);
-
- } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
-
- /*
- * Abort transfer after one second.
- */
-
- via_abort_dmablit(dev, engine);
- blitq->aborting = 1;
- blitq->end = jiffies + HZ;
- }
-
- if (!blitq->is_active) {
- if (blitq->num_outstanding) {
- via_fire_dmablit(dev, blitq->blits[cur], engine);
- blitq->is_active = 1;
- blitq->cur = cur;
- blitq->num_outstanding--;
- blitq->end = jiffies + HZ;
- if (!timer_pending(&blitq->poll_timer))
- mod_timer(&blitq->poll_timer, jiffies + 1);
- } else {
- if (timer_pending(&blitq->poll_timer))
- del_timer(&blitq->poll_timer);
- via_dmablit_engine_off(dev, engine);
- }
- }
-
- if (from_irq)
- spin_unlock(&blitq->blit_lock);
- else
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-}
-
-
-
-/*
- * Check whether this blit is still active, performing necessary locking.
- */
-
-static int
-via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
-{
- unsigned long irqsave;
- uint32_t slot;
- int active;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- /*
- * Allow for handle wraparounds.
- */
-
- active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
- ((blitq->cur_blit_handle - handle) <= (1 << 23));
-
- if (queue && active) {
- slot = handle - blitq->done_blit_handle + blitq->cur - 1;
- if (slot >= VIA_NUM_BLIT_SLOTS)
- slot -= VIA_NUM_BLIT_SLOTS;
- *queue = blitq->blit_queue + slot;
- }
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- return active;
-}
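Editorial aside: the handle test in via_dmablit_active above is a serial-number style comparison that stays correct when the 32-bit handles wrap around. A minimal standalone sketch with the same 1 << 23 window follows; the handle_after() helper and the example values are hypothetical and are not part of the driver.

/* Standalone sketch (hypothetical, not part of the patch): wrap-safe ordering
 * of 32-bit handles. "a is strictly after b" iff the unsigned difference
 * b - a falls into the large half of the window. */
#include <stdint.h>
#include <stdio.h>

#define HALF_RANGE (1u << 23)	/* same window the driver uses */

static int handle_after(uint32_t a, uint32_t b)
{
	return (b - a) > HALF_RANGE;
}

int main(void)
{
	/* Handles near the 32-bit wrap point still compare correctly. */
	printf("%d\n", handle_after(0x00000002u, 0xFFFFFFFEu));	/* 1: after */
	printf("%d\n", handle_after(0xFFFFFFFEu, 0x00000002u));	/* 0: not after */
	return 0;
}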
-
-/*
- * Sync. Wait for at least three seconds for the blit to be performed.
- */
-
-static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
- wait_queue_head_t *queue;
- int ret = 0;
-
- if (via_dmablit_active(blitq, engine, handle, &queue)) {
- VIA_WAIT_ON(ret, *queue, 3 * HZ,
- !via_dmablit_active(blitq, engine, handle, NULL));
- }
- DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
- handle, engine, ret);
-
- return ret;
-}
-
-
-/*
- * A timer that regularly polls the blit engine in cases where we don't have interrupts:
- * a) Broken hardware (typically those that don't have any video capture facility).
- * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
- * The timer and hardware IRQs can and do work in parallel. If the hardware has
- * IRQs, they will shorten the latency somewhat.
- */
-
-
-
-static void
-via_dmablit_timer(struct timer_list *t)
-{
- drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
- struct drm_device *dev = blitq->dev;
- int engine = (int)
- (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
- (unsigned long) jiffies);
-
- via_dmablit_handler(dev, engine, 0);
-
- if (!timer_pending(&blitq->poll_timer)) {
- mod_timer(&blitq->poll_timer, jiffies + 1);
-
- /*
- * Rerun handler to delete timer if engines are off, and
- * to shorten abort latency. This is a little nasty.
- */
-
- via_dmablit_handler(dev, engine, 0);
-
- }
-}
-
-
-
-
-/*
- * Workqueue task that frees data and mappings associated with a blit.
- * Also wakes up waiting processes. Each of these tasks handles one
- * blit engine only and may not be called on each interrupt.
- */
-
-
-static void
-via_dmablit_workqueue(struct work_struct *work)
-{
- drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
- struct drm_device *dev = blitq->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- unsigned long irqsave;
- drm_via_sg_info_t *cur_sg;
- int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
- (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- while (blitq->serviced != blitq->cur) {
-
- cur_released = blitq->serviced++;
-
- DRM_DEBUG("Releasing blit slot %d\n", cur_released);
-
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
- blitq->serviced = 0;
-
- cur_sg = blitq->blits[cur_released];
- blitq->num_free++;
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- wake_up(&blitq->busy_queue);
-
- via_free_sg_info(pdev, cur_sg);
- kfree(cur_sg);
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-}
-
-
-/*
- * Init all blit engines. Currently we use two, but some hardware has 4.
- */
-
-
-void
-via_init_dmablit(struct drm_device *dev)
-{
- int i, j;
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_via_blitq_t *blitq;
-
- pci_set_master(pdev);
-
- for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
- blitq = dev_priv->blit_queues + i;
- blitq->dev = dev;
- blitq->cur_blit_handle = 0;
- blitq->done_blit_handle = 0;
- blitq->head = 0;
- blitq->cur = 0;
- blitq->serviced = 0;
- blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
- blitq->num_outstanding = 0;
- blitq->is_active = 0;
- blitq->aborting = 0;
- spin_lock_init(&blitq->blit_lock);
- for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
- init_waitqueue_head(blitq->blit_queue + j);
- init_waitqueue_head(&blitq->busy_queue);
- INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
- }
-}
-
-/*
- * Build all info and do all mappings required for a blit.
- */
-
-
-static int
-via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int draw = xfer->to_fb;
- int ret = 0;
-
- vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
- vsg->bounce_buffer = NULL;
-
- vsg->state = dr_via_sg_init;
-
- if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
- DRM_ERROR("Zero size bitblt.\n");
- return -EINVAL;
- }
-
- /*
- * The check below is a driver limitation, not a hardware one. We
- * don't want to lock unused pages, and don't want to incorporate the
- * extra logic of avoiding them. Make sure there are none.
- * (Not a big limitation anyway.)
- */
-
- if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
- DRM_ERROR("Too large system memory stride. Stride: %d, "
- "Length: %d\n", xfer->mem_stride, xfer->line_length);
- return -EINVAL;
- }
-
- if ((xfer->mem_stride == xfer->line_length) &&
- (xfer->fb_stride == xfer->line_length)) {
- xfer->mem_stride *= xfer->num_lines;
- xfer->line_length = xfer->mem_stride;
- xfer->fb_stride = xfer->mem_stride;
- xfer->num_lines = 1;
- }
-
- /*
- * Don't lock an arbitrarily large number of pages, since that causes a
- * DoS security hole.
- */
-
- if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
- DRM_ERROR("Too large PCI DMA bitblt.\n");
- return -EINVAL;
- }
-
- /*
- * We allow a negative fb stride so that images can be flipped
- * during transfer.
- */
-
- if (xfer->mem_stride < xfer->line_length ||
- abs(xfer->fb_stride) < xfer->line_length) {
- DRM_ERROR("Invalid frame-buffer / memory stride.\n");
- return -EINVAL;
- }
-
- /*
- * A hardware bug seems to be worked around if system memory addresses start on
- * 16 byte boundaries. This seems a bit restrictive, however. VIA has been contacted
- * about this. Meanwhile, impose the following restrictions:
- */
-
-#ifdef VIA_BUGFREE
- if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
- DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return -EINVAL;
- }
-#else
- if ((((unsigned long)xfer->mem_addr & 15) ||
- ((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) &&
- ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
- DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return -EINVAL;
- }
-#endif
-
- if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
- DRM_ERROR("Could not lock DMA pages.\n");
- via_free_sg_info(pdev, vsg);
- return ret;
- }
-
- via_map_blit_for_device(pdev, xfer, vsg, 0);
- if (0 != (ret = via_alloc_desc_pages(vsg))) {
- DRM_ERROR("Could not allocate DMA descriptor pages.\n");
- via_free_sg_info(pdev, vsg);
- return ret;
- }
- via_map_blit_for_device(pdev, xfer, vsg, 1);
-
- return 0;
-}
-
-
-/*
- * Reserve one free slot in the blit queue. Will wait for one second for one
- * to become available. Otherwise -EBUSY is returned.
- */
-
-static int
-via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
-{
- int ret = 0;
- unsigned long irqsave;
-
- DRM_DEBUG("Num free is %d\n", blitq->num_free);
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while (blitq->num_free == 0) {
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
- if (ret)
- return (-EINTR == ret) ? -EAGAIN : ret;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
-
- blitq->num_free--;
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- return 0;
-}
-
-/*
- * Hand back a free slot if we changed our mind.
- */
-
-static void
-via_dmablit_release_slot(drm_via_blitq_t *blitq)
-{
- unsigned long irqsave;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- blitq->num_free++;
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- wake_up(&blitq->busy_queue);
-}
-
-/*
- * Grab a free slot. Build blit info and queue a blit.
- */
-
-
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_sg_info_t *vsg;
- drm_via_blitq_t *blitq;
- int ret;
- int engine;
- unsigned long irqsave;
-
- if (dev_priv == NULL) {
- DRM_ERROR("Called without initialization.\n");
- return -EINVAL;
- }
-
- engine = (xfer->to_fb) ? 0 : 1;
- blitq = dev_priv->blit_queues + engine;
- if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
- return ret;
- if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
- via_dmablit_release_slot(blitq);
- return -ENOMEM;
- }
- if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
- via_dmablit_release_slot(blitq);
- kfree(vsg);
- return ret;
- }
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
- blitq->head = 0;
- blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- xfer->sync.engine = engine;
-
- via_dmablit_handler(dev, engine, 0);
-
- return 0;
-}
-
-/*
- * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
- * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
- * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
- */
-
-int
-via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_blitsync_t *sync = data;
- int err;
-
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
- return -EINVAL;
-
- err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
-
- if (-EINTR == err)
- err = -EAGAIN;
-
- return err;
-}
-
-
-/*
- * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
- * be reissued. See the above IOCTL code.
- */
-
-int
-via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_dmablit_t *xfer = data;
- int err;
-
- err = via_dmablit(dev, xfer);
-
- return err;
-}
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
deleted file mode 100644
index 9b662a327cef..000000000000
--- a/drivers/gpu/drm/via/via_dmablit.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Hellstrom.
- * Register info from Digeo Inc.
- */
-
-#ifndef _VIA_DMABLIT_H
-#define _VIA_DMABLIT_H
-
-#include <linux/dma-mapping.h>
-
-#define VIA_NUM_BLIT_ENGINES 2
-#define VIA_NUM_BLIT_SLOTS 8
-
-struct _drm_via_descriptor;
-
-typedef struct _drm_via_sg_info {
- struct page **pages;
- unsigned long num_pages;
- struct _drm_via_descriptor **desc_pages;
- int num_desc_pages;
- int num_desc;
- enum dma_data_direction direction;
- unsigned char *bounce_buffer;
- dma_addr_t chain_start;
- uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
- int aborted;
- enum {
- dr_via_device_mapped,
- dr_via_desc_pages_alloc,
- dr_via_pages_locked,
- dr_via_pages_alloc,
- dr_via_sg_init
- } state;
-} drm_via_sg_info_t;
-
-typedef struct _drm_via_blitq {
- struct drm_device *dev;
- uint32_t cur_blit_handle;
- uint32_t done_blit_handle;
- unsigned serviced;
- unsigned head;
- unsigned cur;
- unsigned num_free;
- unsigned num_outstanding;
- unsigned long end;
- int aborting;
- int is_active;
- drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
- spinlock_t blit_lock;
- wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
- wait_queue_head_t busy_queue;
- struct work_struct wq;
- struct timer_list poll_timer;
-} drm_via_blitq_t;
-
-
-/*
- * PCI DMA Registers
- * Channels 2 & 3 don't seem to be implemented in hardware.
- */
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
-/* DPR */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
-#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
-
-/* MR */
-#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
-#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
-#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
-
-/* CSR */
-#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
-#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
-#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
-#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
-#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-
-
-
-#endif
diff --git a/drivers/gpu/drm/via/via_dri1.c b/drivers/gpu/drm/via/via_dri1.c
new file mode 100644
index 000000000000..217d1e84b0ea
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dri1.c
@@ -0,0 +1,3630 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2002 Tungsten Graphics, Inc.
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. All Rights Reserved.
+ * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
+ * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A. All Rights Reserved.
+ * Copyright 2004 The Unichrome project. All Rights Reserved.
+ * Copyright 2004 BEAM Ltd.
+ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_legacy.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_pciids.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/via_drm.h>
+
+#include "via_3d_reg.h"
+
+#define DRIVER_AUTHOR "Various"
+
+#define DRIVER_NAME "via"
+#define DRIVER_DESC "VIA Unichrome / Pro"
+#define DRIVER_DATE "20070202"
+
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 11
+#define DRIVER_PATCHLEVEL 1
+
+typedef enum {
+ no_sequence = 0,
+ z_address,
+ dest_address,
+ tex_address
+} drm_via_sequence_t;
+
+typedef struct {
+ unsigned texture;
+ uint32_t z_addr;
+ uint32_t d_addr;
+ uint32_t t_addr[2][10];
+ uint32_t pitch[2][10];
+ uint32_t height[2][10];
+ uint32_t tex_level_lo[2];
+ uint32_t tex_level_hi[2];
+ uint32_t tex_palette_size[2];
+ uint32_t tex_npot[2];
+ drm_via_sequence_t unfinished;
+ int agp_texture;
+ int multitex;
+ struct drm_device *dev;
+ drm_local_map_t *map_cache;
+ uint32_t vertex_count;
+ int agp;
+ const uint32_t *buf_start;
+} drm_via_state_t;
+
+#define VIA_PCI_BUF_SIZE 60000
+#define VIA_FIRE_BUF_SIZE 1024
+#define VIA_NUM_IRQS 4
+
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+struct _drm_via_descriptor;
+
+typedef struct _drm_via_sg_info {
+ struct page **pages;
+ unsigned long num_pages;
+ struct _drm_via_descriptor **desc_pages;
+ int num_desc_pages;
+ int num_desc;
+ enum dma_data_direction direction;
+ unsigned char *bounce_buffer;
+ dma_addr_t chain_start;
+ uint32_t free_on_sequence;
+ unsigned int descriptors_per_page;
+ int aborted;
+ enum {
+ dr_via_device_mapped,
+ dr_via_desc_pages_alloc,
+ dr_via_pages_locked,
+ dr_via_pages_alloc,
+ dr_via_sg_init
+ } state;
+} drm_via_sg_info_t;
+
+typedef struct _drm_via_blitq {
+ struct drm_device *dev;
+ uint32_t cur_blit_handle;
+ uint32_t done_blit_handle;
+ unsigned serviced;
+ unsigned head;
+ unsigned cur;
+ unsigned num_free;
+ unsigned num_outstanding;
+ unsigned long end;
+ int aborting;
+ int is_active;
+ drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
+ spinlock_t blit_lock;
+ wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+ wait_queue_head_t busy_queue;
+ struct work_struct wq;
+ struct timer_list poll_timer;
+} drm_via_blitq_t;
+
+typedef struct drm_via_ring_buffer {
+ drm_local_map_t map;
+ char *virtual_start;
+} drm_via_ring_buffer_t;
+
+typedef uint32_t maskarray_t[5];
+
+typedef struct drm_via_irq {
+ atomic_t irq_received;
+ uint32_t pending_mask;
+ uint32_t enable_mask;
+ wait_queue_head_t irq_queue;
+} drm_via_irq_t;
+
+typedef struct drm_via_private {
+ drm_via_sarea_t *sarea_priv;
+ drm_local_map_t *sarea;
+ drm_local_map_t *fb;
+ drm_local_map_t *mmio;
+ unsigned long agpAddr;
+ wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
+ char *dma_ptr;
+ unsigned int dma_low;
+ unsigned int dma_high;
+ unsigned int dma_offset;
+ uint32_t dma_wrap;
+ volatile uint32_t *last_pause_ptr;
+ volatile uint32_t *hw_addr_ptr;
+ drm_via_ring_buffer_t ring;
+ ktime_t last_vblank;
+ int last_vblank_valid;
+ ktime_t nsec_per_vblank;
+ atomic_t vbl_received;
+ drm_via_state_t hc_state;
+ char pci_buf[VIA_PCI_BUF_SIZE];
+ const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+ uint32_t num_fire_offsets;
+ int chipset;
+ drm_via_irq_t via_irqs[VIA_NUM_IRQS];
+ unsigned num_irqs;
+ maskarray_t *irq_masks;
+ uint32_t irq_enable_mask;
+ uint32_t irq_pending_mask;
+ int *irq_map;
+ unsigned int idle_fault;
+ int vram_initialized;
+ struct drm_mm vram_mm;
+ int agp_initialized;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
+ unsigned long vram_offset;
+ unsigned long agp_offset;
+ drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
+ uint32_t dma_diff;
+} drm_via_private_t;
+
+struct via_file_private {
+ struct list_head obj_list;
+};
+
+enum via_family {
+ VIA_OTHER = 0, /* Baseline */
+ VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
+ VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
+};
+
+/* VIA MMIO register access */
+static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
+{
+ return readl((void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8_mask(struct drm_via_private *dev_priv,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
+ tmp = (tmp & ~mask) | (val & mask);
+ writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+/*
+ * Poll in a loop waiting for 'condition' to be true.
+ * Note: A direct replacement with wait_event_interruptible_timeout()
+ * will not work unless the driver is updated to emit wake_up()
+ * in the relevant places that can affect the 'condition'.
+ *
+ * Returns:
+ * ret keeps its current value if 'condition' becomes true
+ * ret = -EBUSY if a timeout happens
+ * ret = -EINTR if a signal interrupted the waiting period
+ */
+#define VIA_WAIT_ON( ret, queue, timeout, condition ) \
+do { \
+ DECLARE_WAITQUEUE(entry, current); \
+ unsigned long end = jiffies + (timeout); \
+ add_wait_queue(&(queue), &entry); \
+ \
+ for (;;) { \
+ __set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (time_after_eq(jiffies, end)) { \
+ ret = -EBUSY; \
+ break; \
+ } \
+ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
+ if (signal_pending(current)) { \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&(queue), &entry); \
+} while (0)
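Editorial aside: VIA_WAIT_ON above is a poll-with-deadline loop built on a wait queue. A minimal user-space sketch of the same control flow (check the condition, check the deadline, back off briefly) follows; the wait_on() helper, the 10 ms tick and the countdown condition are hypothetical, the wait-queue and signal handling of the real macro are deliberately omitted, and nothing here is part of the driver or this patch.

/* Standalone sketch (hypothetical, not part of the patch): poll until a
 * condition becomes true or a deadline expires. */
#include <stdio.h>
#include <time.h>
#include <errno.h>

static int wait_on(int (*condition)(void *), void *arg, double timeout_s)
{
	struct timespec start, now, tick = { 0, 10 * 1000 * 1000 };	/* 10 ms */

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (condition(arg))
			return 0;			/* condition met */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) +
		    (now.tv_nsec - start.tv_nsec) / 1e9 >= timeout_s)
			return -EBUSY;			/* deadline reached */
		nanosleep(&tick, NULL);			/* back off instead of spinning */
	}
}

static int counter_done(void *arg)
{
	return --*(int *)arg <= 0;
}

int main(void)
{
	int countdown = 5;

	printf("%d\n", wait_on(counter_done, &countdown, 1.0));	/* prints 0 */
	return 0;
}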
+
+int via_do_cleanup_map(struct drm_device *dev);
+
+int via_dma_cleanup(struct drm_device *dev);
+int via_driver_dma_quiescent(struct drm_device *dev);
+
+#define CMDBUF_ALIGNMENT_SIZE (0x100)
+#define CMDBUF_ALIGNMENT_MASK (0x0ff)
+
+/* defines for VIA 3D registers */
+#define VIA_REG_STATUS 0x400
+#define VIA_REG_TRANSET 0x43C
+#define VIA_REG_TRANSPACE 0x440
+
+/* VIA_REG_STATUS(0x400): Engine Status */
+#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
+
+#define SetReg2DAGP(nReg, nData) { \
+ *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
+ *((uint32_t *)(vb) + 1) = (nData); \
+ vb = ((uint32_t *)vb) + 2; \
+ dev_priv->dma_low += 8; \
+}
+
+#define via_flush_write_combine() mb()
+
+#define VIA_OUT_RING_QW(w1, w2) do { \
+ *vb++ = (w1); \
+ *vb++ = (w2); \
+ dev_priv->dma_low += 8; \
+} while (0)
+
+#define VIA_MM_ALIGN_SHIFT 4
+#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+
+struct via_memblock {
+ struct drm_mm_node mm_node;
+ struct list_head owner_list;
+};
+
+#define VIA_REG_INTERRUPT 0x200
+
+/* VIA_REG_INTERRUPT */
+#define VIA_IRQ_GLOBAL (1 << 31)
+#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
+#define VIA_IRQ_VBLANK_PENDING (1 << 3)
+#define VIA_IRQ_HQV0_ENABLE (1 << 11)
+#define VIA_IRQ_HQV1_ENABLE (1 << 25)
+#define VIA_IRQ_HQV0_PENDING (1 << 9)
+#define VIA_IRQ_HQV1_PENDING (1 << 10)
+#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
+/*
+ * PCI DMA Registers
+ * Channels 2 & 3 don't seem to be implemented in hardware.
+ */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
+#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
+#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
+#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
+#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
+#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+
+/*
+ * Device-specific IRQs go here. This type might need to be extended with
+ * the register if there are multiple IRQ control registers.
+ * Currently we activate the HQV interrupts of Unichrome Pro group A.
+ */
+
+static maskarray_t via_pro_group_a_irqs[] = {
+ {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
+ 0x00000000 },
+ {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
+ 0x00000000 },
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+};
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
+
+static maskarray_t via_unichrome_irqs[] = {
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
+
+
+/*
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
+ */
+#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
+
+typedef struct _drm_via_descriptor {
+ uint32_t mem_addr;
+ uint32_t dev_addr;
+ uint32_t size;
+ uint32_t next;
+} drm_via_descriptor_t;
+
+typedef enum {
+ state_command,
+ state_header2,
+ state_header1,
+ state_vheader5,
+ state_vheader6,
+ state_error
+} verifier_state_t;
+
+typedef enum {
+ no_check = 0,
+ check_for_header2,
+ check_for_header1,
+ check_for_header2_err,
+ check_for_header1_err,
+ check_for_fire,
+ check_z_buffer_addr0,
+ check_z_buffer_addr1,
+ check_z_buffer_addr_mode,
+ check_destination_addr0,
+ check_destination_addr1,
+ check_destination_addr_mode,
+ check_for_dummy,
+ check_for_dd,
+ check_texture_addr0,
+ check_texture_addr1,
+ check_texture_addr2,
+ check_texture_addr3,
+ check_texture_addr4,
+ check_texture_addr5,
+ check_texture_addr6,
+ check_texture_addr7,
+ check_texture_addr8,
+ check_texture_addr_mode,
+ check_for_vertex_count,
+ check_number_texunits,
+ forbidden_command
+} hazard_t;
+
+/*
+ * Associates each hazard above with a possible multi-command
+ * sequence. For example, an address that is split over multiple
+ * commands and that needs to be checked at the first command
+ * that does not include any part of the address.
+ */
+
+static drm_via_sequence_t seqs[] = {
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ z_address,
+ z_address,
+ z_address,
+ dest_address,
+ dest_address,
+ dest_address,
+ no_sequence,
+ no_sequence,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ no_sequence
+};
+
+typedef struct {
+ unsigned int code;
+ hazard_t hz;
+} hz_init_t;
+
+static hz_init_t init_table1[] = {
+ {0xf2, check_for_header2_err},
+ {0xf0, check_for_header1_err},
+ {0xee, check_for_fire},
+ {0xcc, check_for_dummy},
+ {0xdd, check_for_dd},
+ {0x00, no_check},
+ {0x10, check_z_buffer_addr0},
+ {0x11, check_z_buffer_addr1},
+ {0x12, check_z_buffer_addr_mode},
+ {0x13, no_check},
+ {0x14, no_check},
+ {0x15, no_check},
+ {0x23, no_check},
+ {0x24, no_check},
+ {0x33, no_check},
+ {0x34, no_check},
+ {0x35, no_check},
+ {0x36, no_check},
+ {0x37, no_check},
+ {0x38, no_check},
+ {0x39, no_check},
+ {0x3A, no_check},
+ {0x3B, no_check},
+ {0x3C, no_check},
+ {0x3D, no_check},
+ {0x3E, no_check},
+ {0x40, check_destination_addr0},
+ {0x41, check_destination_addr1},
+ {0x42, check_destination_addr_mode},
+ {0x43, no_check},
+ {0x44, no_check},
+ {0x50, no_check},
+ {0x51, no_check},
+ {0x52, no_check},
+ {0x53, no_check},
+ {0x54, no_check},
+ {0x55, no_check},
+ {0x56, no_check},
+ {0x57, no_check},
+ {0x58, no_check},
+ {0x70, no_check},
+ {0x71, no_check},
+ {0x78, no_check},
+ {0x79, no_check},
+ {0x7A, no_check},
+ {0x7B, no_check},
+ {0x7C, no_check},
+ {0x7D, check_for_vertex_count}
+};
+
+static hz_init_t init_table2[] = {
+ {0xf2, check_for_header2_err},
+ {0xf0, check_for_header1_err},
+ {0xee, check_for_fire},
+ {0xcc, check_for_dummy},
+ {0x00, check_texture_addr0},
+ {0x01, check_texture_addr0},
+ {0x02, check_texture_addr0},
+ {0x03, check_texture_addr0},
+ {0x04, check_texture_addr0},
+ {0x05, check_texture_addr0},
+ {0x06, check_texture_addr0},
+ {0x07, check_texture_addr0},
+ {0x08, check_texture_addr0},
+ {0x09, check_texture_addr0},
+ {0x20, check_texture_addr1},
+ {0x21, check_texture_addr1},
+ {0x22, check_texture_addr1},
+ {0x23, check_texture_addr4},
+ {0x2B, check_texture_addr3},
+ {0x2C, check_texture_addr3},
+ {0x2D, check_texture_addr3},
+ {0x2E, check_texture_addr3},
+ {0x2F, check_texture_addr3},
+ {0x30, check_texture_addr3},
+ {0x31, check_texture_addr3},
+ {0x32, check_texture_addr3},
+ {0x33, check_texture_addr3},
+ {0x34, check_texture_addr3},
+ {0x4B, check_texture_addr5},
+ {0x4C, check_texture_addr6},
+ {0x51, check_texture_addr7},
+ {0x52, check_texture_addr8},
+ {0x77, check_texture_addr2},
+ {0x78, no_check},
+ {0x79, no_check},
+ {0x7A, no_check},
+ {0x7B, check_texture_addr_mode},
+ {0x7C, no_check},
+ {0x7D, no_check},
+ {0x7E, no_check},
+ {0x7F, no_check},
+ {0x80, no_check},
+ {0x81, no_check},
+ {0x82, no_check},
+ {0x83, no_check},
+ {0x85, no_check},
+ {0x86, no_check},
+ {0x87, no_check},
+ {0x88, no_check},
+ {0x89, no_check},
+ {0x8A, no_check},
+ {0x90, no_check},
+ {0x91, no_check},
+ {0x92, no_check},
+ {0x93, no_check}
+};
+
+static hz_init_t init_table3[] = {
+ {0xf2, check_for_header2_err},
+ {0xf0, check_for_header1_err},
+ {0xcc, check_for_dummy},
+ {0x00, check_number_texunits}
+};
+
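+/*
+ * Hazard lookup tables, indexed by the top byte of a command dword. Entries
+ * default to forbidden_command and are filled in from the init tables above
+ * by via_init_command_verifier().
+ */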
+static hazard_t table1[256];
+static hazard_t table2[256];
+static hazard_t table3[256];
+
+static __inline__ int
+eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
+{
+ if ((buf_end - *buf) >= num_words) {
+ *buf += num_words;
+ return 0;
+ }
+ DRM_ERROR("Illegal termination of DMA command buffer\n");
+ return 1;
+}
+
+/*
+ * Partially stolen from drm_memory.h
+ */
+
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
+ unsigned long offset,
+ unsigned long size,
+ struct drm_device *dev)
+{
+ struct drm_map_list *r_list;
+ drm_local_map_t *map = seq->map_cache;
+
+ if (map && map->offset <= offset
+ && (offset + size) <= (map->offset + map->size)) {
+ return map;
+ }
+
+ list_for_each_entry(r_list, &dev->maplist, head) {
+ map = r_list->map;
+ if (!map)
+ continue;
+ if (map->offset <= offset
+ && (offset + size) <= (map->offset + map->size)
+ && !(map->flags & _DRM_RESTRICTED)
+ && (map->type == _DRM_AGP)) {
+ seq->map_cache = map;
+ return map;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Require that all AGP texture levels reside in the same AGP map, which should
+ * be mappable by the client. This is not a big restriction.
+ * FIXME: To actually enforce this security policy strictly, drm_rmmap
+ * would have to wait for DMA quiescence before removing an AGP map.
+ * The via_drm_lookup_agp_map call in reality seems to take
+ * very little CPU time.
+ */
+
+static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
+{
+ switch (cur_seq->unfinished) {
+ case z_address:
+ DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
+ break;
+ case dest_address:
+ DRM_DEBUG("Destination start address is 0x%x\n",
+ cur_seq->d_addr);
+ break;
+ case tex_address:
+ if (cur_seq->agp_texture) {
+ unsigned start =
+ cur_seq->tex_level_lo[cur_seq->texture];
+ unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
+ unsigned long lo = ~0, hi = 0, tmp;
+ uint32_t *addr, *pitch, *height, tex;
+ unsigned i;
+ int npot;
+
+ if (end > 9)
+ end = 9;
+ if (start > 9)
+ start = 9;
+
+ addr =
+ &(cur_seq->t_addr[tex = cur_seq->texture][start]);
+ pitch = &(cur_seq->pitch[tex][start]);
+ height = &(cur_seq->height[tex][start]);
+ npot = cur_seq->tex_npot[tex];
+ for (i = start; i <= end; ++i) {
+ tmp = *addr++;
+ if (tmp < lo)
+ lo = tmp;
+ if (i == 0 && npot)
+ tmp += (*height++ * *pitch++);
+ else
+ tmp += (*height++ << *pitch++);
+ if (tmp > hi)
+ hi = tmp;
+ }
+
+ if (!via_drm_lookup_agp_map
+ (cur_seq, lo, hi - lo, cur_seq->dev)) {
+ DRM_ERROR
+ ("AGP texture is not in allowed map\n");
+ return 2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ cur_seq->unfinished = no_sequence;
+ return 0;
+}
+
+static __inline__ int
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
+{
+ register uint32_t tmp, *tmp_addr;
+
+ if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
+ int ret;
+ if ((ret = finish_current_sequence(cur_seq)))
+ return ret;
+ }
+
+ switch (hz) {
+ case check_for_header2:
+ if (cmd == HALCYON_HEADER2)
+ return 1;
+ return 0;
+ case check_for_header1:
+ if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ return 1;
+ return 0;
+ case check_for_header2_err:
+ if (cmd == HALCYON_HEADER2)
+ return 1;
+ DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
+ break;
+ case check_for_header1_err:
+ if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ return 1;
+ DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
+ break;
+ case check_for_fire:
+ if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
+ return 1;
+ DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
+ break;
+ case check_for_dummy:
+ if (HC_DUMMY == cmd)
+ return 0;
+ DRM_ERROR("Illegal DMA HC_DUMMY command\n");
+ break;
+ case check_for_dd:
+ if (0xdddddddd == cmd)
+ return 0;
+ DRM_ERROR("Illegal DMA 0xdddddddd command\n");
+ break;
+ case check_z_buffer_addr0:
+ cur_seq->unfinished = z_address;
+ cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
+ (cmd & 0x00FFFFFF);
+ return 0;
+ case check_z_buffer_addr1:
+ cur_seq->unfinished = z_address;
+ cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
+ ((cmd & 0xFF) << 24);
+ return 0;
+ case check_z_buffer_addr_mode:
+ cur_seq->unfinished = z_address;
+ if ((cmd & 0x0000C000) == 0)
+ return 0;
+ DRM_ERROR("Attempt to place Z buffer in system memory\n");
+ return 2;
+ case check_destination_addr0:
+ cur_seq->unfinished = dest_address;
+ cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
+ (cmd & 0x00FFFFFF);
+ return 0;
+ case check_destination_addr1:
+ cur_seq->unfinished = dest_address;
+ cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
+ ((cmd & 0xFF) << 24);
+ return 0;
+ case check_destination_addr_mode:
+ cur_seq->unfinished = dest_address;
+ if ((cmd & 0x0000C000) == 0)
+ return 0;
+ DRM_ERROR
+ ("Attempt to place 3D drawing buffer in system memory\n");
+ return 2;
+ case check_texture_addr0:
+ cur_seq->unfinished = tex_address;
+ tmp = (cmd >> 24);
+ tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+ *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
+ return 0;
+ case check_texture_addr1:
+ cur_seq->unfinished = tex_address;
+ tmp = ((cmd >> 24) - 0x20);
+ tmp += tmp << 1;
+ tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+ tmp_addr++;
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
+ tmp_addr++;
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
+ return 0;
+ case check_texture_addr2:
+ cur_seq->unfinished = tex_address;
+ cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
+ cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
+ return 0;
+ case check_texture_addr3:
+ cur_seq->unfinished = tex_address;
+ tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
+ if (tmp == 0 &&
+ (cmd & HC_HTXnEnPit_MASK)) {
+ cur_seq->pitch[cur_seq->texture][tmp] =
+ (cmd & HC_HTXnLnPit_MASK);
+ cur_seq->tex_npot[cur_seq->texture] = 1;
+ } else {
+ cur_seq->pitch[cur_seq->texture][tmp] =
+ (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
+ cur_seq->tex_npot[cur_seq->texture] = 0;
+ if (cmd & 0x000FFFFF) {
+ DRM_ERROR
+ ("Unimplemented texture level 0 pitch mode.\n");
+ return 2;
+ }
+ }
+ return 0;
+ case check_texture_addr4:
+ cur_seq->unfinished = tex_address;
+ tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+ return 0;
+ case check_texture_addr5:
+ case check_texture_addr6:
+ cur_seq->unfinished = tex_address;
+ /*
+ * Texture width. We don't care since we have the pitch.
+ */
+ return 0;
+ case check_texture_addr7:
+ cur_seq->unfinished = tex_address;
+ tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+ tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
+ tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
+ tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
+ tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
+ tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
+ tmp_addr[0] = 1 << (cmd & 0x0000000F);
+ return 0;
+ case check_texture_addr8:
+ cur_seq->unfinished = tex_address;
+ tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+ tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
+ tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
+ tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
+ tmp_addr[6] = 1 << (cmd & 0x0000000F);
+ return 0;
+ case check_texture_addr_mode:
+ cur_seq->unfinished = tex_address;
+ if (2 == (tmp = cmd & 0x00000003)) {
+ DRM_ERROR
+ ("Attempt to fetch texture from system memory.\n");
+ return 2;
+ }
+ cur_seq->agp_texture = (tmp == 3);
+ cur_seq->tex_palette_size[cur_seq->texture] =
+ (cmd >> 16) & 0x000000007;
+ return 0;
+ case check_for_vertex_count:
+ cur_seq->vertex_count = cmd & 0x0000FFFF;
+ return 0;
+ case check_number_texunits:
+ cur_seq->multitex = (cmd >> 3) & 1;
+ return 0;
+ default:
+ DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
+ return 2;
+ }
+ return 2;
+}
+
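+/*
+ * Verify a primitive (vertex data) list: check the B/A command pair, work out
+ * how many dwords each vertex occupies, and make sure every vertex burst is
+ * terminated by a vertex fire command, recording the fire offsets for later
+ * replay.
+ */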
+static __inline__ int
+via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
+ drm_via_state_t *cur_seq)
+{
+ drm_via_private_t *dev_priv =
+ (drm_via_private_t *) cur_seq->dev->dev_private;
+ uint32_t a_fire, bcmd, dw_count;
+ int ret = 0;
+ int have_fire;
+ const uint32_t *buf = *buffer;
+
+ while (buf < buf_end) {
+ have_fire = 0;
+ if ((buf_end - buf) < 2) {
+ DRM_ERROR
+ ("Unexpected termination of primitive list.\n");
+ ret = 1;
+ break;
+ }
+ if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
+ break;
+ bcmd = *buf++;
+ if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
+ DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
+ *buf);
+ ret = 1;
+ break;
+ }
+ a_fire =
+ *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
+ HC_HE3Fire_MASK;
+
+ /*
+ * How many dwords per vertex ?
+ */
+
+ if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
+ DRM_ERROR("Illegal B command vertex data for AGP.\n");
+ ret = 1;
+ break;
+ }
+
+ dw_count = 0;
+ if (bcmd & (1 << 7))
+ dw_count += (cur_seq->multitex) ? 2 : 1;
+ if (bcmd & (1 << 8))
+ dw_count += (cur_seq->multitex) ? 2 : 1;
+ if (bcmd & (1 << 9))
+ dw_count++;
+ if (bcmd & (1 << 10))
+ dw_count++;
+ if (bcmd & (1 << 11))
+ dw_count++;
+ if (bcmd & (1 << 12))
+ dw_count++;
+ if (bcmd & (1 << 13))
+ dw_count++;
+ if (bcmd & (1 << 14))
+ dw_count++;
+
+ while (buf < buf_end) {
+ if (*buf == a_fire) {
+ if (dev_priv->num_fire_offsets >=
+ VIA_FIRE_BUF_SIZE) {
+ DRM_ERROR("Fire offset buffer full.\n");
+ ret = 1;
+ break;
+ }
+ dev_priv->fire_offsets[dev_priv->
+ num_fire_offsets++] =
+ buf;
+ have_fire = 1;
+ buf++;
+ if (buf < buf_end && *buf == a_fire)
+ buf++;
+ break;
+ }
+ if ((*buf == HALCYON_HEADER2) ||
+ ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
+ DRM_ERROR("Missing Vertex Fire command, "
+ "Stray Vertex Fire command or verifier "
+ "lost sync.\n");
+ ret = 1;
+ break;
+ }
+ if ((ret = eat_words(&buf, buf_end, dw_count)))
+ break;
+ }
+ if (buf >= buf_end && !have_fire) {
+ DRM_ERROR("Missing Vertex Fire command or verifier "
+ "lost sync.\n");
+ ret = 1;
+ break;
+ }
+ if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
+ DRM_ERROR("AGP Primitive list end misaligned.\n");
+ ret = 1;
+ break;
+ }
+ }
+ *buffer = buf;
+ return ret;
+}
+
+static __inline__ verifier_state_t
+via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
+ drm_via_state_t *hc_state)
+{
+ uint32_t cmd;
+ int hz_mode;
+ hazard_t hz;
+ const uint32_t *buf = *buffer;
+ const hazard_t *hz_table;
+
+ if ((buf_end - buf) < 2) {
+ DRM_ERROR
+ ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
+ return state_error;
+ }
+ buf++;
+ cmd = (*buf++ & 0xFFFF0000) >> 16;
+
+ switch (cmd) {
+ case HC_ParaType_CmdVdata:
+ if (via_check_prim_list(&buf, buf_end, hc_state))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+ case HC_ParaType_NotTex:
+ hz_table = table1;
+ break;
+ case HC_ParaType_Tex:
+ hc_state->texture = 0;
+ hz_table = table2;
+ break;
+ case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
+ hc_state->texture = 1;
+ hz_table = table2;
+ break;
+ case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
+ hz_table = table3;
+ break;
+ case HC_ParaType_Auto:
+ if (eat_words(&buf, buf_end, 2))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+ case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
+ if (eat_words(&buf, buf_end, 32))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+ case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
+ case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
+ DRM_ERROR("Texture palettes are rejected because of "
+ "lack of info how to determine their size.\n");
+ return state_error;
+ case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
+ DRM_ERROR("Fog factor palettes are rejected because of "
+ "lack of info how to determine their size.\n");
+ return state_error;
+ default:
+
+ /*
+ * There are some unimplemented HC_ParaTypes here that
+ * need to be implemented if the Mesa driver is extended.
+ */
+
+ DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
+ "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
+ cmd, *(buf - 2));
+ *buffer = buf;
+ return state_error;
+ }
+
+ while (buf < buf_end) {
+ cmd = *buf++;
+ if ((hz = hz_table[cmd >> 24])) {
+ if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
+ if (hz_mode == 1) {
+ buf--;
+ break;
+ }
+ return state_error;
+ }
+ } else if (hc_state->unfinished &&
+ finish_current_sequence(hc_state)) {
+ return state_error;
+ }
+ }
+ if (hc_state->unfinished && finish_current_sequence(hc_state))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end, int *fire_count)
+{
+ uint32_t cmd;
+ const uint32_t *buf = *buffer;
+ const uint32_t *next_fire;
+ int burst = 0;
+
+ next_fire = dev_priv->fire_offsets[*fire_count];
+ buf++;
+ cmd = (*buf & 0xFFFF0000) >> 16;
+ via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
+ switch (cmd) {
+ case HC_ParaType_CmdVdata:
+ while ((buf < buf_end) &&
+ (*fire_count < dev_priv->num_fire_offsets) &&
+ (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
+ while (buf <= next_fire) {
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
+ (burst & 63), *buf++);
+ burst += 4;
+ }
+ if ((buf < buf_end)
+ && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
+ buf++;
+
+ if (++(*fire_count) < dev_priv->num_fire_offsets)
+ next_fire = dev_priv->fire_offsets[*fire_count];
+ }
+ break;
+ default:
+ while (buf < buf_end) {
+
+ if (*buf == HC_HEADER2 ||
+ (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
+ (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
+ (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+ break;
+
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
+ (burst & 63), *buf++);
+ burst += 4;
+ }
+ }
+ *buffer = buf;
+ return state_command;
+}
+
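+/*
+ * Check that a video DMA register offset stays outside the 3D / command burst
+ * area, the PCI DMA registers and the VGA register range.
+ */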
+static __inline__ int verify_mmio_address(uint32_t address)
+{
+ if ((address > 0x3FF) && (address < 0xC00)) {
+ DRM_ERROR("Invalid VIDEO DMA command. "
+ "Attempt to access 3D- or command burst area.\n");
+ return 1;
+ } else if ((address > 0xCFF) && (address < 0x1300)) {
+ DRM_ERROR("Invalid VIDEO DMA command. "
+ "Attempt to access PCI DMA area.\n");
+ return 1;
+ } else if (address > 0x13FF) {
+ DRM_ERROR("Invalid VIDEO DMA command. "
+ "Attempt to access VGA registers.\n");
+ return 1;
+ }
+ return 0;
+}
+
+static __inline__ int
+verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
+ uint32_t dwords)
+{
+ const uint32_t *buf = *buffer;
+
+ if (buf_end - buf < dwords) {
+ DRM_ERROR("Illegal termination of video command.\n");
+ return 1;
+ }
+ while (dwords--) {
+ if (*buf++) {
+ DRM_ERROR("Illegal video command tail.\n");
+ return 1;
+ }
+ }
+ *buffer = buf;
+ return 0;
+}
+
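+/*
+ * Verify a HALCYON_HEADER1 register write sequence, rejecting writes into the
+ * 3D / command burst area and the VGA registers.
+ */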
+static __inline__ verifier_state_t
+via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
+{
+ uint32_t cmd;
+ const uint32_t *buf = *buffer;
+ verifier_state_t ret = state_command;
+
+ while (buf < buf_end) {
+ cmd = *buf;
+ if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
+ (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
+ if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+ break;
+ DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+ "Attempt to access 3D- or command burst area.\n");
+ ret = state_error;
+ break;
+ } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
+ if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+ break;
+ DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+ "Attempt to access VGA registers.\n");
+ ret = state_error;
+ break;
+ } else {
+ buf += 2;
+ }
+ }
+ *buffer = buf;
+ return ret;
+}
+
+static __inline__ verifier_state_t
+via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
+{
+ register uint32_t cmd;
+ const uint32_t *buf = *buffer;
+
+ while (buf < buf_end) {
+ cmd = *buf;
+ if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+ break;
+ via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
+ buf++;
+ }
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
+{
+ uint32_t data;
+ const uint32_t *buf = *buffer;
+
+ if (buf_end - buf < 4) {
+ DRM_ERROR("Illegal termination of video header5 command\n");
+ return state_error;
+ }
+
+ data = *buf++ & ~VIA_VIDEOMASK;
+ if (verify_mmio_address(data))
+ return state_error;
+
+ data = *buf++;
+ if (*buf++ != 0x00F50000) {
+ DRM_ERROR("Illegal header5 header data\n");
+ return state_error;
+ }
+ if (*buf++ != 0x00000000) {
+ DRM_ERROR("Illegal header5 header data\n");
+ return state_error;
+ }
+ if (eat_words(&buf, buf_end, data))
+ return state_error;
+ if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
+{
+ uint32_t addr, count, i;
+ const uint32_t *buf = *buffer;
+
+ addr = *buf++ & ~VIA_VIDEOMASK;
+ i = count = *buf;
+ buf += 3;
+ while (i--)
+ via_write(dev_priv, addr, *buf++);
+ if (count & 3)
+ buf += 4 - (count & 3);
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
+{
+ uint32_t data;
+ const uint32_t *buf = *buffer;
+ uint32_t i;
+
+ if (buf_end - buf < 4) {
+ DRM_ERROR("Illegal termination of video header6 command\n");
+ return state_error;
+ }
+ buf++;
+ data = *buf++;
+ if (*buf++ != 0x00F60000) {
+ DRM_ERROR("Illegal header6 header data\n");
+ return state_error;
+ }
+ if (*buf++ != 0x00000000) {
+ DRM_ERROR("Illegal header6 header data\n");
+ return state_error;
+ }
+ if ((buf_end - buf) < (data << 1)) {
+ DRM_ERROR("Illegal termination of video header6 command\n");
+ return state_error;
+ }
+ for (i = 0; i < data; ++i) {
+ if (verify_mmio_address(*buf++))
+ return state_error;
+ buf++;
+ }
+ data <<= 1;
+ if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
+{
+
+ uint32_t addr, count, i;
+ const uint32_t *buf = *buffer;
+
+ i = count = *++buf;
+ buf += 3;
+ while (i--) {
+ addr = *buf++;
+ via_write(dev_priv, addr, *buf++);
+ }
+ count <<= 1;
+ if (count & 3)
+ buf += 4 - (count & 3);
+ *buffer = buf;
+ return state_command;
+}
+
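+/*
+ * Top-level verifier: walk the user-supplied command buffer as a state
+ * machine, dispatching to the header1/header2/video-header checkers. On
+ * error the verifier state is rolled back so a later buffer starts from a
+ * clean state.
+ */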
+static int
+via_verify_command_stream(const uint32_t * buf, unsigned int size,
+ struct drm_device * dev, int agp)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_state_t *hc_state = &dev_priv->hc_state;
+ drm_via_state_t saved_state = *hc_state;
+ uint32_t cmd;
+ const uint32_t *buf_end = buf + (size >> 2);
+ verifier_state_t state = state_command;
+ int cme_video;
+ int supported_3d;
+
+ cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
+ dev_priv->chipset == VIA_DX9_0);
+
+ supported_3d = dev_priv->chipset != VIA_DX9_0;
+
+ hc_state->dev = dev;
+ hc_state->unfinished = no_sequence;
+ hc_state->map_cache = NULL;
+ hc_state->agp = agp;
+ hc_state->buf_start = buf;
+ dev_priv->num_fire_offsets = 0;
+
+ while (buf < buf_end) {
+
+ switch (state) {
+ case state_header2:
+ state = via_check_header2(&buf, buf_end, hc_state);
+ break;
+ case state_header1:
+ state = via_check_header1(&buf, buf_end);
+ break;
+ case state_vheader5:
+ state = via_check_vheader5(&buf, buf_end);
+ break;
+ case state_vheader6:
+ state = via_check_vheader6(&buf, buf_end);
+ break;
+ case state_command:
+ cmd = *buf;
+ if ((cmd == HALCYON_HEADER2) && supported_3d)
+ state = state_header2;
+ else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ state = state_header1;
+ else if (cme_video
+ && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+ state = state_vheader5;
+ else if (cme_video
+ && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+ state = state_vheader6;
+ else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
+ DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
+ state = state_error;
+ } else {
+ DRM_ERROR
+ ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
+ cmd);
+ state = state_error;
+ }
+ break;
+ case state_error:
+ default:
+ *hc_state = saved_state;
+ return -EINVAL;
+ }
+ }
+ if (state == state_error) {
+ *hc_state = saved_state;
+ return -EINVAL;
+ }
+ return 0;
+}
+
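+/*
+ * Replay a previously verified command stream by writing it out through the
+ * chip's MMIO command registers (presumably the non-AGP submission path).
+ */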
+static int
+via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
+ unsigned int size)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ uint32_t cmd;
+ const uint32_t *buf_end = buf + (size >> 2);
+ verifier_state_t state = state_command;
+ int fire_count = 0;
+
+ while (buf < buf_end) {
+
+ switch (state) {
+ case state_header2:
+ state =
+ via_parse_header2(dev_priv, &buf, buf_end,
+ &fire_count);
+ break;
+ case state_header1:
+ state = via_parse_header1(dev_priv, &buf, buf_end);
+ break;
+ case state_vheader5:
+ state = via_parse_vheader5(dev_priv, &buf, buf_end);
+ break;
+ case state_vheader6:
+ state = via_parse_vheader6(dev_priv, &buf, buf_end);
+ break;
+ case state_command:
+ cmd = *buf;
+ if (cmd == HALCYON_HEADER2)
+ state = state_header2;
+ else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ state = state_header1;
+ else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+ state = state_vheader5;
+ else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+ state = state_vheader6;
+ else {
+ DRM_ERROR
+ ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
+ cmd);
+ state = state_error;
+ }
+ break;
+ case state_error:
+ default:
+ return -EINVAL;
+ }
+ }
+ if (state == state_error)
+ return -EINVAL;
+ return 0;
+}
+
+static void
+setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
+{
+ int i;
+
+ for (i = 0; i < 256; ++i)
+ table[i] = forbidden_command;
+
+ for (i = 0; i < size; ++i)
+ table[init_table[i].code] = init_table[i].hz;
+}
+
+static void via_init_command_verifier(void)
+{
+ setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
+ setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
+ setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
+}
+/*
+ * Unmap a DMA mapping.
+ */
+static void
+via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+ int num_desc = vsg->num_desc;
+ unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
+ unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ descriptor_this_page;
+ dma_addr_t next = vsg->chain_start;
+
+ while (num_desc--) {
+ if (descriptor_this_page-- == 0) {
+ cur_descriptor_page--;
+ descriptor_this_page = vsg->descriptors_per_page - 1;
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ descriptor_this_page;
+ }
+ dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
+ dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
+ next = (dma_addr_t) desc_ptr->next;
+ desc_ptr--;
+ }
+}
+
+/*
+ * If mode = 0, count how many descriptors are needed.
+ * If mode = 1, map the DMA pages for the device, build the descriptor chain and
+ * map the descriptors as well.
+ * Descriptors are run in reverse order by the hardware because we are not allowed to update the
+ * 'next' field without syncing calls when the descriptor is already mapped.
+ */
+static void
+via_map_blit_for_device(struct pci_dev *pdev,
+ const drm_via_dmablit_t *xfer,
+ drm_via_sg_info_t *vsg,
+ int mode)
+{
+ unsigned cur_descriptor_page = 0;
+ unsigned num_descriptors_this_page = 0;
+ unsigned char *mem_addr = xfer->mem_addr;
+ unsigned char *cur_mem;
+ unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
+ uint32_t fb_addr = xfer->fb_addr;
+ uint32_t cur_fb;
+ unsigned long line_len;
+ unsigned remaining_len;
+ int num_desc = 0;
+ int cur_line;
+ dma_addr_t next = 0 | VIA_DMA_DPR_EC;
+ drm_via_descriptor_t *desc_ptr = NULL;
+
+ if (mode == 1)
+ desc_ptr = vsg->desc_pages[cur_descriptor_page];
+
+ for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
+
+ line_len = xfer->line_length;
+ cur_fb = fb_addr;
+ cur_mem = mem_addr;
+
+ while (line_len > 0) {
+
+ remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+ line_len -= remaining_len;
+
+ if (mode == 1) {
+ desc_ptr->mem_addr =
+ dma_map_page(&pdev->dev,
+ vsg->pages[VIA_PFN(cur_mem) -
+ VIA_PFN(first_addr)],
+ VIA_PGOFF(cur_mem), remaining_len,
+ vsg->direction);
+ desc_ptr->dev_addr = cur_fb;
+
+ desc_ptr->size = remaining_len;
+ desc_ptr->next = (uint32_t) next;
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ DMA_TO_DEVICE);
+ desc_ptr++;
+ if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
+ num_descriptors_this_page = 0;
+ desc_ptr = vsg->desc_pages[++cur_descriptor_page];
+ }
+ }
+
+ num_desc++;
+ cur_mem += remaining_len;
+ cur_fb += remaining_len;
+ }
+
+ mem_addr += xfer->mem_stride;
+ fb_addr += xfer->fb_stride;
+ }
+
+ if (mode == 1) {
+ vsg->chain_start = next;
+ vsg->state = dr_via_device_mapped;
+ }
+ vsg->num_desc = num_desc;
+}
+
+/*
+ * Function that frees up all resources for a blit. It is usable even if the
+ * blit info has only been partially built, as long as the state enum is
+ * consistent with the actual status of the used resources.
+ */
+static void
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+ int i;
+
+ switch (vsg->state) {
+ case dr_via_device_mapped:
+ via_unmap_blit_from_device(pdev, vsg);
+ fallthrough;
+ case dr_via_desc_pages_alloc:
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
+ if (vsg->desc_pages[i] != NULL)
+ free_page((unsigned long)vsg->desc_pages[i]);
+ }
+ kfree(vsg->desc_pages);
+ fallthrough;
+ case dr_via_pages_locked:
+ unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
+ fallthrough;
+ case dr_via_pages_alloc:
+ vfree(vsg->pages);
+ fallthrough;
+ default:
+ vsg->state = dr_via_sg_init;
+ }
+ vfree(vsg->bounce_buffer);
+ vsg->bounce_buffer = NULL;
+ vsg->free_on_sequence = 0;
+}
+
+/*
+ * Fire a blit engine.
+ */
+static void
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_DMA_CSR_DE);
+ via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+ via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+ wmb();
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+ via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
+}
+
+/*
+ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
+ * occur here if the calling user does not have access to the submitted address.
+ */
+static int
+via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+ int ret;
+ unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
+ first_pfn + 1;
+
+ vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
+ if (NULL == vsg->pages)
+ return -ENOMEM;
+ ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
+ vsg->num_pages,
+ vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+ vsg->pages);
+ if (ret != vsg->num_pages) {
+ if (ret < 0)
+ return ret;
+ vsg->state = dr_via_pages_locked;
+ return -EINVAL;
+ }
+ vsg->state = dr_via_pages_locked;
+ DRM_DEBUG("DMA pages locked\n");
+ return 0;
+}
+
+/*
+ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
+ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and pages don't need to be contiguous.
+ */
+static int
+via_alloc_desc_pages(drm_via_sg_info_t *vsg)
+{
+ int i;
+
+ vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->descriptors_per_page;
+
+ if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
+ return -ENOMEM;
+
+ vsg->state = dr_via_desc_pages_alloc;
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
+ if (NULL == (vsg->desc_pages[i] =
+ (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ }
+ DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+ vsg->num_desc);
+ return 0;
+}
+
+static void
+via_abort_dmablit(struct drm_device *dev, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+}
+
+static void
+via_dmablit_engine_off(struct drm_device *dev, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+}
+
+/*
+ * The dmablit part of the IRQ handler. Only reasonably fast things are done here.
+ * The rest, like unmapping and freeing memory for done blits, is done in a separate
+ * workqueue task. Basically, the interrupt handler submits a new blit to the engine,
+ * while the workqueue task takes care of the processing associated with the old blit.
+ */
+static void
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+ int cur;
+ int done_transfer;
+ unsigned long irqsave = 0;
+ uint32_t status = 0;
+
+ DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
+ engine, from_irq, (unsigned long) blitq);
+
+ if (from_irq)
+ spin_lock(&blitq->blit_lock);
+ else
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ done_transfer = blitq->is_active &&
+ ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
+
+ cur = blitq->cur;
+ if (done_transfer) {
+
+ blitq->blits[cur]->aborted = blitq->aborting;
+ blitq->done_blit_handle++;
+ wake_up(blitq->blit_queue + cur);
+
+ cur++;
+ if (cur >= VIA_NUM_BLIT_SLOTS)
+ cur = 0;
+ blitq->cur = cur;
+
+ /*
+ * Clear transfer done flag.
+ */
+
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
+
+ blitq->is_active = 0;
+ blitq->aborting = 0;
+ schedule_work(&blitq->wq);
+
+ } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
+
+ /*
+ * Abort transfer after one second.
+ */
+
+ via_abort_dmablit(dev, engine);
+ blitq->aborting = 1;
+ blitq->end = jiffies + HZ;
+ }
+
+ if (!blitq->is_active) {
+ if (blitq->num_outstanding) {
+ via_fire_dmablit(dev, blitq->blits[cur], engine);
+ blitq->is_active = 1;
+ blitq->cur = cur;
+ blitq->num_outstanding--;
+ blitq->end = jiffies + HZ;
+ if (!timer_pending(&blitq->poll_timer))
+ mod_timer(&blitq->poll_timer, jiffies + 1);
+ } else {
+ if (timer_pending(&blitq->poll_timer))
+ del_timer(&blitq->poll_timer);
+ via_dmablit_engine_off(dev, engine);
+ }
+ }
+
+ if (from_irq)
+ spin_unlock(&blitq->blit_lock);
+ else
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+/*
+ * Check whether this blit is still active, performing necessary locking.
+ */
+static int
+via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
+{
+ unsigned long irqsave;
+ uint32_t slot;
+ int active;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ /*
+ * Allow for handle wraparounds.
+ */
+
+ active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
+ ((blitq->cur_blit_handle - handle) <= (1 << 23));
+
+ if (queue && active) {
+ slot = handle - blitq->done_blit_handle + blitq->cur - 1;
+ if (slot >= VIA_NUM_BLIT_SLOTS)
+ slot -= VIA_NUM_BLIT_SLOTS;
+ *queue = blitq->blit_queue + slot;
+ }
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ return active;
+}
+
+/*
+ * Sync. Wait up to three seconds for the blit to be performed.
+ */
+static int
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+ wait_queue_head_t *queue;
+ int ret = 0;
+
+ if (via_dmablit_active(blitq, engine, handle, &queue)) {
+ VIA_WAIT_ON(ret, *queue, 3 * HZ,
+ !via_dmablit_active(blitq, engine, handle, NULL));
+ }
+ DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
+ handle, engine, ret);
+
+ return ret;
+}
+
+/*
+ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
+ * a) Broken hardware (typically those that don't have any video capture facility).
+ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
+ * The timer and hardware IRQs can and do work in parallel. If the hardware has
+ * IRQs, they will shorten the latency somewhat.
+ */
+static void
+via_dmablit_timer(struct timer_list *t)
+{
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
+ struct drm_device *dev = blitq->dev;
+ int engine = (int)
+ (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+ (unsigned long) jiffies);
+
+ via_dmablit_handler(dev, engine, 0);
+
+ if (!timer_pending(&blitq->poll_timer)) {
+ mod_timer(&blitq->poll_timer, jiffies + 1);
+
+ /*
+ * Rerun handler to delete timer if engines are off, and
+ * to shorten abort latency. This is a little nasty.
+ */
+
+ via_dmablit_handler(dev, engine, 0);
+
+ }
+}
+
+/*
+ * Workqueue task that frees data and mappings associated with a blit.
+ * Also wakes up waiting processes. Each of these tasks handles one
+ * blit engine only and is not necessarily run for each interrupt.
+ */
+static void
+via_dmablit_workqueue(struct work_struct *work)
+{
+ drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
+ struct drm_device *dev = blitq->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned long irqsave;
+ drm_via_sg_info_t *cur_sg;
+ int cur_released;
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
+ (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ while (blitq->serviced != blitq->cur) {
+
+ cur_released = blitq->serviced++;
+
+ DRM_DEBUG("Releasing blit slot %d\n", cur_released);
+
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ blitq->serviced = 0;
+
+ cur_sg = blitq->blits[cur_released];
+ blitq->num_free++;
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ wake_up(&blitq->busy_queue);
+
+ via_free_sg_info(pdev, cur_sg);
+ kfree(cur_sg);
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+/*
+ * Init all blit engines. Currently we use two, but some hardware has four.
+ */
+static void
+via_init_dmablit(struct drm_device *dev)
+{
+ int i, j;
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ drm_via_blitq_t *blitq;
+
+ pci_set_master(pdev);
+
+ for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
+ blitq = dev_priv->blit_queues + i;
+ blitq->dev = dev;
+ blitq->cur_blit_handle = 0;
+ blitq->done_blit_handle = 0;
+ blitq->head = 0;
+ blitq->cur = 0;
+ blitq->serviced = 0;
+ blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
+ blitq->num_outstanding = 0;
+ blitq->is_active = 0;
+ blitq->aborting = 0;
+ spin_lock_init(&blitq->blit_lock);
+ for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
+ init_waitqueue_head(blitq->blit_queue + j);
+ init_waitqueue_head(&blitq->busy_queue);
+ INIT_WORK(&blitq->wq, via_dmablit_workqueue);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
+ }
+}
+
+/*
+ * Build all info and do all mappings required for a blit.
+ */
+static int
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int draw = xfer->to_fb;
+ int ret = 0;
+
+ vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ vsg->bounce_buffer = NULL;
+
+ vsg->state = dr_via_sg_init;
+
+ if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
+ DRM_ERROR("Zero size bitblt.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The check below is a driver limitation, not a hardware one. We
+ * don't want to lock unused pages, and don't want to incorporate the
+ * extra logic of avoiding them. Make sure there are none.
+ * (Not a big limitation anyway.)
+ */
+
+ if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
+ DRM_ERROR("Too large system memory stride. Stride: %d, "
+ "Length: %d\n", xfer->mem_stride, xfer->line_length);
+ return -EINVAL;
+ }
+
+ if ((xfer->mem_stride == xfer->line_length) &&
+ (xfer->fb_stride == xfer->line_length)) {
+ xfer->mem_stride *= xfer->num_lines;
+ xfer->line_length = xfer->mem_stride;
+ xfer->fb_stride = xfer->mem_stride;
+ xfer->num_lines = 1;
+ }
+
+ /*
+ * Don't lock an arbitrarily large number of pages, since that causes a
+ * DoS security hole.
+ */
+
+ if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
+ DRM_ERROR("Too large PCI DMA bitblt.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We allow a negative fb stride to allow flipping of images during
+ * the transfer.
+ */
+
+ if (xfer->mem_stride < xfer->line_length ||
+ abs(xfer->fb_stride) < xfer->line_length) {
+ DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * A hardware bug seems to be worked around if system memory addresses start on
+ * 16-byte boundaries. This seems a bit restrictive, however. VIA has been
+ * contacted about this. Meanwhile, impose the following restrictions:
+ */
+
+#ifdef VIA_BUGFREE
+ if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
+ ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
+ DRM_ERROR("Invalid DRM bitblt alignment.\n");
+ return -EINVAL;
+ }
+#else
+ if ((((unsigned long)xfer->mem_addr & 15) ||
+ ((unsigned long)xfer->fb_addr & 3)) ||
+ ((xfer->num_lines > 1) &&
+ ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
+ DRM_ERROR("Invalid DRM bitblt alignment.\n");
+ return -EINVAL;
+ }
+#endif
+
+ if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
+ DRM_ERROR("Could not lock DMA pages.\n");
+ via_free_sg_info(pdev, vsg);
+ return ret;
+ }
+
+ via_map_blit_for_device(pdev, xfer, vsg, 0);
+ if (0 != (ret = via_alloc_desc_pages(vsg))) {
+ DRM_ERROR("Could not allocate DMA descriptor pages.\n");
+ via_free_sg_info(pdev, vsg);
+ return ret;
+ }
+ via_map_blit_for_device(pdev, xfer, vsg, 1);
+
+ return 0;
+}
+
+/*
+ * Reserve one free slot in the blit queue. Waits up to one second for one
+ * to become available; otherwise -EBUSY is returned.
+ */
+static int
+via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
+{
+ int ret = 0;
+ unsigned long irqsave;
+
+ DRM_DEBUG("Num free is %d\n", blitq->num_free);
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ while (blitq->num_free == 0) {
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
+ if (ret)
+ return (-EINTR == ret) ? -EAGAIN : ret;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ blitq->num_free--;
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ return 0;
+}
+
+/*
+ * Hand back a free slot if we changed our mind.
+ */
+static void
+via_dmablit_release_slot(drm_via_blitq_t *blitq)
+{
+ unsigned long irqsave;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ blitq->num_free++;
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ wake_up(&blitq->busy_queue);
+}
+
+/*
+ * Grab a free slot. Build blit info and queue a blit.
+ */
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_sg_info_t *vsg;
+ drm_via_blitq_t *blitq;
+ int ret;
+ int engine;
+ unsigned long irqsave;
+
+ if (dev_priv == NULL) {
+ DRM_ERROR("Called without initialization.\n");
+ return -EINVAL;
+ }
+
+ engine = (xfer->to_fb) ? 0 : 1;
+ blitq = dev_priv->blit_queues + engine;
+ if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
+ return ret;
+ if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
+ via_dmablit_release_slot(blitq);
+ return -ENOMEM;
+ }
+ if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
+ via_dmablit_release_slot(blitq);
+ kfree(vsg);
+ return ret;
+ }
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ blitq->blits[blitq->head++] = vsg;
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ blitq->head = 0;
+ blitq->num_outstanding++;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ xfer->sync.engine = engine;
+
+ via_dmablit_handler(dev, engine, 0);
+
+ return 0;
+}
+
+/*
+ * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
+ * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
+ * case it returns with -EAGAIN for the signal to be delivered.
+ * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
+ */
+static int
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_blitsync_t *sync = data;
+ int err;
+
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ return -EINVAL;
+
+ err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
+
+ if (-EINTR == err)
+ err = -EAGAIN;
+
+ return err;
+}
+
+/*
+ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * be reissued. See the above IOCTL code.
+ */
+static int
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_dmablit_t *xfer = data;
+ int err;
+
+ err = via_dmablit(dev, xfer);
+
+ return err;
+}
+
+static u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ if (pipe != 0)
+ return 0;
+
+ return atomic_read(&dev_priv->vbl_received);
+}
+
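+/*
+ * Main interrupt handler: counts vblanks (keeping a running estimate of the
+ * time per vblank), wakes up waiters on the device-specific IRQs and kicks
+ * the DMA blit handler when a blit transfer-done IRQ fires.
+ */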
+static irqreturn_t via_driver_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+ int handled = 0;
+ ktime_t cur_vblank;
+ drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+ int i;
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ if (status & VIA_IRQ_VBLANK_PENDING) {
+ atomic_inc(&dev_priv->vbl_received);
+ if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
+ cur_vblank = ktime_get();
+ if (dev_priv->last_vblank_valid) {
+ dev_priv->nsec_per_vblank =
+ ktime_sub(cur_vblank,
+ dev_priv->last_vblank) >> 4;
+ }
+ dev_priv->last_vblank = cur_vblank;
+ dev_priv->last_vblank_valid = 1;
+ }
+ if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
+ DRM_DEBUG("nsec per vblank is: %llu\n",
+ ktime_to_ns(dev_priv->nsec_per_vblank));
+ }
+ drm_handle_vblank(dev, 0);
+ handled = 1;
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ if (status & cur_irq->pending_mask) {
+ atomic_inc(&cur_irq->irq_received);
+ wake_up(&cur_irq->irq_queue);
+ handled = 1;
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+ via_dmablit_handler(dev, 0, 1);
+ else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
+ via_dmablit_handler(dev, 1, 1);
+ }
+ cur_irq++;
+ }
+
+ /* Acknowledge interrupts */
+ via_write(dev_priv, VIA_REG_INTERRUPT, status);
+
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
+{
+ u32 status;
+
+ if (dev_priv) {
+ /* Acknowledge interrupts */
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status |
+ dev_priv->irq_pending_mask);
+ }
+}
+
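+/*
+ * Enable the vblank interrupt. The CRT controller accesses (index 0x11 via
+ * 0x83d4/0x83d5) presumably gate the vertical retrace interrupt at the VGA
+ * level, mirroring what via_disable_vblank() undoes below.
+ */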
+static int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ u32 status;
+
+ if (pipe != 0) {
+ DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
+ return -EINVAL;
+ }
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
+
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
+
+ return 0;
+}
+
+static void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ u32 status;
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
+
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
+
+ if (pipe != 0)
+ DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
+}
+
+static int
+via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
+ unsigned int *sequence)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ unsigned int cur_irq_sequence;
+ drm_via_irq_t *cur_irq;
+ int ret = 0;
+ maskarray_t *masks;
+ int real_irq;
+
+ DRM_DEBUG("\n");
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ if (irq >= drm_via_irq_num) {
+ DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
+ return -EINVAL;
+ }
+
+ real_irq = dev_priv->irq_map[irq];
+
+ if (real_irq < 0) {
+ DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+ irq);
+ return -EINVAL;
+ }
+
+ masks = dev_priv->irq_masks;
+ cur_irq = dev_priv->via_irqs + real_irq;
+
+ if (masks[real_irq][2] && !force_sequence) {
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
+ masks[irq][4]));
+ cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+ } else {
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ (((cur_irq_sequence =
+ atomic_read(&cur_irq->irq_received)) -
+ *sequence) <= (1 << 23)));
+ }
+ *sequence = cur_irq_sequence;
+ return ret;
+}
+
+
+/*
+ * drm_dma.h hooks
+ */
+
+static void via_driver_irq_preinstall(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+ drm_via_irq_t *cur_irq;
+ int i;
+
+ DRM_DEBUG("dev_priv: %p\n", dev_priv);
+ if (dev_priv) {
+ cur_irq = dev_priv->via_irqs;
+
+ dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
+ dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
+
+ if (dev_priv->chipset == VIA_PRO_GROUP_A ||
+ dev_priv->chipset == VIA_DX9_0) {
+ dev_priv->irq_masks = via_pro_group_a_irqs;
+ dev_priv->num_irqs = via_num_pro_group_a;
+ dev_priv->irq_map = via_irqmap_pro_group_a;
+ } else {
+ dev_priv->irq_masks = via_unichrome_irqs;
+ dev_priv->num_irqs = via_num_unichrome;
+ dev_priv->irq_map = via_irqmap_unichrome;
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ atomic_set(&cur_irq->irq_received, 0);
+ cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+ cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+ init_waitqueue_head(&cur_irq->irq_queue);
+ dev_priv->irq_enable_mask |= cur_irq->enable_mask;
+ dev_priv->irq_pending_mask |= cur_irq->pending_mask;
+ cur_irq++;
+
+ DRM_DEBUG("Initializing IRQ %d\n", i);
+ }
+
+ dev_priv->last_vblank_valid = 0;
+
+ /* Clear VSync interrupt regs */
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
+ ~(dev_priv->irq_enable_mask));
+
+ /* Clear bits if they're already high */
+ viadrv_acknowledge_irqs(dev_priv);
+ }
+}
+
+static int via_driver_irq_postinstall(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+
+ DRM_DEBUG("fun: %s\n", __func__);
+ if (!dev_priv)
+ return -EINVAL;
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+ | dev_priv->irq_enable_mask);
+
+ /* Some magic, oh for some data sheets ! */
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
+
+ return 0;
+}
+
+static void via_driver_irq_uninstall(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+
+ DRM_DEBUG("\n");
+ if (dev_priv) {
+
+ /* Some more magic, oh for some data sheets ! */
+
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
+ ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
+ }
+}
+
+static int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_irqwait_t *irqwait = data;
+ struct timespec64 now;
+ int ret = 0;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+ int force_sequence;
+
+ if (irqwait->request.irq >= dev_priv->num_irqs) {
+ DRM_ERROR("Trying to wait on unknown irq %d\n",
+ irqwait->request.irq);
+ return -EINVAL;
+ }
+
+ cur_irq += irqwait->request.irq;
+
+ switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+ case VIA_IRQ_RELATIVE:
+ irqwait->request.sequence +=
+ atomic_read(&cur_irq->irq_received);
+ irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ break;
+ case VIA_IRQ_ABSOLUTE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (irqwait->request.type & VIA_IRQ_SIGNAL) {
+ DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
+ return -EINVAL;
+ }
+
+ force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
+
+ ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
+ &irqwait->request.sequence);
+ ktime_get_ts64(&now);
+ irqwait->reply.tval_sec = now.tv_sec;
+ irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
+
+ return ret;
+}
+
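+/*
+ * XvMC decoder "futexes": simple lock words in the SAREA with an associated
+ * wait queue per lock, used by the VIA_FUTEX_WAIT/WAKE ioctl below.
+ */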
+static void via_init_futex(drm_via_private_t *dev_priv)
+{
+ unsigned int i;
+
+ DRM_DEBUG("\n");
+
+ for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+ init_waitqueue_head(&(dev_priv->decoder_queue[i]));
+ XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
+ }
+}
+
+static void via_cleanup_futex(drm_via_private_t *dev_priv)
+{
+}
+
+static void via_release_futex(drm_via_private_t *dev_priv, int context)
+{
+ unsigned int i;
+ volatile int *lock;
+
+ if (!dev_priv->sarea_priv)
+ return;
+
+ for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+ lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+ if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
+ if (_DRM_LOCK_IS_HELD(*lock)
+ && (*lock & _DRM_LOCK_CONT)) {
+ wake_up(&(dev_priv->decoder_queue[i]));
+ }
+ *lock = 0;
+ }
+ }
+}
+
+static int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_futex_t *fx = data;
+ volatile int *lock;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
+ int ret = 0;
+
+ DRM_DEBUG("\n");
+
+ if (fx->lock >= VIA_NR_XVMC_LOCKS)
+ return -EFAULT;
+
+ lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
+
+ switch (fx->func) {
+ case VIA_FUTEX_WAIT:
+ VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+ (fx->ms / 10) * (HZ / 100), *lock != fx->val);
+ return ret;
+ case VIA_FUTEX_WAKE:
+ wake_up(&(dev_priv->decoder_queue[fx->lock]));
+ return 0;
+ }
+ return 0;
+}
+
+static int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_agp_t *agp = data;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
+
+ dev_priv->agp_initialized = 1;
+ dev_priv->agp_offset = agp->offset;
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+ return 0;
+}
+
+static int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_fb_t *fb = data;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
+
+ dev_priv->vram_initialized = 1;
+ dev_priv->vram_offset = fb->offset;
+
+ mutex_unlock(&dev->struct_mutex);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+
+ return 0;
+
+}
+
+static int via_final_context(struct drm_device *dev, int context)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ via_release_futex(dev_priv, context);
+
+ /* Linux specific until context tracking code gets ported to BSD */
+ /* Last context, perform cleanup */
+ if (list_is_singular(&dev->ctxlist)) {
+ DRM_DEBUG("Last Context\n");
+ drm_legacy_irq_uninstall(dev);
+ via_cleanup_futex(dev_priv);
+ via_do_cleanup_map(dev);
+ }
+ return 1;
+}
+
+static void via_lastclose(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
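+/*
+ * Allocate a block from the VRAM or AGP drm_mm manager and hand back an idr
+ * key that the client uses to free it again.
+ */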
+static int via_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ drm_via_mem_t *mem = data;
+ int retval = 0, user_key;
+ struct via_memblock *item;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
+ unsigned long tmpSize;
+
+ if (mem->type > VIA_MEM_AGP) {
+ DRM_ERROR("Unknown memory type allocation\n");
+ return -EINVAL;
+ }
+ mutex_lock(&dev->struct_mutex);
+ if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
+ dev_priv->agp_initialized)) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR
+ ("Attempt to allocate from uninitialized memory manager.\n");
+ return -EINVAL;
+ }
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
+
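+ /*
+ * The memory managers operate in units of 1 << VIA_MM_ALIGN_SHIFT
+ * bytes; round the requested size up to that granularity.
+ */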
+ tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
+ if (mem->type == VIA_MEM_AGP)
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ tmpSize);
+ else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ tmpSize);
+ if (retval)
+ goto fail_alloc;
+
+ retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+ if (retval < 0)
+ goto fail_idr;
+ user_key = retval;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+ mem->index = user_key;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->index = 0;
+ DRM_DEBUG("Video memory allocation failed\n");
+
+ return retval;
+}
+
+static int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ drm_via_mem_t *mem = data;
+ struct via_memblock *obj;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = idr_find(&dev_priv->object_idr, mem->index);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->index);
+ list_del(&obj->owner_list);
+ drm_mm_remove_node(&obj->mm_node);
+ kfree(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG("free = 0x%lx\n", mem->index);
+
+ return 0;
+}
+
+
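+/*
+ * Preclose hook: free all memory blocks still owned by the closing
+ * file, after taking the idle lock and quiescing DMA.
+ */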
+static void via_reclaim_buffers_locked(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+ struct via_memblock *entry, *next;
+
+ if (!(dev->master && file->master->lock.hw_lock))
+ return;
+
+ drm_legacy_idlelock_take(&file->master->lock);
+
+ mutex_lock(&dev->struct_mutex);
+ if (list_empty(&file_priv->obj_list)) {
+ mutex_unlock(&dev->struct_mutex);
+ drm_legacy_idlelock_release(&file->master->lock);
+
+ return;
+ }
+
+ via_driver_dma_quiescent(dev);
+
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ drm_mm_remove_node(&entry->mm_node);
+ kfree(entry);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_legacy_idlelock_release(&file->master->lock);
+
+ return;
+}
+
+static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ dev_priv->sarea = drm_legacy_getsarea(dev);
+ if (!dev_priv->sarea) {
+ DRM_ERROR("could not find sarea!\n");
+ dev->dev_private = (void *)dev_priv;
+ via_do_cleanup_map(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
+ if (!dev_priv->fb) {
+ DRM_ERROR("could not find framebuffer!\n");
+ dev->dev_private = (void *)dev_priv;
+ via_do_cleanup_map(dev);
+ return -EINVAL;
+ }
+ dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
+ if (!dev_priv->mmio) {
+ DRM_ERROR("could not find mmio region!\n");
+ dev->dev_private = (void *)dev_priv;
+ via_do_cleanup_map(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->sarea_priv =
+ (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+ dev_priv->agpAddr = init->agpAddr;
+
+ via_init_futex(dev_priv);
+
+ via_init_dmablit(dev);
+
+ dev->dev_private = (void *)dev_priv;
+ return 0;
+}
+
+int via_do_cleanup_map(struct drm_device *dev)
+{
+ via_dma_cleanup(dev);
+
+ return 0;
+}
+
+static int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_init_t *init = data;
+
+ DRM_DEBUG("\n");
+
+ switch (init->func) {
+ case VIA_INIT_MAP:
+ return via_do_init_map(dev, init);
+ case VIA_CLEANUP_MAP:
+ return via_do_cleanup_map(dev);
+ }
+
+ return -EINVAL;
+}
+
+static int via_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ drm_via_private_t *dev_priv;
+ int ret = 0;
+
+ dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ idr_init_base(&dev_priv->object_idr, 1);
+ dev->dev_private = (void *)dev_priv;
+
+ dev_priv->chipset = chipset;
+
+ pci_set_master(pdev);
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret) {
+ kfree(dev_priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void via_driver_unload(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ idr_destroy(&dev_priv->object_idr);
+
+ kfree(dev_priv);
+}
+
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
+
+/*
+ * Returns the amount of free space in the command buffer.
+ */
+
+static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
+{
+ uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+ return ((hw_addr <= dev_priv->dma_low) ?
+ (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
+ (hw_addr - dev_priv->dma_low));
+}
+
+/*
+ * How much does the command regulator lag behind?
+ */
+
+static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
+{
+ uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+ return ((hw_addr <= dev_priv->dma_low) ?
+ (dev_priv->dma_low - hw_addr) :
+ (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
+}
+
+/*
+ * Check that the given size fits in the buffer; otherwise wait for space.
+ */
+
+static inline int
+via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
+{
+ uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ uint32_t cur_addr, hw_addr, next_addr;
+ volatile uint32_t *hw_addr_ptr;
+ uint32_t count;
+ hw_addr_ptr = dev_priv->hw_addr_ptr;
+ cur_addr = dev_priv->dma_low;
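+ /*
+ * Wait until the hardware read pointer is no longer inside the
+ * region we are about to write, plus a 512 KiB safety margin.
+ */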
+ next_addr = cur_addr + size + 512 * 1024;
+ count = 1000000;
+ do {
+ hw_addr = *hw_addr_ptr - agp_base;
+ if (count-- == 0) {
+ DRM_ERROR
+ ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
+ hw_addr, cur_addr, next_addr);
+ return -1;
+ }
+ if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
+ msleep(1);
+ } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
+ return 0;
+}
+
+/*
+ * Checks whether the buffer head has reached the end and rewinds the
+ * ring buffer when necessary.
+ *
+ * Returns a virtual pointer to the ring buffer.
+ */
+
+static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
+ unsigned int size)
+{
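+ /*
+ * Jump back to the start of the ring if this write, plus room for
+ * four alignment blocks, would run past the end of the buffer.
+ */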
+ if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
+ dev_priv->dma_high) {
+ via_cmdbuf_rewind(dev_priv);
+ }
+ if (via_cmdbuf_wait(dev_priv, size) != 0)
+ return NULL;
+
+ return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+int via_dma_cleanup(struct drm_device *dev)
+{
+ if (dev->dev_private) {
+ drm_via_private_t *dev_priv =
+ (drm_via_private_t *) dev->dev_private;
+
+ if (dev_priv->ring.virtual_start && dev_priv->mmio) {
+ via_cmdbuf_reset(dev_priv);
+
+ drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = NULL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int via_initialize(struct drm_device *dev,
+ drm_via_private_t *dev_priv,
+ drm_via_dma_init_t *init)
+{
+ if (!dev_priv || !dev_priv->mmio) {
+ DRM_ERROR("via_dma_init called before via_map_init\n");
+ return -EFAULT;
+ }
+
+ if (dev_priv->ring.virtual_start != NULL) {
+ DRM_ERROR("called again without calling cleanup\n");
+ return -EFAULT;
+ }
+
+ if (!dev->agp || !dev->agp->base) {
+ DRM_ERROR("called with no agp memory available\n");
+ return -EFAULT;
+ }
+
+ if (dev_priv->chipset == VIA_DX9_0) {
+ DRM_ERROR("AGP DMA is not supported on this chip\n");
+ return -EINVAL;
+ }
+
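+ /* Map the ring buffer region of AGP space requested by userspace. */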
+ dev_priv->ring.map.offset = dev->agp->base + init->offset;
+ dev_priv->ring.map.size = init->size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_legacy_ioremap(&dev_priv->ring.map, dev);
+
+ if (dev_priv->ring.map.handle == NULL) {
+ via_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+ dev_priv->dma_ptr = dev_priv->ring.virtual_start;
+ dev_priv->dma_low = 0;
+ dev_priv->dma_high = init->size;
+ dev_priv->dma_wrap = init->size;
+ dev_priv->dma_offset = init->offset;
+ dev_priv->last_pause_ptr = NULL;
+ dev_priv->hw_addr_ptr =
+ (volatile uint32_t *)((char *)dev_priv->mmio->handle +
+ init->reg_pause_addr);
+
+ via_cmdbuf_start(dev_priv);
+
+ return 0;
+}
+
+static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_dma_init_t *init = data;
+ int retcode = 0;
+
+ switch (init->func) {
+ case VIA_INIT_DMA:
+ if (!capable(CAP_SYS_ADMIN))
+ retcode = -EPERM;
+ else
+ retcode = via_initialize(dev, dev_priv, init);
+ break;
+ case VIA_CLEANUP_DMA:
+ if (!capable(CAP_SYS_ADMIN))
+ retcode = -EPERM;
+ else
+ retcode = via_dma_cleanup(dev);
+ break;
+ case VIA_DMA_INITIALIZED:
+ retcode = (dev_priv->ring.virtual_start != NULL) ?
+ 0 : -EFAULT;
+ break;
+ default:
+ retcode = -EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
+{
+ drm_via_private_t *dev_priv;
+ uint32_t *vb;
+ int ret;
+
+ dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ if (dev_priv->ring.virtual_start == NULL) {
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
+ return -EFAULT;
+ }
+
+ if (cmd->size > VIA_PCI_BUF_SIZE)
+ return -ENOMEM;
+
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
+ return -EFAULT;
+
+ /*
+ * Running this function on AGP memory is dead slow. Therefore
+ * we run it on a temporary cacheable system memory buffer and
+ * copy it to AGP memory when ready.
+ */
+
+ if ((ret =
+ via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+ cmd->size, dev, 1))) {
+ return ret;
+ }
+
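+ /*
+ * Small submissions are later padded up to 0x100 bytes (see
+ * via_pad_cache()), so reserve slightly more than that here.
+ */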
+ vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
+ if (vb == NULL)
+ return -EAGAIN;
+
+ memcpy(vb, dev_priv->pci_buf, cmd->size);
+
+ dev_priv->dma_low += cmd->size;
+
+ /*
+ * Small submissions somehow stall the CPU (AGP cache effects?);
+ * pad them to a greater size.
+ */
+
+ if (cmd->size < 0x100)
+ via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
+ via_cmdbuf_pause(dev_priv);
+
+ return 0;
+}
+
+int via_driver_dma_quiescent(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ if (!via_wait_idle(dev_priv))
+ return -EBUSY;
+ return 0;
+}
+
+static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ return via_driver_dma_quiescent(dev);
+}
+
+static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_cmdbuffer_t *cmdbuf = data;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+ ret = via_dispatch_cmdbuffer(dev, cmdbuf);
+ return ret;
+}
+
+static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
+ drm_via_cmdbuffer_t *cmd)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (cmd->size > VIA_PCI_BUF_SIZE)
+ return -ENOMEM;
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
+ return -EFAULT;
+
+ if ((ret =
+ via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+ cmd->size, dev, 0))) {
+ return ret;
+ }
+
+ ret =
+ via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
+ cmd->size);
+ return ret;
+}
+
+static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_cmdbuffer_t *cmdbuf = data;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+ ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
+ return ret;
+}
+
+static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
+ uint32_t * vb, int qw_count)
+{
+ for (; qw_count > 0; --qw_count)
+ VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
+ return vb;
+}
+
+/*
+ * This function is used internally by ring buffer management code.
+ *
+ * Returns a virtual pointer to the current position in the ring buffer.
+ */
+static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
+{
+ return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+/*
+ * Hooks a segment of data into the tail of the ring-buffer by
+ * modifying the pause address stored in the buffer itself. If
+ * the regulator has already paused, restart it.
+ */
+static int via_hook_segment(drm_via_private_t *dev_priv,
+ uint32_t pause_addr_hi, uint32_t pause_addr_lo,
+ int no_pci_fire)
+{
+ int paused, count;
+ volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
+ uint32_t reader, ptr;
+ uint32_t diff;
+
+ paused = 0;
+ via_flush_write_combine();
+ (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
+
+ *paused_at = pause_addr_lo;
+ via_flush_write_combine();
+ (void) *paused_at;
+
+ reader = *(dev_priv->hw_addr_ptr);
+ ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
+ dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+ dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
+
+ /*
+ * The command reader may miss the new pause address and
+ * pause on the old one. In that case we need to program
+ * the new start address using PCI.
+ */
+
+ diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+ count = 10000000;
+ while (diff == 0 && count--) {
+ paused = (via_read(dev_priv, 0x41c) & 0x80000000);
+ if (paused)
+ break;
+ reader = *(dev_priv->hw_addr_ptr);
+ diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+ }
+
+ paused = via_read(dev_priv, 0x41c) & 0x80000000;
+
+ if (paused && !no_pci_fire) {
+ reader = *(dev_priv->hw_addr_ptr);
+ diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+ diff &= (dev_priv->dma_high - 1);
+ if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
+ DRM_ERROR("Paused at incorrect address. "
+ "0x%08x, 0x%08x 0x%08x\n",
+ ptr, reader, dev_priv->dma_diff);
+ } else if (diff == 0) {
+ /*
+ * There is a concern that these writes may stall the PCI bus
+ * if the GPU is not idle. However, idling the GPU first
+ * doesn't make a difference.
+ */
+
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
+ }
+ }
+ return paused;
+}
+
+static int via_wait_idle(drm_via_private_t *dev_priv)
+{
+ int count = 10000000;
+
+ while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+ ;
+
+ while (count && (via_read(dev_priv, VIA_REG_STATUS) &
+ (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
+ VIA_3D_ENG_BUSY)))
+ --count;
+ return count;
+}
+
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
+ uint32_t addr, uint32_t *cmd_addr_hi,
+ uint32_t *cmd_addr_lo, int skip_wait)
+{
+ uint32_t agp_base;
+ uint32_t cmd_addr, addr_lo, addr_hi;
+ uint32_t *vb;
+ uint32_t qw_pad_count;
+
+ if (!skip_wait)
+ via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
+
+ vb = via_get_dma(dev_priv);
+ VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
+ (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+ agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
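+ /*
+ * Number of quadwords needed to pad dma_low up to the next
+ * CMDBUF_ALIGNMENT_SIZE boundary.
+ */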
+ qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
+ ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
+
+ cmd_addr = (addr) ? addr :
+ agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
+ addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
+ (cmd_addr & HC_HAGPBpL_MASK));
+ addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
+
+ vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
+ VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
+ return vb;
+}
+
+static void via_cmdbuf_start(drm_via_private_t *dev_priv)
+{
+ uint32_t pause_addr_lo, pause_addr_hi;
+ uint32_t start_addr, start_addr_lo;
+ uint32_t end_addr, end_addr_lo;
+ uint32_t command;
+ uint32_t agp_base;
+ uint32_t ptr;
+ uint32_t reader;
+ int count;
+
+ dev_priv->dma_low = 0;
+
+ agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ start_addr = agp_base;
+ end_addr = agp_base + dev_priv->dma_high;
+
+ start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
+ end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
+ command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
+ ((end_addr & 0xff000000) >> 16));
+
+ dev_priv->last_pause_ptr =
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
+ &pause_addr_hi, &pause_addr_lo, 1) - 1;
+
+ via_flush_write_combine();
+ (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
+
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, command);
+ via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
+
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
+ wmb();
+ via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
+
+ dev_priv->dma_diff = 0;
+
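+ /* Busy-wait until the regulator reports a pause (bit 31 of 0x41c). */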
+ count = 10000000;
+ while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
+
+ reader = *(dev_priv->hw_addr_ptr);
+ ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
+ dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+ /*
+ * This is the difference between where we tell the
+ * command reader to pause and where it actually pauses.
+ * This differs between hw implementations, so we need to
+ * detect it.
+ */
+
+ dev_priv->dma_diff = ptr - reader;
+}
+
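+/*
+ * Pad the command stream with dummy quadwords; used to avoid the
+ * small-submission stall described in via_dispatch_cmdbuffer().
+ */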
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
+{
+ uint32_t *vb;
+
+ via_cmdbuf_wait(dev_priv, qwords + 2);
+ vb = via_get_dma(dev_priv);
+ VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
+ via_align_buffer(dev_priv, vb, qwords);
+}
+
+static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
+{
+ uint32_t *vb = via_get_dma(dev_priv);
+ SetReg2DAGP(0x0C, (0 | (0 << 16)));
+ SetReg2DAGP(0x10, 0 | (0 << 16));
+ SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
+}
+
+static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
+{
+ uint32_t pause_addr_lo, pause_addr_hi;
+ uint32_t jump_addr_lo, jump_addr_hi;
+ volatile uint32_t *last_pause_ptr;
+ uint32_t dma_low_save1, dma_low_save2;
+
+ via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
+ &jump_addr_lo, 0);
+
+ dev_priv->dma_wrap = dev_priv->dma_low;
+
+ /*
+ * Wrap command buffer to the beginning.
+ */
+
+ dev_priv->dma_low = 0;
+ if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
+ DRM_ERROR("via_cmdbuf_jump failed\n");
+
+ via_dummy_bitblt(dev_priv);
+ via_dummy_bitblt(dev_priv);
+
+ last_pause_ptr =
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0) - 1;
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0);
+
+ *last_pause_ptr = pause_addr_lo;
+ dma_low_save1 = dev_priv->dma_low;
+
+ /*
+ * Now, set a trap that will pause the regulator if it tries to rerun the old
+ * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
+ * and reissues the jump command over PCI, while the regulator has already taken the jump
+ * and actually paused at the current buffer end).
+ * There appears to be no other way to detect this condition, since the hw_addr_pointer
+ * does not seem to get updated immediately when a jump occurs.
+ */
+
+ last_pause_ptr =
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0) - 1;
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0);
+ *last_pause_ptr = pause_addr_lo;
+
+ dma_low_save2 = dev_priv->dma_low;
+ dev_priv->dma_low = dma_low_save1;
+ via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
+ dev_priv->dma_low = dma_low_save2;
+ via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
+{
+ via_cmdbuf_jump(dev_priv);
+}
+
+static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
+{
+ uint32_t pause_addr_lo, pause_addr_hi;
+
+ via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
+ via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
+{
+ via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
+}
+
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
+{
+ via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
+ via_wait_idle(dev_priv);
+}
+
+/*
+ * User interface to the space and lag functions.
+ */
+
+static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_cmdbuf_size_t *d_siz = data;
+ int ret = 0;
+ uint32_t tmp_size, count;
+ drm_via_private_t *dev_priv;
+
+ DRM_DEBUG("\n");
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ if (dev_priv->ring.virtual_start == NULL) {
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
+ return -EFAULT;
+ }
+
+ count = 1000000;
+ tmp_size = d_siz->size;
+ switch (d_siz->func) {
+ case VIA_CMDBUF_SPACE:
+ while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
+ && --count) {
+ if (!d_siz->wait)
+ break;
+ }
+ if (!count) {
+ DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
+ ret = -EAGAIN;
+ }
+ break;
+ case VIA_CMDBUF_LAG:
+ while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
+ && --count) {
+ if (!d_siz->wait)
+ break;
+ }
+ if (!count) {
+ DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
+ ret = -EAGAIN;
+ }
+ break;
+ default:
+ ret = -EFAULT;
+ }
+ d_siz->size = tmp_size;
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc via_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+};
+
+static int via_max_ioctl = ARRAY_SIZE(via_ioctls);
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
+static struct pci_device_id pciidlist[] = {
+ viadrv_PCI_IDS
+};
+
+static const struct file_operations via_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_legacy_mmap,
+ .poll = drm_poll,
+ .compat_ioctl = drm_compat_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
+ .load = via_driver_load,
+ .unload = via_driver_unload,
+ .open = via_driver_open,
+ .preclose = via_reclaim_buffers_locked,
+ .postclose = via_driver_postclose,
+ .context_dtor = via_final_context,
+ .get_vblank_counter = via_get_vblank_counter,
+ .enable_vblank = via_enable_vblank,
+ .disable_vblank = via_disable_vblank,
+ .irq_preinstall = via_driver_irq_preinstall,
+ .irq_postinstall = via_driver_irq_postinstall,
+ .irq_uninstall = via_driver_irq_uninstall,
+ .irq_handler = via_driver_irq_handler,
+ .dma_quiescent = via_driver_dma_quiescent,
+ .lastclose = via_lastclose,
+ .ioctls = via_ioctls,
+ .fops = &via_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver via_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
+static int __init via_init(void)
+{
+ driver.num_ioctls = via_max_ioctl;
+ via_init_command_verifier();
+ return drm_legacy_pci_init(&driver, &via_pci_driver);
+}
+
+static void __exit via_exit(void)
+{
+ drm_legacy_pci_exit(&driver, &via_pci_driver);
+}
+
+module_init(via_init);
+module_exit(via_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
deleted file mode 100644
index 5da38082821f..000000000000
--- a/drivers/gpu/drm/via/via_drv.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-
-static int via_driver_open(struct drm_device *dev, struct drm_file *file)
-{
- struct via_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- INIT_LIST_HEAD(&file_priv->obj_list);
-
- return 0;
-}
-
-static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct via_file_private *file_priv = file->driver_priv;
-
- kfree(file_priv);
-}
-
-static struct pci_device_id pciidlist[] = {
- viadrv_PCI_IDS
-};
-
-static const struct file_operations via_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
- .load = via_driver_load,
- .unload = via_driver_unload,
- .open = via_driver_open,
- .preclose = via_reclaim_buffers_locked,
- .postclose = via_driver_postclose,
- .context_dtor = via_final_context,
- .get_vblank_counter = via_get_vblank_counter,
- .enable_vblank = via_enable_vblank,
- .disable_vblank = via_disable_vblank,
- .irq_preinstall = via_driver_irq_preinstall,
- .irq_postinstall = via_driver_irq_postinstall,
- .irq_uninstall = via_driver_irq_uninstall,
- .irq_handler = via_driver_irq_handler,
- .dma_quiescent = via_driver_dma_quiescent,
- .lastclose = via_lastclose,
- .ioctls = via_ioctls,
- .fops = &via_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver via_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init via_init(void)
-{
- driver.num_ioctls = via_max_ioctl;
- via_init_command_verifier();
- return drm_legacy_pci_init(&driver, &via_pci_driver);
-}
-
-static void __exit via_exit(void)
-{
- drm_legacy_pci_exit(&driver, &via_pci_driver);
-}
-
-module_init(via_init);
-module_exit(via_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
deleted file mode 100644
index d5ad1b05bf77..000000000000
--- a/drivers/gpu/drm/via/via_drv.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _VIA_DRV_H_
-#define _VIA_DRV_H_
-
-#include <linux/irqreturn.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <linux/sched/signal.h>
-#include <linux/wait.h>
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_mm.h>
-#include <drm/via_drm.h>
-
-#define DRIVER_AUTHOR "Various"
-
-#define DRIVER_NAME "via"
-#define DRIVER_DESC "VIA Unichrome / Pro"
-#define DRIVER_DATE "20070202"
-
-#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 11
-#define DRIVER_PATCHLEVEL 1
-
-#include "via_verifier.h"
-
-#include "via_dmablit.h"
-
-#define VIA_PCI_BUF_SIZE 60000
-#define VIA_FIRE_BUF_SIZE 1024
-#define VIA_NUM_IRQS 4
-
-typedef struct drm_via_ring_buffer {
- drm_local_map_t map;
- char *virtual_start;
-} drm_via_ring_buffer_t;
-
-typedef uint32_t maskarray_t[5];
-
-typedef struct drm_via_irq {
- atomic_t irq_received;
- uint32_t pending_mask;
- uint32_t enable_mask;
- wait_queue_head_t irq_queue;
-} drm_via_irq_t;
-
-typedef struct drm_via_private {
- drm_via_sarea_t *sarea_priv;
- drm_local_map_t *sarea;
- drm_local_map_t *fb;
- drm_local_map_t *mmio;
- unsigned long agpAddr;
- wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
- char *dma_ptr;
- unsigned int dma_low;
- unsigned int dma_high;
- unsigned int dma_offset;
- uint32_t dma_wrap;
- volatile uint32_t *last_pause_ptr;
- volatile uint32_t *hw_addr_ptr;
- drm_via_ring_buffer_t ring;
- ktime_t last_vblank;
- int last_vblank_valid;
- ktime_t nsec_per_vblank;
- atomic_t vbl_received;
- drm_via_state_t hc_state;
- char pci_buf[VIA_PCI_BUF_SIZE];
- const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
- uint32_t num_fire_offsets;
- int chipset;
- drm_via_irq_t via_irqs[VIA_NUM_IRQS];
- unsigned num_irqs;
- maskarray_t *irq_masks;
- uint32_t irq_enable_mask;
- uint32_t irq_pending_mask;
- int *irq_map;
- unsigned int idle_fault;
- int vram_initialized;
- struct drm_mm vram_mm;
- int agp_initialized;
- struct drm_mm agp_mm;
- /** Mapping of userspace keys to mm objects */
- struct idr object_idr;
- unsigned long vram_offset;
- unsigned long agp_offset;
- drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
- uint32_t dma_diff;
-} drm_via_private_t;
-
-struct via_file_private {
- struct list_head obj_list;
-};
-
-enum via_family {
- VIA_OTHER = 0, /* Baseline */
- VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
- VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
-};
-
-/* VIA MMIO register access */
-static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
-{
- return readl((void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
- u32 val)
-{
- writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
- u32 val)
-{
- writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write8_mask(struct drm_via_private *dev_priv,
- u32 reg, u32 mask, u32 val)
-{
- u32 tmp;
-
- tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
- tmp = (tmp & ~mask) | (val & mask);
- writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-/*
- * Poll in a loop waiting for 'contidition' to be true.
- * Note: A direct replacement with wait_event_interruptible_timeout()
- * will not work unless driver is updated to emit wake_up()
- * in relevant places that can impact the 'condition'
- *
- * Returns:
- * ret keeps current value if 'condition' becomes true
- * ret = -BUSY if timeout happens
- * ret = -EINTR if a signal interrupted the waiting period
- */
-#define VIA_WAIT_ON( ret, queue, timeout, condition ) \
-do { \
- DECLARE_WAITQUEUE(entry, current); \
- unsigned long end = jiffies + (timeout); \
- add_wait_queue(&(queue), &entry); \
- \
- for (;;) { \
- __set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (time_after_eq(jiffies, end)) { \
- ret = -EBUSY; \
- break; \
- } \
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
- if (signal_pending(current)) { \
- ret = -EINTR; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- remove_wait_queue(&(queue), &entry); \
-} while (0)
-
-extern const struct drm_ioctl_desc via_ioctls[];
-extern int via_max_ioctl;
-
-extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
-
-extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
-extern void via_driver_unload(struct drm_device *dev);
-
-extern int via_init_context(struct drm_device *dev, int context);
-extern int via_final_context(struct drm_device *dev, int context);
-
-extern int via_do_cleanup_map(struct drm_device *dev);
-extern u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-extern int via_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void via_disable_vblank(struct drm_device *dev, unsigned int pipe);
-
-extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
-extern void via_driver_irq_preinstall(struct drm_device *dev);
-extern int via_driver_irq_postinstall(struct drm_device *dev);
-extern void via_driver_irq_uninstall(struct drm_device *dev);
-
-extern int via_dma_cleanup(struct drm_device *dev);
-extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(struct drm_device *dev);
-extern void via_init_futex(drm_via_private_t *dev_priv);
-extern void via_cleanup_futex(drm_via_private_t *dev_priv);
-extern void via_release_futex(drm_via_private_t *dev_priv, int context);
-
-extern void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void via_lastclose(struct drm_device *dev);
-
-extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
-extern void via_init_dmablit(struct drm_device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
deleted file mode 100644
index faeae5d881fb..000000000000
--- a/drivers/gpu/drm/via/via_irq.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/* via_irq.c
- *
- * Copyright 2004 BEAM Ltd.
- * Copyright 2002 Tungsten Graphics, Inc.
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Terry Barnaby <terry1@beam.ltd.uk>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Thomas Hellstrom <unichrome@shipmail.org>
- *
- * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
- * interrupt, as well as an infrastructure to handle other interrupts of the chip.
- * The refresh rate is also calculated for video playback sync purposes.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_vblank.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-#define VIA_REG_INTERRUPT 0x200
-
-/* VIA_REG_INTERRUPT */
-#define VIA_IRQ_GLOBAL (1 << 31)
-#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
-#define VIA_IRQ_VBLANK_PENDING (1 << 3)
-#define VIA_IRQ_HQV0_ENABLE (1 << 11)
-#define VIA_IRQ_HQV1_ENABLE (1 << 25)
-#define VIA_IRQ_HQV0_PENDING (1 << 9)
-#define VIA_IRQ_HQV1_PENDING (1 << 10)
-#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
-#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
-#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
-#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
-#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
-#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
-#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
-#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
-
-
-/*
- * Device-specific IRQs go here. This type might need to be extended with
- * the register if there are multiple IRQ control registers.
- * Currently we activate the HQV interrupts of Unichrome Pro group A.
- */
-
-static maskarray_t via_pro_group_a_irqs[] = {
- {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
- 0x00000000 },
- {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
- 0x00000000 },
- {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
- {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
-};
-static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
-static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
-
-static maskarray_t via_unichrome_irqs[] = {
- {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
- {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
-};
-static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
-static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
-
-
-u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- if (pipe != 0)
- return 0;
-
- return atomic_read(&dev_priv->vbl_received);
-}
-
-irqreturn_t via_driver_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
- int handled = 0;
- ktime_t cur_vblank;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
- int i;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- if (status & VIA_IRQ_VBLANK_PENDING) {
- atomic_inc(&dev_priv->vbl_received);
- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
- cur_vblank = ktime_get();
- if (dev_priv->last_vblank_valid) {
- dev_priv->nsec_per_vblank =
- ktime_sub(cur_vblank,
- dev_priv->last_vblank) >> 4;
- }
- dev_priv->last_vblank = cur_vblank;
- dev_priv->last_vblank_valid = 1;
- }
- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
- DRM_DEBUG("nsec per vblank is: %llu\n",
- ktime_to_ns(dev_priv->nsec_per_vblank));
- }
- drm_handle_vblank(dev, 0);
- handled = 1;
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- if (status & cur_irq->pending_mask) {
- atomic_inc(&cur_irq->irq_received);
- wake_up(&cur_irq->irq_queue);
- handled = 1;
- if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
- via_dmablit_handler(dev, 0, 1);
- else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
- via_dmablit_handler(dev, 1, 1);
- }
- cur_irq++;
- }
-
- /* Acknowledge interrupts */
- via_write(dev_priv, VIA_REG_INTERRUPT, status);
-
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
-{
- u32 status;
-
- if (dev_priv) {
- /* Acknowledge interrupts */
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status |
- dev_priv->irq_pending_mask);
- }
-}
-
-int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- u32 status;
-
- if (pipe != 0) {
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
- return -EINVAL;
- }
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
-
- return 0;
-}
-
-void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- u32 status;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
-
- if (pipe != 0)
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
-}
-
-static int
-via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
- unsigned int *sequence)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- unsigned int cur_irq_sequence;
- drm_via_irq_t *cur_irq;
- int ret = 0;
- maskarray_t *masks;
- int real_irq;
-
- DRM_DEBUG("\n");
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- if (irq >= drm_via_irq_num) {
- DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
- return -EINVAL;
- }
-
- real_irq = dev_priv->irq_map[irq];
-
- if (real_irq < 0) {
- DRM_ERROR("Video IRQ %d not available on this hardware.\n",
- irq);
- return -EINVAL;
- }
-
- masks = dev_priv->irq_masks;
- cur_irq = dev_priv->via_irqs + real_irq;
-
- if (masks[real_irq][2] && !force_sequence) {
- VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
- masks[irq][4]));
- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
- } else {
- VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- (((cur_irq_sequence =
- atomic_read(&cur_irq->irq_received)) -
- *sequence) <= (1 << 23)));
- }
- *sequence = cur_irq_sequence;
- return ret;
-}
-
-
-/*
- * drm_dma.h hooks
- */
-
-void via_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
- drm_via_irq_t *cur_irq;
- int i;
-
- DRM_DEBUG("dev_priv: %p\n", dev_priv);
- if (dev_priv) {
- cur_irq = dev_priv->via_irqs;
-
- dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
- dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
-
- if (dev_priv->chipset == VIA_PRO_GROUP_A ||
- dev_priv->chipset == VIA_DX9_0) {
- dev_priv->irq_masks = via_pro_group_a_irqs;
- dev_priv->num_irqs = via_num_pro_group_a;
- dev_priv->irq_map = via_irqmap_pro_group_a;
- } else {
- dev_priv->irq_masks = via_unichrome_irqs;
- dev_priv->num_irqs = via_num_unichrome;
- dev_priv->irq_map = via_irqmap_unichrome;
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- atomic_set(&cur_irq->irq_received, 0);
- cur_irq->enable_mask = dev_priv->irq_masks[i][0];
- cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- init_waitqueue_head(&cur_irq->irq_queue);
- dev_priv->irq_enable_mask |= cur_irq->enable_mask;
- dev_priv->irq_pending_mask |= cur_irq->pending_mask;
- cur_irq++;
-
- DRM_DEBUG("Initializing IRQ %d\n", i);
- }
-
- dev_priv->last_vblank_valid = 0;
-
- /* Clear VSync interrupt regs */
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status &
- ~(dev_priv->irq_enable_mask));
-
- /* Clear bits if they're already high */
- viadrv_acknowledge_irqs(dev_priv);
- }
-}
-
-int via_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
-
- DRM_DEBUG("fun: %s\n", __func__);
- if (!dev_priv)
- return -EINVAL;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
- | dev_priv->irq_enable_mask);
-
- /* Some magic, oh for some data sheets ! */
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
-
- return 0;
-}
-
-void via_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
-
- DRM_DEBUG("\n");
- if (dev_priv) {
-
- /* Some more magic, oh for some data sheets ! */
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status &
- ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
- }
-}
-
-int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_irqwait_t *irqwait = data;
- struct timespec64 now;
- int ret = 0;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
- int force_sequence;
-
- if (irqwait->request.irq >= dev_priv->num_irqs) {
- DRM_ERROR("Trying to wait on unknown irq %d\n",
- irqwait->request.irq);
- return -EINVAL;
- }
-
- cur_irq += irqwait->request.irq;
-
- switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
- case VIA_IRQ_RELATIVE:
- irqwait->request.sequence +=
- atomic_read(&cur_irq->irq_received);
- irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- break;
- case VIA_IRQ_ABSOLUTE:
- break;
- default:
- return -EINVAL;
- }
-
- if (irqwait->request.type & VIA_IRQ_SIGNAL) {
- DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
- return -EINVAL;
- }
-
- force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
-
- ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
- &irqwait->request.sequence);
- ktime_get_ts64(&now);
- irqwait->reply.tval_sec = now.tv_sec;
- irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
-
- return ret;
-}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
deleted file mode 100644
index a9f6b0c11966..000000000000
--- a/drivers/gpu/drm/via/via_map.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_vblank.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
-
- dev_priv->sarea = drm_legacy_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("could not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
-
- dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
- if (!dev_priv->fb) {
- DRM_ERROR("could not find framebuffer!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
- dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio) {
- DRM_ERROR("could not find mmio region!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
-
- dev_priv->sarea_priv =
- (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
- dev_priv->agpAddr = init->agpAddr;
-
- via_init_futex(dev_priv);
-
- via_init_dmablit(dev);
-
- dev->dev_private = (void *)dev_priv;
- return 0;
-}
-
-int via_do_cleanup_map(struct drm_device *dev)
-{
- via_dma_cleanup(dev);
-
- return 0;
-}
-
-int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_init_t *init = data;
-
- DRM_DEBUG("\n");
-
- switch (init->func) {
- case VIA_INIT_MAP:
- return via_do_init_map(dev, init);
- case VIA_CLEANUP_MAP:
- return via_do_cleanup_map(dev);
- }
-
- return -EINVAL;
-}
-
-int via_driver_load(struct drm_device *dev, unsigned long chipset)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_via_private_t *dev_priv;
- int ret = 0;
-
- dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- idr_init(&dev_priv->object_idr);
- dev->dev_private = (void *)dev_priv;
-
- dev_priv->chipset = chipset;
-
- pci_set_master(pdev);
-
- ret = drm_vblank_init(dev, 1);
- if (ret) {
- kfree(dev_priv);
- return ret;
- }
-
- return 0;
-}
-
-void via_driver_unload(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- idr_destroy(&dev_priv->object_idr);
-
- kfree(dev_priv);
-}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
deleted file mode 100644
index c9afa1a51f23..000000000000
--- a/drivers/gpu/drm/via/via_mm.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/slab.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-#define VIA_MM_ALIGN_SHIFT 4
-#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
-
-struct via_memblock {
- struct drm_mm_node mm_node;
- struct list_head owner_list;
-};
-
-int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_agp_t *agp = data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
-
- dev_priv->agp_initialized = 1;
- dev_priv->agp_offset = agp->offset;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
- return 0;
-}
-
-int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_fb_t *fb = data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
-
- dev_priv->vram_initialized = 1;
- dev_priv->vram_offset = fb->offset;
-
- mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
-
- return 0;
-
-}
-
-int via_final_context(struct drm_device *dev, int context)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- via_release_futex(dev_priv, context);
-
- /* Linux specific until context tracking code gets ported to BSD */
- /* Last context, perform cleanup */
- if (list_is_singular(&dev->ctxlist)) {
- DRM_DEBUG("Last Context\n");
- drm_legacy_irq_uninstall(dev);
- via_cleanup_futex(dev_priv);
- via_do_cleanup_map(dev);
- }
- return 1;
-}
-
-void via_lastclose(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (!dev_priv)
- return;
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->vram_initialized) {
- drm_mm_takedown(&dev_priv->vram_mm);
- dev_priv->vram_initialized = 0;
- }
- if (dev_priv->agp_initialized) {
- drm_mm_takedown(&dev_priv->agp_mm);
- dev_priv->agp_initialized = 0;
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-int via_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- drm_via_mem_t *mem = data;
- int retval = 0, user_key;
- struct via_memblock *item;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- struct via_file_private *file_priv = file->driver_priv;
- unsigned long tmpSize;
-
- if (mem->type > VIA_MEM_AGP) {
- DRM_ERROR("Unknown memory type allocation\n");
- return -EINVAL;
- }
- mutex_lock(&dev->struct_mutex);
- if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
- dev_priv->agp_initialized)) {
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR
- ("Attempt to allocate from uninitialized memory manager.\n");
- return -EINVAL;
- }
-
- item = kzalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- retval = -ENOMEM;
- goto fail_alloc;
- }
-
- tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- if (mem->type == VIA_MEM_AGP)
- retval = drm_mm_insert_node(&dev_priv->agp_mm,
- &item->mm_node,
- tmpSize);
- else
- retval = drm_mm_insert_node(&dev_priv->vram_mm,
- &item->mm_node,
- tmpSize);
- if (retval)
- goto fail_alloc;
-
- retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
- if (retval < 0)
- goto fail_idr;
- user_key = retval;
-
- list_add(&item->owner_list, &file_priv->obj_list);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
- mem->index = user_key;
-
- return 0;
-
-fail_idr:
- drm_mm_remove_node(&item->mm_node);
-fail_alloc:
- kfree(item);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = 0;
- mem->size = 0;
- mem->index = 0;
- DRM_DEBUG("Video memory allocation failed\n");
-
- return retval;
-}
-
-int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- drm_via_mem_t *mem = data;
- struct via_memblock *obj;
-
- mutex_lock(&dev->struct_mutex);
- obj = idr_find(&dev_priv->object_idr, mem->index);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- idr_remove(&dev_priv->object_idr, mem->index);
- list_del(&obj->owner_list);
- drm_mm_remove_node(&obj->mm_node);
- kfree(obj);
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("free = 0x%lx\n", mem->index);
-
- return 0;
-}
-
-
-void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file)
-{
- struct via_file_private *file_priv = file->driver_priv;
- struct via_memblock *entry, *next;
-
- if (!(dev->master && file->master->lock.hw_lock))
- return;
-
- drm_legacy_idlelock_take(&file->master->lock);
-
- mutex_lock(&dev->struct_mutex);
- if (list_empty(&file_priv->obj_list)) {
- mutex_unlock(&dev->struct_mutex);
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
- }
-
- via_driver_dma_quiescent(dev);
-
- list_for_each_entry_safe(entry, next, &file_priv->obj_list,
- owner_list) {
- list_del(&entry->owner_list);
- drm_mm_remove_node(&entry->mm_node);
- kfree(entry);
- }
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
-}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
deleted file mode 100644
index 3d6e3a70f318..000000000000
--- a/drivers/gpu/drm/via/via_verifier.c
+++ /dev/null
@@ -1,1110 +0,0 @@
-/*
- * Copyright 2004 The Unichrome Project. All Rights Reserved.
- * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellstrom 2004, 2005.
- * This code was written using docs obtained under NDA from VIA Inc.
- *
- * Don't run this code directly on an AGP buffer. Due to cache problems it will
- * be very slow.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-#include <drm/via_drm.h>
-
-#include "via_3d_reg.h"
-#include "via_drv.h"
-#include "via_verifier.h"
-
-typedef enum {
- state_command,
- state_header2,
- state_header1,
- state_vheader5,
- state_vheader6,
- state_error
-} verifier_state_t;
-
-typedef enum {
- no_check = 0,
- check_for_header2,
- check_for_header1,
- check_for_header2_err,
- check_for_header1_err,
- check_for_fire,
- check_z_buffer_addr0,
- check_z_buffer_addr1,
- check_z_buffer_addr_mode,
- check_destination_addr0,
- check_destination_addr1,
- check_destination_addr_mode,
- check_for_dummy,
- check_for_dd,
- check_texture_addr0,
- check_texture_addr1,
- check_texture_addr2,
- check_texture_addr3,
- check_texture_addr4,
- check_texture_addr5,
- check_texture_addr6,
- check_texture_addr7,
- check_texture_addr8,
- check_texture_addr_mode,
- check_for_vertex_count,
- check_number_texunits,
- forbidden_command
-} hazard_t;
-
-/*
- * Associates each hazard above with a possible multi-command
- * sequence. For example an address that is split over multiple
- * commands and that needs to be checked at the first command
- * that does not include any part of the address.
- */
-
-static drm_via_sequence_t seqs[] = {
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- z_address,
- z_address,
- z_address,
- dest_address,
- dest_address,
- dest_address,
- no_sequence,
- no_sequence,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- no_sequence
-};
-
-typedef struct {
- unsigned int code;
- hazard_t hz;
-} hz_init_t;
-
-static hz_init_t init_table1[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xee, check_for_fire},
- {0xcc, check_for_dummy},
- {0xdd, check_for_dd},
- {0x00, no_check},
- {0x10, check_z_buffer_addr0},
- {0x11, check_z_buffer_addr1},
- {0x12, check_z_buffer_addr_mode},
- {0x13, no_check},
- {0x14, no_check},
- {0x15, no_check},
- {0x23, no_check},
- {0x24, no_check},
- {0x33, no_check},
- {0x34, no_check},
- {0x35, no_check},
- {0x36, no_check},
- {0x37, no_check},
- {0x38, no_check},
- {0x39, no_check},
- {0x3A, no_check},
- {0x3B, no_check},
- {0x3C, no_check},
- {0x3D, no_check},
- {0x3E, no_check},
- {0x40, check_destination_addr0},
- {0x41, check_destination_addr1},
- {0x42, check_destination_addr_mode},
- {0x43, no_check},
- {0x44, no_check},
- {0x50, no_check},
- {0x51, no_check},
- {0x52, no_check},
- {0x53, no_check},
- {0x54, no_check},
- {0x55, no_check},
- {0x56, no_check},
- {0x57, no_check},
- {0x58, no_check},
- {0x70, no_check},
- {0x71, no_check},
- {0x78, no_check},
- {0x79, no_check},
- {0x7A, no_check},
- {0x7B, no_check},
- {0x7C, no_check},
- {0x7D, check_for_vertex_count}
-};
-
-static hz_init_t init_table2[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xee, check_for_fire},
- {0xcc, check_for_dummy},
- {0x00, check_texture_addr0},
- {0x01, check_texture_addr0},
- {0x02, check_texture_addr0},
- {0x03, check_texture_addr0},
- {0x04, check_texture_addr0},
- {0x05, check_texture_addr0},
- {0x06, check_texture_addr0},
- {0x07, check_texture_addr0},
- {0x08, check_texture_addr0},
- {0x09, check_texture_addr0},
- {0x20, check_texture_addr1},
- {0x21, check_texture_addr1},
- {0x22, check_texture_addr1},
- {0x23, check_texture_addr4},
- {0x2B, check_texture_addr3},
- {0x2C, check_texture_addr3},
- {0x2D, check_texture_addr3},
- {0x2E, check_texture_addr3},
- {0x2F, check_texture_addr3},
- {0x30, check_texture_addr3},
- {0x31, check_texture_addr3},
- {0x32, check_texture_addr3},
- {0x33, check_texture_addr3},
- {0x34, check_texture_addr3},
- {0x4B, check_texture_addr5},
- {0x4C, check_texture_addr6},
- {0x51, check_texture_addr7},
- {0x52, check_texture_addr8},
- {0x77, check_texture_addr2},
- {0x78, no_check},
- {0x79, no_check},
- {0x7A, no_check},
- {0x7B, check_texture_addr_mode},
- {0x7C, no_check},
- {0x7D, no_check},
- {0x7E, no_check},
- {0x7F, no_check},
- {0x80, no_check},
- {0x81, no_check},
- {0x82, no_check},
- {0x83, no_check},
- {0x85, no_check},
- {0x86, no_check},
- {0x87, no_check},
- {0x88, no_check},
- {0x89, no_check},
- {0x8A, no_check},
- {0x90, no_check},
- {0x91, no_check},
- {0x92, no_check},
- {0x93, no_check}
-};
-
-static hz_init_t init_table3[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xcc, check_for_dummy},
- {0x00, check_number_texunits}
-};
-
-static hazard_t table1[256];
-static hazard_t table2[256];
-static hazard_t table3[256];
-
-static __inline__ int
-eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
-{
- if ((buf_end - *buf) >= num_words) {
- *buf += num_words;
- return 0;
- }
- DRM_ERROR("Illegal termination of DMA command buffer\n");
- return 1;
-}
-
-/*
- * Partially stolen from drm_memory.h
- */
-
-static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
- unsigned long offset,
- unsigned long size,
- struct drm_device *dev)
-{
- struct drm_map_list *r_list;
- drm_local_map_t *map = seq->map_cache;
-
- if (map && map->offset <= offset
- && (offset + size) <= (map->offset + map->size)) {
- return map;
- }
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->offset <= offset
- && (offset + size) <= (map->offset + map->size)
- && !(map->flags & _DRM_RESTRICTED)
- && (map->type == _DRM_AGP)) {
- seq->map_cache = map;
- return map;
- }
- }
- return NULL;
-}
-
-/*
- * Require that all AGP texture levels reside in the same AGP map which should
- * be mappable by the client. This is not a big restriction.
- * FIXME: To actually enforce this security policy strictly, drm_rmmap
- * would have to wait for dma quiescent before removing an AGP map.
- * The via_drm_lookup_agp_map call in reality seems to take
- * very little CPU time.
- */
-
-static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
-{
- switch (cur_seq->unfinished) {
- case z_address:
- DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
- break;
- case dest_address:
- DRM_DEBUG("Destination start address is 0x%x\n",
- cur_seq->d_addr);
- break;
- case tex_address:
- if (cur_seq->agp_texture) {
- unsigned start =
- cur_seq->tex_level_lo[cur_seq->texture];
- unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
- unsigned long lo = ~0, hi = 0, tmp;
- uint32_t *addr, *pitch, *height, tex;
- unsigned i;
- int npot;
-
- if (end > 9)
- end = 9;
- if (start > 9)
- start = 9;
-
- addr =
- &(cur_seq->t_addr[tex = cur_seq->texture][start]);
- pitch = &(cur_seq->pitch[tex][start]);
- height = &(cur_seq->height[tex][start]);
- npot = cur_seq->tex_npot[tex];
- for (i = start; i <= end; ++i) {
- tmp = *addr++;
- if (tmp < lo)
- lo = tmp;
- if (i == 0 && npot)
- tmp += (*height++ * *pitch++);
- else
- tmp += (*height++ << *pitch++);
- if (tmp > hi)
- hi = tmp;
- }
-
- if (!via_drm_lookup_agp_map
- (cur_seq, lo, hi - lo, cur_seq->dev)) {
- DRM_ERROR
- ("AGP texture is not in allowed map\n");
- return 2;
- }
- }
- break;
- default:
- break;
- }
- cur_seq->unfinished = no_sequence;
- return 0;
-}
-
-static __inline__ int
-investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
-{
- register uint32_t tmp, *tmp_addr;
-
- if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
- int ret;
- if ((ret = finish_current_sequence(cur_seq)))
- return ret;
- }
-
- switch (hz) {
- case check_for_header2:
- if (cmd == HALCYON_HEADER2)
- return 1;
- return 0;
- case check_for_header1:
- if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- return 1;
- return 0;
- case check_for_header2_err:
- if (cmd == HALCYON_HEADER2)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
- break;
- case check_for_header1_err:
- if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
- break;
- case check_for_fire:
- if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
- break;
- case check_for_dummy:
- if (HC_DUMMY == cmd)
- return 0;
- DRM_ERROR("Illegal DMA HC_DUMMY command\n");
- break;
- case check_for_dd:
- if (0xdddddddd == cmd)
- return 0;
- DRM_ERROR("Illegal DMA 0xdddddddd command\n");
- break;
- case check_z_buffer_addr0:
- cur_seq->unfinished = z_address;
- cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
- (cmd & 0x00FFFFFF);
- return 0;
- case check_z_buffer_addr1:
- cur_seq->unfinished = z_address;
- cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
- ((cmd & 0xFF) << 24);
- return 0;
- case check_z_buffer_addr_mode:
- cur_seq->unfinished = z_address;
- if ((cmd & 0x0000C000) == 0)
- return 0;
- DRM_ERROR("Attempt to place Z buffer in system memory\n");
- return 2;
- case check_destination_addr0:
- cur_seq->unfinished = dest_address;
- cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
- (cmd & 0x00FFFFFF);
- return 0;
- case check_destination_addr1:
- cur_seq->unfinished = dest_address;
- cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
- ((cmd & 0xFF) << 24);
- return 0;
- case check_destination_addr_mode:
- cur_seq->unfinished = dest_address;
- if ((cmd & 0x0000C000) == 0)
- return 0;
- DRM_ERROR
- ("Attempt to place 3D drawing buffer in system memory\n");
- return 2;
- case check_texture_addr0:
- cur_seq->unfinished = tex_address;
- tmp = (cmd >> 24);
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
- *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
- return 0;
- case check_texture_addr1:
- cur_seq->unfinished = tex_address;
- tmp = ((cmd >> 24) - 0x20);
- tmp += tmp << 1;
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
- tmp_addr++;
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
- tmp_addr++;
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
- return 0;
- case check_texture_addr2:
- cur_seq->unfinished = tex_address;
- cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
- cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
- return 0;
- case check_texture_addr3:
- cur_seq->unfinished = tex_address;
- tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
- if (tmp == 0 &&
- (cmd & HC_HTXnEnPit_MASK)) {
- cur_seq->pitch[cur_seq->texture][tmp] =
- (cmd & HC_HTXnLnPit_MASK);
- cur_seq->tex_npot[cur_seq->texture] = 1;
- } else {
- cur_seq->pitch[cur_seq->texture][tmp] =
- (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
- cur_seq->tex_npot[cur_seq->texture] = 0;
- if (cmd & 0x000FFFFF) {
- DRM_ERROR
- ("Unimplemented texture level 0 pitch mode.\n");
- return 2;
- }
- }
- return 0;
- case check_texture_addr4:
- cur_seq->unfinished = tex_address;
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
- return 0;
- case check_texture_addr5:
- case check_texture_addr6:
- cur_seq->unfinished = tex_address;
- /*
- * Texture width. We don't care since we have the pitch.
- */
- return 0;
- case check_texture_addr7:
- cur_seq->unfinished = tex_address;
- tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
- tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
- tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
- tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
- tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
- tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
- tmp_addr[0] = 1 << (cmd & 0x0000000F);
- return 0;
- case check_texture_addr8:
- cur_seq->unfinished = tex_address;
- tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
- tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
- tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
- tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
- tmp_addr[6] = 1 << (cmd & 0x0000000F);
- return 0;
- case check_texture_addr_mode:
- cur_seq->unfinished = tex_address;
- if (2 == (tmp = cmd & 0x00000003)) {
- DRM_ERROR
- ("Attempt to fetch texture from system memory.\n");
- return 2;
- }
- cur_seq->agp_texture = (tmp == 3);
- cur_seq->tex_palette_size[cur_seq->texture] =
- (cmd >> 16) & 0x000000007;
- return 0;
- case check_for_vertex_count:
- cur_seq->vertex_count = cmd & 0x0000FFFF;
- return 0;
- case check_number_texunits:
- cur_seq->multitex = (cmd >> 3) & 1;
- return 0;
- default:
- DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
- return 2;
- }
- return 2;
-}
-
-static __inline__ int
-via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t *cur_seq)
-{
- drm_via_private_t *dev_priv =
- (drm_via_private_t *) cur_seq->dev->dev_private;
- uint32_t a_fire, bcmd, dw_count;
- int ret = 0;
- int have_fire;
- const uint32_t *buf = *buffer;
-
- while (buf < buf_end) {
- have_fire = 0;
- if ((buf_end - buf) < 2) {
- DRM_ERROR
- ("Unexpected termination of primitive list.\n");
- ret = 1;
- break;
- }
- if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
- break;
- bcmd = *buf++;
- if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
- DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
- *buf);
- ret = 1;
- break;
- }
- a_fire =
- *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
- HC_HE3Fire_MASK;
-
- /*
- * How many dwords per vertex ?
- */
-
- if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
- DRM_ERROR("Illegal B command vertex data for AGP.\n");
- ret = 1;
- break;
- }
-
- dw_count = 0;
- if (bcmd & (1 << 7))
- dw_count += (cur_seq->multitex) ? 2 : 1;
- if (bcmd & (1 << 8))
- dw_count += (cur_seq->multitex) ? 2 : 1;
- if (bcmd & (1 << 9))
- dw_count++;
- if (bcmd & (1 << 10))
- dw_count++;
- if (bcmd & (1 << 11))
- dw_count++;
- if (bcmd & (1 << 12))
- dw_count++;
- if (bcmd & (1 << 13))
- dw_count++;
- if (bcmd & (1 << 14))
- dw_count++;
-
- while (buf < buf_end) {
- if (*buf == a_fire) {
- if (dev_priv->num_fire_offsets >=
- VIA_FIRE_BUF_SIZE) {
- DRM_ERROR("Fire offset buffer full.\n");
- ret = 1;
- break;
- }
- dev_priv->fire_offsets[dev_priv->
- num_fire_offsets++] =
- buf;
- have_fire = 1;
- buf++;
- if (buf < buf_end && *buf == a_fire)
- buf++;
- break;
- }
- if ((*buf == HALCYON_HEADER2) ||
- ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
- DRM_ERROR("Missing Vertex Fire command, "
- "Stray Vertex Fire command or verifier "
- "lost sync.\n");
- ret = 1;
- break;
- }
- if ((ret = eat_words(&buf, buf_end, dw_count)))
- break;
- }
- if (buf >= buf_end && !have_fire) {
- DRM_ERROR("Missing Vertex Fire command or verifier "
- "lost sync.\n");
- ret = 1;
- break;
- }
- if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
- DRM_ERROR("AGP Primitive list end misaligned.\n");
- ret = 1;
- break;
- }
- }
- *buffer = buf;
- return ret;
-}
-
-static __inline__ verifier_state_t
-via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
- drm_via_state_t *hc_state)
-{
- uint32_t cmd;
- int hz_mode;
- hazard_t hz;
- const uint32_t *buf = *buffer;
- const hazard_t *hz_table;
-
- if ((buf_end - buf) < 2) {
- DRM_ERROR
- ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
- return state_error;
- }
- buf++;
- cmd = (*buf++ & 0xFFFF0000) >> 16;
-
- switch (cmd) {
- case HC_ParaType_CmdVdata:
- if (via_check_prim_list(&buf, buf_end, hc_state))
- return state_error;
- *buffer = buf;
- return state_command;
- case HC_ParaType_NotTex:
- hz_table = table1;
- break;
- case HC_ParaType_Tex:
- hc_state->texture = 0;
- hz_table = table2;
- break;
- case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
- hc_state->texture = 1;
- hz_table = table2;
- break;
- case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
- hz_table = table3;
- break;
- case HC_ParaType_Auto:
- if (eat_words(&buf, buf_end, 2))
- return state_error;
- *buffer = buf;
- return state_command;
- case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
- if (eat_words(&buf, buf_end, 32))
- return state_error;
- *buffer = buf;
- return state_command;
- case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
- case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
- DRM_ERROR("Texture palettes are rejected because of "
- "lack of info how to determine their size.\n");
- return state_error;
- case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
- DRM_ERROR("Fog factor palettes are rejected because of "
- "lack of info how to determine their size.\n");
- return state_error;
- default:
-
- /*
- * There are some unimplemented HC_ParaTypes here, that
- * need to be implemented if the Mesa driver is extended.
- */
-
- DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
- "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
- cmd, *(buf - 2));
- *buffer = buf;
- return state_error;
- }
-
- while (buf < buf_end) {
- cmd = *buf++;
- if ((hz = hz_table[cmd >> 24])) {
- if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
- if (hz_mode == 1) {
- buf--;
- break;
- }
- return state_error;
- }
- } else if (hc_state->unfinished &&
- finish_current_sequence(hc_state)) {
- return state_error;
- }
- }
- if (hc_state->unfinished && finish_current_sequence(hc_state))
- return state_error;
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end, int *fire_count)
-{
- uint32_t cmd;
- const uint32_t *buf = *buffer;
- const uint32_t *next_fire;
- int burst = 0;
-
- next_fire = dev_priv->fire_offsets[*fire_count];
- buf++;
- cmd = (*buf & 0xFFFF0000) >> 16;
- via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
- switch (cmd) {
- case HC_ParaType_CmdVdata:
- while ((buf < buf_end) &&
- (*fire_count < dev_priv->num_fire_offsets) &&
- (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
- while (buf <= next_fire) {
- via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
- (burst & 63), *buf++);
- burst += 4;
- }
- if ((buf < buf_end)
- && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
- buf++;
-
- if (++(*fire_count) < dev_priv->num_fire_offsets)
- next_fire = dev_priv->fire_offsets[*fire_count];
- }
- break;
- default:
- while (buf < buf_end) {
-
- if (*buf == HC_HEADER2 ||
- (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
- (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
- (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- break;
-
- via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
- (burst & 63), *buf++);
- burst += 4;
- }
- }
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ int verify_mmio_address(uint32_t address)
-{
- if ((address > 0x3FF) && (address < 0xC00)) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access 3D- or command burst area.\n");
- return 1;
- } else if ((address > 0xCFF) && (address < 0x1300)) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access PCI DMA area.\n");
- return 1;
- } else if (address > 0x13FF) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access VGA registers.\n");
- return 1;
- }
- return 0;
-}
-
-static __inline__ int
-verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
- uint32_t dwords)
-{
- const uint32_t *buf = *buffer;
-
- if (buf_end - buf < dwords) {
- DRM_ERROR("Illegal termination of video command.\n");
- return 1;
- }
- while (dwords--) {
- if (*buf++) {
- DRM_ERROR("Illegal video command tail.\n");
- return 1;
- }
- }
- *buffer = buf;
- return 0;
-}
-
-static __inline__ verifier_state_t
-via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
-{
- uint32_t cmd;
- const uint32_t *buf = *buffer;
- verifier_state_t ret = state_command;
-
- while (buf < buf_end) {
- cmd = *buf;
- if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
- (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- DRM_ERROR("Invalid HALCYON_HEADER1 command. "
- "Attempt to access 3D- or command burst area.\n");
- ret = state_error;
- break;
- } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- DRM_ERROR("Invalid HALCYON_HEADER1 command. "
- "Attempt to access VGA registers.\n");
- ret = state_error;
- break;
- } else {
- buf += 2;
- }
- }
- *buffer = buf;
- return ret;
-}
-
-static __inline__ verifier_state_t
-via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
- register uint32_t cmd;
- const uint32_t *buf = *buffer;
-
- while (buf < buf_end) {
- cmd = *buf;
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
- buf++;
- }
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
-{
- uint32_t data;
- const uint32_t *buf = *buffer;
-
- if (buf_end - buf < 4) {
- DRM_ERROR("Illegal termination of video header5 command\n");
- return state_error;
- }
-
- data = *buf++ & ~VIA_VIDEOMASK;
- if (verify_mmio_address(data))
- return state_error;
-
- data = *buf++;
- if (*buf++ != 0x00F50000) {
- DRM_ERROR("Illegal header5 header data\n");
- return state_error;
- }
- if (*buf++ != 0x00000000) {
- DRM_ERROR("Illegal header5 header data\n");
- return state_error;
- }
- if (eat_words(&buf, buf_end, data))
- return state_error;
- if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
- return state_error;
- *buffer = buf;
- return state_command;
-
-}
-
-static __inline__ verifier_state_t
-via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
- uint32_t addr, count, i;
- const uint32_t *buf = *buffer;
-
- addr = *buf++ & ~VIA_VIDEOMASK;
- i = count = *buf;
- buf += 3;
- while (i--)
- via_write(dev_priv, addr, *buf++);
- if (count & 3)
- buf += 4 - (count & 3);
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
-{
- uint32_t data;
- const uint32_t *buf = *buffer;
- uint32_t i;
-
- if (buf_end - buf < 4) {
- DRM_ERROR("Illegal termination of video header6 command\n");
- return state_error;
- }
- buf++;
- data = *buf++;
- if (*buf++ != 0x00F60000) {
- DRM_ERROR("Illegal header6 header data\n");
- return state_error;
- }
- if (*buf++ != 0x00000000) {
- DRM_ERROR("Illegal header6 header data\n");
- return state_error;
- }
- if ((buf_end - buf) < (data << 1)) {
- DRM_ERROR("Illegal termination of video header6 command\n");
- return state_error;
- }
- for (i = 0; i < data; ++i) {
- if (verify_mmio_address(*buf++))
- return state_error;
- buf++;
- }
- data <<= 1;
- if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
- return state_error;
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
-
- uint32_t addr, count, i;
- const uint32_t *buf = *buffer;
-
- i = count = *++buf;
- buf += 3;
- while (i--) {
- addr = *buf++;
- via_write(dev_priv, addr, *buf++);
- }
- count <<= 1;
- if (count & 3)
- buf += 4 - (count & 3);
- *buffer = buf;
- return state_command;
-}
-
-int
-via_verify_command_stream(const uint32_t * buf, unsigned int size,
- struct drm_device * dev, int agp)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_state_t *hc_state = &dev_priv->hc_state;
- drm_via_state_t saved_state = *hc_state;
- uint32_t cmd;
- const uint32_t *buf_end = buf + (size >> 2);
- verifier_state_t state = state_command;
- int cme_video;
- int supported_3d;
-
- cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
- dev_priv->chipset == VIA_DX9_0);
-
- supported_3d = dev_priv->chipset != VIA_DX9_0;
-
- hc_state->dev = dev;
- hc_state->unfinished = no_sequence;
- hc_state->map_cache = NULL;
- hc_state->agp = agp;
- hc_state->buf_start = buf;
- dev_priv->num_fire_offsets = 0;
-
- while (buf < buf_end) {
-
- switch (state) {
- case state_header2:
- state = via_check_header2(&buf, buf_end, hc_state);
- break;
- case state_header1:
- state = via_check_header1(&buf, buf_end);
- break;
- case state_vheader5:
- state = via_check_vheader5(&buf, buf_end);
- break;
- case state_vheader6:
- state = via_check_vheader6(&buf, buf_end);
- break;
- case state_command:
- cmd = *buf;
- if ((cmd == HALCYON_HEADER2) && supported_3d)
- state = state_header2;
- else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- state = state_header1;
- else if (cme_video
- && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
- state = state_vheader5;
- else if (cme_video
- && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- state = state_vheader6;
- else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
- DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
- state = state_error;
- } else {
- DRM_ERROR
- ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
- cmd);
- state = state_error;
- }
- break;
- case state_error:
- default:
- *hc_state = saved_state;
- return -EINVAL;
- }
- }
- if (state == state_error) {
- *hc_state = saved_state;
- return -EINVAL;
- }
- return 0;
-}
-
-int
-via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
- unsigned int size)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- uint32_t cmd;
- const uint32_t *buf_end = buf + (size >> 2);
- verifier_state_t state = state_command;
- int fire_count = 0;
-
- while (buf < buf_end) {
-
- switch (state) {
- case state_header2:
- state =
- via_parse_header2(dev_priv, &buf, buf_end,
- &fire_count);
- break;
- case state_header1:
- state = via_parse_header1(dev_priv, &buf, buf_end);
- break;
- case state_vheader5:
- state = via_parse_vheader5(dev_priv, &buf, buf_end);
- break;
- case state_vheader6:
- state = via_parse_vheader6(dev_priv, &buf, buf_end);
- break;
- case state_command:
- cmd = *buf;
- if (cmd == HALCYON_HEADER2)
- state = state_header2;
- else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- state = state_header1;
- else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
- state = state_vheader5;
- else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- state = state_vheader6;
- else {
- DRM_ERROR
- ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
- cmd);
- state = state_error;
- }
- break;
- case state_error:
- default:
- return -EINVAL;
- }
- }
- if (state == state_error)
- return -EINVAL;
- return 0;
-}
-
-static void
-setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
-{
- int i;
-
- for (i = 0; i < 256; ++i)
- table[i] = forbidden_command;
-
- for (i = 0; i < size; ++i)
- table[init_table[i].code] = init_table[i].hz;
-}
-
-void via_init_command_verifier(void)
-{
- setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
- setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
- setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
-}
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
deleted file mode 100644
index 26b6d361ab95..000000000000
--- a/drivers/gpu/drm/via/via_verifier.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2004 The Unichrome Project. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellström 2004.
- */
-
-#ifndef _VIA_VERIFIER_H_
-#define _VIA_VERIFIER_H_
-
-typedef enum {
- no_sequence = 0,
- z_address,
- dest_address,
- tex_address
-} drm_via_sequence_t;
-
-typedef struct {
- unsigned texture;
- uint32_t z_addr;
- uint32_t d_addr;
- uint32_t t_addr[2][10];
- uint32_t pitch[2][10];
- uint32_t height[2][10];
- uint32_t tex_level_lo[2];
- uint32_t tex_level_hi[2];
- uint32_t tex_palette_size[2];
- uint32_t tex_npot[2];
- drm_via_sequence_t unfinished;
- int agp_texture;
- int multitex;
- struct drm_device *dev;
- drm_local_map_t *map_cache;
- uint32_t vertex_count;
- int agp;
- const uint32_t *buf_start;
-} drm_via_state_t;
-
-extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
- struct drm_device *dev, int agp);
-extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
- unsigned int size);
-
-#endif
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
deleted file mode 100644
index 53b1f58f99b4..000000000000
--- a/drivers/gpu/drm/via/via_video.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellstrom 2005.
- *
- * Video and XvMC related functions.
- */
-
-#include <drm/drm_device.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-void via_init_futex(drm_via_private_t *dev_priv)
-{
- unsigned int i;
-
- DRM_DEBUG("\n");
-
- for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- init_waitqueue_head(&(dev_priv->decoder_queue[i]));
- XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
- }
-}
-
-void via_cleanup_futex(drm_via_private_t *dev_priv)
-{
-}
-
-void via_release_futex(drm_via_private_t *dev_priv, int context)
-{
- unsigned int i;
- volatile int *lock;
-
- if (!dev_priv->sarea_priv)
- return;
-
- for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
- if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
- if (_DRM_LOCK_IS_HELD(*lock)
- && (*lock & _DRM_LOCK_CONT)) {
- wake_up(&(dev_priv->decoder_queue[i]));
- }
- *lock = 0;
- }
- }
-}
-
-int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_futex_t *fx = data;
- volatile int *lock;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
- int ret = 0;
-
- DRM_DEBUG("\n");
-
- if (fx->lock >= VIA_NR_XVMC_LOCKS)
- return -EFAULT;
-
- lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
-
- switch (fx->func) {
- case VIA_FUTEX_WAIT:
- VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
- (fx->ms / 10) * (HZ / 100), *lock != fx->val);
- return ret;
- case VIA_FUTEX_WAKE:
- wake_up(&(dev_priv->decoder_queue[fx->lock]));
- return 0;
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 5c7f198c0712..9ea7611a9e0f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -349,6 +349,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
vgdev->ddev->mode_config.max_width = XRES_MAX;
vgdev->ddev->mode_config.max_height = YRES_MAX;
+ vgdev->ddev->mode_config.fb_modifiers_not_supported = true;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5f25a8d15464..0035affc3e59 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -46,12 +46,11 @@ static int virtio_gpu_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
-static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+static int virtio_gpu_pci_quirk(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
- char unique[20];
int ret;
DRM_INFO("pci: %s detected at %s\n",
@@ -63,39 +62,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
return ret;
}
- /*
- * Normally the drm_dev_set_unique() call is done by core DRM.
- * The following comment covers, why virtio cannot rely on it.
- *
- * Unlike the other virtual GPU drivers, virtio abstracts the
- * underlying bus type by using struct virtio_device.
- *
- * Hence the dev_is_pci() check, used in core DRM, will fail
- * and the unique returned will be the virtio_device "virtio0",
- * while a "pci:..." one is required.
- *
- * A few other ideas were considered:
- * - Extend the dev_is_pci() check [in drm_set_busid] to
- * consider virtio.
- * Seems like a bigger hack than what we have already.
- *
- * - Point drm_device::dev to the parent of the virtio_device
- * Semantic changes:
- * * Using the wrong device for i2c, framebuffer_alloc and
- * prime import.
- * Visual changes:
- * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
- * will print the wrong information.
- *
- * We could address the latter issues, by introducing
- * drm_device::bus_dev, ... which would be used solely for this.
- *
- * So for the moment keep things as-is, with a bulky comment
- * for the next person who feels like removing this
- * drm_dev_set_unique() quirk.
- */
- snprintf(unique, sizeof(unique), "pci:%s", pname);
- return drm_dev_set_unique(dev, unique);
+ return 0;
}
static int virtio_gpu_probe(struct virtio_device *vdev)
@@ -109,18 +76,24 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
if (virtio_gpu_modeset == 0)
return -EINVAL;
- dev = drm_dev_alloc(&driver, &vdev->dev);
+ /*
+ * The virtio-gpu device is a virtual device that doesn't have DMA
+ * ops assigned to it, nor a DMA mask set, etc. Its parent device
+ * is the actual GPU device, so use the parent as the DRM device in
+ * order to benefit from using generic DRM APIs.
+ */
+ dev = drm_dev_alloc(&driver, vdev->dev.parent);
if (IS_ERR(dev))
return PTR_ERR(dev);
vdev->priv = dev;
- if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
- ret = virtio_gpu_pci_quirk(dev, vdev);
+ if (dev_is_pci(vdev->dev.parent)) {
+ ret = virtio_gpu_pci_quirk(dev);
if (ret)
goto err_free;
}
- ret = virtio_gpu_init(dev);
+ ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f80664cf98d0..9b98470593b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -101,8 +101,6 @@ struct virtio_gpu_object {
struct virtio_gpu_object_shmem {
struct virtio_gpu_object base;
- struct sg_table *pages;
- uint32_t mapped;
};
struct virtio_gpu_object_vram {
@@ -215,7 +213,6 @@ struct virtio_gpu_drv_cap_cache {
};
struct virtio_gpu_device {
- struct device *dev;
struct drm_device *ddev;
struct virtio_device *vdev;
@@ -283,7 +280,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
/* virtgpu_kms.c */
-int virtio_gpu_init(struct drm_device *dev);
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 580a78809836..7db48d17ee3a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -228,8 +228,10 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
for (i = 0; i < objs->nents; ++i) {
ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
- if (ret)
+ if (ret) {
+ virtio_gpu_array_unlock_resv(objs);
return ret;
+ }
}
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9b2702116f93..5d05093014ac 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -47,7 +47,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
struct virtio_gpu_fence_event *e = NULL;
int ret;
- if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+ if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
return 0;
e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -168,7 +168,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
* array contains any fence from a foreign context.
*/
ret = 0;
- if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
+ if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx))
ret = dma_fence_wait(in_fence, true);
dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 3313b92db531..27b7f14dae89 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -28,6 +28,7 @@
#include <linux/virtio_ring.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include "virtgpu_drv.h"
@@ -66,10 +67,11 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
{
int i, ret;
bool invalid_capset_id = false;
+ struct drm_device *drm = vgdev->ddev;
- vgdev->capsets = kcalloc(num_capsets,
- sizeof(struct virtio_gpu_drv_capset),
- GFP_KERNEL);
+ vgdev->capsets = drmm_kcalloc(drm, num_capsets,
+ sizeof(struct virtio_gpu_drv_capset),
+ GFP_KERNEL);
if (!vgdev->capsets) {
DRM_ERROR("failed to allocate cap sets\n");
return;
@@ -94,7 +96,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
if (ret == 0 || invalid_capset_id) {
spin_lock(&vgdev->display_info_lock);
- kfree(vgdev->capsets);
+ drmm_kfree(drm, vgdev->capsets);
vgdev->capsets = NULL;
spin_unlock(&vgdev->display_info_lock);
return;
@@ -110,7 +112,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
vgdev->num_capsets = num_capsets;
}
-int virtio_gpu_init(struct drm_device *dev)
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
static vq_callback_t *callbacks[] = {
virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
@@ -123,17 +125,16 @@ int virtio_gpu_init(struct drm_device *dev)
u32 num_scanouts, num_capsets;
int ret = 0;
- if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
- vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
+ vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
if (!vgdev)
return -ENOMEM;
vgdev->ddev = dev;
dev->dev_private = vgdev;
- vgdev->vdev = dev_to_virtio(dev->dev);
- vgdev->dev = dev->dev;
+ vgdev->vdev = vdev;
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
@@ -257,7 +258,6 @@ err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
dev->dev_private = NULL;
- kfree(vgdev);
return ret;
}
@@ -296,9 +296,6 @@ void virtio_gpu_release(struct drm_device *dev)
if (vgdev->has_host_visible)
drm_mm_takedown(&vgdev->host_visible_mm);
-
- kfree(vgdev->capsets);
- kfree(vgdev);
}
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 1cc8f3fc8e4b..8d7728181de0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -67,21 +67,6 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
if (virtio_gpu_is_shmem(bo)) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-
- if (shmem->pages) {
- if (shmem->mapped) {
- dma_unmap_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- shmem->mapped = 0;
- }
-
- sg_free_table(shmem->pages);
- kfree(shmem->pages);
- shmem->pages = NULL;
- drm_gem_shmem_unpin(&bo->base);
- }
-
drm_gem_shmem_free(&bo->base);
} else if (virtio_gpu_is_vram(bo)) {
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
@@ -153,35 +138,18 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
unsigned int *nents)
{
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
- int si, ret;
+ struct sg_table *pages;
+ int si;
- ret = drm_gem_shmem_pin(&bo->base);
- if (ret < 0)
- return -EINVAL;
-
- /*
- * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
- * drm_gem_shmem_get_pages_sgt because virtio has it's own set of
- * dma-ops. This is discouraged for other drivers, but should be fine
- * since virtio_gpu doesn't support dma-buf import from other devices.
- */
- shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
- if (IS_ERR(shmem->pages)) {
- drm_gem_shmem_unpin(&bo->base);
- return PTR_ERR(shmem->pages);
- }
+ pages = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- if (use_dma_api) {
- ret = dma_map_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- if (ret)
- return ret;
- *nents = shmem->mapped = shmem->pages->nents;
- } else {
- *nents = shmem->pages->orig_nents;
- }
+ if (use_dma_api)
+ *nents = pages->nents;
+ else
+ *nents = pages->orig_nents;
*ents = kvmalloc_array(*nents,
sizeof(struct virtio_gpu_mem_entry),
@@ -192,13 +160,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
}
if (use_dma_api) {
- for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+ for_each_sgtable_dma_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
(*ents)[si].padding = 0;
}
} else {
- for_each_sgtable_sg(shmem->pages, sg, si) {
+ for_each_sgtable_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
(*ents)[si].length = cpu_to_le32(sg->length);
(*ents)[si].padding = 0;
@@ -234,6 +202,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo->dumb = params->dumb;
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0)
+ goto err_put_id;
+
if (fence) {
ret = -ENOMEM;
objs = virtio_gpu_array_alloc(1);
@@ -246,13 +218,6 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
goto err_put_objs;
}
- ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
- if (ret != 0) {
- virtio_gpu_array_put_free(objs);
- virtio_gpu_free_object(&shmem_obj->base);
- return ret;
- }
-
if (params->blob) {
if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
bo->guest_blob = true;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6d3cc9e238a4..4c09e313bebc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,7 +26,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "virtgpu_drv.h"
@@ -67,16 +66,9 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
-static void virtio_gpu_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(plane);
-}
-
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = virtio_gpu_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -100,8 +92,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
is_cursor, true);
return ret;
}
@@ -266,14 +258,14 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
}
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *state)
{
struct virtio_gpu_framebuffer *vgfb;
- if (!plane->state->fb)
+ if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgfb = to_virtio_gpu_framebuffer(state->fb);
if (vgfb->fence) {
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
@@ -379,11 +371,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
const uint32_t *formats;
- int ret, nformats;
-
- plane = kzalloc(sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return ERR_PTR(-ENOMEM);
+ int nformats;
if (type == DRM_PLANE_TYPE_CURSOR) {
formats = virtio_gpu_cursor_formats;
@@ -394,17 +382,13 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
nformats = ARRAY_SIZE(virtio_gpu_formats);
funcs = &virtio_gpu_primary_helper_funcs;
}
- ret = drm_universal_plane_init(dev, plane, 1 << index,
- &virtio_gpu_plane_funcs,
- formats, nformats,
- NULL, type, NULL);
- if (ret)
- goto err_plane_init;
+
+ plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
+ 1 << index, &virtio_gpu_plane_funcs,
+ formats, nformats, NULL, type, NULL);
+ if (IS_ERR(plane))
+ return plane;
drm_plane_helper_add(plane, funcs);
return plane;
-
-err_plane_init:
- kfree(plane);
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index b7529b2b9883..9ff8660b50ad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -322,7 +322,7 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
free_vbuf(vgdev, vbuf);
- return -1;
+ return -ENODEV;
}
if (vgdev->has_indirect)
@@ -386,7 +386,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (!sgt) {
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
- return -1;
+ return -ENOMEM;
}
elemcnt += sg_ents;
@@ -595,11 +595,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
- dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -721,7 +720,7 @@ static int virtio_get_edid_block(void *data, u8 *buf,
size_t start = block * EDID_LENGTH;
if (start + len > le32_to_cpu(resp->size))
- return -1;
+ return -EINVAL;
memcpy(buf, resp->edid + start, len);
return 0;
}
@@ -1019,11 +1018,9 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- if (virtio_gpu_is_shmem(bo) && use_dma_api) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
- }
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 72f779cbfedd..1b28a6a32948 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -3,6 +3,7 @@ vkms-y := \
vkms_drv.o \
vkms_plane.o \
vkms_output.o \
+ vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
vkms_writeback.o
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 775b97766e08..8e53fa80742b 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -7,203 +7,185 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
+#include <linux/minmax.h>
#include "vkms_drv.h"
-static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
- const struct vkms_composer *composer)
+static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
{
- u32 pixel;
- int src_offset = composer->offset + (y * composer->pitch)
- + (x * composer->cpp);
+ u32 new_color;
- pixel = *(u32 *)&buffer[src_offset];
+ new_color = (src * 0xffff + dst * (0xffff - alpha));
- return pixel;
+ return DIV_ROUND_CLOSEST(new_color, 0xffff);
}
/**
- * compute_crc - Compute CRC value on output frame
+ * pre_mul_alpha_blend - alpha blending equation
+ * @frame_info: source framebuffer's metadata
+ * @stage_buffer: The line with the pixels from src_plane
+ * @output_buffer: A line buffer that receives all the blends output
*
- * @vaddr: address to final framebuffer
- * @composer: framebuffer's metadata
+ * Using the information from the `frame_info`, this blends only the
+ * necessary pixels from the `stage_buffer` to the `output_buffer`
+ * using the premultiplied blend formula.
*
- * returns CRC value computed using crc32 on the visible portion of
- * the final framebuffer at vaddr_out
+ * The current DRM assumption is that pixel color values have already been
+ * pre-multiplied with the alpha channel values. For details, see
+ * drm_plane_create_blend_mode_property(). Also, this formula assumes a
+ * completely opaque background.
*/
-static uint32_t compute_crc(const u8 *vaddr,
- const struct vkms_composer *composer)
+static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
+ struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer)
{
- int x, y;
- u32 crc = 0, pixel = 0;
- int x_src = composer->src.x1 >> 16;
- int y_src = composer->src.y1 >> 16;
- int h_src = drm_rect_height(&composer->src) >> 16;
- int w_src = drm_rect_width(&composer->src) >> 16;
-
- for (y = y_src; y < y_src + h_src; ++y) {
- for (x = x_src; x < x_src + w_src; ++x) {
- pixel = get_pixel_from_buffer(x, y, vaddr, composer);
- crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
- }
+ int x_dst = frame_info->dst.x1;
+ struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
+ struct pixel_argb_u16 *in = stage_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (int x = 0; x < x_limit; x++) {
+ out[x].a = (u16)0xffff;
+ out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
+ out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
+ out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
}
-
- return crc;
}
-static u8 blend_channel(u8 src, u8 dst, u8 alpha)
+static bool check_y_limit(struct vkms_frame_info *frame_info, int y)
{
- u32 pre_blend;
- u8 new_color;
-
- pre_blend = (src * 255 + dst * (255 - alpha));
-
- /* Faster div by 255 */
- new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);
+ if (y >= frame_info->dst.y1 && y < frame_info->dst.y2)
+ return true;
- return new_color;
+ return false;
}
-/**
- * alpha_blend - alpha blending equation
- * @argb_src: src pixel on premultiplied alpha mode
- * @argb_dst: dst pixel completely opaque
- *
- * blend pixels using premultiplied blend formula. The current DRM assumption
- * is that pixel color values have been already pre-multiplied with the alpha
- * channel values. See more drm_plane_create_blend_mode_property(). Also, this
- * formula assumes a completely opaque background.
- */
-static void alpha_blend(const u8 *argb_src, u8 *argb_dst)
+static void fill_background(const struct pixel_argb_u16 *background_color,
+ struct line_buffer *output_buffer)
{
- u8 alpha;
-
- alpha = argb_src[3];
- argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
- argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
- argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
+ for (size_t i = 0; i < output_buffer->n_pixels; i++)
+ output_buffer->pixels[i] = *background_color;
}
/**
- * x_blend - blending equation that ignores the pixel alpha
+ * @wb: The writeback job containing the writeback frame buffer metadata
+ * @crtc_state: The crtc state
+ * @crc32: The crc output of the final frame
+ * @output_buffer: A buffer of a row that will receive the result of the blend(s)
+ * @stage_buffer: The line with the pixels from the plane being blended into the output
*
- * overwrites RGB color value from src pixel to dst pixel.
+ * This function blends the pixels (using `pre_mul_alpha_blend()`)
+ * from all planes, calculates the crc32 of the output from the former step,
+ * and, if necessary, converts and stores the output into the writeback buffer.
*/
-static void x_blend(const u8 *xrgb_src, u8 *xrgb_dst)
+static void blend(struct vkms_writeback_job *wb,
+ struct vkms_crtc_state *crtc_state,
+ u32 *crc32, struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer, size_t row_size)
{
- memcpy(xrgb_dst, xrgb_src, sizeof(u8) * 3);
-}
+ struct vkms_plane_state **plane = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
-/**
- * blend - blend value at vaddr_src with value at vaddr_dst
- * @vaddr_dst: destination address
- * @vaddr_src: source address
- * @dst_composer: destination framebuffer's metadata
- * @src_composer: source framebuffer's metadata
- * @pixel_blend: blending equation based on plane format
- *
- * Blend the vaddr_src value with the vaddr_dst value using a pixel blend
- * equation according to the supported plane formats DRM_FORMAT_(A/XRGB8888)
- * and clearing alpha channel to an completely opaque background. This function
- * uses buffer's metadata to locate the new composite values at vaddr_dst.
- *
- * TODO: completely clear the primary plane (a = 0xff) before starting to blend
- * pixel color values
- */
-static void blend(void *vaddr_dst, void *vaddr_src,
- struct vkms_composer *dst_composer,
- struct vkms_composer *src_composer,
- void (*pixel_blend)(const u8 *, u8 *))
-{
- int i, j, j_dst, i_dst;
- int offset_src, offset_dst;
- u8 *pixel_dst, *pixel_src;
-
- int x_src = src_composer->src.x1 >> 16;
- int y_src = src_composer->src.y1 >> 16;
-
- int x_dst = src_composer->dst.x1;
- int y_dst = src_composer->dst.y1;
- int h_dst = drm_rect_height(&src_composer->dst);
- int w_dst = drm_rect_width(&src_composer->dst);
-
- int y_limit = y_src + h_dst;
- int x_limit = x_src + w_dst;
-
- for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
- for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
- offset_dst = dst_composer->offset
- + (i_dst * dst_composer->pitch)
- + (j_dst++ * dst_composer->cpp);
- offset_src = src_composer->offset
- + (i * src_composer->pitch)
- + (j * src_composer->cpp);
-
- pixel_src = (u8 *)(vaddr_src + offset_src);
- pixel_dst = (u8 *)(vaddr_dst + offset_dst);
- pixel_blend(pixel_src, pixel_dst);
- /* clearing alpha channel (0xff)*/
- pixel_dst[3] = 0xff;
+ const struct pixel_argb_u16 background_color = { .a = 0xffff };
+
+ size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay;
+
+ for (size_t y = 0; y < crtc_y_limit; y++) {
+ fill_background(&background_color, output_buffer);
+
+ /* The active planes are composed associatively in z-order. */
+ for (size_t i = 0; i < n_active_planes; i++) {
+ if (!check_y_limit(plane[i]->frame_info, y))
+ continue;
+
+ plane[i]->plane_read(stage_buffer, plane[i]->frame_info, y);
+ pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
+ output_buffer);
}
- i_dst++;
+
+ *crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
+
+ if (wb)
+ wb->wb_write(&wb->wb_frame_info, output_buffer, y);
}
}
-static void compose_plane(struct vkms_composer *primary_composer,
- struct vkms_composer *plane_composer,
- void *vaddr_out)
+static int check_format_funcs(struct vkms_crtc_state *crtc_state,
+ struct vkms_writeback_job *active_wb)
{
- struct drm_framebuffer *fb = &plane_composer->fb;
- void *vaddr;
- void (*pixel_blend)(const u8 *p_src, u8 *p_dst);
+ struct vkms_plane_state **planes = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
- if (WARN_ON(iosys_map_is_null(&plane_composer->map[0])))
- return;
+ for (size_t i = 0; i < n_active_planes; i++)
+ if (!planes[i]->plane_read)
+ return -1;
- vaddr = plane_composer->map[0].vaddr;
+ if (active_wb && !active_wb->wb_write)
+ return -1;
- if (fb->format->format == DRM_FORMAT_ARGB8888)
- pixel_blend = &alpha_blend;
- else
- pixel_blend = &x_blend;
+ return 0;
+}
+
+static int check_iosys_map(struct vkms_crtc_state *crtc_state)
+{
+ struct vkms_plane_state **plane_state = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
+
+ for (size_t i = 0; i < n_active_planes; i++)
+ if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
+ return -1;
- blend(vaddr_out, vaddr, primary_composer, plane_composer, pixel_blend);
+ return 0;
}
-static int compose_active_planes(void **vaddr_out,
- struct vkms_composer *primary_composer,
- struct vkms_crtc_state *crtc_state)
+static int compose_active_planes(struct vkms_writeback_job *active_wb,
+ struct vkms_crtc_state *crtc_state,
+ u32 *crc32)
{
- struct drm_framebuffer *fb = &primary_composer->fb;
- struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
- const void *vaddr;
- int i;
-
- if (!*vaddr_out) {
- *vaddr_out = kvzalloc(gem_obj->size, GFP_KERNEL);
- if (!*vaddr_out) {
- DRM_ERROR("Cannot allocate memory for output frame.");
- return -ENOMEM;
- }
- }
+ size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
+ struct line_buffer output_buffer, stage_buffer;
+ int ret = 0;
+
+ /*
+	 * This check exists so we can call `crc32_le` for the entire line
+	 * instead of doing it for each channel of each pixel, in case
+	 * `struct pixel_argb_u16` has any gap added by the compiler
+	 * between the struct fields.
+ */
+ static_assert(sizeof(struct pixel_argb_u16) == 8);
- if (WARN_ON(iosys_map_is_null(&primary_composer->map[0])))
+ if (WARN_ON(check_iosys_map(crtc_state)))
return -EINVAL;
- vaddr = primary_composer->map[0].vaddr;
+ if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
+ return -EINVAL;
- memcpy(*vaddr_out, vaddr, gem_obj->size);
+ line_width = crtc_state->base.crtc->mode.hdisplay;
+ stage_buffer.n_pixels = line_width;
+ output_buffer.n_pixels = line_width;
- /* If there are other planes besides primary, we consider the active
- * planes should be in z-order and compose them associatively:
- * ((primary <- overlay) <- cursor)
- */
- for (i = 1; i < crtc_state->num_active_planes; i++)
- compose_plane(primary_composer,
- crtc_state->active_planes[i]->composer,
- *vaddr_out);
+ stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
+ if (!stage_buffer.pixels) {
+ DRM_ERROR("Cannot allocate memory for the output line buffer");
+ return -ENOMEM;
+ }
- return 0;
+ output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
+ if (!output_buffer.pixels) {
+ DRM_ERROR("Cannot allocate memory for intermediate line buffer");
+ ret = -ENOMEM;
+ goto free_stage_buffer;
+ }
+
+ blend(active_wb, crtc_state, crc32, &stage_buffer,
+ &output_buffer, line_width * pixel_size);
+
+ kvfree(output_buffer.pixels);
+free_stage_buffer:
+ kvfree(stage_buffer.pixels);
+
+ return ret;
}
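One motivation for the per-line rework above is memory: the old worker copied the whole primary framebuffer, while the new one only keeps two line buffers of struct pixel_argb_u16. A rough, illustrative comparison for a hypothetical 1920x1080 XRGB8888 frame (numbers invented for the example, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t hdisplay = 1920, vdisplay = 1080, cpp = 4;
	const uint64_t full_frame_copy = (uint64_t)hdisplay * vdisplay * cpp;
	const uint64_t line_buffers = 2ull * hdisplay * 8; /* sizeof(struct pixel_argb_u16) == 8 */

	printf("old full-frame copy: %llu bytes\n", (unsigned long long)full_frame_copy);
	printf("new line buffers:    %llu bytes\n", (unsigned long long)line_buffers);
	return 0;
}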
/**
@@ -221,13 +203,11 @@ void vkms_composer_worker(struct work_struct *work)
struct vkms_crtc_state,
composer_work);
struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- struct vkms_composer *primary_composer = NULL;
- struct vkms_plane_state *act_plane = NULL;
bool crc_pending, wb_pending;
- void *vaddr_out = NULL;
- u32 crc32 = 0;
u64 frame_start, frame_end;
+ u32 crc32 = 0;
int ret;
spin_lock_irq(&out->composer_lock);
@@ -247,35 +227,19 @@ void vkms_composer_worker(struct work_struct *work)
if (!crc_pending)
return;
- if (crtc_state->num_active_planes >= 1) {
- act_plane = crtc_state->active_planes[0];
- if (act_plane->base.base.plane->type == DRM_PLANE_TYPE_PRIMARY)
- primary_composer = act_plane->composer;
- }
-
- if (!primary_composer)
- return;
-
if (wb_pending)
- vaddr_out = crtc_state->active_writeback->data[0].vaddr;
+ ret = compose_active_planes(active_wb, crtc_state, &crc32);
+ else
+ ret = compose_active_planes(NULL, crtc_state, &crc32);
- ret = compose_active_planes(&vaddr_out, primary_composer,
- crtc_state);
- if (ret) {
- if (ret == -EINVAL && !wb_pending)
- kvfree(vaddr_out);
+ if (ret)
return;
- }
-
- crc32 = compute_crc(vaddr_out, primary_composer);
if (wb_pending) {
drm_writeback_signal_completion(&out->wb_connector, 0);
spin_lock_irq(&out->composer_lock);
crtc_state->wb_pending = false;
spin_unlock_irq(&out->composer_lock);
- } else {
- kvfree(vaddr_out);
}
/*
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 1d60654b553b..0a67b8073f7e 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -23,28 +23,41 @@
#define NUM_OVERLAY_PLANES 8
-struct vkms_writeback_job {
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
-};
-
-struct vkms_composer {
- struct drm_framebuffer fb;
+struct vkms_frame_info {
+ struct drm_framebuffer *fb;
struct drm_rect src, dst;
- struct iosys_map map[4];
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
unsigned int offset;
unsigned int pitch;
unsigned int cpp;
};
+struct pixel_argb_u16 {
+ u16 a, r, g, b;
+};
+
+struct line_buffer {
+ size_t n_pixels;
+ struct pixel_argb_u16 *pixels;
+};
+
+struct vkms_writeback_job {
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
+ struct vkms_frame_info wb_frame_info;
+ void (*wb_write)(struct vkms_frame_info *frame_info,
+ const struct line_buffer *buffer, int y);
+};
+
/**
* vkms_plane_state - Driver specific plane state
* @base: base plane state
- * @composer: data required for composing computation
+ * @frame_info: data required for composing computation
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
- struct vkms_composer *composer;
+ struct vkms_frame_info *frame_info;
+ void (*plane_read)(struct line_buffer *buffer,
+ const struct vkms_frame_info *frame_info, int y);
};
struct vkms_plane {
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
new file mode 100644
index 000000000000..d4950688b3f1
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_fixed.h>
+
+#include "vkms_formats.h"
+
+static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
+{
+ return frame_info->offset + (y * frame_info->pitch)
+ + (x * frame_info->cpp);
+}
+
+/*
+ * packed_pixels_addr - Get the pointer to the pixel at a given pair of coordinates
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x (width) coordinate of the 2D buffer
+ * @y: The y (height) coordinate of the 2D buffer
+ *
+ * Takes the information stored in the frame_info, a pair of coordinates, and
+ * returns the address of the first color channel.
+ * This function assumes the channels are packed together, i.e. a color channel
+ * comes immediately after another in memory, and therefore this function
+ * doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21).
+ */
+static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
+ int x, int y)
+{
+ size_t offset = pixel_offset(frame_info, x, y);
+
+ return (u8 *)frame_info->map[0].vaddr + offset;
+}
+
+static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
+{
+ int x_src = frame_info->src.x1 >> 16;
+ int y_src = y - frame_info->dst.y1 + (frame_info->src.y1 >> 16);
+
+ return packed_pixels_addr(frame_info, x_src, y_src);
+}
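A small worked example of the packed-pixel addressing above, with invented frame_info values (offset 0, pitch of 1920 XRGB8888 pixels, cpp 4):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	const unsigned int offset = 0, pitch = 1920 * 4, cpp = 4;
	const int x = 10, y = 2;
	/* Same formula as pixel_offset(): start of plane 0 + rows + columns. */
	size_t off = offset + (size_t)y * pitch + (size_t)x * cpp;

	printf("pixel (%d,%d) starts %zu bytes into the mapping\n", x, y, off); /* 15400 */
	return 0;
}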
+
+static void ARGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u8 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ /*
+		 * The 257 is the "conversion ratio". This number is obtained by the
+		 * (2^16 - 1) / (2^8 - 1) division, which, in this case, tries to get
+		 * the best color value in a pixel format with more possibilities.
+		 * A similar idea applies to other RGB color conversions.
+ */
+ out_pixels[x].a = (u16)src_pixels[3] * 257;
+ out_pixels[x].r = (u16)src_pixels[2] * 257;
+ out_pixels[x].g = (u16)src_pixels[1] * 257;
+ out_pixels[x].b = (u16)src_pixels[0] * 257;
+ }
+}
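The "conversion ratio" of 257 in the comment above has a property worth spelling out: 257 == 0x0101, so multiplying an 8-bit value by it simply replicates the byte and maps 0x00..0xff exactly onto 0x0000..0xffff. A standalone check (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	assert((uint16_t)(0x00 * 257) == 0x0000);
	assert((uint16_t)(0x80 * 257) == 0x8080);
	assert((uint16_t)(0xff * 257) == 0xffff); /* full-scale maps to full-scale */
	return 0;
}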
+
+static void XRGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u8 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ out_pixels[x].a = (u16)0xffff;
+ out_pixels[x].r = (u16)src_pixels[2] * 257;
+ out_pixels[x].g = (u16)src_pixels[1] * 257;
+ out_pixels[x].b = (u16)src_pixels[0] * 257;
+ }
+}
+
+static void ARGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info,
+ int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u16 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ out_pixels[x].a = le16_to_cpu(src_pixels[3]);
+ out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+ out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+ out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+ }
+}
+
+static void XRGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info,
+ int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u16 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ out_pixels[x].a = (u16)0xffff;
+ out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+ out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+ out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+ }
+}
+
+static void RGB565_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u16 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels++) {
+ u16 rgb_565 = le16_to_cpu(*src_pixels);
+ s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
+ s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
+ s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
+
+ out_pixels[x].a = (u16)0xffff;
+ out_pixels[x].r = drm_fixp2int(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixels[x].g = drm_fixp2int(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixels[x].b = drm_fixp2int(drm_fixp_mul(fp_b, fp_rb_ratio));
+ }
+}
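As a sanity check of the RGB565 scaling above: the ratios are chosen so the 5-bit and 6-bit maxima land exactly on 0xffff. A sketch with plain integer math (the kernel path uses 32.32 drm_fixp_* helpers; the values below are the same ratios without the fixed-point plumbing):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	assert(31u * 65535u / 31u == 65535u); /* max 5-bit red/blue -> 0xffff */
	assert(63u * 65535u / 63u == 65535u); /* max 6-bit green    -> 0xffff */
	assert(0u * 65535u / 31u == 0u);      /* black stays black */
	return 0;
}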
+
+/*
+ * The following functions take a line of argb_u16 pixels from the
+ * src_buffer, convert them to a specific format, and store them in the
+ * destination.
+ *
+ * They are used in `compose_active_planes` to convert and store a line
+ * from the src_buffer to the writeback buffer.
+ */
+static void argb_u16_to_ARGB8888(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ /*
+		 * The sequence below is important because the format's byte order is
+		 * little-endian. In the case of ARGB8888 the memory is
+ * organized this way:
+ *
+ * | Addr | = blue channel
+ * | Addr + 1 | = green channel
+ * | Addr + 2 | = Red channel
+ * | Addr + 3 | = Alpha channel
+ */
+ dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixels[x].a, 257);
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
+ }
+}
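On the way back to 8 bits, DIV_ROUND_CLOSEST(v, 257) undoes the expansion used by the readers, so an expand-then-compress round trip is lossless. A standalone check (local macro assumed equivalent to the kernel's for unsigned operands):

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	assert(DIV_ROUND_CLOSEST(0xffffu, 257u) == 0xffu);
	assert(DIV_ROUND_CLOSEST(0x8080u, 257u) == 0x80u);
	for (uint32_t v = 0; v <= 0xff; v++)	/* round-trip every byte value */
		assert(DIV_ROUND_CLOSEST(v * 257u, 257u) == v);
	return 0;
}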
+
+static void argb_u16_to_XRGB8888(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ dst_pixels[3] = 0xff;
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
+ }
+}
+
+static void argb_u16_to_ARGB16161616(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ dst_pixels[3] = cpu_to_le16(in_pixels[x].a);
+ dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
+ dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
+ dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
+ }
+}
+
+static void argb_u16_to_XRGB16161616(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ dst_pixels[3] = 0xffff;
+ dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
+ dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
+ dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
+ }
+}
+
+static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels++) {
+ s64 fp_r = drm_int2fixp(in_pixels[x].r);
+ s64 fp_g = drm_int2fixp(in_pixels[x].g);
+ s64 fp_b = drm_int2fixp(in_pixels[x].b);
+
+ u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+ u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+ u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
+
+ *dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ }
+}
+
+void *get_frame_to_line_function(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return &ARGB8888_to_argb_u16;
+ case DRM_FORMAT_XRGB8888:
+ return &XRGB8888_to_argb_u16;
+ case DRM_FORMAT_ARGB16161616:
+ return &ARGB16161616_to_argb_u16;
+ case DRM_FORMAT_XRGB16161616:
+ return &XRGB16161616_to_argb_u16;
+ case DRM_FORMAT_RGB565:
+ return &RGB565_to_argb_u16;
+ default:
+ return NULL;
+ }
+}
+
+void *get_line_to_frame_function(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return &argb_u16_to_ARGB8888;
+ case DRM_FORMAT_XRGB8888:
+ return &argb_u16_to_XRGB8888;
+ case DRM_FORMAT_ARGB16161616:
+ return &argb_u16_to_ARGB16161616;
+ case DRM_FORMAT_XRGB16161616:
+ return &argb_u16_to_XRGB16161616;
+ case DRM_FORMAT_RGB565:
+ return &argb_u16_to_RGB565;
+ default:
+ return NULL;
+ }
+}
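A hedged sketch of how these lookup helpers are meant to be used (a simplified userspace model with invented names; in the driver, vkms_plane_atomic_update() stores the reader in plane_read, vkms_wb_atomic_commit() stores the writer in wb_write, and blend() calls them once per scanline):

#include <stdio.h>
#include <stdint.h>

struct line { int n_pixels; };

typedef void (*read_line_t)(struct line *buf, int y);

static void read_xrgb8888(struct line *buf, int y) { printf("XRGB8888 row %d (%d px)\n", y, buf->n_pixels); }
static void read_rgb565(struct line *buf, int y)   { printf("RGB565   row %d (%d px)\n", y, buf->n_pixels); }

static read_line_t pick_reader(uint32_t fourcc)
{
	switch (fourcc) {
	case 0x34325258: /* DRM_FORMAT_XRGB8888 ('XR24') */
		return read_xrgb8888;
	case 0x36314752: /* DRM_FORMAT_RGB565 ('RG16') */
		return read_rgb565;
	default:
		return NULL; /* unsupported; check_format_funcs() would reject this */
	}
}

int main(void)
{
	struct line buf = { .n_pixels = 1920 };
	read_line_t rd = pick_reader(0x34325258);

	if (rd)
		for (int y = 0; y < 3; y++)
			rd(&buf, y);
	return 0;
}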
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
new file mode 100644
index 000000000000..43b7c1979018
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_FORMATS_H_
+#define _VKMS_FORMATS_H_
+
+#include "vkms_drv.h"
+
+void *get_frame_to_line_function(u32 format);
+
+void *get_line_to_frame_function(u32 format);
+
+#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index d8eb674b49a6..c3a845220e10 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -7,37 +7,42 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include "vkms_drv.h"
+#include "vkms_formats.h"
static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_RGB565
};
static const u32 vkms_plane_formats[] = {
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_RGB565
};
static struct drm_plane_state *
vkms_plane_duplicate_state(struct drm_plane *plane)
{
struct vkms_plane_state *vkms_state;
- struct vkms_composer *composer;
+ struct vkms_frame_info *frame_info;
vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
if (!vkms_state)
return NULL;
- composer = kzalloc(sizeof(*composer), GFP_KERNEL);
- if (!composer) {
- DRM_DEBUG_KMS("Couldn't allocate composer\n");
+ frame_info = kzalloc(sizeof(*frame_info), GFP_KERNEL);
+ if (!frame_info) {
+ DRM_DEBUG_KMS("Couldn't allocate frame_info\n");
kfree(vkms_state);
return NULL;
}
- vkms_state->composer = composer;
+ vkms_state->frame_info = frame_info;
__drm_gem_duplicate_shadow_plane_state(plane, &vkms_state->base);
@@ -50,16 +55,16 @@ static void vkms_plane_destroy_state(struct drm_plane *plane,
struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state);
struct drm_crtc *crtc = vkms_state->base.base.crtc;
- if (crtc) {
+ if (crtc && vkms_state->frame_info->fb) {
/* dropping the reference we acquired in
* vkms_primary_plane_update()
*/
- if (drm_framebuffer_read_refcount(&vkms_state->composer->fb))
- drm_framebuffer_put(&vkms_state->composer->fb);
+ if (drm_framebuffer_read_refcount(vkms_state->frame_info->fb))
+ drm_framebuffer_put(vkms_state->frame_info->fb);
}
- kfree(vkms_state->composer);
- vkms_state->composer = NULL;
+ kfree(vkms_state->frame_info);
+ vkms_state->frame_info = NULL;
__drm_gem_destroy_shadow_plane_state(&vkms_state->base);
kfree(vkms_state);
@@ -99,23 +104,26 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
struct vkms_plane_state *vkms_plane_state;
struct drm_shadow_plane_state *shadow_plane_state;
struct drm_framebuffer *fb = new_state->fb;
- struct vkms_composer *composer;
+ struct vkms_frame_info *frame_info;
+ u32 fmt;
if (!new_state->crtc || !fb)
return;
+ fmt = fb->format->format;
vkms_plane_state = to_vkms_plane_state(new_state);
shadow_plane_state = &vkms_plane_state->base;
- composer = vkms_plane_state->composer;
- memcpy(&composer->src, &new_state->src, sizeof(struct drm_rect));
- memcpy(&composer->dst, &new_state->dst, sizeof(struct drm_rect));
- memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer));
- memcpy(&composer->map, &shadow_plane_state->data, sizeof(composer->map));
- drm_framebuffer_get(&composer->fb);
- composer->offset = fb->offsets[0];
- composer->pitch = fb->pitches[0];
- composer->cpp = fb->format->cpp[0];
+ frame_info = vkms_plane_state->frame_info;
+ memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
+ memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
+ frame_info->fb = fb;
+ memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
+ drm_framebuffer_get(frame_info->fb);
+ frame_info->offset = fb->offsets[0];
+ frame_info->pitch = fb->pitches[0];
+ frame_info->cpp = fb->format->cpp[0];
+ vkms_plane_state->plane_read = get_frame_to_line_function(fmt);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -139,8 +147,8 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
can_position = true;
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
can_position, true);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 3b3c1e757ab4..84a51cd281b9 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -12,9 +12,13 @@
#include <drm/drm_gem_shmem_helper.h>
#include "vkms_drv.h"
+#include "vkms_formats.h"
static const u32 vkms_wb_formats[] = {
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_RGB565
};
static const struct drm_connector_funcs vkms_wb_connector_funcs = {
@@ -31,6 +35,7 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
{
struct drm_framebuffer *fb;
const struct drm_display_mode *mode = &crtc_state->mode;
+ int ret;
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
@@ -42,11 +47,9 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
- if (fb->format->format != vkms_wb_formats[0]) {
- DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
- &fb->format->format);
- return -EINVAL;
- }
+ ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -76,12 +79,15 @@ static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
if (!vkmsjob)
return -ENOMEM;
- ret = drm_gem_fb_vmap(job->fb, vkmsjob->map, vkmsjob->data);
+ ret = drm_gem_fb_vmap(job->fb, vkmsjob->wb_frame_info.map, vkmsjob->data);
if (ret) {
DRM_ERROR("vmap failed: %d\n", ret);
goto err_kfree;
}
+ vkmsjob->wb_frame_info.fb = job->fb;
+ drm_framebuffer_get(vkmsjob->wb_frame_info.fb);
+
job->priv = vkmsjob;
return 0;
@@ -100,7 +106,9 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
if (!job->fb)
return;
- drm_gem_fb_vunmap(job->fb, vkmsjob->map);
+ drm_gem_fb_vunmap(job->fb, vkmsjob->wb_frame_info.map);
+
+ drm_framebuffer_put(vkmsjob->wb_frame_info.fb);
vkmsdev = drm_device_to_vkms_device(job->fb->dev);
vkms_set_composer(&vkmsdev->output, false);
@@ -117,17 +125,32 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
struct drm_writeback_connector *wb_conn = &output->wb_connector;
struct drm_connector_state *conn_state = wb_conn->base.state;
struct vkms_crtc_state *crtc_state = output->composer_state;
+ struct drm_framebuffer *fb = connector_state->writeback_job->fb;
+ u16 crtc_height = crtc_state->base.crtc->mode.vdisplay;
+ u16 crtc_width = crtc_state->base.crtc->mode.hdisplay;
+ struct vkms_writeback_job *active_wb;
+ struct vkms_frame_info *wb_frame_info;
+ u32 wb_format = fb->format->format;
if (!conn_state)
return;
vkms_set_composer(&vkmsdev->output, true);
+ active_wb = conn_state->writeback_job->priv;
+ wb_frame_info = &active_wb->wb_frame_info;
+
spin_lock_irq(&output->composer_lock);
- crtc_state->active_writeback = conn_state->writeback_job->priv;
+ crtc_state->active_writeback = active_wb;
+ wb_frame_info->offset = fb->offsets[0];
+ wb_frame_info->pitch = fb->pitches[0];
+ wb_frame_info->cpp = fb->format->cpp[0];
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
drm_writeback_queue_job(wb_conn, connector_state);
+ active_wb->wb_write = get_line_to_frame_function(wb_format);
+ drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
+ drm_rect_init(&wb_frame_info->dst, 0, 0, crtc_width, crtc_height);
}
static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
index 1f6e3bbc6605..f84376718086 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -121,7 +121,7 @@ typedef __attribute__((aligned(32))) struct MKSGuestStatInfoEntry {
*
* Since the MKSGuestStatInfoEntry structures contain userlevel
* pointers, the InstanceDescriptor also contains pointers to the
- * begining of these sections allowing the host side code to correctly
+ * beginning of these sections allowing the host side code to correctly
* interpret the pointers.
*
* Because the host side code never acknowledges anything back to the
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 4c8700027c6d..1a2fa0f83f5f 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -96,7 +96,7 @@ struct ttm_object_device;
*
* This struct is intended to be used as a base struct for objects that
* are visible to user-space. It provides a global name, race-safe
- * access and refcounting, minimal access contol and hooks for unref actions.
+ * access and refcounting, minimal access control and hooks for unref actions.
*/
struct ttm_base_object {
@@ -138,7 +138,7 @@ struct ttm_prime_object {
*
* @tfile: Pointer to a struct ttm_object_file.
* @base: The struct ttm_base_object to initialize.
- * @shareable: This object is shareable with other applcations.
+ * @shareable: This object is shareable with other applications.
* (different @tfile pointers.)
* @type: The object type.
* @refcount_release: See the struct ttm_base_object description.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 85a66014c2b6..822251aaab0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -429,9 +429,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
drm_gem_private_object_init(vdev, &bo->base, size);
- ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
- ttm_bo_type_kernel, placement, 0,
- &ctx, NULL, NULL, vmw_bo_default_destroy);
+ ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
+ placement, 0, &ctx, NULL, NULL,
+ vmw_bo_default_destroy);
if (unlikely(ret))
goto error_free;
@@ -512,10 +512,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,
size = ALIGN(size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
- ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
- ttm_bo_type_device,
- placement,
- 0, &ctx, NULL, NULL, bo_free);
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
+ placement, 0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
return ret;
}
@@ -729,7 +727,7 @@ int vmw_user_bo_lookup(struct drm_file *filp,
* Any persistent usage of the object requires a refcount to be taken using
* ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
* needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called inbetween these function calls.
+ * or scheduling functions may be called in between these function calls.
*
* Return: A struct vmw_buffer_object pointer if successful or negative
* error pointer on failure.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 415774fde796..82ef58ccdd42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -36,7 +36,7 @@
* @res: Refcounted pointer to a struct vmw_resource.
* @hash: Hash entry for the manager hash table.
* @head: List head used either by the staging list or the manager list
- * of commited resources.
+ * of committed resources.
* @state: Staging state of this resource entry.
* @man: Pointer to a resource manager for this entry.
*/
@@ -51,9 +51,9 @@ struct vmw_cmdbuf_res {
/**
* struct vmw_cmdbuf_res_manager - Command buffer resource manager.
*
- * @resources: Hash table containing staged and commited command buffer
+ * @resources: Hash table containing staged and committed command buffer
* resources
- * @list: List of commited command buffer resources.
+ * @list: List of committed command buffer resources.
* @dev_priv: Pointer to a device private structure.
*
* @resources and @list are protected by the cmdbuf mutex for now.
@@ -118,7 +118,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
* This function commits a list of command buffer resource
* additions or removals.
* It is typically called when the execbuf ioctl call triggering these
- * actions has commited the fifo contents to the device.
+ * actions has committed the fifo contents to the device.
*/
void vmw_cmdbuf_res_commit(struct list_head *list)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 01a5b47e95f9..d7bd5eb1d3ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1398,18 +1398,6 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
root, "system_mob_ttm");
}
-static unsigned long
-vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct drm_file *file_priv = file->private_data;
- struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
-
- return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
- dev_priv->drm.vma_offset_manager);
-}
-
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr)
{
@@ -1576,7 +1564,6 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
- .get_unmapped_area = vmw_get_unmapped_area,
};
static const struct drm_driver driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index be19aa6e1f13..09e2d738aa87 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -877,7 +877,6 @@ static inline void vmw_user_resource_noref_release(void)
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
-extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
struct ttm_placement *placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d49de4905efa..f085dbd4736d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1172,7 +1172,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
- if (IS_ERR_OR_NULL(vmw_bo)) {
+ if (IS_ERR(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
@@ -1226,7 +1226,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
- if (IS_ERR_OR_NULL(vmw_bo)) {
+ if (IS_ERR(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ff2f735bbe7a..214829c32ed8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>
@@ -720,8 +719,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
new_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (!ret && new_fb) {
@@ -762,8 +761,8 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
new_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
@@ -2257,7 +2256,7 @@ out_fini:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
-
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 7046dfd0d1c6..85f86faa3243 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -70,7 +70,7 @@ struct vmw_du_update_plane {
*
* Some surface resource or buffer object need some extra cmd submission
* like update GB image for proxy surface and define a GMRFB for screen
- * object. That should should be done here as this callback will be
+ * object. That should be done here as this callback will be
 * called after FIFO allocation with the address of command buffer.
*
* This callback is optional.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index e4347faccee0..b8761f16dd78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -28,7 +28,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 2aceac7856e2..089046fa21be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -1076,6 +1076,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
if (desc_len < 0) {
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
+ __free_page(page);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a7d62a4eb47b..f66caa540e14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -525,7 +525,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate
* one, reserve and validate it.
*
- * @ticket: The ww aqcquire context to use, or NULL if trylocking.
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
@@ -686,7 +686,7 @@ out_no_unbind:
* @intr: Perform waits interruptible if possible.
* @dirtying: Pending GPU operation will dirty the resource
*
- * On succesful return, any backup DMA buffer pointed to by @res->backup will
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
@@ -804,7 +804,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
* @dx_query_mob: Buffer containing the DX query MOB
*
* Read back cached states from the device if they exist. This function
- * assumings binding_mutex is held.
+ * assumes binding_mutex is held.
*/
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
@@ -1125,7 +1125,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
}
/*
- * In order of increasing backup_offset, clean dirty resorces
+ * In order of increasing backup_offset, clean dirty resources
* intersecting the range.
*/
while (found) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index c89ad3a2d141..ecd3c2fc978b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
@@ -1383,6 +1382,6 @@ out_revert:
vmw_validation_revert(&val_ctx);
out_unref:
vmw_validation_unref_lists(&val_ctx);
-
+
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 483ad544ea54..0d51b4542269 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -196,7 +196,7 @@ out_ret:
* type.
*
* Returns: Refcounted pointer to the embedded struct vmw_resource if
- * successfule. Error pointer otherwise.
+ * successful. Error pointer otherwise.
*/
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index eb014b97d156..8650c3aea8f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index f9cf93c9e7e3..68ee897de9d7 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -8,7 +8,7 @@ config DRM_ZYNQMP_DPSUB
select DMA_ENGINE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
help
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index cc32aa89cf8f..bbb365f2d087 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -15,12 +15,11 @@
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include <linux/clk.h>
@@ -1099,14 +1098,14 @@ static int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
unsigned int height = state->crtc_h / (i ? info->vsub : 1);
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
struct dma_async_tx_descriptor *desc;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
- paddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
+ dma_addr = drm_fb_dma_get_gem_addr(state->fb, state, i);
dma->xt.numf = height;
dma->sgl.size = width * info->cpp[i];
dma->sgl.icg = state->fb->pitches[i] - dma->sgl.size;
- dma->xt.src_start = paddr;
+ dma->xt.src_start = dma_addr;
dma->xt.frame_size = 1;
dma->xt.dir = DMA_MEM_TO_DEV;
dma->xt.src_sgl = true;
@@ -1151,8 +1150,8 @@ zynqmp_disp_plane_atomic_check(struct drm_plane *plane,
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 824b510e337b..1de2d927c32b 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -21,7 +21,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
@@ -47,7 +47,7 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
/* Enforce the alignment constraints of the DMA engine. */
args->pitch = ALIGN(pitch, dpsub->dma_align);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
static struct drm_framebuffer *
@@ -75,13 +75,13 @@ static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
* DRM/KMS Driver
*/
-DEFINE_DRM_GEM_CMA_FOPS(zynqmp_dpsub_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(zynqmp_dpsub_drm_fops);
static const struct drm_driver zynqmp_dpsub_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
.fops = &zynqmp_dpsub_drm_fops,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 4b90c86ee5f8..47774b9ab3de 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -288,11 +288,29 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
return 0;
}
+static const struct dmi_system_id dmi_nodevs[] = {
+ {
+ /*
+	 * Google Chromebooks use the Chrome OS Embedded Controller Sensor
+	 * Hub instead of Sensor Hub Fusion and leave MP2
+	 * uninitialized, which disables all functionality, including
+	 * the registers necessary for feature detection.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ },
+ },
+ { }
+};
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
int rc;
+ if (dmi_first_match(dmi_nodevs))
+ return -ENODEV;
+
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
if (!privdata)
return -ENOMEM;
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 08c9a9a60ae4..b59c3dafa6a4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1212,6 +1212,13 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc = new_rdesc;
}
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD &&
+ *rsize == 331 && rdesc[190] == 0x85 && rdesc[191] == 0x5a &&
+ rdesc[204] == 0x95 && rdesc[205] == 0x05) {
+ hid_info(hdev, "Fixing up Asus N-KEY keyb report descriptor\n");
+ rdesc[205] = 0x01;
+ }
+
return rdesc;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0fb720a96399..f80d6193fca6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -185,6 +185,8 @@
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
@@ -414,6 +416,7 @@
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
+#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 48c1c02c69f4..859aeb07542e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -383,6 +383,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1532,7 +1534,10 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
* assume ours
*/
if (!report->tool)
- hid_report_set_tool(report, input, usage->code);
+ report->tool = usage->code;
+
+ /* drivers may have changed the value behind our back, resend it */
+ hid_report_set_tool(report, input, report->tool);
} else {
hid_report_release_tool(report, input, usage->code);
}
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 92ac4f605f13..6028af3c3aae 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -1221,6 +1221,7 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
spin_lock_irqsave(&ctlr->lock, flags);
if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report &&
+ ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED &&
(msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS &&
(ctlr->rumble_queue_head != ctlr->rumble_queue_tail ||
ctlr->rumble_zero_countdown > 0)) {
@@ -1545,12 +1546,13 @@ static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l,
ctlr->rumble_queue_head = 0;
memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data,
JC_RUMBLE_DATA_SIZE);
- spin_unlock_irqrestore(&ctlr->lock, flags);
/* don't wait for the periodic send (reduces latency) */
- if (schedule_now)
+ if (schedule_now && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED)
queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
return 0;
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index dc67717d2dab..70f602c64fd1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -314,6 +314,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index a3b151b29bd7..fc616db4231b 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+	if (!r) {
+		hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to send\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index c3e6d69fdfbd..cf1679b0d4fb 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -67,12 +67,13 @@ static const struct tm_wheel_info tm_wheels_infos[] = {
{0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
{0x0206, 0x0005, "Thrustmaster T300RS"},
{0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
+ {0x020a, 0x0005, "Thrustmaster T300RS (Sparco R383 Mod)"},
{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
{0x0002, 0x0002, "Thrustmaster T500RS"}
//{0x0407, 0x0001, "Thrustmaster TMX"}
};
-static const uint8_t tm_wheels_infos_length = 4;
+static const uint8_t tm_wheels_infos_length = 7;
/*
* This structs contains (in little endian) the response data
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 681614a8302a..197b1e7bf029 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -350,6 +350,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ for (int i = list->tail; i < list->head; i++)
+ kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index c078f09a2318..95cefae47adf 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -1064,7 +1064,7 @@ err_powered:
}
EXPORT_SYMBOL_GPL(i2c_hid_core_probe);
-int i2c_hid_core_remove(struct i2c_client *client)
+void i2c_hid_core_remove(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
@@ -1078,8 +1078,6 @@ int i2c_hid_core_remove(struct i2c_client *client)
i2c_hid_free_buffers(ihid);
i2c_hid_core_power_down(ihid);
-
- return 0;
}
EXPORT_SYMBOL_GPL(i2c_hid_core_remove);
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index 236cc062d5ef..96c75510ad3f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -33,7 +33,7 @@ struct i2chid_ops {
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
u16 hid_descriptor_address, u32 quirks);
-int i2c_hid_core_remove(struct i2c_client *client);
+void i2c_hid_core_remove(struct i2c_client *client);
void i2c_hid_core_shutdown(struct i2c_client *client);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index e600dbf04dfc..fc108f19a64c 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -32,6 +32,7 @@
#define ADL_P_DEVICE_ID 0x51FC
#define ADL_N_DEVICE_ID 0x54FC
#define RPL_S_DEVICE_ID 0x7A78
+#define MTL_P_DEVICE_ID 0x7E45
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 2c67ec17bec6..7120b30ac51d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -43,6 +43,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 6a5cc11aefd8..35dddc5015b3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -105,7 +105,7 @@ struct report_list {
* @multi_packet_cnt: Count of fragmented packet count
*
* This structure is used to store completion flags and per client data like
- * like report description, number of HID devices etc.
+ * report description, number of HID devices etc.
*/
struct ishtp_cl_data {
/* completion flags */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 405e0d5212cc..df0a825694f5 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
}
/**
- * ipc_tx_callback() - IPC tx callback function
+ * ipc_tx_send() - IPC tx send function
* @prm: Pointer to client device instance
*
- * Send message over IPC either first time or on callback on previous message
- * completion
+ * Send message over IPC. Message will be split into fragments
+ * if the message size is bigger than the IPC FIFO size, and all
+ * fragments will be sent one by one.
*/
-static void ipc_tx_callback(void *prm)
+static void ipc_tx_send(void *prm)
{
struct ishtp_cl *cl = prm;
struct ishtp_cl_tx_ring *cl_msg;
@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
list);
rem = cl_msg->send_buf.size - cl->tx_offs;
- ishtp_hdr.host_addr = cl->host_client_id;
- ishtp_hdr.fw_addr = cl->fw_client_id;
- ishtp_hdr.reserved = 0;
- pmsg = cl_msg->send_buf.data + cl->tx_offs;
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
- if (rem <= dev->mtu) {
- ishtp_hdr.length = rem;
- ishtp_hdr.msg_complete = 1;
- cl->sending = 0;
- list_del_init(&cl_msg->list); /* Must be before write */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- /* Submit to IPC queue with no callback */
- ishtp_write_message(dev, &ishtp_hdr, pmsg);
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
- ++cl->tx_ring_free_size;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
- tx_free_flags);
- } else {
- /* Send IPC fragment */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- cl->tx_offs += dev->mtu;
- ishtp_hdr.length = dev->mtu;
- ishtp_hdr.msg_complete = 0;
- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ break;
+ } else {
+ /* Send IPC fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ /* All fragments are submitted to the IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
}
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
}
/**
@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
return;
cl->tx_offs = 0;
- ipc_tx_callback(cl);
+ ipc_tx_send(cl);
++cl->send_msg_cnt_ipc;
}
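The renamed ipc_tx_send() above writes every fragment of a message in one pass instead of re-arming a per-fragment callback. A user-space sketch of the same splitting logic, assuming a hypothetical write_frag() that stands in for ishtp_write_message():

#include <stddef.h>
#include <stdbool.h>

/* Hypothetical sink for one fragment of at most mtu bytes. */
void write_frag(const unsigned char *buf, size_t len, bool msg_complete);

/* Split a buffer into MTU-sized fragments, marking only the last one complete. */
static void send_fragmented(const unsigned char *buf, size_t size, size_t mtu)
{
	size_t offs = 0;
	size_t rem = size;

	while (rem > 0) {
		if (rem <= mtu) {
			/* Last (or only) fragment */
			write_frag(buf + offs, rem, true);
			break;
		}
		/* Intermediate fragment: exactly mtu bytes, not yet complete */
		write_frag(buf + offs, mtu, false);
		offs += mtu;
		rem -= mtu;
	}
}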
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 660036da7449..922d83eb7ddf 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy)
/*
* The strings sent from the host are encoded in
- * in utf16; convert it to utf8 strings.
+ * utf16; convert it to utf8 strings.
* The host assures us that the utf16 strings will not exceed
* the max lengths specified. We will however, reserve room
* for the string terminating character - in the utf16s_utf8s()
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 23c680d1a0f5..7b9f3fc3adf7 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
+#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -2262,26 +2263,43 @@ static int vmbus_acpi_remove(struct acpi_device *device)
static void vmbus_reserve_fb(void)
{
- int size;
+ resource_size_t start = 0, size;
+ struct pci_dev *pdev;
+
+ if (efi_enabled(EFI_BOOT)) {
+ /* Gen2 VM: get FB base from EFI framebuffer */
+ start = screen_info.lfb_base;
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ } else {
+ /* Gen1 VM: get FB base from PCI */
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev)
+ return;
+
+ if (pdev->resource[0].flags & IORESOURCE_MEM) {
+ start = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ }
+
+ /*
+ * Release the PCI device so hyperv_drm or hyperv_fb driver can
+ * grab it later.
+ */
+ pci_dev_put(pdev);
+ }
+
+ if (!start)
+ return;
+
/*
* Make a claim for the frame buffer in the resource tree under the
* first node, which will be the one below 4GB. The length seems to
* be underreported, particularly in a Generation 1 VM. So start out
* reserving a larger area and make it smaller until it succeeds.
*/
-
- if (screen_info.lfb_base) {
- if (efi_enabled(EFI_BOOT))
- size = max_t(__u32, screen_info.lfb_size, 0x800000);
- else
- size = max_t(__u32, screen_info.lfb_size, 0x4000000);
-
- for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
- fb_mmio = __request_region(hyperv_mmio,
- screen_info.lfb_base, size,
- fb_mmio_name, 0);
- }
- }
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+ fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
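The reworked vmbus_reserve_fb() first resolves the framebuffer base (EFI screen_info on Gen2 VMs, the Hyper-V video PCI BAR on Gen1) and then keeps the existing halving loop. A sketch of that fallback loop in isolation, assuming a hypothetical try_reserve() in place of __request_region():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical reservation primitive standing in for __request_region(). */
bool try_reserve(uint64_t start, uint64_t size);

/*
 * The reported framebuffer length can be too small, so start with a
 * generous size and halve it until a reservation succeeds or the size
 * drops below 1 MiB.
 */
static bool reserve_fb_region(uint64_t start, uint64_t size)
{
	for (; size >= 0x100000; size >>= 1) {
		if (try_reserve(start, size))
			return true;
	}
	return false;
}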
/**
@@ -2313,7 +2331,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
- resource_size_t range_min, range_max, start;
+ resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2348,6 +2366,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
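The check added to vmbus_allocate_mmio() skips any candidate window whose start or end lands inside the reserved framebuffer range when fb_overlap_ok is not set. A compact sketch of that endpoint test, mirroring the condition above with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/*
 * Return true if either endpoint of the candidate [start, end] falls
 * inside the reserved framebuffer range [fb_start, fb_end]; all bounds
 * are inclusive, as in the hunk above.
 */
static bool candidate_hits_fb(uint64_t start, uint64_t end,
			      uint64_t fb_start, uint64_t fb_end)
{
	return (start >= fb_start && start <= fb_end) ||
	       (end >= fb_start && end <= fb_end);
}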
@@ -2427,7 +2453,8 @@ static int vmbus_acpi_add(struct acpi_device *device)
* Some ancestor of the vmbus acpi device (Gen1 or Gen2
* firmware) is the VMOD that has the mmio ranges. Get that.
*/
- for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
+ for (ancestor = acpi_dev_parent(device); ancestor;
+ ancestor = acpi_dev_parent(ancestor)) {
result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e70d9614bec2..5695b266abcf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -257,14 +257,14 @@ config SENSORS_AHT10
will be called aht10.
config SENSORS_AQUACOMPUTER_D5NEXT
- tristate "Aquacomputer D5 Next, Octo, Quadro, Farbwerk, and Farbwerk 360"
+ tristate "Aquacomputer D5 Next, Octo, Quadro, Farbwerk, Farbwerk 360, High Flow Next"
depends on USB_HID
select CRC16
help
If you say yes here you get support for sensors and fans of
the Aquacomputer D5 Next watercooling pump, Octo and Quadro fan
- controllers, Farbwerk and Farbwerk 360 RGB controllers, where
- available.
+ controllers, Farbwerk and Farbwerk 360 RGB controllers, and the
+ High Flow Next sensor, where available.
This driver can also be built as a module. If so, the module
will be called aquacomputer_d5next.
@@ -393,6 +393,7 @@ config SENSORS_ASB100
config SENSORS_ASPEED
tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
depends on THERMAL || THERMAL=n
select REGMAP
help
@@ -1066,6 +1067,18 @@ config SENSORS_MAX31730
This driver can also be built as a module. If so, the module
will be called max31730.
+config SENSORS_MAX31760
+ tristate "MAX31760 fan speed controller"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Support for the Analog Devices MAX31760 Precision Fan-Speed
+ Controller. MAX31760 integrates temperature sensing along with
+ precision PWM fan control.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31760.
+
config SENSORS_MAX6620
tristate "Maxim MAX6620 fan controller"
depends on I2C
@@ -1785,6 +1798,19 @@ config SENSORS_EMC2103
This driver can also be built as a module. If so, the module
will be called emc2103.
+config SENSORS_EMC2305
+ tristate "Microchip EMC2305 and compatible EMC2301/2/3"
+ depends on I2C
+ imply THERMAL
+ help
+ If you say yes here you get support for the Microchip EMC2305
+ fan controller chips.
+ The Microchip EMC2305 is a fan controller for up to 5 fans.
+ Fan rotation speeds are reported in RPM.
+
+ This driver can also be built as a module. If so, the module
+ will be called emc2305.
+
config SENSORS_EMC6W201
tristate "SMSC EMC6W201"
depends on I2C
@@ -2341,21 +2367,6 @@ config SENSORS_ASUS_WMI
This driver can also be built as a module. If so, the module
will be called asus_wmi_sensors.
-config SENSORS_ASUS_WMI_EC
- tristate "ASUS WMI B550/X570"
- depends on ACPI_WMI && SENSORS_ASUS_EC=n
- help
- If you say yes here you get support for the ACPI embedded controller
- hardware monitoring interface found in B550/X570 ASUS motherboards.
- This driver will provide readings of fans, voltages and temperatures
- through the system firmware.
-
- This driver is deprecated in favor of the ASUS EC Sensors driver
- which provides fully compatible output.
-
- This driver can also be built as a module. If so, the module
- will be called asus_wmi_sensors_ec.
-
config SENSORS_ASUS_EC
tristate "ASUS EC Sensors"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 007e829d1d0d..11d076cad8a2 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o
obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
obj-$(CONFIG_SENSORS_ASUS_EC) += asus-ec-sensors.o
obj-$(CONFIG_SENSORS_ASUS_WMI) += asus_wmi_sensors.o
-obj-$(CONFIG_SENSORS_ASUS_WMI_EC) += asus_wmi_ec_sensors.o
# Native drivers
# asb100, then w83781d go first, as they can override other drivers' addresses.
@@ -70,6 +69,7 @@ obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
+obj-$(CONFIG_SENSORS_EMC2305) += emc2305.o
obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
@@ -140,6 +140,7 @@ obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
+obj-$(CONFIG_SENSORS_MAX31760) += max31760.o
obj-$(CONFIG_SENSORS_MAX6620) += max6620.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 681f0623868f..a7cae6568155 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1504,7 +1504,6 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM_SLEEP
static int abituguru_suspend(struct device *dev)
{
struct abituguru_data *data = dev_get_drvdata(dev);
@@ -1526,16 +1525,12 @@ static int abituguru_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
-#define ABIT_UGURU_PM (&abituguru_pm)
-#else
-#define ABIT_UGURU_PM NULL
-#endif /* CONFIG_PM */
+static DEFINE_SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
static struct platform_driver abituguru_driver = {
.driver = {
.name = ABIT_UGURU_NAME,
- .pm = ABIT_UGURU_PM,
+ .pm = pm_sleep_ptr(&abituguru_pm),
},
.probe = abituguru_probe,
.remove = abituguru_remove,
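The abituguru, abituguru3, acpi_power_meter and adt7x10 hunks replace the CONFIG_PM_SLEEP ifdeffery with DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), which keeps the callbacks visible to the compiler and lets dead-code elimination drop them when sleep support is disabled. A minimal sketch of the pattern for a generic platform driver; the foo_* names are illustrative:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore the hardware */
	return 0;
}

/* No #ifdef CONFIG_PM_SLEEP needed; unused callbacks are discarded at build time. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};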
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 8229ad30c909..afb21f73032d 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1127,7 +1127,6 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM_SLEEP
static int abituguru3_suspend(struct device *dev)
{
struct abituguru3_data *data = dev_get_drvdata(dev);
@@ -1146,16 +1145,12 @@ static int abituguru3_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
-#define ABIT_UGURU3_PM (&abituguru3_pm)
-#else
-#define ABIT_UGURU3_PM NULL
-#endif /* CONFIG_PM */
+static DEFINE_SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
static struct platform_driver abituguru3_driver = {
.driver = {
.name = ABIT_UGURU3_NAME,
- .pm = ABIT_UGURU3_PM
+ .pm = pm_sleep_ptr(&abituguru3_pm),
},
.probe = abituguru3_probe,
.remove = abituguru3_remove,
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index d2545a1be9fc..0962c12eba5a 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -598,7 +598,7 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
continue;
/* Create a symlink to domain objects */
- obj = acpi_bus_get_acpi_device(element->reference.handle);
+ obj = acpi_get_acpi_dev(element->reference.handle);
resource->domain_devices[i] = obj;
if (!obj)
continue;
@@ -927,8 +927,6 @@ static int acpi_power_meter_remove(struct acpi_device *device)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
static int acpi_power_meter_resume(struct device *dev)
{
struct acpi_power_meter_resource *resource;
@@ -946,9 +944,8 @@ static int acpi_power_meter_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL,
+ acpi_power_meter_resume);
static struct acpi_driver acpi_power_meter_driver = {
.name = "power_meter",
@@ -959,7 +956,7 @@ static struct acpi_driver acpi_power_meter_driver = {
.remove = acpi_power_meter_remove,
.notify = acpi_power_meter_notify,
},
- .drv.pm = &acpi_power_meter_pm,
+ .drv.pm = pm_sleep_ptr(&acpi_power_meter_pm),
};
/* Module init/exit routines */
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index fd938c70293f..97b330b6c165 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -384,7 +384,7 @@ static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
if (i2c_smbus_read_byte_data(client, ADC128_REG_BUSY_STATUS) & 0xfc)
return -ENODEV;
- strlcpy(info->type, "adc128d818", I2C_NAME_SIZE);
+ strscpy(info->type, "adc128d818", I2C_NAME_SIZE);
return 0;
}
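The strlcpy() to strscpy() changes running through the hwmon detect() callbacks are mechanical: strscpy() also NUL-terminates, but returns the number of bytes copied or -E2BIG on truncation rather than the would-be source length. A hedged sketch of how a detect() helper could check that return value if it cared (these call sites simply ignore it); foo_fill_info() is illustrative:

#include <linux/i2c.h>
#include <linux/string.h>

/* Illustrative only: fill in the probed chip name, noticing truncation. */
static int foo_fill_info(struct i2c_board_info *info, const char *name)
{
	ssize_t ret;

	ret = strscpy(info->type, name, I2C_NAME_SIZE);
	if (ret < 0)
		return ret;	/* -E2BIG: name did not fit in info->type */
	return 0;
}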
@@ -495,14 +495,12 @@ error:
return err;
}
-static int adc128_remove(struct i2c_client *client)
+static void adc128_remove(struct i2c_client *client)
{
struct adc128_data *data = i2c_get_clientdata(client);
if (data->regulator)
regulator_disable(data->regulator);
-
- return 0;
}
static const struct i2c_device_id adc128_id[] = {
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 91ecfee243bf..2dc45e958730 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -426,7 +426,7 @@ static int adm1021_detect(struct i2c_client *client,
pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 4352f6a884e8..2984c4f98496 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -470,7 +470,7 @@ static int adm1025_detect(struct i2c_client *client,
else
return -ENODEV;
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 69b3ec752944..1f084f708743 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1610,7 +1610,7 @@ static int adm1026_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "adm1026", I2C_NAME_SIZE);
+ strscpy(info->type, "adm1026", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 3e1999413f32..eaf6e5e04aac 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -329,7 +329,7 @@ static int adm1029_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "adm1029", I2C_NAME_SIZE);
+ strscpy(info->type, "adm1029", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index ac841fa3a369..b42797bcb5b4 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -985,7 +985,7 @@ static int adm1031_detect(struct i2c_client *client,
return -ENODEV;
name = (id == 0x30) ? "adm1030" : "adm1031";
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 483cd757abd3..40e3558d3709 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -501,17 +501,23 @@ static int adm9240_fan_read(struct device *dev, u32 attr, int channel, long *val
switch (attr) {
case hwmon_fan_input:
+ mutex_lock(&data->update_lock);
err = regmap_read(data->regmap, ADM9240_REG_FAN(channel), &regval);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&data->update_lock);
return err;
+ }
if (regval == 255 && data->fan_div[channel] < 3) {
/* adjust fan clock divider on overflow */
err = adm9240_write_fan_div(data, channel,
++data->fan_div[channel]);
- if (err)
+ if (err) {
+ mutex_unlock(&data->update_lock);
return err;
+ }
}
*val = FAN_FROM_REG(regval, BIT(data->fan_div[channel]));
+ mutex_unlock(&data->update_lock);
break;
case hwmon_fan_div:
*val = BIT(data->fan_div[channel]);
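The adm9240 hunk serializes the read and the divider adjustment on overflow under update_lock, unlocking on each early return. A sketch of the same idea written with a single unlock path, assuming a hypothetical device structure with a mutex and regmap; the divider write itself is elided:

#include <linux/mutex.h>
#include <linux/regmap.h>

struct foo_data {
	struct regmap *regmap;
	struct mutex update_lock;	/* guards fan_div adjustments */
	u8 fan_div;
};

/* Illustrative: read a fan register and bump the clock divider on overflow. */
static int foo_read_fan(struct foo_data *data, unsigned int reg, long *val)
{
	unsigned int regval;
	int err;

	mutex_lock(&data->update_lock);
	err = regmap_read(data->regmap, reg, &regval);
	if (err < 0)
		goto unlock;

	if (regval == 255 && data->fan_div < 3) {
		/* adjust fan clock divider on overflow (register write elided) */
		data->fan_div++;
	}
	*val = regval;
unlock:
	mutex_unlock(&data->update_lock);
	return err;
}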
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index 1efc0bdcceab..067865f4887a 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(spi, adt7310_id);
static struct spi_driver adt7310_driver = {
.driver = {
.name = "adt7310",
- .pm = ADT7X10_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
},
.probe = adt7310_spi_probe,
.id_table = adt7310_id,
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index aede5baca7b9..0cebf6777239 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -98,7 +98,7 @@ static struct i2c_driver adt7410_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "adt7410",
- .pm = ADT7X10_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
},
.probe_new = adt7410_i2c_probe,
.id_table = adt7410_ids,
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index fad74aa62b64..bf5c5618f8d0 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -590,7 +590,7 @@ static int adt7411_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "adt7411", I2C_NAME_SIZE);
+ strscpy(info->type, "adt7411", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index e75bbd87ad09..9c0235849d4b 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -1782,7 +1782,7 @@ static int adt7462_detect(struct i2c_client *client,
if (revision != ADT7462_REVISION)
return -ENODEV;
- strlcpy(info->type, "adt7462", I2C_NAME_SIZE);
+ strscpy(info->type, "adt7462", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c67cd037a93f..927f8df05b7c 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1296,12 +1296,11 @@ static int adt7470_probe(struct i2c_client *client)
return 0;
}
-static int adt7470_remove(struct i2c_client *client)
+static void adt7470_remove(struct i2c_client *client)
{
struct adt7470_data *data = i2c_get_clientdata(client);
kthread_stop(data->auto_update);
- return 0;
}
static const struct i2c_device_id adt7470_id[] = {
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index ac480e6e4818..51b3d16c3223 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1342,7 +1342,7 @@ static int adt7475_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index ce54bffab2ec..da67734edafd 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -397,8 +397,6 @@ int adt7x10_probe(struct device *dev, const char *name, int irq,
}
EXPORT_SYMBOL_GPL(adt7x10_probe);
-#ifdef CONFIG_PM_SLEEP
-
static int adt7x10_suspend(struct device *dev)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
@@ -414,10 +412,7 @@ static int adt7x10_resume(struct device *dev)
return regmap_write(data->regmap, ADT7X10_CONFIG, data->config);
}
-SIMPLE_DEV_PM_OPS(adt7x10_dev_pm_ops, adt7x10_suspend, adt7x10_resume);
-EXPORT_SYMBOL_GPL(adt7x10_dev_pm_ops);
-
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SIMPLE_DEV_PM_OPS(adt7x10_dev_pm_ops, adt7x10_suspend, adt7x10_resume);
MODULE_AUTHOR("Hartmut Knaack");
MODULE_DESCRIPTION("ADT7410/ADT7420, ADT7310/ADT7320 common code");
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index ba22c32c8355..46caf3e21978 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -20,11 +20,6 @@ struct device;
int adt7x10_probe(struct device *dev, const char *name, int irq,
struct regmap *regmap);
-#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops adt7x10_dev_pm_ops;
-#define ADT7X10_DEV_PM_OPS (&adt7x10_dev_pm_ops)
-#else
-#define ADT7X10_DEV_PM_OPS NULL
-#endif
#endif
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 0c16face3fd3..3bfd12ff4b3c 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -809,7 +809,7 @@ static int amc6821_detect(
}
dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address);
- strlcpy(info->type, "amc6821", I2C_NAME_SIZE);
+ strscpy(info->type, "amc6821", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 66430553cc45..c51a2678f0eb 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo,
- * Quadro)
+ * Quadro, High Flow Next)
*
* Aquacomputer devices send HID reports (with ID 0x01) every second to report
* sensor values.
@@ -26,15 +26,17 @@
#define USB_PRODUCT_ID_D5NEXT 0xf00e
#define USB_PRODUCT_ID_FARBWERK360 0xf010
#define USB_PRODUCT_ID_OCTO 0xf011
+#define USB_PRODUCT_ID_HIGHFLOWNEXT 0xf012
-enum kinds { d5next, farbwerk, farbwerk360, octo, quadro };
+enum kinds { d5next, farbwerk, farbwerk360, octo, quadro, highflownext };
static const char *const aqc_device_names[] = {
[d5next] = "d5next",
[farbwerk] = "farbwerk",
[farbwerk360] = "farbwerk360",
[octo] = "octo",
- [quadro] = "quadro"
+ [quadro] = "quadro",
+ [highflownext] = "highflownext"
};
#define DRIVER_NAME "aquacomputer_d5next"
@@ -71,6 +73,8 @@ static u8 secondary_ctrl_report[] = {
#define D5NEXT_COOLANT_TEMP 0x57
#define D5NEXT_NUM_FANS 2
#define D5NEXT_NUM_SENSORS 1
+#define D5NEXT_NUM_VIRTUAL_SENSORS 8
+#define D5NEXT_VIRTUAL_SENSORS_START 0x3f
#define D5NEXT_PUMP_OFFSET 0x6c
#define D5NEXT_FAN_OFFSET 0x5f
#define D5NEXT_5V_VOLTAGE 0x39
@@ -86,14 +90,18 @@ static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 };
#define FARBWERK_SENSOR_START 0x2f
/* Register offsets for the Farbwerk 360 RGB controller */
-#define FARBWERK360_NUM_SENSORS 4
-#define FARBWERK360_SENSOR_START 0x32
+#define FARBWERK360_NUM_SENSORS 4
+#define FARBWERK360_SENSOR_START 0x32
+#define FARBWERK360_NUM_VIRTUAL_SENSORS 16
+#define FARBWERK360_VIRTUAL_SENSORS_START 0x3a
/* Register offsets for the Octo fan controller */
#define OCTO_POWER_CYCLES 0x18
#define OCTO_NUM_FANS 8
#define OCTO_NUM_SENSORS 4
#define OCTO_SENSOR_START 0x3D
+#define OCTO_NUM_VIRTUAL_SENSORS 16
+#define OCTO_VIRTUAL_SENSORS_START 0x45
#define OCTO_CTRL_REPORT_SIZE 0x65F
static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
@@ -105,12 +113,24 @@ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0
#define QUADRO_NUM_FANS 4
#define QUADRO_NUM_SENSORS 4
#define QUADRO_SENSOR_START 0x34
+#define QUADRO_NUM_VIRTUAL_SENSORS 16
+#define QUADRO_VIRTUAL_SENSORS_START 0x3c
#define QUADRO_CTRL_REPORT_SIZE 0x3c1
#define QUADRO_FLOW_SENSOR_OFFSET 0x6e
static u8 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 };
/* Fan speed registers in Quadro control report (from 0-100%) */
-static u16 quadro_ctrl_fan_offsets[] = { 0x36, 0x8b, 0xe0, 0x135 };
+static u16 quadro_ctrl_fan_offsets[] = { 0x37, 0x8c, 0xe1, 0x136 };
+
+/* Register offsets for the High Flow Next */
+#define HIGHFLOWNEXT_NUM_SENSORS 2
+#define HIGHFLOWNEXT_SENSOR_START 85
+#define HIGHFLOWNEXT_FLOW 81
+#define HIGHFLOWNEXT_WATER_QUALITY 89
+#define HIGHFLOWNEXT_POWER 91
+#define HIGHFLOWNEXT_CONDUCTIVITY 95
+#define HIGHFLOWNEXT_5V_VOLTAGE 97
+#define HIGHFLOWNEXT_5V_VOLTAGE_USB 99
/* Labels for D5 Next */
static const char *const label_d5next_temp[] = {
@@ -147,6 +167,25 @@ static const char *const label_temp_sensors[] = {
"Sensor 4"
};
+static const char *const label_virtual_temp_sensors[] = {
+ "Virtual sensor 1",
+ "Virtual sensor 2",
+ "Virtual sensor 3",
+ "Virtual sensor 4",
+ "Virtual sensor 5",
+ "Virtual sensor 6",
+ "Virtual sensor 7",
+ "Virtual sensor 8",
+ "Virtual sensor 9",
+ "Virtual sensor 10",
+ "Virtual sensor 11",
+ "Virtual sensor 12",
+ "Virtual sensor 13",
+ "Virtual sensor 14",
+ "Virtual sensor 15",
+ "Virtual sensor 16",
+};
+
/* Labels for Octo and Quadro (except speed) */
static const char *const label_fan_speed[] = {
"Fan 1 speed",
@@ -201,6 +240,27 @@ static const char *const label_quadro_speeds[] = {
"Flow speed [dL/h]"
};
+/* Labels for High Flow Next */
+static const char *const label_highflownext_temp_sensors[] = {
+ "Coolant temp",
+ "External sensor"
+};
+
+static const char *const label_highflownext_fan_speed[] = {
+ "Flow [dL/h]",
+ "Water quality [%]",
+ "Conductivity [nS/cm]",
+};
+
+static const char *const label_highflownext_power[] = {
+ "Dissipated power",
+};
+
+static const char *const label_highflownext_voltage[] = {
+ "+5V voltage",
+ "+5V USB voltage"
+};
+
struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
@@ -220,6 +280,8 @@ struct aqc_data {
u16 *fan_ctrl_offsets;
int num_temp_sensors;
int temp_sensor_start_offset;
+ int num_virtual_temp_sensors;
+ int virtual_temp_sensor_start_offset;
u16 power_cycle_count_offset;
u8 flow_sensor_offset;
@@ -231,7 +293,7 @@ struct aqc_data {
u32 power_cycles;
/* Sensor values */
- s32 temp_input[4];
+ s32 temp_input[20]; /* Max 4 physical and 16 virtual */
u16 speed_input[8];
u32 power_input[8];
u16 voltage_input[8];
@@ -239,6 +301,7 @@ struct aqc_data {
/* Label values */
const char *const *temp_label;
+ const char *const *virtual_temp_label;
const char *const *speed_label;
const char *const *power_label;
const char *const *voltage_label;
@@ -345,7 +408,7 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
switch (type) {
case hwmon_temp:
- if (channel < priv->num_temp_sensors)
+ if (channel < priv->num_temp_sensors + priv->num_virtual_temp_sensors)
return 0444;
break;
case hwmon_pwm:
@@ -360,6 +423,11 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
break;
case hwmon_fan:
switch (priv->kind) {
+ case highflownext:
+ /* Special case to support flow sensor, water quality and conductivity */
+ if (channel < 3)
+ return 0444;
+ break;
case quadro:
/* Special case to support flow sensor */
if (channel < priv->num_fans + 1)
@@ -372,6 +440,18 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
}
break;
case hwmon_power:
+ switch (priv->kind) {
+ case highflownext:
+ /* Special case to support one power sensor */
+ if (channel == 0)
+ return 0444;
+ break;
+ default:
+ if (channel < priv->num_fans)
+ return 0444;
+ break;
+ }
+ break;
case hwmon_curr:
if (channel < priv->num_fans)
return 0444;
@@ -383,6 +463,11 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
if (channel < priv->num_fans + 2)
return 0444;
break;
+ case highflownext:
+ /* Special case to support two voltage sensors */
+ if (channel < 2)
+ return 0444;
+ break;
default:
if (channel < priv->num_fans)
return 0444;
@@ -447,7 +532,10 @@ static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32
switch (type) {
case hwmon_temp:
- *str = priv->temp_label[channel];
+ if (channel < priv->num_temp_sensors)
+ *str = priv->temp_label[channel];
+ else
+ *str = priv->virtual_temp_label[channel - priv->num_temp_sensors];
break;
case hwmon_fan:
*str = priv->speed_label[channel];
@@ -512,6 +600,22 @@ static const struct hwmon_channel_info *aqc_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL,
@@ -568,7 +672,7 @@ static const struct hwmon_chip_info aqc_chip_info = {
static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size)
{
- int i, sensor_value;
+ int i, j, sensor_value;
struct aqc_data *priv;
if (report->id != STATUS_REPORT_ID)
@@ -581,7 +685,7 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART);
priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION);
- /* Temperature sensor readings */
+ /* Physical temperature sensor readings */
for (i = 0; i < priv->num_temp_sensors; i++) {
sensor_value = get_unaligned_be16(data +
priv->temp_sensor_start_offset +
@@ -592,6 +696,18 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv->temp_input[i] = sensor_value * 10;
}
+ /* Virtual temperature sensor readings */
+ for (j = 0; j < priv->num_virtual_temp_sensors; j++) {
+ sensor_value = get_unaligned_be16(data +
+ priv->virtual_temp_sensor_start_offset +
+ j * AQC_TEMP_SENSOR_SIZE);
+ if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED)
+ priv->temp_input[i] = -ENODATA;
+ else
+ priv->temp_input[i] = sensor_value * 10;
+ i++;
+ }
+
/* Fan speed and related readings */
for (i = 0; i < priv->num_fans; i++) {
priv->speed_input[i] =
@@ -618,6 +734,22 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
case quadro:
priv->speed_input[4] = get_unaligned_be16(data + priv->flow_sensor_offset);
break;
+ case highflownext:
+ /* If external temp sensor is not connected, its power reading is also N/A */
+ if (priv->temp_input[1] == -ENODATA)
+ priv->power_input[0] = -ENODATA;
+ else
+ priv->power_input[0] =
+ get_unaligned_be16(data + HIGHFLOWNEXT_POWER) * 1000000;
+
+ priv->voltage_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE) * 10;
+ priv->voltage_input[1] =
+ get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE_USB) * 10;
+
+ priv->speed_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_FLOW);
+ priv->speed_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_WATER_QUALITY);
+ priv->speed_input[2] = get_unaligned_be16(data + HIGHFLOWNEXT_CONDUCTIVITY);
+ break;
default:
break;
}
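The virtual-sensor loop above keeps indexing temp_input[] where the physical sensors left off, reading big-endian 16-bit values at a fixed per-sensor stride. A self-contained user-space sketch of that layout; the offsets, the 2-byte stride and the disconnected marker are illustrative stand-ins, and -1 is used here where the driver stores -ENODATA:

#include <stdint.h>
#include <stddef.h>

#define TEMP_SENSOR_SIZE	2	/* bytes per sensor in the report */
#define SENSOR_DISCONNECTED	0x7fff	/* illustrative marker value */

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/*
 * Decode nphys physical sensors followed by nvirt virtual sensors into
 * out[], in millidegrees, flagging disconnected sensors with -1.
 */
static void decode_temps(const uint8_t *report,
			 size_t phys_off, int nphys,
			 size_t virt_off, int nvirt,
			 int32_t *out)
{
	int i = 0, j;

	for (j = 0; j < nphys; j++, i++) {
		uint16_t raw = get_be16(report + phys_off + j * TEMP_SENSOR_SIZE);
		out[i] = (raw == SENSOR_DISCONNECTED) ? -1 : raw * 10;
	}
	for (j = 0; j < nvirt; j++, i++) {
		uint16_t raw = get_be16(report + virt_off + j * TEMP_SENSOR_SIZE);
		out[i] = (raw == SENSOR_DISCONNECTED) ? -1 : raw * 10;
	}
}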
@@ -717,10 +849,13 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->fan_ctrl_offsets = d5next_ctrl_fan_offsets;
priv->num_temp_sensors = D5NEXT_NUM_SENSORS;
priv->temp_sensor_start_offset = D5NEXT_COOLANT_TEMP;
+ priv->num_virtual_temp_sensors = D5NEXT_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES;
priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE;
priv->temp_label = label_d5next_temp;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
priv->speed_label = label_d5next_speeds;
priv->power_label = label_d5next_power;
priv->voltage_label = label_d5next_voltages;
@@ -740,7 +875,11 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->num_fans = 0;
priv->num_temp_sensors = FARBWERK360_NUM_SENSORS;
priv->temp_sensor_start_offset = FARBWERK360_SENSOR_START;
+ priv->num_virtual_temp_sensors = FARBWERK360_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = FARBWERK360_VIRTUAL_SENSORS_START;
+
priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
break;
case USB_PRODUCT_ID_OCTO:
priv->kind = octo;
@@ -750,10 +889,13 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->fan_ctrl_offsets = octo_ctrl_fan_offsets;
priv->num_temp_sensors = OCTO_NUM_SENSORS;
priv->temp_sensor_start_offset = OCTO_SENSOR_START;
+ priv->num_virtual_temp_sensors = OCTO_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
priv->speed_label = label_fan_speed;
priv->power_label = label_fan_power;
priv->voltage_label = label_fan_voltage;
@@ -767,16 +909,32 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->fan_ctrl_offsets = quadro_ctrl_fan_offsets;
priv->num_temp_sensors = QUADRO_NUM_SENSORS;
priv->temp_sensor_start_offset = QUADRO_SENSOR_START;
+ priv->num_virtual_temp_sensors = QUADRO_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET;
priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
priv->speed_label = label_quadro_speeds;
priv->power_label = label_fan_power;
priv->voltage_label = label_fan_voltage;
priv->current_label = label_fan_current;
break;
+ case USB_PRODUCT_ID_HIGHFLOWNEXT:
+ priv->kind = highflownext;
+
+ priv->num_fans = 0;
+ priv->num_temp_sensors = HIGHFLOWNEXT_NUM_SENSORS;
+ priv->temp_sensor_start_offset = HIGHFLOWNEXT_SENSOR_START;
+ priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
+
+ priv->temp_label = label_highflownext_temp_sensors;
+ priv->speed_label = label_highflownext_fan_speed;
+ priv->power_label = label_highflownext_power;
+ priv->voltage_label = label_highflownext_voltage;
+ break;
default:
break;
}
@@ -833,6 +991,7 @@ static const struct hid_device_id aqc_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOWNEXT) },
{ }
};
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 8cf0bcb85eb4..ce4da836765c 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -208,7 +208,7 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
static int asb100_probe(struct i2c_client *client);
static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int asb100_remove(struct i2c_client *client);
+static void asb100_remove(struct i2c_client *client);
static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);
@@ -769,7 +769,7 @@ static int asb100_detect(struct i2c_client *client,
if (val1 != 0x31 || val2 != 0x06)
return -ENODEV;
- strlcpy(info->type, "asb100", I2C_NAME_SIZE);
+ strscpy(info->type, "asb100", I2C_NAME_SIZE);
return 0;
}
@@ -822,7 +822,7 @@ ERROR3:
return err;
}
-static int asb100_remove(struct i2c_client *client)
+static void asb100_remove(struct i2c_client *client)
{
struct asb100_data *data = i2c_get_clientdata(client);
@@ -831,8 +831,6 @@ static int asb100_remove(struct i2c_client *client)
i2c_unregister_device(data->lm75[1]);
i2c_unregister_device(data->lm75[0]);
-
- return 0;
}
/*
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index e835605a7456..54595454537b 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1153,7 +1153,7 @@ static int asc7621_detect(struct i2c_client *client,
if (company == asc7621_chips[chip_index].company_id &&
verstep == asc7621_chips[chip_index].verstep_id) {
- strlcpy(info->type, asc7621_chips[chip_index].name,
+ strscpy(info->type, asc7621_chips[chip_index].name,
I2C_NAME_SIZE);
dev_info(&adapter->dev, "Matched %s at 0x%02x\n",
@@ -1165,7 +1165,7 @@ static int asc7621_detect(struct i2c_client *client,
return -ENODEV;
}
-static int asc7621_remove(struct i2c_client *client)
+static void asc7621_remove(struct i2c_client *client)
{
struct asc7621_data *data = i2c_get_clientdata(client);
int i;
@@ -1176,8 +1176,6 @@ static int asc7621_remove(struct i2c_client *client)
device_remove_file(&client->dev,
&(asc7621_params[i].sda.dev_attr));
}
-
- return 0;
}
static const struct i2c_device_id asc7621_id[] = {
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 61a4684fc020..81e688975c6a 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -266,9 +266,7 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
#define SENSOR_SET_WATER_BLOCK \
(SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
-
struct ec_board_info {
- const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
unsigned long sensors;
/*
* Defines which mutex to use for guarding access to the state and the
@@ -281,152 +279,194 @@ struct ec_board_info {
enum board_family family;
};
-static const struct ec_board_info board_info[] = {
- {
- .board_names = {"PRIME X470-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_400_series,
- },
- {
- .board_names = {"PRIME X570-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ProArt X570-CREATOR WIFI"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- },
- {
- .board_names = {"Pro WS X570-ACE"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII DARK HERO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG CROSSHAIR VIII FORMULA",
- "ROG CROSSHAIR VIII HERO",
- "ROG CROSSHAIR VIII HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG MAXIMUS XI HERO",
- "ROG MAXIMUS XI HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_intel_300_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII IMPACT"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-I GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING WIFI II"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-F GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-I GAMING"},
- .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"},
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
- .family = family_intel_600_series,
- },
- {
- .board_names = {"ROG ZENITH II EXTREME"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
- SENSOR_SET_WATER_BLOCK |
- SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
- SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
- .family = family_amd_500_series,
- },
- {}
+static const struct ec_board_info board_info_prime_x470_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_400_series,
+};
+
+static const struct ec_board_info board_info_prime_x570_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_maximus_xi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_f_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_i_gaming = {
+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_zenith_ii_extreme = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
+ SENSOR_SET_WATER_BLOCK |
+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_500_series,
+};
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, \
+ "ASUSTeK COMPUTER INC."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = (void *)board_info, \
+ }
+
+static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
+ &board_info_prime_x470_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
+ &board_info_prime_x570_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
+ &board_info_pro_art_x570_creator_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
+ &board_info_pro_ws_x570_ace),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
+ &board_info_crosshair_viii_dark_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+ &board_info_crosshair_viii_impact),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
+ &board_info_strix_b550_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
+ &board_info_strix_b550_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
+ &board_info_strix_x570_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
+ &board_info_strix_x570_e_gaming_wifi_ii),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING",
+ &board_info_strix_x570_f_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
+ &board_info_strix_x570_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
+ &board_info_strix_z690_a_gaming_wifi_d4),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
+ &board_info_zenith_ii_extreme),
+ {},
};
struct ec_sensor {
@@ -537,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec,
return -ENOENT;
}
-static int __init bank_compare(const void *a, const void *b)
+static int bank_compare(const void *a, const void *b)
{
return *((const s8 *)a) - *((const s8 *)b);
}
-static void __init setup_sensor_data(struct ec_sensors_data *ec)
+static void setup_sensor_data(struct ec_sensors_data *ec)
{
struct ec_sensor *s = ec->sensors;
bool bank_found;
@@ -574,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec)
sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
}
-static void __init fill_ec_registers(struct ec_sensors_data *ec)
+static void fill_ec_registers(struct ec_sensors_data *ec)
{
const struct ec_sensor_info *si;
unsigned int i, j, register_idx = 0;
@@ -589,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec)
}
}
-static int __init setup_lock_data(struct device *dev)
+static int setup_lock_data(struct device *dev)
{
const char *mutex_path;
int status;
@@ -812,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
}
-static int __init
+static int
asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
struct device *dev, int num,
enum hwmon_sensor_types type, u32 config)
@@ -841,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = {
.ops = &asus_ec_hwmon_ops,
};
-static const struct ec_board_info * __init get_board_info(void)
+static const struct ec_board_info *get_board_info(void)
{
- const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
- const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME);
- const struct ec_board_info *board;
-
- if (!dmi_board_vendor || !dmi_board_name ||
- strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC."))
- return NULL;
-
- for (board = board_info; board->sensors; board++) {
- if (match_string(board->board_names,
- MAX_IDENTICAL_BOARD_VARIATIONS,
- dmi_board_name) >= 0)
- return board;
- }
+ const struct dmi_system_id *dmi_entry;
- return NULL;
+ dmi_entry = dmi_first_match(dmi_table);
+ return dmi_entry ? dmi_entry->driver_data : NULL;
}
-static int __init asus_ec_probe(struct platform_device *pdev)
+static int asus_ec_probe(struct platform_device *pdev)
{
const struct hwmon_channel_info **ptr_asus_ec_ci;
int nr_count[hwmon_max] = { 0 }, nr_types = 0;
@@ -970,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwdev);
}
-
-static const struct acpi_device_id acpi_ec_ids[] = {
- /* Embedded Controller Device */
- { "PNP0C09", 0 },
- {}
-};
+MODULE_DEVICE_TABLE(dmi, dmi_table);
static struct platform_driver asus_ec_sensors_platform_driver = {
.driver = {
.name = "asus-ec-sensors",
- .acpi_match_table = acpi_ec_ids,
},
+ .probe = asus_ec_probe,
};
-MODULE_DEVICE_TABLE(acpi, acpi_ec_ids);
-/*
- * we use module_platform_driver_probe() rather than module_platform_driver()
- * because the probe function (and its dependants) are marked with __init, which
- * means we can't put it into the .probe member of the platform_driver struct
- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init
- * because the object is referenced from the module exit code.
- */
-module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+static struct platform_device *asus_ec_sensors_platform_device;
+
+static int __init asus_ec_init(void)
+{
+ asus_ec_sensors_platform_device =
+ platform_create_bundle(&asus_ec_sensors_platform_driver,
+ asus_ec_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(asus_ec_sensors_platform_device))
+ return PTR_ERR(asus_ec_sensors_platform_device);
+
+ return 0;
+}
+
+static void __exit asus_ec_exit(void)
+{
+ platform_device_unregister(asus_ec_sensors_platform_device);
+ platform_driver_unregister(&asus_ec_sensors_platform_driver);
+}
+
+module_init(asus_ec_init);
+module_exit(asus_ec_exit);
module_param_named(mutex_path, mutex_path_override, charp, 0);
MODULE_PARM_DESC(mutex_path,
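The asus-ec-sensors rework above trades the ACPI id match and hand-rolled board-name arrays for a dmi_system_id table, so get_board_info() becomes a dmi_first_match() lookup and the driver is registered explicitly from module init. A compact sketch of the lookup pattern; the foo_* names, board string and sensor bits are illustrative:

#include <linux/dmi.h>
#include <linux/module.h>

struct foo_board_info {
	unsigned long sensors;
};

static const struct foo_board_info foo_board_a = { .sensors = 0x3 };

#define FOO_DMI_BOARD(name, info)					\
	{								\
		.matches = {						\
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR,		\
					"ASUSTeK COMPUTER INC."),	\
			DMI_EXACT_MATCH(DMI_BOARD_NAME, name),		\
		},							\
		.driver_data = (void *)(info),				\
	}

static const struct dmi_system_id foo_dmi_table[] = {
	FOO_DMI_BOARD("EXAMPLE BOARD A", &foo_board_a),
	{},
};
MODULE_DEVICE_TABLE(dmi, foo_dmi_table);

static const struct foo_board_info *foo_get_board_info(void)
{
	const struct dmi_system_id *entry = dmi_first_match(foo_dmi_table);

	return entry ? entry->driver_data : NULL;
}

Matching via DMI rather than the PNP0C09 embedded-controller ACPI id also means the module autoloads only on boards actually listed in the table, which is why MODULE_DEVICE_TABLE(dmi, ...) replaces the ACPI device table in the hunk above.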
diff --git a/drivers/hwmon/asus_wmi_ec_sensors.c b/drivers/hwmon/asus_wmi_ec_sensors.c
deleted file mode 100644
index a3a2f014dec0..000000000000
--- a/drivers/hwmon/asus_wmi_ec_sensors.c
+++ /dev/null
@@ -1,622 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * HWMON driver for ASUS B550/X570 motherboards that publish sensor
- * values via the embedded controller registers.
- *
- * Copyright (C) 2021 Eugene Shalygin <eugene.shalygin@gmail.com>
- * Copyright (C) 2018-2019 Ed Brindley <kernel@maidavale.org>
- *
- * EC provides:
- * - Chipset temperature
- * - CPU temperature
- * - Motherboard temperature
- * - T_Sensor temperature
- * - VRM temperature
- * - Water In temperature
- * - Water Out temperature
- * - CPU Optional Fan RPM
- * - Chipset Fan RPM
- * - Water Flow Fan RPM
- * - CPU current
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/hwmon.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nls.h>
-#include <linux/units.h>
-#include <linux/wmi.h>
-
-#include <asm/unaligned.h>
-
-#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
-#define ASUSWMI_METHODID_BLOCK_READ_EC 0x42524543 /* BREC */
-/* From the ASUS DSDT source */
-#define ASUSWMI_BREC_REGISTERS_MAX 16
-#define ASUSWMI_MAX_BUF_LEN 128
-#define SENSOR_LABEL_LEN 16
-
-static u32 hwmon_attributes[hwmon_max] = {
- [hwmon_chip] = HWMON_C_REGISTER_TZ,
- [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
- [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
- [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL,
- [hwmon_fan] = HWMON_F_INPUT | HWMON_F_LABEL,
-};
-
-struct asus_wmi_ec_sensor_address {
- u8 index;
- u8 bank;
- u8 size;
-};
-
-#define MAKE_SENSOR_ADDRESS(size_i, bank_i, index_i) { \
- .size = size_i, \
- .bank = bank_i, \
- .index = index_i, \
-}
-
-struct ec_sensor_info {
- struct asus_wmi_ec_sensor_address addr;
- char label[SENSOR_LABEL_LEN];
- enum hwmon_sensor_types type;
-};
-
-#define EC_SENSOR(sensor_label, sensor_type, size, bank, index) { \
- .addr = MAKE_SENSOR_ADDRESS(size, bank, index), \
- .label = sensor_label, \
- .type = sensor_type, \
-}
-
-enum known_ec_sensor {
- SENSOR_TEMP_CHIPSET,
- SENSOR_TEMP_CPU,
- SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR,
- SENSOR_TEMP_VRM,
- SENSOR_FAN_CPU_OPT,
- SENSOR_FAN_CHIPSET,
- SENSOR_FAN_VRM_HS,
- SENSOR_FAN_WATER_FLOW,
- SENSOR_CURR_CPU,
- SENSOR_TEMP_WATER_IN,
- SENSOR_TEMP_WATER_OUT,
- SENSOR_MAX
-};
-
-/* All known sensors for ASUS EC controllers */
-static const struct ec_sensor_info known_ec_sensors[] = {
- [SENSOR_TEMP_CHIPSET] = EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
- [SENSOR_TEMP_CPU] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
- [SENSOR_TEMP_MB] = EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
- [SENSOR_TEMP_T_SENSOR] = EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
- [SENSOR_TEMP_VRM] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
- [SENSOR_FAN_CPU_OPT] = EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
- [SENSOR_FAN_VRM_HS] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
- [SENSOR_FAN_CHIPSET] = EC_SENSOR("Chipset", hwmon_fan, 2, 0x00, 0xb4),
- [SENSOR_FAN_WATER_FLOW] = EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
- [SENSOR_CURR_CPU] = EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
- [SENSOR_TEMP_WATER_IN] = EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
- [SENSOR_TEMP_WATER_OUT] = EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
-};
-
-struct asus_wmi_data {
- const enum known_ec_sensor known_board_sensors[SENSOR_MAX + 1];
-};
-
-/* boards with EC support */
-static struct asus_wmi_data sensors_board_PW_X570_P = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CHIPSET,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_PW_X570_A = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB, SENSOR_TEMP_VRM,
- SENSOR_FAN_CHIPSET,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_R_C8H = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_TEMP_WATER_IN, SENSOR_TEMP_WATER_OUT,
- SENSOR_FAN_CPU_OPT, SENSOR_FAN_CHIPSET, SENSOR_FAN_WATER_FLOW,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-/* Same as Hero but without chipset fan */
-static struct asus_wmi_data sensors_board_R_C8DH = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_TEMP_WATER_IN, SENSOR_TEMP_WATER_OUT,
- SENSOR_FAN_CPU_OPT, SENSOR_FAN_WATER_FLOW,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-/* Same as Hero but without water */
-static struct asus_wmi_data sensors_board_R_C8F = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CPU_OPT, SENSOR_FAN_CHIPSET,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_RS_B550_E_G = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CPU_OPT,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_RS_B550_I_G = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_VRM_HS,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_RS_X570_E_G = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CHIPSET,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, sensors) { \
- .matches = { \
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), \
- DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
- }, \
- .driver_data = sensors, \
-}
-
-static const struct dmi_system_id asus_wmi_ec_dmi_table[] = {
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO", &sensors_board_PW_X570_P),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE", &sensors_board_PW_X570_A),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO", &sensors_board_R_C8DH),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA", &sensors_board_R_C8F),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO", &sensors_board_R_C8H),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING", &sensors_board_RS_B550_E_G),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING", &sensors_board_RS_B550_I_G),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING", &sensors_board_RS_X570_E_G),
- {}
-};
-MODULE_DEVICE_TABLE(dmi, asus_wmi_ec_dmi_table);
-
-struct ec_sensor {
- enum known_ec_sensor info_index;
- long cached_value;
-};
-
-/**
- * struct asus_wmi_ec_info - sensor info.
- * @sensors: list of sensors.
- * @read_arg: UTF-16LE string to pass to BRxx() WMI function.
- * @read_buffer: decoded output from WMI result.
- * @nr_sensors: number of board EC sensors.
- * @nr_registers: number of EC registers to read (sensor might span more than 1 register).
- * @last_updated: in jiffies.
- */
-struct asus_wmi_ec_info {
- struct ec_sensor sensors[SENSOR_MAX];
- char read_arg[(ASUSWMI_BREC_REGISTERS_MAX * 4 + 1) * 2];
- u8 read_buffer[ASUSWMI_BREC_REGISTERS_MAX];
- unsigned int nr_sensors;
- unsigned int nr_registers;
- unsigned long last_updated;
-};
-
-struct asus_wmi_sensors {
- struct asus_wmi_ec_info ec;
- /* lock access to internal cache */
- struct mutex lock;
-};
-
-static int asus_wmi_ec_fill_board_sensors(struct asus_wmi_ec_info *ec,
- const enum known_ec_sensor *bsi)
-{
- struct ec_sensor *s = ec->sensors;
- int i;
-
- ec->nr_sensors = 0;
- ec->nr_registers = 0;
-
- for (i = 0; bsi[i] != SENSOR_MAX; i++) {
- s[i].info_index = bsi[i];
- ec->nr_sensors++;
- ec->nr_registers += known_ec_sensors[bsi[i]].addr.size;
- }
-
- return 0;
-}
-
-/*
- * The next four functions convert to or from BRxx string argument format.
- * The format of the string is as follows:
- * - The string consists of two-byte UTF-16LE characters.
- * - The value of the very first byte in the string is equal to the total
- * length of the next string in bytes, thus excluding the first two-byte
- * character.
- * - The rest of the string encodes the pairs of (bank, index) pairs, where
- * both values are byte-long (0x00 to 0xFF).
- * - Numbers are encoded as UTF-16LE hex values.
- */
-static int asus_wmi_ec_decode_reply_buffer(const u8 *in, u32 length, u8 *out)
-{
- char buffer[ASUSWMI_MAX_BUF_LEN * 2];
- u32 len = min_t(u32, get_unaligned_le16(in), length - 2);
-
- utf16s_to_utf8s((wchar_t *)(in + 2), len / 2, UTF16_LITTLE_ENDIAN, buffer, sizeof(buffer));
-
- return hex2bin(out, buffer, len / 4);
-}
-
-static void asus_wmi_ec_encode_registers(const u8 *in, u32 len, char *out)
-{
- char buffer[ASUSWMI_MAX_BUF_LEN * 2];
-
- bin2hex(buffer, in, len);
-
- utf8s_to_utf16s(buffer, len * 2, UTF16_LITTLE_ENDIAN, (wchar_t *)(out + 2), len * 2);
-
- put_unaligned_le16(len * 4, out);
-}
-
-static void asus_wmi_ec_make_block_read_query(struct asus_wmi_ec_info *ec)
-{
- u8 registers[ASUSWMI_BREC_REGISTERS_MAX * 2];
- const struct ec_sensor_info *si;
- int i, j, offset;
-
- offset = 0;
- for (i = 0; i < ec->nr_sensors; i++) {
- si = &known_ec_sensors[ec->sensors[i].info_index];
- for (j = 0; j < si->addr.size; j++) {
- registers[offset++] = si->addr.bank;
- registers[offset++] = si->addr.index + j;
- }
- }
-
- asus_wmi_ec_encode_registers(registers, offset, ec->read_arg);
-}
-
-static int asus_wmi_ec_block_read(u32 method_id, char *query, u8 *out)
-{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_buffer input;
- union acpi_object *obj;
- acpi_status status;
- int ret;
-
- /* The first byte of the BRxx() argument string has to be the string size. */
- input.length = query[0] + 2;
- input.pointer = query;
- status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0, method_id, &input, &output);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- obj = output.pointer;
- if (!obj)
- return -EIO;
-
- if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 2) {
- ret = -EIO;
- goto out_free_obj;
- }
-
- ret = asus_wmi_ec_decode_reply_buffer(obj->buffer.pointer, obj->buffer.length, out);
-
-out_free_obj:
- ACPI_FREE(obj);
- return ret;
-}
-
-static inline long get_sensor_value(const struct ec_sensor_info *si, u8 *data)
-{
- switch (si->addr.size) {
- case 1:
- return *data;
- case 2:
- return get_unaligned_be16(data);
- case 4:
- return get_unaligned_be32(data);
- default:
- return 0;
- }
-}
-
-static void asus_wmi_ec_update_ec_sensors(struct asus_wmi_ec_info *ec)
-{
- const struct ec_sensor_info *si;
- struct ec_sensor *s;
- u8 i_sensor;
- u8 *data;
-
- data = ec->read_buffer;
- for (i_sensor = 0; i_sensor < ec->nr_sensors; i_sensor++) {
- s = &ec->sensors[i_sensor];
- si = &known_ec_sensors[s->info_index];
- s->cached_value = get_sensor_value(si, data);
- data += si->addr.size;
- }
-}
-
-static long asus_wmi_ec_scale_sensor_value(long value, int data_type)
-{
- switch (data_type) {
- case hwmon_curr:
- case hwmon_temp:
- case hwmon_in:
- return value * MILLI;
- default:
- return value;
- }
-}
-
-static int asus_wmi_ec_find_sensor_index(const struct asus_wmi_ec_info *ec,
- enum hwmon_sensor_types type, int channel)
-{
- int i;
-
- for (i = 0; i < ec->nr_sensors; i++) {
- if (known_ec_sensors[ec->sensors[i].info_index].type == type) {
- if (channel == 0)
- return i;
-
- channel--;
- }
- }
- return -EINVAL;
-}
-
-static int asus_wmi_ec_get_cached_value_or_update(struct asus_wmi_sensors *sensor_data,
- int sensor_index,
- long *value)
-{
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int ret = 0;
-
- mutex_lock(&sensor_data->lock);
-
- if (time_after(jiffies, ec->last_updated + HZ)) {
- ret = asus_wmi_ec_block_read(ASUSWMI_METHODID_BLOCK_READ_EC,
- ec->read_arg, ec->read_buffer);
- if (ret)
- goto unlock;
-
- asus_wmi_ec_update_ec_sensors(ec);
- ec->last_updated = jiffies;
- }
-
- *value = ec->sensors[sensor_index].cached_value;
-
-unlock:
- mutex_unlock(&sensor_data->lock);
-
- return ret;
-}
-
-/* Now follow the functions that implement the hwmon interface */
-
-static int asus_wmi_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int ret, sidx, info_index;
- long value = 0;
-
- sidx = asus_wmi_ec_find_sensor_index(ec, type, channel);
- if (sidx < 0)
- return sidx;
-
- ret = asus_wmi_ec_get_cached_value_or_update(sensor_data, sidx, &value);
- if (ret)
- return ret;
-
- info_index = ec->sensors[sidx].info_index;
- *val = asus_wmi_ec_scale_sensor_value(value, known_ec_sensors[info_index].type);
-
- return ret;
-}
-
-static int asus_wmi_ec_hwmon_read_string(struct device *dev,
- enum hwmon_sensor_types type, u32 attr,
- int channel, const char **str)
-{
- struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int sensor_index;
-
- sensor_index = asus_wmi_ec_find_sensor_index(ec, type, channel);
- *str = known_ec_sensors[ec->sensors[sensor_index].info_index].label;
-
- return 0;
-}
-
-static umode_t asus_wmi_ec_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type, u32 attr,
- int channel)
-{
- const struct asus_wmi_sensors *sensor_data = drvdata;
- const struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int index;
-
- index = asus_wmi_ec_find_sensor_index(ec, type, channel);
-
- return index < 0 ? 0 : 0444;
-}
-
-static int asus_wmi_hwmon_add_chan_info(struct hwmon_channel_info *asus_wmi_hwmon_chan,
- struct device *dev, int num,
- enum hwmon_sensor_types type, u32 config)
-{
- u32 *cfg;
-
- cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
-
- asus_wmi_hwmon_chan->type = type;
- asus_wmi_hwmon_chan->config = cfg;
- memset32(cfg, config, num);
-
- return 0;
-}
-
-static const struct hwmon_ops asus_wmi_ec_hwmon_ops = {
- .is_visible = asus_wmi_ec_hwmon_is_visible,
- .read = asus_wmi_ec_hwmon_read,
- .read_string = asus_wmi_ec_hwmon_read_string,
-};
-
-static struct hwmon_chip_info asus_wmi_ec_chip_info = {
- .ops = &asus_wmi_ec_hwmon_ops,
-};
-
-static int asus_wmi_ec_configure_sensor_setup(struct device *dev,
- const enum known_ec_sensor *bsi)
-{
- struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- struct hwmon_channel_info *asus_wmi_hwmon_chan;
- const struct hwmon_channel_info **asus_wmi_ci;
- int nr_count[hwmon_max] = {}, nr_types = 0;
- const struct hwmon_chip_info *chip_info;
- const struct ec_sensor_info *si;
- enum hwmon_sensor_types type;
- struct device *hwdev;
- int i, ret;
-
- ret = asus_wmi_ec_fill_board_sensors(ec, bsi);
- if (ret)
- return ret;
-
- if (!sensor_data->ec.nr_sensors)
- return -ENODEV;
-
- for (i = 0; i < ec->nr_sensors; i++) {
- si = &known_ec_sensors[ec->sensors[i].info_index];
- if (!nr_count[si->type])
- nr_types++;
- nr_count[si->type]++;
- }
-
- if (nr_count[hwmon_temp]) {
- nr_count[hwmon_chip]++;
- nr_types++;
- }
-
- /*
- * If we can get values for all the registers in a single query,
- * the query will not change from call to call.
- */
- asus_wmi_ec_make_block_read_query(ec);
-
- asus_wmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*asus_wmi_hwmon_chan),
- GFP_KERNEL);
- if (!asus_wmi_hwmon_chan)
- return -ENOMEM;
-
- asus_wmi_ci = devm_kcalloc(dev, nr_types + 1, sizeof(*asus_wmi_ci), GFP_KERNEL);
- if (!asus_wmi_ci)
- return -ENOMEM;
-
- asus_wmi_ec_chip_info.info = asus_wmi_ci;
- chip_info = &asus_wmi_ec_chip_info;
-
- for (type = 0; type < hwmon_max; type++) {
- if (!nr_count[type])
- continue;
-
- ret = asus_wmi_hwmon_add_chan_info(asus_wmi_hwmon_chan, dev,
- nr_count[type], type,
- hwmon_attributes[type]);
- if (ret)
- return ret;
-
- *asus_wmi_ci++ = asus_wmi_hwmon_chan++;
- }
-
- dev_dbg(dev, "board has %d EC sensors that span %d registers",
- ec->nr_sensors, ec->nr_registers);
-
- hwdev = devm_hwmon_device_register_with_info(dev, "asus_wmi_ec_sensors",
- sensor_data, chip_info, NULL);
-
- return PTR_ERR_OR_ZERO(hwdev);
-}
-
-static int asus_wmi_probe(struct wmi_device *wdev, const void *context)
-{
- struct asus_wmi_sensors *sensor_data;
- struct asus_wmi_data *board_sensors;
- const struct dmi_system_id *dmi_id;
- const enum known_ec_sensor *bsi;
- struct device *dev = &wdev->dev;
-
- dmi_id = dmi_first_match(asus_wmi_ec_dmi_table);
- if (!dmi_id)
- return -ENODEV;
-
- board_sensors = dmi_id->driver_data;
- bsi = board_sensors->known_board_sensors;
-
- sensor_data = devm_kzalloc(dev, sizeof(*sensor_data), GFP_KERNEL);
- if (!sensor_data)
- return -ENOMEM;
-
- mutex_init(&sensor_data->lock);
-
- dev_set_drvdata(dev, sensor_data);
-
- return asus_wmi_ec_configure_sensor_setup(dev, bsi);
-}
-
-static const struct wmi_device_id asus_ec_wmi_id_table[] = {
- { ASUSWMI_MONITORING_GUID, NULL },
- { }
-};
-
-static struct wmi_driver asus_sensors_wmi_driver = {
- .driver = {
- .name = "asus_wmi_ec_sensors",
- },
- .id_table = asus_ec_wmi_id_table,
- .probe = asus_wmi_probe,
-};
-module_wmi_driver(asus_sensors_wmi_driver);
-
-MODULE_AUTHOR("Ed Brindley <kernel@maidavale.org>");
-MODULE_AUTHOR("Eugene Shalygin <eugene.shalygin@gmail.com>");
-MODULE_DESCRIPTION("Asus WMI Sensors Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
index 96c4a5c45291..6724e0dd3088 100644
--- a/drivers/hwmon/axi-fan-control.c
+++ b/drivers/hwmon/axi-fan-control.c
@@ -394,11 +394,6 @@ static int axi_fan_control_init(struct axi_fan_control_data *ctl,
return ret;
}
-static void axi_fan_control_clk_disable(void *clk)
-{
- clk_disable_unprepare(clk);
-}
-
static const struct hwmon_channel_info *axi_fan_control_info[] = {
HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT),
HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_LABEL),
@@ -478,20 +473,12 @@ static int axi_fan_control_probe(struct platform_device *pdev)
if (IS_ERR(ctl->base))
return PTR_ERR(ctl->base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "clk_get failed with %ld\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&pdev->dev, axi_fan_control_clk_disable, clk);
- if (ret)
- return ret;
-
ctl->clk_rate = clk_get_rate(clk);
if (!ctl->clk_rate)
return -EINVAL;
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 14389fd7afb8..345d883ab044 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -55,6 +55,7 @@
#define SECONDS_PER_DAY (SECONDS_PER_HOUR * 24)
#define RAIL_COUNT 3 /* 3v3 + 5v + 12v */
#define TEMP_COUNT 2
+#define OCP_MULTI_RAIL 0x02
#define PSU_CMD_SELECT_RAIL 0x00 /* expects length 2 */
#define PSU_CMD_RAIL_VOLTS_HCRIT 0x40 /* the rest of the commands expect length 3 */
@@ -71,9 +72,10 @@
#define PSU_CMD_RAIL_WATTS 0x96
#define PSU_CMD_VEND_STR 0x99
#define PSU_CMD_PROD_STR 0x9A
-#define PSU_CMD_TOTAL_WATTS 0xEE
#define PSU_CMD_TOTAL_UPTIME 0xD1
#define PSU_CMD_UPTIME 0xD2
+#define PSU_CMD_OCPMODE 0xD8
+#define PSU_CMD_TOTAL_WATTS 0xEE
#define PSU_CMD_INIT 0xFE
#define L_IN_VOLTS "v_in"
@@ -268,6 +270,7 @@ static int corsairpsu_get_value(struct corsairpsu_data *priv, u8 cmd, u8 rail, l
break;
case PSU_CMD_TOTAL_UPTIME:
case PSU_CMD_UPTIME:
+ case PSU_CMD_OCPMODE:
*val = tmp;
break;
default:
@@ -660,6 +663,29 @@ static int product_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(product);
+static int ocpmode_show(struct seq_file *seqf, void *unused)
+{
+ struct corsairpsu_data *priv = seqf->private;
+ long val;
+ int ret;
+
+	/*
+	 * The rail mode can be switched on the fly via the RAW interface, but
+	 * that is not exposed here because it is somewhat dangerous for the
+	 * health of the PSU. While the PSU is switching modes the returned
+	 * value can be bogus and reading it can fail altogether, so every
+	 * value other than OCP_MULTI_RAIL is treated as "single rail".
+	 */
+ ret = corsairpsu_get_value(priv, PSU_CMD_OCPMODE, 0, &val);
+ if (ret < 0)
+ seq_puts(seqf, "N/A\n");
+ else
+ seq_printf(seqf, "%s\n", (val == OCP_MULTI_RAIL) ? "multi rail" : "single rail");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ocpmode);
+
static void corsairpsu_debugfs_init(struct corsairpsu_data *priv)
{
char name[32];
@@ -671,6 +697,7 @@ static void corsairpsu_debugfs_init(struct corsairpsu_data *priv)
debugfs_create_file("uptime_total", 0444, priv->debugfs, priv, &uptime_total_fops);
debugfs_create_file("vendor", 0444, priv->debugfs, priv, &vendor_fops);
debugfs_create_file("product", 0444, priv->debugfs, priv, &product_fops);
+ debugfs_create_file("ocpmode", 0444, priv->debugfs, priv, &ocpmode_fops);
}
#else
@@ -786,13 +813,14 @@ static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
{ HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */
{ HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */
- { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i revision 1 */
{ HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */
{ HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */
+	{ HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i revision 2 */
{ },
};
MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 7f8d95dd2717..1572b5416015 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1355,15 +1355,21 @@ static int __init dell_smm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
- dev_warn(&pdev->dev, "broken Dell BIOS detected, disallow fan support\n");
- if (!force)
+ if (!force) {
+ dev_notice(&pdev->dev, "Disabling fan support due to BIOS bugs\n");
data->disallow_fan_support = true;
+ } else {
+ dev_warn(&pdev->dev, "Enabling fan support despite BIOS bugs\n");
+ }
}
if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
- dev_warn(&pdev->dev, "broken Dell BIOS detected, disallow fan type call\n");
- if (!force)
+ if (!force) {
+ dev_notice(&pdev->dev, "Disabling fan type call due to BIOS bugs\n");
data->disallow_fan_type_call = true;
+ } else {
+ dev_warn(&pdev->dev, "Enabling fan type call despite BIOS bugs\n");
+ }
}
strscpy(data->bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index e3ad4c2d0038..66c48f70fae7 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2456,7 +2456,7 @@ static int dme1737_i2c_detect(struct i2c_client *client,
dev_info(dev, "Found a %s chip at 0x%02x (rev 0x%02x).\n",
verstep == SCH5027_VERSTEP ? "SCH5027" : "DME1737",
client->addr, verstep);
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
@@ -2508,14 +2508,12 @@ exit_remove:
return err;
}
-static int dme1737_i2c_remove(struct i2c_client *client)
+static void dme1737_i2c_remove(struct i2c_client *client)
{
struct dme1737_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
dme1737_remove_files(&client->dev);
-
- return 0;
}
static const struct i2c_device_id dme1737_id[] = {
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 314838272049..61d59189a6d1 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -329,22 +329,22 @@ static int emc1403_detect(struct i2c_client *client,
id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
switch (id) {
case 0x20:
- strlcpy(info->type, "emc1402", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1402", I2C_NAME_SIZE);
break;
case 0x21:
- strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1403", I2C_NAME_SIZE);
break;
case 0x22:
- strlcpy(info->type, "emc1422", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1422", I2C_NAME_SIZE);
break;
case 0x23:
- strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1423", I2C_NAME_SIZE);
break;
case 0x25:
- strlcpy(info->type, "emc1404", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1404", I2C_NAME_SIZE);
break;
case 0x27:
- strlcpy(info->type, "emc1424", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1424", I2C_NAME_SIZE);
break;
default:
return -ENODEV;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index e4c95ca9e19f..361cf9292456 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -643,7 +643,7 @@ emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info)
if ((product != 0x24) && (product != 0x26))
return -ENODEV;
- strlcpy(info->type, "emc2103", I2C_NAME_SIZE);
+ strscpy(info->type, "emc2103", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
new file mode 100644
index 000000000000..aa1f25add0b6
--- /dev/null
+++ b/drivers/hwmon/emc2305.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for EMC2305 fan controller
+ *
+ * Copyright (C) 2022 Nvidia Technologies Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_data/emc2305.h>
+#include <linux/thermal.h>
+
+static const unsigned short
+emc2305_normal_i2c[] = { 0x27, 0x2c, 0x2d, 0x2e, 0x2f, 0x4c, 0x4d, I2C_CLIENT_END };
+
+#define EMC2305_REG_DRIVE_FAIL_STATUS 0x27
+#define EMC2305_REG_DEVICE 0xfd
+#define EMC2305_REG_VENDOR 0xfe
+#define EMC2305_FAN_MAX 0xff
+#define EMC2305_FAN_MIN 0x00
+#define EMC2305_FAN_MAX_STATE 10
+#define EMC2305_DEVICE 0x34
+#define EMC2305_VENDOR 0x5d
+#define EMC2305_REG_PRODUCT_ID 0xfd
+#define EMC2305_TACH_REGS_UNUSE_BITS 3
+#define EMC2305_TACH_CNT_MULTIPLIER 0x02
+#define EMC2305_TACH_RANGE_MIN 480
+
+#define EMC2305_PWM_DUTY2STATE(duty, max_state, pwm_max) \
+ DIV_ROUND_CLOSEST((duty) * (max_state), (pwm_max))
+#define EMC2305_PWM_STATE2DUTY(state, max_state, pwm_max) \
+ DIV_ROUND_CLOSEST((state) * (pwm_max), (max_state))
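+
+/*
+ * Example: with max_state = 10 and pwm_max = 255, a PWM duty of 128 maps to
+ * state DIV_ROUND_CLOSEST(128 * 10, 255) = 5, and state 5 maps back to duty
+ * DIV_ROUND_CLOSEST(5 * 255, 10) = 128.
+ */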
+
+/*
+ * Factor derived from equations [2] and [3] in the data sheet; valid for fans
+ * where the number of edges equals (poles * 2 + 1).
+ */
+#define EMC2305_RPM_FACTOR 3932160
+
+#define EMC2305_REG_FAN_DRIVE(n) (0x30 + 0x10 * (n))
+#define EMC2305_REG_FAN_MIN_DRIVE(n) (0x38 + 0x10 * (n))
+#define EMC2305_REG_FAN_TACH(n) (0x3e + 0x10 * (n))
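+
+/*
+ * Per-fan registers are spaced 0x10 apart, e.g. the drive register of
+ * fan channel 2 is 0x30 + 0x10 * 2 = 0x50.
+ */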
+
+enum emc230x_product_id {
+ EMC2305 = 0x34,
+ EMC2303 = 0x35,
+ EMC2302 = 0x36,
+ EMC2301 = 0x37,
+};
+
+static const struct i2c_device_id emc2305_ids[] = {
+ { "emc2305", 0 },
+ { "emc2303", 0 },
+ { "emc2302", 0 },
+ { "emc2301", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, emc2305_ids);
+
+/**
+ * struct emc2305_cdev_data - per-channel cooling device data
+ * @cdev: cooling device;
+ * @cur_state: current cooling state;
+ * @last_hwmon_state: last cooling state set by the hwmon subsystem;
+ * @last_thermal_state: last cooling state set by the thermal subsystem;
+ *
+ * The 'last_hwmon_state' and 'last_thermal_state' fields support the fan low
+ * limit speed feature, whose purpose is to provide the ability to limit fan
+ * speed according to system-wide considerations, such as the absence of some
+ * replaceable units (PSU or line cards), high system ambient temperature,
+ * unreliable transceiver temperature sensing or other factors that indirectly
+ * affect the system's airflow.
+ * The feature is exposed through the 'hwmon' interface: when the 'thermal'
+ * subsystem is configured in the kernel, the 'hwmon' 'pwm' attribute sets a
+ * low limit for the fan speed. In that case, setting the fan speed through
+ * 'hwmon' never lets the 'thermal' subsystem select a lower duty cycle than
+ * the one selected with the 'pwm' attribute.
+ * Conversely, the fan speed is written to the hardware through 'pwm' only if
+ * the requested speed is above the last speed set by the 'thermal' subsystem;
+ * otherwise the request is only cached with no PWM update. For example, if
+ * 'thermal' last requested state 3 and a 'pwm' write maps to state 5, the PWM
+ * is raised to state 5, while a 'pwm' write mapping to state 2 is only cached
+ * until 'thermal' drops below it.
+ */
+struct emc2305_cdev_data {
+ struct thermal_cooling_device *cdev;
+ unsigned int cur_state;
+ unsigned long last_hwmon_state;
+ unsigned long last_thermal_state;
+};
+
+/**
+ * struct emc2305_data - device-wide driver data
+ * @client: i2c client;
+ * @hwmon_dev: hwmon device;
+ * @max_state: maximum cooling state of the cooling device;
+ * @pwm_num: number of PWM channels;
+ * @pwm_separate: separate PWM settings for every channel;
+ * @pwm_min: array of minimum PWM per channel;
+ * @cdev_data: array of cooling devices data;
+ */
+struct emc2305_data {
+ struct i2c_client *client;
+ struct device *hwmon_dev;
+ u8 max_state;
+ u8 pwm_num;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+ struct emc2305_cdev_data cdev_data[EMC2305_PWM_MAX];
+};
+
+static char *emc2305_fan_name[] = {
+ "emc2305_fan",
+ "emc2305_fan1",
+ "emc2305_fan2",
+ "emc2305_fan3",
+ "emc2305_fan4",
+ "emc2305_fan5",
+};
+
+static void emc2305_unset_tz(struct device *dev);
+
+static int emc2305_get_max_channel(const struct emc2305_data *data)
+{
+ return data->pwm_num;
+}
+
+static int emc2305_get_cdev_idx(struct thermal_cooling_device *cdev)
+{
+ struct emc2305_data *data = cdev->devdata;
+ size_t len = strlen(cdev->type);
+ int ret;
+
+ if (len <= 0)
+ return -EINVAL;
+
+	/*
+	 * Return the cooling device index, 0..4, when separate PWM settings
+	 * are used, or index zero when one common PWM setting is used. If the
+	 * mode is not pwm_separate, all PWMs are bound to the common thermal
+	 * zone and run at the same speed to cool the same thermal junction.
+	 * Otherwise, return the specific channel whose PWM is bound to its own
+	 * thermal zone, e.g. a cooling device named "emc2305_fan3" maps to
+	 * channel index 2.
+	 */
+ if (!data->pwm_separate)
+ return 0;
+
+ ret = cdev->type[len - 1];
+ switch (ret) {
+ case '1' ... '5':
+ return ret - '1';
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int emc2305_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ int cdev_idx;
+ struct emc2305_data *data = cdev->devdata;
+
+ cdev_idx = emc2305_get_cdev_idx(cdev);
+ if (cdev_idx < 0)
+ return cdev_idx;
+
+ *state = data->cdev_data[cdev_idx].cur_state;
+ return 0;
+}
+
+static int emc2305_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct emc2305_data *data = cdev->devdata;
+ *state = data->max_state;
+ return 0;
+}
+
+static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ int cdev_idx, ret;
+ struct emc2305_data *data = cdev->devdata;
+ struct i2c_client *client = data->client;
+ u8 val, i;
+
+ if (state > data->max_state)
+ return -EINVAL;
+
+ cdev_idx = emc2305_get_cdev_idx(cdev);
+ if (cdev_idx < 0)
+ return cdev_idx;
+
+ /* Save thermal state. */
+ data->cdev_data[cdev_idx].last_thermal_state = state;
+ state = max_t(unsigned long, state, data->cdev_data[cdev_idx].last_hwmon_state);
+
+ val = EMC2305_PWM_STATE2DUTY(state, data->max_state, EMC2305_FAN_MAX);
+
+ data->cdev_data[cdev_idx].cur_state = state;
+ if (data->pwm_separate) {
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_DRIVE(cdev_idx), val);
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * Set the same PWM value in all channels
+ * if common PWM channel is used.
+ */
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_DRIVE(i), val);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops emc2305_cooling_ops = {
+ .get_max_state = emc2305_get_max_state,
+ .get_cur_state = emc2305_get_cur_state,
+ .set_cur_state = emc2305_set_cur_state,
+};
+
+static int emc2305_show_fault(struct device *dev, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int status_reg;
+
+ status_reg = i2c_smbus_read_byte_data(client, EMC2305_REG_DRIVE_FAIL_STATUS);
+ if (status_reg < 0)
+ return status_reg;
+
+ return status_reg & (1 << channel) ? 1 : 0;
+}
+
+static int emc2305_show_fan(struct device *dev, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(client, EMC2305_REG_FAN_TACH(channel));
+ if (ret <= 0)
+ return ret;
+
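+	/*
+	 * Drop the unused low bits and convert the tach count to RPM, e.g. a
+	 * raw reading of 0x2000 gives 8192 >> 3 = 1024 and
+	 * 3932160 / 1024 = 3840, reported as 3840 * 2 = 7680 RPM.
+	 */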
+ ret = ret >> EMC2305_TACH_REGS_UNUSE_BITS;
+ ret = EMC2305_RPM_FACTOR / ret;
+ if (ret <= EMC2305_TACH_RANGE_MIN)
+ return 0;
+
+ return ret * EMC2305_TACH_CNT_MULTIPLIER;
+}
+
+static int emc2305_show_pwm(struct device *dev, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ return i2c_smbus_read_byte_data(client, EMC2305_REG_FAN_DRIVE(channel));
+}
+
+static int emc2305_set_pwm(struct device *dev, long val, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ if (val < data->pwm_min[channel] || val > EMC2305_FAN_MAX)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_DRIVE(channel), val);
+ if (ret < 0)
+ return ret;
+ data->cdev_data[channel].cur_state = EMC2305_PWM_DUTY2STATE(val, data->max_state,
+ EMC2305_FAN_MAX);
+ return 0;
+}
+
+static int emc2305_set_single_tz(struct device *dev, int idx)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ long pwm;
+ int i, cdev_idx, ret;
+
+ cdev_idx = (idx) ? idx - 1 : 0;
+ pwm = data->pwm_min[cdev_idx];
+
+ data->cdev_data[cdev_idx].cdev =
+ thermal_cooling_device_register(emc2305_fan_name[idx], data,
+ &emc2305_cooling_ops);
+
+ if (IS_ERR(data->cdev_data[cdev_idx].cdev)) {
+ dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]);
+ return PTR_ERR(data->cdev_data[cdev_idx].cdev);
+ }
+ /* Set minimal PWM speed. */
+ if (data->pwm_separate) {
+ ret = emc2305_set_pwm(dev, pwm, cdev_idx);
+ if (ret < 0)
+ return ret;
+ } else {
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = emc2305_set_pwm(dev, pwm, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ data->cdev_data[cdev_idx].cur_state =
+ EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
+ EMC2305_FAN_MAX);
+ data->cdev_data[cdev_idx].last_hwmon_state =
+ EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
+ EMC2305_FAN_MAX);
+ return 0;
+}
+
+static int emc2305_set_tz(struct device *dev)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (!data->pwm_separate)
+ return emc2305_set_single_tz(dev, 0);
+
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = emc2305_set_single_tz(dev, i + 1);
+ if (ret)
+ goto thermal_cooling_device_register_fail;
+ }
+ return 0;
+
+thermal_cooling_device_register_fail:
+ emc2305_unset_tz(dev);
+ return ret;
+}
+
+static void emc2305_unset_tz(struct device *dev)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ int i;
+
+ /* Unregister cooling device. */
+ for (i = 0; i < EMC2305_PWM_MAX; i++)
+ if (data->cdev_data[i].cdev)
+ thermal_cooling_device_unregister(data->cdev_data[i].cdev);
+}
+
+static umode_t
+emc2305_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ int max_channel = emc2305_get_max_channel(data);
+
+ /* Don't show channels which are not physically connected. */
+ if (channel >= max_channel)
+ return 0;
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+emc2305_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ int cdev_idx;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ /* If thermal is configured - handle PWM limit setting. */
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ if (data->pwm_separate)
+ cdev_idx = channel;
+ else
+ cdev_idx = 0;
+ data->cdev_data[cdev_idx].last_hwmon_state =
+ EMC2305_PWM_DUTY2STATE(val, data->max_state,
+ EMC2305_FAN_MAX);
+ /*
+ * Update PWM only in case requested state is not less than the
+ * last thermal state.
+ */
+ if (data->cdev_data[cdev_idx].last_hwmon_state >=
+ data->cdev_data[cdev_idx].last_thermal_state)
+ return emc2305_set_cur_state(data->cdev_data[cdev_idx].cdev,
+ data->cdev_data[cdev_idx].last_hwmon_state);
+ return 0;
+ }
+ return emc2305_set_pwm(dev, val, channel);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+emc2305_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = emc2305_show_fan(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ case hwmon_fan_fault:
+ ret = emc2305_show_fault(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = emc2305_show_pwm(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops emc2305_ops = {
+ .is_visible = emc2305_is_visible,
+ .read = emc2305_read,
+ .write = emc2305_write,
+};
+
+static const struct hwmon_channel_info *emc2305_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info emc2305_chip_info = {
+ .ops = &emc2305_ops,
+ .info = emc2305_info,
+};
+
+static int emc2305_identify(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct emc2305_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, EMC2305_REG_PRODUCT_ID);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case EMC2305:
+ data->pwm_num = 5;
+ break;
+ case EMC2303:
+ data->pwm_num = 3;
+ break;
+ case EMC2302:
+ data->pwm_num = 2;
+ break;
+ case EMC2301:
+ data->pwm_num = 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int emc2305_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct emc2305_data *data;
+ struct emc2305_platform_data *pdata;
+ int vendor, device;
+ int ret;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ vendor = i2c_smbus_read_byte_data(client, EMC2305_REG_VENDOR);
+ if (vendor != EMC2305_VENDOR)
+ return -ENODEV;
+
+ device = i2c_smbus_read_byte_data(client, EMC2305_REG_DEVICE);
+ if (device != EMC2305_DEVICE)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ ret = emc2305_identify(dev);
+ if (ret)
+ return ret;
+
+ pdata = dev_get_platdata(&client->dev);
+ if (pdata) {
+ if (!pdata->max_state || pdata->max_state > EMC2305_FAN_MAX_STATE)
+ return -EINVAL;
+ data->max_state = pdata->max_state;
+ /*
+		 * Validate the number of active PWM channels. Note that the
+		 * configured number can be less than the actual maximum
+		 * supported by the device.
+ */
+ if (!pdata->pwm_num || pdata->pwm_num > EMC2305_PWM_MAX)
+ return -EINVAL;
+ data->pwm_num = pdata->pwm_num;
+ data->pwm_separate = pdata->pwm_separate;
+ for (i = 0; i < EMC2305_PWM_MAX; i++)
+ data->pwm_min[i] = pdata->pwm_min[i];
+ } else {
+ data->max_state = EMC2305_FAN_MAX_STATE;
+ data->pwm_separate = false;
+ for (i = 0; i < EMC2305_PWM_MAX; i++)
+ data->pwm_min[i] = EMC2305_FAN_MIN;
+ }
+
+ data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "emc2305", data,
+ &emc2305_chip_info, NULL);
+ if (IS_ERR(data->hwmon_dev))
+ return PTR_ERR(data->hwmon_dev);
+
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ ret = emc2305_set_tz(dev);
+ if (ret != 0)
+ return ret;
+ }
+
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_MIN_DRIVE(i),
+ data->pwm_min[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void emc2305_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ emc2305_unset_tz(dev);
+}
+
+static struct i2c_driver emc2305_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc2305",
+ },
+ .probe = emc2305_probe,
+ .remove = emc2305_remove,
+ .id_table = emc2305_ids,
+ .address_list = emc2305_normal_i2c,
+};
+
+module_i2c_driver(emc2305_driver);
+
+MODULE_AUTHOR("Nvidia");
+MODULE_DESCRIPTION("Microchip EMC2305 fan controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 29082c8463f4..bcd93f0fe982 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -439,7 +439,7 @@ static int emc6w201_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "emc6w201", I2C_NAME_SIZE);
+ strscpy(info->type, "emc6w201", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 19b6c643059a..70121482a617 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -237,13 +237,6 @@ static const char f71882fg_nr_temps[] = {
static struct platform_device *f71882fg_pdev;
-/* Super-I/O Function prototypes */
-static inline int superio_inb(int base, int reg);
-static inline int superio_inw(int base, int reg);
-static inline int superio_enter(int base);
-static inline void superio_select(int base, int ld);
-static inline void superio_exit(int base);
-
struct f71882fg_sio_data {
enum chips type;
};
@@ -292,108 +285,422 @@ struct f71882fg_data {
s8 pwm_auto_point_temp[4][4];
};
-/* Sysfs in */
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf);
-static ssize_t show_in_max(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_in_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_in_beep(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_in_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_in_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf);
-/* Sysfs Fan */
-static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
- char *buf);
-static ssize_t show_fan_full_speed(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_fan_full_speed(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_fan_beep(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_fan_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf);
-/* Sysfs Temp */
-static ssize_t show_temp(struct device *dev, struct device_attribute
- *devattr, char *buf);
+static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg)
+{
+ u8 val;
+
+ outb(reg, data->addr + ADDR_REG_OFFSET);
+ val = inb(data->addr + DATA_REG_OFFSET);
+
+ return val;
+}
+
+static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
+{
+ u16 val;
+
+ val = f71882fg_read8(data, reg) << 8;
+ val |= f71882fg_read8(data, reg + 1);
+
+ return val;
+}
+
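+/*
+ * Fan speed and register value are reciprocal: RPM = 1500000 / reg, so e.g. a
+ * register value of 1500 corresponds to 1000 RPM.
+ */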
+static inline int fan_from_reg(u16 reg)
+{
+ return reg ? (1500000 / reg) : 0;
+}
+
+static inline u16 fan_to_reg(int fan)
+{
+ return fan ? (1500000 / fan) : 0;
+}
+
+static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
+{
+ outb(reg, data->addr + ADDR_REG_OFFSET);
+ outb(val, data->addr + DATA_REG_OFFSET);
+}
+
+static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
+{
+ f71882fg_write8(data, reg, val >> 8);
+ f71882fg_write8(data, reg + 1, val & 0xff);
+}
+
+static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
+{
+ if (data->type == f71858fg)
+ return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
+ else
+ return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
+}
+
+static struct f71882fg_data *f71882fg_update_device(struct device *dev)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int nr_fans = f71882fg_nr_fans[data->type];
+ int nr_temps = f71882fg_nr_temps[data->type];
+ int nr, reg, point;
+
+ mutex_lock(&data->update_lock);
+
+ /* Update once every 60 seconds */
+ if (time_after(jiffies, data->last_limits + 60 * HZ) ||
+ !data->valid) {
+ if (f71882fg_has_in1_alarm[data->type]) {
+ if (data->type == f81866a) {
+ data->in1_max =
+ f71882fg_read8(data,
+ F81866_REG_IN1_HIGH);
+ data->in_beep =
+ f71882fg_read8(data,
+ F81866_REG_IN_BEEP);
+ } else {
+ data->in1_max =
+ f71882fg_read8(data,
+ F71882FG_REG_IN1_HIGH);
+ data->in_beep =
+ f71882fg_read8(data,
+ F71882FG_REG_IN_BEEP);
+ }
+ }
+
+		/* Get high & boundary temps */
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++) {
+ data->temp_ovt[nr] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_OVT(nr));
+ data->temp_high[nr] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_HIGH(nr));
+ }
+
+ if (data->type != f8000) {
+ data->temp_hyst[0] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_HYST(0));
+ data->temp_hyst[1] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_HYST(1));
+ }
+ /* All but the f71858fg / f8000 have this register */
+ if ((data->type != f71858fg) && (data->type != f8000)) {
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+ data->temp_type[1] = (reg & 0x02) ? 2 : 4;
+ data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+ data->temp_type[3] = (reg & 0x08) ? 2 : 4;
+ }
+
+ if (f71882fg_fan_has_beep[data->type])
+ data->fan_beep = f71882fg_read8(data,
+ F71882FG_REG_FAN_BEEP);
+
+ if (f71882fg_temp_has_beep[data->type])
+ data->temp_beep = f71882fg_read8(data,
+ F71882FG_REG_TEMP_BEEP);
+
+ data->pwm_enable = f71882fg_read8(data,
+ F71882FG_REG_PWM_ENABLE);
+ data->pwm_auto_point_hyst[0] =
+ f71882fg_read8(data, F71882FG_REG_FAN_HYST(0));
+ data->pwm_auto_point_hyst[1] =
+ f71882fg_read8(data, F71882FG_REG_FAN_HYST(1));
+
+ for (nr = 0; nr < nr_fans; nr++) {
+ data->pwm_auto_point_mapping[nr] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_MAPPING(nr));
+
+ switch (data->type) {
+ default:
+ for (point = 0; point < 5; point++) {
+ data->pwm_auto_point_pwm[nr][point] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM
+ (nr, point));
+ }
+ for (point = 0; point < 4; point++) {
+ data->pwm_auto_point_temp[nr][point] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_TEMP
+ (nr, point));
+ }
+ break;
+ case f71808e:
+ case f71869:
+ data->pwm_auto_point_pwm[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM(nr, 0));
+ fallthrough;
+ case f71862fg:
+ data->pwm_auto_point_pwm[nr][1] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM
+ (nr, 1));
+ data->pwm_auto_point_pwm[nr][4] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM
+ (nr, 4));
+ data->pwm_auto_point_temp[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_TEMP
+ (nr, 0));
+ data->pwm_auto_point_temp[nr][3] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_TEMP
+ (nr, 3));
+ break;
+ }
+ }
+ data->last_limits = jiffies;
+ }
+
+ /* Update every second */
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->temp_status = f71882fg_read8(data,
+ F71882FG_REG_TEMP_STATUS);
+ data->temp_diode_open = f71882fg_read8(data,
+ F71882FG_REG_TEMP_DIODE_OPEN);
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++)
+ data->temp[nr] = f71882fg_read_temp(data, nr);
+
+ data->fan_status = f71882fg_read8(data,
+ F71882FG_REG_FAN_STATUS);
+ for (nr = 0; nr < nr_fans; nr++) {
+ data->fan[nr] = f71882fg_read16(data,
+ F71882FG_REG_FAN(nr));
+ data->fan_target[nr] =
+ f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr));
+ data->fan_full_speed[nr] =
+ f71882fg_read16(data,
+ F71882FG_REG_FAN_FULL_SPEED(nr));
+ data->pwm[nr] =
+ f71882fg_read8(data, F71882FG_REG_PWM(nr));
+ }
+ /* Some models have 1 more fan with limited capabilities */
+ if (data->type == f71808a) {
+ data->fan[2] = f71882fg_read16(data,
+ F71882FG_REG_FAN(2));
+ data->pwm[2] = f71882fg_read8(data,
+ F71882FG_REG_PWM(2));
+ }
+ if (data->type == f8000)
+ data->fan[3] = f71882fg_read16(data,
+ F71882FG_REG_FAN(3));
+
+ if (f71882fg_has_in1_alarm[data->type]) {
+ if (data->type == f81866a)
+ data->in_status = f71882fg_read8(data,
+ F81866_REG_IN_STATUS);
+
+ else
+ data->in_status = f71882fg_read8(data,
+ F71882FG_REG_IN_STATUS);
+ }
+
+ for (nr = 0; nr < F71882FG_MAX_INS; nr++)
+ if (f71882fg_has_in[data->type][nr])
+ data->in[nr] = f71882fg_read8(data,
+ F71882FG_REG_IN(nr));
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", f71882fg_names[data->type]);
+}
+
+static DEVICE_ATTR_RO(name);
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int sign, temp;
+
+ if (data->type == f71858fg) {
+ /* TEMP_TABLE_SEL 1 or 3 ? */
+ if (data->temp_config & 1) {
+ sign = data->temp[nr] & 0x0001;
+ temp = (data->temp[nr] >> 5) & 0x7ff;
+ } else {
+ sign = data->temp[nr] & 0x8000;
+ temp = (data->temp[nr] >> 5) & 0x3ff;
+ }
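+		/* Each step is 0.125 degC; the sign bit subtracts 128 degC. */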
+ temp *= 125;
+ if (sign)
+ temp -= 128000;
+ } else {
+ temp = ((s8)data->temp[nr]) * 1000;
+ }
+
+ return sprintf(buf, "%d\n", temp);
+}
+
static ssize_t show_temp_max(struct device *dev, struct device_attribute
- *devattr, char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp_high[nr] * 1000);
+}
+
static ssize_t store_temp_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
+ data->temp_high[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int temp_max_hyst;
+
+ mutex_lock(&data->update_lock);
+ if (nr & 1)
+ temp_max_hyst = data->temp_hyst[nr / 2] >> 4;
+ else
+ temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f;
+ temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000;
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", temp_max_hyst);
+}
+
static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ ssize_t ret = count;
+ u8 reg;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+
+ mutex_lock(&data->update_lock);
+
+ /* convert abs to relative and check */
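+	/* e.g. temp_high 70 degC and a requested 65 degC store a delta of 5 */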
+ data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
+ val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
+ val = data->temp_high[nr] - val;
+
+ /* convert value to register contents */
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2));
+ if (nr & 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+ f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg);
+ data->temp_hyst[nr / 2] = reg;
+
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->temp_status & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
static ssize_t show_temp_crit(struct device *dev, struct device_attribute
- *devattr, char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000);
+}
+
static ssize_t store_temp_crit(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
+ data->temp_ovt[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t show_temp_type(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t show_temp_beep(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_temp_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf);
-/* PWM and Auto point control */
-static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
- char *buf);
-static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count);
-static ssize_t show_simple_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_simple_pwm(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_enable(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-/* Sysfs misc */
-static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
- char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int temp_crit_hyst;
-static int f71882fg_probe(struct platform_device *pdev);
-static int f71882fg_remove(struct platform_device *pdev);
+ mutex_lock(&data->update_lock);
+ if (nr & 1)
+ temp_crit_hyst = data->temp_hyst[nr / 2] >> 4;
+ else
+ temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f;
+ temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000;
+ mutex_unlock(&data->update_lock);
-static struct platform_driver f71882fg_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = f71882fg_probe,
- .remove = f71882fg_remove,
-};
+ return sprintf(buf, "%d\n", temp_crit_hyst);
+}
-static DEVICE_ATTR_RO(name);
+static ssize_t show_temp_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->temp_diode_open & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
/*
* Temp attr for the f71858fg, the f71858fg is special as it has its
@@ -438,6 +745,15 @@ static struct sensor_device_attribute_2 f71858fg_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
+static ssize_t show_temp_type(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp_type[nr]);
+}
+
/* Temp attr for the standard models */
static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
@@ -490,6 +806,42 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
} };
+static ssize_t show_temp_beep(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->temp_beep & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_temp_beep(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
+ if (val)
+ data->temp_beep |= 1 << nr;
+ else
+ data->temp_beep &= ~(1 << nr);
+
+ f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Temp attr for models which can beep on temp alarm */
static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
@@ -555,6 +907,15 @@ static struct sensor_device_attribute_2 f8000_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
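+
+/* Each count in the in registers corresponds to 8 mV as reported via sysfs. */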
+static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->in[nr] * 8);
+}
+
/* in attr for all models */
static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
@@ -570,6 +931,94 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 0, 10),
};
+static ssize_t show_in_max(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+
+ return sprintf(buf, "%d\n", data->in1_max * 8);
+}
+
+static ssize_t store_in_max(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 8;
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ if (data->type == f81866a)
+ f71882fg_write8(data, F81866_REG_IN1_HIGH, val);
+ else
+ f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
+ data->in1_max = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_in_beep(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->in_beep & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_in_beep(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ if (data->type == f81866a)
+ data->in_beep = f71882fg_read8(data, F81866_REG_IN_BEEP);
+ else
+ data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+
+ if (val)
+ data->in_beep |= 1 << nr;
+ else
+ data->in_beep &= ~(1 << nr);
+
+ if (data->type == f81866a)
+ f71882fg_write8(data, F81866_REG_IN_BEEP, data->in_beep);
+ else
+ f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_in_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->in_status & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
/* For models with in1 alarm capability */
static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
@@ -579,6 +1028,242 @@ static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
};
+static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int speed = fan_from_reg(data->fan[nr]);
+
+ if (speed == FAN_MIN_DETECT)
+ speed = 0;
+
+ return sprintf(buf, "%d\n", speed);
+}
+
+static ssize_t show_fan_full_speed(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int speed = fan_from_reg(data->fan_full_speed[nr]);
+ return sprintf(buf, "%d\n", speed);
+}
+
+static ssize_t store_fan_full_speed(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 23, 1500000);
+ val = fan_to_reg(val);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val);
+ data->fan_full_speed[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->fan_status & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int val, nr = to_sensor_dev_attr_2(devattr)->index;
+ mutex_lock(&data->update_lock);
+ if (data->pwm_enable & (1 << (2 * nr)))
+ /* PWM mode */
+ val = data->pwm[nr];
+ else {
+ /* RPM mode */
+ val = 255 * fan_from_reg(data->fan_target[nr])
+ / fan_from_reg(data->fan_full_speed[nr]);
+ }
+ mutex_unlock(&data->update_lock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+ if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) ||
+ (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) {
+ count = -EROFS;
+ goto leave;
+ }
+ if (data->pwm_enable & (1 << (2 * nr))) {
+ /* PWM mode */
+ f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
+ data->pwm[nr] = val;
+ } else {
+ /* RPM mode */
+ int target, full_speed;
+ full_speed = f71882fg_read16(data,
+ F71882FG_REG_FAN_FULL_SPEED(nr));
+ target = fan_to_reg(val * fan_from_reg(full_speed) / 255);
+ f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target);
+ data->fan_target[nr] = target;
+ data->fan_full_speed[nr] = full_speed;
+ }
+leave:
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int result = 0;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ switch ((data->pwm_enable >> 2 * nr) & 3) {
+ case 0:
+ case 1:
+ result = 2; /* Normal auto mode */
+ break;
+ case 2:
+ result = 1; /* Manual mode */
+ break;
+ case 3:
+ if (data->type == f8000)
+ result = 3; /* Thermostat mode */
+ else
+ result = 1; /* Manual mode */
+ break;
+ }
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ /* Special case for F8000 pwm channel 3 which only does auto mode */
+ if (data->type == f8000 && nr == 2 && val != 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+ /* Special case for F8000 auto PWM mode / Thermostat mode */
+ if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) {
+ switch (val) {
+ case 2:
+ data->pwm_enable &= ~(2 << (2 * nr));
+ break; /* Normal auto mode */
+ case 3:
+ data->pwm_enable |= 2 << (2 * nr);
+ break; /* Thermostat mode */
+ default:
+ count = -EINVAL;
+ goto leave;
+ }
+ } else {
+ switch (val) {
+ case 1:
+ /* The f71858fg does not support manual RPM mode */
+ if (data->type == f71858fg &&
+ ((data->pwm_enable >> (2 * nr)) & 1)) {
+ count = -EINVAL;
+ goto leave;
+ }
+ data->pwm_enable |= 2 << (2 * nr);
+ break; /* Manual */
+ case 2:
+ data->pwm_enable &= ~(2 << (2 * nr));
+ break; /* Normal auto mode */
+ default:
+ count = -EINVAL;
+ goto leave;
+ }
+ }
+ f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable);
+leave:
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_interpolate(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ result = (data->pwm_auto_point_mapping[nr] >> 4) & 1;
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_interpolate(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->pwm_auto_point_mapping[nr] =
+ f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
+ if (val)
+ val = data->pwm_auto_point_mapping[nr] | (1 << 4);
+ else
+ val = data->pwm_auto_point_mapping[nr] & (~(1 << 4));
+ f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
+ data->pwm_auto_point_mapping[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Fan / PWM attr common to all models */
static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
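
The fan handlers added in the hunk above (show_fan(), store_fan_full_speed(), and store_pwm() in RPM mode) all lean on the driver's fan_from_reg()/fan_to_reg() helpers, which encode fan speed as 1500000 divided by the RPM, so the same division converts in both directions; show_fan() additionally reports 0 when the decoded value equals the lowest detectable speed (FAN_MIN_DETECT). Below is a minimal standalone userspace sketch of that conversion; the main() harness is illustrative and not part of the driver.

#include <stdio.h>

/* Same arithmetic as the driver's helpers: reg = 1500000 / rpm and back. */
static int fan_from_reg(unsigned short reg)
{
	return reg ? 1500000 / reg : 0;
}

static unsigned short fan_to_reg(int rpm)
{
	return rpm ? 1500000 / rpm : 0;
}

int main(void)
{
	int rpm = 1200;
	unsigned short reg = fan_to_reg(rpm);

	/* Integer division means the round trip is only approximate in general. */
	printf("%d rpm -> reg %u -> %d rpm\n", rpm, (unsigned int)reg, fan_from_reg(reg));
	return 0;
}
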
@@ -626,6 +1311,38 @@ static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
show_pwm_interpolate, store_pwm_interpolate, 0, 3),
} };
+static ssize_t show_simple_pwm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int val, nr = to_sensor_dev_attr_2(devattr)->index;
+
+ val = data->pwm[nr];
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_simple_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
+ data->pwm[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Attr for the third fan of the f71808a, which only has manual pwm */
static struct sensor_device_attribute_2 f71808a_fan3_attr[] = {
SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2),
@@ -634,6 +1351,42 @@ static struct sensor_device_attribute_2 f71808a_fan3_attr[] = {
show_simple_pwm, store_simple_pwm, 0, 2),
};
+static ssize_t show_fan_beep(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->fan_beep & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_fan_beep(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
+ if (val)
+ data->fan_beep |= 1 << nr;
+ else
+ data->fan_beep &= ~(1 << nr);
+
+ f71882fg_write8(data, F71882FG_REG_FAN_BEEP, data->fan_beep);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Attr for models which can beep on Fan alarm */
static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
@@ -646,6 +1399,209 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
store_fan_beep, 0, 3),
};
+static ssize_t show_pwm_auto_point_channel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) -
+ data->temp_start);
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_channel(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ switch (val) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 4:
+ val = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val += data->temp_start;
+ mutex_lock(&data->update_lock);
+ data->pwm_auto_point_mapping[nr] =
+ f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
+ val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val;
+ f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
+ data->pwm_auto_point_mapping[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+
+ mutex_lock(&data->update_lock);
+ if (data->pwm_enable & (1 << (2 * pwm))) {
+ /* PWM mode */
+ result = data->pwm_auto_point_pwm[pwm][point];
+ } else {
+ /* RPM mode */
+ result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+ if (data->pwm_enable & (1 << (2 * pwm))) {
+ /* PWM mode */
+ } else {
+ /* RPM mode */
+ if (val < 29) /* Prevent negative numbers */
+ val = 255;
+ else
+ val = (255 - val) * 32 / val;
+ }
+ f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val);
+ data->pwm_auto_point_pwm[pwm][point] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_temp(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+
+ result = data->pwm_auto_point_temp[pwm][point];
+ return sprintf(buf, "%d\n", 1000 * result);
+}
+
+static ssize_t store_pwm_auto_point_temp(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+
+ if (data->auto_point_temp_signed)
+ val = clamp_val(val, -128, 127);
+ else
+ val = clamp_val(val, 0, 127);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
+ data->pwm_auto_point_temp[pwm][point] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result = 0;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+
+ mutex_lock(&data->update_lock);
+ if (nr & 1)
+ result = data->pwm_auto_point_hyst[nr / 2] >> 4;
+ else
+ result = data->pwm_auto_point_hyst[nr / 2] & 0x0f;
+ result = 1000 * (data->pwm_auto_point_temp[nr][point] - result);
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+ u8 reg;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+
+ mutex_lock(&data->update_lock);
+ data->pwm_auto_point_temp[nr][point] =
+ f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
+ val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
+ data->pwm_auto_point_temp[nr][point]);
+ val = data->pwm_auto_point_temp[nr][point] - val;
+
+ reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
+ if (nr & 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+
+ f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg);
+ data->pwm_auto_point_hyst[nr / 2] = reg;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/*
* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
* standard models
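
show_pwm_auto_point_temp_hyst() and store_pwm_auto_point_temp_hyst() above (like the temp_max/temp_crit hysteresis handlers) use a packed register layout: two channels share one 8-bit hysteresis register, an odd channel index occupies the high nibble, an even index the low nibble, and the stored 4-bit value is the offset (0 to 15 degrees) below the corresponding trip point. A small standalone sketch of that packing follows; the helper names hyst_pack()/hyst_unpack() are invented for illustration.

#include <stdio.h>

/* Pack/unpack the 4-bit hysteresis offset shared by a channel pair. */
static unsigned char hyst_pack(unsigned char reg, int nr, unsigned char offset)
{
	if (nr & 1)
		return (reg & 0x0f) | (offset << 4);	/* odd channel: high nibble */
	return (reg & 0xf0) | (offset & 0x0f);		/* even channel: low nibble */
}

static int hyst_unpack(unsigned char reg, int nr)
{
	return (nr & 1) ? (reg >> 4) : (reg & 0x0f);
}

int main(void)
{
	unsigned char reg = 0;

	reg = hyst_pack(reg, 0, 4);	/* channel 0: trip point minus 4 degrees */
	reg = hyst_pack(reg, 1, 2);	/* channel 1: trip point minus 2 degrees */
	printf("reg=0x%02x hyst0=%d hyst1=%d\n", reg, hyst_unpack(reg, 0), hyst_unpack(reg, 1));
	return 0;
}
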
@@ -1144,1071 +2100,6 @@ static inline void superio_exit(int base)
release_region(base, 2);
}
-static inline int fan_from_reg(u16 reg)
-{
- return reg ? (1500000 / reg) : 0;
-}
-
-static inline u16 fan_to_reg(int fan)
-{
- return fan ? (1500000 / fan) : 0;
-}
-
-static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg)
-{
- u8 val;
-
- outb(reg, data->addr + ADDR_REG_OFFSET);
- val = inb(data->addr + DATA_REG_OFFSET);
-
- return val;
-}
-
-static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
-{
- u16 val;
-
- val = f71882fg_read8(data, reg) << 8;
- val |= f71882fg_read8(data, reg + 1);
-
- return val;
-}
-
-static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
-{
- outb(reg, data->addr + ADDR_REG_OFFSET);
- outb(val, data->addr + DATA_REG_OFFSET);
-}
-
-static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
-{
- f71882fg_write8(data, reg, val >> 8);
- f71882fg_write8(data, reg + 1, val & 0xff);
-}
-
-static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
-{
- if (data->type == f71858fg)
- return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
- else
- return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
-}
-
-static struct f71882fg_data *f71882fg_update_device(struct device *dev)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr_fans = f71882fg_nr_fans[data->type];
- int nr_temps = f71882fg_nr_temps[data->type];
- int nr, reg, point;
-
- mutex_lock(&data->update_lock);
-
- /* Update once every 60 seconds */
- if (time_after(jiffies, data->last_limits + 60 * HZ) ||
- !data->valid) {
- if (f71882fg_has_in1_alarm[data->type]) {
- if (data->type == f81866a) {
- data->in1_max =
- f71882fg_read8(data,
- F81866_REG_IN1_HIGH);
- data->in_beep =
- f71882fg_read8(data,
- F81866_REG_IN_BEEP);
- } else {
- data->in1_max =
- f71882fg_read8(data,
- F71882FG_REG_IN1_HIGH);
- data->in_beep =
- f71882fg_read8(data,
- F71882FG_REG_IN_BEEP);
- }
- }
-
- /* Get High & boundary temps*/
- for (nr = data->temp_start; nr < nr_temps + data->temp_start;
- nr++) {
- data->temp_ovt[nr] = f71882fg_read8(data,
- F71882FG_REG_TEMP_OVT(nr));
- data->temp_high[nr] = f71882fg_read8(data,
- F71882FG_REG_TEMP_HIGH(nr));
- }
-
- if (data->type != f8000) {
- data->temp_hyst[0] = f71882fg_read8(data,
- F71882FG_REG_TEMP_HYST(0));
- data->temp_hyst[1] = f71882fg_read8(data,
- F71882FG_REG_TEMP_HYST(1));
- }
- /* All but the f71858fg / f8000 have this register */
- if ((data->type != f71858fg) && (data->type != f8000)) {
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- data->temp_type[2] = (reg & 0x04) ? 2 : 4;
- data->temp_type[3] = (reg & 0x08) ? 2 : 4;
- }
-
- if (f71882fg_fan_has_beep[data->type])
- data->fan_beep = f71882fg_read8(data,
- F71882FG_REG_FAN_BEEP);
-
- if (f71882fg_temp_has_beep[data->type])
- data->temp_beep = f71882fg_read8(data,
- F71882FG_REG_TEMP_BEEP);
-
- data->pwm_enable = f71882fg_read8(data,
- F71882FG_REG_PWM_ENABLE);
- data->pwm_auto_point_hyst[0] =
- f71882fg_read8(data, F71882FG_REG_FAN_HYST(0));
- data->pwm_auto_point_hyst[1] =
- f71882fg_read8(data, F71882FG_REG_FAN_HYST(1));
-
- for (nr = 0; nr < nr_fans; nr++) {
- data->pwm_auto_point_mapping[nr] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_MAPPING(nr));
-
- switch (data->type) {
- default:
- for (point = 0; point < 5; point++) {
- data->pwm_auto_point_pwm[nr][point] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM
- (nr, point));
- }
- for (point = 0; point < 4; point++) {
- data->pwm_auto_point_temp[nr][point] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_TEMP
- (nr, point));
- }
- break;
- case f71808e:
- case f71869:
- data->pwm_auto_point_pwm[nr][0] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM(nr, 0));
- fallthrough;
- case f71862fg:
- data->pwm_auto_point_pwm[nr][1] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM
- (nr, 1));
- data->pwm_auto_point_pwm[nr][4] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM
- (nr, 4));
- data->pwm_auto_point_temp[nr][0] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_TEMP
- (nr, 0));
- data->pwm_auto_point_temp[nr][3] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_TEMP
- (nr, 3));
- break;
- }
- }
- data->last_limits = jiffies;
- }
-
- /* Update every second */
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->temp_status = f71882fg_read8(data,
- F71882FG_REG_TEMP_STATUS);
- data->temp_diode_open = f71882fg_read8(data,
- F71882FG_REG_TEMP_DIODE_OPEN);
- for (nr = data->temp_start; nr < nr_temps + data->temp_start;
- nr++)
- data->temp[nr] = f71882fg_read_temp(data, nr);
-
- data->fan_status = f71882fg_read8(data,
- F71882FG_REG_FAN_STATUS);
- for (nr = 0; nr < nr_fans; nr++) {
- data->fan[nr] = f71882fg_read16(data,
- F71882FG_REG_FAN(nr));
- data->fan_target[nr] =
- f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr));
- data->fan_full_speed[nr] =
- f71882fg_read16(data,
- F71882FG_REG_FAN_FULL_SPEED(nr));
- data->pwm[nr] =
- f71882fg_read8(data, F71882FG_REG_PWM(nr));
- }
- /* Some models have 1 more fan with limited capabilities */
- if (data->type == f71808a) {
- data->fan[2] = f71882fg_read16(data,
- F71882FG_REG_FAN(2));
- data->pwm[2] = f71882fg_read8(data,
- F71882FG_REG_PWM(2));
- }
- if (data->type == f8000)
- data->fan[3] = f71882fg_read16(data,
- F71882FG_REG_FAN(3));
-
- if (f71882fg_has_in1_alarm[data->type]) {
- if (data->type == f81866a)
- data->in_status = f71882fg_read8(data,
- F81866_REG_IN_STATUS);
-
- else
- data->in_status = f71882fg_read8(data,
- F71882FG_REG_IN_STATUS);
- }
-
- for (nr = 0; nr < F71882FG_MAX_INS; nr++)
- if (f71882fg_has_in[data->type][nr])
- data->in[nr] = f71882fg_read8(data,
- F71882FG_REG_IN(nr));
-
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
-/* Sysfs Interface */
-static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int speed = fan_from_reg(data->fan[nr]);
-
- if (speed == FAN_MIN_DETECT)
- speed = 0;
-
- return sprintf(buf, "%d\n", speed);
-}
-
-static ssize_t show_fan_full_speed(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int speed = fan_from_reg(data->fan_full_speed[nr]);
- return sprintf(buf, "%d\n", speed);
-}
-
-static ssize_t store_fan_full_speed(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 23, 1500000);
- val = fan_to_reg(val);
-
- mutex_lock(&data->update_lock);
- f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val);
- data->fan_full_speed[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_fan_beep(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->fan_beep & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t store_fan_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
- if (val)
- data->fan_beep |= 1 << nr;
- else
- data->fan_beep &= ~(1 << nr);
-
- f71882fg_write8(data, F71882FG_REG_FAN_BEEP, data->fan_beep);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->fan_status & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->in[nr] * 8);
-}
-
-static ssize_t show_in_max(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
-
- return sprintf(buf, "%d\n", data->in1_max * 8);
-}
-
-static ssize_t store_in_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 8;
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- if (data->type == f81866a)
- f71882fg_write8(data, F81866_REG_IN1_HIGH, val);
- else
- f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
- data->in1_max = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_in_beep(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->in_beep & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t store_in_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- if (data->type == f81866a)
- data->in_beep = f71882fg_read8(data, F81866_REG_IN_BEEP);
- else
- data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
-
- if (val)
- data->in_beep |= 1 << nr;
- else
- data->in_beep &= ~(1 << nr);
-
- if (data->type == f81866a)
- f71882fg_write8(data, F81866_REG_IN_BEEP, data->in_beep);
- else
- f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_in_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->in_status & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int sign, temp;
-
- if (data->type == f71858fg) {
- /* TEMP_TABLE_SEL 1 or 3 ? */
- if (data->temp_config & 1) {
- sign = data->temp[nr] & 0x0001;
- temp = (data->temp[nr] >> 5) & 0x7ff;
- } else {
- sign = data->temp[nr] & 0x8000;
- temp = (data->temp[nr] >> 5) & 0x3ff;
- }
- temp *= 125;
- if (sign)
- temp -= 128000;
- } else {
- temp = ((s8)data->temp[nr]) * 1000;
- }
-
- return sprintf(buf, "%d\n", temp);
-}
-
-static ssize_t show_temp_max(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->temp_high[nr] * 1000);
-}
-
-static ssize_t store_temp_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
- data->temp_high[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int temp_max_hyst;
-
- mutex_lock(&data->update_lock);
- if (nr & 1)
- temp_max_hyst = data->temp_hyst[nr / 2] >> 4;
- else
- temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f;
- temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000;
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", temp_max_hyst);
-}
-
-static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- ssize_t ret = count;
- u8 reg;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
-
- mutex_lock(&data->update_lock);
-
- /* convert abs to relative and check */
- data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
- val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
- val = data->temp_high[nr] - val;
-
- /* convert value to register contents */
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2));
- if (nr & 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
- f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg);
- data->temp_hyst[nr / 2] = reg;
-
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
-static ssize_t show_temp_crit(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000);
-}
-
-static ssize_t store_temp_crit(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
- data->temp_ovt[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int temp_crit_hyst;
-
- mutex_lock(&data->update_lock);
- if (nr & 1)
- temp_crit_hyst = data->temp_hyst[nr / 2] >> 4;
- else
- temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f;
- temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000;
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", temp_crit_hyst);
-}
-
-static ssize_t show_temp_type(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->temp_type[nr]);
-}
-
-static ssize_t show_temp_beep(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->temp_beep & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t store_temp_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
- if (val)
- data->temp_beep |= 1 << nr;
- else
- data->temp_beep &= ~(1 << nr);
-
- f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->temp_status & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->temp_diode_open & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int val, nr = to_sensor_dev_attr_2(devattr)->index;
- mutex_lock(&data->update_lock);
- if (data->pwm_enable & (1 << (2 * nr)))
- /* PWM mode */
- val = data->pwm[nr];
- else {
- /* RPM mode */
- val = 255 * fan_from_reg(data->fan_target[nr])
- / fan_from_reg(data->fan_full_speed[nr]);
- }
- mutex_unlock(&data->update_lock);
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t store_pwm(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) ||
- (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) {
- count = -EROFS;
- goto leave;
- }
- if (data->pwm_enable & (1 << (2 * nr))) {
- /* PWM mode */
- f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
- data->pwm[nr] = val;
- } else {
- /* RPM mode */
- int target, full_speed;
- full_speed = f71882fg_read16(data,
- F71882FG_REG_FAN_FULL_SPEED(nr));
- target = fan_to_reg(val * fan_from_reg(full_speed) / 255);
- f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target);
- data->fan_target[nr] = target;
- data->fan_full_speed[nr] = full_speed;
- }
-leave:
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_simple_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int val, nr = to_sensor_dev_attr_2(devattr)->index;
-
- val = data->pwm[nr];
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t store_simple_pwm(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
- data->pwm[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int result = 0;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- switch ((data->pwm_enable >> 2 * nr) & 3) {
- case 0:
- case 1:
- result = 2; /* Normal auto mode */
- break;
- case 2:
- result = 1; /* Manual mode */
- break;
- case 3:
- if (data->type == f8000)
- result = 3; /* Thermostat mode */
- else
- result = 1; /* Manual mode */
- break;
- }
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- /* Special case for F8000 pwm channel 3 which only does auto mode */
- if (data->type == f8000 && nr == 2 && val != 2)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- /* Special case for F8000 auto PWM mode / Thermostat mode */
- if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) {
- switch (val) {
- case 2:
- data->pwm_enable &= ~(2 << (2 * nr));
- break; /* Normal auto mode */
- case 3:
- data->pwm_enable |= 2 << (2 * nr);
- break; /* Thermostat mode */
- default:
- count = -EINVAL;
- goto leave;
- }
- } else {
- switch (val) {
- case 1:
- /* The f71858fg does not support manual RPM mode */
- if (data->type == f71858fg &&
- ((data->pwm_enable >> (2 * nr)) & 1)) {
- count = -EINVAL;
- goto leave;
- }
- data->pwm_enable |= 2 << (2 * nr);
- break; /* Manual */
- case 2:
- data->pwm_enable &= ~(2 << (2 * nr));
- break; /* Normal auto mode */
- default:
- count = -EINVAL;
- goto leave;
- }
- }
- f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable);
-leave:
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
-
- mutex_lock(&data->update_lock);
- if (data->pwm_enable & (1 << (2 * pwm))) {
- /* PWM mode */
- result = data->pwm_auto_point_pwm[pwm][point];
- } else {
- /* RPM mode */
- result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]);
- }
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- if (data->pwm_enable & (1 << (2 * pwm))) {
- /* PWM mode */
- } else {
- /* RPM mode */
- if (val < 29) /* Prevent negative numbers */
- val = 255;
- else
- val = (255 - val) * 32 / val;
- }
- f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val);
- data->pwm_auto_point_pwm[pwm][point] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result = 0;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
-
- mutex_lock(&data->update_lock);
- if (nr & 1)
- result = data->pwm_auto_point_hyst[nr / 2] >> 4;
- else
- result = data->pwm_auto_point_hyst[nr / 2] & 0x0f;
- result = 1000 * (data->pwm_auto_point_temp[nr][point] - result);
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
- u8 reg;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
-
- mutex_lock(&data->update_lock);
- data->pwm_auto_point_temp[nr][point] =
- f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
- val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
- data->pwm_auto_point_temp[nr][point]);
- val = data->pwm_auto_point_temp[nr][point] - val;
-
- reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
- if (nr & 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
-
- f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg);
- data->pwm_auto_point_hyst[nr / 2] = reg;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- result = (data->pwm_auto_point_mapping[nr] >> 4) & 1;
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->pwm_auto_point_mapping[nr] =
- f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
- if (val)
- val = data->pwm_auto_point_mapping[nr] | (1 << 4);
- else
- val = data->pwm_auto_point_mapping[nr] & (~(1 << 4));
- f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
- data->pwm_auto_point_mapping[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) -
- data->temp_start);
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- switch (val) {
- case 1:
- val = 0;
- break;
- case 2:
- val = 1;
- break;
- case 4:
- val = 2;
- break;
- default:
- return -EINVAL;
- }
- val += data->temp_start;
- mutex_lock(&data->update_lock);
- data->pwm_auto_point_mapping[nr] =
- f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
- val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val;
- f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
- data->pwm_auto_point_mapping[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
-
- result = data->pwm_auto_point_temp[pwm][point];
- return sprintf(buf, "%d\n", 1000 * result);
-}
-
-static ssize_t store_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
-
- if (data->auto_point_temp_signed)
- val = clamp_val(val, -128, 127);
- else
- val = clamp_val(val, 0, 127);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
- data->pwm_auto_point_temp[pwm][point] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", f71882fg_names[data->type]);
-}
-
static int f71882fg_create_sysfs_files(struct platform_device *pdev,
struct sensor_device_attribute_2 *attr, int count)
{
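
The helpers removed near the top of this hunk (f71882fg_read8()/f71882fg_write8() and their 16-bit variants) implement the usual Super-I/O indexed access pattern: the register index is written to an address port, the value is transferred through the data port, and 16-bit registers are read as two consecutive bytes, high byte first. A rough standalone sketch of the same pattern, using an in-memory register file instead of outb()/inb() so it runs anywhere; the function names and the example register offset are made up.

#include <stdio.h>

static unsigned char regs[256];		/* stand-in for the address/data port pair */

static unsigned char read8(unsigned char reg)
{
	return regs[reg];		/* driver: outb(reg, addr port); return inb(data port); */
}

static void write8(unsigned char reg, unsigned char val)
{
	regs[reg] = val;		/* driver: outb(reg, addr port); outb(val, data port); */
}

static unsigned short read16(unsigned char reg)
{
	return (read8(reg) << 8) | read8(reg + 1);
}

static void write16(unsigned char reg, unsigned short val)
{
	write8(reg, val >> 8);
	write8(reg + 1, val & 0xff);
}

int main(void)
{
	write16(0xa0, 1250);		/* hypothetical 16-bit fan count register */
	printf("0x%02x 0x%02x -> %d\n", regs[0xa0], regs[0xa1], read16(0xa0));
	return 0;
}
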
@@ -2329,6 +2220,119 @@ static int f71882fg_create_fan_sysfs_files(
return err;
}
+static int f71882fg_remove(struct platform_device *pdev)
+{
+ struct f71882fg_data *data = platform_get_drvdata(pdev);
+ int nr_fans = f71882fg_nr_fans[data->type];
+ int nr_temps = f71882fg_nr_temps[data->type];
+ int i;
+ u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ device_remove_file(&pdev->dev, &dev_attr_name);
+
+ if (start_reg & 0x01) {
+ switch (data->type) {
+ case f71858fg:
+ if (data->temp_config & 0x10)
+ f71882fg_remove_sysfs_files(pdev,
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
+ else
+ f71882fg_remove_sysfs_files(pdev,
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
+ break;
+ case f8000:
+ f71882fg_remove_sysfs_files(pdev,
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
+ break;
+ case f81866a:
+ f71882fg_remove_sysfs_files(pdev,
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
+ break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
+ }
+ if (f71882fg_temp_has_beep[data->type]) {
+ if (data->type == f81866a)
+ f71882fg_remove_sysfs_files(pdev,
+ &f81866_temp_beep_attr[0][0],
+ ARRAY_SIZE(f81866_temp_beep_attr[0])
+ * nr_temps);
+ else
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+ * nr_temps);
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ device_remove_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
+ }
+ }
+
+ if (start_reg & 0x02) {
+ f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
+ ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
+
+ if (f71882fg_fan_has_beep[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_fan_beep_attr, nr_fans);
+ }
+
+ switch (data->type) {
+ case f71808a:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_auto_pwm_attr[0][0],
+ ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
+ f71882fg_remove_sysfs_files(pdev,
+ f71808a_fan3_attr,
+ ARRAY_SIZE(f71808a_fan3_attr));
+ break;
+ case f71862fg:
+ f71882fg_remove_sysfs_files(pdev,
+ &f71862fg_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f71862fg_auto_pwm_attr[0]) *
+ nr_fans);
+ break;
+ case f71808e:
+ case f71869:
+ f71882fg_remove_sysfs_files(pdev,
+ &f71869_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f71869_auto_pwm_attr[0]) * nr_fans);
+ break;
+ case f8000:
+ f71882fg_remove_sysfs_files(pdev,
+ f8000_fan_attr,
+ ARRAY_SIZE(f8000_fan_attr));
+ f71882fg_remove_sysfs_files(pdev,
+ &f8000_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f8000_auto_pwm_attr[0]) * nr_fans);
+ break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_auto_pwm_attr[0][0],
+ ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
+ }
+ }
+ return 0;
+}
+
static int f71882fg_probe(struct platform_device *pdev)
{
struct f71882fg_data *data;
@@ -2502,119 +2506,6 @@ exit_unregister_sysfs:
return err; /* f71882fg_remove() also frees our data */
}
-static int f71882fg_remove(struct platform_device *pdev)
-{
- struct f71882fg_data *data = platform_get_drvdata(pdev);
- int nr_fans = f71882fg_nr_fans[data->type];
- int nr_temps = f71882fg_nr_temps[data->type];
- int i;
- u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- device_remove_file(&pdev->dev, &dev_attr_name);
-
- if (start_reg & 0x01) {
- switch (data->type) {
- case f71858fg:
- if (data->temp_config & 0x10)
- f71882fg_remove_sysfs_files(pdev,
- f8000_temp_attr,
- ARRAY_SIZE(f8000_temp_attr));
- else
- f71882fg_remove_sysfs_files(pdev,
- f71858fg_temp_attr,
- ARRAY_SIZE(f71858fg_temp_attr));
- break;
- case f8000:
- f71882fg_remove_sysfs_files(pdev,
- f8000_temp_attr,
- ARRAY_SIZE(f8000_temp_attr));
- break;
- case f81866a:
- f71882fg_remove_sysfs_files(pdev,
- f71858fg_temp_attr,
- ARRAY_SIZE(f71858fg_temp_attr));
- break;
- default:
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_temp_attr[0][0],
- ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
- }
- if (f71882fg_temp_has_beep[data->type]) {
- if (data->type == f81866a)
- f71882fg_remove_sysfs_files(pdev,
- &f81866_temp_beep_attr[0][0],
- ARRAY_SIZE(f81866_temp_beep_attr[0])
- * nr_temps);
- else
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_temp_beep_attr[0][0],
- ARRAY_SIZE(fxxxx_temp_beep_attr[0])
- * nr_temps);
- }
-
- for (i = 0; i < F71882FG_MAX_INS; i++) {
- if (f71882fg_has_in[data->type][i]) {
- device_remove_file(&pdev->dev,
- &fxxxx_in_attr[i].dev_attr);
- }
- }
- if (f71882fg_has_in1_alarm[data->type]) {
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- }
- }
-
- if (start_reg & 0x02) {
- f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
- ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
-
- if (f71882fg_fan_has_beep[data->type]) {
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_fan_beep_attr, nr_fans);
- }
-
- switch (data->type) {
- case f71808a:
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_auto_pwm_attr[0][0],
- ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
- f71882fg_remove_sysfs_files(pdev,
- f71808a_fan3_attr,
- ARRAY_SIZE(f71808a_fan3_attr));
- break;
- case f71862fg:
- f71882fg_remove_sysfs_files(pdev,
- &f71862fg_auto_pwm_attr[0][0],
- ARRAY_SIZE(f71862fg_auto_pwm_attr[0]) *
- nr_fans);
- break;
- case f71808e:
- case f71869:
- f71882fg_remove_sysfs_files(pdev,
- &f71869_auto_pwm_attr[0][0],
- ARRAY_SIZE(f71869_auto_pwm_attr[0]) * nr_fans);
- break;
- case f8000:
- f71882fg_remove_sysfs_files(pdev,
- f8000_fan_attr,
- ARRAY_SIZE(f8000_fan_attr));
- f71882fg_remove_sysfs_files(pdev,
- &f8000_auto_pwm_attr[0][0],
- ARRAY_SIZE(f8000_auto_pwm_attr[0]) * nr_fans);
- break;
- default:
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_auto_pwm_attr[0][0],
- ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
- }
- }
- return 0;
-}
-
static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data)
{
u16 devid;
@@ -2760,6 +2651,14 @@ exit_device_put:
return err;
}
+static struct platform_driver f71882fg_driver = {
+ .driver = {
+ .name = DRVNAME,
+ },
+ .probe = f71882fg_probe,
+ .remove = f71882fg_remove,
+};
+
static int __init f71882fg_init(void)
{
int err;
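
The f71882fg changes above move the show/store handler definitions in front of the sensor_device_attribute_2 tables and the platform_driver definition, which is why the forward declarations and the old copies of the functions are removed. A tiny standalone illustration of the underlying C rule, that a static initializer can only reference an identifier that has already been declared; the struct and names below are invented for the example.

#include <stdio.h>

struct attr {
	const char *name;
	int (*show)(char *buf);
};

static int show_temp(char *buf)			/* defined before the table ... */
{
	return sprintf(buf, "42000\n");
}

static const struct attr temp_attrs[] = {
	{ "temp1_input", show_temp },		/* ... so no forward declaration is needed */
};

int main(void)
{
	char buf[16];

	temp_attrs[0].show(buf);
	printf("%s: %s", temp_attrs[0].name, buf);
	return 0;
}
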
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 57c8a473698d..64fbb8cf687c 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -114,7 +114,7 @@ struct f75375_data {
static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int f75375_probe(struct i2c_client *client);
-static int f75375_remove(struct i2c_client *client);
+static void f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
{ "f75373", f75373 },
@@ -864,12 +864,11 @@ exit_remove:
return err;
}
-static int f75375_remove(struct i2c_client *client)
+static void f75375_remove(struct i2c_client *client)
{
struct f75375_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &f75375_group);
- return 0;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
@@ -897,7 +896,7 @@ static int f75375_detect(struct i2c_client *client,
version = f75375_read8(client, F75375_REG_VERSION);
dev_info(&adapter->dev, "found %s version: %02X\n", name, version);
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
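
Several of the following drivers switch strlcpy() to strscpy(). Below is a rough userspace approximation of the difference in the return-value contract; the real kernel implementations differ, and the my_*() helpers are only illustrative. strlcpy() returns strlen(src), so it walks the whole source even when the destination is small and truncation must be inferred by comparing against the buffer size, while strscpy() stops at the destination size and reports truncation directly as -E2BIG.

#include <stdio.h>
#include <string.h>

#define E2BIG 7				/* mirrors the kernel errno value */

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);	/* always walks the whole source */

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;			/* length it tried to create */
}

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -E2BIG;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? -E2BIG : (long)i;	/* copied length, or error on truncation */
}

int main(void)
{
	char buf[8];

	printf("my_strlcpy: %zu \"%s\"\n", my_strlcpy(buf, "f75375-hwmon", sizeof(buf)), buf);
	printf("my_strscpy: %ld \"%s\"\n", my_strscpy(buf, "f75375-hwmon", sizeof(buf)), buf);
	return 0;
}
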
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index c26195e3aad7..0a77d6161928 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -217,7 +217,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 };
static int fschmd_probe(struct i2c_client *client);
static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int fschmd_remove(struct i2c_client *client);
+static void fschmd_remove(struct i2c_client *client);
static struct fschmd_data *fschmd_update_device(struct device *dev);
/*
@@ -1075,7 +1075,7 @@ static int fschmd_detect(struct i2c_client *client,
else
return -ENODEV;
- strlcpy(info->type, fschmd_id[kind].name, I2C_NAME_SIZE);
+ strscpy(info->type, fschmd_id[kind].name, I2C_NAME_SIZE);
return 0;
}
@@ -1248,7 +1248,7 @@ exit_detach:
return err;
}
-static int fschmd_remove(struct i2c_client *client)
+static void fschmd_remove(struct i2c_client *client)
{
struct fschmd_data *data = i2c_get_clientdata(client);
int i;
@@ -1291,8 +1291,6 @@ static int fschmd_remove(struct i2c_client *client)
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, fschmd_release_resources);
mutex_unlock(&watchdog_data_mutex);
-
- return 0;
}
static struct fschmd_data *fschmd_update_device(struct device *dev)
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index ceffc76a0c51..f5b8e724a8ca 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -739,17 +739,16 @@ static int fts_detect(struct i2c_client *client,
if (val != 0x11)
return -ENODEV;
- strlcpy(info->type, fts_id[0].name, I2C_NAME_SIZE);
+ strscpy(info->type, fts_id[0].name, I2C_NAME_SIZE);
info->flags = 0;
return 0;
}
-static int fts_remove(struct i2c_client *client)
+static void fts_remove(struct i2c_client *client)
{
struct fts_data *data = dev_get_drvdata(&client->dev);
watchdog_unregister_device(&data->wdd);
- return 0;
}
static int fts_probe(struct i2c_client *client)
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index dd683b0a648f..95286c40f55a 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -586,7 +586,7 @@ static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info)
if (rev != 0x00 && rev != 0x80)
return -ENODEV;
- strlcpy(info->type, "gl518sm", I2C_NAME_SIZE);
+ strscpy(info->type, "gl518sm", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 096ba9797211..394da4ac977c 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -811,7 +811,7 @@ static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, "gl520sm", I2C_NAME_SIZE);
+ strscpy(info->type, "gl520sm", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index befe989ca7b9..ba408942dbe7 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -37,9 +37,7 @@ struct gpio_fan_data {
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
-#ifdef CONFIG_PM_SLEEP
int resume_speed;
-#endif
bool pwm_enable;
struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
@@ -391,6 +389,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (!fan_data)
return -EINVAL;
+ if (state >= fan_data->num_speed)
+ return -EINVAL;
+
set_fan_speed(fan_data, state);
return 0;
}
@@ -554,7 +555,6 @@ static void gpio_fan_shutdown(struct platform_device *pdev)
set_fan_speed(fan_data, 0);
}
-#ifdef CONFIG_PM_SLEEP
static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
@@ -577,18 +577,14 @@ static int gpio_fan_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
-#define GPIO_FAN_PM (&gpio_fan_pm)
-#else
-#define GPIO_FAN_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
- .pm = GPIO_FAN_PM,
+ .pm = pm_sleep_ptr(&gpio_fan_pm),
.of_match_table = of_match_ptr(of_gpio_fan_match),
},
};
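
The gpio-fan change above drops the CONFIG_PM_SLEEP #ifdefs in favour of DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), which always defines the ops but hands the driver core a NULL pointer when sleep support is compiled out. A very rough userspace model of that idea follows; the sleep_ptr() macro, the CONFIG_SLEEP switch and the fan callbacks are invented for illustration.

#include <stdio.h>

#define CONFIG_SLEEP 1			/* flip to 0 to "compile out" sleep support */

#if CONFIG_SLEEP
#define sleep_ptr(ops) (ops)
#else
#define sleep_ptr(ops) ((void *)0)
#endif

struct dev_pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

static int fan_suspend(void) { puts("suspend: speed saved"); return 0; }
static int fan_resume(void)  { puts("resume: speed restored"); return 0; }

/* Always defined, no #ifdef around the callbacks. */
static const struct dev_pm_ops fan_pm = { fan_suspend, fan_resume };

int main(void)
{
	const struct dev_pm_ops *pm = sleep_ptr(&fan_pm);

	if (pm) {
		pm->suspend();
		pm->resume();
	}
	return 0;
}
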
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index d64be48f1ef6..b60ec95b5edb 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -267,6 +267,7 @@ gsc_hwmon_get_devtree_pdata(struct device *dev)
pdata->nchannels = nchannels;
/* fan controller base address */
+ of_node_get(dev->parent->of_node);
fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan");
if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) {
of_node_put(fan);
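The gsc-hwmon hunk fixes a reference-count imbalance: of_find_compatible_node() puts the node it is given as a starting point, so the caller must take an extra reference first if it does not own one to give away. A minimal sketch of the idiom, with a hypothetical compatible string:

#include <linux/of.h>

/*
 * Find a "vendor,fan" node, searching from @parent onwards, without
 * losing the caller's reference: of_find_compatible_node() calls
 * of_node_put() on the node it starts from.
 */
static struct device_node *example_find_fan(struct device_node *parent)
{
	of_node_get(parent);	/* balanced by the put inside the search */
	return of_find_compatible_node(parent, NULL, "vendor,fan");
}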
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 2e2cd79d89eb..4218750d5a66 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -151,9 +151,9 @@ static DEFINE_IDA(hwmon_ida);
* between hwmon and thermal_sys modules.
*/
#ifdef CONFIG_THERMAL_OF
-static int hwmon_thermal_get_temp(void *data, int *temp)
+static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct hwmon_thermal_data *tdata = data;
+ struct hwmon_thermal_data *tdata = tz->devdata;
struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
@@ -168,9 +168,9 @@ static int hwmon_thermal_get_temp(void *data, int *temp)
return 0;
}
-static int hwmon_thermal_set_trips(void *data, int low, int high)
+static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct hwmon_thermal_data *tdata = data;
+ struct hwmon_thermal_data *tdata = tz->devdata;
struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
const struct hwmon_chip_info *chip = hwdev->chip;
const struct hwmon_channel_info **info = chip->info;
@@ -203,7 +203,7 @@ static int hwmon_thermal_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
+static const struct thermal_zone_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
.set_trips = hwmon_thermal_set_trips,
};
@@ -227,8 +227,8 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
tdata->dev = dev;
tdata->index = index;
- tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
- &hwmon_thermal_ops);
+ tzd = devm_thermal_of_zone_register(dev, index, tdata,
+ &hwmon_thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) != -ENODEV)
return PTR_ERR(tzd);
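The hwmon core hunk tracks the thermal framework rework: thermal_zone_of_device_ops becomes thermal_zone_device_ops, the callbacks receive the thermal zone itself (with the sensor data reachable through tz->devdata, as the diff shows), and registration goes through devm_thermal_of_zone_register(). A minimal sketch of a sensor on the new interface, with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/thermal.h>

struct foo_sensor {
	struct device *dev;
	int index;
};

static int foo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct foo_sensor *s = tz->devdata;	/* was the void *data argument */

	(void)s;
	*temp = 42000;	/* millidegrees C; read the hardware here */
	return 0;
}

static const struct thermal_zone_device_ops foo_tz_ops = {
	.get_temp = foo_get_temp,
};

static int foo_register(struct device *dev, struct foo_sensor *s)
{
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_of_zone_register(dev, s->index, s, &foo_tz_ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);
	return 0;
}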
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 580a7d125b88..3aa40893fc09 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -6,11 +6,13 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+
#include <linux/hwmon.h>
-#include <linux/of.h>
#include <linux/hwmon-sysfs.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
@@ -149,8 +151,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attr_group.attrs = st->attrs;
st->groups[0] = &st->attr_group;
- if (dev->of_node) {
- sname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+ if (dev_fwnode(dev)) {
+ sname = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", dev_fwnode(dev));
if (!sname)
return -ENOMEM;
strreplace(sname, '-', '_');
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index fc3007c3e85c..9b58655d2de4 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -568,13 +568,11 @@ out_restore_conf:
return ret;
}
-static int ina209_remove(struct i2c_client *client)
+static void ina209_remove(struct i2c_client *client)
{
struct ina209_data *data = i2c_get_clientdata(client);
ina209_restore_conf(client, data);
-
- return 0;
}
static const struct i2c_device_id ina209_id[] = {
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 58d3828e2ec0..2a57f4b60c29 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -913,7 +913,7 @@ fail:
return ret;
}
-static int ina3221_remove(struct i2c_client *client)
+static void ina3221_remove(struct i2c_client *client)
{
struct ina3221_data *ina = dev_get_drvdata(&client->dev);
int i;
@@ -926,11 +926,9 @@ static int ina3221_remove(struct i2c_client *client)
pm_runtime_put_noidle(ina->pm_dev);
mutex_destroy(&ina->lock);
-
- return 0;
}
-static int __maybe_unused ina3221_suspend(struct device *dev)
+static int ina3221_suspend(struct device *dev)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
int ret;
@@ -953,7 +951,7 @@ static int __maybe_unused ina3221_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ina3221_resume(struct device *dev)
+static int ina3221_resume(struct device *dev)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
int ret;
@@ -996,11 +994,8 @@ static int __maybe_unused ina3221_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops ina3221_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(ina3221_suspend, ina3221_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(ina3221_pm, ina3221_suspend, ina3221_resume,
+ NULL);
static const struct of_device_id ina3221_of_match_table[] = {
{ .compatible = "ti,ina3221", },
@@ -1020,7 +1015,7 @@ static struct i2c_driver ina3221_i2c_driver = {
.driver = {
.name = INA3221_DRIVER_NAME,
.of_match_table = ina3221_of_match_table,
- .pm = &ina3221_pm,
+ .pm = pm_ptr(&ina3221_pm),
},
.id_table = ina3221_ids,
};
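For ina3221 the open-coded dev_pm_ops is replaced by DEFINE_RUNTIME_DEV_PM_OPS(), which keeps the same shape: the given callbacks become the runtime PM hooks and system sleep is routed through pm_runtime_force_suspend()/pm_runtime_force_resume(); pm_ptr() then drops the whole structure when CONFIG_PM is disabled. A minimal sketch with hypothetical foo_* names:

#include <linux/i2c.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* power the chip down */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* power the chip back up */
	return 0;
}

/* Runtime PM callbacks plus force_suspend/resume for system sleep. */
static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm, foo_runtime_suspend,
				 foo_runtime_resume, NULL);

static struct i2c_driver foo_i2c_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm),	/* NULL when CONFIG_PM=n */
	},
};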
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 0e543dbe0a6b..7bd154ba351b 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3179,7 +3179,7 @@ static int it87_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static void __maybe_unused it87_resume_sio(struct platform_device *pdev)
+static void it87_resume_sio(struct platform_device *pdev)
{
struct it87_data *data = dev_get_drvdata(&pdev->dev);
int err;
@@ -3211,7 +3211,7 @@ static void __maybe_unused it87_resume_sio(struct platform_device *pdev)
superio_exit(data->sioaddr);
}
-static int __maybe_unused it87_resume(struct device *dev)
+static int it87_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct it87_data *data = dev_get_drvdata(dev);
@@ -3238,12 +3238,12 @@ static int __maybe_unused it87_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(it87_dev_pm_ops, NULL, it87_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(it87_dev_pm_ops, NULL, it87_resume);
static struct platform_driver it87_driver = {
.driver = {
.name = DRVNAME,
- .pm = &it87_dev_pm_ops,
+ .pm = pm_sleep_ptr(&it87_dev_pm_ops),
},
.probe = it87_probe,
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 07f7f8b5b73d..30888feaf589 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -441,7 +441,7 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
struct jc42_chips *chip = &jc42_chips[i];
if (manid == chip->manid &&
(devid & chip->devid_mask) == chip->devid) {
- strlcpy(info->type, "jc42", I2C_NAME_SIZE);
+ strscpy(info->type, "jc42", I2C_NAME_SIZE);
return 0;
}
}
@@ -524,7 +524,7 @@ static int jc42_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int jc42_remove(struct i2c_client *client)
+static void jc42_remove(struct i2c_client *client)
{
struct jc42_data *data = i2c_get_clientdata(client);
@@ -537,7 +537,6 @@ static int jc42_remove(struct i2c_client *client)
| (data->config & JC42_CFG_HYST_MASK);
i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
}
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 339a145afc09..9ab2cab4c710 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -996,11 +996,11 @@ static int lm63_detect(struct i2c_client *client,
}
if (chip_id == 0x41 && address == 0x4c)
- strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ strscpy(info->type, "lm63", I2C_NAME_SIZE);
else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
- strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ strscpy(info->type, "lm64", I2C_NAME_SIZE);
else if (chip_id == 0x49 && address == 0x4c)
- strlcpy(info->type, "lm96163", I2C_NAME_SIZE);
+ strscpy(info->type, "lm96163", I2C_NAME_SIZE);
else
return -ENODEV;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index beb0d61bcd82..1346b3b3f463 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -257,7 +257,7 @@ static int lm73_detect(struct i2c_client *new_client,
if (id < 0 || id != LM73_ID)
return -ENODEV;
- strlcpy(info->type, "lm73", I2C_NAME_SIZE);
+ strscpy(info->type, "lm73", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 66dc826f7962..bcc3adcb3af1 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -893,7 +893,7 @@ static int lm75_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE);
+ strscpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index df6af85e170a..645cb2191abe 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -302,7 +302,7 @@ static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info)
|| i2c_smbus_read_word_data(client, 7) != min)
return -ENODEV;
- strlcpy(info->type, "lm77", I2C_NAME_SIZE);
+ strscpy(info->type, "lm77", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 5e129cbec1cb..694e171cab7f 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -617,7 +617,7 @@ static int lm78_i2c_detect(struct i2c_client *client,
if (isa)
mutex_unlock(&isa->update_lock);
- strlcpy(info->type, client_name, I2C_NAME_SIZE);
+ strscpy(info->type, client_name, I2C_NAME_SIZE);
return 0;
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index e85e062bbf32..35db0b97f912 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -586,7 +586,7 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
name = "lm80";
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 905f5689f907..616449f2cc50 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -412,7 +412,7 @@ static int lm83_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 88cf2012d34b..8d33c2484755 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1539,7 +1539,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
if (!type_name)
return -ENODEV;
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 1750bc588856..818fb6195245 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -833,7 +833,7 @@ static int lm87_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 221de01a327a..db595f7d01f8 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2547,7 +2547,7 @@ static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
@@ -2956,7 +2956,7 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
}
}
-static int __maybe_unused lm90_suspend(struct device *dev)
+static int lm90_suspend(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -2967,7 +2967,7 @@ static int __maybe_unused lm90_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused lm90_resume(struct device *dev)
+static int lm90_resume(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -2978,14 +2978,14 @@ static int __maybe_unused lm90_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(lm90_pm_ops, lm90_suspend, lm90_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(lm90_pm_ops, lm90_suspend, lm90_resume);
static struct i2c_driver lm90_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm90",
.of_match_table = of_match_ptr(lm90_of_match),
- .pm = &lm90_pm_ops,
+ .pm = pm_sleep_ptr(&lm90_pm_ops),
},
.probe_new = lm90_probe,
.alert = lm90_alert,
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 5bae6eedcaf1..2ff3044a677d 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -287,7 +287,7 @@ static int lm92_detect(struct i2c_client *new_client,
else
return -ENODEV;
- strlcpy(info->type, "lm92", I2C_NAME_SIZE);
+ strscpy(info->type, "lm92", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index dc67bf954b21..4cf50d5f4f59 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2575,7 +2575,7 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
dev_dbg(&adapter->dev, "loading %s at %d, 0x%02x\n",
client->name, i2c_adapter_id(client->adapter),
client->addr);
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index ac169a994ae0..b4a9d0c223c4 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -644,7 +644,7 @@ static int lm95234_detect(struct i2c_client *client,
if (val & model_mask)
return -ENODEV;
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 8ea46ff20be5..f1ed777a8735 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -389,7 +389,7 @@ static int lm95241_detect(struct i2c_client *new_client,
}
/* Fill the i2c board info */
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 29388fcf5f74..c433f0af2d31 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -461,7 +461,7 @@ static int lm95245_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index 5423466de697..7404e974762f 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -956,13 +956,6 @@ static struct attribute *ltc2947_attrs[] = {
};
ATTRIBUTE_GROUPS(ltc2947);
-static void ltc2947_clk_disable(void *data)
-{
- struct clk *extclk = data;
-
- clk_disable_unprepare(extclk);
-}
-
static int ltc2947_setup(struct ltc2947_data *st)
{
int ret;
@@ -989,7 +982,7 @@ static int ltc2947_setup(struct ltc2947_data *st)
return ret;
/* check external clock presence */
- extclk = devm_clk_get_optional(st->dev, NULL);
+ extclk = devm_clk_get_optional_enabled(st->dev, NULL);
if (IS_ERR(extclk))
return dev_err_probe(st->dev, PTR_ERR(extclk),
"Failed to get external clock\n");
@@ -1007,14 +1000,6 @@ static int ltc2947_setup(struct ltc2947_data *st)
return -EINVAL;
}
- ret = clk_prepare_enable(extclk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(st->dev, ltc2947_clk_disable,
- extclk);
- if (ret)
- return ret;
/* as in table 1 of the datasheet */
if (rate_hz >= LTC2947_CLK_MIN && rate_hz <= 1000000)
pre = 0;
@@ -1135,7 +1120,7 @@ int ltc2947_core_probe(struct regmap *map, const char *name)
}
EXPORT_SYMBOL_GPL(ltc2947_core_probe);
-static int __maybe_unused ltc2947_resume(struct device *dev)
+static int ltc2947_resume(struct device *dev)
{
struct ltc2947_data *st = dev_get_drvdata(dev);
u32 ctrl = 0;
@@ -1164,7 +1149,7 @@ static int __maybe_unused ltc2947_resume(struct device *dev)
LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
}
-static int __maybe_unused ltc2947_suspend(struct device *dev)
+static int ltc2947_suspend(struct device *dev)
{
struct ltc2947_data *st = dev_get_drvdata(dev);
@@ -1172,8 +1157,7 @@ static int __maybe_unused ltc2947_suspend(struct device *dev)
LTC2947_SHUTDOWN_MASK, 1);
}
-SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
-EXPORT_SYMBOL_GPL(ltc2947_pm_ops);
+EXPORT_SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
const struct of_device_id ltc2947_of_match[] = {
{ .compatible = "adi,ltc2947" },
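The ltc2947 change relies on devm_clk_get_optional_enabled(), which combines devm_clk_get_optional(), clk_prepare_enable() and a devres action that disables and unprepares the clock on driver detach, so the hand-rolled ltc2947_clk_disable() callback can go. A minimal sketch of the resulting idiom, with a hypothetical foo_setup():

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_setup(struct device *dev)
{
	struct clk *clk;

	/*
	 * NULL if the clock is absent; otherwise already prepared and
	 * enabled, and disabled/unprepared automatically on detach.
	 */
	clk = devm_clk_get_optional_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk)
		dev_info(dev, "external clock at %lu Hz\n", clk_get_rate(clk));

	return 0;
}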
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
index ad0dfd3efbf8..96852bc8a964 100644
--- a/drivers/hwmon/ltc2947-i2c.c
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -36,7 +36,7 @@ static struct i2c_driver ltc2947_driver = {
.driver = {
.name = "ltc2947",
.of_match_table = ltc2947_of_match,
- .pm = &ltc2947_pm_ops,
+ .pm = pm_sleep_ptr(&ltc2947_pm_ops),
},
.probe_new = ltc2947_probe,
.id_table = ltc2947_id,
diff --git a/drivers/hwmon/ltc2947-spi.c b/drivers/hwmon/ltc2947-spi.c
index c24ca569db1b..a33be110098c 100644
--- a/drivers/hwmon/ltc2947-spi.c
+++ b/drivers/hwmon/ltc2947-spi.c
@@ -38,7 +38,7 @@ static struct spi_driver ltc2947_driver = {
.driver = {
.name = "ltc2947",
.of_match_table = ltc2947_of_match,
- .pm = &ltc2947_pm_ops,
+ .pm = pm_sleep_ptr(&ltc2947_pm_ops),
},
.probe = ltc2947_probe,
.id_table = ltc2947_id,
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index eae9e68027bc..445c77197f69 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -241,7 +241,7 @@ static int max1619_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "max1619", I2C_NAME_SIZE);
+ strscpy(info->type, "max1619", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 78688e6cb87d..9f748973d6a3 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -386,7 +386,7 @@ static int max1668_detect(struct i2c_client *client,
if (!type_name)
return -ENODEV;
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 93e048ee4955..9a31ef388396 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -113,7 +113,7 @@ static void max31722_remove(struct spi_device *spi)
dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
}
-static int __maybe_unused max31722_suspend(struct device *dev)
+static int max31722_suspend(struct device *dev)
{
struct spi_device *spi_device = to_spi_device(dev);
struct max31722_data *data = spi_get_drvdata(spi_device);
@@ -121,7 +121,7 @@ static int __maybe_unused max31722_suspend(struct device *dev)
return max31722_set_mode(data, MAX31722_MODE_STANDBY);
}
-static int __maybe_unused max31722_resume(struct device *dev)
+static int max31722_resume(struct device *dev)
{
struct spi_device *spi_device = to_spi_device(dev);
struct max31722_data *data = spi_get_drvdata(spi_device);
@@ -129,7 +129,7 @@ static int __maybe_unused max31722_resume(struct device *dev)
return max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
}
-static SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
static const struct spi_device_id max31722_spi_id[] = {
{"max31722", 0},
@@ -141,7 +141,7 @@ MODULE_DEVICE_TABLE(spi, max31722_spi_id);
static struct spi_driver max31722_driver = {
.driver = {
.name = "max31722",
- .pm = &max31722_pm_ops,
+ .pm = pm_sleep_ptr(&max31722_pm_ops),
},
.probe = max31722_probe,
.remove = max31722_remove,
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
index 23598b8b8793..746a767c9fc6 100644
--- a/drivers/hwmon/max31730.c
+++ b/drivers/hwmon/max31730.c
@@ -399,33 +399,33 @@ static int max31730_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+ strscpy(info->type, "max31730", I2C_NAME_SIZE);
return 0;
}
-static int __maybe_unused max31730_suspend(struct device *dev)
+static int max31730_suspend(struct device *dev)
{
struct max31730_data *data = dev_get_drvdata(dev);
return max31730_write_config(data, MAX31730_STOP, 0);
}
-static int __maybe_unused max31730_resume(struct device *dev)
+static int max31730_resume(struct device *dev)
{
struct max31730_data *data = dev_get_drvdata(dev);
return max31730_write_config(data, 0, MAX31730_STOP);
}
-static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
static struct i2c_driver max31730_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max31730",
.of_match_table = of_match_ptr(max31730_of_match),
- .pm = &max31730_pm_ops,
+ .pm = pm_sleep_ptr(&max31730_pm_ops),
},
.probe_new = max31730_probe,
.id_table = max31730_ids,
diff --git a/drivers/hwmon/max31760.c b/drivers/hwmon/max31760.c
new file mode 100644
index 000000000000..06d5f39dc33d
--- /dev/null
+++ b/drivers/hwmon/max31760.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
+
+#define REG_CR1 0x00
+#define CR1_HYST BIT(5)
+#define CR1_DRV GENMASK(4, 3)
+#define CR1_TEMP_SRC GENMASK(1, 0)
+#define REG_CR2 0x01
+#define CR2_STBY BIT(7)
+#define CR2_ALERTS BIT(6)
+#define CR2_DFC BIT(0)
+#define REG_CR3 0x02
+#define REG_PWMR 0x50
+#define REG_PWMV 0x51
+#define REG_STATUS 0x5A
+#define STATUS_ALARM_CRIT(ch) BIT(2 + 2 * (ch))
+#define STATUS_ALARM_MAX(ch) BIT(3 + 2 * (ch))
+#define STATUS_RDFA BIT(6)
+
+#define REG_TACH(ch) (0x52 + (ch) * 2)
+#define REG_TEMP_INPUT(ch) (0x56 + (ch) * 2)
+#define REG_TEMP_MAX(ch) (0x06 + (ch) * 2)
+#define REG_TEMP_CRIT(ch) (0x0A + (ch) * 2)
+
+#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
+#define TEMP11_TO_REG(val) (DIV_ROUND_CLOSEST(clamp_val((val), -128000, \
+ 127875), 125) * 32)
+
+#define LUT_SIZE 48
+
+#define REG_LUT(index) (0x20 + (index))
+
+struct max31760_state {
+ struct regmap *regmap;
+
+ struct lut_attribute {
+ char name[24];
+ struct sensor_device_attribute sda;
+ } lut[LUT_SIZE];
+
+ struct attribute *attrs[LUT_SIZE + 2];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+};
+
+static bool max31760_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg > 0x50;
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x5B,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = max31760_volatile_reg,
+};
+
+static const int max31760_pwm_freq[] = {33, 150, 1500, 25000};
+
+static int tach_to_rpm(u16 tach)
+{
+ if (tach == 0)
+ tach = 1;
+
+ return 60 * 100000 / tach / 2;
+}
+
+static int max31760_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int regval;
+ unsigned int reg_temp;
+ s16 temp;
+ u8 reg[2];
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_fault:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(STATUS_RDFA, regval);
+
+ return 0;
+ case hwmon_temp_max_alarm:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(STATUS_ALARM_MAX(1), regval);
+ else
+ *val = FIELD_GET(STATUS_ALARM_MAX(0), regval);
+
+ return 0;
+ case hwmon_temp_crit_alarm:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(STATUS_ALARM_CRIT(1), regval);
+ else
+ *val = FIELD_GET(STATUS_ALARM_CRIT(0), regval);
+
+ return 0;
+ case hwmon_temp_input:
+ reg_temp = REG_TEMP_INPUT(channel);
+ break;
+ case hwmon_temp_max:
+ reg_temp = REG_TEMP_MAX(channel);
+ break;
+ case hwmon_temp_crit:
+ reg_temp = REG_TEMP_CRIT(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = regmap_bulk_read(state->regmap, reg_temp, reg, 2);
+ if (ret)
+ return ret;
+
+ temp = (reg[0] << 8) | reg[1];
+
+ *val = TEMP11_FROM_REG(temp);
+
+ return 0;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = regmap_bulk_read(state->regmap, REG_TACH(channel), reg, 2);
+ if (ret)
+ return ret;
+
+ *val = tach_to_rpm(reg[0] * 256 + reg[1]);
+
+ return 0;
+ case hwmon_fan_fault:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(BIT(1), regval);
+ else
+ *val = FIELD_GET(BIT(0), regval);
+
+ return 0;
+ case hwmon_fan_enable:
+ ret = regmap_read(state->regmap, REG_CR3, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(BIT(1), regval);
+ else
+ *val = FIELD_GET(BIT(0), regval);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = regmap_read(state->regmap, REG_PWMV, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+ case hwmon_pwm_freq:
+ ret = regmap_read(state->regmap, REG_CR1, &regval);
+ if (ret)
+ return ret;
+
+ regval = FIELD_GET(CR1_DRV, regval);
+ if (regval >= ARRAY_SIZE(max31760_pwm_freq))
+ return -EINVAL;
+
+ *val = max31760_pwm_freq[regval];
+
+ return 0;
+ case hwmon_pwm_enable:
+ ret = regmap_read(state->regmap, REG_CR2, &regval);
+ if (ret)
+ return ret;
+
+ *val = 2 - FIELD_GET(CR2_DFC, regval);
+
+ return 0;
+ case hwmon_pwm_auto_channels_temp:
+ ret = regmap_read(state->regmap, REG_CR1, &regval);
+ if (ret)
+ return ret;
+
+ switch (FIELD_GET(CR1_TEMP_SRC, regval)) {
+ case 0:
+ *val = 2;
+ break;
+ case 1:
+ *val = 1;
+ break;
+ case 2:
+ case 3:
+ *val = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max31760_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int pwm_index;
+ unsigned int reg_temp;
+ int temp;
+ u8 reg_val[2];
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_max:
+ reg_temp = REG_TEMP_MAX(channel);
+ break;
+ case hwmon_temp_crit:
+ reg_temp = REG_TEMP_CRIT(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ temp = TEMP11_TO_REG(val);
+ reg_val[0] = temp >> 8;
+ reg_val[1] = temp & 0xFF;
+
+ return regmap_bulk_write(state->regmap, reg_temp, reg_val, 2);
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_enable:
+ if (val == 0)
+ return regmap_clear_bits(state->regmap, REG_CR3, BIT(channel));
+
+ if (val == 1)
+ return regmap_set_bits(state->regmap, REG_CR3, BIT(channel));
+
+ return -EINVAL;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ return regmap_write(state->regmap, REG_PWMR, val);
+ case hwmon_pwm_enable:
+ if (val == 1)
+ return regmap_set_bits(state->regmap, REG_CR2, CR2_DFC);
+
+ if (val == 2)
+ return regmap_clear_bits(state->regmap, REG_CR2, CR2_DFC);
+
+ return -EINVAL;
+ case hwmon_pwm_freq:
+ pwm_index = find_closest(val, max31760_pwm_freq,
+ ARRAY_SIZE(max31760_pwm_freq));
+
+ return regmap_update_bits(state->regmap,
+ REG_CR1, CR1_DRV,
+ FIELD_PREP(CR1_DRV, pwm_index));
+ case hwmon_pwm_auto_channels_temp:
+ switch (val) {
+ case 1:
+ break;
+ case 2:
+ val = 0;
+ break;
+ case 3:
+ val = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(state->regmap, REG_CR1, CR1_TEMP_SRC, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *max31760_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_ENABLE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_FAULT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_ENABLE | HWMON_PWM_FREQ | HWMON_PWM_INPUT |
+ HWMON_PWM_AUTO_CHANNELS_TEMP),
+ NULL
+};
+
+static umode_t max31760_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ case hwmon_fan_enable:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_input:
+ case hwmon_pwm_freq:
+ case hwmon_pwm_auto_channels_temp:
+ return 0644;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int max31760_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = channel ? "local" : "remote";
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops max31760_hwmon_ops = {
+ .is_visible = max31760_is_visible,
+ .read = max31760_read,
+ .write = max31760_write,
+ .read_string = max31760_read_string
+};
+
+static const struct hwmon_chip_info max31760_chip_info = {
+ .ops = &max31760_hwmon_ops,
+ .info = max31760_info,
+};
+
+static ssize_t lut_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(devattr);
+ struct max31760_state *state = dev_get_drvdata(dev);
+ int ret;
+ unsigned int regval;
+
+ ret = regmap_read(state->regmap, REG_LUT(sda->index), &regval);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", regval);
+}
+
+static ssize_t lut_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(devattr);
+ struct max31760_state *state = dev_get_drvdata(dev);
+ int ret;
+ u8 pwm;
+
+ ret = kstrtou8(buf, 10, &pwm);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(state->regmap, REG_LUT(sda->index), pwm);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t pwm1_auto_point_temp_hyst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(state->regmap, REG_CR1, &regval);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", (1 + (int)FIELD_GET(CR1_HYST, regval)) * 2000);
+}
+
+static ssize_t pwm1_auto_point_temp_hyst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int hyst;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &hyst);
+ if (ret)
+ return ret;
+
+ if (hyst < 3000)
+ ret = regmap_clear_bits(state->regmap, REG_CR1, CR1_HYST);
+ else
+ ret = regmap_set_bits(state->regmap, REG_CR1, CR1_HYST);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(pwm1_auto_point_temp_hyst);
+
+static void max31760_create_lut_nodes(struct max31760_state *state)
+{
+ int i;
+ struct sensor_device_attribute *sda;
+ struct lut_attribute *lut;
+
+ for (i = 0; i < LUT_SIZE; ++i) {
+ lut = &state->lut[i];
+ sda = &lut->sda;
+
+ snprintf(lut->name, sizeof(lut->name),
+ "pwm1_auto_point%d_pwm", i + 1);
+
+ sda->dev_attr.attr.mode = 0644;
+ sda->index = i;
+ sda->dev_attr.show = lut_show;
+ sda->dev_attr.store = lut_store;
+ sda->dev_attr.attr.name = lut->name;
+
+ sysfs_attr_init(&sda->dev_attr.attr);
+
+ state->attrs[i] = &sda->dev_attr.attr;
+ }
+
+ state->attrs[i] = &dev_attr_pwm1_auto_point_temp_hyst.attr;
+
+ state->group.attrs = state->attrs;
+ state->groups[0] = &state->group;
+}
+
+static int max31760_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max31760_state *state;
+ struct device *hwmon_dev;
+ int ret;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(state->regmap))
+ return dev_err_probe(dev,
+ PTR_ERR(state->regmap),
+ "regmap initialization failed\n");
+
+ dev_set_drvdata(dev, state);
+
+ /* Set alert output to comparator mode */
+ ret = regmap_set_bits(state->regmap, REG_CR2, CR2_ALERTS);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot write register\n");
+
+ max31760_create_lut_nodes(state);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ state,
+ &max31760_chip_info,
+ state->groups);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id max31760_of_match[] = {
+ {.compatible = "adi,max31760"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, max31760_of_match);
+
+static const struct i2c_device_id max31760_id[] = {
+ {"max31760"},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31760_id);
+
+static int max31760_suspend(struct device *dev)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+
+ return regmap_set_bits(state->regmap, REG_CR2, CR2_STBY);
+}
+
+static int max31760_resume(struct device *dev)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+
+ return regmap_clear_bits(state->regmap, REG_CR2, CR2_STBY);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max31760_pm_ops, max31760_suspend,
+ max31760_resume);
+
+static struct i2c_driver max31760_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max31760",
+ .of_match_table = max31760_of_match,
+ .pm = pm_ptr(&max31760_pm_ops)
+ },
+ .probe_new = max31760_probe,
+ .id_table = max31760_id
+};
+module_i2c_driver(max31760_driver);
+
+MODULE_AUTHOR("Ibrahim Tilki <Ibrahim.Tilki@analog.com>");
+MODULE_DESCRIPTION("Analog Devices MAX31760 Fan Speed Controller");
+MODULE_SOFTDEP("pre: regmap_i2c");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
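The conversions in the new max31760 driver are easy to sanity-check by hand: the tachometer counts a 100 kHz clock over one revolution's two pulses, so RPM = 60 * 100000 / tach / 2, and temperatures are left-aligned two's-complement register values in 0.125 degC steps, which TEMP11_FROM_REG()/TEMP11_TO_REG() scale by dividing by 32 and multiplying by 125 (and back). A standalone sketch of the same arithmetic, runnable in user space and not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the driver's helpers, outside the kernel. */
static int tach_to_rpm(uint16_t tach)
{
	if (tach == 0)
		tach = 1;
	return 60 * 100000 / tach / 2;
}

static long temp11_from_reg(int16_t reg)	/* register -> millidegrees C */
{
	return (long)reg / 32 * 125;
}

int main(void)
{
	/* A tach count of 1000 corresponds to 3000 RPM. */
	printf("tach 1000 -> %d RPM\n", tach_to_rpm(1000));
	/* 0x1900 = 25 degC left-aligned in 0.125 degC steps -> 25000 mC. */
	printf("reg 0x1900 -> %ld mC\n", temp11_from_reg(0x1900));
	return 0;
}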
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 7e9362f6dc29..20bf5ffadefe 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -202,6 +202,9 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
}
mutex_unlock(&data->update_lock);
return 0;
+ case hwmon_fan_enable:
+ *val = !!(data->fan_config[channel] & MAX31790_FAN_CFG_TACH_INPUT_EN);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -214,7 +217,7 @@ static int max31790_write_fan(struct device *dev, u32 attr, int channel,
struct i2c_client *client = data->client;
int target_count;
int err = 0;
- u8 bits;
+ u8 bits, fan_config;
int sr;
mutex_lock(&data->update_lock);
@@ -243,6 +246,23 @@ static int max31790_write_fan(struct device *dev, u32 attr, int channel,
MAX31790_REG_TARGET_COUNT(channel),
data->target_count[channel]);
break;
+ case hwmon_fan_enable:
+ fan_config = data->fan_config[channel];
+ if (val == 0) {
+ fan_config &= ~MAX31790_FAN_CFG_TACH_INPUT_EN;
+ } else if (val == 1) {
+ fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN;
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ if (fan_config != data->fan_config[channel]) {
+ err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
+ fan_config);
+ if (!err)
+ data->fan_config[channel] = fan_config;
+ }
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -270,6 +290,10 @@ static umode_t max31790_fan_is_visible(const void *_data, u32 attr, int channel)
!(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
return 0644;
return 0;
+ case hwmon_fan_enable:
+ if (channel < NR_CHANNEL)
+ return 0644;
+ return 0;
default:
return 0;
}
@@ -423,12 +447,12 @@ static umode_t max31790_is_visible(const void *data,
static const struct hwmon_channel_info *max31790_info[] = {
HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 14bb7726f8d7..9b895402c80d 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -514,7 +514,7 @@ static int max6639_detect(struct i2c_client *client,
if (dev_id != 0x58 || manu_id != 0x4D)
return -ENODEV;
- strlcpy(info->type, "max6639", I2C_NAME_SIZE);
+ strscpy(info->type, "max6639", I2C_NAME_SIZE);
return 0;
}
@@ -571,7 +571,6 @@ static int max6639_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-#ifdef CONFIG_PM_SLEEP
static int max6639_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -609,7 +608,6 @@ static int max6639_resume(struct device *dev)
return i2c_smbus_write_byte_data(client,
MAX6639_REG_GCONFIG, ret & ~MAX6639_GCONFIG_STANDBY);
}
-#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id max6639_id[] = {
{"max6639", 0},
@@ -618,13 +616,13 @@ static const struct i2c_device_id max6639_id[] = {
MODULE_DEVICE_TABLE(i2c, max6639_id);
-static SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max6639",
- .pm = &max6639_pm_ops,
+ .pm = pm_sleep_ptr(&max6639_pm_ops),
},
.probe_new = max6639_probe,
.id_table = max6639_id,
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 699d265aae2e..47ea34ff78f3 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -148,7 +148,7 @@ static int max6642_detect(struct i2c_client *client,
if ((reg_status & 0x2b) != 0x00)
return -ENODEV;
- strlcpy(info->type, "max6642", I2C_NAME_SIZE);
+ strscpy(info->type, "max6642", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 26278b0f17a9..394a4c7e46ab 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -9,6 +9,7 @@
*/
#include <linux/bits.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -17,6 +18,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/units.h>
/* PVT Common register */
@@ -30,6 +32,8 @@
#define CH_NUM_MSK GENMASK(31, 24)
#define CH_NUM_SFT 24
+#define VM_NUM_MAX (VM_NUM_MSK >> VM_NUM_SFT)
+
/* Macro Common Register */
#define CLK_SYNTH 0x00
#define CLK_SYNTH_LO_SFT 0
@@ -68,8 +72,9 @@
/* VM Individual Macro Register */
#define VM_COM_REG_SIZE 0x200
-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
+#define VM_SDIF_DATA(vm, ch) \
+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
/* SDA Slave Register */
#define IP_CTRL 0x00
@@ -98,13 +103,67 @@
#define PVT_POLL_DELAY_US 20
#define PVT_POLL_TIMEOUT_US 20000
-#define PVT_H_CONST 100000
-#define PVT_CAL5_CONST 2047
-#define PVT_G_CONST 40000
#define PVT_CONV_BITS 10
#define PVT_N_CONST 90
#define PVT_R_CONST 245805
+#define PVT_TEMP_MIN_mC -40000
+#define PVT_TEMP_MAX_mC 125000
+
+/* Temperature coefficients for series 5 */
+#define PVT_SERIES5_H_CONST 200000
+#define PVT_SERIES5_G_CONST 60000
+#define PVT_SERIES5_J_CONST -100
+#define PVT_SERIES5_CAL5_CONST 4094
+
+/* Temperature coefficients for series 6 */
+#define PVT_SERIES6_H_CONST 249400
+#define PVT_SERIES6_G_CONST 57400
+#define PVT_SERIES6_J_CONST 0
+#define PVT_SERIES6_CAL5_CONST 4096
+
+#define TEMPERATURE_SENSOR_SERIES_5 5
+#define TEMPERATURE_SENSOR_SERIES_6 6
+
+#define PRE_SCALER_X1 1
+#define PRE_SCALER_X2 2
+
+/**
+ * struct voltage_device - VM single input parameters.
+ * @vm_map: Map channel number to VM index.
+ * @ch_map: Map channel number to channel index.
+ * @pre_scaler: Pre-scaler value (1 or 2) used to normalize the voltage output
+ * result.
+ *
+ * The structure provides mapping between channel-number (0..N-1) to VM-index
+ * (0..num_vm-1) and channel-index (0..ch_num-1) where N = num_vm * ch_num.
+ * It also provides normalization factor for the VM equation.
+ */
+struct voltage_device {
+ u32 vm_map;
+ u32 ch_map;
+ u32 pre_scaler;
+};
+
+/**
+ * struct voltage_channels - VM channel count.
+ * @total: Total number of channels in all VMs.
+ * @max: Maximum number of channels among all VMs.
+ *
+ * The structure provides channel count information across all VMs.
+ */
+struct voltage_channels {
+ u32 total;
+ u8 max;
+};
+
+struct temp_coeff {
+ u32 h;
+ u32 g;
+ u32 cal5;
+ s32 j;
+};
+
struct pvt_device {
struct regmap *c_map;
struct regmap *t_map;
@@ -112,13 +171,74 @@ struct pvt_device {
struct regmap *v_map;
struct clk *clk;
struct reset_control *rst;
+ struct dentry *dbgfs_dir;
+ struct voltage_device *vd;
+ struct voltage_channels vm_channels;
+ struct temp_coeff ts_coeff;
u32 t_num;
u32 p_num;
u32 v_num;
u32 ip_freq;
- u8 *vm_idx;
};
+static ssize_t pvt_ts_coeff_j_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pvt_device *pvt = file->private_data;
+ unsigned int len;
+ char buf[13];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", pvt->ts_coeff.j);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t pvt_ts_coeff_j_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pvt_device *pvt = file->private_data;
+ int ret;
+
+ ret = kstrtos32_from_user(user_buf, count, 0, &pvt->ts_coeff.j);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations pvt_ts_coeff_j_fops = {
+ .read = pvt_ts_coeff_j_read,
+ .write = pvt_ts_coeff_j_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void devm_pvt_ts_dbgfs_remove(void *data)
+{
+ struct pvt_device *pvt = (struct pvt_device *)data;
+
+ debugfs_remove_recursive(pvt->dbgfs_dir);
+ pvt->dbgfs_dir = NULL;
+}
+
+static int pvt_ts_dbgfs_create(struct pvt_device *pvt, struct device *dev)
+{
+ pvt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+ debugfs_create_u32("ts_coeff_h", 0644, pvt->dbgfs_dir,
+ &pvt->ts_coeff.h);
+ debugfs_create_u32("ts_coeff_g", 0644, pvt->dbgfs_dir,
+ &pvt->ts_coeff.g);
+ debugfs_create_u32("ts_coeff_cal5", 0644, pvt->dbgfs_dir,
+ &pvt->ts_coeff.cal5);
+ debugfs_create_file("ts_coeff_j", 0644, pvt->dbgfs_dir, pvt,
+ &pvt_ts_coeff_j_fops);
+
+ return devm_add_action_or_reset(dev, devm_pvt_ts_dbgfs_remove, pvt);
+}
+
static umode_t pvt_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -137,13 +257,28 @@ static umode_t pvt_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
+static long pvt_calc_temp(struct pvt_device *pvt, u32 nbs)
+{
+ /*
+ * Convert the register value to degrees centigrade temperature:
+ * T = G + H * (n / cal5 - 0.5) + J * F
+ */
+ struct temp_coeff *ts_coeff = &pvt->ts_coeff;
+
+ s64 tmp = ts_coeff->g +
+ div_s64(ts_coeff->h * (s64)nbs, ts_coeff->cal5) -
+ ts_coeff->h / 2 +
+ div_s64(ts_coeff->j * (s64)pvt->ip_freq, HZ_PER_MHZ);
+
+ return clamp_val(tmp, PVT_TEMP_MIN_mC, PVT_TEMP_MAX_mC);
+}
+
static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *t_map = pvt->t_map;
u32 stat, nbs;
int ret;
- u64 tmp;
switch (attr) {
case hwmon_temp_input:
@@ -155,7 +290,7 @@ static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
return ret;
ret = regmap_read(t_map, SDIF_DATA(channel), &nbs);
- if(ret < 0)
+ if (ret < 0)
return ret;
nbs &= SAMPLE_DATA_MSK;
@@ -164,9 +299,7 @@ static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
* Convert the register value to
* degrees centigrade temperature
*/
- tmp = nbs * PVT_H_CONST;
- do_div(tmp, PVT_CAL5_CONST);
- *val = tmp - PVT_G_CONST - pvt->ip_freq;
+ *val = pvt_calc_temp(pvt, nbs);
return 0;
default:
@@ -178,14 +311,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *v_map = pvt->v_map;
- u32 n, stat;
- u8 vm_idx;
+ u32 n, stat, pre_scaler;
+ u8 vm_idx, ch_idx;
int ret;
- if (channel >= pvt->v_num)
+ if (channel >= pvt->vm_channels.total)
return -EINVAL;
- vm_idx = pvt->vm_idx[channel];
+ vm_idx = pvt->vd[channel].vm_map;
+ ch_idx = pvt->vd[channel].ch_map;
switch (attr) {
case hwmon_in_input:
@@ -196,13 +330,25 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
if (ret)
return ret;
- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
- if(ret < 0)
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
+ if (ret < 0)
return ret;
n &= SAMPLE_DATA_MSK;
- /* Convert the N bitstream count into voltage */
- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+ pre_scaler = pvt->vd[channel].pre_scaler;
+ /*
+ * Convert the N bitstream count into voltage.
+ * To support negative voltage calculation for 64bit machines
+ * n must be cast to long, since n and *val differ both in
+ * signedness and in size.
+ * Division is used instead of right shift, because for signed
+ * numbers, the sign bit is used to fill the vacated bit
+ * positions, and if the number is negative, 1 is used.
+ * BIT(x) may not be used instead of (1 << x) because it's
+ * unsigned.
+ */
+ *val = pre_scaler * (PVT_N_CONST * (long)n - PVT_R_CONST) /
+ (1 << PVT_CONV_BITS);
return 0;
default:
@@ -277,23 +423,23 @@ static int pvt_init(struct pvt_device *pvt)
(key >> 1) << CLK_SYNTH_HI_SFT |
(key >> 1) << CLK_SYNTH_HOLD_SFT | CLK_SYNTH_EN;
- pvt->ip_freq = sys_freq * 100 / (key + 2);
+ pvt->ip_freq = clk_get_rate(pvt->clk) / (key + 2);
if (t_num) {
ret = regmap_write(t_map, SDIF_SMPL_CTRL, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(t_map, SDIF_HALT, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(t_map, CLK_SYNTH, clk_synth);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(t_map, SDIF_DISABLE, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
@@ -306,7 +452,7 @@ static int pvt_init(struct pvt_device *pvt)
val = CFG0_MODE_2 | CFG0_PARALLEL_OUT | CFG0_12_BIT |
IP_CFG << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(t_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
@@ -319,7 +465,7 @@ static int pvt_init(struct pvt_device *pvt)
val = POWER_DELAY_CYCLE_256 | IP_TMR << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(t_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
@@ -333,39 +479,52 @@ static int pvt_init(struct pvt_device *pvt)
IP_CTRL << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(t_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
}
if (p_num) {
ret = regmap_write(p_map, SDIF_HALT, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(p_map, SDIF_DISABLE, BIT(p_num) - 1);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(p_map, CLK_SYNTH, clk_synth);
- if(ret < 0)
+ if (ret < 0)
return ret;
}
if (v_num) {
ret = regmap_write(v_map, SDIF_SMPL_CTRL, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(v_map, SDIF_HALT, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(v_map, CLK_SYNTH, clk_synth);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(v_map, SDIF_DISABLE, 0x0);
- if(ret < 0)
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = (BIT(pvt->vm_channels.max) - 1) | VM_CH_INIT |
+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
@@ -379,7 +538,7 @@ static int pvt_init(struct pvt_device *pvt)
CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(v_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
@@ -392,7 +551,7 @@ static int pvt_init(struct pvt_device *pvt)
val = POWER_DELAY_CYCLE_64 | IP_TMR << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(v_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
@@ -406,7 +565,7 @@ static int pvt_init(struct pvt_device *pvt)
IP_CTRL << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(v_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
}
@@ -451,46 +610,163 @@ static int pvt_get_regmap(struct platform_device *pdev, char *reg_name,
return 0;
}
-static void pvt_clk_disable(void *data)
+static void pvt_reset_control_assert(void *data)
{
struct pvt_device *pvt = data;
- clk_disable_unprepare(pvt->clk);
+ reset_control_assert(pvt->rst);
}
-static int pvt_clk_enable(struct device *dev, struct pvt_device *pvt)
+static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt)
{
int ret;
- ret = clk_prepare_enable(pvt->clk);
+ ret = reset_control_deassert(pvt->rst);
if (ret)
return ret;
- return devm_add_action_or_reset(dev, pvt_clk_disable, pvt);
+ return devm_add_action_or_reset(dev, pvt_reset_control_assert, pvt);
}
-static void pvt_reset_control_assert(void *data)
+static int pvt_get_active_channel(struct device *dev, struct pvt_device *pvt,
+ u32 vm_num, u32 ch_num, u8 *vm_idx)
{
- struct pvt_device *pvt = data;
+ u8 vm_active_ch[VM_NUM_MAX];
+ int ret, i, j, k;
- reset_control_assert(pvt->rst);
+ ret = device_property_read_u8_array(dev, "moortec,vm-active-channels",
+ vm_active_ch, vm_num);
+ if (ret) {
+ /*
+ * In case the "moortec,vm-active-channels" property is not defined,
+ * we assume each VM sensor has all of its channels active.
+ */
+ memset(vm_active_ch, ch_num, vm_num);
+ pvt->vm_channels.max = ch_num;
+ pvt->vm_channels.total = ch_num * vm_num;
+ } else {
+ for (i = 0; i < vm_num; i++) {
+ if (vm_active_ch[i] > ch_num) {
+ dev_err(dev, "invalid active channels: %u\n",
+ vm_active_ch[i]);
+ return -EINVAL;
+ }
+
+ pvt->vm_channels.total += vm_active_ch[i];
+
+ if (vm_active_ch[i] > pvt->vm_channels.max)
+ pvt->vm_channels.max = vm_active_ch[i];
+ }
+ }
+
+ /*
+ * Map between the channel-number to VM-index and channel-index.
+ * Example - 3 VMs, "moortec,vm-active-channels" = <5 2 4>:
+ * vm_map = [0 0 0 0 0 1 1 2 2 2 2]
+ * ch_map = [0 1 2 3 4 0 1 0 1 2 3]
+ */
+ pvt->vd = devm_kcalloc(dev, pvt->vm_channels.total, sizeof(*pvt->vd),
+ GFP_KERNEL);
+ if (!pvt->vd)
+ return -ENOMEM;
+
+ k = 0;
+ for (i = 0; i < vm_num; i++) {
+ for (j = 0; j < vm_active_ch[i]; j++) {
+ pvt->vd[k].vm_map = vm_idx[i];
+ pvt->vd[k].ch_map = j;
+ k++;
+ }
+ }
+
+ return 0;
}
-static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt)
+static int pvt_get_pre_scaler(struct device *dev, struct pvt_device *pvt)
{
+ u8 *pre_scaler_ch_list;
+ int i, ret, num_ch;
+ u32 channel;
+
+ /* Set default pre-scaler value to be 1. */
+ for (i = 0; i < pvt->vm_channels.total; i++)
+ pvt->vd[i].pre_scaler = PRE_SCALER_X1;
+
+ /* Get number of channels configured in "moortec,vm-pre-scaler-x2". */
+ num_ch = device_property_count_u8(dev, "moortec,vm-pre-scaler-x2");
+ if (num_ch <= 0)
+ return 0;
+
+ pre_scaler_ch_list = kcalloc(num_ch, sizeof(*pre_scaler_ch_list),
+ GFP_KERNEL);
+ if (!pre_scaler_ch_list)
+ return -ENOMEM;
+
+ /* Get list of all channels that have pre-scaler of 2. */
+ ret = device_property_read_u8_array(dev, "moortec,vm-pre-scaler-x2",
+ pre_scaler_ch_list, num_ch);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < num_ch; i++) {
+ channel = pre_scaler_ch_list[i];
+ pvt->vd[channel].pre_scaler = PRE_SCALER_X2;
+ }
+
+out:
+ kfree(pre_scaler_ch_list);
+
+ return ret;
+}
+
+static int pvt_set_temp_coeff(struct device *dev, struct pvt_device *pvt)
+{
+ struct temp_coeff *ts_coeff = &pvt->ts_coeff;
+ u32 series;
int ret;
- ret = reset_control_deassert(pvt->rst);
+ /* In case the ts-series property is not defined, default to series 5. */
+ ret = device_property_read_u32(dev, "moortec,ts-series", &series);
if (ret)
- return ret;
+ series = TEMPERATURE_SENSOR_SERIES_5;
+
+ switch (series) {
+ case TEMPERATURE_SENSOR_SERIES_5:
+ ts_coeff->h = PVT_SERIES5_H_CONST;
+ ts_coeff->g = PVT_SERIES5_G_CONST;
+ ts_coeff->j = PVT_SERIES5_J_CONST;
+ ts_coeff->cal5 = PVT_SERIES5_CAL5_CONST;
+ break;
+ case TEMPERATURE_SENSOR_SERIES_6:
+ ts_coeff->h = PVT_SERIES6_H_CONST;
+ ts_coeff->g = PVT_SERIES6_G_CONST;
+ ts_coeff->j = PVT_SERIES6_J_CONST;
+ ts_coeff->cal5 = PVT_SERIES6_CAL5_CONST;
+ break;
+ default:
+ dev_err(dev, "invalid temperature sensor series (%u)\n",
+ series);
+ return -EINVAL;
+ }
- return devm_add_action_or_reset(dev, pvt_reset_control_assert, pvt);
+ dev_dbg(dev, "temperature sensor series = %u\n", series);
+
+ /* Override ts-coeff-h/g/j/cal5 if they are defined. */
+ device_property_read_u32(dev, "moortec,ts-coeff-h", &ts_coeff->h);
+ device_property_read_u32(dev, "moortec,ts-coeff-g", &ts_coeff->g);
+ device_property_read_u32(dev, "moortec,ts-coeff-j", &ts_coeff->j);
+ device_property_read_u32(dev, "moortec,ts-coeff-cal5", &ts_coeff->cal5);
+
+ dev_dbg(dev, "ts-coeff: h = %u, g = %u, j = %d, cal5 = %u\n",
+ ts_coeff->h, ts_coeff->g, ts_coeff->j, ts_coeff->cal5);
+
+ return 0;
}
static int mr75203_probe(struct platform_device *pdev)
{
+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
const struct hwmon_channel_info **pvt_info;
- u32 ts_num, vm_num, pd_num, val, index, i;
struct device *dev = &pdev->dev;
u32 *temp_config, *in_config;
struct device *hwmon_dev;
@@ -505,32 +781,30 @@ static int mr75203_probe(struct platform_device *pdev)
if (ret)
return ret;
- pvt->clk = devm_clk_get(dev, NULL);
+ pvt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(pvt->clk))
return dev_err_probe(dev, PTR_ERR(pvt->clk), "failed to get clock\n");
- ret = pvt_clk_enable(dev, pvt);
- if (ret) {
- dev_err(dev, "failed to enable clock\n");
- return ret;
- }
-
- pvt->rst = devm_reset_control_get_exclusive(dev, NULL);
+ pvt->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(pvt->rst))
return dev_err_probe(dev, PTR_ERR(pvt->rst),
"failed to get reset control\n");
- ret = pvt_reset_control_deassert(dev, pvt);
- if (ret)
- return dev_err_probe(dev, ret, "cannot deassert reset control\n");
+ if (pvt->rst) {
+ ret = pvt_reset_control_deassert(dev, pvt);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "cannot deassert reset control\n");
+ }
ret = regmap_read(pvt->c_map, PVT_IP_CONFIG, &val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
pvt->t_num = ts_num;
pvt->p_num = pd_num;
pvt->v_num = vm_num;
@@ -553,6 +827,10 @@ static int mr75203_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = pvt_set_temp_coeff(dev, pvt);
+ if (ret)
+ return ret;
+
temp_config = devm_kcalloc(dev, ts_num + 1,
sizeof(*temp_config), GFP_KERNEL);
if (!temp_config)
@@ -561,6 +839,8 @@ static int mr75203_probe(struct platform_device *pdev)
memset32(temp_config, HWMON_T_INPUT, ts_num);
pvt_temp.config = temp_config;
pvt_info[index++] = &pvt_temp;
+
+ pvt_ts_dbgfs_create(pvt, dev);
}
if (pd_num) {
@@ -570,44 +850,45 @@ static int mr75203_probe(struct platform_device *pdev)
}
if (vm_num) {
- u32 num = vm_num;
+ u8 vm_idx[VM_NUM_MAX];
ret = pvt_get_regmap(pdev, "vm", pvt);
if (ret)
return ret;
- pvt->vm_idx = devm_kcalloc(dev, vm_num, sizeof(*pvt->vm_idx),
- GFP_KERNEL);
- if (!pvt->vm_idx)
- return -ENOMEM;
-
- ret = device_property_read_u8_array(dev, "intel,vm-map",
- pvt->vm_idx, vm_num);
+ ret = device_property_read_u8_array(dev, "intel,vm-map", vm_idx,
+ vm_num);
if (ret) {
- num = 0;
+ /*
+ * In case the intel,vm-map property is not defined, we
+ * assume incremental channel numbers.
+ */
+ for (i = 0; i < vm_num; i++)
+ vm_idx[i] = i;
} else {
for (i = 0; i < vm_num; i++)
- if (pvt->vm_idx[i] >= vm_num ||
- pvt->vm_idx[i] == 0xff) {
- num = i;
+ if (vm_idx[i] >= vm_num || vm_idx[i] == 0xff) {
+ pvt->v_num = i;
+ vm_num = i;
break;
}
}
- /*
- * Incase intel,vm-map property is not defined, we assume
- * incremental channel numbers.
- */
- for (i = num; i < vm_num; i++)
- pvt->vm_idx[i] = i;
+ ret = pvt_get_active_channel(dev, pvt, vm_num, ch_num, vm_idx);
+ if (ret)
+ return ret;
+
+ ret = pvt_get_pre_scaler(dev, pvt);
+ if (ret)
+ return ret;
- in_config = devm_kcalloc(dev, num + 1,
+ in_config = devm_kcalloc(dev, pvt->vm_channels.total + 1,
sizeof(*in_config), GFP_KERNEL);
if (!in_config)
return -ENOMEM;
- memset32(in_config, HWMON_I_INPUT, num);
- in_config[num] = 0;
+ memset32(in_config, HWMON_I_INPUT, pvt->vm_channels.total);
+ in_config[pvt->vm_channels.total] = 0;
pvt_in.config = in_config;
pvt_info[index++] = &pvt_in;
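
Two resource-handling conversions in the mr75203 probe path above follow the same managed-API idiom: devm_clk_get_enabled() hands back a clock that is already prepared and enabled (and is disabled again automatically on driver detach), while the _optional_ reset getter returns NULL instead of an error when no reset line is described. A hedged, self-contained sketch with illustrative names only:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/reset.h>

static int example_get_resources(struct device *dev)
{
	struct clk *clk;
	struct reset_control *rst;

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");

	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return dev_err_probe(dev, PTR_ERR(rst), "failed to get reset\n");

	/* reset_control_deassert() is a no-op when rst is NULL (no reset line). */
	return reset_control_deassert(rst);
}
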
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 6a9f420e7d32..a872f783e9cc 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -412,7 +412,7 @@ nct6683_create_attr_group(struct device *dev,
struct sensor_device_attr_u *su;
struct attribute_group *group;
struct attribute **attrs;
- int i, j, count;
+ int i, count;
if (repeat <= 0)
return ERR_PTR(-EINVAL);
@@ -443,7 +443,7 @@ nct6683_create_attr_group(struct device *dev,
for (i = 0; i < repeat; i++) {
t = tg->templates;
- for (j = 0; *t != NULL; j++) {
+ while (*t) {
snprintf(su->name, sizeof(su->name),
(*t)->dev_attr.attr.name, tg->base + i);
if ((*t)->s2) {
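
The nct6683 hunk above only drops an unused index: the template array is NULL-terminated, so walking it needs nothing more than the pointer itself. A trivial sketch of the same iteration shape, with hypothetical names:

static int example_count_entries(const char * const *entries)
{
	int count = 0;

	while (*entries) {
		count++;
		entries++;
	}

	return count;
}
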
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 41c97cfacfb8..b34783784213 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -355,7 +355,7 @@ static void nct6791_enable_io_mapping(struct nct6775_sio_data *sio_data)
}
}
-static int __maybe_unused nct6775_suspend(struct device *dev)
+static int nct6775_suspend(struct device *dev)
{
int err;
u16 tmp;
@@ -386,7 +386,7 @@ out:
return err;
}
-static int __maybe_unused nct6775_resume(struct device *dev)
+static int nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
@@ -467,7 +467,7 @@ abort:
return err;
}
-static SIMPLE_DEV_PM_OPS(nct6775_dev_pm_ops, nct6775_suspend, nct6775_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(nct6775_dev_pm_ops, nct6775_suspend, nct6775_resume);
static void
nct6775_check_fan_inputs(struct nct6775_data *data, struct nct6775_sio_data *sio_data)
@@ -934,7 +934,7 @@ static int nct6775_platform_probe(struct platform_device *pdev)
static struct platform_driver nct6775_driver = {
.driver = {
.name = DRVNAME,
- .pm = &nct6775_dev_pm_ops,
+ .pm = pm_sleep_ptr(&nct6775_dev_pm_ops),
},
.probe = nct6775_platform_probe,
};
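
The nct6775 change above is the now-common PM-ops modernization: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() keeps the suspend/resume symbols syntactically referenced (so no __maybe_unused annotations are needed) while letting the compiler and linker discard them entirely when CONFIG_PM_SLEEP is off. A hedged sketch with placeholder names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* save device state here */
}

static int example_resume(struct device *dev)
{
	return 0;	/* restore device state here */
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	/* .probe omitted in this sketch */
	.driver = {
		.name	= "example",
		.pm	= pm_sleep_ptr(&example_pm_ops),
	},
};
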
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index d1eeef02b6dc..a175f8283695 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -1038,7 +1038,7 @@ static int nct7802_detect(struct i2c_client *client,
if (reg < 0 || (reg & 0x3f))
return -ENODEV;
- strlcpy(info->type, "nct7802", I2C_NAME_SIZE);
+ strscpy(info->type, "nct7802", I2C_NAME_SIZE);
return 0;
}
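
The strlcpy() -> strscpy() conversions in this and the following nct7904 hunk are mechanical: strscpy() always NUL-terminates, never reads beyond the source string, and reports truncation as -E2BIG rather than returning the would-be length. A hedged usage sketch:

#include <linux/i2c.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_set_chip_name(struct i2c_board_info *info)
{
	/* Cannot truncate for this short literal, but check the result anyway. */
	if (strscpy(info->type, "nct7802", I2C_NAME_SIZE) < 0)
		pr_warn("chip name truncated\n");
}
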
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b1c837fc407a..ecc5db0011a3 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -798,7 +798,7 @@ static int nct7904_detect(struct i2c_client *client,
(i2c_smbus_read_byte_data(client, BANK_SEL_REG) & 0xf8) != 0x00)
return -ENODEV;
- strlcpy(info->type, "nct7904", I2C_NAME_SIZE);
+ strscpy(info->type, "nct7904", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
index dd892ff5a3e8..533f38b0b4e9 100644
--- a/drivers/hwmon/nzxt-smart2.c
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -787,6 +787,7 @@ static void nzxt_smart2_hid_remove(struct hid_device *hdev)
static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
{ HID_USB_DEVICE(0x1e71, 0x2006) }, /* NZXT Smart Device V2 */
{ HID_USB_DEVICE(0x1e71, 0x200d) }, /* NZXT Smart Device V2 */
+ { HID_USB_DEVICE(0x1e71, 0x200f) }, /* NZXT Smart Device V2 */
{ HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index b221be1f35f3..9e1744fccb35 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -227,13 +227,11 @@ static int p8_i2c_occ_probe(struct i2c_client *client)
return occ_setup(occ);
}
-static int p8_i2c_occ_remove(struct i2c_client *client)
+static void p8_i2c_occ_remove(struct i2c_client *client)
{
struct occ *occ = dev_get_drvdata(&client->dev);
occ_shutdown(occ);
-
- return 0;
}
static const struct of_device_id p8_i2c_occ_of_match[] = {
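
The p8_i2c change reflects the I2C core's switch to a void-returning remove() callback: errors at remove time cannot be meaningfully propagated, so the callback now just performs its teardown and returns nothing. A hedged sketch of the new shape, with an illustrative private-data type:

#include <linux/i2c.h>
#include <linux/types.h>

struct example_priv {
	bool running;
};

static void example_shutdown(struct example_priv *priv)
{
	priv->running = false;	/* stand-in for real teardown work */
}

static void example_i2c_remove(struct i2c_client *client)
{
	struct example_priv *priv = dev_get_drvdata(&client->dev);

	/* Undo whatever probe set up; there is no status to report back. */
	example_shutdown(priv);
}
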
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 0828436a1f6c..a4adc8bd531f 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -35,6 +35,18 @@
#include <linux/acpi.h>
#include <linux/io.h>
+#define DRIVER_NAME "pc87360"
+
+/* (temp & vin) channel conversion status register flags (pdf sec.11.5.12) */
+#define CHAN_CNVRTD 0x80 /* new data ready */
+#define CHAN_ENA 0x01 /* enabled channel (temp or vin) */
+#define CHAN_ALM_ENA 0x10 /* propagate to alarms-reg ?? (chk val!) */
+#define CHAN_READY (CHAN_ENA|CHAN_CNVRTD) /* sample ready mask */
+
+#define TEMP_OTS_OE 0x20 /* OTS Output Enable */
+#define VIN_RW1C_MASK (CHAN_READY|CHAN_ALM_MAX|CHAN_ALM_MIN) /* 0x87 */
+#define TEMP_RW1C_MASK (VIN_RW1C_MASK|TEMP_ALM_CRIT|TEMP_FAULT) /* 0xCF */
+
static u8 devid;
static struct platform_device *pdev;
static unsigned short extra_isa[3];
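
The CHAN_* flag definitions moved above encode how the per-channel status byte is interpreted throughout this driver: a reading is only trusted when the channel is both enabled and holds a freshly converted sample. A minimal hedged sketch of that check (CHAN_READY is the mask defined above; the status value would come from the driver's banked register read):

#include <linux/types.h>

static bool example_channel_ready(u8 status)
{
	/* CHAN_READY == CHAN_ENA | CHAN_CNVRTD == 0x81 */
	return (status & CHAN_READY) == CHAN_READY;
}
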
@@ -211,183 +223,181 @@ struct pc87360_data {
};
/*
- * Functions declaration
+ * ldi is the logical device index
+ * bank is for voltages and temperatures only
*/
-
-static int pc87360_probe(struct platform_device *pdev);
-static int pc87360_remove(struct platform_device *pdev);
-
static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg);
-static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg, u8 value);
-static void pc87360_init_device(struct platform_device *pdev,
- int use_thermistors);
-static struct pc87360_data *pc87360_update_device(struct device *dev);
-
-/*
- * Driver data
- */
-
-static struct platform_driver pc87360_driver = {
- .driver = {
- .name = "pc87360",
- },
- .probe = pc87360_probe,
- .remove = pc87360_remove,
-};
+ u8 reg)
+{
+ int res;
-/*
- * Sysfs stuff
- */
+ mutex_lock(&(data->lock));
+ if (bank != NO_BANK)
+ outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
+ res = inb_p(data->address[ldi] + reg);
+ mutex_unlock(&(data->lock));
-static ssize_t fan_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[attr->index],
- FAN_DIV_FROM_REG(data->fan_status[attr->index])));
-}
-static ssize_t fan_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[attr->index],
- FAN_DIV_FROM_REG(data->fan_status[attr->index])));
-}
-static ssize_t fan_div_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n",
- FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+ return res;
}
-static ssize_t fan_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+
+static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
+ u8 reg, u8 value)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n",
- FAN_STATUS_FROM_REG(data->fan_status[attr->index]));
+ mutex_lock(&(data->lock));
+ if (bank != NO_BANK)
+ outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
+ outb_p(value, data->address[ldi] + reg);
+ mutex_unlock(&(data->lock));
}
-static ssize_t fan_min_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+
+static void pc87360_autodiv(struct device *dev, int nr)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
- long fan_min;
- int err;
-
- err = kstrtol(buf, 10, &fan_min);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- fan_min = FAN_TO_REG(fan_min,
- FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+ u8 old_min = data->fan_min[nr];
- /* If it wouldn't fit, change clock divisor */
- while (fan_min > 255
- && (data->fan_status[attr->index] & 0x60) != 0x60) {
- fan_min >>= 1;
- data->fan[attr->index] >>= 1;
- data->fan_status[attr->index] += 0x20;
+ /* Increase clock divider if needed and possible */
+ if ((data->fan_status[nr] & 0x04) /* overflow flag */
+ || (data->fan[nr] >= 224)) { /* next to overflow */
+ if ((data->fan_status[nr] & 0x60) != 0x60) {
+ data->fan_status[nr] += 0x20;
+ data->fan_min[nr] >>= 1;
+ data->fan[nr] >>= 1;
+ dev_dbg(dev,
+ "Increasing clock divider to %d for fan %d\n",
+ FAN_DIV_FROM_REG(data->fan_status[nr]), nr + 1);
+ }
+ } else {
+ /* Decrease clock divider if possible */
+ while (!(data->fan_min[nr] & 0x80) /* min "nails" divider */
+ && data->fan[nr] < 85 /* bad accuracy */
+ && (data->fan_status[nr] & 0x60) != 0x00) {
+ data->fan_status[nr] -= 0x20;
+ data->fan_min[nr] <<= 1;
+ data->fan[nr] <<= 1;
+ dev_dbg(dev,
+ "Decreasing clock divider to %d for fan %d\n",
+ FAN_DIV_FROM_REG(data->fan_status[nr]),
+ nr + 1);
+ }
}
- data->fan_min[attr->index] = fan_min > 255 ? 255 : fan_min;
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_MIN(attr->index),
- data->fan_min[attr->index]);
-
- /* Write new divider, preserve alarm bits */
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_STATUS(attr->index),
- data->fan_status[attr->index] & 0xF9);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-static struct sensor_device_attribute fan_input[] = {
- SENSOR_ATTR_RO(fan1_input, fan_input, 0),
- SENSOR_ATTR_RO(fan2_input, fan_input, 1),
- SENSOR_ATTR_RO(fan3_input, fan_input, 2),
-};
-static struct sensor_device_attribute fan_status[] = {
- SENSOR_ATTR_RO(fan1_status, fan_status, 0),
- SENSOR_ATTR_RO(fan2_status, fan_status, 1),
- SENSOR_ATTR_RO(fan3_status, fan_status, 2),
-};
-static struct sensor_device_attribute fan_div[] = {
- SENSOR_ATTR_RO(fan1_div, fan_div, 0),
- SENSOR_ATTR_RO(fan2_div, fan_div, 1),
- SENSOR_ATTR_RO(fan3_div, fan_div, 2),
-};
-static struct sensor_device_attribute fan_min[] = {
- SENSOR_ATTR_RW(fan1_min, fan_min, 0),
- SENSOR_ATTR_RW(fan2_min, fan_min, 1),
- SENSOR_ATTR_RW(fan3_min, fan_min, 2),
-};
-
-#define FAN_UNIT_ATTRS(X) \
-{ &fan_input[X].dev_attr.attr, \
- &fan_status[X].dev_attr.attr, \
- &fan_div[X].dev_attr.attr, \
- &fan_min[X].dev_attr.attr, \
- NULL \
+ /* Write new fan min if it changed */
+ if (old_min != data->fan_min[nr]) {
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_MIN(nr),
+ data->fan_min[nr]);
+ }
}
-static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n",
- PWM_FROM_REG(data->pwm[attr->index],
- FAN_CONFIG_INVERT(data->fan_conf,
- attr->index)));
-}
-static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static struct pc87360_data *pc87360_update_device(struct device *dev)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
+ u8 i;
mutex_lock(&data->update_lock);
- data->pwm[attr->index] = PWM_TO_REG(val,
- FAN_CONFIG_INVERT(data->fan_conf, attr->index));
- pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(attr->index),
- data->pwm[attr->index]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static struct sensor_device_attribute pwm[] = {
- SENSOR_ATTR_RW(pwm1, pwm, 0),
- SENSOR_ATTR_RW(pwm2, pwm, 1),
- SENSOR_ATTR_RW(pwm3, pwm, 2),
-};
+ if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
+ dev_dbg(dev, "Data update\n");
-static struct attribute *pc8736x_fan_attr[][5] = {
- FAN_UNIT_ATTRS(0),
- FAN_UNIT_ATTRS(1),
- FAN_UNIT_ATTRS(2)
-};
+ /* Fans */
+ for (i = 0; i < data->fannr; i++) {
+ if (FAN_CONFIG_MONITOR(data->fan_conf, i)) {
+ data->fan_status[i] =
+ pc87360_read_value(data, LD_FAN,
+ NO_BANK, PC87360_REG_FAN_STATUS(i));
+ data->fan[i] = pc87360_read_value(data, LD_FAN,
+ NO_BANK, PC87360_REG_FAN(i));
+ data->fan_min[i] = pc87360_read_value(data,
+ LD_FAN, NO_BANK,
+ PC87360_REG_FAN_MIN(i));
+ /* Change clock divider if needed */
+ pc87360_autodiv(dev, i);
+ /* Clear bits and write new divider */
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_STATUS(i),
+ data->fan_status[i]);
+ }
+ if (FAN_CONFIG_CONTROL(data->fan_conf, i))
+ data->pwm[i] = pc87360_read_value(data, LD_FAN,
+ NO_BANK, PC87360_REG_PWM(i));
+ }
-static const struct attribute_group pc8736x_fan_attr_group[] = {
- { .attrs = pc8736x_fan_attr[0], },
- { .attrs = pc8736x_fan_attr[1], },
- { .attrs = pc8736x_fan_attr[2], },
-};
+ /* Voltages */
+ for (i = 0; i < data->innr; i++) {
+ data->in_status[i] = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ /* Clear bits */
+ pc87360_write_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS,
+ data->in_status[i]);
+ if ((data->in_status[i] & CHAN_READY) == CHAN_READY) {
+ data->in[i] = pc87360_read_value(data, LD_IN,
+ i, PC87365_REG_IN);
+ }
+ if (data->in_status[i] & CHAN_ENA) {
+ data->in_min[i] = pc87360_read_value(data,
+ LD_IN, i,
+ PC87365_REG_IN_MIN);
+ data->in_max[i] = pc87360_read_value(data,
+ LD_IN, i,
+ PC87365_REG_IN_MAX);
+ if (i >= 11)
+ data->in_crit[i-11] =
+ pc87360_read_value(data, LD_IN,
+ i, PC87365_REG_TEMP_CRIT);
+ }
+ }
+ if (data->innr) {
+ data->in_alarms = pc87360_read_value(data, LD_IN,
+ NO_BANK, PC87365_REG_IN_ALARMS1)
+ | ((pc87360_read_value(data, LD_IN,
+ NO_BANK, PC87365_REG_IN_ALARMS2)
+ & 0x07) << 8);
+ data->vid = (data->vid_conf & 0xE0) ?
+ pc87360_read_value(data, LD_IN,
+ NO_BANK, PC87365_REG_VID) : 0x1F;
+ }
+
+ /* Temperatures */
+ for (i = 0; i < data->tempnr; i++) {
+ data->temp_status[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS);
+ /* Clear bits */
+ pc87360_write_value(data, LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS,
+ data->temp_status[i]);
+ if ((data->temp_status[i] & CHAN_READY) == CHAN_READY) {
+ data->temp[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP);
+ }
+ if (data->temp_status[i] & CHAN_ENA) {
+ data->temp_min[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_MIN);
+ data->temp_max[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_MAX);
+ data->temp_crit[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_CRIT);
+ }
+ }
+ if (data->tempnr) {
+ data->temp_alarms = pc87360_read_value(data, LD_TEMP,
+ NO_BANK, PC87365_REG_TEMP_ALARMS)
+ & 0x3F;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
static ssize_t in_input_show(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -397,29 +407,52 @@ static ssize_t in_input_show(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
data->in_vref));
}
-static ssize_t in_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+
+static struct sensor_device_attribute in_input[] = {
+ SENSOR_ATTR_RO(in0_input, in_input, 0),
+ SENSOR_ATTR_RO(in1_input, in_input, 1),
+ SENSOR_ATTR_RO(in2_input, in_input, 2),
+ SENSOR_ATTR_RO(in3_input, in_input, 3),
+ SENSOR_ATTR_RO(in4_input, in_input, 4),
+ SENSOR_ATTR_RO(in5_input, in_input, 5),
+ SENSOR_ATTR_RO(in6_input, in_input, 6),
+ SENSOR_ATTR_RO(in7_input, in_input, 7),
+ SENSOR_ATTR_RO(in8_input, in_input, 8),
+ SENSOR_ATTR_RO(in9_input, in_input, 9),
+ SENSOR_ATTR_RO(in10_input, in_input, 10),
+};
+
+static ssize_t in_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
- data->in_vref));
+ return sprintf(buf, "%u\n", data->in_status[attr->index]);
}
-static ssize_t in_max_show(struct device *dev,
+
+static struct sensor_device_attribute in_status[] = {
+ SENSOR_ATTR_RO(in0_status, in_status, 0),
+ SENSOR_ATTR_RO(in1_status, in_status, 1),
+ SENSOR_ATTR_RO(in2_status, in_status, 2),
+ SENSOR_ATTR_RO(in3_status, in_status, 3),
+ SENSOR_ATTR_RO(in4_status, in_status, 4),
+ SENSOR_ATTR_RO(in5_status, in_status, 5),
+ SENSOR_ATTR_RO(in6_status, in_status, 6),
+ SENSOR_ATTR_RO(in7_status, in_status, 7),
+ SENSOR_ATTR_RO(in8_status, in_status, 8),
+ SENSOR_ATTR_RO(in9_status, in_status, 9),
+ SENSOR_ATTR_RO(in10_status, in_status, 10),
+};
+
+static ssize_t in_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
data->in_vref));
}
-static ssize_t in_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", data->in_status[attr->index]);
-}
+
static ssize_t in_min_store(struct device *dev,
struct device_attribute *devattr, const char *buf,
size_t count)
@@ -440,6 +473,30 @@ static ssize_t in_min_store(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
+
+static struct sensor_device_attribute in_min[] = {
+ SENSOR_ATTR_RW(in0_min, in_min, 0),
+ SENSOR_ATTR_RW(in1_min, in_min, 1),
+ SENSOR_ATTR_RW(in2_min, in_min, 2),
+ SENSOR_ATTR_RW(in3_min, in_min, 3),
+ SENSOR_ATTR_RW(in4_min, in_min, 4),
+ SENSOR_ATTR_RW(in5_min, in_min, 5),
+ SENSOR_ATTR_RW(in6_min, in_min, 6),
+ SENSOR_ATTR_RW(in7_min, in_min, 7),
+ SENSOR_ATTR_RW(in8_min, in_min, 8),
+ SENSOR_ATTR_RW(in9_min, in_min, 9),
+ SENSOR_ATTR_RW(in10_min, in_min, 10),
+};
+
+static ssize_t in_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
+ data->in_vref));
+}
+
static ssize_t in_max_store(struct device *dev,
struct device_attribute *devattr, const char *buf,
size_t count)
@@ -462,45 +519,6 @@ static ssize_t in_max_store(struct device *dev,
return count;
}
-static struct sensor_device_attribute in_input[] = {
- SENSOR_ATTR_RO(in0_input, in_input, 0),
- SENSOR_ATTR_RO(in1_input, in_input, 1),
- SENSOR_ATTR_RO(in2_input, in_input, 2),
- SENSOR_ATTR_RO(in3_input, in_input, 3),
- SENSOR_ATTR_RO(in4_input, in_input, 4),
- SENSOR_ATTR_RO(in5_input, in_input, 5),
- SENSOR_ATTR_RO(in6_input, in_input, 6),
- SENSOR_ATTR_RO(in7_input, in_input, 7),
- SENSOR_ATTR_RO(in8_input, in_input, 8),
- SENSOR_ATTR_RO(in9_input, in_input, 9),
- SENSOR_ATTR_RO(in10_input, in_input, 10),
-};
-static struct sensor_device_attribute in_status[] = {
- SENSOR_ATTR_RO(in0_status, in_status, 0),
- SENSOR_ATTR_RO(in1_status, in_status, 1),
- SENSOR_ATTR_RO(in2_status, in_status, 2),
- SENSOR_ATTR_RO(in3_status, in_status, 3),
- SENSOR_ATTR_RO(in4_status, in_status, 4),
- SENSOR_ATTR_RO(in5_status, in_status, 5),
- SENSOR_ATTR_RO(in6_status, in_status, 6),
- SENSOR_ATTR_RO(in7_status, in_status, 7),
- SENSOR_ATTR_RO(in8_status, in_status, 8),
- SENSOR_ATTR_RO(in9_status, in_status, 9),
- SENSOR_ATTR_RO(in10_status, in_status, 10),
-};
-static struct sensor_device_attribute in_min[] = {
- SENSOR_ATTR_RW(in0_min, in_min, 0),
- SENSOR_ATTR_RW(in1_min, in_min, 1),
- SENSOR_ATTR_RW(in2_min, in_min, 2),
- SENSOR_ATTR_RW(in3_min, in_min, 3),
- SENSOR_ATTR_RW(in4_min, in_min, 4),
- SENSOR_ATTR_RW(in5_min, in_min, 5),
- SENSOR_ATTR_RW(in6_min, in_min, 6),
- SENSOR_ATTR_RW(in7_min, in_min, 7),
- SENSOR_ATTR_RW(in8_min, in_min, 8),
- SENSOR_ATTR_RW(in9_min, in_min, 9),
- SENSOR_ATTR_RW(in10_min, in_min, 10),
-};
static struct sensor_device_attribute in_max[] = {
SENSOR_ATTR_RW(in0_max, in_max, 0),
SENSOR_ATTR_RW(in1_max, in_max, 1),
@@ -534,14 +552,6 @@ static ssize_t in_min_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
}
-static ssize_t in_max_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct pc87360_data *data = pc87360_update_device(dev);
- unsigned nr = to_sensor_dev_attr(devattr)->index;
-
- return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
-}
static struct sensor_device_attribute in_min_alarm[] = {
SENSOR_ATTR_RO(in0_min_alarm, in_min_alarm, 0),
@@ -556,6 +566,16 @@ static struct sensor_device_attribute in_min_alarm[] = {
SENSOR_ATTR_RO(in9_min_alarm, in_min_alarm, 9),
SENSOR_ATTR_RO(in10_min_alarm, in_min_alarm, 10),
};
+
+static ssize_t in_max_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pc87360_data *data = pc87360_update_device(dev);
+ unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
+}
+
static struct sensor_device_attribute in_max_alarm[] = {
SENSOR_ATTR_RO(in0_max_alarm, in_max_alarm, 0),
SENSOR_ATTR_RO(in1_max_alarm, in_max_alarm, 1),
@@ -592,6 +612,7 @@ static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
struct pc87360_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->vrm);
}
+
static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -648,37 +669,39 @@ static ssize_t therm_input_show(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
data->in_vref));
}
-static ssize_t therm_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+
+/*
+ * the +11 term below reflects the fact that VLM units 11,12,13 are
+ * used in the chip to measure voltage across the thermistors
+ */
+static struct sensor_device_attribute therm_input[] = {
+ SENSOR_ATTR_RO(temp4_input, therm_input, 0 + 11),
+ SENSOR_ATTR_RO(temp5_input, therm_input, 1 + 11),
+ SENSOR_ATTR_RO(temp6_input, therm_input, 2 + 11),
+};
+
+static ssize_t therm_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
- data->in_vref));
+ return sprintf(buf, "%u\n", data->in_status[attr->index]);
}
-static ssize_t therm_max_show(struct device *dev,
+
+static struct sensor_device_attribute therm_status[] = {
+ SENSOR_ATTR_RO(temp4_status, therm_status, 0 + 11),
+ SENSOR_ATTR_RO(temp5_status, therm_status, 1 + 11),
+ SENSOR_ATTR_RO(temp6_status, therm_status, 2 + 11),
+};
+
+static ssize_t therm_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
- data->in_vref));
-}
-static ssize_t therm_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[attr->index-11],
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
data->in_vref));
}
-static ssize_t therm_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", data->in_status[attr->index]);
-}
static ssize_t therm_min_store(struct device *dev,
struct device_attribute *devattr,
@@ -701,6 +724,21 @@ static ssize_t therm_min_store(struct device *dev,
return count;
}
+static struct sensor_device_attribute therm_min[] = {
+ SENSOR_ATTR_RW(temp4_min, therm_min, 0 + 11),
+ SENSOR_ATTR_RW(temp5_min, therm_min, 1 + 11),
+ SENSOR_ATTR_RW(temp6_min, therm_min, 2 + 11),
+};
+
+static ssize_t therm_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
+ data->in_vref));
+}
+
static ssize_t therm_max_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -721,6 +759,22 @@ static ssize_t therm_max_store(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
+
+static struct sensor_device_attribute therm_max[] = {
+ SENSOR_ATTR_RW(temp4_max, therm_max, 0 + 11),
+ SENSOR_ATTR_RW(temp5_max, therm_max, 1 + 11),
+ SENSOR_ATTR_RW(temp6_max, therm_max, 2 + 11),
+};
+
+static ssize_t therm_crit_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[attr->index-11],
+ data->in_vref));
+}
+
static ssize_t therm_crit_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -742,30 +796,6 @@ static ssize_t therm_crit_store(struct device *dev,
return count;
}
-/*
- * the +11 term below reflects the fact that VLM units 11,12,13 are
- * used in the chip to measure voltage across the thermistors
- */
-static struct sensor_device_attribute therm_input[] = {
- SENSOR_ATTR_RO(temp4_input, therm_input, 0 + 11),
- SENSOR_ATTR_RO(temp5_input, therm_input, 1 + 11),
- SENSOR_ATTR_RO(temp6_input, therm_input, 2 + 11),
-};
-static struct sensor_device_attribute therm_status[] = {
- SENSOR_ATTR_RO(temp4_status, therm_status, 0 + 11),
- SENSOR_ATTR_RO(temp5_status, therm_status, 1 + 11),
- SENSOR_ATTR_RO(temp6_status, therm_status, 2 + 11),
-};
-static struct sensor_device_attribute therm_min[] = {
- SENSOR_ATTR_RW(temp4_min, therm_min, 0 + 11),
- SENSOR_ATTR_RW(temp5_min, therm_min, 1 + 11),
- SENSOR_ATTR_RW(temp6_min, therm_min, 2 + 11),
-};
-static struct sensor_device_attribute therm_max[] = {
- SENSOR_ATTR_RW(temp4_max, therm_max, 0 + 11),
- SENSOR_ATTR_RW(temp5_max, therm_max, 1 + 11),
- SENSOR_ATTR_RW(temp6_max, therm_max, 2 + 11),
-};
static struct sensor_device_attribute therm_crit[] = {
SENSOR_ATTR_RW(temp4_crit, therm_crit, 0 + 11),
SENSOR_ATTR_RW(temp5_crit, therm_crit, 1 + 11),
@@ -776,7 +806,6 @@ static struct sensor_device_attribute therm_crit[] = {
* show_therm_min/max_alarm() reads data from the per-channel voltage
* status register (sec 11.5.12)
*/
-
static ssize_t therm_min_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -786,6 +815,13 @@ static ssize_t therm_min_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
}
+
+static struct sensor_device_attribute therm_min_alarm[] = {
+ SENSOR_ATTR_RO(temp4_min_alarm, therm_min_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_min_alarm, therm_min_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_min_alarm, therm_min_alarm, 2 + 11),
+};
+
static ssize_t therm_max_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -795,6 +831,13 @@ static ssize_t therm_max_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
}
+
+static struct sensor_device_attribute therm_max_alarm[] = {
+ SENSOR_ATTR_RO(temp4_max_alarm, therm_max_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_max_alarm, therm_max_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_max_alarm, therm_max_alarm, 2 + 11),
+};
+
static ssize_t therm_crit_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -805,16 +848,6 @@ static ssize_t therm_crit_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & TEMP_ALM_CRIT));
}
-static struct sensor_device_attribute therm_min_alarm[] = {
- SENSOR_ATTR_RO(temp4_min_alarm, therm_min_alarm, 0 + 11),
- SENSOR_ATTR_RO(temp5_min_alarm, therm_min_alarm, 1 + 11),
- SENSOR_ATTR_RO(temp6_min_alarm, therm_min_alarm, 2 + 11),
-};
-static struct sensor_device_attribute therm_max_alarm[] = {
- SENSOR_ATTR_RO(temp4_max_alarm, therm_max_alarm, 0 + 11),
- SENSOR_ATTR_RO(temp5_max_alarm, therm_max_alarm, 1 + 11),
- SENSOR_ATTR_RO(temp6_max_alarm, therm_max_alarm, 2 + 11),
-};
static struct sensor_device_attribute therm_crit_alarm[] = {
SENSOR_ATTR_RO(temp4_crit_alarm, therm_crit_alarm, 0 + 11),
SENSOR_ATTR_RO(temp5_crit_alarm, therm_crit_alarm, 1 + 11),
@@ -849,37 +882,32 @@ static ssize_t temp_input_show(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
-static ssize_t temp_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[attr->index]));
-}
+static struct sensor_device_attribute temp_input[] = {
+ SENSOR_ATTR_RO(temp1_input, temp_input, 0),
+ SENSOR_ATTR_RO(temp2_input, temp_input, 1),
+ SENSOR_ATTR_RO(temp3_input, temp_input, 2),
+};
-static ssize_t temp_max_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[attr->index]));
+ return sprintf(buf, "%d\n", data->temp_status[attr->index]);
}
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n",
- TEMP_FROM_REG(data->temp_crit[attr->index]));
-}
+static struct sensor_device_attribute temp_status[] = {
+ SENSOR_ATTR_RO(temp1_status, temp_status, 0),
+ SENSOR_ATTR_RO(temp2_status, temp_status, 1),
+ SENSOR_ATTR_RO(temp3_status, temp_status, 2),
+};
-static ssize_t temp_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n", data->temp_status[attr->index]);
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[attr->index]));
}
static ssize_t temp_min_store(struct device *dev,
@@ -903,6 +931,20 @@ static ssize_t temp_min_store(struct device *dev,
return count;
}
+static struct sensor_device_attribute temp_min[] = {
+ SENSOR_ATTR_RW(temp1_min, temp_min, 0),
+ SENSOR_ATTR_RW(temp2_min, temp_min, 1),
+ SENSOR_ATTR_RW(temp3_min, temp_min, 2),
+};
+
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[attr->index]));
+}
+
static ssize_t temp_max_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -924,6 +966,21 @@ static ssize_t temp_max_store(struct device *dev,
return count;
}
+static struct sensor_device_attribute temp_max[] = {
+ SENSOR_ATTR_RW(temp1_max, temp_max, 0),
+ SENSOR_ATTR_RW(temp2_max, temp_max, 1),
+ SENSOR_ATTR_RW(temp3_max, temp_max, 2),
+};
+
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%d\n",
+ TEMP_FROM_REG(data->temp_crit[attr->index]));
+}
+
static ssize_t temp_crit_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -945,47 +1002,17 @@ static ssize_t temp_crit_store(struct device *dev,
return count;
}
-static struct sensor_device_attribute temp_input[] = {
- SENSOR_ATTR_RO(temp1_input, temp_input, 0),
- SENSOR_ATTR_RO(temp2_input, temp_input, 1),
- SENSOR_ATTR_RO(temp3_input, temp_input, 2),
-};
-static struct sensor_device_attribute temp_status[] = {
- SENSOR_ATTR_RO(temp1_status, temp_status, 0),
- SENSOR_ATTR_RO(temp2_status, temp_status, 1),
- SENSOR_ATTR_RO(temp3_status, temp_status, 2),
-};
-static struct sensor_device_attribute temp_min[] = {
- SENSOR_ATTR_RW(temp1_min, temp_min, 0),
- SENSOR_ATTR_RW(temp2_min, temp_min, 1),
- SENSOR_ATTR_RW(temp3_min, temp_min, 2),
-};
-static struct sensor_device_attribute temp_max[] = {
- SENSOR_ATTR_RW(temp1_max, temp_max, 0),
- SENSOR_ATTR_RW(temp2_max, temp_max, 1),
- SENSOR_ATTR_RW(temp3_max, temp_max, 2),
-};
static struct sensor_device_attribute temp_crit[] = {
SENSOR_ATTR_RW(temp1_crit, temp_crit, 0),
SENSOR_ATTR_RW(temp2_crit, temp_crit, 1),
SENSOR_ATTR_RW(temp3_crit, temp_crit, 2),
};
-static ssize_t alarms_temp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", data->temp_alarms);
-}
-
-static DEVICE_ATTR_RO(alarms_temp);
-
/*
- * show_temp_min/max_alarm() reads data from the per-channel status
+ * temp_min/max_alarm_show() reads data from the per-channel status
* register (sec 12.3.7), not the temp event status registers (sec
* 12.3.2) that show_temp_alarm() reads (via data->temp_alarms)
*/
-
static ssize_t temp_min_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -996,6 +1023,12 @@ static ssize_t temp_min_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MIN));
}
+static struct sensor_device_attribute temp_min_alarm[] = {
+ SENSOR_ATTR_RO(temp1_min_alarm, temp_min_alarm, 0),
+ SENSOR_ATTR_RO(temp2_min_alarm, temp_min_alarm, 1),
+ SENSOR_ATTR_RO(temp3_min_alarm, temp_min_alarm, 2),
+};
+
static ssize_t temp_max_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -1006,6 +1039,12 @@ static ssize_t temp_max_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MAX));
}
+static struct sensor_device_attribute temp_max_alarm[] = {
+ SENSOR_ATTR_RO(temp1_max_alarm, temp_max_alarm, 0),
+ SENSOR_ATTR_RO(temp2_max_alarm, temp_max_alarm, 1),
+ SENSOR_ATTR_RO(temp3_max_alarm, temp_max_alarm, 2),
+};
+
static ssize_t temp_crit_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -1016,18 +1055,6 @@ static ssize_t temp_crit_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_ALM_CRIT));
}
-static struct sensor_device_attribute temp_min_alarm[] = {
- SENSOR_ATTR_RO(temp1_min_alarm, temp_min_alarm, 0),
- SENSOR_ATTR_RO(temp2_min_alarm, temp_min_alarm, 1),
- SENSOR_ATTR_RO(temp3_min_alarm, temp_min_alarm, 2),
-};
-
-static struct sensor_device_attribute temp_max_alarm[] = {
- SENSOR_ATTR_RO(temp1_max_alarm, temp_max_alarm, 0),
- SENSOR_ATTR_RO(temp2_max_alarm, temp_max_alarm, 1),
- SENSOR_ATTR_RO(temp3_max_alarm, temp_max_alarm, 2),
-};
-
static struct sensor_device_attribute temp_crit_alarm[] = {
SENSOR_ATTR_RO(temp1_crit_alarm, temp_crit_alarm, 0),
SENSOR_ATTR_RO(temp2_crit_alarm, temp_crit_alarm, 1),
@@ -1043,6 +1070,7 @@ static ssize_t temp_fault_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_FAULT));
}
+
static struct sensor_device_attribute temp_fault[] = {
SENSOR_ATTR_RO(temp1_fault, temp_fault, 0),
SENSOR_ATTR_RO(temp2_fault, temp_fault, 1),
@@ -1074,106 +1102,180 @@ static const struct attribute_group pc8736x_temp_attr_group[] = {
{ .attrs = pc8736x_temp_attr[2] }
};
-static ssize_t name_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t alarms_temp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pc87360_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", data->temp_alarms);
}
-static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(alarms_temp);
-/*
- * Device detection, registration and update
- */
+static ssize_t fan_input_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[attr->index],
+ FAN_DIV_FROM_REG(data->fan_status[attr->index])));
+}
-static int __init pc87360_find(int sioaddr, u8 *devid,
- unsigned short *addresses)
+static struct sensor_device_attribute fan_input[] = {
+ SENSOR_ATTR_RO(fan1_input, fan_input, 0),
+ SENSOR_ATTR_RO(fan2_input, fan_input, 1),
+ SENSOR_ATTR_RO(fan3_input, fan_input, 2),
+};
+
+static ssize_t fan_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
- u16 val;
- int i;
- int nrdev; /* logical device count */
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n",
+ FAN_STATUS_FROM_REG(data->fan_status[attr->index]));
+}
- /* No superio_enter */
+static struct sensor_device_attribute fan_status[] = {
+ SENSOR_ATTR_RO(fan1_status, fan_status, 0),
+ SENSOR_ATTR_RO(fan2_status, fan_status, 1),
+ SENSOR_ATTR_RO(fan3_status, fan_status, 2),
+};
- /* Identify device */
- val = force_id ? force_id : superio_inb(sioaddr, DEVID);
- switch (val) {
- case 0xE1: /* PC87360 */
- case 0xE8: /* PC87363 */
- case 0xE4: /* PC87364 */
- nrdev = 1;
- break;
- case 0xE5: /* PC87365 */
- case 0xE9: /* PC87366 */
- nrdev = 3;
- break;
- default:
- superio_exit(sioaddr);
- return -ENODEV;
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n",
+ FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+}
+
+static struct sensor_device_attribute fan_div[] = {
+ SENSOR_ATTR_RO(fan1_div, fan_div, 0),
+ SENSOR_ATTR_RO(fan2_div, fan_div, 1),
+ SENSOR_ATTR_RO(fan3_div, fan_div, 2),
+};
+
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[attr->index],
+ FAN_DIV_FROM_REG(data->fan_status[attr->index])));
+}
+
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ long fan_min;
+ int err;
+
+ err = kstrtol(buf, 10, &fan_min);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ fan_min = FAN_TO_REG(fan_min,
+ FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+
+ /* If it wouldn't fit, change clock divisor */
+ while (fan_min > 255
+ && (data->fan_status[attr->index] & 0x60) != 0x60) {
+ fan_min >>= 1;
+ data->fan[attr->index] >>= 1;
+ data->fan_status[attr->index] += 0x20;
}
- /* Remember the device id */
- *devid = val;
+ data->fan_min[attr->index] = fan_min > 255 ? 255 : fan_min;
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_MIN(attr->index),
+ data->fan_min[attr->index]);
- for (i = 0; i < nrdev; i++) {
- /* select logical device */
- superio_outb(sioaddr, DEV, logdev[i]);
+ /* Write new divider, preserve alarm bits */
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_STATUS(attr->index),
+ data->fan_status[attr->index] & 0xF9);
+ mutex_unlock(&data->update_lock);
- val = superio_inb(sioaddr, ACT);
- if (!(val & 0x01)) {
- pr_info("Device 0x%02x not activated\n", logdev[i]);
- continue;
- }
+ return count;
+}
- val = (superio_inb(sioaddr, BASE) << 8)
- | superio_inb(sioaddr, BASE + 1);
- if (!val) {
- pr_info("Base address not set for device 0x%02x\n",
- logdev[i]);
- continue;
- }
+static struct sensor_device_attribute fan_min[] = {
+ SENSOR_ATTR_RW(fan1_min, fan_min, 0),
+ SENSOR_ATTR_RW(fan2_min, fan_min, 1),
+ SENSOR_ATTR_RW(fan3_min, fan_min, 2),
+};
- addresses[i] = val;
+#define FAN_UNIT_ATTRS(X) \
+{ &fan_input[X].dev_attr.attr, \
+ &fan_status[X].dev_attr.attr, \
+ &fan_div[X].dev_attr.attr, \
+ &fan_min[X].dev_attr.attr, \
+ NULL \
+}
- if (i == 0) { /* Fans */
- confreg[0] = superio_inb(sioaddr, 0xF0);
- confreg[1] = superio_inb(sioaddr, 0xF1);
+static struct attribute *pc8736x_fan_attr[][5] = {
+ FAN_UNIT_ATTRS(0),
+ FAN_UNIT_ATTRS(1),
+ FAN_UNIT_ATTRS(2)
+};
- pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
- (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
- (confreg[0] >> 4) & 1);
- pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
- (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
- (confreg[0] >> 7) & 1);
- pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
- confreg[1] & 1, (confreg[1] >> 1) & 1,
- (confreg[1] >> 2) & 1);
- } else if (i == 1) { /* Voltages */
- /* Are we using thermistors? */
- if (*devid == 0xE9) { /* PC87366 */
- /*
- * These registers are not logical-device
- * specific, just that we won't need them if
- * we don't use the VLM device
- */
- confreg[2] = superio_inb(sioaddr, 0x2B);
- confreg[3] = superio_inb(sioaddr, 0x25);
+static const struct attribute_group pc8736x_fan_attr_group[] = {
+ { .attrs = pc8736x_fan_attr[0], },
+ { .attrs = pc8736x_fan_attr[1], },
+ { .attrs = pc8736x_fan_attr[2], },
+};
- if (confreg[2] & 0x40) {
- pr_info("Using thermistors for temperature monitoring\n");
- }
- if (confreg[3] & 0xE0) {
- pr_info("VID inputs routed (mode %u)\n",
- confreg[3] >> 5);
- }
- }
- }
- }
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n",
+ PWM_FROM_REG(data->pwm[attr->index],
+ FAN_CONFIG_INVERT(data->fan_conf,
+ attr->index)));
+}
- superio_exit(sioaddr);
- return 0;
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->pwm[attr->index] = PWM_TO_REG(val,
+ FAN_CONFIG_INVERT(data->fan_conf, attr->index));
+ pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(attr->index),
+ data->pwm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
}
+static struct sensor_device_attribute pwm[] = {
+ SENSOR_ATTR_RW(pwm1, pwm, 0),
+ SENSOR_ATTR_RW(pwm2, pwm, 1),
+ SENSOR_ATTR_RW(pwm3, pwm, 2),
+};
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
static void pc87360_remove_files(struct device *dev)
{
int i;
@@ -1190,6 +1292,146 @@ static void pc87360_remove_files(struct device *dev)
sysfs_remove_group(&dev->kobj, &pc8736x_vin_group);
}
+static void pc87360_init_device(struct platform_device *pdev,
+ int use_thermistors)
+{
+ struct pc87360_data *data = platform_get_drvdata(pdev);
+ int i, nr;
+ const u8 init_in[14] = { 2, 2, 2, 2, 2, 2, 2, 1, 1, 3, 1, 2, 2, 2 };
+ const u8 init_temp[3] = { 2, 2, 1 };
+ u8 reg;
+
+ if (init >= 2 && data->innr) {
+ reg = pc87360_read_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONVRATE);
+ dev_info(&pdev->dev,
+ "VLM conversion set to 1s period, 160us delay\n");
+ pc87360_write_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONVRATE,
+ (reg & 0xC0) | 0x11);
+ }
+
+ nr = data->innr < 11 ? data->innr : 11;
+ for (i = 0; i < nr; i++) {
+ reg = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ dev_dbg(&pdev->dev, "bios in%d status:0x%02x\n", i, reg);
+ if (init >= init_in[i]) {
+ /* Forcibly enable voltage channel */
+ if (!(reg & CHAN_ENA)) {
+ dev_dbg(&pdev->dev, "Forcibly enabling in%d\n",
+ i);
+ pc87360_write_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS,
+ (reg & 0x68) | 0x87);
+ }
+ }
+ }
+
+ /*
+ * We can't blindly trust the Super-I/O space configuration bit,
+ * most BIOS won't set it properly
+ */
+ dev_dbg(&pdev->dev, "bios thermistors:%d\n", use_thermistors);
+ for (i = 11; i < data->innr; i++) {
+ reg = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_TEMP_STATUS);
+ use_thermistors = use_thermistors || (reg & CHAN_ENA);
+ /* thermistors are temp[4-6], measured on vin[11-14] */
+ dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i-7, reg);
+ }
+ dev_dbg(&pdev->dev, "using thermistors:%d\n", use_thermistors);
+
+ i = use_thermistors ? 2 : 0;
+ for (; i < data->tempnr; i++) {
+ reg = pc87360_read_value(data, LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS);
+ dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i + 1, reg);
+ if (init >= init_temp[i]) {
+ /* Forcibly enable temperature channel */
+ if (!(reg & CHAN_ENA)) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling temp%d\n", i + 1);
+ pc87360_write_value(data, LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS,
+ 0xCF);
+ }
+ }
+ }
+
+ if (use_thermistors) {
+ for (i = 11; i < data->innr; i++) {
+ if (init >= init_in[i]) {
+ /*
+ * The pin may already be used by thermal
+ * diodes
+ */
+ reg = pc87360_read_value(data, LD_TEMP,
+ (i - 11) / 2, PC87365_REG_TEMP_STATUS);
+ if (reg & CHAN_ENA) {
+ dev_dbg(&pdev->dev,
+ "Skipping temp%d, pin already in use by temp%d\n",
+ i - 7, (i - 11) / 2);
+ continue;
+ }
+
+ /* Forcibly enable thermistor channel */
+ reg = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ if (!(reg & CHAN_ENA)) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling temp%d\n",
+ i - 7);
+ pc87360_write_value(data, LD_IN, i,
+ PC87365_REG_TEMP_STATUS,
+ (reg & 0x60) | 0x8F);
+ }
+ }
+ }
+ }
+
+ if (data->innr) {
+ reg = pc87360_read_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONFIG);
+ dev_dbg(&pdev->dev, "bios vin-cfg:0x%02x\n", reg);
+ if (reg & CHAN_ENA) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling monitoring (VLM)\n");
+ pc87360_write_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONFIG,
+ reg & 0xFE);
+ }
+ }
+
+ if (data->tempnr) {
+ reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
+ PC87365_REG_TEMP_CONFIG);
+ dev_dbg(&pdev->dev, "bios temp-cfg:0x%02x\n", reg);
+ if (reg & CHAN_ENA) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling monitoring (TMS)\n");
+ pc87360_write_value(data, LD_TEMP, NO_BANK,
+ PC87365_REG_TEMP_CONFIG,
+ reg & 0xFE);
+ }
+
+ if (init >= 2) {
+ /* Chip config as documented by National Semi. */
+ pc87360_write_value(data, LD_TEMP, 0xF, 0xA, 0x08);
+ /*
+ * We voluntarily omit the bank here, in case the
+ * sequence itself matters. It shouldn't be a problem,
+ * since nobody else is supposed to access the
+ * device at that point.
+ */
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xB, 0x04);
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xC, 0x35);
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xD, 0x05);
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xE, 0x05);
+ }
+ }
+}
+
static int pc87360_probe(struct platform_device *pdev)
{
int i;
@@ -1239,7 +1481,7 @@ static int pc87360_probe(struct platform_device *pdev)
data->address[i] = extra_isa[i];
if (data->address[i]
&& !devm_request_region(dev, extra_isa[i], PC87360_EXTENT,
- pc87360_driver.driver.name)) {
+ DRIVER_NAME)) {
dev_err(dev,
"Region 0x%x-0x%x already in use!\n",
extra_isa[i], extra_isa[i]+PC87360_EXTENT-1);
@@ -1355,330 +1597,105 @@ static int pc87360_remove(struct platform_device *pdev)
}
/*
- * ldi is the logical device index
- * bank is for voltages and temperatures only
+ * Driver data
*/
-static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg)
-{
- int res;
-
- mutex_lock(&(data->lock));
- if (bank != NO_BANK)
- outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
- res = inb_p(data->address[ldi] + reg);
- mutex_unlock(&(data->lock));
+static struct platform_driver pc87360_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = pc87360_probe,
+ .remove = pc87360_remove,
+};
- return res;
-}
+/*
+ * Device detection, registration and update
+ */
-static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg, u8 value)
+static int __init pc87360_find(int sioaddr, u8 *devid,
+ unsigned short *addresses)
{
- mutex_lock(&(data->lock));
- if (bank != NO_BANK)
- outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
- outb_p(value, data->address[ldi] + reg);
- mutex_unlock(&(data->lock));
-}
+ u16 val;
+ int i;
+ int nrdev; /* logical device count */
-/* (temp & vin) channel conversion status register flags (pdf sec.11.5.12) */
-#define CHAN_CNVRTD 0x80 /* new data ready */
-#define CHAN_ENA 0x01 /* enabled channel (temp or vin) */
-#define CHAN_ALM_ENA 0x10 /* propagate to alarms-reg ?? (chk val!) */
-#define CHAN_READY (CHAN_ENA|CHAN_CNVRTD) /* sample ready mask */
+ /* No superio_enter */
-#define TEMP_OTS_OE 0x20 /* OTS Output Enable */
-#define VIN_RW1C_MASK (CHAN_READY|CHAN_ALM_MAX|CHAN_ALM_MIN) /* 0x87 */
-#define TEMP_RW1C_MASK (VIN_RW1C_MASK|TEMP_ALM_CRIT|TEMP_FAULT) /* 0xCF */
+ /* Identify device */
+ val = force_id ? force_id : superio_inb(sioaddr, DEVID);
+ switch (val) {
+ case 0xE1: /* PC87360 */
+ case 0xE8: /* PC87363 */
+ case 0xE4: /* PC87364 */
+ nrdev = 1;
+ break;
+ case 0xE5: /* PC87365 */
+ case 0xE9: /* PC87366 */
+ nrdev = 3;
+ break;
+ default:
+ superio_exit(sioaddr);
+ return -ENODEV;
+ }
+ /* Remember the device id */
+ *devid = val;
-static void pc87360_init_device(struct platform_device *pdev,
- int use_thermistors)
-{
- struct pc87360_data *data = platform_get_drvdata(pdev);
- int i, nr;
- const u8 init_in[14] = { 2, 2, 2, 2, 2, 2, 2, 1, 1, 3, 1, 2, 2, 2 };
- const u8 init_temp[3] = { 2, 2, 1 };
- u8 reg;
+ for (i = 0; i < nrdev; i++) {
+ /* select logical device */
+ superio_outb(sioaddr, DEV, logdev[i]);
- if (init >= 2 && data->innr) {
- reg = pc87360_read_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONVRATE);
- dev_info(&pdev->dev,
- "VLM conversion set to 1s period, 160us delay\n");
- pc87360_write_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONVRATE,
- (reg & 0xC0) | 0x11);
- }
+ val = superio_inb(sioaddr, ACT);
+ if (!(val & 0x01)) {
+ pr_info("Device 0x%02x not activated\n", logdev[i]);
+ continue;
+ }
- nr = data->innr < 11 ? data->innr : 11;
- for (i = 0; i < nr; i++) {
- reg = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS);
- dev_dbg(&pdev->dev, "bios in%d status:0x%02x\n", i, reg);
- if (init >= init_in[i]) {
- /* Forcibly enable voltage channel */
- if (!(reg & CHAN_ENA)) {
- dev_dbg(&pdev->dev, "Forcibly enabling in%d\n",
- i);
- pc87360_write_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS,
- (reg & 0x68) | 0x87);
- }
+ val = (superio_inb(sioaddr, BASE) << 8)
+ | superio_inb(sioaddr, BASE + 1);
+ if (!val) {
+ pr_info("Base address not set for device 0x%02x\n",
+ logdev[i]);
+ continue;
}
- }
- /*
- * We can't blindly trust the Super-I/O space configuration bit,
- * most BIOS won't set it properly
- */
- dev_dbg(&pdev->dev, "bios thermistors:%d\n", use_thermistors);
- for (i = 11; i < data->innr; i++) {
- reg = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_TEMP_STATUS);
- use_thermistors = use_thermistors || (reg & CHAN_ENA);
- /* thermistors are temp[4-6], measured on vin[11-14] */
- dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i-7, reg);
- }
- dev_dbg(&pdev->dev, "using thermistors:%d\n", use_thermistors);
+ addresses[i] = val;
- i = use_thermistors ? 2 : 0;
- for (; i < data->tempnr; i++) {
- reg = pc87360_read_value(data, LD_TEMP, i,
- PC87365_REG_TEMP_STATUS);
- dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i + 1, reg);
- if (init >= init_temp[i]) {
- /* Forcibly enable temperature channel */
- if (!(reg & CHAN_ENA)) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling temp%d\n", i + 1);
- pc87360_write_value(data, LD_TEMP, i,
- PC87365_REG_TEMP_STATUS,
- 0xCF);
- }
- }
- }
+ if (i == 0) { /* Fans */
+ confreg[0] = superio_inb(sioaddr, 0xF0);
+ confreg[1] = superio_inb(sioaddr, 0xF1);
- if (use_thermistors) {
- for (i = 11; i < data->innr; i++) {
- if (init >= init_in[i]) {
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
+ (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
+ (confreg[0] >> 4) & 1);
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
+ (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
+ (confreg[0] >> 7) & 1);
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
+ confreg[1] & 1, (confreg[1] >> 1) & 1,
+ (confreg[1] >> 2) & 1);
+ } else if (i == 1) { /* Voltages */
+ /* Are we using thermistors? */
+ if (*devid == 0xE9) { /* PC87366 */
/*
- * The pin may already be used by thermal
- * diodes
+ * These registers are not logical-device
+ * specific, just that we won't need them if
+ * we don't use the VLM device
*/
- reg = pc87360_read_value(data, LD_TEMP,
- (i - 11) / 2, PC87365_REG_TEMP_STATUS);
- if (reg & CHAN_ENA) {
- dev_dbg(&pdev->dev,
- "Skipping temp%d, pin already in use by temp%d\n",
- i - 7, (i - 11) / 2);
- continue;
- }
+ confreg[2] = superio_inb(sioaddr, 0x2B);
+ confreg[3] = superio_inb(sioaddr, 0x25);
- /* Forcibly enable thermistor channel */
- reg = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS);
- if (!(reg & CHAN_ENA)) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling temp%d\n",
- i - 7);
- pc87360_write_value(data, LD_IN, i,
- PC87365_REG_TEMP_STATUS,
- (reg & 0x60) | 0x8F);
+ if (confreg[2] & 0x40) {
+ pr_info("Using thermistors for temperature monitoring\n");
+ }
+ if (confreg[3] & 0xE0) {
+ pr_info("VID inputs routed (mode %u)\n",
+ confreg[3] >> 5);
}
}
}
}
- if (data->innr) {
- reg = pc87360_read_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONFIG);
- dev_dbg(&pdev->dev, "bios vin-cfg:0x%02x\n", reg);
- if (reg & CHAN_ENA) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling monitoring (VLM)\n");
- pc87360_write_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONFIG,
- reg & 0xFE);
- }
- }
-
- if (data->tempnr) {
- reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
- PC87365_REG_TEMP_CONFIG);
- dev_dbg(&pdev->dev, "bios temp-cfg:0x%02x\n", reg);
- if (reg & CHAN_ENA) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling monitoring (TMS)\n");
- pc87360_write_value(data, LD_TEMP, NO_BANK,
- PC87365_REG_TEMP_CONFIG,
- reg & 0xFE);
- }
-
- if (init >= 2) {
- /* Chip config as documented by National Semi. */
- pc87360_write_value(data, LD_TEMP, 0xF, 0xA, 0x08);
- /*
- * We voluntarily omit the bank here, in case the
- * sequence itself matters. It shouldn't be a problem,
- * since nobody else is supposed to access the
- * device at that point.
- */
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xB, 0x04);
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xC, 0x35);
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xD, 0x05);
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xE, 0x05);
- }
- }
-}
-
-static void pc87360_autodiv(struct device *dev, int nr)
-{
- struct pc87360_data *data = dev_get_drvdata(dev);
- u8 old_min = data->fan_min[nr];
-
- /* Increase clock divider if needed and possible */
- if ((data->fan_status[nr] & 0x04) /* overflow flag */
- || (data->fan[nr] >= 224)) { /* next to overflow */
- if ((data->fan_status[nr] & 0x60) != 0x60) {
- data->fan_status[nr] += 0x20;
- data->fan_min[nr] >>= 1;
- data->fan[nr] >>= 1;
- dev_dbg(dev,
- "Increasing clock divider to %d for fan %d\n",
- FAN_DIV_FROM_REG(data->fan_status[nr]), nr + 1);
- }
- } else {
- /* Decrease clock divider if possible */
- while (!(data->fan_min[nr] & 0x80) /* min "nails" divider */
- && data->fan[nr] < 85 /* bad accuracy */
- && (data->fan_status[nr] & 0x60) != 0x00) {
- data->fan_status[nr] -= 0x20;
- data->fan_min[nr] <<= 1;
- data->fan[nr] <<= 1;
- dev_dbg(dev,
- "Decreasing clock divider to %d for fan %d\n",
- FAN_DIV_FROM_REG(data->fan_status[nr]),
- nr + 1);
- }
- }
-
- /* Write new fan min if it changed */
- if (old_min != data->fan_min[nr]) {
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_MIN(nr),
- data->fan_min[nr]);
- }
-}
-
-static struct pc87360_data *pc87360_update_device(struct device *dev)
-{
- struct pc87360_data *data = dev_get_drvdata(dev);
- u8 i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
- dev_dbg(dev, "Data update\n");
-
- /* Fans */
- for (i = 0; i < data->fannr; i++) {
- if (FAN_CONFIG_MONITOR(data->fan_conf, i)) {
- data->fan_status[i] =
- pc87360_read_value(data, LD_FAN,
- NO_BANK, PC87360_REG_FAN_STATUS(i));
- data->fan[i] = pc87360_read_value(data, LD_FAN,
- NO_BANK, PC87360_REG_FAN(i));
- data->fan_min[i] = pc87360_read_value(data,
- LD_FAN, NO_BANK,
- PC87360_REG_FAN_MIN(i));
- /* Change clock divider if needed */
- pc87360_autodiv(dev, i);
- /* Clear bits and write new divider */
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_STATUS(i),
- data->fan_status[i]);
- }
- if (FAN_CONFIG_CONTROL(data->fan_conf, i))
- data->pwm[i] = pc87360_read_value(data, LD_FAN,
- NO_BANK, PC87360_REG_PWM(i));
- }
-
- /* Voltages */
- for (i = 0; i < data->innr; i++) {
- data->in_status[i] = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS);
- /* Clear bits */
- pc87360_write_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS,
- data->in_status[i]);
- if ((data->in_status[i] & CHAN_READY) == CHAN_READY) {
- data->in[i] = pc87360_read_value(data, LD_IN,
- i, PC87365_REG_IN);
- }
- if (data->in_status[i] & CHAN_ENA) {
- data->in_min[i] = pc87360_read_value(data,
- LD_IN, i,
- PC87365_REG_IN_MIN);
- data->in_max[i] = pc87360_read_value(data,
- LD_IN, i,
- PC87365_REG_IN_MAX);
- if (i >= 11)
- data->in_crit[i-11] =
- pc87360_read_value(data, LD_IN,
- i, PC87365_REG_TEMP_CRIT);
- }
- }
- if (data->innr) {
- data->in_alarms = pc87360_read_value(data, LD_IN,
- NO_BANK, PC87365_REG_IN_ALARMS1)
- | ((pc87360_read_value(data, LD_IN,
- NO_BANK, PC87365_REG_IN_ALARMS2)
- & 0x07) << 8);
- data->vid = (data->vid_conf & 0xE0) ?
- pc87360_read_value(data, LD_IN,
- NO_BANK, PC87365_REG_VID) : 0x1F;
- }
-
- /* Temperatures */
- for (i = 0; i < data->tempnr; i++) {
- data->temp_status[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_STATUS);
- /* Clear bits */
- pc87360_write_value(data, LD_TEMP, i,
- PC87365_REG_TEMP_STATUS,
- data->temp_status[i]);
- if ((data->temp_status[i] & CHAN_READY) == CHAN_READY) {
- data->temp[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP);
- }
- if (data->temp_status[i] & CHAN_ENA) {
- data->temp_min[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_MIN);
- data->temp_max[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_MAX);
- data->temp_crit[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_CRIT);
- }
- }
- if (data->tempnr) {
- data->temp_alarms = pc87360_read_value(data, LD_TEMP,
- NO_BANK, PC87365_REG_TEMP_ALARMS)
- & 0x3F;
- }
-
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
+ superio_exit(sioaddr);
+ return 0;
}
static int __init pc87360_device_add(unsigned short address)
@@ -1777,10 +1794,10 @@ static void __exit pc87360_exit(void)
platform_driver_unregister(&pc87360_driver);
}
-
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("PC8736x hardware monitor");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
module_init(pc87360_init);
module_exit(pc87360_exit);
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index a97a51005c61..af9614e918a4 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -228,14 +228,13 @@ exit_sysfs_remove:
return err;
}
-static int pcf8591_remove(struct i2c_client *client)
+static void pcf8591_remove(struct i2c_client *client)
{
struct pcf8591_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
- return 0;
}
/* Called when we have found a new PCF8591. */
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 951e4a9ff2d6..89668af67206 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -397,6 +397,15 @@ config SENSORS_TPS53679
This driver can also be built as a module. If so, the module will
be called tps53679.
+config SENSORS_TPS546D24
+ tristate "TPS546D24"
+ help
+ If you say yes here you get hardware monitoring support for the Texas
+ Instruments TPS546D24.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps546d24.
+
config SENSORS_UCD9000
tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e2fe86f98965..0002dbe22d52 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
+obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
index 8ecd4adfef40..24e5194706cf 100644
--- a/drivers/hwmon/pmbus/mp2888.c
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -34,7 +34,7 @@ struct mp2888_data {
int curr_sense_gain;
};
-#define to_mp2888_data(x) container_of(x, struct mp2888_data, info)
+#define to_mp2888_data(x) container_of(x, struct mp2888_data, info)
static int mp2888_read_byte_data(struct i2c_client *client, int page, int reg)
{
@@ -109,7 +109,7 @@ mp2888_read_phase(struct i2c_client *client, struct mp2888_data *data, int page,
* - Kcs is the DrMOS current sense gain of power stage, which is obtained from the
* register MP2888_MFR_VR_CONFIG1, bits 13-12 with the following selection of DrMOS
* (data->curr_sense_gain):
- * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A.
+ * 00b - 8.5µA/A, 01b - 9.7µA/A, 10b - 10µA/A, 11b - 5µA/A.
* - Rcs is the internal phase current sense resistor. This parameter depends on hardware
* assembly. By default it is set to 1kΩ. In case of different assembly, user should
* scale this parameter by dividing it by Rcs.
@@ -118,10 +118,9 @@ mp2888_read_phase(struct i2c_client *client, struct mp2888_data *data, int page,
* because sampling of current occurrence of bit weight has a big deviation, especially for
* light load.
*/
- ret = DIV_ROUND_CLOSEST(ret * 100 - 9800, data->curr_sense_gain);
- ret = (data->phase_curr_resolution) ? ret * 2 : ret;
+ ret = DIV_ROUND_CLOSEST(ret * 200 - 19600, data->curr_sense_gain);
/* Scale according to total current resolution. */
- ret = (data->total_curr_resolution) ? ret * 8 : ret * 4;
+ ret = (data->total_curr_resolution) ? ret * 2 : ret;
return ret;
}
@@ -212,7 +211,7 @@ static int mp2888_read_word_data(struct i2c_client *client, int page, int phase,
ret = pmbus_read_word_data(client, page, phase, reg);
if (ret < 0)
return ret;
- ret = data->total_curr_resolution ? ret * 2 : ret;
+ ret = data->total_curr_resolution ? ret : DIV_ROUND_CLOSEST(ret, 2);
break;
case PMBUS_POUT_OP_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase, reg);
@@ -223,7 +222,7 @@ static int mp2888_read_word_data(struct i2c_client *client, int page, int phase,
* set 1. Actual power is reported with 0.5 W or 1 W resolution, respectively. Scaling
* is needed to match both.
*/
- ret = data->total_curr_resolution ? ret * 4 : ret * 2;
+ ret = data->total_curr_resolution ? ret * 2 : ret;
break;
/*
* The below registers are not implemented by device or implemented not according to the
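To make the revised scaling in mp2888_read_phase() easier to follow, here is a small stand-alone sketch that reproduces only the arithmetic from the hunk above. The numeric inputs are arbitrary and the helper names are illustrative; they are not driver symbols.

/*
 * Stand-alone sketch of the new phase-current scaling; example values only.
 */
#include <stdio.h>

/* Rounded integer division for non-negative operands, as DIV_ROUND_CLOSEST(). */
static int div_round_closest(int x, int d)
{
	return (x + d / 2) / d;
}

static int phase_current(int raw, int curr_sense_gain, int total_curr_resolution)
{
	int ret = div_round_closest(raw * 200 - 19600, curr_sense_gain);

	/* Double the result when the total-current resolution bit is set. */
	return total_curr_resolution ? ret * 2 : ret;
}

int main(void)
{
	printf("%d\n", phase_current(150, 97, 0));	/* arbitrary example inputs */
	return 0;
}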
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f10bac8860fc..7ec04934747e 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1270,9 +1270,9 @@ struct pmbus_thermal_data {
struct pmbus_sensor *sensor;
};
-static int pmbus_thermal_get_temp(void *data, int *temp)
+static int pmbus_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct pmbus_thermal_data *tdata = data;
+ struct pmbus_thermal_data *tdata = tz->devdata;
struct pmbus_sensor *sensor = tdata->sensor;
struct pmbus_data *pmbus_data = tdata->pmbus_data;
struct i2c_client *client = to_i2c_client(pmbus_data->dev);
@@ -1296,7 +1296,7 @@ static int pmbus_thermal_get_temp(void *data, int *temp)
return ret;
}
-static const struct thermal_zone_of_device_ops pmbus_thermal_ops = {
+static const struct thermal_zone_device_ops pmbus_thermal_ops = {
.get_temp = pmbus_thermal_get_temp,
};
@@ -1314,8 +1314,8 @@ static int pmbus_thermal_add_sensor(struct pmbus_data *pmbus_data,
tdata->sensor = sensor;
tdata->pmbus_data = pmbus_data;
- tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
- &pmbus_thermal_ops);
+ tzd = devm_thermal_of_zone_register(dev, index, tdata,
+ &pmbus_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
* so ignore that error but forward any other error.
@@ -2861,7 +2861,7 @@ static int pmbus_regulator_get_low_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_low[page]) {
+ if (data->vout_low[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MIN))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MIN);
@@ -2887,7 +2887,7 @@ static int pmbus_regulator_get_high_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_high[page]) {
+ if (data->vout_high[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MAX))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MAX);
@@ -3016,11 +3016,10 @@ static int pmbus_regulator_register(struct pmbus_data *data)
rdev = devm_regulator_register(dev, &info->reg_desc[i],
&config);
- if (IS_ERR(rdev)) {
- dev_err(dev, "Failed to register %s regulator\n",
- info->reg_desc[i].name);
- return PTR_ERR(rdev);
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ info->reg_desc[i].name);
}
return 0;
@@ -3320,6 +3319,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
struct pmbus_data *data;
size_t groups_num = 0;
int ret;
+ int i;
char *name;
if (!info)
@@ -3353,6 +3353,11 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
data->currpage = -1;
data->currphase = -1;
+ for (i = 0; i < ARRAY_SIZE(data->vout_low); i++) {
+ data->vout_low[i] = -1;
+ data->vout_high[i] = -1;
+ }
+
ret = pmbus_init_common(client, data, info);
if (ret < 0)
return ret;
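The thermal hunks above move pmbus_core from the OF-specific sensor API to the generic thermal_zone_device_ops and devm_thermal_of_zone_register(); the callback now receives the thermal zone itself and fetches its private data from tz->devdata. (The separate vout_low/vout_high hunks switch the "not yet read" marker from 0 to -1, initialised in pmbus_do_probe(), presumably so a genuine value of 0 is not mistaken for "unread".) A minimal sketch of the new registration pattern, with demo_* placeholders standing in for the pmbus_core types:

#include <linux/err.h>
#include <linux/thermal.h>

struct demo_sensor {
	int last_temp_mdegc;		/* cached temperature in millidegrees C */
};

static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct demo_sensor *s = tz->devdata;	/* was a plain void * argument before */

	*temp = s->last_temp_mdegc;
	return 0;
}

static const struct thermal_zone_device_ops demo_ops = {
	.get_temp = demo_get_temp,
};

static int demo_register(struct device *dev, struct demo_sensor *s)
{
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_of_zone_register(dev, 0, s, &demo_ops);
	/* -ENODEV just means CONFIG_THERMAL_OF is off; treat it as non-fatal. */
	if (IS_ERR(tzd) && PTR_ERR(tzd) != -ENODEV)
		return PTR_ERR(tzd);
	return 0;
}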
diff --git a/drivers/hwmon/pmbus/tps546d24.c b/drivers/hwmon/pmbus/tps546d24.c
new file mode 100644
index 000000000000..435f94304ad8
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps546d24.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Texas Instruments TPS546D24 buck converter
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info tps546d24_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static int tps546d24_probe(struct i2c_client *client)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+ if (reg < 0)
+ return reg;
+
+ if (reg & 0x80) {
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, PMBUS_VOUT_MODE, reg & 0x7f);
+ if (err < 0)
+ return err;
+ }
+ return pmbus_do_probe(client, &tps546d24_info);
+}
+
+static const struct i2c_device_id tps546d24_id[] = {
+ {"tps546d24", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps546d24_id);
+
+static const struct of_device_id __maybe_unused tps546d24_of_match[] = {
+ {.compatible = "ti,tps546d24"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps546d24_of_match);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver tps546d24_driver = {
+ .driver = {
+ .name = "tps546d24",
+ .of_match_table = of_match_ptr(tps546d24_of_match),
+ },
+ .probe_new = tps546d24_probe,
+ .id_table = tps546d24_id,
+};
+
+module_i2c_driver(tps546d24_driver);
+
+MODULE_AUTHOR("Duke Du <dukedu83@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for TI tps546d24");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 6c08551d8d14..dc3d9a22d917 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -28,11 +28,23 @@ struct pwm_fan_tach {
u8 pulses_per_revolution;
};
+enum pwm_fan_enable_mode {
+ pwm_off_reg_off,
+ pwm_disable_reg_enable,
+ pwm_enable_reg_enable,
+ pwm_disable_reg_disable,
+};
+
struct pwm_fan_ctx {
+ struct device *dev;
+
struct mutex lock;
struct pwm_device *pwm;
struct pwm_state pwm_state;
struct regulator *reg_en;
+ enum pwm_fan_enable_mode enable_mode;
+ bool regulator_enabled;
+ bool enabled;
int tach_count;
struct pwm_fan_tach *tachs;
@@ -82,25 +94,140 @@ static void sample_timer(struct timer_list *t)
mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
-static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+static void pwm_fan_enable_mode_2_state(int enable_mode,
+ struct pwm_state *state,
+ bool *enable_regulator)
+{
+ switch (enable_mode) {
+ case pwm_disable_reg_enable:
+ /* disable pwm, keep regulator enabled */
+ state->enabled = false;
+ *enable_regulator = true;
+ break;
+ case pwm_enable_reg_enable:
+ /* keep pwm and regulator enabled */
+ state->enabled = true;
+ *enable_regulator = true;
+ break;
+ case pwm_off_reg_off:
+ case pwm_disable_reg_disable:
+ /* disable pwm and regulator */
+ state->enabled = false;
+ *enable_regulator = false;
+ }
+}
+
+static int pwm_fan_switch_power(struct pwm_fan_ctx *ctx, bool on)
{
- unsigned long period;
int ret = 0;
+
+ if (!ctx->reg_en)
+ return ret;
+
+ if (!ctx->regulator_enabled && on) {
+ ret = regulator_enable(ctx->reg_en);
+ if (ret == 0)
+ ctx->regulator_enabled = true;
+ } else if (ctx->regulator_enabled && !on) {
+ ret = regulator_disable(ctx->reg_en);
+ if (ret == 0)
+ ctx->regulator_enabled = false;
+ }
+ return ret;
+}
+
+static int pwm_fan_power_on(struct pwm_fan_ctx *ctx)
+{
struct pwm_state *state = &ctx->pwm_state;
+ int ret;
- mutex_lock(&ctx->lock);
- if (ctx->pwm_value == pwm)
- goto exit_set_pwm_err;
+ if (ctx->enabled)
+ return 0;
+
+ ret = pwm_fan_switch_power(ctx, true);
+ if (ret < 0) {
+ dev_err(ctx->dev, "failed to enable power supply\n");
+ return ret;
+ }
+
+ state->enabled = true;
+ ret = pwm_apply_state(ctx->pwm, state);
+ if (ret) {
+ dev_err(ctx->dev, "failed to enable PWM\n");
+ goto disable_regulator;
+ }
+
+ ctx->enabled = true;
+
+ return 0;
+
+disable_regulator:
+ pwm_fan_switch_power(ctx, false);
+ return ret;
+}
- period = state->period;
- state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
- state->enabled = pwm ? true : false;
+static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
+{
+ struct pwm_state *state = &ctx->pwm_state;
+ bool enable_regulator = false;
+ int ret;
+ if (!ctx->enabled)
+ return 0;
+
+ pwm_fan_enable_mode_2_state(ctx->enable_mode,
+ state,
+ &enable_regulator);
+
+ state->enabled = false;
+ state->duty_cycle = 0;
ret = pwm_apply_state(ctx->pwm, state);
+ if (ret) {
+ dev_err(ctx->dev, "failed to disable PWM\n");
+ return ret;
+ }
+
+ pwm_fan_switch_power(ctx, enable_regulator);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+{
+ struct pwm_state *state = &ctx->pwm_state;
+ unsigned long period;
+ int ret = 0;
+
+ if (pwm > 0) {
+ if (ctx->enable_mode == pwm_off_reg_off)
+ /* pwm-fan hard disabled */
+ return 0;
+
+ period = state->period;
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+ ret = pwm_apply_state(ctx->pwm, state);
+ if (ret)
+ return ret;
+ ret = pwm_fan_power_on(ctx);
+ } else {
+ ret = pwm_fan_power_off(ctx);
+ }
if (!ret)
ctx->pwm_value = pwm;
-exit_set_pwm_err:
+
+ return ret;
+}
+
+static int set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+{
+ int ret;
+
+ mutex_lock(&ctx->lock);
+ ret = __set_pwm(ctx, pwm);
mutex_unlock(&ctx->lock);
+
return ret;
}
@@ -115,20 +242,76 @@ static void pwm_fan_update_state(struct pwm_fan_ctx *ctx, unsigned long pwm)
ctx->pwm_fan_state = i;
}
+static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val)
+{
+ int ret = 0;
+ int old_val;
+
+ mutex_lock(&ctx->lock);
+
+ if (ctx->enable_mode == val)
+ goto out;
+
+ old_val = ctx->enable_mode;
+ ctx->enable_mode = val;
+
+ if (val == 0) {
+ /* Disable pwm-fan unconditionally */
+ ret = __set_pwm(ctx, 0);
+ if (ret)
+ ctx->enable_mode = old_val;
+ pwm_fan_update_state(ctx, 0);
+ } else {
+ /*
+ * Change PWM and/or regulator state if currently disabled
+ * Nothing to do if currently enabled
+ */
+ if (!ctx->enabled) {
+ struct pwm_state *state = &ctx->pwm_state;
+ bool enable_regulator = false;
+
+ state->duty_cycle = 0;
+ pwm_fan_enable_mode_2_state(val,
+ state,
+ &enable_regulator);
+
+ pwm_apply_state(ctx->pwm, state);
+ pwm_fan_switch_power(ctx, enable_regulator);
+ pwm_fan_update_state(ctx, 0);
+ }
+ }
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
static int pwm_fan_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
int ret;
- if (val < 0 || val > MAX_PWM)
- return -EINVAL;
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > MAX_PWM)
+ return -EINVAL;
+ ret = set_pwm(ctx, val);
+ if (ret)
+ return ret;
+ pwm_fan_update_state(ctx, val);
+ break;
+ case hwmon_pwm_enable:
+ if (val < 0 || val > 3)
+ ret = -EINVAL;
+ else
+ ret = pwm_fan_update_enable(ctx, val);
- ret = __set_pwm(ctx, val);
- if (ret)
return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
- pwm_fan_update_state(ctx, val);
return 0;
}
@@ -139,9 +322,15 @@ static int pwm_fan_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_pwm:
- *val = ctx->pwm_value;
- return 0;
-
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = ctx->pwm_value;
+ return 0;
+ case hwmon_pwm_enable:
+ *val = ctx->enable_mode;
+ return 0;
+ }
+ return -EOPNOTSUPP;
case hwmon_fan:
*val = ctx->tachs[channel].rpm;
return 0;
@@ -212,7 +401,7 @@ pwm_fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
if (state == ctx->pwm_fan_state)
return 0;
- ret = __set_pwm(ctx, ctx->pwm_fan_cooling_levels[state]);
+ ret = set_pwm(ctx, ctx->pwm_fan_cooling_levels[state]);
if (ret) {
dev_err(&cdev->device, "Cannot set pwm!\n");
return ret;
@@ -270,18 +459,14 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
return 0;
}
-static void pwm_fan_regulator_disable(void *data)
-{
- regulator_disable(data);
-}
-
-static void pwm_fan_pwm_disable(void *__ctx)
+static void pwm_fan_cleanup(void *__ctx)
{
struct pwm_fan_ctx *ctx = __ctx;
- ctx->pwm_state.enabled = false;
- pwm_apply_state(ctx->pwm, &ctx->pwm_state);
del_timer_sync(&ctx->rpm_timer);
+ /* Switch off everything */
+ ctx->enable_mode = pwm_disable_reg_disable;
+ pwm_fan_power_off(ctx);
}
static int pwm_fan_probe(struct platform_device *pdev)
@@ -302,7 +487,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
mutex_init(&ctx->lock);
- ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
+ ctx->dev = &pdev->dev;
+ ctx->pwm = devm_pwm_get(dev, NULL);
if (IS_ERR(ctx->pwm))
return dev_err_probe(dev, PTR_ERR(ctx->pwm), "Could not get PWM\n");
@@ -314,22 +500,12 @@ static int pwm_fan_probe(struct platform_device *pdev)
return PTR_ERR(ctx->reg_en);
ctx->reg_en = NULL;
- } else {
- ret = regulator_enable(ctx->reg_en);
- if (ret) {
- dev_err(dev, "Failed to enable fan supply: %d\n", ret);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
- ctx->reg_en);
- if (ret)
- return ret;
}
pwm_init_state(ctx->pwm, &ctx->pwm_state);
/*
- * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+ * set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
* long. Check this here to prevent the fan running at a too low
* frequency.
*/
@@ -338,14 +514,19 @@ static int pwm_fan_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* Set duty cycle to maximum allowed and enable PWM output */
- ret = __set_pwm(ctx, MAX_PWM);
+ ctx->enable_mode = pwm_disable_reg_enable;
+
+ /*
+ * Set duty cycle to maximum allowed and enable PWM output as well as
+ * the regulator. In case of error nothing is changed
+ */
+ ret = set_pwm(ctx, MAX_PWM);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
timer_setup(&ctx->rpm_timer, sample_timer, 0);
- ret = devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
+ ret = devm_add_action_or_reset(dev, pwm_fan_cleanup, ctx);
if (ret)
return ret;
@@ -377,7 +558,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (!channels)
return -ENOMEM;
- channels[0] = HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT);
+ channels[0] = HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE);
for (i = 0; i < ctx->tach_count; i++) {
struct pwm_fan_tach *tach = &ctx->tachs[i];
@@ -451,65 +632,28 @@ static int pwm_fan_probe(struct platform_device *pdev)
return 0;
}
-static int pwm_fan_disable(struct device *dev)
-{
- struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- int ret;
-
- if (ctx->pwm_value) {
- /* keep ctx->pwm_state unmodified for pwm_fan_resume() */
- struct pwm_state state = ctx->pwm_state;
-
- state.duty_cycle = 0;
- state.enabled = false;
- ret = pwm_apply_state(ctx->pwm, &state);
- if (ret < 0)
- return ret;
- }
-
- if (ctx->reg_en) {
- ret = regulator_disable(ctx->reg_en);
- if (ret) {
- dev_err(dev, "Failed to disable fan supply: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static void pwm_fan_shutdown(struct platform_device *pdev)
{
- pwm_fan_disable(&pdev->dev);
+ struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
+
+ pwm_fan_cleanup(ctx);
}
-#ifdef CONFIG_PM_SLEEP
static int pwm_fan_suspend(struct device *dev)
{
- return pwm_fan_disable(dev);
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ return pwm_fan_power_off(ctx);
}
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- int ret;
-
- if (ctx->reg_en) {
- ret = regulator_enable(ctx->reg_en);
- if (ret) {
- dev_err(dev, "Failed to enable fan supply: %d\n", ret);
- return ret;
- }
- }
-
- if (ctx->pwm_value == 0)
- return 0;
- return pwm_apply_state(ctx->pwm, &ctx->pwm_state);
+ return set_pwm(ctx, ctx->pwm_value);
}
-#endif
-static SIMPLE_DEV_PM_OPS(pwm_fan_pm, pwm_fan_suspend, pwm_fan_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_fan_pm, pwm_fan_suspend, pwm_fan_resume);
static const struct of_device_id of_pwm_fan_match[] = {
{ .compatible = "pwm-fan", },
@@ -522,7 +666,7 @@ static struct platform_driver pwm_fan_driver = {
.shutdown = pwm_fan_shutdown,
.driver = {
.name = "pwm-fan",
- .pm = &pwm_fan_pm,
+ .pm = pm_sleep_ptr(&pwm_fan_pm),
.of_match_table = of_pwm_fan_match,
},
};
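With HWMON_PWM_ENABLE now registered next to HWMON_PWM_INPUT, user space can select one of the enable modes at run time through pwmN_enable; values 0-3 map onto enum pwm_fan_enable_mode above. A hypothetical user-space sketch follows; the hwmon0 path is an assumption and depends on device enumeration order.

/* Hypothetical user-space example; "hwmon0" depends on device enumeration. */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* 2 == pwm_enable_reg_enable: keep PWM output and supply enabled. */
	if (write_attr("/sys/class/hwmon/hwmon0/pwm1_enable", "2"))
		return 1;
	/* Duty cycle still takes 0..255 (MAX_PWM). */
	return write_attr("/sys/class/hwmon/hwmon0/pwm1", "128") ? 1 : 0;
}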
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 5187c6dd5a4f..4d75385f7d5e 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -62,9 +62,9 @@ static void scpi_scale_reading(u64 *value, struct sensor_data *sensor)
}
}
-static int scpi_read_temp(void *dev, int *temp)
+static int scpi_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct scpi_thermal_zone *zone = dev;
+ struct scpi_thermal_zone *zone = tz->devdata;
struct scpi_sensors *scpi_sensors = zone->scpi_sensors;
struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops;
struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id];
@@ -121,7 +121,7 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", sensor->info.name);
}
-static const struct thermal_zone_of_device_ops scpi_sensor_ops = {
+static const struct thermal_zone_device_ops scpi_sensor_ops = {
.get_temp = scpi_read_temp,
};
@@ -275,10 +275,10 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
zone->sensor_id = i;
zone->scpi_sensors = scpi_sensors;
- z = devm_thermal_zone_of_sensor_register(dev,
- sensor->info.sensor_id,
- zone,
- &scpi_sensor_ops);
+ z = devm_thermal_of_zone_register(dev,
+ sensor->info.sensor_id,
+ zone,
+ &scpi_sensor_ops);
/*
* The call to thermal_zone_of_sensor_register returns
* an error for sensors that are not associated with
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index c19df3ade48e..13ac2d8f22c7 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -129,7 +129,7 @@ unlock:
static ssize_t sht4x_interval_write(struct sht4x_data *data, long val)
{
- data->update_interval = clamp_val(val, SHT4X_MIN_POLL_INTERVAL, UINT_MAX);
+ data->update_interval = clamp_val(val, SHT4X_MIN_POLL_INTERVAL, INT_MAX);
return 0;
}
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 018cb5a7651f..b0b05fd12221 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -37,6 +37,7 @@
* 735 0008 0735
*/
+#define DRIVER_NAME "sis5595"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
@@ -191,21 +192,75 @@ struct sis5595_data {
static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */
-static int sis5595_probe(struct platform_device *pdev);
-static int sis5595_remove(struct platform_device *pdev);
+/* ISA access must be locked explicitly. */
+static int sis5595_read_value(struct sis5595_data *data, u8 reg)
+{
+ int res;
-static int sis5595_read_value(struct sis5595_data *data, u8 reg);
-static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value);
-static struct sis5595_data *sis5595_update_device(struct device *dev);
-static void sis5595_init_device(struct sis5595_data *data);
+ mutex_lock(&data->lock);
+ outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+ res = inb_p(data->addr + SIS5595_DATA_REG_OFFSET);
+ mutex_unlock(&data->lock);
+ return res;
+}
-static struct platform_driver sis5595_driver = {
- .driver = {
- .name = "sis5595",
- },
- .probe = sis5595_probe,
- .remove = sis5595_remove,
-};
+static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
+{
+ mutex_lock(&data->lock);
+ outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+ outb_p(value, data->addr + SIS5595_DATA_REG_OFFSET);
+ mutex_unlock(&data->lock);
+}
+
+static struct sis5595_data *sis5595_update_device(struct device *dev)
+{
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+
+ for (i = 0; i <= data->maxins; i++) {
+ data->in[i] =
+ sis5595_read_value(data, SIS5595_REG_IN(i));
+ data->in_min[i] =
+ sis5595_read_value(data,
+ SIS5595_REG_IN_MIN(i));
+ data->in_max[i] =
+ sis5595_read_value(data,
+ SIS5595_REG_IN_MAX(i));
+ }
+ for (i = 0; i < 2; i++) {
+ data->fan[i] =
+ sis5595_read_value(data, SIS5595_REG_FAN(i));
+ data->fan_min[i] =
+ sis5595_read_value(data,
+ SIS5595_REG_FAN_MIN(i));
+ }
+ if (data->maxins == 3) {
+ data->temp =
+ sis5595_read_value(data, SIS5595_REG_TEMP);
+ data->temp_over =
+ sis5595_read_value(data, SIS5595_REG_TEMP_OVER);
+ data->temp_hyst =
+ sis5595_read_value(data, SIS5595_REG_TEMP_HYST);
+ }
+ i = sis5595_read_value(data, SIS5595_REG_FANDIV);
+ data->fan_div[0] = (i >> 4) & 0x03;
+ data->fan_div[1] = i >> 6;
+ data->alarms =
+ sis5595_read_value(data, SIS5595_REG_ALARM1) |
+ (sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
/* 4 Voltages */
static ssize_t in_show(struct device *dev, struct device_attribute *da,
@@ -568,6 +623,15 @@ static const struct attribute_group sis5595_group_temp1 = {
.attrs = sis5595_attributes_temp1,
};
+/* Called when we have found a new SIS5595. */
+static void sis5595_init_device(struct sis5595_data *data)
+{
+ u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
+ if (!(config & 0x01))
+ sis5595_write_value(data, SIS5595_REG_CONFIG,
+ (config & 0xf7) | 0x01);
+}
+
/* This is called when the module is loaded */
static int sis5595_probe(struct platform_device *pdev)
{
@@ -580,7 +644,7 @@ static int sis5595_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, SIS5595_EXTENT,
- sis5595_driver.driver.name))
+ DRIVER_NAME))
return -EBUSY;
data = devm_kzalloc(&pdev->dev, sizeof(struct sis5595_data),
@@ -591,7 +655,7 @@ static int sis5595_probe(struct platform_device *pdev)
mutex_init(&data->lock);
mutex_init(&data->update_lock);
data->addr = res->start;
- data->name = "sis5595";
+ data->name = DRIVER_NAME;
platform_set_drvdata(pdev, data);
/*
@@ -657,85 +721,6 @@ static int sis5595_remove(struct platform_device *pdev)
return 0;
}
-/* ISA access must be locked explicitly. */
-static int sis5595_read_value(struct sis5595_data *data, u8 reg)
-{
- int res;
-
- mutex_lock(&data->lock);
- outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
- res = inb_p(data->addr + SIS5595_DATA_REG_OFFSET);
- mutex_unlock(&data->lock);
- return res;
-}
-
-static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
-{
- mutex_lock(&data->lock);
- outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
- outb_p(value, data->addr + SIS5595_DATA_REG_OFFSET);
- mutex_unlock(&data->lock);
-}
-
-/* Called when we have found a new SIS5595. */
-static void sis5595_init_device(struct sis5595_data *data)
-{
- u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
- if (!(config & 0x01))
- sis5595_write_value(data, SIS5595_REG_CONFIG,
- (config & 0xf7) | 0x01);
-}
-
-static struct sis5595_data *sis5595_update_device(struct device *dev)
-{
- struct sis5595_data *data = dev_get_drvdata(dev);
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
-
- for (i = 0; i <= data->maxins; i++) {
- data->in[i] =
- sis5595_read_value(data, SIS5595_REG_IN(i));
- data->in_min[i] =
- sis5595_read_value(data,
- SIS5595_REG_IN_MIN(i));
- data->in_max[i] =
- sis5595_read_value(data,
- SIS5595_REG_IN_MAX(i));
- }
- for (i = 0; i < 2; i++) {
- data->fan[i] =
- sis5595_read_value(data, SIS5595_REG_FAN(i));
- data->fan_min[i] =
- sis5595_read_value(data,
- SIS5595_REG_FAN_MIN(i));
- }
- if (data->maxins == 3) {
- data->temp =
- sis5595_read_value(data, SIS5595_REG_TEMP);
- data->temp_over =
- sis5595_read_value(data, SIS5595_REG_TEMP_OVER);
- data->temp_hyst =
- sis5595_read_value(data, SIS5595_REG_TEMP_HYST);
- }
- i = sis5595_read_value(data, SIS5595_REG_FANDIV);
- data->fan_div[0] = (i >> 4) & 0x03;
- data->fan_div[1] = i >> 6;
- data->alarms =
- sis5595_read_value(data, SIS5595_REG_ALARM1) |
- (sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
static const struct pci_device_id sis5595_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
@@ -764,7 +749,7 @@ static int sis5595_device_add(unsigned short address)
struct resource res = {
.start = address,
.end = address + SIS5595_EXTENT - 1,
- .name = "sis5595",
+ .name = DRIVER_NAME,
.flags = IORESOURCE_IO,
};
int err;
@@ -773,7 +758,7 @@ static int sis5595_device_add(unsigned short address)
if (err)
goto exit;
- pdev = platform_device_alloc("sis5595", address);
+ pdev = platform_device_alloc(DRIVER_NAME, address);
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -800,6 +785,14 @@ exit:
return err;
}
+static struct platform_driver sis5595_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = sis5595_probe,
+ .remove = sis5595_remove,
+};
+
static int sis5595_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
@@ -886,7 +879,7 @@ exit:
}
static struct pci_driver sis5595_pci_driver = {
- .name = "sis5595",
+ .name = DRIVER_NAME,
.id_table = sis5595_pci_ids,
.probe = sis5595_pci_probe,
};
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index 8c4ed72e5d68..c36bdbe423de 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -671,12 +671,11 @@ out_unregister:
return ret;
}
-static int smm665_remove(struct i2c_client *client)
+static void smm665_remove(struct i2c_client *client)
{
struct smm665_data *data = i2c_get_clientdata(client);
i2c_unregister_device(data->cmdreg);
- return 0;
}
static const struct i2c_device_id smm665_id[] = {
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index a5db15c087ae..70d2152234e2 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -582,7 +582,7 @@ static int smsc47m192_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE);
+ strscpy(info->type, "smsc47m192", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/sparx5-temp.c b/drivers/hwmon/sparx5-temp.c
index 98be48e3a22a..04fd8505e5d6 100644
--- a/drivers/hwmon/sparx5-temp.c
+++ b/drivers/hwmon/sparx5-temp.c
@@ -26,13 +26,6 @@ struct s5_hwmon {
struct clk *clk;
};
-static void s5_temp_clk_disable(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static void s5_temp_enable(struct s5_hwmon *hwmon)
{
u32 val = readl(hwmon->base + TEMP_CFG);
@@ -113,7 +106,6 @@ static int s5_temp_probe(struct platform_device *pdev)
{
struct device *hwmon_dev;
struct s5_hwmon *hwmon;
- int ret;
hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -123,19 +115,10 @@ static int s5_temp_probe(struct platform_device *pdev)
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
- hwmon->clk = devm_clk_get(&pdev->dev, NULL);
+ hwmon->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(hwmon->clk))
return PTR_ERR(hwmon->clk);
- ret = clk_prepare_enable(hwmon->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&pdev->dev, s5_temp_clk_disable,
- hwmon->clk);
- if (ret)
- return ret;
-
s5_temp_enable(hwmon);
hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 0ed28408aa07..2f67c6747ead 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -692,7 +692,7 @@ static int stts751_detect(struct i2c_client *new_client,
}
dev_dbg(&new_client->dev, "Chip %s detected", name);
- strlcpy(info->type, stts751_id[0].name, I2C_NAME_SIZE);
+ strscpy(info->type, stts751_id[0].name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 6a804f5036f4..81cdb012993c 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -352,7 +352,7 @@ static int thmc50_detect(struct i2c_client *client,
pr_debug("thmc50: Detected %s (version %x, revision %x)\n",
type_name, (revision >> 4) - 0xc, revision & 0xf);
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index e867a0c2e539..2bf496a62206 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -260,7 +260,6 @@ static int tmp102_probe(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int tmp102_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -283,9 +282,8 @@ static int tmp102_resume(struct device *dev)
return err;
}
-#endif /* CONFIG_PM */
-static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
static const struct i2c_device_id tmp102_id[] = {
{ "tmp102", 0 },
@@ -302,7 +300,7 @@ MODULE_DEVICE_TABLE(of, tmp102_of_match);
static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = of_match_ptr(tmp102_of_match),
- .driver.pm = &tmp102_dev_pm_ops,
+ .driver.pm = pm_sleep_ptr(&tmp102_dev_pm_ops),
.probe_new = tmp102_probe,
.id_table = tmp102_id,
};
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index 5cab4436aa77..56d5cbf36a45 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -178,7 +178,7 @@ static int tmp103_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __maybe_unused tmp103_suspend(struct device *dev)
+static int tmp103_suspend(struct device *dev)
{
struct regmap *regmap = dev_get_drvdata(dev);
@@ -186,7 +186,7 @@ static int __maybe_unused tmp103_suspend(struct device *dev)
TMP103_CONF_SD_MASK, 0);
}
-static int __maybe_unused tmp103_resume(struct device *dev)
+static int tmp103_resume(struct device *dev)
{
struct regmap *regmap = dev_get_drvdata(dev);
@@ -194,7 +194,7 @@ static int __maybe_unused tmp103_resume(struct device *dev)
TMP103_CONF_SD_MASK, TMP103_CONF_SD);
}
-static SIMPLE_DEV_PM_OPS(tmp103_dev_pm_ops, tmp103_suspend, tmp103_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tmp103_dev_pm_ops, tmp103_suspend, tmp103_resume);
static const struct i2c_device_id tmp103_id[] = {
{ "tmp103", 0 },
@@ -212,7 +212,7 @@ static struct i2c_driver tmp103_driver = {
.driver = {
.name = "tmp103",
.of_match_table = of_match_ptr(tmp103_of_match),
- .pm = &tmp103_dev_pm_ops,
+ .pm = pm_sleep_ptr(&tmp103_dev_pm_ops),
},
.probe_new = tmp103_probe,
.id_table = tmp103_id,
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 5435664c3f6e..acb4ba750b09 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -390,7 +390,7 @@ static int tmp108_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __maybe_unused tmp108_suspend(struct device *dev)
+static int tmp108_suspend(struct device *dev)
{
struct tmp108 *tmp108 = dev_get_drvdata(dev);
@@ -398,7 +398,7 @@ static int __maybe_unused tmp108_suspend(struct device *dev)
TMP108_CONF_MODE_MASK, TMP108_MODE_SHUTDOWN);
}
-static int __maybe_unused tmp108_resume(struct device *dev)
+static int tmp108_resume(struct device *dev)
{
struct tmp108 *tmp108 = dev_get_drvdata(dev);
int err;
@@ -410,7 +410,7 @@ static int __maybe_unused tmp108_resume(struct device *dev)
return err;
}
-static SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
static const struct i2c_device_id tmp108_i2c_ids[] = {
{ "tmp108", 0 },
@@ -429,7 +429,7 @@ MODULE_DEVICE_TABLE(of, tmp108_of_ids);
static struct i2c_driver tmp108_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &tmp108_dev_pm_ops,
+ .pm = pm_sleep_ptr(&tmp108_dev_pm_ops),
.of_match_table = of_match_ptr(tmp108_of_ids),
},
.probe_new = tmp108_probe,
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index cc0a1c219b1f..f358ba679626 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -671,7 +671,7 @@ static int tmp401_detect(struct i2c_client *client,
if (reg > 15)
return -ENODEV;
- strlcpy(info->type, tmp401_id[kind].name, I2C_NAME_SIZE);
+ strscpy(info->type, tmp401_id[kind].name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 1fd8d41d90c8..45fd7fb5ee01 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -353,7 +353,7 @@ static int tmp421_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, tmp421_id[kind].name, I2C_NAME_SIZE);
+ strscpy(info->type, tmp421_id[kind].name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n",
names[kind], client->addr);
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 42762e87b014..68c77c493270 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -372,29 +372,12 @@ static const struct hwmon_chip_info tps23861_chip_info = {
.info = tps23861_info,
};
-static char *tps23861_port_operating_mode(struct tps23861_data *data, int port)
+static char *port_operating_mode_string(uint8_t mode_reg, unsigned int port)
{
- unsigned int regval;
- int mode;
-
- regmap_read(data->regmap, OPERATING_MODE, &regval);
+ unsigned int mode = ~0;
- switch (port) {
- case 1:
- mode = FIELD_GET(OPERATING_MODE_PORT_1_MASK, regval);
- break;
- case 2:
- mode = FIELD_GET(OPERATING_MODE_PORT_2_MASK, regval);
- break;
- case 3:
- mode = FIELD_GET(OPERATING_MODE_PORT_3_MASK, regval);
- break;
- case 4:
- mode = FIELD_GET(OPERATING_MODE_PORT_4_MASK, regval);
- break;
- default:
- mode = -EINVAL;
- }
+ if (port < TPS23861_NUM_PORTS)
+ mode = (mode_reg >> (2 * port)) & OPERATING_MODE_PORT_1_MASK;
switch (mode) {
case OPERATING_MODE_OFF:
@@ -410,15 +393,9 @@ static char *tps23861_port_operating_mode(struct tps23861_data *data, int port)
}
}
-static char *tps23861_port_detect_status(struct tps23861_data *data, int port)
+static char *port_detect_status_string(uint8_t status_reg)
{
- unsigned int regval;
-
- regmap_read(data->regmap,
- PORT_1_STATUS + (port - 1),
- &regval);
-
- switch (FIELD_GET(PORT_STATUS_DETECT_MASK, regval)) {
+ switch (FIELD_GET(PORT_STATUS_DETECT_MASK, status_reg)) {
case PORT_DETECT_UNKNOWN:
return "Unknown device";
case PORT_DETECT_SHORT:
@@ -448,15 +425,9 @@ static char *tps23861_port_detect_status(struct tps23861_data *data, int port)
}
}
-static char *tps23861_port_class_status(struct tps23861_data *data, int port)
+static char *port_class_status_string(uint8_t status_reg)
{
- unsigned int regval;
-
- regmap_read(data->regmap,
- PORT_1_STATUS + (port - 1),
- &regval);
-
- switch (FIELD_GET(PORT_STATUS_CLASS_MASK, regval)) {
+ switch (FIELD_GET(PORT_STATUS_CLASS_MASK, status_reg)) {
case PORT_CLASS_UNKNOWN:
return "Unknown";
case PORT_CLASS_RESERVED:
@@ -479,32 +450,27 @@ static char *tps23861_port_class_status(struct tps23861_data *data, int port)
}
}
-static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
+static char *port_poe_plus_status_string(uint8_t poe_plus, unsigned int port)
{
- unsigned int regval;
-
- regmap_read(data->regmap, POE_PLUS, &regval);
-
- if (BIT(port + 3) & regval)
- return "Yes";
- else
- return "No";
+ return (BIT(port + 4) & poe_plus) ? "Yes" : "No";
}
static int tps23861_port_resistance(struct tps23861_data *data, int port)
{
- u16 regval;
+ unsigned int raw_val;
+ __le16 regval;
regmap_bulk_read(data->regmap,
- PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
+ PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * port,
&regval,
2);
- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ raw_val = le16_to_cpu(regval);
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
case PORT_RESISTANCE_RSN_OTHER:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
case PORT_RESISTANCE_RSN_LOW:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
case PORT_RESISTANCE_RSN_SHORT:
case PORT_RESISTANCE_RSN_OPEN:
default:
@@ -515,14 +481,19 @@ static int tps23861_port_resistance(struct tps23861_data *data, int port)
static int tps23861_port_status_show(struct seq_file *s, void *data)
{
struct tps23861_data *priv = s->private;
- int i;
-
- for (i = 1; i < TPS23861_NUM_PORTS + 1; i++) {
- seq_printf(s, "Port: \t\t%d\n", i);
- seq_printf(s, "Operating mode: %s\n", tps23861_port_operating_mode(priv, i));
- seq_printf(s, "Detected: \t%s\n", tps23861_port_detect_status(priv, i));
- seq_printf(s, "Class: \t\t%s\n", tps23861_port_class_status(priv, i));
- seq_printf(s, "PoE Plus: \t%s\n", tps23861_port_poe_plus_status(priv, i));
+ unsigned int i, mode, poe_plus, status;
+
+ regmap_read(priv->regmap, OPERATING_MODE, &mode);
+ regmap_read(priv->regmap, POE_PLUS, &poe_plus);
+
+ for (i = 0; i < TPS23861_NUM_PORTS; i++) {
+ regmap_read(priv->regmap, PORT_1_STATUS + i, &status);
+
+ seq_printf(s, "Port: \t\t%d\n", i + 1);
+ seq_printf(s, "Operating mode: %s\n", port_operating_mode_string(mode, i));
+ seq_printf(s, "Detected: \t%s\n", port_detect_status_string(status));
+ seq_printf(s, "Class: \t\t%s\n", port_class_status_string(status));
+ seq_printf(s, "PoE Plus: \t%s\n", port_poe_plus_status_string(poe_plus, i));
seq_printf(s, "Resistance: \t%d\n", tps23861_port_resistance(priv, i));
seq_putc(s, '\n');
}
@@ -532,9 +503,17 @@ static int tps23861_port_status_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(tps23861_port_status);
-static void tps23861_init_debugfs(struct tps23861_data *data)
+static void tps23861_init_debugfs(struct tps23861_data *data,
+ struct device *hwmon_dev)
{
- data->debugfs_dir = debugfs_create_dir(data->client->name, NULL);
+ const char *debugfs_name;
+
+ debugfs_name = devm_kasprintf(&data->client->dev, GFP_KERNEL, "%s-%s",
+ data->client->name, dev_name(hwmon_dev));
+ if (!debugfs_name)
+ return;
+
+ data->debugfs_dir = debugfs_create_dir(debugfs_name, NULL);
debugfs_create_file("port_status",
0400,
@@ -583,18 +562,16 @@ static int tps23861_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- tps23861_init_debugfs(data);
+ tps23861_init_debugfs(data, hwmon_dev);
return 0;
}
-static int tps23861_remove(struct i2c_client *client)
+static void tps23861_remove(struct i2c_client *client)
{
struct tps23861_data *data = i2c_get_clientdata(client);
debugfs_remove_recursive(data->debugfs_dir);
-
- return 0;
}
static const struct of_device_id __maybe_unused tps23861_of_match[] = {
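The resistance hunk above declares the bulk-read buffer as __le16 and converts it with le16_to_cpu() before any FIELD_GET(), so the value is interpreted the same way regardless of host endianness. A minimal sketch of that pattern, using placeholder register and mask names (not tps23861 defines) and assuming an 8-bit regmap as the driver uses:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define DEMO_REG_LSB	0x30			/* placeholder register address */
#define DEMO_VAL_MASK	GENMASK(13, 0)		/* placeholder payload mask */

static int demo_read_le16(struct regmap *map, unsigned int *out)
{
	__le16 raw;
	int ret;

	/* Two consecutive 8-bit registers, LSB first, land in 'raw' as-is. */
	ret = regmap_bulk_read(map, DEMO_REG_LSB, &raw, 2);
	if (ret)
		return ret;

	*out = FIELD_GET(DEMO_VAL_MASK, le16_to_cpu(raw));
	return 0;
}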
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 55634110c2f9..37d7374896f6 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -34,6 +34,8 @@
#include <linux/acpi.h>
#include <linux/io.h>
+#define DRIVER_NAME "via686a"
+
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -321,9 +323,6 @@ struct via686a_data {
static struct pci_dev *s_bridge; /* pointer to the (only) via686a */
-static int via686a_probe(struct platform_device *pdev);
-static int via686a_remove(struct platform_device *pdev);
-
static inline int via686a_read_value(struct via686a_data *data, u8 reg)
{
return inb_p(data->addr + reg);
@@ -335,8 +334,76 @@ static inline void via686a_write_value(struct via686a_data *data, u8 reg,
outb_p(value, data->addr + reg);
}
-static struct via686a_data *via686a_update_device(struct device *dev);
-static void via686a_init_device(struct via686a_data *data);
+static void via686a_update_fan_div(struct via686a_data *data)
+{
+ int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+ data->fan_div[0] = (reg >> 4) & 0x03;
+ data->fan_div[1] = reg >> 6;
+}
+
+static struct via686a_data *via686a_update_device(struct device *dev)
+{
+ struct via686a_data *data = dev_get_drvdata(dev);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i <= 4; i++) {
+ data->in[i] =
+ via686a_read_value(data, VIA686A_REG_IN(i));
+ data->in_min[i] = via686a_read_value(data,
+ VIA686A_REG_IN_MIN
+ (i));
+ data->in_max[i] =
+ via686a_read_value(data, VIA686A_REG_IN_MAX(i));
+ }
+ for (i = 1; i <= 2; i++) {
+ data->fan[i - 1] =
+ via686a_read_value(data, VIA686A_REG_FAN(i));
+ data->fan_min[i - 1] = via686a_read_value(data,
+ VIA686A_REG_FAN_MIN(i));
+ }
+ for (i = 0; i <= 2; i++) {
+ data->temp[i] = via686a_read_value(data,
+ VIA686A_REG_TEMP[i]) << 2;
+ data->temp_over[i] =
+ via686a_read_value(data,
+ VIA686A_REG_TEMP_OVER[i]);
+ data->temp_hyst[i] =
+ via686a_read_value(data,
+ VIA686A_REG_TEMP_HYST[i]);
+ }
+ /*
+ * add in lower 2 bits
+ * temp1 uses bits 7-6 of VIA686A_REG_TEMP_LOW1
+ * temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23
+ * temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23
+ */
+ data->temp[0] |= (via686a_read_value(data,
+ VIA686A_REG_TEMP_LOW1)
+ & 0xc0) >> 6;
+ data->temp[1] |=
+ (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
+ 0x30) >> 4;
+ data->temp[2] |=
+ (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
+ 0xc0) >> 6;
+
+ via686a_update_fan_div(data);
+ data->alarms =
+ via686a_read_value(data,
+ VIA686A_REG_ALARM1) |
+ (via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
/* following are the sysfs callback functions */
@@ -654,13 +721,23 @@ static const struct attribute_group via686a_group = {
.attrs = via686a_attributes,
};
-static struct platform_driver via686a_driver = {
- .driver = {
- .name = "via686a",
- },
- .probe = via686a_probe,
- .remove = via686a_remove,
-};
+static void via686a_init_device(struct via686a_data *data)
+{
+ u8 reg;
+
+ /* Start monitoring */
+ reg = via686a_read_value(data, VIA686A_REG_CONFIG);
+ via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F);
+
+ /* Configure temp interrupt mode for continuous-interrupt operation */
+ reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE);
+ via686a_write_value(data, VIA686A_REG_TEMP_MODE,
+ (reg & ~VIA686A_TEMP_MODE_MASK)
+ | VIA686A_TEMP_MODE_CONTINUOUS);
+
+ /* Pre-read fan clock divisor values */
+ via686a_update_fan_div(data);
+}
/* This is called when the module is loaded */
static int via686a_probe(struct platform_device *pdev)
@@ -672,7 +749,7 @@ static int via686a_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, VIA686A_EXTENT,
- via686a_driver.driver.name)) {
+ DRIVER_NAME)) {
dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
@@ -685,7 +762,7 @@ static int via686a_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->addr = res->start;
- data->name = "via686a";
+ data->name = DRIVER_NAME;
mutex_init(&data->update_lock);
/* Initialize the VIA686A chip */
@@ -719,94 +796,13 @@ static int via686a_remove(struct platform_device *pdev)
return 0;
}
-static void via686a_update_fan_div(struct via686a_data *data)
-{
- int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
- data->fan_div[0] = (reg >> 4) & 0x03;
- data->fan_div[1] = reg >> 6;
-}
-
-static void via686a_init_device(struct via686a_data *data)
-{
- u8 reg;
-
- /* Start monitoring */
- reg = via686a_read_value(data, VIA686A_REG_CONFIG);
- via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F);
-
- /* Configure temp interrupt mode for continuous-interrupt operation */
- reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE);
- via686a_write_value(data, VIA686A_REG_TEMP_MODE,
- (reg & ~VIA686A_TEMP_MODE_MASK)
- | VIA686A_TEMP_MODE_CONTINUOUS);
-
- /* Pre-read fan clock divisor values */
- via686a_update_fan_div(data);
-}
-
-static struct via686a_data *via686a_update_device(struct device *dev)
-{
- struct via686a_data *data = dev_get_drvdata(dev);
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- for (i = 0; i <= 4; i++) {
- data->in[i] =
- via686a_read_value(data, VIA686A_REG_IN(i));
- data->in_min[i] = via686a_read_value(data,
- VIA686A_REG_IN_MIN
- (i));
- data->in_max[i] =
- via686a_read_value(data, VIA686A_REG_IN_MAX(i));
- }
- for (i = 1; i <= 2; i++) {
- data->fan[i - 1] =
- via686a_read_value(data, VIA686A_REG_FAN(i));
- data->fan_min[i - 1] = via686a_read_value(data,
- VIA686A_REG_FAN_MIN(i));
- }
- for (i = 0; i <= 2; i++) {
- data->temp[i] = via686a_read_value(data,
- VIA686A_REG_TEMP[i]) << 2;
- data->temp_over[i] =
- via686a_read_value(data,
- VIA686A_REG_TEMP_OVER[i]);
- data->temp_hyst[i] =
- via686a_read_value(data,
- VIA686A_REG_TEMP_HYST[i]);
- }
- /*
- * add in lower 2 bits
- * temp1 uses bits 7-6 of VIA686A_REG_TEMP_LOW1
- * temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23
- * temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23
- */
- data->temp[0] |= (via686a_read_value(data,
- VIA686A_REG_TEMP_LOW1)
- & 0xc0) >> 6;
- data->temp[1] |=
- (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
- 0x30) >> 4;
- data->temp[2] |=
- (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
- 0xc0) >> 6;
-
- via686a_update_fan_div(data);
- data->alarms =
- via686a_read_value(data,
- VIA686A_REG_ALARM1) |
- (via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct platform_driver via686a_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = via686a_probe,
+ .remove = via686a_remove,
+};
static const struct pci_device_id via686a_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
@@ -819,7 +815,7 @@ static int via686a_device_add(unsigned short address)
struct resource res = {
.start = address,
.end = address + VIA686A_EXTENT - 1,
- .name = "via686a",
+ .name = DRIVER_NAME,
.flags = IORESOURCE_IO,
};
int err;
@@ -828,7 +824,7 @@ static int via686a_device_add(unsigned short address)
if (err)
goto exit;
- pdev = platform_device_alloc("via686a", address);
+ pdev = platform_device_alloc(DRIVER_NAME, address);
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -918,7 +914,7 @@ exit:
}
static struct pci_driver via686a_pci_driver = {
- .name = "via686a",
+ .name = DRIVER_NAME,
.id_table = via686a_pci_ids,
.probe = via686a_pci_probe,
};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 03275ac8ba72..3b7f8922b0d5 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -38,6 +38,8 @@ static struct platform_device *pdev;
#define VT8231_BASE_REG 0x70
#define VT8231_ENABLE_REG 0x74
+#define DRIVER_NAME "vt8231"
+
/*
* The VT8231 registers
*
@@ -162,10 +164,6 @@ struct vt8231_data {
};
static struct pci_dev *s_bridge;
-static int vt8231_probe(struct platform_device *pdev);
-static int vt8231_remove(struct platform_device *pdev);
-static struct vt8231_data *vt8231_update_device(struct device *dev);
-static void vt8231_init_device(struct vt8231_data *data);
static inline int vt8231_read_value(struct vt8231_data *data, u8 reg)
{
@@ -178,6 +176,74 @@ static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
outb_p(value, data->addr + reg);
}
+static struct vt8231_data *vt8231_update_device(struct device *dev)
+{
+ struct vt8231_data *data = dev_get_drvdata(dev);
+ int i;
+ u16 low;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i < 6; i++) {
+ if (ISVOLT(i, data->uch_config)) {
+ data->in[i] = vt8231_read_value(data,
+ regvolt[i]);
+ data->in_min[i] = vt8231_read_value(data,
+ regvoltmin[i]);
+ data->in_max[i] = vt8231_read_value(data,
+ regvoltmax[i]);
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ data->fan[i] = vt8231_read_value(data,
+ VT8231_REG_FAN(i));
+ data->fan_min[i] = vt8231_read_value(data,
+ VT8231_REG_FAN_MIN(i));
+ }
+
+ low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
+ low = (low >> 6) | ((low & 0x30) >> 2)
+ | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
+ for (i = 0; i < 6; i++) {
+ if (ISTEMP(i, data->uch_config)) {
+ data->temp[i] = (vt8231_read_value(data,
+ regtemp[i]) << 2)
+ | ((low >> (2 * i)) & 0x03);
+ data->temp_max[i] = vt8231_read_value(data,
+ regtempmax[i]);
+ data->temp_min[i] = vt8231_read_value(data,
+ regtempmin[i]);
+ }
+ }
+
+ i = vt8231_read_value(data, VT8231_REG_FANDIV);
+ data->fan_div[0] = (i >> 4) & 0x03;
+ data->fan_div[1] = i >> 6;
+ data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
+ (vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
+
+ /* Set alarm flags correctly */
+ if (!data->fan[0] && data->fan_min[0])
+ data->alarms |= 0x40;
+ else if (data->fan[0] && !data->fan_min[0])
+ data->alarms &= ~0x40;
+
+ if (!data->fan[1] && data->fan_min[1])
+ data->alarms |= 0x80;
+ else if (data->fan[1] && !data->fan_min[1])
+ data->alarms &= ~0x80;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
/* following are the sysfs callback functions */
static ssize_t in_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -751,29 +817,11 @@ static const struct attribute_group vt8231_group = {
.attrs = vt8231_attributes,
};
-static struct platform_driver vt8231_driver = {
- .driver = {
- .name = "vt8231",
- },
- .probe = vt8231_probe,
- .remove = vt8231_remove,
-};
-
-static const struct pci_device_id vt8231_pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
-
-static int vt8231_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
-
-static struct pci_driver vt8231_pci_driver = {
- .name = "vt8231",
- .id_table = vt8231_pci_ids,
- .probe = vt8231_pci_probe,
-};
+static void vt8231_init_device(struct vt8231_data *data)
+{
+ vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
+ vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
+}
static int vt8231_probe(struct platform_device *pdev)
{
@@ -784,7 +832,7 @@ static int vt8231_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, VT8231_EXTENT,
- vt8231_driver.driver.name)) {
+ DRIVER_NAME)) {
dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
@@ -796,7 +844,7 @@ static int vt8231_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->addr = res->start;
- data->name = "vt8231";
+ data->name = DRIVER_NAME;
mutex_init(&data->update_lock);
vt8231_init_device(data);
@@ -863,86 +911,28 @@ static int vt8231_remove(struct platform_device *pdev)
return 0;
}
-static void vt8231_init_device(struct vt8231_data *data)
-{
- vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
- vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
-}
-
-static struct vt8231_data *vt8231_update_device(struct device *dev)
-{
- struct vt8231_data *data = dev_get_drvdata(dev);
- int i;
- u16 low;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- for (i = 0; i < 6; i++) {
- if (ISVOLT(i, data->uch_config)) {
- data->in[i] = vt8231_read_value(data,
- regvolt[i]);
- data->in_min[i] = vt8231_read_value(data,
- regvoltmin[i]);
- data->in_max[i] = vt8231_read_value(data,
- regvoltmax[i]);
- }
- }
- for (i = 0; i < 2; i++) {
- data->fan[i] = vt8231_read_value(data,
- VT8231_REG_FAN(i));
- data->fan_min[i] = vt8231_read_value(data,
- VT8231_REG_FAN_MIN(i));
- }
-
- low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
- low = (low >> 6) | ((low & 0x30) >> 2)
- | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
- for (i = 0; i < 6; i++) {
- if (ISTEMP(i, data->uch_config)) {
- data->temp[i] = (vt8231_read_value(data,
- regtemp[i]) << 2)
- | ((low >> (2 * i)) & 0x03);
- data->temp_max[i] = vt8231_read_value(data,
- regtempmax[i]);
- data->temp_min[i] = vt8231_read_value(data,
- regtempmin[i]);
- }
- }
-
- i = vt8231_read_value(data, VT8231_REG_FANDIV);
- data->fan_div[0] = (i >> 4) & 0x03;
- data->fan_div[1] = i >> 6;
- data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
- (vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
-
- /* Set alarm flags correctly */
- if (!data->fan[0] && data->fan_min[0])
- data->alarms |= 0x40;
- else if (data->fan[0] && !data->fan_min[0])
- data->alarms &= ~0x40;
-
- if (!data->fan[1] && data->fan_min[1])
- data->alarms |= 0x80;
- else if (data->fan[1] && !data->fan_min[1])
- data->alarms &= ~0x80;
- data->last_updated = jiffies;
- data->valid = true;
- }
+static struct platform_driver vt8231_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = vt8231_probe,
+ .remove = vt8231_remove,
+};
- mutex_unlock(&data->update_lock);
+static const struct pci_device_id vt8231_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
+ { 0, }
+};
- return data;
-}
+MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
static int vt8231_device_add(unsigned short address)
{
struct resource res = {
.start = address,
.end = address + VT8231_EXTENT - 1,
- .name = "vt8231",
+ .name = DRIVER_NAME,
.flags = IORESOURCE_IO,
};
int err;
@@ -951,7 +941,7 @@ static int vt8231_device_add(unsigned short address)
if (err)
goto exit;
- pdev = platform_device_alloc("vt8231", address);
+ pdev = platform_device_alloc(DRIVER_NAME, address);
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -1040,6 +1030,12 @@ exit:
return -ENODEV;
}
+static struct pci_driver vt8231_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = vt8231_pci_ids,
+ .probe = vt8231_pci_probe,
+};
+
static int __init sm_vt8231_init(void)
{
return pci_register_driver(&vt8231_pci_driver);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index af89b32a93a5..939d4c35e713 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1944,7 +1944,7 @@ static int __init w83627ehf_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __maybe_unused w83627ehf_suspend(struct device *dev)
+static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
@@ -1955,7 +1955,7 @@ static int __maybe_unused w83627ehf_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused w83627ehf_resume(struct device *dev)
+static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
int i;
@@ -2010,12 +2010,12 @@ static int __maybe_unused w83627ehf_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(w83627ehf_dev_pm_ops, w83627ehf_suspend, w83627ehf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(w83627ehf_dev_pm_ops, w83627ehf_suspend, w83627ehf_resume);
static struct platform_driver w83627ehf_driver = {
.driver = {
.name = DRVNAME,
- .pm = &w83627ehf_dev_pm_ops,
+ .pm = pm_sleep_ptr(&w83627ehf_dev_pm_ops),
},
};
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 9be277156ed2..b638d672ac45 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -389,14 +389,184 @@ struct w83627hf_data {
#endif
};
-static int w83627hf_probe(struct platform_device *pdev);
-static int w83627hf_remove(struct platform_device *pdev);
+/* Registers 0x50-0x5f are banked */
+static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
+{
+ if ((reg & 0x00f0) == 0x50) {
+ outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
+ outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET);
+ }
+}
+
+/* Not strictly necessary, but play it safe for now */
+static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg)
+{
+ if (reg & 0xff00) {
+ outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
+ outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
+ }
+}
+
+static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
+{
+ int res, word_sized;
+
+ mutex_lock(&data->lock);
+ word_sized = (((reg & 0xff00) == 0x100)
+ || ((reg & 0xff00) == 0x200))
+ && (((reg & 0x00ff) == 0x50)
+ || ((reg & 0x00ff) == 0x53)
+ || ((reg & 0x00ff) == 0x55));
+ w83627hf_set_bank(data, reg);
+ outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
+ res = inb_p(data->addr + W83781D_DATA_REG_OFFSET);
+ if (word_sized) {
+ outb_p((reg & 0xff) + 1,
+ data->addr + W83781D_ADDR_REG_OFFSET);
+ res =
+ (res << 8) + inb_p(data->addr +
+ W83781D_DATA_REG_OFFSET);
+ }
+ w83627hf_reset_bank(data, reg);
+ mutex_unlock(&data->lock);
+ return res;
+}
+
+static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
+{
+ int word_sized;
+
+ mutex_lock(&data->lock);
+ word_sized = (((reg & 0xff00) == 0x100)
+ || ((reg & 0xff00) == 0x200))
+ && (((reg & 0x00ff) == 0x53)
+ || ((reg & 0x00ff) == 0x55));
+ w83627hf_set_bank(data, reg);
+ outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
+ if (word_sized) {
+ outb_p(value >> 8,
+ data->addr + W83781D_DATA_REG_OFFSET);
+ outb_p((reg & 0xff) + 1,
+ data->addr + W83781D_ADDR_REG_OFFSET);
+ }
+ outb_p(value & 0xff,
+ data->addr + W83781D_DATA_REG_OFFSET);
+ w83627hf_reset_bank(data, reg);
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+static void w83627hf_update_fan_div(struct w83627hf_data *data)
+{
+ int reg;
+
+ reg = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
+ data->fan_div[0] = (reg >> 4) & 0x03;
+ data->fan_div[1] = (reg >> 6) & 0x03;
+ if (data->type != w83697hf) {
+ data->fan_div[2] = (w83627hf_read_value(data,
+ W83781D_REG_PIN) >> 6) & 0x03;
+ }
+ reg = w83627hf_read_value(data, W83781D_REG_VBAT);
+ data->fan_div[0] |= (reg >> 3) & 0x04;
+ data->fan_div[1] |= (reg >> 4) & 0x04;
+ if (data->type != w83697hf)
+ data->fan_div[2] |= (reg >> 5) & 0x04;
+}
-static int w83627hf_read_value(struct w83627hf_data *data, u16 reg);
-static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value);
-static void w83627hf_update_fan_div(struct w83627hf_data *data);
-static struct w83627hf_data *w83627hf_update_device(struct device *dev);
-static void w83627hf_init_device(struct platform_device *pdev);
+static struct w83627hf_data *w83627hf_update_device(struct device *dev)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ int i, num_temps = (data->type == w83697hf) ? 2 : 3;
+ int num_pwms = (data->type == w83697hf) ? 2 : 3;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i <= 8; i++) {
+ /* skip missing sensors */
+ if (((data->type == w83697hf) && (i == 1)) ||
+ ((data->type != w83627hf && data->type != w83697hf)
+ && (i == 5 || i == 6)))
+ continue;
+ data->in[i] =
+ w83627hf_read_value(data, W83781D_REG_IN(i));
+ data->in_min[i] =
+ w83627hf_read_value(data,
+ W83781D_REG_IN_MIN(i));
+ data->in_max[i] =
+ w83627hf_read_value(data,
+ W83781D_REG_IN_MAX(i));
+ }
+ for (i = 0; i <= 2; i++) {
+ data->fan[i] =
+ w83627hf_read_value(data, W83627HF_REG_FAN(i));
+ data->fan_min[i] =
+ w83627hf_read_value(data,
+ W83627HF_REG_FAN_MIN(i));
+ }
+ for (i = 0; i <= 2; i++) {
+ u8 tmp = w83627hf_read_value(data,
+ W836X7HF_REG_PWM(data->type, i));
+ /* bits 0-3 are reserved in 627THF */
+ if (data->type == w83627thf)
+ tmp &= 0xf0;
+ data->pwm[i] = tmp;
+ if (i == 1 &&
+ (data->type == w83627hf || data->type == w83697hf))
+ break;
+ }
+ if (data->type == w83627hf) {
+ u8 tmp = w83627hf_read_value(data,
+ W83627HF_REG_PWM_FREQ);
+ data->pwm_freq[0] = tmp & 0x07;
+ data->pwm_freq[1] = (tmp >> 4) & 0x07;
+ } else if (data->type != w83627thf) {
+ for (i = 1; i <= 3; i++) {
+ data->pwm_freq[i - 1] =
+ w83627hf_read_value(data,
+ W83637HF_REG_PWM_FREQ[i - 1]);
+ if (i == 2 && (data->type == w83697hf))
+ break;
+ }
+ }
+ if (data->type != w83627hf) {
+ for (i = 0; i < num_pwms; i++) {
+ u8 tmp = w83627hf_read_value(data,
+ W83627THF_REG_PWM_ENABLE[i]);
+ data->pwm_enable[i] =
+ ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i])
+ & 0x03) + 1;
+ }
+ }
+ for (i = 0; i < num_temps; i++) {
+ data->temp[i] = w83627hf_read_value(
+ data, w83627hf_reg_temp[i]);
+ data->temp_max[i] = w83627hf_read_value(
+ data, w83627hf_reg_temp_over[i]);
+ data->temp_max_hyst[i] = w83627hf_read_value(
+ data, w83627hf_reg_temp_hyst[i]);
+ }
+
+ w83627hf_update_fan_div(data);
+
+ data->alarms =
+ w83627hf_read_value(data, W83781D_REG_ALARM1) |
+ (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) |
+ (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16);
+ i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2);
+ data->beep_mask = (i << 8) |
+ w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
+ w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
#ifdef CONFIG_PM
static int w83627hf_suspend(struct device *dev)
@@ -464,99 +634,171 @@ static const struct dev_pm_ops w83627hf_dev_pm_ops = {
#define W83627HF_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
-static struct platform_driver w83627hf_driver = {
- .driver = {
- .name = DRVNAME,
- .pm = W83627HF_DEV_PM_OPS,
- },
- .probe = w83627hf_probe,
- .remove = w83627hf_remove,
-};
-
-static ssize_t
-in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
-}
-static ssize_t
-in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
-}
-static ssize_t
-in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
+static int w83627thf_read_gpio5(struct platform_device *pdev)
{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ int res = 0xff, sel;
+
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
+ superio_select(sio_data, W83627HF_LD_GPIO5);
+
+ res = 0xff;
+
+ /* Make sure these GPIO pins are enabled */
+ if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
+ dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
+ goto exit;
+ }
+
+ /*
+ * Make sure the pins are configured for input
+ * There must be at least five (VRM 9), and possibly 6 (VRM 10)
+ */
+ sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f;
+ if ((sel & 0x1f) != 0x1f) {
+ dev_dbg(&pdev->dev, "GPIO5 not configured for VID "
+ "function\n");
+ goto exit;
+ }
+
+ dev_info(&pdev->dev, "Reading VID from GPIO5\n");
+ res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel;
+
+exit:
+ superio_exit(sio_data);
+ return res;
}
-static ssize_t
-in_min_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+
+static int w83687thf_read_vid(struct platform_device *pdev)
{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- long val;
- int err;
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ int res = 0xff;
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
- mutex_lock(&data->update_lock);
- data->in_min[nr] = IN_TO_REG(val);
- w83627hf_write_value(data, W83781D_REG_IN_MIN(nr), data->in_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
+ superio_select(sio_data, W83627HF_LD_HWM);
+
+ /* Make sure these GPIO pins are enabled */
+ if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) {
+ dev_dbg(&pdev->dev, "VID disabled, no VID function\n");
+ goto exit;
+ }
+
+ /* Make sure the pins are configured for input */
+ if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) {
+ dev_dbg(&pdev->dev, "VID configured as output, "
+ "no VID function\n");
+ goto exit;
+ }
+
+ res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f;
+
+exit:
+ superio_exit(sio_data);
+ return res;
}
-static ssize_t
-in_max_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+
+static void w83627hf_init_device(struct platform_device *pdev)
{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- long val;
- int err;
+ struct w83627hf_data *data = platform_get_drvdata(pdev);
+ int i;
+ enum chips type = data->type;
+ u8 tmp;
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
+ /* Minimize conflicts with other winbond i2c-only clients... */
+ /* disable i2c subclients... how to disable main i2c client?? */
+ /* force i2c address to relatively uncommon address */
+ if (type == w83627hf) {
+ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
+ w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ }
- mutex_lock(&data->update_lock);
- data->in_max[nr] = IN_TO_REG(val);
- w83627hf_write_value(data, W83781D_REG_IN_MAX(nr), data->in_max[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
+ /* Read VID only once */
+ if (type == w83627hf || type == w83637hf) {
+ int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
+ int hi = w83627hf_read_value(data, W83781D_REG_CHIPID);
+ data->vid = (lo & 0x0f) | ((hi & 0x01) << 4);
+ } else if (type == w83627thf) {
+ data->vid = w83627thf_read_gpio5(pdev);
+ } else if (type == w83687thf) {
+ data->vid = w83687thf_read_vid(pdev);
+ }
-static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
-static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
-static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
-static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
-static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
-static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
-static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
-static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
-static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
-static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
-static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
-static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
-static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
-static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
-static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
-static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
-static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
-static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
-static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
-static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
-static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
-static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
-static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
-static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+ /* Read VRM & OVT Config only once */
+ if (type == w83627thf || type == w83637hf || type == w83687thf) {
+ data->vrm_ovt =
+ w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG);
+ }
+
+ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
+ for (i = 1; i <= 3; i++) {
+ if (!(tmp & BIT_SCFG1[i - 1])) {
+ data->sens[i - 1] = 4;
+ } else {
+ if (w83627hf_read_value
+ (data,
+ W83781D_REG_SCFG2) & BIT_SCFG2[i - 1])
+ data->sens[i - 1] = 1;
+ else
+ data->sens[i - 1] = 2;
+ }
+ if ((type == w83697hf) && (i == 2))
+ break;
+ }
+
+ if(init) {
+ /* Enable temp2 */
+ tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG);
+ if (tmp & 0x01) {
+ dev_warn(&pdev->dev, "Enabling temp2, readings "
+ "might not make sense\n");
+ w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG,
+ tmp & 0xfe);
+ }
+
+ /* Enable temp3 */
+ if (type != w83697hf) {
+ tmp = w83627hf_read_value(data,
+ W83627HF_REG_TEMP3_CONFIG);
+ if (tmp & 0x01) {
+ dev_warn(&pdev->dev, "Enabling temp3, "
+ "readings might not make sense\n");
+ w83627hf_write_value(data,
+ W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe);
+ }
+ }
+ }
+
+ /* Start monitoring */
+ w83627hf_write_value(data, W83781D_REG_CONFIG,
+ (w83627hf_read_value(data,
+ W83781D_REG_CONFIG) & 0xf7)
+ | 0x01);
+
+ /* Enable VBAT monitoring if needed */
+ tmp = w83627hf_read_value(data, W83781D_REG_VBAT);
+ if (!(tmp & 0x01))
+ w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01);
+}
/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
@@ -582,6 +824,7 @@ static ssize_t in0_input_show(struct device *dev,
struct w83627hf_data *data = w83627hf_update_device(dev);
return show_in_0(data, buf, data->in[0]);
}
+static DEVICE_ATTR_RO(in0_input);
static ssize_t in0_min_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -590,13 +833,6 @@ static ssize_t in0_min_show(struct device *dev, struct device_attribute *attr,
return show_in_0(data, buf, data->in_min[0]);
}
-static ssize_t in0_max_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return show_in_0(data, buf, data->in_max[0]);
-}
-
static ssize_t in0_min_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -627,6 +863,15 @@ static ssize_t in0_min_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(in0_min);
+
+static ssize_t in0_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return show_in_0(data, buf, data->in_max[0]);
+}
+
static ssize_t in0_max_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -657,193 +902,16 @@ static ssize_t in0_max_store(struct device *dev,
return count;
}
-static DEVICE_ATTR_RO(in0_input);
-static DEVICE_ATTR_RW(in0_min);
static DEVICE_ATTR_RW(in0_max);
static ssize_t
-fan_input_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr],
- (long)DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t
-fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr],
- (long)DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t
-fan_min_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
- data->fan_min[nr]);
-
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
-static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
-static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
-static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
-static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
-static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
-
-static ssize_t
-temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
-
- u16 tmp = data->temp[nr];
- return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
- : (long) TEMP_FROM_REG(tmp));
-}
-
-static ssize_t
-temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
-
- u16 tmp = data->temp_max[nr];
- return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
- : (long) TEMP_FROM_REG(tmp));
-}
-
-static ssize_t
-temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
-
- u16 tmp = data->temp_max_hyst[nr];
- return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
- : (long) TEMP_FROM_REG(tmp));
-}
-
-static ssize_t
-temp_max_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- u16 tmp;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
- mutex_lock(&data->update_lock);
- data->temp_max[nr] = tmp;
- w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- u16 tmp;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
- mutex_lock(&data->update_lock);
- data->temp_max_hyst[nr] = tmp;
- w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
-
-static ssize_t
-cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
-}
-static DEVICE_ATTR_RO(cpu0_vid);
-
-static ssize_t
-vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%ld\n", (long) data->vrm);
-}
-static ssize_t
-vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- if (val > 255)
- return -EINVAL;
- data->vrm = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(vrm);
-
-static ssize_t
-alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long) data->alarms);
-}
-static DEVICE_ATTR_RO(alarms);
-
-static ssize_t
alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
+
static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
@@ -861,44 +929,6 @@ static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 13);
static ssize_t
-beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n",
- (long)BEEP_MASK_FROM_REG(data->beep_mask));
-}
-
-static ssize_t
-beep_mask_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
-
- /* preserve beep enable */
- data->beep_mask = (data->beep_mask & 0x8000)
- | BEEP_MASK_TO_REG(val);
- w83627hf_write_value(data, W83781D_REG_BEEP_INTS1,
- data->beep_mask & 0xff);
- w83627hf_write_value(data, W83781D_REG_BEEP_INTS3,
- ((data->beep_mask) >> 16) & 0xff);
- w83627hf_write_value(data, W83781D_REG_BEEP_INTS2,
- (data->beep_mask >> 8) & 0xff);
-
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static DEVICE_ATTR_RW(beep_mask);
-
-static ssize_t
beep_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -974,6 +1004,143 @@ static SENSOR_DEVICE_ATTR_RW(temp3_beep, beep, 13);
static SENSOR_DEVICE_ATTR_RW(beep_enable, beep, 15);
static ssize_t
+in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
+}
+
+static ssize_t
+in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
+}
+
+static ssize_t
+in_min_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->in_min[nr] = IN_TO_REG(val);
+ w83627hf_write_value(data, W83781D_REG_IN_MIN(nr), data->in_min[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
+}
+
+static ssize_t
+in_max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->in_max[nr] = IN_TO_REG(val);
+ w83627hf_write_value(data, W83781D_REG_IN_MAX(nr), data->in_max[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+
+static ssize_t
+fan_input_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr],
+ (long)DIV_FROM_REG(data->fan_div[nr])));
+}
+
+static ssize_t
+fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr],
+ (long)DIV_FROM_REG(data->fan_div[nr])));
+}
+
+static ssize_t
+fan_min_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
+ w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
+ data->fan_min[nr]);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+
+static ssize_t
fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -981,6 +1148,7 @@ fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%ld\n",
(long) DIV_FROM_REG(data->fan_div[nr]));
}
+
/*
* Note: we save and restore the fan minimum here, because its value is
* determined in part by the fan divisor. This follows the principle of
@@ -1033,138 +1201,92 @@ static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
static ssize_t
-pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long) data->pwm[nr]);
-}
-static ssize_t
-pwm_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
-
- if (data->type == w83627thf) {
- /* bits 0-3 are reserved in 627THF */
- data->pwm[nr] = PWM_TO_REG(val) & 0xf0;
- w83627hf_write_value(data,
- W836X7HF_REG_PWM(data->type, nr),
- data->pwm[nr] |
- (w83627hf_read_value(data,
- W836X7HF_REG_PWM(data->type, nr)) & 0x0f));
- } else {
- data->pwm[nr] = PWM_TO_REG(val);
- w83627hf_write_value(data,
- W836X7HF_REG_PWM(data->type, nr),
- data->pwm[nr]);
- }
-
- mutex_unlock(&data->update_lock);
- return count;
+ u16 tmp = data->temp[nr];
+ return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
+ : (long) TEMP_FROM_REG(tmp));
}
-static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
-
static ssize_t
-pwm_enable_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm_enable[nr]);
+
+ u16 tmp = data->temp_max[nr];
+ return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
+ : (long) TEMP_FROM_REG(tmp));
}
static ssize_t
-pwm_enable_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+temp_max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
- u8 reg;
- unsigned long val;
+ u16 tmp;
+ long val;
int err;
- err = kstrtoul(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
- if (!val || val > 3) /* modes 1, 2 and 3 are supported */
- return -EINVAL;
+ tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
mutex_lock(&data->update_lock);
- data->pwm_enable[nr] = val;
- reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]);
- reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr];
- w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg);
+ data->temp_max[nr] = tmp;
+ w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp);
mutex_unlock(&data->update_lock);
return count;
}
-static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
-
static ssize_t
-pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
- if (data->type == w83627hf)
- return sprintf(buf, "%ld\n",
- pwm_freq_from_reg_627hf(data->pwm_freq[nr]));
- else
- return sprintf(buf, "%ld\n",
- pwm_freq_from_reg(data->pwm_freq[nr]));
+
+ u16 tmp = data->temp_max_hyst[nr];
+ return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
+ : (long) TEMP_FROM_REG(tmp));
}
static ssize_t
-pwm_freq_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
- static const u8 mask[]={0xF8, 0x8F};
- unsigned long val;
+ u16 tmp;
+ long val;
int err;
- err = kstrtoul(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
+ tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
mutex_lock(&data->update_lock);
-
- if (data->type == w83627hf) {
- data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val);
- w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
- (data->pwm_freq[nr] << (nr*4)) |
- (w83627hf_read_value(data,
- W83627HF_REG_PWM_FREQ) & mask[nr]));
- } else {
- data->pwm_freq[nr] = pwm_freq_to_reg(val);
- w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr],
- data->pwm_freq[nr]);
- }
-
+ data->temp_max_hyst[nr] = tmp;
+ w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp);
mutex_unlock(&data->update_lock);
return count;
}
-static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
static ssize_t
temp_type_show(struct device *dev, struct device_attribute *devattr,
@@ -1236,81 +1358,12 @@ static SENSOR_DEVICE_ATTR_RW(temp2_type, temp_type, 1);
static SENSOR_DEVICE_ATTR_RW(temp3_type, temp_type, 2);
static ssize_t
-name_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static int __init w83627hf_find(int sioaddr, unsigned short *addr,
- struct w83627hf_sio_data *sio_data)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int err;
- u16 val;
-
- static __initconst char *const names[] = {
- "W83627HF",
- "W83627THF",
- "W83697HF",
- "W83637HF",
- "W83687THF",
- };
-
- sio_data->sioaddr = sioaddr;
- err = superio_enter(sio_data);
- if (err)
- return err;
-
- err = -ENODEV;
- val = force_id ? force_id : superio_inb(sio_data, DEVID);
- switch (val) {
- case W627_DEVID:
- sio_data->type = w83627hf;
- break;
- case W627THF_DEVID:
- sio_data->type = w83627thf;
- break;
- case W697_DEVID:
- sio_data->type = w83697hf;
- break;
- case W637_DEVID:
- sio_data->type = w83637hf;
- break;
- case W687THF_DEVID:
- sio_data->type = w83687thf;
- break;
- case 0xff: /* No device at all */
- goto exit;
- default:
- pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val);
- goto exit;
- }
-
- superio_select(sio_data, W83627HF_LD_HWM);
- val = (superio_inb(sio_data, WINB_BASE_REG) << 8) |
- superio_inb(sio_data, WINB_BASE_REG + 1);
- *addr = val & WINB_ALIGNMENT;
- if (*addr == 0) {
- pr_warn("Base address not set, skipping\n");
- goto exit;
- }
-
- val = superio_inb(sio_data, WINB_ACT_REG);
- if (!(val & 0x01)) {
- pr_warn("Enabling HWM logical device\n");
- superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
- }
-
- err = 0;
- pr_info(DRVNAME ": Found %s chip at %#x\n",
- names[sio_data->type], *addr);
-
- exit:
- superio_exit(sio_data);
- return err;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long) data->alarms);
}
+static DEVICE_ATTR_RO(alarms);
#define VIN_UNIT_ATTRS(_X_) \
&sensor_dev_attr_in##_X_##_input.dev_attr.attr, \
@@ -1334,6 +1387,100 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
&sensor_dev_attr_temp##_X_##_alarm.dev_attr.attr, \
&sensor_dev_attr_temp##_X_##_beep.dev_attr.attr
+static ssize_t
+beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n",
+ (long)BEEP_MASK_FROM_REG(data->beep_mask));
+}
+
+static ssize_t
+beep_mask_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ /* preserve beep enable */
+ data->beep_mask = (data->beep_mask & 0x8000)
+ | BEEP_MASK_TO_REG(val);
+ w83627hf_write_value(data, W83781D_REG_BEEP_INTS1,
+ data->beep_mask & 0xff);
+ w83627hf_write_value(data, W83781D_REG_BEEP_INTS3,
+ ((data->beep_mask) >> 16) & 0xff);
+ w83627hf_write_value(data, W83781D_REG_BEEP_INTS2,
+ (data->beep_mask >> 8) & 0xff);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static DEVICE_ATTR_RW(beep_mask);
+
+static ssize_t
+pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long) data->pwm[nr]);
+}
+
+static ssize_t
+pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->type == w83627thf) {
+ /* bits 0-3 are reserved in 627THF */
+ data->pwm[nr] = PWM_TO_REG(val) & 0xf0;
+ w83627hf_write_value(data,
+ W836X7HF_REG_PWM(data->type, nr),
+ data->pwm[nr] |
+ (w83627hf_read_value(data,
+ W836X7HF_REG_PWM(data->type, nr)) & 0x0f));
+ } else {
+ data->pwm[nr] = PWM_TO_REG(val);
+ w83627hf_write_value(data,
+ W836X7HF_REG_PWM(data->type, nr),
+ data->pwm[nr]);
+ }
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+
+static ssize_t
+name_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
static struct attribute *w83627hf_attributes[] = {
&dev_attr_in0_input.attr,
&dev_attr_in0_min.attr,
@@ -1366,6 +1513,131 @@ static const struct attribute_group w83627hf_group = {
.attrs = w83627hf_attributes,
};
+static ssize_t
+pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ if (data->type == w83627hf)
+ return sprintf(buf, "%ld\n",
+ pwm_freq_from_reg_627hf(data->pwm_freq[nr]));
+ else
+ return sprintf(buf, "%ld\n",
+ pwm_freq_from_reg(data->pwm_freq[nr]));
+}
+
+static ssize_t
+pwm_freq_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ static const u8 mask[]={0xF8, 0x8F};
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->type == w83627hf) {
+ data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val);
+ w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
+ (data->pwm_freq[nr] << (nr*4)) |
+ (w83627hf_read_value(data,
+ W83627HF_REG_PWM_FREQ) & mask[nr]));
+ } else {
+ data->pwm_freq[nr] = pwm_freq_to_reg(val);
+ w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr],
+ data->pwm_freq[nr]);
+ }
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
+
+static ssize_t
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
+}
+
+static DEVICE_ATTR_RO(cpu0_vid);
+
+static ssize_t
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%ld\n", (long) data->vrm);
+}
+
+static ssize_t
+vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val > 255)
+ return -EINVAL;
+ data->vrm = val;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(vrm);
+
+static ssize_t
+pwm_enable_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm_enable[nr]);
+}
+
+static ssize_t
+pwm_enable_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ u8 reg;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (!val || val > 3) /* modes 1, 2 and 3 are supported */
+ return -EINVAL;
+ mutex_lock(&data->update_lock);
+ data->pwm_enable[nr] = val;
+ reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]);
+ reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr];
+ w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
+
static struct attribute *w83627hf_attributes_opt[] = {
VIN_UNIT_ATTRS(1),
VIN_UNIT_ATTRS(5),
@@ -1568,349 +1840,81 @@ static int w83627hf_remove(struct platform_device *pdev)
return 0;
}
-/* Registers 0x50-0x5f are banked */
-static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
-{
- if ((reg & 0x00f0) == 0x50) {
- outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
- outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET);
- }
-}
-
-/* Not strictly necessary, but play it safe for now */
-static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg)
-{
- if (reg & 0xff00) {
- outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
- outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
- }
-}
-
-static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
-{
- int res, word_sized;
-
- mutex_lock(&data->lock);
- word_sized = (((reg & 0xff00) == 0x100)
- || ((reg & 0xff00) == 0x200))
- && (((reg & 0x00ff) == 0x50)
- || ((reg & 0x00ff) == 0x53)
- || ((reg & 0x00ff) == 0x55));
- w83627hf_set_bank(data, reg);
- outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
- res = inb_p(data->addr + W83781D_DATA_REG_OFFSET);
- if (word_sized) {
- outb_p((reg & 0xff) + 1,
- data->addr + W83781D_ADDR_REG_OFFSET);
- res =
- (res << 8) + inb_p(data->addr +
- W83781D_DATA_REG_OFFSET);
- }
- w83627hf_reset_bank(data, reg);
- mutex_unlock(&data->lock);
- return res;
-}
+static struct platform_driver w83627hf_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .pm = W83627HF_DEV_PM_OPS,
+ },
+ .probe = w83627hf_probe,
+ .remove = w83627hf_remove,
+};
-static int w83627thf_read_gpio5(struct platform_device *pdev)
+static int __init w83627hf_find(int sioaddr, unsigned short *addr,
+ struct w83627hf_sio_data *sio_data)
{
- struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
- int res = 0xff, sel;
-
- if (superio_enter(sio_data)) {
- /*
- * Some other driver reserved the address space for itself.
- * We don't want to fail driver instantiation because of that,
- * so display a warning and keep going.
- */
- dev_warn(&pdev->dev,
- "Can not read VID data: Failed to enable SuperIO access\n");
- return res;
- }
+ int err;
+ u16 val;
- superio_select(sio_data, W83627HF_LD_GPIO5);
+ static __initconst char *const names[] = {
+ "W83627HF",
+ "W83627THF",
+ "W83697HF",
+ "W83637HF",
+ "W83687THF",
+ };
- res = 0xff;
+ sio_data->sioaddr = sioaddr;
+ err = superio_enter(sio_data);
+ if (err)
+ return err;
- /* Make sure these GPIO pins are enabled */
- if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
- dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
+ err = -ENODEV;
+ val = force_id ? force_id : superio_inb(sio_data, DEVID);
+ switch (val) {
+ case W627_DEVID:
+ sio_data->type = w83627hf;
+ break;
+ case W627THF_DEVID:
+ sio_data->type = w83627thf;
+ break;
+ case W697_DEVID:
+ sio_data->type = w83697hf;
+ break;
+ case W637_DEVID:
+ sio_data->type = w83637hf;
+ break;
+ case W687THF_DEVID:
+ sio_data->type = w83687thf;
+ break;
+ case 0xff: /* No device at all */
goto exit;
- }
-
- /*
- * Make sure the pins are configured for input
- * There must be at least five (VRM 9), and possibly 6 (VRM 10)
- */
- sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f;
- if ((sel & 0x1f) != 0x1f) {
- dev_dbg(&pdev->dev, "GPIO5 not configured for VID "
- "function\n");
+ default:
+ pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val);
goto exit;
}
- dev_info(&pdev->dev, "Reading VID from GPIO5\n");
- res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel;
-
-exit:
- superio_exit(sio_data);
- return res;
-}
-
-static int w83687thf_read_vid(struct platform_device *pdev)
-{
- struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
- int res = 0xff;
-
- if (superio_enter(sio_data)) {
- /*
- * Some other driver reserved the address space for itself.
- * We don't want to fail driver instantiation because of that,
- * so display a warning and keep going.
- */
- dev_warn(&pdev->dev,
- "Can not read VID data: Failed to enable SuperIO access\n");
- return res;
- }
-
superio_select(sio_data, W83627HF_LD_HWM);
-
- /* Make sure these GPIO pins are enabled */
- if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) {
- dev_dbg(&pdev->dev, "VID disabled, no VID function\n");
+ val = (superio_inb(sio_data, WINB_BASE_REG) << 8) |
+ superio_inb(sio_data, WINB_BASE_REG + 1);
+ *addr = val & WINB_ALIGNMENT;
+ if (*addr == 0) {
+ pr_warn("Base address not set, skipping\n");
goto exit;
}
- /* Make sure the pins are configured for input */
- if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) {
- dev_dbg(&pdev->dev, "VID configured as output, "
- "no VID function\n");
- goto exit;
+ val = superio_inb(sio_data, WINB_ACT_REG);
+ if (!(val & 0x01)) {
+ pr_warn("Enabling HWM logical device\n");
+ superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
}
- res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f;
+ err = 0;
+ pr_info(DRVNAME ": Found %s chip at %#x\n",
+ names[sio_data->type], *addr);
-exit:
+ exit:
superio_exit(sio_data);
- return res;
-}
-
-static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
-{
- int word_sized;
-
- mutex_lock(&data->lock);
- word_sized = (((reg & 0xff00) == 0x100)
- || ((reg & 0xff00) == 0x200))
- && (((reg & 0x00ff) == 0x53)
- || ((reg & 0x00ff) == 0x55));
- w83627hf_set_bank(data, reg);
- outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
- if (word_sized) {
- outb_p(value >> 8,
- data->addr + W83781D_DATA_REG_OFFSET);
- outb_p((reg & 0xff) + 1,
- data->addr + W83781D_ADDR_REG_OFFSET);
- }
- outb_p(value & 0xff,
- data->addr + W83781D_DATA_REG_OFFSET);
- w83627hf_reset_bank(data, reg);
- mutex_unlock(&data->lock);
- return 0;
-}
-
-static void w83627hf_init_device(struct platform_device *pdev)
-{
- struct w83627hf_data *data = platform_get_drvdata(pdev);
- int i;
- enum chips type = data->type;
- u8 tmp;
-
- /* Minimize conflicts with other winbond i2c-only clients... */
- /* disable i2c subclients... how to disable main i2c client?? */
- /* force i2c address to relatively uncommon address */
- if (type == w83627hf) {
- w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
- w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
- }
-
- /* Read VID only once */
- if (type == w83627hf || type == w83637hf) {
- int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
- int hi = w83627hf_read_value(data, W83781D_REG_CHIPID);
- data->vid = (lo & 0x0f) | ((hi & 0x01) << 4);
- } else if (type == w83627thf) {
- data->vid = w83627thf_read_gpio5(pdev);
- } else if (type == w83687thf) {
- data->vid = w83687thf_read_vid(pdev);
- }
-
- /* Read VRM & OVT Config only once */
- if (type == w83627thf || type == w83637hf || type == w83687thf) {
- data->vrm_ovt =
- w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG);
- }
-
- tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
- for (i = 1; i <= 3; i++) {
- if (!(tmp & BIT_SCFG1[i - 1])) {
- data->sens[i - 1] = 4;
- } else {
- if (w83627hf_read_value
- (data,
- W83781D_REG_SCFG2) & BIT_SCFG2[i - 1])
- data->sens[i - 1] = 1;
- else
- data->sens[i - 1] = 2;
- }
- if ((type == w83697hf) && (i == 2))
- break;
- }
-
- if(init) {
- /* Enable temp2 */
- tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG);
- if (tmp & 0x01) {
- dev_warn(&pdev->dev, "Enabling temp2, readings "
- "might not make sense\n");
- w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG,
- tmp & 0xfe);
- }
-
- /* Enable temp3 */
- if (type != w83697hf) {
- tmp = w83627hf_read_value(data,
- W83627HF_REG_TEMP3_CONFIG);
- if (tmp & 0x01) {
- dev_warn(&pdev->dev, "Enabling temp3, "
- "readings might not make sense\n");
- w83627hf_write_value(data,
- W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe);
- }
- }
- }
-
- /* Start monitoring */
- w83627hf_write_value(data, W83781D_REG_CONFIG,
- (w83627hf_read_value(data,
- W83781D_REG_CONFIG) & 0xf7)
- | 0x01);
-
- /* Enable VBAT monitoring if needed */
- tmp = w83627hf_read_value(data, W83781D_REG_VBAT);
- if (!(tmp & 0x01))
- w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01);
-}
-
-static void w83627hf_update_fan_div(struct w83627hf_data *data)
-{
- int reg;
-
- reg = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
- data->fan_div[0] = (reg >> 4) & 0x03;
- data->fan_div[1] = (reg >> 6) & 0x03;
- if (data->type != w83697hf) {
- data->fan_div[2] = (w83627hf_read_value(data,
- W83781D_REG_PIN) >> 6) & 0x03;
- }
- reg = w83627hf_read_value(data, W83781D_REG_VBAT);
- data->fan_div[0] |= (reg >> 3) & 0x04;
- data->fan_div[1] |= (reg >> 4) & 0x04;
- if (data->type != w83697hf)
- data->fan_div[2] |= (reg >> 5) & 0x04;
-}
-
-static struct w83627hf_data *w83627hf_update_device(struct device *dev)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- int i, num_temps = (data->type == w83697hf) ? 2 : 3;
- int num_pwms = (data->type == w83697hf) ? 2 : 3;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- for (i = 0; i <= 8; i++) {
- /* skip missing sensors */
- if (((data->type == w83697hf) && (i == 1)) ||
- ((data->type != w83627hf && data->type != w83697hf)
- && (i == 5 || i == 6)))
- continue;
- data->in[i] =
- w83627hf_read_value(data, W83781D_REG_IN(i));
- data->in_min[i] =
- w83627hf_read_value(data,
- W83781D_REG_IN_MIN(i));
- data->in_max[i] =
- w83627hf_read_value(data,
- W83781D_REG_IN_MAX(i));
- }
- for (i = 0; i <= 2; i++) {
- data->fan[i] =
- w83627hf_read_value(data, W83627HF_REG_FAN(i));
- data->fan_min[i] =
- w83627hf_read_value(data,
- W83627HF_REG_FAN_MIN(i));
- }
- for (i = 0; i <= 2; i++) {
- u8 tmp = w83627hf_read_value(data,
- W836X7HF_REG_PWM(data->type, i));
- /* bits 0-3 are reserved in 627THF */
- if (data->type == w83627thf)
- tmp &= 0xf0;
- data->pwm[i] = tmp;
- if (i == 1 &&
- (data->type == w83627hf || data->type == w83697hf))
- break;
- }
- if (data->type == w83627hf) {
- u8 tmp = w83627hf_read_value(data,
- W83627HF_REG_PWM_FREQ);
- data->pwm_freq[0] = tmp & 0x07;
- data->pwm_freq[1] = (tmp >> 4) & 0x07;
- } else if (data->type != w83627thf) {
- for (i = 1; i <= 3; i++) {
- data->pwm_freq[i - 1] =
- w83627hf_read_value(data,
- W83637HF_REG_PWM_FREQ[i - 1]);
- if (i == 2 && (data->type == w83697hf))
- break;
- }
- }
- if (data->type != w83627hf) {
- for (i = 0; i < num_pwms; i++) {
- u8 tmp = w83627hf_read_value(data,
- W83627THF_REG_PWM_ENABLE[i]);
- data->pwm_enable[i] =
- ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i])
- & 0x03) + 1;
- }
- }
- for (i = 0; i < num_temps; i++) {
- data->temp[i] = w83627hf_read_value(
- data, w83627hf_reg_temp[i]);
- data->temp_max[i] = w83627hf_read_value(
- data, w83627hf_reg_temp_over[i]);
- data->temp_max_hyst[i] = w83627hf_read_value(
- data, w83627hf_reg_temp_hyst[i]);
- }
-
- w83627hf_update_fan_div(data);
-
- data->alarms =
- w83627hf_read_value(data, W83781D_REG_ALARM1) |
- (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) |
- (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16);
- i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2);
- data->beep_mask = (i << 8) |
- w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
- w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
+ return err;
}
static int __init w83627hf_device_add(unsigned short address,
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index b3579721265f..dacabf25e83f 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1171,7 +1171,7 @@ w83781d_detect(struct i2c_client *client, struct i2c_board_info *info)
if (isa)
mutex_unlock(&isa->update_lock);
- strlcpy(info->type, client_name, I2C_NAME_SIZE);
+ strscpy(info->type, client_name, I2C_NAME_SIZE);
return 0;
@@ -1239,7 +1239,7 @@ static int w83781d_probe(struct i2c_client *client)
return err;
}
-static int
+static void
w83781d_remove(struct i2c_client *client)
{
struct w83781d_data *data = i2c_get_clientdata(client);
@@ -1250,8 +1250,6 @@ w83781d_remove(struct i2c_client *client)
i2c_unregister_device(data->lm75[0]);
i2c_unregister_device(data->lm75[1]);
-
- return 0;
}
static int
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 80a9a78d7ce9..eaf691365023 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -315,7 +315,7 @@ struct w83791d_data {
static int w83791d_probe(struct i2c_client *client);
static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83791d_remove(struct i2c_client *client);
+static void w83791d_remove(struct i2c_client *client);
static int w83791d_read(struct i2c_client *client, u8 reg);
static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
@@ -1333,7 +1333,7 @@ static int w83791d_detect(struct i2c_client *client,
if (val1 != 0x71 || val2 != 0x5c)
return -ENODEV;
- strlcpy(info->type, "w83791d", I2C_NAME_SIZE);
+ strscpy(info->type, "w83791d", I2C_NAME_SIZE);
return 0;
}
@@ -1405,14 +1405,12 @@ error4:
return err;
}
-static int w83791d_remove(struct i2c_client *client)
+static void w83791d_remove(struct i2c_client *client)
{
struct w83791d_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
-
- return 0;
}
static void w83791d_init_client(struct i2c_client *client)
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 31a1cdc30877..6d160eee1446 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -286,7 +286,7 @@ struct w83792d_data {
static int w83792d_probe(struct i2c_client *client);
static int w83792d_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83792d_remove(struct i2c_client *client);
+static void w83792d_remove(struct i2c_client *client);
static struct w83792d_data *w83792d_update_device(struct device *dev);
#ifdef DEBUG
@@ -1346,7 +1346,7 @@ w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
if (val1 != 0x7a || val2 != 0x5c)
return -ENODEV;
- strlcpy(info->type, "w83792d", I2C_NAME_SIZE);
+ strscpy(info->type, "w83792d", I2C_NAME_SIZE);
return 0;
}
@@ -1429,7 +1429,7 @@ exit_remove_files:
return err;
}
-static int
+static void
w83792d_remove(struct i2c_client *client)
{
struct w83792d_data *data = i2c_get_clientdata(client);
@@ -1440,8 +1440,6 @@ w83792d_remove(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
-
- return 0;
}
static void
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0a65d164c8f0..a4926d907198 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -285,7 +285,7 @@ static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client);
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83793_remove(struct i2c_client *client);
+static void w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
@@ -1495,7 +1495,7 @@ static struct notifier_block watchdog_notifier = {
* Init / remove routines
*/
-static int w83793_remove(struct i2c_client *client)
+static void w83793_remove(struct i2c_client *client)
{
struct w83793_data *data = i2c_get_clientdata(client);
struct device *dev = &client->dev;
@@ -1554,8 +1554,6 @@ static int w83793_remove(struct i2c_client *client)
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
mutex_unlock(&watchdog_data_mutex);
-
- return 0;
}
static int
@@ -1636,7 +1634,7 @@ static int w83793_detect(struct i2c_client *client,
if (chip_id != 0x7b)
return -ENODEV;
- strlcpy(info->type, "w83793", I2C_NAME_SIZE);
+ strscpy(info->type, "w83793", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 45b12c4287df..84ff5c57e98c 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -1967,7 +1967,7 @@ static int w83795_detect(struct i2c_client *client,
else
chip_name = "w83795g";
- strlcpy(info->type, chip_name, I2C_NAME_SIZE);
+ strscpy(info->type, chip_name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Found %s rev. %c at 0x%02hx\n", chip_name,
'A' + (device_id & 0xf), address);
@@ -2235,14 +2235,12 @@ exit_remove:
return err;
}
-static int w83795_remove(struct i2c_client *client)
+static void w83795_remove(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
w83795_handle_files(&client->dev, device_remove_file_wrapper);
-
- return 0;
}
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index a41f989d66e2..f3622de0d96f 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -65,7 +65,7 @@ static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
static int w83l785ts_probe(struct i2c_client *client);
static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83l785ts_remove(struct i2c_client *client);
+static void w83l785ts_remove(struct i2c_client *client);
static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval);
static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
@@ -157,7 +157,7 @@ static int w83l785ts_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "w83l785ts", I2C_NAME_SIZE);
+ strscpy(info->type, "w83l785ts", I2C_NAME_SIZE);
return 0;
}
@@ -203,7 +203,7 @@ exit_remove:
return err;
}
-static int w83l785ts_remove(struct i2c_client *client)
+static void w83l785ts_remove(struct i2c_client *client)
{
struct w83l785ts_data *data = i2c_get_clientdata(client);
@@ -212,8 +212,6 @@ static int w83l785ts_remove(struct i2c_client *client)
&sensor_dev_attr_temp1_input.dev_attr);
device_remove_file(&client->dev,
&sensor_dev_attr_temp1_max.dev_attr);
-
- return 0;
}
static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval)
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 11ba23c1af85..2c4646fa8426 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -687,7 +687,7 @@ w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, "w83l786ng", I2C_NAME_SIZE);
+ strscpy(info->type, "w83l786ng", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index d39660a3e50c..80fefaba58ee 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -966,7 +966,7 @@ static inline bool cpu_supports_sysreg_trace(void)
{
u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
- return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+ return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}
static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
@@ -1054,7 +1054,7 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
u64 trfcr;
drvdata->trfcr = 0;
- if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+ if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
return;
/*
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
index 30e4d7db4f8e..98ff1b17ad07 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.h
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -20,7 +20,8 @@
static inline bool is_trbe_available(void)
{
u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
- unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+ unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0,
+ ID_AA64DFR0_EL1_TraceBuffer_SHIFT);
return trbe >= 0b0001;
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 7284206b278b..264e780ae32e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -488,8 +488,8 @@ config I2C_BCM_KONA
config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
- depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCMBCA || \
- ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || \
+ BMIPS_GENERIC || COMPILE_TEST
default y
help
If you say yes to this option, support will be included for the
@@ -1267,6 +1267,16 @@ config I2C_PARPORT
This support is also available as a module. If so, the module
will be called i2c-parport.
+config I2C_PCI1XXXX
+ tristate "PCI1XXXX I2C Host Adapter"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for
+ Microchip PCI1XXXX's I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-mchp-pci1xxxx.
+
config I2C_ROBOTFUZZ_OSIF
tristate "RobotFuzz Open Source InterFace USB adapter"
depends on USB
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c5cac15f075c..e73cdb1d2b5a 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -133,6 +133,7 @@ obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
+obj-$(CONFIG_I2C_PCI1XXXX) += i2c-mchp-pci1xxxx.o
obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 84b7e6cbc67b..423fe0c8a471 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -244,14 +244,18 @@ static const struct i2c_adapter_quirks amd_i2c_dev_quirks = {
static int i2c_amd_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct amd_i2c_dev *i2c_dev;
- struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct amd_mp2_dev *mp2_dev;
- const char *uid;
+ u64 uid;
- if (!adev)
- return -ENODEV;
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing UID/bus id!\n");
+ if (uid >= 2)
+ return dev_err_probe(dev, -EINVAL, "incorrect UID/bus id \"%llu\"!\n", uid);
+ dev_dbg(dev, "bus id is %llu\n", uid);
/* The ACPI namespace doesn't contain information about which MP2 PCI
* device an AMDI0011 ACPI device is related to, so assume that there's
@@ -266,6 +270,7 @@ static int i2c_amd_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
+ i2c_dev->common.bus_id = uid;
i2c_dev->common.mp2_dev = mp2_dev;
i2c_dev->pdev = pdev;
platform_set_drvdata(pdev, i2c_dev);
@@ -276,20 +281,6 @@ static int i2c_amd_probe(struct platform_device *pdev)
i2c_dev->common.resume = &i2c_amd_resume;
#endif
- uid = adev->pnp.unique_id;
- if (!uid) {
- dev_err(&pdev->dev, "missing UID/bus id!\n");
- return -EINVAL;
- } else if (strcmp(uid, "0") == 0) {
- i2c_dev->common.bus_id = 0;
- } else if (strcmp(uid, "1") == 0) {
- i2c_dev->common.bus_id = 1;
- } else {
- dev_err(&pdev->dev, "incorrect UID/bus id \"%s\"!\n", uid);
- return -EINVAL;
- }
- dev_dbg(&pdev->dev, "bus id is %u\n", i2c_dev->common.bus_id);
-
/* Register the adapter */
amd_mp2_pm_runtime_get(mp2_dev);
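The amd-mp2-plat hunks above replace the hand-rolled parsing of the ACPI _UID string with acpi_dev_uid_to_integer(), which also covers the missing-companion case in one call. A minimal sketch of the same pattern, assuming a hypothetical platform driver (my_i2c_probe is illustrative; the 0/1 bus-id limit matches the patch):

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int my_i2c_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        u64 uid;
        int ret;

        /* Fails if there is no ACPI companion or the _UID is not numeric. */
        ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
        if (ret)
                return dev_err_probe(dev, ret, "missing UID/bus id!\n");

        /* This controller only exposes buses 0 and 1. */
        if (uid >= 2)
                return dev_err_probe(dev, -EINVAL, "incorrect UID/bus id \"%llu\"!\n", uid);

        dev_dbg(dev, "bus id is %llu\n", uid);
        return 0;
}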
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 33f5588a50c0..fe0cd205502d 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -10,10 +10,12 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
@@ -127,6 +129,8 @@
#define CDNS_I2C_TIMEOUT_MAX 0xFF
#define CDNS_I2C_BROKEN_HOLD_BIT BIT(0)
+#define CDNS_I2C_POLL_US 100000
+#define CDNS_I2C_TIMEOUT_US 500000
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
@@ -204,6 +208,7 @@ struct cdns_i2c {
struct notifier_block clk_rate_change_nb;
u32 quirks;
u32 ctrl_reg;
+ struct i2c_bus_recovery_info rinfo;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
u16 ctrl_reg_diva_divb;
struct i2c_client *slave;
@@ -840,8 +845,14 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
#endif
/* Check if the bus is free */
- if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
+
+ ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET,
+ reg,
+ !(reg & CDNS_I2C_SR_BA),
+ CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US);
+ if (ret) {
ret = -EAGAIN;
+ i2c_recover_bus(adap);
goto out;
}
@@ -1250,6 +1261,12 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->quirks = data->quirks;
}
+ id->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(id->rinfo.pinctrl)) {
+ dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(id->rinfo.pinctrl);
+ }
+
id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
if (IS_ERR(id->membase))
return PTR_ERR(id->membase);
@@ -1266,6 +1283,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->adap.retries = 3; /* Default retry value. */
id->adap.algo_data = id;
id->adap.dev.parent = &pdev->dev;
+ id->adap.bus_recovery_info = &id->rinfo;
init_completion(&id->xfer_done);
snprintf(id->adap.name, sizeof(id->adap.name),
"Cadence I2C at %08lx", (unsigned long)r_mem->start);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 471c47db546b..c836cf884185 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -823,7 +823,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
r = pm_runtime_resume_and_get(dev->dev);
if (r < 0) {
dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
- return r;
+ goto err_pm;
}
i2c_davinci_init(dev);
@@ -882,6 +882,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
err_unuse_clocks:
pm_runtime_dont_use_autosuspend(dev->dev);
pm_runtime_put_sync(dev->dev);
+err_pm:
pm_runtime_disable(dev->dev);
return r;
diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c
index b624356c945f..8f36167bce62 100644
--- a/drivers/i2c/busses/i2c-designware-amdpsp.c
+++ b/drivers/i2c/busses/i2c-designware-amdpsp.c
@@ -6,6 +6,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/psp-sev.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <asm/msr.h>
@@ -15,6 +16,8 @@
#define PSP_MBOX_OFFSET 0x10570
#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define PSP_I2C_RESERVATION_TIME_MS 100
+
#define PSP_I2C_REQ_BUS_CMD 0x64
#define PSP_I2C_REQ_RETRY_CNT 400
#define PSP_I2C_REQ_RETRY_DELAY_US (25 * USEC_PER_MSEC)
@@ -240,6 +243,41 @@ cleanup:
return ret;
}
+static void release_bus(void)
+{
+ int status;
+
+ if (!psp_i2c_sem_acquired)
+ return;
+
+ status = psp_send_i2c_req(PSP_I2C_REQ_RELEASE);
+ if (status)
+ return;
+
+ dev_dbg(psp_i2c_dev, "PSP semaphore held for %ums\n",
+ jiffies_to_msecs(jiffies - psp_i2c_sem_acquired));
+
+ psp_i2c_sem_acquired = 0;
+}
+
+static void psp_release_i2c_bus_deferred(struct work_struct *work)
+{
+ mutex_lock(&psp_i2c_access_mutex);
+
+	/*
+	 * If there is any pending transaction, we cannot release the bus here;
+	 * psp_release_i2c_bus() will take care of it later.
+	 */
+ if (psp_i2c_access_count)
+ goto cleanup;
+
+ release_bus();
+
+cleanup:
+ mutex_unlock(&psp_i2c_access_mutex);
+}
+static DECLARE_DELAYED_WORK(release_queue, psp_release_i2c_bus_deferred);
+
static int psp_acquire_i2c_bus(void)
{
int status;
@@ -250,21 +288,23 @@ static int psp_acquire_i2c_bus(void)
if (psp_i2c_mbox_fail)
goto cleanup;
+ psp_i2c_access_count++;
+
/*
- * Simply increment usage counter and return if PSP semaphore was
- * already taken by kernel.
+	 * No need to request bus arbitration again while we are inside the
+	 * semaphore reservation period.
*/
- if (psp_i2c_access_count) {
- psp_i2c_access_count++;
+ if (psp_i2c_sem_acquired)
goto cleanup;
- }
status = psp_send_i2c_req(PSP_I2C_REQ_ACQUIRE);
if (status)
goto cleanup;
psp_i2c_sem_acquired = jiffies;
- psp_i2c_access_count++;
+
+ schedule_delayed_work(&release_queue,
+ msecs_to_jiffies(PSP_I2C_RESERVATION_TIME_MS));
/*
* In case of errors with PSP arbitrator psp_i2c_mbox_fail variable is
@@ -279,8 +319,6 @@ cleanup:
static void psp_release_i2c_bus(void)
{
- int status;
-
mutex_lock(&psp_i2c_access_mutex);
/* Return early if mailbox was malfunctional */
@@ -295,13 +333,12 @@ static void psp_release_i2c_bus(void)
if (psp_i2c_access_count)
goto cleanup;
- /* Send a release command to PSP */
- status = psp_send_i2c_req(PSP_I2C_REQ_RELEASE);
- if (status)
- goto cleanup;
-
- dev_dbg(psp_i2c_dev, "PSP semaphore held for %ums\n",
- jiffies_to_msecs(jiffies - psp_i2c_sem_acquired));
+ /*
+ * Send a release command to PSP if the semaphore reservation timeout
+ * elapsed but x86 still owns the controller.
+ */
+ if (!delayed_work_pending(&release_queue))
+ release_bus();
cleanup:
mutex_unlock(&psp_i2c_access_mutex);
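The amdpsp hunks above stop sending PSP_I2C_REQ_RELEASE after every transfer: psp_acquire_i2c_bus() now schedules a delayed work item, and the bus is only handed back once that reservation window expires with no users left. A stripped-down sketch of the same deferred-release idea (all names here are hypothetical, and the real driver keeps extra error handling around the PSP mailbox):

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#define RESERVATION_TIME_MS     100

static DEFINE_MUTEX(arb_mutex);
static unsigned int arb_users;
static unsigned long arb_acquired;      /* jiffies of last acquire, 0 if idle */

static void arb_send_release(void)
{
        /* Placeholder for the expensive "give the bus back" request. */
        arb_acquired = 0;
}

static void arb_release_deferred(struct work_struct *work)
{
        mutex_lock(&arb_mutex);
        /* Keep the reservation if a transfer is still in flight. */
        if (!arb_users && arb_acquired)
                arb_send_release();
        mutex_unlock(&arb_mutex);
}
static DECLARE_DELAYED_WORK(arb_release_work, arb_release_deferred);

static void arb_acquire(void)
{
        mutex_lock(&arb_mutex);
        arb_users++;
        if (!arb_acquired) {
                /* Placeholder for the expensive "take the bus" request. */
                arb_acquired = jiffies;
                schedule_delayed_work(&arb_release_work,
                                      msecs_to_jiffies(RESERVATION_TIME_MS));
        }
        mutex_unlock(&arb_mutex);
}

static void arb_release(void)
{
        mutex_lock(&arb_mutex);
        arb_users--;
        /* Release immediately only if the reservation window already expired. */
        if (!arb_users && !delayed_work_pending(&arb_release_work))
                arb_send_release();
        mutex_unlock(&arb_mutex);
}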
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 608e61209455..e499f96506c5 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -27,7 +27,6 @@
#include "i2c-ccgx-ucsi.h"
#define DRIVER_NAME "i2c-designware-pci"
-#define AMD_CLK_RATE_HZ 100000
enum dw_pci_ctl_id_t {
medfield,
@@ -100,11 +99,6 @@ static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
return 25000;
}
-static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
-{
- return AMD_CLK_RATE_HZ;
-}
-
static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
@@ -126,15 +120,6 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
return -ENODEV;
}
-static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
-{
- struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
-
- dev->flags |= MODEL_AMD_NAVI_GPU;
- dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
- return 0;
-}
-
static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
/*
@@ -159,6 +144,20 @@ static u32 ehl_get_clk_rate_khz(struct dw_i2c_dev *dev)
return 100000;
}
+static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return 100000;
+}
+
+static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+
+ dev->flags |= MODEL_AMD_NAVI_GPU;
+ dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ return 0;
+}
+
static struct dw_pci_controller dw_pci_controllers[] = {
[medfield] = {
.bus_num = -1,
@@ -243,6 +242,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
int r;
struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
+ struct i2c_timings *t;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
return dev_err_probe(&pdev->dev, -EINVAL,
@@ -263,7 +263,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return dev_err_probe(&pdev->dev, r,
"I/O memory remapping failed\n");
- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -272,12 +272,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return r;
dev->get_clk_rate_khz = controller->get_clk_rate_khz;
- dev->timings.bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
dev->irq = pci_irq_vector(pdev, 0);
dev->flags |= controller->flags;
+ t = &dev->timings;
+ i2c_parse_fw_timings(&pdev->dev, t, false);
+
pci_set_drvdata(pdev, dev);
if (controller->setup) {
@@ -389,6 +391,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4bbe), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bbf), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bc0), elkhartlake },
+ /* AMD NAVI */
{ PCI_VDEVICE(ATI, 0x7314), navi_amd },
{ PCI_VDEVICE(ATI, 0x73a4), navi_amd },
{ PCI_VDEVICE(ATI, 0x73e4), navi_amd },
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a176296f4fff..e06509edc5f3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1838,6 +1838,7 @@ static struct pci_driver i801_driver = {
.shutdown = i801_shutdown,
.driver = {
.pm = &i801_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index b51ab3cad2b1..188f2a36d2fd 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -94,7 +94,8 @@ enum lpi2c_imx_pincfg {
struct lpi2c_imx_struct {
struct i2c_adapter adapter;
- struct clk *clk;
+ int num_clks;
+ struct clk_bulk_data *clks;
void __iomem *base;
__u8 *rx_buf;
__u8 *tx_buf;
@@ -207,7 +208,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
lpi2c_imx_set_mode(lpi2c_imx);
- clk_rate = clk_get_rate(lpi2c_imx->clk);
+ clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk);
if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
filt = 0;
else
@@ -561,11 +562,12 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
- lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lpi2c_imx->clk)) {
- dev_err(&pdev->dev, "can't get I2C peripheral clock\n");
- return PTR_ERR(lpi2c_imx->clk);
+ ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get I2C peripheral clock, ret=%d\n", ret);
+ return ret;
}
+ lpi2c_imx->num_clks = ret;
ret = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &lpi2c_imx->bitrate);
@@ -582,11 +584,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
platform_set_drvdata(pdev, lpi2c_imx);
- ret = clk_prepare_enable(lpi2c_imx->clk);
- if (ret) {
- dev_err(&pdev->dev, "clk enable failed %d\n", ret);
+ ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
+ if (ret)
return ret;
- }
pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -633,7 +633,7 @@ static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
- clk_disable_unprepare(lpi2c_imx->clk);
+ clk_bulk_disable_unprepare(lpi2c_imx->num_clks, lpi2c_imx->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -645,7 +645,7 @@ static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
int ret;
pinctrl_pm_select_default_state(dev);
- ret = clk_prepare_enable(lpi2c_imx->clk);
+ ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
if (ret) {
dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret);
return ret;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 78fb1a4274a6..3082183bd66a 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
hrtimer_cancel(&i2c_imx->slave_timer);
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret >= 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 6078fa0c0d48..fe2349590f75 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -937,11 +937,8 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "dma_set_mask fail\n");
- return -ENODEV;
- }
+ dev_err(&pdev->dev, "dma_set_mask fail\n");
+ return -ENODEV;
}
err = ismt_dev_init(priv);
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
new file mode 100644
index 000000000000..f5342201eb6b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PCI1XXXX I2C adapter driver for the PCIe switch that exposes
+ * an I2C controller in one of its downstream functions
+ *
+ * Copyright (C) 2021 - 2022 Microchip Technology Inc.
+ *
+ * Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
+ * Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#define SMBUS_MAST_CORE_ADDR_BASE 0x00000
+#define SMBUS_MAST_SYS_REG_ADDR_BASE 0x01000
+
+/* SMB register space. */
+#define SMB_CORE_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x00)
+
+#define SMB_CORE_CTRL_ESO BIT(6)
+#define SMB_CORE_CTRL_FW_ACK BIT(4)
+#define SMB_CORE_CTRL_ACK BIT(0)
+
+#define SMB_CORE_CMD_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x0F)
+#define SMB_CORE_CMD_REG_OFF2 (SMBUS_MAST_CORE_ADDR_BASE + 0x0E)
+#define SMB_CORE_CMD_REG_OFF1 (SMBUS_MAST_CORE_ADDR_BASE + 0x0D)
+
+#define SMB_CORE_CMD_READM BIT(4)
+#define SMB_CORE_CMD_STOP BIT(2)
+#define SMB_CORE_CMD_START BIT(0)
+
+#define SMB_CORE_CMD_REG_OFF0 (SMBUS_MAST_CORE_ADDR_BASE + 0x0C)
+
+#define SMB_CORE_CMD_M_PROCEED BIT(1)
+#define SMB_CORE_CMD_M_RUN BIT(0)
+
+#define SMB_CORE_SR_HOLD_TIME_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x18)
+
+/*
+ * SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
+ * baud clock required to program 'Hold Time' at X KHz.
+ */
+#define SR_HOLD_TIME_100K_TICKS 133
+#define SR_HOLD_TIME_400K_TICKS 20
+#define SR_HOLD_TIME_1000K_TICKS 11
+
+#define SMB_CORE_COMPLETION_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x23)
+
+#define COMPLETION_MDONE BIT(6)
+#define COMPLETION_IDLE BIT(5)
+#define COMPLETION_MNAKX BIT(0)
+
+#define SMB_CORE_IDLE_SCALING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x24)
+
+/*
+ * FAIR_BUS_IDLE_MIN_XK_TICKS field will indicate the number of ticks of
+ * the baud clock required to program 'fair idle delay' at X KHz. Fair idle
+ * delay establishes the MCTP T(IDLE_DELAY) period.
+ */
+#define FAIR_BUS_IDLE_MIN_100K_TICKS 969
+#define FAIR_BUS_IDLE_MIN_400K_TICKS 157
+#define FAIR_BUS_IDLE_MIN_1000K_TICKS 157
+
+/*
+ * FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
+ * baud clock required to satisfy the fairness protocol at X KHz.
+ */
+#define FAIR_IDLE_DELAY_100K_TICKS 1000
+#define FAIR_IDLE_DELAY_400K_TICKS 500
+#define FAIR_IDLE_DELAY_1000K_TICKS 500
+
+#define SMB_IDLE_SCALING_100K \
+ ((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
+#define SMB_IDLE_SCALING_400K \
+ ((FAIR_IDLE_DELAY_400K_TICKS << 16) | FAIR_BUS_IDLE_MIN_400K_TICKS)
+#define SMB_IDLE_SCALING_1000K \
+ ((FAIR_IDLE_DELAY_1000K_TICKS << 16) | FAIR_BUS_IDLE_MIN_1000K_TICKS)
+
+#define SMB_CORE_CONFIG_REG3 (SMBUS_MAST_CORE_ADDR_BASE + 0x2B)
+
+#define SMB_CONFIG3_ENMI BIT(6)
+#define SMB_CONFIG3_ENIDI BIT(5)
+
+#define SMB_CORE_CONFIG_REG2 (SMBUS_MAST_CORE_ADDR_BASE + 0x2A)
+#define SMB_CORE_CONFIG_REG1 (SMBUS_MAST_CORE_ADDR_BASE + 0x29)
+
+#define SMB_CONFIG1_ASR BIT(7)
+#define SMB_CONFIG1_ENAB BIT(2)
+#define SMB_CONFIG1_RESET BIT(1)
+#define SMB_CONFIG1_FEN BIT(0)
+
+#define SMB_CORE_BUS_CLK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x2C)
+
+/*
+ * BUS_CLK_XK_LOW_PERIOD_TICKS field defines the number of I2C Baud Clock
+ * periods that make up the low phase of the I2C/SMBus bus clock at X KHz.
+ */
+#define BUS_CLK_100K_LOW_PERIOD_TICKS 156
+#define BUS_CLK_400K_LOW_PERIOD_TICKS 41
+#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
+
+/*
+ * BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
+ * periods that make up the high phase of the I2C/SMBus bus clock at X KHz.
+ */
+#define BUS_CLK_100K_HIGH_PERIOD_TICKS 154
+#define BUS_CLK_400K_HIGH_PERIOD_TICKS 35
+#define BUS_CLK_1000K_HIGH_PERIOD_TICKS 14
+
+#define BUS_CLK_100K \
+ ((BUS_CLK_100K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_100K_LOW_PERIOD_TICKS)
+#define BUS_CLK_400K \
+ ((BUS_CLK_400K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_400K_LOW_PERIOD_TICKS)
+#define BUS_CLK_1000K \
+ ((BUS_CLK_1000K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_1000K_LOW_PERIOD_TICKS)
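The BUS_CLK_* values above pack the high-phase tick count into bits 15:8 and the low-phase count into bits 7:0 of the bus clock register; for example BUS_CLK_400K = (35 << 8) | 41 = 0x2329, which pci1xxxx_i2c_set_freq() later writes with writew(). A tiny helper showing the same packing (hypothetical, not part of the patch):

#include <linux/types.h>

/*
 * Hypothetical helper mirroring the BUS_CLK_* macros: high-phase ticks go
 * in bits 15:8, low-phase ticks in bits 7:0 of the register value.
 */
static inline u16 my_pack_bus_clk(u8 high_ticks, u8 low_ticks)
{
        return ((u16)high_ticks << 8) | low_ticks;
}

/* Example: my_pack_bus_clk(35, 41) == 0x2329, i.e. BUS_CLK_400K. */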
+
+#define SMB_CORE_CLK_SYNC_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x3C)
+
+/*
+ * CLK_SYNC_XK defines the number of clock cycles to sync up to the external
+ * clock before comparing the internal and external clocks for clock stretching
+ * at X KHz.
+ */
+#define CLK_SYNC_100K 4
+#define CLK_SYNC_400K 4
+#define CLK_SYNC_1000K 4
+
+#define SMB_CORE_DATA_TIMING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x40)
+
+/*
+ * FIRST_START_HOLD_XK_TICKS will indicate the number of ticks of the baud
+ * clock required to program 'FIRST_START_HOLD' timer at X KHz. This timer
+ * determines the SCLK hold time following SDAT driven low during the first
+ * START bit in a transfer.
+ */
+#define FIRST_START_HOLD_100K_TICKS 22
+#define FIRST_START_HOLD_400K_TICKS 16
+#define FIRST_START_HOLD_1000K_TICKS 6
+
+/*
+ * STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+ * required to program 'STOP_SETUP' timer at X KHz. This timer determines the
+ * SDAT setup time from the rising edge of SCLK for a STOP condition.
+ */
+#define STOP_SETUP_100K_TICKS 157
+#define STOP_SETUP_400K_TICKS 20
+#define STOP_SETUP_1000K_TICKS 12
+
+/*
+ * RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+ * required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
+ * SDAT setup time from the rising edge of SCLK for a repeated START condition.
+ */
+#define RESTART_SETUP_100K_TICKS 157
+#define RESTART_SETUP_400K_TICKS 20
+#define RESTART_SETUP_1000K_TICKS 12
+
+/*
+ * DATA_HOLD_XK_TICKS will indicate the number of ticks of the baud clock
+ * required to program 'DATA_HOLD' timer at X KHz. This timer determines the
+ * SDAT hold time following SCLK driven low.
+ */
+#define DATA_HOLD_100K_TICKS 2
+#define DATA_HOLD_400K_TICKS 2
+#define DATA_HOLD_1000K_TICKS 2
+
+#define DATA_TIMING_100K \
+ ((FIRST_START_HOLD_100K_TICKS << 24) | (STOP_SETUP_100K_TICKS << 16) | \
+ (RESTART_SETUP_100K_TICKS << 8) | DATA_HOLD_100K_TICKS)
+#define DATA_TIMING_400K \
+ ((FIRST_START_HOLD_400K_TICKS << 24) | (STOP_SETUP_400K_TICKS << 16) | \
+ (RESTART_SETUP_400K_TICKS << 8) | DATA_HOLD_400K_TICKS)
+#define DATA_TIMING_1000K \
+ ((FIRST_START_HOLD_1000K_TICKS << 24) | (STOP_SETUP_1000K_TICKS << 16) | \
+ (RESTART_SETUP_1000K_TICKS << 8) | DATA_HOLD_1000K_TICKS)
+
+#define SMB_CORE_TO_SCALING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x44)
+
+/*
+ * BUS_IDLE_MIN_XK_TICKS defines Bus Idle Minimum Time.
+ * Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
+ * (BUS_IDLE_MIN_XK_TICKS[7] ? 4 : 1)
+ */
+#define BUS_IDLE_MIN_100K_TICKS 167UL
+#define BUS_IDLE_MIN_400K_TICKS 139UL
+#define BUS_IDLE_MIN_1000K_TICKS 133UL
+
+/*
+ * CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
+ * SMBus Controller Cumulative Time-Out duration =
+ * CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
+ */
+#define CTRL_CUM_TIME_OUT_100K_TICKS 159
+#define CTRL_CUM_TIME_OUT_400K_TICKS 159
+#define CTRL_CUM_TIME_OUT_1000K_TICKS 159
+
+/*
+ * TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
+ * SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
+ * Baud_Clock_Period x 4096
+ */
+#define TARGET_CUM_TIME_OUT_100K_TICKS 199
+#define TARGET_CUM_TIME_OUT_400K_TICKS 199
+#define TARGET_CUM_TIME_OUT_1000K_TICKS 199
+
+/*
+ * CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
+ * Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
+ */
+#define CLOCK_HIGH_TIME_OUT_100K_TICKS 204
+#define CLOCK_HIGH_TIME_OUT_400K_TICKS 204
+#define CLOCK_HIGH_TIME_OUT_1000K_TICKS 204
+
+#define TO_SCALING_100K \
+ ((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \
+ (TARGET_CUM_TIME_OUT_100K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_100K_TICKS)
+#define TO_SCALING_400K \
+ ((BUS_IDLE_MIN_400K_TICKS << 24) | (CTRL_CUM_TIME_OUT_400K_TICKS << 16) | \
+ (TARGET_CUM_TIME_OUT_400K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_400K_TICKS)
+#define TO_SCALING_1000K \
+ ((BUS_IDLE_MIN_1000K_TICKS << 24) | (CTRL_CUM_TIME_OUT_1000K_TICKS << 16) | \
+ (TARGET_CUM_TIME_OUT_1000K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_1000K_TICKS)
+
+#define I2C_SCL_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x100)
+#define I2C_SDA_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x101)
+
+#define I2C_FOD_EN BIT(4)
+#define I2C_PULL_UP_EN BIT(3)
+#define I2C_PULL_DOWN_EN BIT(2)
+#define I2C_INPUT_EN BIT(1)
+#define I2C_OUTPUT_EN BIT(0)
+
+#define SMBUS_CONTROL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x200)
+
+#define CTL_RESET_COUNTERS BIT(3)
+#define CTL_TRANSFER_DIR BIT(2)
+#define CTL_HOST_FIFO_ENTRY BIT(1)
+#define CTL_RUN BIT(0)
+
+#define I2C_DIRN_WRITE 0
+#define I2C_DIRN_READ 1
+
+#define SMBUS_STATUS_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x204)
+
+#define STA_DMA_TERM BIT(7)
+#define STA_DMA_REQ BIT(6)
+#define STA_THRESHOLD BIT(2)
+#define STA_BUF_FULL BIT(1)
+#define STA_BUF_EMPTY BIT(0)
+
+#define SMBUS_INTR_STAT_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x208)
+
+#define INTR_STAT_DMA_TERM BIT(7)
+#define INTR_STAT_THRESHOLD BIT(2)
+#define INTR_STAT_BUF_FULL BIT(1)
+#define INTR_STAT_BUF_EMPTY BIT(0)
+
+#define SMBUS_INTR_MSK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x20C)
+
+#define INTR_MSK_DMA_TERM BIT(7)
+#define INTR_MSK_THRESHOLD BIT(2)
+#define INTR_MSK_BUF_FULL BIT(1)
+#define INTR_MSK_BUF_EMPTY BIT(0)
+
+#define ALL_NW_LAYER_INTERRUPTS \
+ (INTR_MSK_DMA_TERM | INTR_MSK_THRESHOLD | INTR_MSK_BUF_FULL | \
+ INTR_MSK_BUF_EMPTY)
+
+#define SMBUS_MCU_COUNTER_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x214)
+
+#define SMBALERT_MST_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x230)
+
+#define SMBALERT_MST_PU BIT(0)
+
+#define SMBUS_GEN_INT_STAT_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x23C)
+
+#define SMBUS_GEN_INT_MASK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x240)
+
+#define SMBALERT_INTR_MASK BIT(10)
+#define I2C_BUF_MSTR_INTR_MASK BIT(9)
+#define I2C_INTR_MASK BIT(8)
+#define SMBALERT_WAKE_INTR_MASK BIT(2)
+#define I2C_BUF_MSTR_WAKE_INTR_MASK BIT(1)
+#define I2C_WAKE_INTR_MASK BIT(0)
+
+#define ALL_HIGH_LAYER_INTR \
+ (SMBALERT_INTR_MASK | I2C_BUF_MSTR_INTR_MASK | I2C_INTR_MASK | \
+ SMBALERT_WAKE_INTR_MASK | I2C_BUF_MSTR_WAKE_INTR_MASK | \
+ I2C_WAKE_INTR_MASK)
+
+#define SMBUS_RESET_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x248)
+
+#define PERI_SMBUS_D3_RESET_DIS BIT(16)
+
+#define SMBUS_MST_BUF (SMBUS_MAST_CORE_ADDR_BASE + 0x280)
+
+#define SMBUS_BUF_MAX_SIZE 0x80
+
+#define I2C_FLAGS_DIRECT_MODE BIT(7)
+#define I2C_FLAGS_POLLING_MODE BIT(6)
+#define I2C_FLAGS_STOP BIT(5)
+#define I2C_FLAGS_SMB_BLK_READ BIT(4)
+
+#define PCI1XXXX_I2C_TIMEOUT_MS 1000
+
+/* General Purpose Register. */
+#define SMB_GPR_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x1000 + 0x0c00 + \
+ 0x00)
+
+/* Lock Register. */
+#define SMB_GPR_LOCK_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x1000 + 0x0000 + \
+ 0x00A0)
+
+#define SMBUS_PERI_LOCK BIT(3)
+
+struct pci1xxxx_i2c {
+ struct completion i2c_xfer_done;
+ bool i2c_xfer_in_progress;
+ struct i2c_adapter adap;
+ void __iomem *i2c_base;
+ u32 freq;
+ u32 flags;
+};
+
+static int set_sys_lock(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMB_GPR_LOCK_REG;
+ u8 data;
+
+ writel(SMBUS_PERI_LOCK, p);
+ data = readl(p);
+ if (data != SMBUS_PERI_LOCK)
+ return -EPERM;
+
+ return 0;
+}
+
+static int release_sys_lock(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMB_GPR_LOCK_REG;
+ u8 data;
+
+ data = readl(p);
+ if (data != SMBUS_PERI_LOCK)
+ return 0;
+
+ writel(0, p);
+ data = readl(p);
+ if (data & SMBUS_PERI_LOCK)
+ return -EPERM;
+
+ return 0;
+}
+
+static void pci1xxxx_ack_high_level_intr(struct pci1xxxx_i2c *i2c, u16 intr_msk)
+{
+ writew(intr_msk, i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF);
+}
+
+static void pci1xxxx_i2c_configure_smbalert_pin(struct pci1xxxx_i2c *i2c,
+ bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMBALERT_MST_PAD_CTRL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+
+ if (enable)
+ regval |= SMBALERT_MST_PU;
+ else
+ regval &= ~SMBALERT_MST_PU;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_send_start_stop(struct pci1xxxx_i2c *i2c, bool start)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ u8 regval;
+
+ regval = readb(p);
+
+ if (start)
+ regval |= SMB_CORE_CMD_START;
+ else
+ regval |= SMB_CORE_CMD_STOP;
+
+ writeb(regval, p);
+}
+
+/*
+ * When accessing the core control register, do not do a read-modify-write:
+ * it contains write-'1'-to-clear bits. Instead, write only the specific
+ * bits that need to be set.
+ */
+static void pci1xxxx_i2c_set_clear_FW_ACK(struct pci1xxxx_i2c *i2c, bool set)
+{
+ u8 regval;
+
+ if (set)
+ regval = SMB_CORE_CTRL_FW_ACK | SMB_CORE_CTRL_ESO | SMB_CORE_CTRL_ACK;
+ else
+ regval = SMB_CORE_CTRL_ESO | SMB_CORE_CTRL_ACK;
+
+ writeb(regval, i2c->i2c_base + SMB_CORE_CTRL_REG_OFF);
+}
+
+static void pci1xxxx_i2c_buffer_write(struct pci1xxxx_i2c *i2c, u8 slaveaddr,
+ u8 transferlen, unsigned char *buf)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_MST_BUF;
+
+ if (slaveaddr)
+ writeb(slaveaddr, p++);
+
+ if (buf)
+ memcpy_toio(p, buf, transferlen);
+}
+
+/*
+ * When accessing the core control register, do not do a read-modify-write:
+ * it contains write-'1'-to-clear bits. Instead, write only the specific
+ * bits that need to be set.
+ */
+static void pci1xxxx_i2c_enable_ESO(struct pci1xxxx_i2c *i2c)
+{
+ writeb(SMB_CORE_CTRL_ESO, i2c->i2c_base + SMB_CORE_CTRL_REG_OFF);
+}
+
+static void pci1xxxx_i2c_reset_counters(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ regval |= CTL_RESET_COUNTERS;
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_set_transfer_dir(struct pci1xxxx_i2c *i2c, u8 direction)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ if (direction == I2C_DIRN_WRITE)
+ regval &= ~CTL_TRANSFER_DIR;
+ else
+ regval |= CTL_TRANSFER_DIR;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_set_mcu_count(struct pci1xxxx_i2c *i2c, u8 count)
+{
+ writeb(count, i2c->i2c_base + SMBUS_MCU_COUNTER_REG_OFF);
+}
+
+static void pci1xxxx_i2c_set_read_count(struct pci1xxxx_i2c *i2c, u8 readcount)
+{
+ writeb(readcount, i2c->i2c_base + SMB_CORE_CMD_REG_OFF3);
+}
+
+static void pci1xxxx_i2c_set_write_count(struct pci1xxxx_i2c *i2c, u8 writecount)
+{
+ writeb(writecount, i2c->i2c_base + SMB_CORE_CMD_REG_OFF2);
+}
+
+static void pci1xxxx_i2c_set_DMA_run(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ regval |= CTL_RUN;
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_set_mrun_proceed(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF0;
+ u8 regval;
+
+ regval = readb(p);
+ regval |= SMB_CORE_CMD_M_RUN;
+ regval |= SMB_CORE_CMD_M_PROCEED;
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_start_DMA(struct pci1xxxx_i2c *i2c)
+{
+ pci1xxxx_i2c_set_DMA_run(i2c);
+ pci1xxxx_i2c_set_mrun_proceed(i2c);
+}
+
+static void pci1xxxx_i2c_config_asr(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CONFIG_REG1;
+ u8 regval;
+
+ regval = readb(p);
+ if (enable)
+ regval |= SMB_CONFIG1_ASR;
+ else
+ regval &= ~SMB_CONFIG1_ASR;
+ writeb(regval, p);
+}
+
+static irqreturn_t pci1xxxx_i2c_isr(int irq, void *dev)
+{
+ struct pci1xxxx_i2c *i2c = dev;
+ void __iomem *p1 = i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF;
+ void __iomem *p2 = i2c->i2c_base + SMBUS_INTR_STAT_REG_OFF;
+ irqreturn_t intr_handled = IRQ_NONE;
+ u16 reg1;
+ u8 reg3;
+
+ /*
+ * Read the SMBus interrupt status register to see if the
+ * DMA_TERM interrupt has caused this callback.
+ */
+ reg1 = readw(p1);
+
+ if (reg1 & I2C_BUF_MSTR_INTR_MASK) {
+ reg3 = readb(p2);
+ if (reg3 & INTR_STAT_DMA_TERM) {
+ complete(&i2c->i2c_xfer_done);
+ intr_handled = IRQ_HANDLED;
+ writeb(INTR_STAT_DMA_TERM, p2);
+ }
+ pci1xxxx_ack_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK);
+ }
+
+ if (reg1 & SMBALERT_INTR_MASK) {
+ intr_handled = IRQ_HANDLED;
+ pci1xxxx_ack_high_level_intr(i2c, SMBALERT_INTR_MASK);
+ }
+
+ return intr_handled;
+}
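The ISR above only acknowledges the interrupt sources and signals i2c_xfer_done; the transfer paths below block on that completion with a timeout and reinitialize the controller if it never fires. A stripped-down sketch of this complete()/wait_for_completion_timeout() handshake (names are hypothetical; requesting the IRQ is assumed to happen elsewhere):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct my_xfer {
        struct completion done;
};

static irqreturn_t my_isr(int irq, void *data)
{
        struct my_xfer *xfer = data;

        /* Hardware-specific status checks and acks would go here. */
        complete(&xfer->done);
        return IRQ_HANDLED;
}

static int my_start_and_wait(struct my_xfer *xfer)
{
        unsigned long time_left;

        reinit_completion(&xfer->done);
        /* ... kick off the DMA / bus transaction here ... */

        time_left = wait_for_completion_timeout(&xfer->done,
                                                msecs_to_jiffies(1000));
        if (!time_left)
                return -ETIMEDOUT;      /* reset the controller, as this driver does */

        return 0;
}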
+
+static void pci1xxxx_i2c_set_count(struct pci1xxxx_i2c *i2c, u8 mcucount,
+ u8 writecount, u8 readcount)
+{
+ pci1xxxx_i2c_set_mcu_count(i2c, mcucount);
+ pci1xxxx_i2c_set_write_count(i2c, writecount);
+ pci1xxxx_i2c_set_read_count(i2c, readcount);
+}
+
+static void pci1xxxx_i2c_set_readm(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ u8 regval;
+
+ regval = readb(p);
+ if (enable)
+ regval |= SMB_CORE_CMD_READM;
+ else
+ regval &= ~SMB_CORE_CMD_READM;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_ack_nw_layer_intr(struct pci1xxxx_i2c *i2c, u8 ack_intr_msk)
+{
+ writeb(ack_intr_msk, i2c->i2c_base + SMBUS_INTR_STAT_REG_OFF);
+}
+
+static void pci1xxxx_config_nw_layer_intr(struct pci1xxxx_i2c *i2c,
+ u8 intr_msk, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_INTR_MSK_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ if (enable)
+ regval &= ~intr_msk;
+ else
+ regval |= intr_msk;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_config_padctrl(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p1 = i2c->i2c_base + I2C_SCL_PAD_CTRL_REG_OFF;
+ void __iomem *p2 = i2c->i2c_base + I2C_SDA_PAD_CTRL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p1);
+ if (enable)
+ regval |= I2C_INPUT_EN | I2C_OUTPUT_EN;
+ else
+ regval &= ~(I2C_INPUT_EN | I2C_OUTPUT_EN);
+
+ writeb(regval, p1);
+
+ regval = readb(p2);
+ if (enable)
+ regval |= I2C_INPUT_EN | I2C_OUTPUT_EN;
+ else
+ regval &= ~(I2C_INPUT_EN | I2C_OUTPUT_EN);
+
+ writeb(regval, p2);
+}
+
+static void pci1xxxx_i2c_set_mode(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ if (i2c->flags & I2C_FLAGS_DIRECT_MODE)
+ regval &= ~CTL_HOST_FIFO_ENTRY;
+ else
+ regval |= CTL_HOST_FIFO_ENTRY;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_config_high_level_intr(struct pci1xxxx_i2c *i2c,
+ u16 intr_msk, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_GEN_INT_MASK_REG_OFF;
+ u16 regval;
+
+ regval = readw(p);
+ if (enable)
+ regval &= ~intr_msk;
+ else
+ regval |= intr_msk;
+ writew(regval, p);
+}
+
+static void pci1xxxx_i2c_configure_core_reg(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p1 = i2c->i2c_base + SMB_CORE_CONFIG_REG1;
+ void __iomem *p3 = i2c->i2c_base + SMB_CORE_CONFIG_REG3;
+ u8 reg1;
+ u8 reg3;
+
+ reg1 = readb(p1);
+ reg3 = readb(p3);
+ if (enable) {
+ reg1 |= SMB_CONFIG1_ENAB | SMB_CONFIG1_FEN;
+ reg3 |= SMB_CONFIG3_ENMI | SMB_CONFIG3_ENIDI;
+ } else {
+ reg1 &= ~(SMB_CONFIG1_ENAB | SMB_CONFIG1_FEN);
+ reg3 &= ~(SMB_CONFIG3_ENMI | SMB_CONFIG3_ENIDI);
+ }
+
+ writeb(reg1, p1);
+ writeb(reg3, p3);
+}
+
+static void pci1xxxx_i2c_set_freq(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *bp = i2c->i2c_base;
+ void __iomem *p_idle_scaling = bp + SMB_CORE_IDLE_SCALING_REG_OFF;
+ void __iomem *p_data_timing = bp + SMB_CORE_DATA_TIMING_REG_OFF;
+ void __iomem *p_hold_time = bp + SMB_CORE_SR_HOLD_TIME_REG_OFF;
+ void __iomem *p_to_scaling = bp + SMB_CORE_TO_SCALING_REG_OFF;
+ void __iomem *p_clk_sync = bp + SMB_CORE_CLK_SYNC_REG_OFF;
+ void __iomem *p_clk_reg = bp + SMB_CORE_BUS_CLK_REG_OFF;
+
+ switch (i2c->freq) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ writeb(SR_HOLD_TIME_100K_TICKS, p_hold_time);
+ writel(SMB_IDLE_SCALING_100K, p_idle_scaling);
+ writew(BUS_CLK_100K, p_clk_reg);
+ writel(CLK_SYNC_100K, p_clk_sync);
+ writel(DATA_TIMING_100K, p_data_timing);
+ writel(TO_SCALING_100K, p_to_scaling);
+ break;
+
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ writeb(SR_HOLD_TIME_1000K_TICKS, p_hold_time);
+ writel(SMB_IDLE_SCALING_1000K, p_idle_scaling);
+ writew(BUS_CLK_1000K, p_clk_reg);
+ writel(CLK_SYNC_1000K, p_clk_sync);
+ writel(DATA_TIMING_1000K, p_data_timing);
+ writel(TO_SCALING_1000K, p_to_scaling);
+ break;
+
+ case I2C_MAX_FAST_MODE_FREQ:
+ default:
+ writeb(SR_HOLD_TIME_400K_TICKS, p_hold_time);
+ writel(SMB_IDLE_SCALING_400K, p_idle_scaling);
+ writew(BUS_CLK_400K, p_clk_reg);
+ writel(CLK_SYNC_400K, p_clk_sync);
+ writel(DATA_TIMING_400K, p_data_timing);
+ writel(TO_SCALING_400K, p_to_scaling);
+ break;
+ }
+}
+
+static void pci1xxxx_i2c_init(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p2 = i2c->i2c_base + SMBUS_STATUS_REG_OFF;
+ void __iomem *p1 = i2c->i2c_base + SMB_GPR_REG;
+ u8 regval;
+ u8 ret;
+
+ ret = set_sys_lock(i2c);
+ if (ret == -EPERM) {
+ /*
+ * Configure I2C Fast Mode as default frequency if unable
+ * to acquire sys lock.
+ */
+ regval = 0;
+ } else {
+ regval = readl(p1);
+ release_sys_lock(i2c);
+ }
+
+ switch (regval) {
+ case 0:
+ i2c->freq = I2C_MAX_FAST_MODE_FREQ;
+ pci1xxxx_i2c_set_freq(i2c);
+ break;
+ case 1:
+ i2c->freq = I2C_MAX_STANDARD_MODE_FREQ;
+ pci1xxxx_i2c_set_freq(i2c);
+ break;
+ case 2:
+ i2c->freq = I2C_MAX_FAST_MODE_PLUS_FREQ;
+ pci1xxxx_i2c_set_freq(i2c);
+ break;
+ case 3:
+ default:
+ break;
+ }
+
+ pci1xxxx_i2c_config_padctrl(i2c, true);
+ i2c->flags |= I2C_FLAGS_DIRECT_MODE;
+ pci1xxxx_i2c_set_mode(i2c);
+
+ /*
+	 * Added as a precaution, since BUF_EMPTY in the status register
+	 * also triggered an interrupt.
+ */
+ writeb(STA_BUF_EMPTY, p2);
+
+	/* Configure the core I2C control registers. */
+ pci1xxxx_i2c_configure_core_reg(i2c, true);
+
+ /*
+ * Enable pull-up for the SMB alert pin which is just used for
+ * wakeup right now.
+ */
+ pci1xxxx_i2c_configure_smbalert_pin(i2c, true);
+}
+
+static void pci1xxxx_i2c_clear_flags(struct pci1xxxx_i2c *i2c)
+{
+ u8 regval;
+
+ /* Reset the internal buffer counters. */
+ pci1xxxx_i2c_reset_counters(i2c);
+
+ /* Clear low level interrupts. */
+ regval = COMPLETION_MNAKX | COMPLETION_IDLE | COMPLETION_MDONE;
+ writeb(regval, i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3);
+ reinit_completion(&i2c->i2c_xfer_done);
+ pci1xxxx_ack_nw_layer_intr(i2c, ALL_NW_LAYER_INTERRUPTS);
+ pci1xxxx_ack_high_level_intr(i2c, ALL_HIGH_LAYER_INTR);
+}
+
+static int pci1xxxx_i2c_read(struct pci1xxxx_i2c *i2c, u8 slaveaddr,
+ unsigned char *buf, u16 total_len)
+{
+ void __iomem *p2 = i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3;
+ void __iomem *p1 = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ void __iomem *p3 = i2c->i2c_base + SMBUS_MST_BUF;
+ unsigned long time_left;
+ u16 remainingbytes;
+ u8 transferlen;
+ int retval = 0;
+ u8 read_count;
+ u32 regval;
+ u16 count;
+
+ /* Enable I2C host controller by setting the ESO bit in the CONTROL REG. */
+ pci1xxxx_i2c_enable_ESO(i2c);
+ pci1xxxx_i2c_clear_flags(i2c);
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, true);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, true);
+
+ /*
+	 * The I2C transfer could be more than 128 bytes. The core can only
+	 * transfer 128 bytes at a time.
+	 * For an I2C read, initially send the read slave address along with
+	 * the number of bytes to read in ReadCount. After the slave address
+	 * is sent, the interrupt is generated. On seeing the ACK for the
+	 * slave address, reverse the buffer direction and run the DMA to
+	 * initiate the read from the slave.
+ */
+ for (count = 0; count < total_len; count += transferlen) {
+
+ /*
+ * Before start of any transaction clear the existing
+ * START/STOP conditions.
+ */
+ writeb(0, p1);
+ remainingbytes = total_len - count;
+ transferlen = min_t(u16, remainingbytes, SMBUS_BUF_MAX_SIZE);
+
+ /*
+		 * Send the STOP bit for the last chunk in the transaction.
+		 * For an I2C read transaction longer than BUF_SIZE, a NACK
+		 * should only be sent for the last read. Hence the FW_ACK bit
+		 * is set for all read chunks except the last one; for the
+		 * last chunk a NACK should be sent, so FW_ACK is cleared.
+		 * Send STOP only when the I2C_FLAGS_STOP bit is set in the
+		 * flags and only for the last transaction.
+ */
+ if ((count + transferlen >= total_len) &&
+ (i2c->flags & I2C_FLAGS_STOP)) {
+ pci1xxxx_i2c_set_clear_FW_ACK(i2c, false);
+ pci1xxxx_i2c_send_start_stop(i2c, 0);
+ } else {
+ pci1xxxx_i2c_set_clear_FW_ACK(i2c, true);
+ }
+
+ /* Send START bit for the first transaction. */
+ if (count == 0) {
+ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_WRITE);
+ pci1xxxx_i2c_send_start_stop(i2c, 1);
+
+			/* Write the I2C buffer with just the slave address. */
+ pci1xxxx_i2c_buffer_write(i2c, slaveaddr, 0, NULL);
+
+			/* Set the counts; ReadCount is the number of bytes to transfer. */
+ pci1xxxx_i2c_set_count(i2c, 1, 1, transferlen);
+
+ /*
+ * Set the Auto_start_read bit so that the HW itself
+ * will take care of the read phase.
+ */
+ pci1xxxx_i2c_config_asr(i2c, true);
+ if (i2c->flags & I2C_FLAGS_SMB_BLK_READ)
+ pci1xxxx_i2c_set_readm(i2c, true);
+ } else {
+ pci1xxxx_i2c_set_count(i2c, 0, 0, transferlen);
+ pci1xxxx_i2c_config_asr(i2c, false);
+ pci1xxxx_i2c_clear_flags(i2c);
+ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_READ);
+ }
+
+ /* Start the DMA. */
+ pci1xxxx_i2c_start_DMA(i2c);
+
+ /* Wait for the DMA_TERM interrupt. */
+ time_left = wait_for_completion_timeout(&i2c->i2c_xfer_done,
+ msecs_to_jiffies(PCI1XXXX_I2C_TIMEOUT_MS));
+ if (time_left == 0) {
+ /* Reset the I2C core to release the bus lock. */
+ pci1xxxx_i2c_init(i2c);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ /* Read the completion reg to know the reason for DMA_TERM. */
+ regval = readb(p2);
+
+ /* Slave did not respond. */
+ if (regval & COMPLETION_MNAKX) {
+ writeb(COMPLETION_MNAKX, p2);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (i2c->flags & I2C_FLAGS_SMB_BLK_READ) {
+ buf[0] = readb(p3);
+ read_count = buf[0];
+ memcpy_fromio(&buf[1], p3 + 1, read_count);
+ } else {
+ memcpy_fromio(&buf[count], p3, transferlen);
+ }
+ }
+
+cleanup:
+ /* Disable all the interrupts. */
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, false);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, false);
+ pci1xxxx_i2c_config_asr(i2c, false);
+ return retval;
+}
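Both transfer paths split a message into SMBUS_BUF_MAX_SIZE (128-byte) chunks because the core buffer only holds that much at once; START is issued only for the first chunk and NACK/STOP only for the last. A condensed outline of that chunking loop, with the per-chunk hardware programming reduced to placeholder comments (my_chunked_read is illustrative, not the driver function):

#include <linux/minmax.h>
#include <linux/types.h>

#define MY_BUF_MAX_SIZE 0x80    /* same value as SMBUS_BUF_MAX_SIZE in this driver */

static void my_chunked_read(unsigned char *buf, u16 total_len)
{
        u16 count, remaining;
        u8 chunk;

        for (count = 0; count < total_len; count += chunk) {
                remaining = total_len - count;
                chunk = min_t(u16, remaining, MY_BUF_MAX_SIZE);

                if (count == 0) {
                        /* First chunk: send START + slave address, enable auto read. */
                }
                if (count + chunk >= total_len) {
                        /* Last chunk: clear FW_ACK so the final byte is NACKed, send STOP. */
                }

                /* Run the DMA for this chunk and wait for DMA_TERM ... */
        }
}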
+
+static int pci1xxxx_i2c_write(struct pci1xxxx_i2c *i2c, u8 slaveaddr,
+ unsigned char *buf, u16 total_len)
+{
+ void __iomem *p2 = i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3;
+ void __iomem *p1 = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ unsigned long time_left;
+ u16 remainingbytes;
+ u8 actualwritelen;
+ u8 transferlen;
+ int retval = 0;
+ u32 regval;
+ u16 count;
+
+ /* Enable I2C host controller by setting the ESO bit in the CONTROL REG. */
+ pci1xxxx_i2c_enable_ESO(i2c);
+
+ /* Set the Buffer direction. */
+ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_WRITE);
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, true);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, true);
+
+ /*
+	 * The I2C transfer could be more than 128 bytes. The core can only
+	 * transfer 128 bytes at a time.
+ */
+ for (count = 0; count < total_len; count += transferlen) {
+ /*
+ * Before start of any transaction clear the existing
+ * START/STOP conditions.
+ */
+ writeb(0, p1);
+ pci1xxxx_i2c_clear_flags(i2c);
+ remainingbytes = total_len - count;
+
+ /* If it is the starting of the transaction send START. */
+ if (count == 0) {
+ pci1xxxx_i2c_send_start_stop(i2c, 1);
+
+ /* -1 for the slave address. */
+ transferlen = min_t(u16, SMBUS_BUF_MAX_SIZE - 1,
+ remainingbytes);
+ pci1xxxx_i2c_buffer_write(i2c, slaveaddr,
+ transferlen, &buf[count]);
+ /*
+			 * The actual number of bytes written on the I2C bus
+			 * includes the slave address.
+ */
+ actualwritelen = transferlen + 1;
+ } else {
+ transferlen = min_t(u16, SMBUS_BUF_MAX_SIZE, remainingbytes);
+ pci1xxxx_i2c_buffer_write(i2c, 0, transferlen, &buf[count]);
+ actualwritelen = transferlen;
+ }
+
+ pci1xxxx_i2c_set_count(i2c, actualwritelen, actualwritelen, 0);
+
+ /*
+ * Send STOP only when I2C_FLAGS_STOP bit is set in the flags and
+ * only for the last transaction.
+ */
+ if (remainingbytes <= transferlen &&
+ (i2c->flags & I2C_FLAGS_STOP))
+ pci1xxxx_i2c_send_start_stop(i2c, 0);
+
+ pci1xxxx_i2c_start_DMA(i2c);
+
+ /*
+ * Wait for the DMA_TERM interrupt.
+ */
+ time_left = wait_for_completion_timeout(&i2c->i2c_xfer_done,
+ msecs_to_jiffies(PCI1XXXX_I2C_TIMEOUT_MS));
+ if (time_left == 0) {
+ /* Reset the I2C core to release the bus lock. */
+ pci1xxxx_i2c_init(i2c);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ regval = readb(p2);
+ if (regval & COMPLETION_MNAKX) {
+ writeb(COMPLETION_MNAKX, p2);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+ }
+cleanup:
+ /* Disable all the interrupts. */
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, false);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, false);
+
+ return retval;
+}
+
+static int pci1xxxx_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct pci1xxxx_i2c *i2c = i2c_get_adapdata(adap);
+ u8 slaveaddr;
+ int retval;
+ u32 i;
+
+ i2c->i2c_xfer_in_progress = true;
+ for (i = 0; i < num; i++) {
+ slaveaddr = i2c_8bit_addr_from_msg(&msgs[i]);
+
+ /*
+ * Send the STOP bit if the transfer is the final one or
+ * if the I2C_M_STOP flag is set.
+ */
+ if ((i == num - 1) || (msgs[i].flags & I2C_M_STOP))
+ i2c->flags |= I2C_FLAGS_STOP;
+ else
+ i2c->flags &= ~I2C_FLAGS_STOP;
+
+ if (msgs[i].flags & I2C_M_RECV_LEN)
+ i2c->flags |= I2C_FLAGS_SMB_BLK_READ;
+ else
+ i2c->flags &= ~I2C_FLAGS_SMB_BLK_READ;
+
+ if (msgs[i].flags & I2C_M_RD)
+ retval = pci1xxxx_i2c_read(i2c, slaveaddr,
+ msgs[i].buf, msgs[i].len);
+ else
+ retval = pci1xxxx_i2c_write(i2c, slaveaddr,
+ msgs[i].buf, msgs[i].len);
+
+ if (retval < 0)
+ break;
+ }
+ i2c->i2c_xfer_in_progress = false;
+
+ if (retval < 0)
+ return retval;
+
+ return num;
+}
+
+/*
+ * List of functions supported by the driver.
+ */
+static u32 pci1xxxx_i2c_get_funcs(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_PROTOCOL_MANGLING |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm pci1xxxx_i2c_algo = {
+ .master_xfer = pci1xxxx_i2c_xfer,
+ .functionality = pci1xxxx_i2c_get_funcs,
+};
+
+static const struct i2c_adapter_quirks pci1xxxx_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
+static const struct i2c_adapter pci1xxxx_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "PCI1xxxx I2C Adapter",
+ .algo = &pci1xxxx_i2c_algo,
+ .quirks = &pci1xxxx_i2c_quirks,
+};
+
+static int pci1xxxx_i2c_suspend(struct device *dev)
+{
+ struct pci1xxxx_i2c *i2c = dev_get_drvdata(dev);
+ void __iomem *p = i2c->i2c_base + SMBUS_RESET_REG;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 regval;
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ /*
+ * If the system is put into the 'suspend' state while an I2C transfer is
+ * in progress, wait until the transfer completes.
+ */
+ while (i2c->i2c_xfer_in_progress)
+ msleep(20);
+
+ pci1xxxx_i2c_config_high_level_intr(i2c, SMBALERT_WAKE_INTR_MASK, true);
+
+ /*
+ * Enable the PERST_DIS bit to prevent PERST from resetting the core
+ * registers.
+ */
+ regval = readl(p);
+ regval |= PERI_SMBUS_D3_RESET_DIS;
+ writel(regval, p);
+
+ /* Enable PCI wake in the PMCSR register. */
+ device_set_wakeup_enable(dev, true);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+
+static int pci1xxxx_i2c_resume(struct device *dev)
+{
+ struct pci1xxxx_i2c *i2c = dev_get_drvdata(dev);
+ void __iomem *p1 = i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF;
+ void __iomem *p2 = i2c->i2c_base + SMBUS_RESET_REG;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 regval;
+
+ regval = readw(p1);
+ writew(regval, p1);
+ pci1xxxx_i2c_config_high_level_intr(i2c, SMBALERT_WAKE_INTR_MASK, false);
+ regval = readl(p2);
+ regval &= ~PERI_SMBUS_D3_RESET_DIS;
+ writel(regval, p2);
+ i2c_mark_adapter_resumed(&i2c->adap);
+ pci_wake_from_d3(pdev, false);
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend,
+ pci1xxxx_i2c_resume);
+
+static void pci1xxxx_i2c_shutdown(struct pci1xxxx_i2c *i2c)
+{
+ pci1xxxx_i2c_config_padctrl(i2c, false);
+ pci1xxxx_i2c_configure_core_reg(i2c, false);
+}
+
+static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct pci1xxxx_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, i2c);
+ i2c->i2c_xfer_in_progress = false;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ /*
+ * Get the base address of the SMB core. The SMB core uses
+ * BAR0, which is 32K in size.
+ */
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret < 0)
+ return ret;
+
+ i2c->i2c_base = pcim_iomap_table(pdev)[0];
+ init_completion(&i2c->i2c_xfer_done);
+ pci1xxxx_i2c_init(i2c);
+
+ ret = devm_add_action(dev, (void (*)(void *))pci1xxxx_i2c_shutdown, i2c);
+ if (ret)
+ return ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(dev, pci_irq_vector(pdev, 0), pci1xxxx_i2c_isr,
+ 0, pci_name(pdev), i2c);
+ if (ret)
+ return ret;
+
+ i2c->adap = pci1xxxx_i2c_ops;
+ i2c->adap.dev.parent = dev;
+
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "MCHP PCI1xxxx i2c adapter at %s", pci_name(pdev));
+
+ i2c_set_adapdata(&i2c->adap, i2c);
+
+ ret = devm_i2c_add_adapter(dev, &i2c->adap);
+ if (ret)
+ return dev_err_probe(dev, ret, "i2c add adapter failed\n");
+
+ return 0;
+}
+
+static const struct pci_device_id pci1xxxx_i2c_pci_id_table[] = {
+ { PCI_VDEVICE(EFAR, 0xA003) },
+ { PCI_VDEVICE(EFAR, 0xA013) },
+ { PCI_VDEVICE(EFAR, 0xA023) },
+ { PCI_VDEVICE(EFAR, 0xA033) },
+ { PCI_VDEVICE(EFAR, 0xA043) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pci1xxxx_i2c_pci_id_table);
+
+static struct pci_driver pci1xxxx_i2c_pci_driver = {
+ .name = "i2c-mchp-pci1xxxx",
+ .id_table = pci1xxxx_i2c_pci_id_table,
+ .probe = pci1xxxx_i2c_probe_pci,
+ .driver = {
+ .pm = pm_sleep_ptr(&pci1xxxx_i2c_pm_ops),
+ },
+};
+module_pci_driver(pci1xxxx_i2c_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tharun Kumar P<tharunkumar.pasumarthi@microchip.com>");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 8716032f030a..e68e775f187e 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -31,8 +32,6 @@
(MLXBF_I2C_FUNC_SMBUS_DEFAULT | MLXBF_I2C_FUNC_SMBUS_BLOCK | \
I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SLAVE)
-#define MLXBF_I2C_SMBUS_MAX 3
-
/* Shared resources info in BlueField platforms. */
#define MLXBF_I2C_COALESCE_TYU_ADDR 0x02801300
@@ -47,6 +46,9 @@
#define MLXBF_I2C_COREPLL_YU_ADDR 0x02800c30
#define MLXBF_I2C_COREPLL_YU_SIZE 0x00c
+#define MLXBF_I2C_COREPLL_RSH_YU_ADDR 0x13409824
+#define MLXBF_I2C_COREPLL_RSH_YU_SIZE 0x00c
+
#define MLXBF_I2C_SHARED_RES_MAX 3
/*
@@ -63,13 +65,14 @@
*/
#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
/* Reference clock for Bluefield - 156 MHz. */
-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
+#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
/* Constant used to determine the PLL frequency. */
-#define MLNXBF_I2C_COREPLL_CONST 16384
+#define MLNXBF_I2C_COREPLL_CONST 16384ULL
+
+#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
/* PLL registers. */
-#define MLXBF_I2C_CORE_PLL_REG0 0x0
#define MLXBF_I2C_CORE_PLL_REG1 0x4
#define MLXBF_I2C_CORE_PLL_REG2 0x8
@@ -129,14 +132,10 @@
/* Slave busy bit reset. */
#define MLXBF_I2C_CAUSE_S_GW_BUSY_FALL BIT(18)
-#define MLXBF_I2C_CAUSE_SLAVE_ARBITER_BITS_MASK GENMASK(20, 0)
-
/* Cause coalesce registers. */
#define MLXBF_I2C_CAUSE_COALESCE_0 0x00
-#define MLXBF_I2C_CAUSE_COALESCE_1 0x04
-#define MLXBF_I2C_CAUSE_COALESCE_2 0x08
-#define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT MLXBF_I2C_SMBUS_MAX
+#define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT 3
#define MLXBF_I2C_CAUSE_YU_SLAVE_BIT 1
/* Functional enable register. */
@@ -163,15 +162,6 @@
#define MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(num, val) \
((val) | (0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num)))
-/* SMBus timing parameters. */
-#define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00
-#define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04
-#define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08
-#define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c
-#define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10
-#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
-#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
-
/*
* Defines SMBus operating frequency and core clock frequency.
* According to ADB files, default values are compliant to 100KHz SMBus
@@ -181,42 +171,46 @@
#define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
/* Core PLL TYU configuration. */
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
-
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
/* Core PLL YU configuration. */
#define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
#define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
+/* SMBus timing parameters. */
+#define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00
+#define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04
+#define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08
+#define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c
+#define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10
+#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
+#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
-/* Core PLL frequency. */
-static u64 mlxbf_i2c_corepll_frequency;
+#define MLXBF_I2C_SHIFT_0 0
+#define MLXBF_I2C_SHIFT_8 8
+#define MLXBF_I2C_SHIFT_16 16
+#define MLXBF_I2C_SHIFT_24 24
+
+#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
+#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+
+#define MLXBF_I2C_MST_ADDR_OFFSET 0x200
/* SMBus Master GW. */
-#define MLXBF_I2C_SMBUS_MASTER_GW 0x200
+#define MLXBF_I2C_SMBUS_MASTER_GW 0x0
/* Number of bytes received and sent. */
-#define MLXBF_I2C_SMBUS_RS_BYTES 0x300
+#define MLXBF_I2C_YU_SMBUS_RS_BYTES 0x100
+#define MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES 0x10c
/* Packet error check (PEC) value. */
-#define MLXBF_I2C_SMBUS_MASTER_PEC 0x304
+#define MLXBF_I2C_SMBUS_MASTER_PEC 0x104
/* Status bits (ACK/NACK/FW Timeout). */
-#define MLXBF_I2C_SMBUS_MASTER_STATUS 0x308
+#define MLXBF_I2C_SMBUS_MASTER_STATUS 0x108
/* SMbus Master Finite State Machine. */
-#define MLXBF_I2C_SMBUS_MASTER_FSM 0x310
-
-/*
- * When enabled, the master will issue a stop condition in case of
- * timeout while waiting for FW response.
- */
-#define MLXBF_I2C_SMBUS_EN_FW_TIMEOUT 0x31c
+#define MLXBF_I2C_YU_SMBUS_MASTER_FSM 0x110
+#define MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM 0x100
/* SMBus master GW control bits offset in MLXBF_I2C_SMBUS_MASTER_GW[31:3]. */
#define MLXBF_I2C_MASTER_LOCK_BIT BIT(31) /* Lock bit. */
@@ -236,14 +230,14 @@ static u64 mlxbf_i2c_corepll_frequency;
#define MLXBF_I2C_MASTER_ENABLE_READ \
(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_READ_BIT)
-#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT 12 /* Slave address shift. */
-#define MLXBF_I2C_MASTER_WRITE_SHIFT 21 /* Control write bytes shift. */
-#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT 20 /* Send PEC byte shift. */
-#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT 11 /* Parse expected bytes shift. */
-#define MLXBF_I2C_MASTER_READ_SHIFT 4 /* Control read bytes shift. */
+#define MLXBF_I2C_MASTER_WRITE_SHIFT 21 /* Control write bytes */
+#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT 20 /* Send PEC byte when set to 1 */
+#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT 11 /* Control parse expected bytes */
+#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT 12 /* Slave address */
+#define MLXBF_I2C_MASTER_READ_SHIFT 4 /* Control read bytes */
/* SMBus master GW Data descriptor. */
-#define MLXBF_I2C_MASTER_DATA_DESC_ADDR 0x280
+#define MLXBF_I2C_MASTER_DATA_DESC_ADDR 0x80
#define MLXBF_I2C_MASTER_DATA_DESC_SIZE 0x80 /* Size in bytes. */
/* Maximum bytes to read/write per SMBus transaction. */
@@ -269,19 +263,21 @@ static u64 mlxbf_i2c_corepll_frequency;
#define MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK BIT(31)
#define MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK BIT(15)
+#define MLXBF_I2C_SLV_ADDR_OFFSET 0x400
+
/* SMBus slave GW. */
-#define MLXBF_I2C_SMBUS_SLAVE_GW 0x400
+#define MLXBF_I2C_SMBUS_SLAVE_GW 0x0
/* Number of bytes received and sent from/to master. */
-#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES 0x500
+#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES 0x100
/* Packet error check (PEC) value. */
-#define MLXBF_I2C_SMBUS_SLAVE_PEC 0x504
+#define MLXBF_I2C_SMBUS_SLAVE_PEC 0x104
/* SMBus slave Finite State Machine (FSM). */
-#define MLXBF_I2C_SMBUS_SLAVE_FSM 0x510
+#define MLXBF_I2C_SMBUS_SLAVE_FSM 0x110
/*
* Should be set when all raised causes handled, and cleared by HW on
* every new cause.
*/
-#define MLXBF_I2C_SMBUS_SLAVE_READY 0x52c
+#define MLXBF_I2C_SMBUS_SLAVE_READY 0x12c
/* SMBus slave GW control bits offset in MLXBF_I2C_SMBUS_SLAVE_GW[31:19]. */
#define MLXBF_I2C_SLAVE_BUSY_BIT BIT(30) /* Busy bit. */
@@ -294,23 +290,74 @@ static u64 mlxbf_i2c_corepll_frequency;
#define MLXBF_I2C_SLAVE_SEND_PEC_SHIFT 21 /* Send PEC byte shift. */
/* SMBus slave GW Data descriptor. */
-#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR 0x480
+#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR 0x80
#define MLXBF_I2C_SLAVE_DATA_DESC_SIZE 0x80 /* Size in bytes. */
/* SMbus slave configuration registers. */
-#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG 0x514
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG 0x114
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT 16
-#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT 7
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT BIT(7)
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK GENMASK(6, 0)
-#define MLXBF_I2C_SLAVE_ADDR_ENABLED(addr) \
- ((addr) & (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT))
-
/*
* Timeout is given in microsends. Note also that timeout handling is not
* exact.
*/
#define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */
+#define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT (300 * 1000) /* 300ms */
+
+/* Polling frequency in microseconds. */
+#define MLXBF_I2C_POLL_FREQ_IN_USEC 200
+
+#define MLXBF_I2C_SMBUS_OP_CNT_1 1
+#define MLXBF_I2C_SMBUS_OP_CNT_2 2
+#define MLXBF_I2C_SMBUS_OP_CNT_3 3
+#define MLXBF_I2C_SMBUS_MAX_OP_CNT MLXBF_I2C_SMBUS_OP_CNT_3
+
+/* Helper macro to define I2C resource parameters. */
+#define MLXBF_I2C_RES_PARAMS(addr, size, str) \
+ { \
+ .start = (addr), \
+ .end = (addr) + (size) - 1, \
+ .name = (str) \
+ }
+
+enum {
+ MLXBF_I2C_TIMING_100KHZ = 100000,
+ MLXBF_I2C_TIMING_400KHZ = 400000,
+ MLXBF_I2C_TIMING_1000KHZ = 1000000,
+};
+
+enum {
+ MLXBF_I2C_F_READ = BIT(0),
+ MLXBF_I2C_F_WRITE = BIT(1),
+ MLXBF_I2C_F_NORESTART = BIT(3),
+ MLXBF_I2C_F_SMBUS_OPERATION = BIT(4),
+ MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
+ MLXBF_I2C_F_SMBUS_PEC = BIT(6),
+ MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+};
+
+/* Mellanox BlueField chip type. */
+enum mlxbf_i2c_chip_type {
+ MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */
+ MLXBF_I2C_CHIP_TYPE_2, /* Mellanox BlueField-2 chip. */
+ MLXBF_I2C_CHIP_TYPE_3 /* Mellanox BlueField-3 chip. */
+};
+
+/* List of chip resources that are being accessed by the driver. */
+enum {
+ MLXBF_I2C_SMBUS_RES,
+ MLXBF_I2C_MST_CAUSE_RES,
+ MLXBF_I2C_SLV_CAUSE_RES,
+ MLXBF_I2C_COALESCE_RES,
+ MLXBF_I2C_SMBUS_TIMER_RES,
+ MLXBF_I2C_SMBUS_MST_RES,
+ MLXBF_I2C_SMBUS_SLV_RES,
+ MLXBF_I2C_COREPLL_RES,
+ MLXBF_I2C_GPIO_RES,
+ MLXBF_I2C_END_RES
+};
/* Encapsulates timing parameters. */
struct mlxbf_i2c_timings {
@@ -331,27 +378,12 @@ struct mlxbf_i2c_timings {
u32 timeout; /* Detect clock low timeout. */
};
-enum {
- MLXBF_I2C_F_READ = BIT(0),
- MLXBF_I2C_F_WRITE = BIT(1),
- MLXBF_I2C_F_NORESTART = BIT(3),
- MLXBF_I2C_F_SMBUS_OPERATION = BIT(4),
- MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
- MLXBF_I2C_F_SMBUS_PEC = BIT(6),
- MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
-};
-
struct mlxbf_i2c_smbus_operation {
u32 flags;
u32 length; /* Buffer length in bytes. */
u8 *buffer;
};
-#define MLXBF_I2C_SMBUS_OP_CNT_1 1
-#define MLXBF_I2C_SMBUS_OP_CNT_2 2
-#define MLXBF_I2C_SMBUS_OP_CNT_3 3
-#define MLXBF_I2C_SMBUS_MAX_OP_CNT MLXBF_I2C_SMBUS_OP_CNT_3
-
struct mlxbf_i2c_smbus_request {
u8 slave;
u8 operation_cnt;
@@ -365,24 +397,38 @@ struct mlxbf_i2c_resource {
u8 type;
};
-/* List of chip resources that are being accessed by the driver. */
-enum {
- MLXBF_I2C_SMBUS_RES,
- MLXBF_I2C_MST_CAUSE_RES,
- MLXBF_I2C_SLV_CAUSE_RES,
- MLXBF_I2C_COALESCE_RES,
- MLXBF_I2C_COREPLL_RES,
- MLXBF_I2C_GPIO_RES,
- MLXBF_I2C_END_RES,
+struct mlxbf_i2c_chip_info {
+ enum mlxbf_i2c_chip_type type;
+ /* Chip shared resources that are being used by the I2C controller. */
+ struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX];
+
+ /* Callback to calculate the core PLL frequency. */
+ u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res);
+
+ /* Register address offsets. */
+ u32 smbus_master_rs_bytes_off;
+ u32 smbus_master_fsm_off;
};
-/* Helper macro to define an I2C resource parameters. */
-#define MLXBF_I2C_RES_PARAMS(addr, size, str) \
- { \
- .start = (addr), \
- .end = (addr) + (size) - 1, \
- .name = (str) \
- }
+struct mlxbf_i2c_priv {
+ const struct mlxbf_i2c_chip_info *chip;
+ struct i2c_adapter adap;
+ struct mlxbf_i2c_resource *smbus;
+ struct mlxbf_i2c_resource *timer;
+ struct mlxbf_i2c_resource *mst;
+ struct mlxbf_i2c_resource *slv;
+ struct mlxbf_i2c_resource *mst_cause;
+ struct mlxbf_i2c_resource *slv_cause;
+ struct mlxbf_i2c_resource *coalesce;
+ u64 frequency; /* Core frequency in Hz. */
+ int bus; /* Physical bus identifier. */
+ int irq;
+ struct i2c_client *slave[MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT];
+ u32 resource_version;
+};
+
+/* Core PLL frequency. */
+static u64 mlxbf_i2c_corepll_frequency;
static struct resource mlxbf_i2c_coalesce_tyu_params =
MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COALESCE_TYU_ADDR,
@@ -396,6 +442,10 @@ static struct resource mlxbf_i2c_corepll_yu_params =
MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_YU_ADDR,
MLXBF_I2C_COREPLL_YU_SIZE,
"COREPLL_MEM");
+static struct resource mlxbf_i2c_corepll_rsh_yu_params =
+ MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_RSH_YU_ADDR,
+ MLXBF_I2C_COREPLL_RSH_YU_SIZE,
+ "COREPLL_MEM");
static struct resource mlxbf_i2c_gpio_tyu_params =
MLXBF_I2C_RES_PARAMS(MLXBF_I2C_GPIO_TYU_ADDR,
MLXBF_I2C_GPIO_TYU_SIZE,
@@ -405,34 +455,6 @@ static struct mutex mlxbf_i2c_coalesce_lock;
static struct mutex mlxbf_i2c_corepll_lock;
static struct mutex mlxbf_i2c_gpio_lock;
-/* Mellanox BlueField chip type. */
-enum mlxbf_i2c_chip_type {
- MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */
- MLXBF_I2C_CHIP_TYPE_2, /* Mallanox BlueField-2 chip. */
-};
-
-struct mlxbf_i2c_chip_info {
- enum mlxbf_i2c_chip_type type;
- /* Chip shared resources that are being used by the I2C controller. */
- struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX];
-
- /* Callback to calculate the core PLL frequency. */
- u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res);
-};
-
-struct mlxbf_i2c_priv {
- const struct mlxbf_i2c_chip_info *chip;
- struct i2c_adapter adap;
- struct mlxbf_i2c_resource *smbus;
- struct mlxbf_i2c_resource *mst_cause;
- struct mlxbf_i2c_resource *slv_cause;
- struct mlxbf_i2c_resource *coalesce;
- u64 frequency; /* Core frequency in Hz. */
- int bus; /* Physical bus identifier. */
- int irq;
- struct i2c_client *slave;
-};
-
static struct mlxbf_i2c_resource mlxbf_i2c_coalesce_res[] = {
[MLXBF_I2C_CHIP_TYPE_1] = {
.params = &mlxbf_i2c_coalesce_tyu_params,
@@ -452,6 +474,11 @@ static struct mlxbf_i2c_resource mlxbf_i2c_corepll_res[] = {
.params = &mlxbf_i2c_corepll_yu_params,
.lock = &mlxbf_i2c_corepll_lock,
.type = MLXBF_I2C_COREPLL_RES,
+ },
+ [MLXBF_I2C_CHIP_TYPE_3] = {
+ .params = &mlxbf_i2c_corepll_rsh_yu_params,
+ .lock = &mlxbf_i2c_corepll_lock,
+ .type = MLXBF_I2C_COREPLL_RES,
}
};
@@ -468,26 +495,13 @@ static u8 mlxbf_i2c_bus_count;
static struct mutex mlxbf_i2c_bus_lock;
-/* Polling frequency in microseconds. */
-#define MLXBF_I2C_POLL_FREQ_IN_USEC 200
-
-#define MLXBF_I2C_SHIFT_0 0
-#define MLXBF_I2C_SHIFT_8 8
-#define MLXBF_I2C_SHIFT_16 16
-#define MLXBF_I2C_SHIFT_24 24
-
-#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
-#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
-
-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
-
/*
* Function to poll a set of bits at a specific address; it checks whether
* the bits are equal to zero when eq_zero is set to 'true', and not equal
* to zero when eq_zero is set to 'false'.
* Note that the timeout is given in microseconds.
*/
-static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
+static u32 mlxbf_i2c_poll(void __iomem *io, u32 addr, u32 mask,
bool eq_zero, u32 timeout)
{
u32 bits;
@@ -509,18 +523,37 @@ static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
* a transaction. Accordingly, this function polls the Master FSM stop
* bit; it returns false when the bit is asserted, true if not.
*/
-static bool mlxbf_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
+static bool mlxbf_i2c_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
{
u32 mask = MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK;
- u32 addr = MLXBF_I2C_SMBUS_MASTER_FSM;
+ u32 addr = priv->chip->smbus_master_fsm_off;
u32 timeout = MLXBF_I2C_SMBUS_TIMEOUT;
- if (mlxbf_smbus_poll(priv->smbus->io, addr, mask, true, timeout))
+ if (mlxbf_i2c_poll(priv->mst->io, addr, mask, true, timeout))
return true;
return false;
}
+/*
+ * Wait for the lock to be released before acquiring it.
+ */
+static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv)
+{
+ if (mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ MLXBF_I2C_MASTER_LOCK_BIT, true,
+ MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT))
+ return true;
+
+ return false;
+}
+
+static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv)
+{
+ /* Clear the GW to release the lock. */
+ writel(0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
+}
+
static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
u32 cause_status)
{
@@ -558,7 +591,7 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
* then read the cause and master status bits to determine if
* errors occurred during the transaction.
*/
- mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
MLXBF_I2C_MASTER_BUSY_BIT, true,
MLXBF_I2C_SMBUS_TIMEOUT);
@@ -571,7 +604,7 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
* Parse both Cause and Master GW bits, then return transaction status.
*/
- master_status_bits = readl(priv->smbus->io +
+ master_status_bits = readl(priv->mst->io +
MLXBF_I2C_SMBUS_MASTER_STATUS);
master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;
@@ -596,7 +629,8 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
}
static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
- const u8 *data, u8 length, u32 addr)
+ const u8 *data, u8 length, u32 addr,
+ bool is_master)
{
u8 offset, aligned_length;
u32 data32;
@@ -613,12 +647,16 @@ static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
*/
for (offset = 0; offset < aligned_length; offset += sizeof(u32)) {
data32 = *((u32 *)(data + offset));
- iowrite32be(data32, priv->smbus->io + addr + offset);
+ if (is_master)
+ iowrite32be(data32, priv->mst->io + addr + offset);
+ else
+ iowrite32be(data32, priv->slv->io + addr + offset);
}
}
static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
- u8 *data, u8 length, u32 addr)
+ u8 *data, u8 length, u32 addr,
+ bool is_master)
{
u32 data32, mask;
u8 byte, offset;
@@ -634,14 +672,20 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
*/
for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) {
- data32 = ioread32be(priv->smbus->io + addr + offset);
+ if (is_master)
+ data32 = ioread32be(priv->mst->io + addr + offset);
+ else
+ data32 = ioread32be(priv->slv->io + addr + offset);
*((u32 *)(data + offset)) = data32;
}
if (!(length & mask))
return;
- data32 = ioread32be(priv->smbus->io + addr + offset);
+ if (is_master)
+ data32 = ioread32be(priv->mst->io + addr + offset);
+ else
+ data32 = ioread32be(priv->slv->io + addr + offset);
for (byte = 0; byte < (length & mask); byte++) {
data[offset + byte] = data32 & GENMASK(7, 0);
@@ -667,16 +711,16 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT);
/* Clear status bits. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
+ writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
/* Set the cause data. */
- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
/* Zero PEC byte. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
+ writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_PEC);
/* Zero byte count. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_RS_BYTES);
+ writel(0x0, priv->mst->io + priv->chip->smbus_master_rs_bytes_off);
/* GW activation. */
- writel(command, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW);
+ writel(command, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
/*
* Poll master status and check status bits. An ACK is sent when
@@ -712,10 +756,19 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
slave = request->slave & GENMASK(6, 0);
addr = slave << 1;
- /* First of all, check whether the HW is idle. */
- if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv)))
+ /*
+ * Try to acquire the SMBus GW lock before any reads of the GW register,
+ * since a read sets the lock.
+ */
+ if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv)))
return -EBUSY;
+ /* Check whether the HW is idle */
+ if (WARN_ON(!mlxbf_i2c_smbus_master_wait_for_idle(priv))) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
/* Set first byte. */
data_desc[data_idx++] = addr;
@@ -738,6 +791,11 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (flags & MLXBF_I2C_F_WRITE) {
write_en = 1;
write_len += operation->length;
+ if (data_idx + operation->length >
+ MLXBF_I2C_MASTER_DATA_DESC_SIZE) {
+ ret = -ENOBUFS;
+ goto out_unlock;
+ }
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
@@ -763,25 +821,25 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
* must be written to the data registers.
*/
mlxbf_i2c_smbus_write_data(priv, (const u8 *)data_desc, data_len,
- MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
if (write_en) {
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
pec_en, 0);
if (ret)
- return ret;
+ goto out_unlock;
}
if (read_en) {
/* Write slave address to Master GW data descriptor. */
mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
- MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
pec_en, 1);
if (!ret) {
/* Get Master GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
- MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
/* Get data from Master GW data descriptor. */
memcpy(read_buf, data_desc, read_len + 1);
@@ -793,9 +851,12 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
* next tag integration.
*/
writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK,
- priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM);
+ priv->mst->io + priv->chip->smbus_master_fsm_off);
}
+out_unlock:
+ mlxbf_i2c_smbus_master_unlock(priv);
+
return ret;
}
@@ -1082,7 +1143,7 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
timer |= mlxbf_i2c_set_timer(priv, timings->scl_low,
false, MLXBF_I2C_MASK_16,
MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io +
+ writel(timer, priv->timer->io +
MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH);
timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false,
@@ -1093,34 +1154,34 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16);
timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24);
- writel(timer, priv->smbus->io +
+ writel(timer, priv->timer->io +
MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE);
timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io +
+ writel(timer, priv->timer->io +
MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP);
timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
timer = mlxbf_i2c_set_timer(priv, timings->buf, false,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
timer = timings->timeout;
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
enum mlxbf_i2c_timings_config {
@@ -1407,24 +1468,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
return 0;
}
-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
{
- u64 core_frequency, pad_frequency;
+ u64 core_frequency;
u8 core_od, core_r;
u32 corepll_val;
u16 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */
- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
/*
* Compute PLL output frequency as follow:
@@ -1436,31 +1492,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- core_frequency = pad_frequency * (++core_f);
+ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
core_frequency /= (++core_r) * (++core_od);
return core_frequency;
}
-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
{
u32 corepll_reg1_val, corepll_reg2_val;
- u64 corepll_frequency, pad_frequency;
+ u64 corepll_frequency;
u8 core_od, core_r;
u32 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */
- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
/*
* Compute PLL output frequency as follow:
@@ -1472,7 +1523,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
+ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
corepll_frequency /= (++core_r) * (++core_od);
return corepll_frequency;
@@ -1523,28 +1574,26 @@ static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
return 0;
}
-static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
+static int mlxbf_i2c_slave_enable(struct mlxbf_i2c_priv *priv,
+ struct i2c_client *slave)
{
- u32 slave_reg, slave_reg_tmp, slave_reg_avail, slave_addr_mask;
- u8 reg, reg_cnt, byte, addr_tmp, reg_avail, byte_avail;
- bool avail, disabled;
-
- disabled = false;
- avail = false;
+ u8 reg, reg_cnt, byte, addr_tmp;
+ u32 slave_reg, slave_reg_tmp;
if (!priv)
return -EPERM;
reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;
- slave_addr_mask = MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
/*
* Read the slave registers. There are 4 * 32-bit slave registers.
- * Each slave register can hold up to 4 * 8-bit slave configuration
- * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
+ * Each slave register can hold up to 4 * 8-bit slave configuration:
+ * 1) A 7-bit address
+ * 2) And a status bit (1 if enabled, 0 if not).
+ * Look for the next available slave register slot.
*/
for (reg = 0; reg < reg_cnt; reg++) {
- slave_reg = readl(priv->smbus->io +
+ slave_reg = readl(priv->slv->io +
MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
/*
* Each register holds 4 slave addresses. So, we have to keep
@@ -1556,121 +1605,87 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
addr_tmp = slave_reg_tmp & GENMASK(7, 0);
/*
- * Mark the first available slave address slot, i.e. its
- * enabled bit should be unset. This slot might be used
- * later on to register our slave.
- */
- if (!avail && !MLXBF_I2C_SLAVE_ADDR_ENABLED(addr_tmp)) {
- avail = true;
- reg_avail = reg;
- byte_avail = byte;
- slave_reg_avail = slave_reg;
- }
-
- /*
- * Parse slave address bytes and check whether the
- * slave address already exists and it's enabled,
- * i.e. most significant bit is set.
+ * If an enable bit is not set in the
+ * MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG register, then the
+ * slave address slot associated with that bit is
+ * free. So set the enable bit and write the
+ * slave address bits.
*/
- if ((addr_tmp & slave_addr_mask) == addr) {
- if (MLXBF_I2C_SLAVE_ADDR_ENABLED(addr_tmp))
- return 0;
- disabled = true;
- break;
+ if (!(addr_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT)) {
+ slave_reg &= ~(MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK << (byte * 8));
+ slave_reg |= (slave->addr << (byte * 8));
+ slave_reg |= MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT << (byte * 8);
+ writel(slave_reg, priv->slv->io +
+ MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ (reg * 0x4));
+
+ /*
+ * Set the slave at the corresponding index.
+ */
+ priv->slave[(reg * 4) + byte] = slave;
+
+ return 0;
}
/* Parse next byte. */
slave_reg_tmp >>= 8;
}
-
- /* Exit the loop if the slave address is found. */
- if (disabled)
- break;
}
- if (!avail && !disabled)
- return -EINVAL; /* No room for a new slave address. */
-
- if (avail && !disabled) {
- reg = reg_avail;
- byte = byte_avail;
- /* Set the slave address. */
- slave_reg_avail &= ~(slave_addr_mask << (byte * 8));
- slave_reg_avail |= addr << (byte * 8);
- slave_reg = slave_reg_avail;
- }
-
- /* Enable the slave address and update the register. */
- slave_reg |= (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT) << (byte * 8);
- writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
- reg * 0x4);
-
- return 0;
+ return -EBUSY;
}
-static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
+static int mlxbf_i2c_slave_disable(struct mlxbf_i2c_priv *priv, u8 addr)
{
- u32 slave_reg, slave_reg_tmp, slave_addr_mask;
- u8 addr, addr_tmp, reg, reg_cnt, slave_byte;
- struct i2c_client *client = priv->slave;
- bool exist;
+ u8 addr_tmp, reg, reg_cnt, byte;
+ u32 slave_reg, slave_reg_tmp;
- exist = false;
-
- addr = client->addr;
reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;
- slave_addr_mask = MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
/*
* Read the slave registers. There are 4 * 32-bit slave registers.
- * Each slave register can hold up to 4 * 8-bit slave configuration
- * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
+ * Each slave register can hold up to 4 * 8-bit slave configuration:
+ * 1) A 7-bit address
+ * 2) And a status bit (1 if enabled, 0 if not).
+ * Check if addr is present in the registers.
*/
for (reg = 0; reg < reg_cnt; reg++) {
- slave_reg = readl(priv->smbus->io +
+ slave_reg = readl(priv->slv->io +
MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
/* Check whether the address slots are empty. */
- if (slave_reg == 0)
+ if (!slave_reg)
continue;
/*
- * Each register holds 4 slave addresses. So, we have to keep
- * the byte order consistent with the value read in order to
- * update the register correctly, if needed.
+ * Check if addr matches any of the 4 slave addresses
+ * in the register.
*/
slave_reg_tmp = slave_reg;
- slave_byte = 0;
- while (slave_reg_tmp != 0) {
- addr_tmp = slave_reg_tmp & slave_addr_mask;
+ for (byte = 0; byte < 4; byte++) {
+ addr_tmp = slave_reg_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
/*
* Parse slave address bytes and check whether the
* slave address already exists.
*/
if (addr_tmp == addr) {
- exist = true;
- break;
+ /* Clear the slave address slot. */
+ slave_reg &= ~(GENMASK(7, 0) << (byte * 8));
+ writel(slave_reg, priv->slv->io +
+ MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ (reg * 0x4));
+ /* Free slave at the corresponding index */
+ priv->slave[(reg * 4) + byte] = NULL;
+
+ return 0;
}
/* Parse next byte. */
slave_reg_tmp >>= 8;
- slave_byte += 1;
}
-
- /* Exit the loop if the slave address is found. */
- if (exist)
- break;
}
- if (!exist)
- return 0; /* Slave is not registered, nothing to do. */
-
- /* Cleanup the slave address slot. */
- slave_reg &= ~(GENMASK(7, 0) << (slave_byte * 8));
- writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
- reg * 0x4);
-
- return 0;
+ return -ENXIO;
}
static int mlxbf_i2c_init_coalesce(struct platform_device *pdev,
@@ -1760,7 +1775,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
int ret;
/* Reset FSM. */
- writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
+ writel(0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
/*
* Enable slave cause interrupt bits. Drive
@@ -1775,7 +1790,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0);
/* Finally, set the 'ready' bit to start handling transactions. */
- writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);
/* Initialize the cause coalesce resource. */
ret = mlxbf_i2c_init_coalesce(pdev, priv);
@@ -1820,84 +1835,93 @@ static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read,
return true;
}
-static bool mlxbf_smbus_slave_wait_for_idle(struct mlxbf_i2c_priv *priv,
+static bool mlxbf_i2c_slave_wait_for_idle(struct mlxbf_i2c_priv *priv,
u32 timeout)
{
u32 mask = MLXBF_I2C_CAUSE_S_GW_BUSY_FALL;
u32 addr = MLXBF_I2C_CAUSE_ARBITER;
- if (mlxbf_smbus_poll(priv->slv_cause->io, addr, mask, false, timeout))
+ if (mlxbf_i2c_poll(priv->slv_cause->io, addr, mask, false, timeout))
return true;
return false;
}
-/* Send byte to 'external' smbus master. */
-static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+static struct i2c_client *mlxbf_i2c_get_slave_from_addr(
+ struct mlxbf_i2c_priv *priv, u8 addr)
{
- u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
- u8 write_size, pec_en, addr, byte, value, byte_cnt, desc_size;
- struct i2c_client *slave = priv->slave;
- u32 control32, data32;
- int ret;
+ int i;
- if (!slave)
- return -EINVAL;
+ for (i = 0; i < MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT; i++) {
+ if (!priv->slave[i])
+ continue;
+
+ if (priv->slave[i]->addr == addr)
+ return priv->slave[i];
+ }
+
+ return NULL;
+}
- addr = 0;
- byte = 0;
- desc_size = MLXBF_I2C_SLAVE_DATA_DESC_SIZE;
+/*
+ * Send byte to 'external' smbus master. This function is executed when
+ * an external smbus master wants to read data from the BlueField.
+ */
+static int mlxbf_i2c_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+{
+ u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
+ u8 write_size, pec_en, addr, value, byte_cnt;
+ struct i2c_client *slave;
+ u32 control32, data32;
+ int ret = 0;
/*
- * Read bytes received from the external master. These bytes should
- * be located in the first data descriptor register of the slave GW.
- * These bytes are the slave address byte and the internal register
- * address, if supplied.
+ * Read the first byte received from the external master to
+ * determine the slave address. This byte is located in the
+ * first data descriptor register of the slave GW.
*/
- if (recv_bytes > 0) {
- data32 = ioread32be(priv->smbus->io +
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
-
- /* Parse the received bytes. */
- switch (recv_bytes) {
- case 2:
- byte = (data32 >> 8) & GENMASK(7, 0);
- fallthrough;
- case 1:
- addr = (data32 & GENMASK(7, 0)) >> 1;
- }
+ data32 = ioread32be(priv->slv->io +
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+ addr = (data32 & GENMASK(7, 0)) >> 1;
- /* Check whether it's our slave address. */
- if (slave->addr != addr)
- return -EINVAL;
+ /*
+ * Check if the slave address received in the data descriptor register
+ * matches any of the slave addresses registered. If there is a match,
+ * set the slave.
+ */
+ slave = mlxbf_i2c_get_slave_from_addr(priv, addr);
+ if (!slave) {
+ ret = -ENXIO;
+ goto clear_csr;
}
/*
- * I2C read transactions may start by a WRITE followed by a READ.
- * Indeed, most slave devices would expect the internal address
- * following the slave address byte. So, write that byte first,
- * and then, send the requested data bytes to the master.
+ * An I2C read can consist of a WRITE bit transaction followed by
+ * a READ bit transaction. Indeed, slave devices often expect
+ * the slave address to be followed by the internal address.
+ * So, write the internal address byte first, and then, send the
+ * requested data to the master.
*/
if (recv_bytes > 1) {
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
- value = byte;
+ value = (data32 >> 8) & GENMASK(7, 0);
ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED,
&value);
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
if (ret < 0)
- return ret;
+ goto clear_csr;
}
/*
- * Now, send data to the master; currently, the driver supports
- * READ_BYTE, READ_WORD and BLOCK READ protocols. Note that the
- * hardware can send up to 128 bytes per transfer. That is the
- * size of its data registers.
+ * Send data to the master. Currently, the driver supports
+ * READ_BYTE, READ_WORD and BLOCK READ protocols. The
+ * hardware can send up to 128 bytes per transfer which is
+ * the total size of the data registers.
*/
i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
- for (byte_cnt = 0; byte_cnt < desc_size; byte_cnt++) {
+ for (byte_cnt = 0; byte_cnt < MLXBF_I2C_SLAVE_DATA_DESC_SIZE; byte_cnt++) {
data_desc[byte_cnt] = value;
i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
}
@@ -1905,14 +1929,12 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
/* Send a stop condition to the backend. */
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
- /* Handle the actual transfer. */
-
/* Set the number of bytes to write to master. */
write_size = (byte_cnt - 1) & 0x7f;
/* Write data to Slave GW data descriptor. */
mlxbf_i2c_smbus_write_data(priv, data_desc, byte_cnt,
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false);
pec_en = 0; /* Disable PEC since it is not supported. */
@@ -1921,46 +1943,52 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT);
control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT);
- writel(control32, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_GW);
+ writel(control32, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_GW);
/*
* Wait until the transfer is completed; the driver will wait
* until the GW is idle, a cause will rise on fall of GW busy.
*/
- mlxbf_smbus_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
+ mlxbf_i2c_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
+clear_csr:
/* Release the Slave GW. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
- writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);
- return 0;
+ return ret;
}
-/* Receive bytes from 'external' smbus master. */
-static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+/*
+ * Receive bytes from 'external' smbus master. This function is executed when
+ * an external smbus master wants to write data to the BlueField.
+ */
+static int mlxbf_i2c_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
{
u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
- struct i2c_client *slave = priv->slave;
+ struct i2c_client *slave;
u8 value, byte, addr;
int ret = 0;
- if (!slave)
- return -EINVAL;
-
/* Read data from Slave GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, recv_bytes,
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
-
- /* Check whether its our slave address. */
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false);
addr = data_desc[0] >> 1;
- if (slave->addr != addr)
- return -EINVAL;
/*
- * Notify the slave backend; another I2C master wants to write data
- * to us. This event is sent once the slave address and the write bit
- * is detected.
+ * Check if the slave address received in the data descriptor register
+ * matches any of the slave addresses registered.
+ */
+ slave = mlxbf_i2c_get_slave_from_addr(priv, addr);
+ if (!slave) {
+ ret = -EINVAL;
+ goto clear_csr;
+ }
+
+ /*
+ * Notify the slave backend that an smbus master wants to write data
+ * to the BlueField.
*/
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
@@ -1973,18 +2001,22 @@ static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
break;
}
- /* Send a stop condition to the backend. */
+ /*
+ * Send a stop event to the slave backend, to signal
+ * the end of the write transactions.
+ */
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+clear_csr:
/* Release the Slave GW. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
- writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);
return ret;
}
-static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
+static irqreturn_t mlxbf_i2c_irq(int irq, void *ptr)
{
struct mlxbf_i2c_priv *priv = ptr;
bool read, write, irq_is_set;
@@ -2014,7 +2046,7 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
* slave, if the higher 8 bits are sent then the slave expect N bytes
* from the master.
*/
- rw_bytes_reg = readl(priv->smbus->io +
+ rw_bytes_reg = readl(priv->slv->io +
MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0);
@@ -2032,9 +2064,9 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
MLXBF_I2C_SLAVE_DATA_DESC_SIZE : recv_bytes;
if (read)
- mlxbf_smbus_irq_send(priv, recv_bytes);
+ mlxbf_i2c_irq_send(priv, recv_bytes);
else
- mlxbf_smbus_irq_recv(priv, recv_bytes);
+ mlxbf_i2c_irq_recv(priv, recv_bytes);
return IRQ_HANDLED;
}
@@ -2129,23 +2161,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
static int mlxbf_i2c_reg_slave(struct i2c_client *slave)
{
struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+ struct device *dev = &slave->dev;
int ret;
- if (priv->slave)
- return -EBUSY;
-
/*
* Do not support ten bit chip address and do not use Packet Error
* Checking (PEC).
*/
- if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC))
+ if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC)) {
+ dev_err(dev, "SMBus PEC and 10 bit address not supported\n");
return -EAFNOSUPPORT;
+ }
- ret = mlxbf_slave_enable(priv, slave->addr);
- if (ret < 0)
- return ret;
-
- priv->slave = slave;
+ ret = mlxbf_i2c_slave_enable(priv, slave);
+ if (ret)
+ dev_err(dev, "Surpassed max number of registered slaves allowed\n");
return 0;
}
@@ -2153,18 +2183,19 @@ static int mlxbf_i2c_reg_slave(struct i2c_client *slave)
static int mlxbf_i2c_unreg_slave(struct i2c_client *slave)
{
struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+ struct device *dev = &slave->dev;
int ret;
- WARN_ON(!priv->slave);
-
- /* Unregister slave, i.e. disable the slave address in hardware. */
- ret = mlxbf_slave_disable(priv);
- if (ret < 0)
- return ret;
-
- priv->slave = NULL;
+ /*
+ * Unregister slave by:
+ * 1) Disabling the slave address in hardware
+ * 2) Freeing priv->slave at the corresponding index
+ */
+ ret = mlxbf_i2c_slave_disable(priv, slave->addr);
+ if (ret)
+ dev_err(dev, "Unable to find slave 0x%x\n", slave->addr);
- return 0;
+ return ret;
}
static u32 mlxbf_i2c_functionality(struct i2c_adapter *adap)
@@ -2180,14 +2211,27 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
[1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
[2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
},
- .calculate_freq = mlxbf_calculate_freq_from_tyu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu,
+ .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES,
+ .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM
},
[MLXBF_I2C_CHIP_TYPE_2] = {
.type = MLXBF_I2C_CHIP_TYPE_2,
.shared_res = {
[0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
},
- .calculate_freq = mlxbf_calculate_freq_from_yu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu,
+ .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES,
+ .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM
+ },
+ [MLXBF_I2C_CHIP_TYPE_3] = {
+ .type = MLXBF_I2C_CHIP_TYPE_3,
+ .shared_res = {
+ [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_3]
+ },
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu,
+ .smbus_master_rs_bytes_off = MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES,
+ .smbus_master_fsm_off = MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM
}
};
@@ -2203,24 +2247,11 @@ static struct i2c_adapter_quirks mlxbf_i2c_quirks = {
.max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH,
};
-static const struct of_device_id mlxbf_i2c_dt_ids[] = {
- {
- .compatible = "mellanox,i2c-mlxbf1",
- .data = &mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1]
- },
- {
- .compatible = "mellanox,i2c-mlxbf2",
- .data = &mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2]
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mlxbf_i2c_dt_ids);
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = {
{ "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] },
{ "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] },
+ { "MLNXBF31", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_3] },
{},
};
@@ -2229,35 +2260,27 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_i2c_acpi_ids);
static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
{
const struct acpi_device_id *aid;
- struct acpi_device *adev;
- unsigned long bus_id = 0;
- const char *uid;
+ u64 bus_id;
int ret;
if (acpi_disabled)
return -ENOENT;
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENXIO;
-
aid = acpi_match_device(mlxbf_i2c_acpi_ids, dev);
if (!aid)
return -ENODEV;
priv->chip = (struct mlxbf_i2c_chip_info *)aid->driver_data;
- uid = acpi_device_uid(adev);
- if (!uid || !(*uid)) {
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &bus_id);
+ if (ret) {
dev_err(dev, "Cannot retrieve UID\n");
- return -ENODEV;
+ return ret;
}
- ret = kstrtoul(uid, 0, &bus_id);
- if (!ret)
- priv->bus = bus_id;
+ priv->bus = bus_id;
- return ret;
+ return 0;
}
#else
static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
@@ -2266,36 +2289,12 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
}
#endif /* CONFIG_ACPI */
-static int mlxbf_i2c_of_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
-{
- const struct of_device_id *oid;
- int bus_id = -1;
-
- if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- oid = of_match_node(mlxbf_i2c_dt_ids, dev->of_node);
- if (!oid)
- return -ENODEV;
-
- priv->chip = oid->data;
-
- bus_id = of_alias_get_id(dev->of_node, "i2c");
- if (bus_id >= 0)
- priv->bus = bus_id;
- }
-
- if (bus_id < 0) {
- dev_err(dev, "Cannot get bus id");
- return bus_id;
- }
-
- return 0;
-}
-
static int mlxbf_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mlxbf_i2c_priv *priv;
struct i2c_adapter *adap;
+ u32 resource_version;
int irq, ret;
priv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_priv), GFP_KERNEL);
@@ -2303,17 +2302,63 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mlxbf_i2c_acpi_probe(dev, priv);
- if (ret < 0 && ret != -ENOENT && ret != -ENXIO)
- ret = mlxbf_i2c_of_probe(dev, priv);
-
if (ret < 0)
return ret;
- ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
- MLXBF_I2C_SMBUS_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch smbus resource info");
- return ret;
+ /* This property allows the driver to stay backward compatible with
+ * older ACPI tables.
+ * Starting with the BlueField-3 SoC, the "smbus" resource was broken
+ * down into three separate resources: "timer", "master" and "slave".
+ */
+ if (device_property_read_u32(dev, "resource_version", &resource_version))
+ resource_version = 0;
+
+ priv->resource_version = resource_version;
+
+ if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && resource_version == 0) {
+ priv->timer = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL);
+ if (!priv->timer)
+ return -ENOMEM;
+
+ priv->mst = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL);
+ if (!priv->mst)
+ return -ENOMEM;
+
+ priv->slv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL);
+ if (!priv->slv)
+ return -ENOMEM;
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
+ MLXBF_I2C_SMBUS_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch smbus resource info");
+ return ret;
+ }
+
+ priv->timer->io = priv->smbus->io;
+ priv->mst->io = priv->smbus->io + MLXBF_I2C_MST_ADDR_OFFSET;
+ priv->slv->io = priv->smbus->io + MLXBF_I2C_SLV_ADDR_OFFSET;
+ } else {
+ ret = mlxbf_i2c_init_resource(pdev, &priv->timer,
+ MLXBF_I2C_SMBUS_TIMER_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch timer resource info");
+ return ret;
+ }
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->mst,
+ MLXBF_I2C_SMBUS_MST_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch master resource info");
+ return ret;
+ }
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->slv,
+ MLXBF_I2C_SMBUS_SLV_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch slave resource info");
+ return ret;
+ }
}
ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause,
@@ -2372,8 +2417,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
+ ret = devm_request_irq(dev, irq, mlxbf_i2c_irq,
+ IRQF_SHARED | IRQF_PROBE_SHARED,
dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "Cannot get irq %d\n", irq);
@@ -2401,8 +2446,19 @@ static int mlxbf_i2c_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *params;
- params = priv->smbus->params;
- devm_release_mem_region(dev, params->start, resource_size(params));
+ if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && priv->resource_version == 0) {
+ params = priv->smbus->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+ } else {
+ params = priv->timer->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ params = priv->mst->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ params = priv->slv->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+ }
params = priv->mst_cause->params;
devm_release_mem_region(dev, params->start, resource_size(params));
@@ -2434,7 +2490,6 @@ static struct platform_driver mlxbf_i2c_driver = {
.remove = mlxbf_i2c_remove,
.driver = {
.name = "i2c-mlxbf",
- .of_match_table = mlxbf_i2c_dt_ids,
#ifdef CONFIG_ACPI
.acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
#endif /* CONFIG_ACPI */
@@ -2467,4 +2522,5 @@ module_exit(mlxbf_i2c_exit);
MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver");
MODULE_AUTHOR("Khalil Blaiech <kblaiech@nvidia.com>");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index ecba1dfc1278..849848ccb080 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -400,7 +400,6 @@ static int riic_i2c_probe(struct platform_device *pdev)
{
struct riic_dev *riic;
struct i2c_adapter *adap;
- struct resource *res;
struct i2c_timings i2c_t;
struct reset_control *rstc;
int i, ret;
@@ -409,8 +408,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
if (!riic)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- riic->base = devm_ioremap_resource(&pdev->dev, res);
+ riic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(riic->base))
return PTR_ERR(riic->base);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 2e98e7793bba..d1658ed76562 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1165,6 +1165,11 @@ static const struct rk3x_i2c_soc_data rv1108_soc_data = {
.calc_timings = rk3x_i2c_v1_calc_timings,
};
+static const struct rk3x_i2c_soc_data rv1126_soc_data = {
+ .grf_offset = 0x118,
+ .calc_timings = rk3x_i2c_v1_calc_timings,
+};
+
static const struct rk3x_i2c_soc_data rk3066_soc_data = {
.grf_offset = 0x154,
.calc_timings = rk3x_i2c_v0_calc_timings,
@@ -1196,6 +1201,10 @@ static const struct of_device_id rk3x_i2c_match[] = {
.data = &rv1108_soc_data
},
{
+ .compatible = "rockchip,rv1126-i2c",
+ .data = &rv1126_soc_data
+ },
+ {
.compatible = "rockchip,rk3066-i2c",
.data = &rk3066_soc_data
},
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 79798fc7462a..0239e134b90f 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -6,15 +6,13 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#define ACPI_SMBUS_HC_CLASS "smbus"
-#define ACPI_SMBUS_HC_DEVICE_NAME "cmi"
-
/* SMBUS HID definition as supported by Microsoft Windows */
#define ACPI_SMBUS_MS_HID "SMB0001"
@@ -358,8 +356,9 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
return AE_OK;
}
-static int acpi_smbus_cmi_add(struct acpi_device *device)
+static int smbus_cmi_probe(struct platform_device *device)
{
+ struct device *dev = &device->dev;
struct acpi_smbus_cmi *smbus_cmi;
int ret;
@@ -367,11 +366,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
if (!smbus_cmi)
return -ENOMEM;
- smbus_cmi->handle = device->handle;
- smbus_cmi->methods = device_get_match_data(&device->dev);
- strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
- device->driver_data = smbus_cmi;
+ smbus_cmi->handle = ACPI_HANDLE(dev);
+ smbus_cmi->methods = device_get_match_data(dev);
+
+ platform_set_drvdata(device, smbus_cmi);
+
smbus_cmi->cap_info = 0;
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
@@ -385,8 +384,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
}
snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
- "SMBus CMI adapter %s",
- acpi_device_name(device));
+ "SMBus CMI adapter %s", dev_name(dev));
smbus_cmi->adapter.owner = THIS_MODULE;
smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
smbus_cmi->adapter.algo_data = smbus_cmi;
@@ -403,31 +401,28 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
err:
kfree(smbus_cmi);
- device->driver_data = NULL;
return ret;
}
-static int acpi_smbus_cmi_remove(struct acpi_device *device)
+static int smbus_cmi_remove(struct platform_device *device)
{
- struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device);
+ struct acpi_smbus_cmi *smbus_cmi = platform_get_drvdata(device);
i2c_del_adapter(&smbus_cmi->adapter);
kfree(smbus_cmi);
- device->driver_data = NULL;
return 0;
}
-static struct acpi_driver acpi_smbus_cmi_driver = {
- .name = ACPI_SMBUS_HC_DEVICE_NAME,
- .class = ACPI_SMBUS_HC_CLASS,
- .ids = acpi_smbus_cmi_ids,
- .ops = {
- .add = acpi_smbus_cmi_add,
- .remove = acpi_smbus_cmi_remove,
+static struct platform_driver smbus_cmi_driver = {
+ .probe = smbus_cmi_probe,
+ .remove = smbus_cmi_remove,
+ .driver = {
+ .name = "smbus_cmi",
+ .acpi_match_table = acpi_smbus_cmi_ids,
},
};
-module_acpi_driver(acpi_smbus_cmi_driver);
+module_platform_driver(smbus_cmi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crane Cai <crane.cai@amd.com>");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 031c78ac42e6..954022c04cc4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -443,11 +443,16 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
u32 *dma_buf;
int err;
- if (!i2c_dev->hw->has_apb_dma || i2c_dev->is_vi)
+ if (i2c_dev->is_vi)
return 0;
- if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
- dev_dbg(i2c_dev->dev, "DMA support not enabled\n");
+ if (!i2c_dev->hw->has_apb_dma) {
+ if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
+ dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n");
+ return 0;
+ }
+ } else if (!IS_ENABLED(CONFIG_TEGRA186_GPC_DMA)) {
+ dev_dbg(i2c_dev->dev, "GPC DMA support not enabled\n");
return 0;
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 08b561f0709d..da6568a20177 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -26,7 +26,7 @@ struct gsb_buffer {
union {
u16 wdata;
u8 bdata;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
} __packed;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 91007558bcb2..8c7e3494ca5f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -599,13 +599,9 @@ static void i2c_device_remove(struct device *dev)
driver = to_i2c_driver(dev->driver);
if (driver->remove) {
- int status;
-
dev_dbg(dev, "remove\n");
- status = driver->remove(client);
- if (status)
- dev_warn(dev, "remove failed (%pe), will be ignored\n", ERR_PTR(status));
+ driver->remove(client);
}
devres_release_group(&client->dev, client->devres_group_id);
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 774507b54b57..313904be5f3b 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
int (*deselect)(struct i2c_mux_core *, u32))
{
struct i2c_mux_core *muxc;
+ size_t mux_size;
- muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters)
- + sizeof_priv, GFP_KERNEL);
+ mux_size = struct_size(muxc, adapter, max_adapters);
+ muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL);
if (!muxc)
return NULL;
if (sizeof_priv)
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 5c7ae421cacf..4abc2d919881 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -181,14 +181,12 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de
return 0;
};
-static int i2c_slave_eeprom_remove(struct i2c_client *client)
+static void i2c_slave_eeprom_remove(struct i2c_client *client)
{
struct eeprom_data *eeprom = i2c_get_clientdata(client);
i2c_slave_unregister(client);
sysfs_remove_bin_file(&client->dev.kobj, &eeprom->bin);
-
- return 0;
}
static const struct i2c_device_id i2c_slave_eeprom_id[] = {
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 56dae08dfd48..75ee7ebdb614 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -153,13 +153,12 @@ static int i2c_slave_testunit_probe(struct i2c_client *client)
return i2c_slave_register(client, i2c_slave_testunit_slave_cb);
};
-static int i2c_slave_testunit_remove(struct i2c_client *client)
+static void i2c_slave_testunit_remove(struct i2c_client *client)
{
struct testunit_data *tu = i2c_get_clientdata(client);
cancel_delayed_work_sync(&tu->worker);
i2c_slave_unregister(client);
- return 0;
}
static const struct i2c_device_id i2c_slave_testunit_id[] = {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 8ba9b59a3c40..07c92c8495a3 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -153,12 +153,11 @@ static int smbalert_probe(struct i2c_client *ara,
}
/* IRQ and memory resources are managed so they are freed automatically */
-static int smbalert_remove(struct i2c_client *ara)
+static void smbalert_remove(struct i2c_client *ara)
{
struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
cancel_work_sync(&alert->alert);
- return 0;
}
static const struct i2c_device_id smbalert_ids[] = {
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 1708b1a82da2..ea838dbae32e 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -9,7 +9,7 @@ menu "Multiplexer I2C Chip support"
config I2C_ARB_GPIO_CHALLENGE
tristate "GPIO-based I2C arbitration"
depends on GPIOLIB || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for an
I2C multimaster arbitration scheme using GPIOs and a challenge &
@@ -34,7 +34,7 @@ config I2C_MUX_GPIO
config I2C_MUX_GPMUX
tristate "General Purpose I2C multiplexer"
select MULTIPLEXER
- depends on OF || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for a
general purpose I2C multiplexer. This driver provides access to
@@ -77,7 +77,7 @@ config I2C_MUX_PCA954x
config I2C_MUX_PINCTRL
tristate "pinctrl-based I2C multiplexer"
depends on PINCTRL
- depends on OF || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for an I2C
multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 704f1e50f6f4..70835825083f 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -294,13 +294,11 @@ static int ltc4306_probe(struct i2c_client *client)
return 0;
}
-static int ltc4306_remove(struct i2c_client *client)
+static void ltc4306_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
i2c_mux_del_adapters(muxc);
-
- return 0;
}
static struct i2c_driver ltc4306_driver = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 6daec8d3d331..ea83de78f52d 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -325,12 +325,11 @@ static int pca9541_probe(struct i2c_client *client,
return 0;
}
-static int pca9541_remove(struct i2c_client *client)
+static void pca9541_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
i2c_mux_del_adapters(muxc);
- return 0;
}
static struct i2c_driver pca9541_driver = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 4ad665757dd8..a5f458b635df 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -521,14 +521,13 @@ fail_cleanup:
return ret;
}
-static int pca954x_remove(struct i2c_client *client)
+static void pca954x_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
device_remove_file(&client->dev, &dev_attr_idle_state);
pca954x_cleanup(muxc);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index f1bb00a11ad6..d5ad904756fd 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
static struct platform_driver i2c_mux_pinctrl_driver = {
.driver = {
.name = "i2c-mux-pinctrl",
- .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+ .of_match_table = i2c_mux_pinctrl_of_match,
},
.probe = i2c_mux_pinctrl_probe,
.remove = i2c_mux_pinctrl_remove,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3e101719689a..cfeb24d40d37 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -928,6 +928,51 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state adl_n_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 2,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 195,
+ .target_residency = 585,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C8",
+ .desc = "MWAIT 0x40",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 260,
+ .target_residency = 1040,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C10",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 660,
+ .target_residency = 1980,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state spr_cstates[] __initdata = {
{
.name = "C1",
@@ -1309,6 +1354,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
.state_table = adl_l_cstates,
};
+static const struct idle_cpu idle_cpu_adl_n __initconst = {
+ .state_table = adl_n_cstates,
+};
+
static const struct idle_cpu idle_cpu_spr __initconst = {
.state_table = spr_cstates,
.disable_promotion_to_c1e = true,
@@ -1379,6 +1428,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
@@ -1507,7 +1557,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
state = &drv->states[drv->state_count++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
- strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
/*
* For C1-type C-states use the same number for both the exit
@@ -1816,6 +1866,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
break;
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
+ case INTEL_FAM6_ALDERLAKE_N:
adl_idle_state_table_update();
break;
}
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 9c9e98578667..d03fc3400f94 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -1045,7 +1045,7 @@ err_disable_vdd:
return ret;
}
-static int bma180_remove(struct i2c_client *client)
+static void bma180_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bma180_data *data = iio_priv(indio_dev);
@@ -1062,8 +1062,6 @@ static int bma180_remove(struct i2c_client *client)
mutex_unlock(&data->mutex);
regulator_disable(data->vddio_supply);
regulator_disable(data->vdd_supply);
-
- return 0;
}
static int bma180_suspend(struct device *dev)
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index dff4d7dd101c..be8cc598b88e 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -209,13 +209,11 @@ static int bmc150_accel_probe(struct i2c_client *client,
return 0;
}
-static int bmc150_accel_remove(struct i2c_client *client)
+static void bmc150_accel_remove(struct i2c_client *client)
{
bmc150_acpi_dual_accel_remove(client);
bmc150_accel_core_remove(&client->dev);
-
- return 0;
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 748b35c2f0c3..94f7b6ac5c87 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1611,7 +1611,7 @@ err_poweroff:
return ret;
}
-static int kxcjk1013_remove(struct i2c_client *client)
+static void kxcjk1013_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct kxcjk1013_data *data = iio_priv(indio_dev);
@@ -1630,8 +1630,6 @@ static int kxcjk1013_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
kxcjk1013_set_mode(data, STANDBY);
mutex_unlock(&data->mutex);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index d57f264bd6c8..61346ea8ef19 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -32,11 +32,9 @@ static int kxsd9_i2c_probe(struct i2c_client *i2c,
i2c->name);
}
-static int kxsd9_i2c_remove(struct i2c_client *client)
+static void kxsd9_i2c_remove(struct i2c_client *client)
{
kxsd9_common_remove(&client->dev);
-
- return 0;
}
static const struct of_device_id kxsd9_of_match[] = {
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index c15d16e7f1da..2462000e0519 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -151,15 +151,13 @@ static int mc3230_probe(struct i2c_client *client,
return ret;
}
-static int mc3230_remove(struct i2c_client *client)
+static void mc3230_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
mc3230_set_opcon(iio_priv(indio_dev), MC3230_MODE_OPCON_STANDBY);
-
- return 0;
}
static int mc3230_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c
index a3b84e8a3ea8..c63b321b01cd 100644
--- a/drivers/iio/accel/mma7455_i2c.c
+++ b/drivers/iio/accel/mma7455_i2c.c
@@ -26,11 +26,9 @@ static int mma7455_i2c_probe(struct i2c_client *i2c,
return mma7455_core_probe(&i2c->dev, regmap, name);
}
-static int mma7455_i2c_remove(struct i2c_client *i2c)
+static void mma7455_i2c_remove(struct i2c_client *i2c)
{
mma7455_core_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id mma7455_i2c_ids[] = {
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 794f2f383303..85829990bbad 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -207,7 +207,7 @@ static int mma7660_probe(struct i2c_client *client,
return ret;
}
-static int mma7660_remove(struct i2c_client *client)
+static void mma7660_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
@@ -218,8 +218,6 @@ static int mma7660_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to put device in stand-by mode (%pe), ignoring\n",
ERR_PTR(ret));
-
- return 0;
}
static int mma7660_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index c7d9ca96dbaa..3ba28c2ff68a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1735,7 +1735,7 @@ disable_regulator_vdd:
return ret;
}
-static int mma8452_remove(struct i2c_client *client)
+static void mma8452_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mma8452_data *data = iio_priv(indio_dev);
@@ -1751,8 +1751,6 @@ static int mma8452_remove(struct i2c_client *client)
regulator_disable(data->vddio_reg);
regulator_disable(data->vdd_reg);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 123cdbbb265c..f7a793f4a8e3 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -509,7 +509,7 @@ out_poweroff:
return ret;
}
-static int mma9551_remove(struct i2c_client *client)
+static void mma9551_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mma9551_data *data = iio_priv(indio_dev);
@@ -522,8 +522,6 @@ static int mma9551_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
mma9551_set_device_state(data->client, false);
mutex_unlock(&data->mutex);
-
- return 0;
}
static int mma9551_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 09df58d4be33..2da0e005b13e 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1148,7 +1148,7 @@ out_poweroff:
return ret;
}
-static int mma9553_remove(struct i2c_client *client)
+static void mma9553_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mma9553_data *data = iio_priv(indio_dev);
@@ -1161,8 +1161,6 @@ static int mma9553_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
mma9551_set_device_state(data->client, false);
mutex_unlock(&data->mutex);
-
- return 0;
}
static int mma9553_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index ceca28913355..7b1d6fb692b3 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -597,7 +597,7 @@ err_power_off:
return ret;
}
-static int stk8312_remove(struct i2c_client *client)
+static void stk8312_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct stk8312_data *data = iio_priv(indio_dev);
@@ -609,8 +609,6 @@ static int stk8312_remove(struct i2c_client *client)
iio_trigger_unregister(data->dready_trig);
stk8312_set_mode(data, STK8312_MODE_STANDBY);
-
- return 0;
}
static int stk8312_suspend(struct device *dev)
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 7d59efb41e22..2f5e4ab2a6e7 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -490,7 +490,7 @@ err_power_off:
return ret;
}
-static int stk8ba50_remove(struct i2c_client *client)
+static void stk8ba50_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct stk8ba50_data *data = iio_priv(indio_dev);
@@ -502,8 +502,6 @@ static int stk8ba50_remove(struct i2c_client *client)
iio_trigger_unregister(data->dready_trig);
stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
-
- return 0;
}
static int stk8ba50_suspend(struct device *dev)
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 92c68d467c50..a2f9fda25ff3 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -287,10 +287,8 @@ static int ad7292_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev,
ad7292_regulator_disable, st);
- if (ret) {
- regulator_disable(st->reg);
+ if (ret)
return ret;
- }
ret = regulator_get_voltage(st->reg);
if (ret < 0)
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 262bd7665b33..6dbe9d5e08a2 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -880,7 +880,7 @@ error_disable_reg:
return ret;
}
-static int ad799x_remove(struct i2c_client *client)
+static void ad799x_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ad799x_state *st = iio_priv(indio_dev);
@@ -892,8 +892,6 @@ static int ad799x_remove(struct i2c_client *client)
regulator_disable(st->vref);
regulator_disable(st->reg);
kfree(st->rx_buf);
-
- return 0;
}
static int ad799x_suspend(struct device *dev)
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 240e6c420701..910e7e965fc4 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -1034,7 +1034,7 @@ static int ina2xx_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int ina2xx_remove(struct i2c_client *client)
+static void ina2xx_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
@@ -1048,8 +1048,6 @@ static int ina2xx_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static const struct i2c_device_id ina2xx_id[] = {
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index f7c786f37ceb..d58a432bafe1 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -74,13 +74,11 @@ static int ltc2497_probe(struct i2c_client *client,
return ltc2497core_probe(dev, indio_dev);
}
-static int ltc2497_remove(struct i2c_client *client)
+static void ltc2497_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
ltc2497core_remove(indio_dev);
-
- return 0;
}
static const struct i2c_device_id ltc2497_id[] = {
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 1cb4590fe412..890af7dca62d 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -40,8 +40,8 @@
#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
-/* Internal voltage reference in uV */
-#define MCP3911_INT_VREF_UV 1200000
+/* Internal voltage reference in mV */
+#define MCP3911_INT_VREF_MV 1200
#define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
@@ -113,6 +113,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
if (ret)
goto out;
+ *val = sign_extend32(*val, 23);
+
ret = IIO_VAL_INT;
break;
@@ -137,11 +139,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
*val = ret / 1000;
} else {
- *val = MCP3911_INT_VREF_UV;
+ *val = MCP3911_INT_VREF_MV;
}
- *val2 = 24;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * For 24-bit conversion:
+ * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
+ * Voltage = Raw * Vref / (2^23 * Gain * 1.5)
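+ *
+ * For example, assuming the internal 1200 mV reference and a gain of 1,
+ * a full-scale raw value of 2^23 maps back to 1200 / 1.5 = 800 mV.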
+ */
+
+ /* val2 = (2^23 * 1.5) */
+ *val2 = 12582912;
+ ret = IIO_VAL_FRACTIONAL;
break;
}
@@ -208,7 +217,14 @@ static int mcp3911_config(struct mcp3911 *adc)
u32 configreg;
int ret;
- device_property_read_u32(dev, "device-addr", &adc->dev_addr);
+ ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
+
+ /*
+ * Fall back to "device-addr" due to the historical mismatch between
+ * dt-bindings and implementation
+ */
+ if (ret)
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
if (adc->dev_addr > 3) {
dev_err(&adc->spi->dev,
"invalid device address (%i). Must be in range 0-3.\n",
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 2d393a4dfff6..a6ade70dedf8 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -412,9 +412,9 @@ static int sun4i_gpadc_runtime_resume(struct device *dev)
return 0;
}
-static int sun4i_gpadc_get_temp(void *data, int *temp)
+static int sun4i_gpadc_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct sun4i_gpadc_iio *info = data;
+ struct sun4i_gpadc_iio *info = tz->devdata;
int val, scale, offset;
if (sun4i_gpadc_temp_read(info->indio_dev, &val))
@@ -428,7 +428,7 @@ static int sun4i_gpadc_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+static const struct thermal_zone_device_ops sun4i_ts_tz_ops = {
.get_temp = &sun4i_gpadc_get_temp,
};
@@ -637,9 +637,9 @@ static int sun4i_gpadc_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (IS_ENABLED(CONFIG_THERMAL_OF)) {
- info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
- 0, info,
- &sun4i_ts_tz_ops);
+ info->tzd = devm_thermal_of_zone_register(info->sensor_device,
+ 0, info,
+ &sun4i_ts_tz_ops);
/*
* Do not fail driver probing when failing to register in
* thermal because no thermal DT node is found.
@@ -681,8 +681,6 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_THERMAL_OF))
return 0;
- thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
-
if (!info->no_irq)
iio_map_array_unregister(indio_dev);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index e3dfc155fbe2..8bceba694026 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -1094,7 +1094,7 @@ static int ads1015_probe(struct i2c_client *client,
return 0;
}
-static int ads1015_remove(struct i2c_client *client)
+static void ads1015_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
@@ -1110,8 +1110,6 @@ static int ads1015_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 8378c00fa2ff..7cac77a931c7 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -722,7 +722,7 @@ unregister_trigger:
return ret;
}
-static int atlas_remove(struct i2c_client *client)
+static void atlas_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct atlas_data *data = iio_priv(indio_dev);
@@ -739,8 +739,6 @@ static int atlas_remove(struct i2c_client *client)
if (ret)
dev_err(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int atlas_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 560183efb36f..ba4045e20303 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -532,7 +532,7 @@ err_poweroff:
return ret;
}
-static int ccs811_remove(struct i2c_client *client)
+static void ccs811_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ccs811_data *data = iio_priv(indio_dev);
@@ -548,8 +548,6 @@ static int ccs811_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static const struct i2c_device_id ccs811_id[] = {
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 2343d444604d..e2c13c78c7e0 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -552,15 +552,13 @@ static int sgp_probe(struct i2c_client *client,
return 0;
}
-static int sgp_remove(struct i2c_client *client)
+static void sgp_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct sgp_data *data = iio_priv(indio_dev);
if (data->iaq_thread)
kthread_stop(data->iaq_thread);
-
- return 0;
}
static const struct i2c_device_id sgp_id[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 81775152aac6..a81bfa47a221 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -559,11 +559,9 @@ static int ad5380_i2c_probe(struct i2c_client *i2c,
return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
}
-static int ad5380_i2c_remove(struct i2c_client *i2c)
+static void ad5380_i2c_remove(struct i2c_client *i2c)
{
ad5380_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5380_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 09e242949cd0..7324065d3782 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -575,11 +575,9 @@ static int ad5446_i2c_probe(struct i2c_client *i2c,
&ad5446_i2c_chip_info[id->driver_data]);
}
-static int ad5446_i2c_remove(struct i2c_client *i2c)
+static void ad5446_i2c_remove(struct i2c_client *i2c)
{
ad5446_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5446_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 34e1319a9712..92be661034a6 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -97,11 +97,9 @@ static int ad5593r_i2c_probe(struct i2c_client *i2c,
return ad5592r_probe(&i2c->dev, id->name, &ad5593r_rw_ops);
}
-static int ad5593r_i2c_remove(struct i2c_client *i2c)
+static void ad5593r_i2c_remove(struct i2c_client *i2c)
{
ad5592r_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5593r_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 762503c1901b..aa36cbf0137c 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -65,11 +65,9 @@ static int ad5686_i2c_probe(struct i2c_client *i2c,
ad5686_i2c_write, ad5686_i2c_read);
}
-static int ad5686_i2c_remove(struct i2c_client *i2c)
+static void ad5686_i2c_remove(struct i2c_client *i2c)
{
ad5686_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5686_i2c_id[] = {
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index 509394690bcc..3e17a681174e 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -281,15 +281,13 @@ fail:
return ret;
}
-static int ds4424_remove(struct i2c_client *client)
+static void ds4424_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ds4424_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(data->vcc_reg);
-
- return 0;
}
static const struct i2c_device_id ds4424_id[] = {
diff --git a/drivers/iio/dac/m62332.c b/drivers/iio/dac/m62332.c
index 22b02f50fe41..5a812f87970c 100644
--- a/drivers/iio/dac/m62332.c
+++ b/drivers/iio/dac/m62332.c
@@ -218,7 +218,7 @@ err:
return ret;
}
-static int m62332_remove(struct i2c_client *client)
+static void m62332_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -226,8 +226,6 @@ static int m62332_remove(struct i2c_client *client)
iio_map_array_unregister(indio_dev);
m62332_set_value(indio_dev, 0, 0);
m62332_set_value(indio_dev, 0, 1);
-
- return 0;
}
static const struct i2c_device_id m62332_id[] = {
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index bb4b85a7b95b..446d1a8fe4be 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -486,7 +486,7 @@ err_disable_vdd_reg:
return err;
}
-static int mcp4725_remove(struct i2c_client *client)
+static void mcp4725_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mcp4725_data *data = iio_priv(indio_dev);
@@ -496,8 +496,6 @@ static int mcp4725_remove(struct i2c_client *client)
if (data->vref_reg)
regulator_disable(data->vref_reg);
regulator_disable(data->vdd_reg);
-
- return 0;
}
static const struct i2c_device_id mcp4725_id[] = {
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index f91f8a504989..3210e3098f9a 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -382,15 +382,13 @@ static int dac5571_probe(struct i2c_client *client,
return ret;
}
-static int dac5571_remove(struct i2c_client *i2c)
+static void dac5571_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct dac5571_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(data->vref);
-
- return 0;
}
static const struct of_device_id dac5571_of_id[] = {
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index b3fa46bd02cb..908ccc385254 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -32,11 +32,9 @@ static int bmg160_i2c_probe(struct i2c_client *client,
return bmg160_core_probe(&client->dev, regmap, client->irq, name);
}
-static int bmg160_i2c_remove(struct i2c_client *client)
+static void bmg160_i2c_remove(struct i2c_client *client)
{
bmg160_core_remove(&client->dev);
-
- return 0;
}
static const struct acpi_device_id bmg160_acpi_match[] = {
diff --git a/drivers/iio/gyro/fxas21002c_i2c.c b/drivers/iio/gyro/fxas21002c_i2c.c
index a7807fd97483..13bb52c594d1 100644
--- a/drivers/iio/gyro/fxas21002c_i2c.c
+++ b/drivers/iio/gyro/fxas21002c_i2c.c
@@ -33,11 +33,9 @@ static int fxas21002c_i2c_probe(struct i2c_client *i2c)
return fxas21002c_core_probe(&i2c->dev, regmap, i2c->irq, i2c->name);
}
-static int fxas21002c_i2c_remove(struct i2c_client *i2c)
+static void fxas21002c_i2c_remove(struct i2c_client *i2c)
{
fxas21002c_core_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id fxas21002c_i2c_id[] = {
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 0491c64e1b32..421501584587 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -350,7 +350,7 @@ error_unconfigure_buffer:
return ret;
}
-static int itg3200_remove(struct i2c_client *client)
+static void itg3200_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -360,8 +360,6 @@ static int itg3200_remove(struct i2c_client *client)
itg3200_remove_trigger(indio_dev);
itg3200_buffer_unconfigure(indio_dev);
-
- return 0;
}
static int itg3200_suspend(struct device *dev)
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 78f4a0102986..12e3afa9dd11 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -78,7 +78,7 @@ static int mpu3050_i2c_probe(struct i2c_client *client,
return 0;
}
-static int mpu3050_i2c_remove(struct i2c_client *client)
+static void mpu3050_i2c_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
@@ -87,8 +87,6 @@ static int mpu3050_i2c_remove(struct i2c_client *client)
i2c_mux_del_adapters(mpu3050->i2cmux);
mpu3050_common_remove(&client->dev);
-
- return 0;
}
/*
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index dd7800159051..8fca787b2524 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -578,7 +578,7 @@ disable_reg:
return ret;
}
-static int afe4404_remove(struct i2c_client *client)
+static void afe4404_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct afe4404_data *afe = iio_priv(indio_dev);
@@ -594,8 +594,6 @@ static int afe4404_remove(struct i2c_client *client)
ret = regulator_disable(afe->regulator);
if (ret)
dev_err(afe->dev, "Unable to disable regulator\n");
-
- return 0;
}
static const struct i2c_device_id afe4404_ids[] = {
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index ad5717965223..2cca5e0519f8 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -471,15 +471,13 @@ static int max30100_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int max30100_remove(struct i2c_client *client)
+static void max30100_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct max30100_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
max30100_set_powermode(data, false);
-
- return 0;
}
static const struct i2c_device_id max30100_id[] = {
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index abbcef563807..437298a29f2d 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -592,15 +592,13 @@ static int max30102_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int max30102_remove(struct i2c_client *client)
+static void max30102_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct max30102_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
max30102_set_power(data, false);
-
- return 0;
}
static const struct i2c_device_id max30102_id[] = {
diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c
index 1381df46187c..d6858ccb056e 100644
--- a/drivers/iio/humidity/hdc2010.c
+++ b/drivers/iio/humidity/hdc2010.c
@@ -308,7 +308,7 @@ static int hdc2010_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int hdc2010_remove(struct i2c_client *client)
+static void hdc2010_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct hdc2010_data *data = iio_priv(indio_dev);
@@ -318,8 +318,6 @@ static int hdc2010_remove(struct i2c_client *client)
/* Disable Automatic Measurement Mode */
if (hdc2010_update_drdy_config(data, HDC2010_AMM, 0))
dev_warn(&client->dev, "Unable to restore default AMM\n");
-
- return 0;
}
static const struct i2c_device_id hdc2010_id[] = {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 2aa647704a79..14255a918eb1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -157,7 +157,7 @@ out_del_mux:
return result;
}
-static int inv_mpu_remove(struct i2c_client *client)
+static void inv_mpu_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -166,8 +166,6 @@ static int inv_mpu_remove(struct i2c_client *client)
inv_mpu_acpi_delete_mux_client(client);
i2c_mux_del_adapters(st->muxc);
}
-
- return 0;
}
/*
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index ec23b1ee472b..b10c0dcac0bb 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1418,7 +1418,7 @@ err_chip_uninit:
return ret;
}
-static int kmx61_remove(struct i2c_client *client)
+static void kmx61_remove(struct i2c_client *client)
{
struct kmx61_data *data = i2c_get_clientdata(client);
@@ -1439,8 +1439,6 @@ static int kmx61_remove(struct i2c_client *client)
mutex_lock(&data->lock);
kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
mutex_unlock(&data->lock);
-
- return 0;
}
static int kmx61_suspend(struct device *dev)
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 0f9d77598997..b70f2681bcb3 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -452,7 +452,7 @@ err:
return ret;
}
-static int apds9300_remove(struct i2c_client *client)
+static void apds9300_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct apds9300_data *data = iio_priv(indio_dev);
@@ -462,8 +462,6 @@ static int apds9300_remove(struct i2c_client *client)
/* Ensure that power off and interrupts are disabled */
apds9300_set_intr_state(data, 0);
apds9300_set_power_state(data, 0);
-
- return 0;
}
static int apds9300_suspend(struct device *dev)
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 09b831f9f40b..b62c139baf41 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1067,7 +1067,7 @@ error_power_down:
return ret;
}
-static int apds9960_remove(struct i2c_client *client)
+static void apds9960_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct apds9960_data *data = iio_priv(indio_dev);
@@ -1076,8 +1076,6 @@ static int apds9960_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
apds9960_set_powermode(data, 0);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 471985c220bb..3e92820bc820 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -263,7 +263,7 @@ static int bh1750_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int bh1750_remove(struct i2c_client *client)
+static void bh1750_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bh1750_data *data = iio_priv(indio_dev);
@@ -273,8 +273,6 @@ static int bh1750_remove(struct i2c_client *client)
mutex_lock(&data->lock);
i2c_smbus_write_byte(client, BH1750_POWER_DOWN);
mutex_unlock(&data->lock);
-
- return 0;
}
static int bh1750_suspend(struct device *dev)
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index fc7141390117..90bca392b262 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -202,7 +202,7 @@ out_disable_pm:
return ret;
}
-static int bh1780_remove(struct i2c_client *client)
+static void bh1780_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bh1780_data *bh1780 = iio_priv(indio_dev);
@@ -216,8 +216,6 @@ static int bh1780_remove(struct i2c_client *client)
if (ret < 0)
dev_err(&client->dev, "failed to power off (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int bh1780_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index edbe6a3138d0..001055d09750 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -505,7 +505,7 @@ static int cm32181_resume(struct device *dev)
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
}
-DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
static const struct of_device_id cm32181_of_match[] = {
{ .compatible = "capella,cm3218" },
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 2c80a0535d2c..5214cd014cf8 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -357,7 +357,7 @@ static int cm3232_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int cm3232_remove(struct i2c_client *client)
+static void cm3232_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -365,8 +365,6 @@ static int cm3232_remove(struct i2c_client *client)
CM3232_CMD_ALS_DISABLE);
iio_device_unregister(indio_dev);
-
- return 0;
}
static const struct i2c_device_id cm3232_id[] = {
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index c721b69d5095..0b30db77f78b 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -226,8 +226,10 @@ static int cm3605_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return dev_err_probe(dev, irq, "failed to get irq\n");
+ if (irq < 0) {
+ ret = dev_err_probe(dev, irq, "failed to get irq\n");
+ goto out_disable_aset;
+ }
ret = devm_request_threaded_irq(dev, irq, cm3605_prox_irq,
NULL, 0, "cm3605", indio_dev);
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 89f5e48a6642..6615c98b601c 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -700,7 +700,7 @@ error_disable_reg:
return ret;
}
-static int cm36651_remove(struct i2c_client *client)
+static void cm36651_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct cm36651_data *cm36651 = iio_priv(indio_dev);
@@ -710,8 +710,6 @@ static int cm36651_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
i2c_unregister_device(cm36651->ps_client);
i2c_unregister_device(cm36651->ara_client);
-
- return 0;
}
static const struct i2c_device_id cm36651_id[] = {
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index e2707416f9a8..8000fa347344 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -619,7 +619,7 @@ out_disable_vdd:
return ret;
}
-static int gp2ap002_remove(struct i2c_client *client)
+static void gp2ap002_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
@@ -631,8 +631,6 @@ static int gp2ap002_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
regulator_disable(gp2ap002->vio);
regulator_disable(gp2ap002->vdd);
-
- return 0;
}
static int gp2ap002_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index b820041159f7..826439299e8b 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1573,7 +1573,7 @@ error_regulator_disable:
return err;
}
-static int gp2ap020a00f_remove(struct i2c_client *client)
+static void gp2ap020a00f_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct gp2ap020a00f_data *data = iio_priv(indio_dev);
@@ -1589,8 +1589,6 @@ static int gp2ap020a00f_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(data->vled_reg);
-
- return 0;
}
static const struct i2c_device_id gp2ap020a00f_id[] = {
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index ff5996d77818..32d58e18f26d 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -636,7 +636,7 @@ static int isl29028_probe(struct i2c_client *client,
return 0;
}
-static int isl29028_remove(struct i2c_client *client)
+static void isl29028_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct isl29028_chip *chip = iio_priv(indio_dev);
@@ -647,8 +647,6 @@ static int isl29028_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
isl29028_clear_configure_reg(chip);
-
- return 0;
}
static int isl29028_suspend(struct device *dev)
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index eb68a52aab82..c199e63cce82 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -300,15 +300,13 @@ static int isl29125_powerdown(struct isl29125_data *data)
(data->conf1 & ~ISL29125_MODE_MASK) | ISL29125_MODE_PD);
}
-static int isl29125_remove(struct i2c_client *client)
+static void isl29125_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
isl29125_powerdown(iio_priv(indio_dev));
-
- return 0;
}
static int isl29125_suspend(struct device *dev)
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 5387c12231cf..57ce6d75966c 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -373,7 +373,7 @@ static int jsa1212_power_off(struct jsa1212_data *data)
return ret;
}
-static int jsa1212_remove(struct i2c_client *client)
+static void jsa1212_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct jsa1212_data *data = iio_priv(indio_dev);
@@ -381,8 +381,6 @@ static int jsa1212_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
jsa1212_power_off(data);
-
- return 0;
}
static int jsa1212_suspend(struct device *dev)
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 679a1e1086ae..74a1ccda8b9c 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1600,15 +1600,13 @@ powerdown_on_error:
return ret;
}
-static int ltr501_remove(struct i2c_client *client)
+static void ltr501_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
ltr501_powerdown(iio_priv(indio_dev));
-
- return 0;
}
static int ltr501_suspend(struct device *dev)
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index a326d47afc9b..a26d1c3f9543 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -794,7 +794,7 @@ static int opt3001_probe(struct i2c_client *client,
return 0;
}
-static int opt3001_remove(struct i2c_client *client)
+static void opt3001_remove(struct i2c_client *client)
{
struct iio_dev *iio = i2c_get_clientdata(client);
struct opt3001 *opt = iio_priv(iio);
@@ -808,7 +808,7 @@ static int opt3001_remove(struct i2c_client *client)
if (ret < 0) {
dev_err(opt->dev, "failed to read register %02x\n",
OPT3001_CONFIGURATION);
- return 0;
+ return;
}
reg = ret;
@@ -820,8 +820,6 @@ static int opt3001_remove(struct i2c_client *client)
dev_err(opt->dev, "failed to write register %02x\n",
OPT3001_CONFIGURATION);
}
-
- return 0;
}
static const struct i2c_device_id opt3001_id[] = {
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 772874e707ae..3cb2de51f4aa 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -394,7 +394,7 @@ out_err:
return ret;
}
-static int pa12203001_remove(struct i2c_client *client)
+static void pa12203001_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
@@ -408,8 +408,6 @@ static int pa12203001_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM)
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index dabdd05f0e2c..d1c16dd76058 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -1041,7 +1041,7 @@ err_poweroff:
return ret;
}
-static int rpr0521_remove(struct i2c_client *client)
+static void rpr0521_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -1051,8 +1051,6 @@ static int rpr0521_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
rpr0521_poweroff(iio_priv(indio_dev));
-
- return 0;
}
static int rpr0521_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index f7cc7a6c0c8d..7b8e0da6aabc 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -649,14 +649,12 @@ err_standby:
return ret;
}
-static int stk3310_remove(struct i2c_client *client)
+static void stk3310_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
stk3310_set_state(iio_priv(indio_dev), STK3310_STATE_STANDBY);
-
- return 0;
}
static int stk3310_suspend(struct device *dev)
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 823435f59bb6..db17fec634be 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -559,7 +559,7 @@ static int tcs3472_powerdown(struct tcs3472_data *data)
return ret;
}
-static int tcs3472_remove(struct i2c_client *client)
+static void tcs3472_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -568,8 +568,6 @@ static int tcs3472_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
tcs3472_powerdown(iio_priv(indio_dev));
-
- return 0;
}
static int tcs3472_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index bbb577459fb9..951f35ef3f41 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -796,7 +796,7 @@ fail:
return err;
}
-static int tsl2563_remove(struct i2c_client *client)
+static void tsl2563_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct tsl2563_chip *chip = iio_priv(indio_dev);
@@ -809,8 +809,6 @@ static int tsl2563_remove(struct i2c_client *client)
i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
chip->intr);
tsl2563_set_power(chip, 0);
-
- return 0;
}
static int tsl2563_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 82662dab87c0..0a2ca1a8146d 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -873,7 +873,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
return 0;
}
-static int tsl2583_remove(struct i2c_client *client)
+static void tsl2583_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -884,8 +884,6 @@ static int tsl2583_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
-
- return 0;
}
static int tsl2583_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index 6ae1b27e50b6..090038fed889 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -207,12 +207,10 @@ static int tsl4531_powerdown(struct i2c_client *client)
TSL4531_MODE_POWERDOWN);
}
-static int tsl4531_remove(struct i2c_client *client)
+static void tsl4531_remove(struct i2c_client *client)
{
iio_device_unregister(i2c_get_clientdata(client));
tsl4531_powerdown(client);
-
- return 0;
}
static int tsl4531_suspend(struct device *dev)
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 80d2299da561..3e652d7f3b0e 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -904,7 +904,7 @@ out_err:
}
-static int us5182d_remove(struct i2c_client *client)
+static void us5182d_remove(struct i2c_client *client)
{
struct us5182d_data *data = iio_priv(i2c_get_clientdata(client));
int ret;
@@ -918,8 +918,6 @@ static int us5182d_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to shut down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int us5182d_suspend(struct device *dev)
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 3db4e26731bb..f6c83ecaad8b 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1111,7 +1111,7 @@ static const struct of_device_id vcnl_4000_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vcnl_4000_of_match);
-static int vcnl4000_remove(struct i2c_client *client)
+static void vcnl4000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct vcnl4000_data *data = iio_priv(indio_dev);
@@ -1126,8 +1126,6 @@ static int vcnl4000_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int vcnl4000_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 6a196cf2270b..3ed37f6057fb 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -601,7 +601,7 @@ fail_poweroff:
return ret;
}
-static int vcnl4035_remove(struct i2c_client *client)
+static void vcnl4035_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
@@ -616,8 +616,6 @@ static int vcnl4035_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to put device into standby (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int vcnl4035_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index 1e55e09a8d16..cfa4e9e7c803 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -180,15 +180,13 @@ fail:
return ret;
}
-static int veml6070_remove(struct i2c_client *client)
+static void veml6070_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct veml6070_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
i2c_unregister_device(data->client2);
-
- return 0;
}
static const struct i2c_device_id veml6070_id[] = {
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index c89a91db0690..7ec9ab3beb45 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -969,7 +969,7 @@ disable_pm:
return ret;
}
-static int ak8974_remove(struct i2c_client *i2c)
+static void ak8974_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -981,8 +981,6 @@ static int ak8974_remove(struct i2c_client *i2c)
pm_runtime_disable(&i2c->dev);
ak8974_set_power(ak8974, AK8974_PWR_OFF);
regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
-
- return 0;
}
static int ak8974_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 2432e697150c..caf03a2a98a5 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -1018,7 +1018,7 @@ power_off:
return err;
}
-static int ak8975_remove(struct i2c_client *client)
+static void ak8975_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ak8975_data *data = iio_priv(indio_dev);
@@ -1030,8 +1030,6 @@ static int ak8975_remove(struct i2c_client *client)
iio_triggered_buffer_cleanup(indio_dev);
ak8975_set_mode(data, POWER_DOWN);
ak8975_power_off(data);
-
- return 0;
}
static int ak8975_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 65c004411d0f..570deaa87836 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -34,11 +34,9 @@ static int bmc150_magn_i2c_probe(struct i2c_client *client,
return bmc150_magn_probe(&client->dev, regmap, client->irq, name);
}
-static int bmc150_magn_i2c_remove(struct i2c_client *client)
+static void bmc150_magn_i2c_remove(struct i2c_client *client)
{
bmc150_magn_remove(&client->dev);
-
- return 0;
}
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 8d2ff8fc204d..fe5e8415b2f2 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -65,11 +65,9 @@ static int hmc5843_i2c_probe(struct i2c_client *cli,
id->driver_data, id->name);
}
-static int hmc5843_i2c_remove(struct i2c_client *client)
+static void hmc5843_i2c_remove(struct i2c_client *client)
{
hmc5843_common_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id hmc5843_id[] = {
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 226439d0bfb5..b870ad803862 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -559,7 +559,7 @@ disable_regulator_vdd:
return ret;
}
-static int mag3110_remove(struct i2c_client *client)
+static void mag3110_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mag3110_data *data = iio_priv(indio_dev);
@@ -569,8 +569,6 @@ static int mag3110_remove(struct i2c_client *client)
mag3110_standby(iio_priv(indio_dev));
regulator_disable(data->vddio_reg);
regulator_disable(data->vdd_reg);
-
- return 0;
}
static int mag3110_suspend(struct device *dev)
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index aeaa4da6923b..c3a10942654e 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -943,7 +943,7 @@ reg_off:
return ret;
}
-static int yas5xx_remove(struct i2c_client *i2c)
+static void yas5xx_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct yas5xx *yas5xx = iio_priv(indio_dev);
@@ -961,8 +961,6 @@ static int yas5xx_remove(struct i2c_client *i2c)
pm_runtime_disable(dev);
gpiod_set_value_cansleep(yas5xx->reset, 1);
regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
-
- return 0;
}
static int yas5xx_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index fe514f0b5506..5ec7060d31d9 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -384,7 +384,7 @@ error_unreg_trigger:
return ret;
}
-static int lmp91000_remove(struct i2c_client *client)
+static void lmp91000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct lmp91000_data *data = iio_priv(indio_dev);
@@ -396,8 +396,6 @@ static int lmp91000_remove(struct i2c_client *client)
iio_triggered_buffer_cleanup(indio_dev);
iio_trigger_unregister(data->trig);
-
- return 0;
}
static const struct of_device_id lmp91000_of_match[] = {
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index d4f89e4babed..2f22aba61e4d 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -290,15 +290,13 @@ static int mpl3115_standby(struct mpl3115_data *data)
data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE);
}
-static int mpl3115_remove(struct i2c_client *client)
+static void mpl3115_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
mpl3115_standby(iio_priv(indio_dev));
-
- return 0;
}
static int mpl3115_suspend(struct device *dev)
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 3b1de71e0d15..b681a4183909 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -105,11 +105,9 @@ static int ms5611_i2c_probe(struct i2c_client *client,
return ms5611_probe(indio_dev, &client->dev, id->name, id->driver_data);
}
-static int ms5611_i2c_remove(struct i2c_client *client)
+static void ms5611_i2c_remove(struct i2c_client *client)
{
ms5611_remove(i2c_get_clientdata(client));
-
- return 0;
}
static const struct of_device_id ms5611_i2c_matches[] = {
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index 0db0860d386b..f26dd8cbb387 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -53,11 +53,9 @@ static int zpa2326_probe_i2c(struct i2c_client *client,
zpa2326_i2c_hwid(client), regmap);
}
-static int zpa2326_remove_i2c(struct i2c_client *client)
+static void zpa2326_remove_i2c(struct i2c_client *client)
{
zpa2326_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id zpa2326_i2c_ids[] = {
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 648ae576d6fa..791a33d5286c 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -311,7 +311,7 @@ error_unreg_buffer:
return ret;
}
-static int lidar_remove(struct i2c_client *client)
+static void lidar_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -320,8 +320,6 @@ static int lidar_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct i2c_device_id lidar_id[] = {
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 42589d6200ad..d4670864ddc7 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -979,7 +979,7 @@ out_trigger_unregister:
return ret;
}
-static int sx9500_remove(struct i2c_client *client)
+static void sx9500_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct sx9500_data *data = iio_priv(indio_dev);
@@ -989,8 +989,6 @@ static int sx9500_remove(struct i2c_client *client)
if (client->irq > 0)
iio_trigger_unregister(data->trig);
kfree(data->buffer);
-
- return 0;
}
static int sx9500_suspend(struct device *dev)
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index c253a5315988..0808bb865928 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -571,7 +571,7 @@ static int mlx90614_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int mlx90614_remove(struct i2c_client *client)
+static void mlx90614_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mlx90614_data *data = iio_priv(indio_dev);
@@ -584,8 +584,6 @@ static int mlx90614_remove(struct i2c_client *client)
mlx90614_sleep(data);
pm_runtime_set_suspended(&client->dev);
}
-
- return 0;
}
static const struct i2c_device_id mlx90614_id[] = {
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 7ee7ff8047a4..e8ef47147e2b 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -924,7 +924,7 @@ static int mlx90632_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int mlx90632_remove(struct i2c_client *client)
+static void mlx90632_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mlx90632_data *data = iio_priv(indio_dev);
@@ -936,8 +936,6 @@ static int mlx90632_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
mlx90632_sleep(data);
-
- return 0;
}
static const struct i2c_device_id mlx90632_id[] = {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 46d06678dfbe..be317f2665a9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1841,8 +1841,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index fce80a4a5147..04c04e6d24c3 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
struct scatterlist *sg;
unsigned long start, end, cur = 0;
unsigned int nmap = 0;
+ long ret;
int i;
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
* may be not up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
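dma_resv_wait_timeout() returns a long — a negative errno, 0 on timeout, or the remaining time on success — so the hunk above folds that three-way result into the plain int error convention of ib_umem_dmabuf_map_pages(). A hedged sketch of the same fold in isolation (the wrapper name is invented for illustration):

	/*
	 * Illustrative only: collapse dma_resv_wait_timeout()'s long result
	 * (negative errno / 0 on timeout / time remaining) into 0 or -errno.
	 */
	static int wait_for_exporter(struct dma_resv *resv)
	{
		long ret;

		ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
					    false, MAX_SCHEDULE_TIMEOUT);
		if (ret < 0)
			return ret;		/* interrupted or other error */
		if (ret == 0)
			return -ETIMEDOUT;	/* fences never signalled */
		return 0;			/* migration finished */
	}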
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 186ed8859920..d39e16c211e8 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -462,7 +462,7 @@ retry:
mutex_unlock(&umem_odp->umem_mutex);
out_put_mm:
- mmput(owning_mm);
+ mmput_async(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c16017f6e8db..14392c942f49 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN_V(1);
}
- skb_get(skb);
- rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
- skb_trim(skb, sizeof(*rpl5));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 72f08171a28a..bc3ec22a62c5 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -407,7 +407,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
to_erdma_access_flags(reg_wr(send_wr)->access);
regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
regmr_sge->length = cpu_to_le32(mr->ibmr.length);
- regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index a7a3d42e2016..699bd3f59cd3 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -280,7 +280,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
attr->vendor_part_id = dev->pdev->device;
attr->hw_ver = dev->pdev->revision;
- attr->max_qp = dev->attrs.max_qp;
+ attr->max_qp = dev->attrs.max_qp - 1;
attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
attr->max_qp_rd_atom = dev->attrs.max_ord;
attr->max_qp_init_rd_atom = dev->attrs.max_ird;
@@ -291,7 +291,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_send_sge = dev->attrs.max_send_sge;
attr->max_recv_sge = dev->attrs.max_recv_sge;
attr->max_sge_rd = dev->attrs.max_sge_rd;
- attr->max_cq = dev->attrs.max_cq;
+ attr->max_cq = dev->attrs.max_cq - 1;
attr->max_cqe = dev->attrs.max_cqe;
attr->max_mr = dev->attrs.max_mr;
attr->max_pd = dev->attrs.max_pd;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f848eedc6a23..d24996526c4d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -730,7 +730,6 @@ struct hns_roce_caps {
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
- int num_qpc_timer;
u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cbdafaac678a..c780646bd60a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1977,7 +1977,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
@@ -2273,7 +2273,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
- caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f96debac30fe..64797109bab6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -36,11 +36,11 @@
#include <linux/bitops.h>
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
@@ -83,7 +83,7 @@
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c8af4ebd7cbd..4ccb217b2841 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -725,7 +725,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.num_qpc_timer, 1);
+ hr_dev->caps.qpc_timer_bt_num, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 48d3616a6d71..7bee7f6c5e70 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -462,11 +462,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
hr_qp->rq.rsv_sge);
- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
- hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index daeab5daed5b..a6e5d350a94c 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -1005,6 +1006,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
int ret_code;
bool move_cq_head = true;
u8 polarity;
+ u8 op_type;
bool ext_valid;
__le64 *ext_cqe;
@@ -1187,7 +1189,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
@@ -1204,6 +1205,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
break;
}
} while (1);
+ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index fdf4cc88cb91..075defaabee5 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
err_code = -EIO;
- if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
- cqp_request->compl_info.min_err_code == 0x8029) {
- if (!rf->reset) {
- rf->reset = true;
- rf->gen_ops.request_reset(rf);
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002)
+ err_code = -EBUSY;
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
}
}
}
@@ -2598,7 +2601,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwrcq);
+ irdma_comp_handler(iwqp->iwscq);
} else {
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b07b8af2997..9b207f5084eb 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_cq = rf->max_cq - rf->used_cqs;
- props->max_cqe = rf->max_cqe;
+ props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
- if (rdma_protocol_roce(ibdev, 1))
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ }
+
props->max_ah = rf->max_ah;
props->max_mcast_grp = rf->max_mcg;
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
@@ -3009,6 +3012,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
+ int status;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
@@ -3039,8 +3043,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
cqp_info->post_sq = 1;
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
irdma_free_stag(iwdev, iwmr->stag);
done:
if (iwpbl->pbl_allocated)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 293ed709e5ed..9c8a7b206dcf 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -147,6 +147,28 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
+static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out,
+ size_t sz)
+{
+ u32 *in;
+ int err;
+
+ in = kvzalloc(sz, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+ err = mlx5_core_access_reg(dev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ kvfree(in);
+ return err;
+}
+
static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -166,6 +188,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
@@ -202,8 +230,7 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
goto done;
}
- err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
- out_cnt, sz);
+ err = query_ib_ppcnt(mdev, mdev_port_num, out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a174a0eee8dc..883d7c60143e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
- dev->port_caps[port - 1].has_smi = false;
- if (MLX5_CAP_GEN(dev->mdev, port_type) ==
- MLX5_CAP_PORT_TYPE_IB) {
- if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port, 0,
- &vport_ctx);
- if (err) {
- mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
- port, err);
- return err;
- }
- dev->port_caps[port - 1].has_smi =
- vport_ctx.has_smi;
- } else {
- dev->port_caps[port - 1].has_smi = true;
- }
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
}
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
}
+
return 0;
}
@@ -4338,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2e2ad3918385..62338f44a30e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -708,6 +708,7 @@ struct mlx5_ib_umr_context {
};
enum {
+ MLX5_UMR_STATE_UNINIT,
MLX5_UMR_STATE_ACTIVE,
MLX5_UMR_STATE_RECOVER,
MLX5_UMR_STATE_ERR,
@@ -1540,6 +1541,18 @@ int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
+ /*
+ * If the driver is in hash mode and the port_select_flow_table_bypass cap
+ * is supported, it means that the driver no longer needs to assign the port
+ * affinity by default. If a user wants to set the port affinity explicitly,
+ * the user has a dedicated API to do that, so there is no need to assign
+ * the port affinity by default.
+ */
+ if (dev->lag_active &&
+ mlx5_lag_mode_is_hash(dev->mdev) &&
+ MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+ return 0;
+
return dev->lag_active ||
(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index e00b94d1b1ea..d5105b5c9979 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -177,6 +177,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
sema_init(&dev->umrc.sem, MAX_UMR_WR);
mutex_init(&dev->umrc.lock);
+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
return 0;
@@ -191,6 +192,8 @@ destroy_pd:
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+ return;
ib_destroy_qp(dev->umrc.qp);
ib_free_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 1f4e60257700..7d47b521070b 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
- return virt_to_page(paddr);
+ return virt_to_page((void *)paddr);
return NULL;
}
@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
kunmap_local(kaddr);
}
} else {
- u64 va = sge->laddr + sge_off;
+ /*
+ * Cast to an uintptr_t to preserve all 64 bits
+ * in sge->laddr.
+ */
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
- page_array[seg] = virt_to_page(va & PAGE_MASK);
+ /*
+ * virt_to_page() takes a (void *) pointer
+ * so cast to a (void *) meaning it will be 64
+ * bits on a 64 bit platform and 32 bits on a
+ * 32 bit platform.
+ */
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(uintptr_t)va,
+ (void *)va,
plen);
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index bd5f3b5e1727..7b83f48f60c5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iscsi_hdr *hdr;
char *data;
int length;
+ bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- if (iser_conn->iscsi_conn->session->discovery_sess)
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index baecde41d126..449904dac0a9 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1004,7 +1004,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, bool fr_en,
- u32 size, u32 imm, struct ib_send_wr *wr,
+ u32 count, u32 size, u32 imm,
+ struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -1024,12 +1025,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
num_sge = 2;
ptail = tail;
} else {
- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ for_each_sg(req->sglist, sg, count, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
- num_sge = 1 + req->sg_cnt;
+ num_sge = 1 + count;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
@@ -1142,7 +1143,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, WRITE);
- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
imm, wr, &inv_wr);
if (ret) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 34c03bde5064..4894e7329d88 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -595,7 +595,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
- int nr, chunks;
+ int nr, nr_sgt, chunks;
chunks = chunks_per_mr * mri;
if (!always_invalidate)
@@ -610,19 +610,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
- if (nr < sgt->nents) {
- err = nr < 0 ? nr : -EINVAL;
+ if (!nr_sgt) {
+ err = -EINVAL;
goto free_sg;
}
mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sgt->nents);
+ nr_sgt);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto unmap_sg;
}
- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
if (nr < 0 || nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
@@ -641,7 +641,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
}
}
/* Eventually dma addr for each chunk can be cached */
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ for_each_sg(sgt->sgl, s, nr_sgt, i)
srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
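The map_cont_bufs() change above fixes how the result of ib_dma_map_sg() is used: it returns the number of mapped entries (0 on failure, and possibly fewer than the original nents when entries are combined), and that mapped count is what must size the MR and drive later iteration, not sgt->nents. A hedged sketch of the pattern with the rtrs-specific context stripped away (the helper name and parameters are illustrative):

	/*
	 * Illustrative only: carry the count returned by ib_dma_map_sg()
	 * into ib_alloc_mr()/ib_map_mr_sg() and any walk of the mapped list.
	 */
	static int map_and_register(struct ib_device *ib_dev, struct ib_pd *ib_pd,
				    struct sg_table *sgt, u32 max_chunk_size,
				    struct ib_mr **mr_out)
	{
		struct ib_mr *mr;
		int nr_sgt, nr;

		nr_sgt = ib_dma_map_sg(ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
		if (!nr_sgt)
			return -EINVAL;				/* mapping failed */

		mr = ib_alloc_mr(ib_pd, IB_MR_TYPE_MEM_REG, nr_sgt);
		if (IS_ERR(mr)) {
			ib_dma_unmap_sg(ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
			return PTR_ERR(mr);
		}

		nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, NULL, max_chunk_size);
		if (nr < 0 || nr < sgt->nents) {
			ib_dereg_mr(mr);
			ib_dma_unmap_sg(ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
			return nr < 0 ? nr : -EINVAL;		/* did not cover all chunks */
		}

		*mr_out = mr;
		return 0;
	}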
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7720ea270ed8..d7f69e593a63 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1961,7 +1961,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
if (scmnd) {
req = scsi_cmd_priv(scmnd);
scmnd = srp_claim_req(ch, req, NULL, scmnd);
- } else {
+ }
+ if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
rsp->tag, ch - target->ch, ch->qp->qp_num);
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 34bcd99a46f5..2beda29021a3 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -327,7 +327,7 @@ err_free_mem:
return error;
}
-static int as5011_remove(struct i2c_client *client)
+static void as5011_remove(struct i2c_client *client)
{
struct as5011_device *as5011 = i2c_get_clientdata(client);
@@ -337,8 +337,6 @@ static int as5011_remove(struct i2c_client *client)
input_unregister_device(as5011->input_dev);
kfree(as5011);
-
- return 0;
}
static const struct i2c_device_id as5011_id[] = {
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b2a68bc9f0b4..b86de1312512 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index f95a81b9fac7..2380546d7978 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
again:
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -64,7 +64,7 @@ again:
if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
goto again;
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
}
@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
iforce_serio->cmd_response_len = iforce_serio->len;
/* Signal that command is done */
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
} else if (likely(iforce->type)) {
iforce_process_packet(iforce, iforce_serio->id,
iforce_serio->data_in,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ea58805c480f..cba92bd590a8 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
spin_lock_irqsave(&iforce->xmit_lock, flags);
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
XMIT_INC(iforce->xmit.tail, n);
if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_warn(&iforce_usb->intf->dev,
"usb_submit_urb failed %d\n", n);
+ iforce_clear_xmit_and_wake(iforce);
}
/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = &iforce_usb->iforce;
if (urb->status) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
urb->status);
+ iforce_clear_xmit_and_wake(iforce);
return;
}
__iforce_usb_xmit(iforce);
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
}
static int iforce_usb_probe(struct usb_interface *intf,
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 6aa761ebbdf7..9ccb9107ccbe 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
response_data, response_len);
}
+static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
+{
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ wake_up_all(&iforce->wait);
+}
+
/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1a1a05d7cd42..e2719737360a 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -592,12 +592,11 @@ static int adp5588_probe(struct i2c_client *client,
return 0;
}
-static int adp5588_remove(struct i2c_client *client)
+static void adp5588_remove(struct i2c_client *client)
{
adp5588_write(client, CFG, 0);
/* all resources will be freed by devm */
- return 0;
}
static int __maybe_unused adp5588_suspend(struct device *dev)
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 93446b21f98f..db793a550c25 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -77,6 +77,7 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to read switch code: %d\n",
ret);
+ fwnode_handle_put(child);
return ret;
}
iqs62x_keys->switches[i].code = val;
@@ -90,6 +91,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
IQS62X_EVENT_HALL_N_T :
IQS62X_EVENT_HALL_S_T);
+
+ fwnode_handle_put(child);
}
return 0;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 6c38d034ec6e..407dd2ad6302 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -752,7 +752,7 @@ fail1:
return err;
}
-static int lm8323_remove(struct i2c_client *client)
+static void lm8323_remove(struct i2c_client *client)
{
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -769,8 +769,6 @@ static int lm8323_remove(struct i2c_client *client)
led_classdev_unregister(&lm->pwm[i].cdev);
kfree(lm);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 7c5f8c6bb957..9dac22c14125 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -200,15 +200,13 @@ static int lm8333_probe(struct i2c_client *client,
return err;
}
-static int lm8333_remove(struct i2c_client *client)
+static void lm8333_remove(struct i2c_client *client)
{
struct lm8333 *lm8333 = i2c_get_clientdata(client);
free_irq(client->irq, lm8333);
input_unregister_device(lm8333->input);
kfree(lm8333);
-
- return 0;
}
static const struct i2c_device_id lm8333_id[] = {
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 8cb0062b98e4..ac1637a3389e 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -194,7 +194,7 @@ err_free_mem:
return error;
}
-static int mcs_touchkey_remove(struct i2c_client *client)
+static void mcs_touchkey_remove(struct i2c_client *client)
{
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
@@ -203,8 +203,6 @@ static int mcs_touchkey_remove(struct i2c_client *client)
data->poweron(false);
input_unregister_device(data->input_dev);
kfree(data);
-
- return 0;
}
static void mcs_touchkey_shutdown(struct i2c_client *client)
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 7174e1df1ee3..9fcce18b1d65 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -216,7 +216,7 @@ err_free_mem:
return err;
}
-static int qt1070_remove(struct i2c_client *client)
+static void qt1070_remove(struct i2c_client *client)
{
struct qt1070_data *data = i2c_get_clientdata(client);
@@ -225,8 +225,6 @@ static int qt1070_remove(struct i2c_client *client)
input_unregister_device(data->input);
kfree(data);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 32d4a076eaa3..382b1519218c 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -432,7 +432,7 @@ err_free_mem:
return error;
}
-static int qt2160_remove(struct i2c_client *client)
+static void qt2160_remove(struct i2c_client *client)
{
struct qt2160_data *qt2160 = i2c_get_clientdata(client);
@@ -446,8 +446,6 @@ static int qt2160_remove(struct i2c_client *client)
input_unregister_device(qt2160->input);
kfree(qt2160);
-
- return 0;
}
static const struct i2c_device_id qt2160_idtable[] = {
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 65286762b02a..ad8660be0127 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -20,7 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#define SNVS_HPVIDR1_REG 0xF8
+#define SNVS_HPVIDR1_REG 0xBF8
#define SNVS_LPSR_REG 0x4C /* LP Status Register */
#define SNVS_LPCR_REG 0x38 /* LP Control Register */
#define SNVS_HPSR_REG 0x14
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 2a9755910065..afcdfbb002ff 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -307,7 +307,7 @@ fail1:
return error;
}
-static int tca6416_keypad_remove(struct i2c_client *client)
+static void tca6416_keypad_remove(struct i2c_client *client)
{
struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
@@ -318,8 +318,6 @@ static int tca6416_keypad_remove(struct i2c_client *client)
input_unregister_device(chip->input);
kfree(chip);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index a3b5f88d2bd1..5be636aaa94f 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -99,13 +99,11 @@ static int adxl34x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int adxl34x_i2c_remove(struct i2c_client *client)
+static void adxl34x_i2c_remove(struct i2c_client *client)
{
struct adxl34x *ac = i2c_get_clientdata(client);
adxl34x_remove(ac);
-
- return 0;
}
static int __maybe_unused adxl34x_i2c_suspend(struct device *dev)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index a9d984da95f3..84fe394da7a6 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -513,11 +513,9 @@ static int bma150_probe(struct i2c_client *client,
return 0;
}
-static int bma150_remove(struct i2c_client *client)
+static void bma150_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused bma150_suspend(struct device *dev)
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index 03fb49127c3a..3b23210c46b7 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -58,13 +58,11 @@ static int cma3000_i2c_probe(struct i2c_client *client,
return 0;
}
-static int cma3000_i2c_remove(struct i2c_client *client)
+static void cma3000_i2c_remove(struct i2c_client *client)
{
struct cma3000_accl_data *data = i2c_get_clientdata(client);
cma3000_exit(data);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index abc423165522..cfd6640e4f82 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -157,7 +157,7 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i
return ret;
}
-static int pcf8574_kp_remove(struct i2c_client *client)
+static void pcf8574_kp_remove(struct i2c_client *client)
{
struct kp_data *lp = i2c_get_clientdata(client);
@@ -165,8 +165,6 @@ static int pcf8574_kp_remove(struct i2c_client *client)
input_unregister_device(lp->idev);
kfree(lp);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
index 3fb64dbda1a2..76873aa005b4 100644
--- a/drivers/input/misc/rk805-pwrkey.c
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
};
module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 434d48ae4b12..ffad142801b3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -186,7 +186,6 @@ static const char * const smbus_pnp_ids[] = {
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
- "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
"LEN2068", /* T14 Gen 1 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index fa304648d611..987ee67a1045 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -587,7 +587,7 @@ err_mem_free:
return ret;
}
-static int synaptics_i2c_remove(struct i2c_client *client)
+static void synaptics_i2c_remove(struct i2c_client *client)
{
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -596,8 +596,6 @@ static int synaptics_i2c_remove(struct i2c_client *client)
input_unregister_device(touch->input);
kfree(touch);
-
- return 0;
}
static int __maybe_unused synaptics_i2c_suspend(struct device *dev)
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 2407ea43de59..c130468541b7 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -338,13 +338,11 @@ static int rmi_smb_probe(struct i2c_client *client,
return 0;
}
-static int rmi_smb_remove(struct i2c_client *client)
+static void rmi_smb_remove(struct i2c_client *client)
{
struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
rmi_unregister_transport_device(&rmi_smb->xport);
-
- return 0;
}
static int __maybe_unused rmi_smb_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index eb66cd2689b7..4eedea08b0b5 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3284,7 +3284,7 @@ err_disable_regulators:
return error;
}
-static int mxt_remove(struct i2c_client *client)
+static void mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
@@ -3294,8 +3294,6 @@ static int mxt_remove(struct i2c_client *client)
mxt_free_object_table(data);
regulator_bulk_disable(ARRAY_SIZE(data->regulators),
data->regulators);
-
- return 0;
}
static int __maybe_unused mxt_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 2f1f0d7607f8..34f422e246ef 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -552,15 +552,13 @@ static int bu21013_probe(struct i2c_client *client,
return 0;
}
-static int bu21013_remove(struct i2c_client *client)
+static void bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts *ts = i2c_get_clientdata(client);
/* Make sure IRQ will exit quickly even if there is contact */
ts->touch_stopped = true;
/* The resources will be freed by devm */
-
- return 0;
}
static int __maybe_unused bu21013_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index c65ccb2f4716..28ae7c15397a 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -43,13 +43,11 @@ static int cyttsp4_i2c_probe(struct i2c_client *client,
return PTR_ERR_OR_ZERO(ts);
}
-static int cyttsp4_i2c_remove(struct i2c_client *client)
+static void cyttsp4_i2c_remove(struct i2c_client *client)
{
struct cyttsp4 *ts = i2c_get_clientdata(client);
cyttsp4_remove(ts);
-
- return 0;
}
static const struct i2c_device_id cyttsp4_i2c_id[] = {
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 82beddb28761..5fb441387fe5 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1346,13 +1346,11 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return 0;
}
-static int edt_ft5x06_ts_remove(struct i2c_client *client)
+static void edt_ft5x06_ts_remove(struct i2c_client *client)
{
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
-
- return 0;
}
static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index d016505fc081..a33cc7950cf5 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -95,6 +95,7 @@ static const struct goodix_chip_data gt9x_chip_data = {
static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "1151", .data = &gt1x_chip_data },
+ { .id = "1158", .data = &gt1x_chip_data },
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
@@ -1382,14 +1383,12 @@ reset:
return 0;
}
-static int goodix_ts_remove(struct i2c_client *client)
+static void goodix_ts_remove(struct i2c_client *client)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
if (ts->load_cfg_from_disk)
wait_for_completion(&ts->firmware_loading_complete);
-
- return 0;
}
static int __maybe_unused goodix_suspend(struct device *dev)
@@ -1508,6 +1507,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt1158" },
{ .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 2745bf1aee38..83f4be05e27b 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
"ce", GPIOD_OUT_LOW);
if (IS_ERR(ts->gpio_ce)) {
error = PTR_ERR(ts->gpio_ce);
- if (error != EPROBE_DEFER)
+ if (error != -EPROBE_DEFER)
dev_err(&client->dev,
"Failed to get gpio: %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 42d3fd7e04d7..79cd660d879e 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -176,7 +176,7 @@ static int migor_ts_probe(struct i2c_client *client,
return error;
}
-static int migor_ts_remove(struct i2c_client *client)
+static void migor_ts_remove(struct i2c_client *client)
{
struct migor_ts_priv *priv = i2c_get_clientdata(client);
@@ -185,8 +185,6 @@ static int migor_ts_remove(struct i2c_client *client)
kfree(priv);
dev_set_drvdata(&client->dev, NULL);
-
- return 0;
}
static int __maybe_unused migor_ts_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 85a1f465c097..1a7d00289b4c 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -475,11 +475,9 @@ static int s6sy761_probe(struct i2c_client *client,
return 0;
}
-static int s6sy761_remove(struct i2c_client *client)
+static void s6sy761_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused s6sy761_runtime_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c175d44c52f3..d5bd170808fb 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -738,11 +738,9 @@ static int stmfts_probe(struct i2c_client *client,
return 0;
}
-static int stmfts_remove(struct i2c_client *client)
+static void stmfts_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused stmfts_runtime_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 742a7e96c1b5..73eb8f80be6e 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -192,12 +192,12 @@ static int sun4i_get_temp(const struct sun4i_ts_data *ts, int *temp)
return 0;
}
-static int sun4i_get_tz_temp(void *data, int *temp)
+static int sun4i_get_tz_temp(struct thermal_zone_device *tz, int *temp)
{
- return sun4i_get_temp(data, temp);
+ return sun4i_get_temp(tz->devdata, temp);
}
-static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+static const struct thermal_zone_device_ops sun4i_ts_tz_ops = {
.get_temp = sun4i_get_tz_temp,
};
@@ -356,8 +356,8 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
- &sun4i_ts_tz_ops);
+ thermal = devm_thermal_of_zone_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
if (IS_ERR(thermal))
return PTR_ERR(thermal);
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 9fdd870c4c0b..a9565353ee98 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -43,11 +43,9 @@ static int tsc2004_probe(struct i2c_client *i2c,
tsc2004_cmd);
}
-static int tsc2004_remove(struct i2c_client *i2c)
+static void tsc2004_remove(struct i2c_client *i2c)
{
tsc200x_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id tsc2004_idtable[] = {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 65b8e4fd8217..828672a46a3d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -939,7 +939,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = data;
+ cmd->data[2] = lower_32_bits(data);
+ cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
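The completion-wait fix above stops truncating the 64-bit wait marker: its lower half stays in data[2] and the upper half now lands in data[3]. A stand-alone sketch of the split and reassembly, assuming only the standard C library:

#include <stdint.h>
#include <assert.h>

#define lower_32_bits(v) ((uint32_t)((uint64_t)(v) & 0xffffffffu))
#define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

int main(void)
{
	uint64_t data = 0x1122334455667788ULL;
	uint32_t lo = lower_32_bits(data);	/* the only word kept before the fix */
	uint32_t hi = upper_32_bits(data);	/* now preserved as well */

	assert((((uint64_t)hi << 32) | lo) == data);
	return 0;
}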
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 696d5555be57..6a1f02c62dff 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -777,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (dev_state->domain == NULL)
goto out_free_states;
+ /* See iommu_is_default_domain() */
+ dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
amd_iommu_domain_direct_map(dev_state->domain);
ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 1ef7bbb4acf3..5968a568aae2 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
}
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+ par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
cd->ttbr = virt_to_phys(mm->pgd);
@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
* addresses larger than what we support.
*/
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld);
if (smmu->oas < oas)
return false;
/* We can support bigger ASIDs than the CPU, but not smaller */
- fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
asid_bits = fld ? 16 : 8;
if (smmu->asid_bits < asid_bits)
return false;
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 51bd66a45a11..e190bb8c225c 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct irq_desc *desc;
int ret = 0;
if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* Hyper-V IO APIC irq affinity should be in the scope of
* ioapic_max_cpumask because there is no irq remapping support.
*/
- desc = irq_data_to_desc(irq_data);
- cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+ irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
return 0;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7cca030a508e..31bc50e538a3 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -163,38 +163,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-static inline void context_clear_pasid_enable(struct context_entry *context)
-{
- context->lo &= ~(1ULL << 11);
-}
-
-static inline bool context_pasid_enabled(struct context_entry *context)
-{
- return !!(context->lo & (1ULL << 11));
-}
-
-static inline void context_set_copied(struct context_entry *context)
-{
- context->hi |= (1ull << 3);
-}
-
-static inline bool context_copied(struct context_entry *context)
-{
- return !!(context->hi & (1ULL << 3));
-}
-
-static inline bool __context_present(struct context_entry *context)
-{
- return (context->lo & 1);
-}
-
-bool context_present(struct context_entry *context)
-{
- return context_pasid_enabled(context) ?
- __context_present(context) :
- __context_present(context) && !context_copied(context);
-}
-
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -242,6 +210,26 @@ static inline void context_clear_entry(struct context_entry *context)
context->hi = 0;
}
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ if (!iommu->copied_tables)
+ return false;
+
+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
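With this rework, "copied from the previous kernel" is tracked per device in a bitmap indexed by the 16-bit PCI source-id (bus << 8 | devfn) rather than by stealing a bit in the context entry itself. A small user-space sketch of the same indexing, assuming a fixed 64K bitmap:

#include <stdint.h>
#include <stdbool.h>

#define BITS_PER_LONG_EXAMPLE	(8 * sizeof(unsigned long))
#define SOURCE_ID(bus, devfn)	((((long)(bus)) << 8) | (devfn))

static unsigned long copied[65536 / BITS_PER_LONG_EXAMPLE];

static void set_copied(uint8_t bus, uint8_t devfn)
{
	long idx = SOURCE_ID(bus, devfn);

	copied[idx / BITS_PER_LONG_EXAMPLE] |= 1UL << (idx % BITS_PER_LONG_EXAMPLE);
}

static bool is_copied(uint8_t bus, uint8_t devfn)
{
	long idx = SOURCE_ID(bus, devfn);

	return copied[idx / BITS_PER_LONG_EXAMPLE] & (1UL << (idx % BITS_PER_LONG_EXAMPLE));
}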
/*
* This domain is a static identity mapping domain.
* 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -402,14 +390,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+ unsigned long fl_sagaw, sl_sagaw;
+
+ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+ sl_sagaw = cap_sagaw(iommu->cap);
+
+ /* Second level only. */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return sl_sagaw;
+
+ /* First level only. */
+ if (!ecap_slts(iommu->ecap))
+ return fl_sagaw;
+
+ return fl_sagaw & sl_sagaw;
+}
+
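In scalable mode with both translation types available, only widths supported by both the first-level and the second-level tables can be used, hence the intersection above. A tiny worked example of that bit arithmetic (SAGAW bit 2 = 4-level, bit 3 = 5-level paging; the capability values below are made up):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long fl_sagaw = BIT(2) | BIT(3);  /* first level: 4- and 5-level */
	unsigned long sl_sagaw = BIT(1) | BIT(2);  /* second level: 3- and 4-level */
	unsigned long sagaw = fl_sagaw & sl_sagaw; /* usable by both: 4-level only */

	printf("usable SAGAW bits: %#lx\n", sagaw); /* prints 0x4 */
	return 0;
}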
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
+ sagaw = __iommu_calculate_sagaw(iommu);
+ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
@@ -505,8 +515,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
/*
* There could possibly be multiple device numa nodes as devices
@@ -518,7 +529,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return nid;
}
@@ -578,6 +589,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
struct context_entry *context;
u64 *entry;
+ /*
+ * Unless the caller requested to allocate a new entry,
+ * returning a copied context entry makes no sense.
+ */
+ if (!alloc && context_copied(iommu, bus, devfn))
+ return NULL;
+
entry = &root->lo;
if (sm_supported(iommu)) {
if (devfn >= 0x80) {
@@ -795,32 +813,11 @@ static void free_context_table(struct intel_iommu *iommu)
}
#ifdef CONFIG_DMAR_DEBUG
-static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn)
+static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
+ u8 bus, u8 devfn, struct dma_pte *parent, int level)
{
- struct device_domain_info *info;
- struct dma_pte *parent, *pte;
- struct dmar_domain *domain;
- struct pci_dev *pdev;
- int offset, level;
-
- pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
- if (!pdev)
- return;
-
- info = dev_iommu_priv_get(&pdev->dev);
- if (!info || !info->domain) {
- pr_info("device [%02x:%02x.%d] not probed\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- return;
- }
-
- domain = info->domain;
- level = agaw_to_level(domain->agaw);
- parent = domain->pgd;
- if (!parent) {
- pr_info("no page table setup\n");
- return;
- }
+ struct dma_pte *pte;
+ int offset;
while (1) {
offset = pfn_level_offset(pfn, level);
@@ -847,9 +844,10 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
struct pasid_entry *entries, *pte;
struct context_entry *ctx_entry;
struct root_entry *rt_entry;
+ int i, dir_index, index, level;
u8 devfn = source_id & 0xff;
u8 bus = source_id >> 8;
- int i, dir_index, index;
+ struct dma_pte *pgtable;
pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
@@ -877,8 +875,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
ctx_entry->hi, ctx_entry->lo);
/* legacy mode does not require PASID entries */
- if (!sm_supported(iommu))
+ if (!sm_supported(iommu)) {
+ level = agaw_to_level(ctx_entry->hi & 7);
+ pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
goto pgtable_walk;
+ }
/* get the pointer to pasid directory entry */
dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
@@ -905,8 +906,16 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
for (i = 0; i < ARRAY_SIZE(pte->val); i++)
pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
+ if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
+ level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
+ pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
+ } else {
+ level = agaw_to_level((pte->val[0] >> 2) & 0x7);
+ pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
+ }
+
pgtable_walk:
- pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn);
+ pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
}
#endif
@@ -1345,19 +1354,20 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!iommu->qi)
return NULL;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return info->ats_supported ? info : NULL;
}
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
@@ -1366,8 +1376,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
@@ -1375,7 +1386,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
}
}
domain->has_iotlb_device = has_iotlb_device;
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1467,14 +1478,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!domain->has_iotlb_device)
return;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1688,6 +1700,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
+ if (iommu->copied_tables) {
+ bitmap_free(iommu->copied_tables);
+ iommu->copied_tables = NULL;
+ }
+
/* free context mapping */
free_context_table(iommu);
@@ -1913,7 +1930,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
ret = 0;
- if (context_present(context))
+ if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
/*
@@ -1925,7 +1942,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* in-flight DMA will exist, and we don't need to worry anymore
* hereafter.
*/
- if (context_copied(context)) {
+ if (context_copied(iommu, bus, devfn)) {
u16 did_old = context_domain_id(context);
if (did_old < cap_ndoms(iommu->cap)) {
@@ -1936,6 +1953,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
+
+ clear_context_copied(iommu, bus, devfn);
}
context_clear_entry(context);
@@ -2429,6 +2448,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
+ unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2440,9 +2460,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (ret)
return ret;
info->domain = domain;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2684,32 +2704,14 @@ static int copy_context_table(struct intel_iommu *iommu,
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
- if (!__context_present(&ce))
+ if (!context_present(&ce))
continue;
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
set_bit(did, iommu->domain_ids);
- /*
- * We need a marker for copied context entries. This
- * marker needs to work for the old format as well as
- * for extended context entries.
- *
- * Bit 67 of the context entry is used. In the old
- * format this bit is available to software, in the
- * extended format it is the PGE bit, but PGE is ignored
- * by HW if PASIDs are disabled (and thus still
- * available).
- *
- * So disable PASIDs first and then mark the entry
- * copied. This means that we don't copy PASID
- * translations from the old kernel, but this is fine as
- * faults there are not fatal.
- */
- context_clear_pasid_enable(&ce);
- context_set_copied(&ce);
-
+ set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
}
@@ -2735,8 +2737,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
- new_ext = !!ecap_ecs(iommu->ecap);
+ ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
+ new_ext = !!sm_supported(iommu);
/*
* The RTT bit can only be changed when translation is disabled,
@@ -2747,6 +2749,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
if (new_ext != ext)
return -EINVAL;
+ iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
+ if (!iommu->copied_tables)
+ return -ENOMEM;
+
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
@@ -4080,6 +4086,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4091,9 +4098,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
intel_pasid_free_table(info->dev);
}
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_del(&info->link);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
domain_detach_iommu(domain, iommu);
info->domain = NULL;
@@ -4412,19 +4419,20 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock(&dmar_domain->lock);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return true;
}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index fae45bbb0c7f..74b0e19e23ee 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -197,7 +197,6 @@
#define ecap_dis(e) (((e) >> 27) & 0x1)
#define ecap_nest(e) (((e) >> 26) & 0x1)
#define ecap_mts(e) (((e) >> 25) & 0x1)
-#define ecap_ecs(e) (((e) >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
@@ -265,7 +264,6 @@
#define DMA_GSTS_CFIS (((u32)1) << 23)
/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)
/* CCMD_REG */
@@ -579,6 +577,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
+ unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -701,6 +700,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
+static inline bool context_present(struct context_entry *context)
+{
+ return (context->lo & 1);
+}
+
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_enable_qi(struct intel_iommu *iommu);
@@ -784,7 +788,6 @@ static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 780fb7071577..3a808146b50f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3076,6 +3076,24 @@ out:
return ret;
}
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+ if (group->domain == group->default_domain)
+ return true;
+
+ /*
+ * If the default domain was set to identity and it is still an identity
+ * domain then we consider this a pass. This happens because of
+ * amd_iommu_init_device() replacing the default identity domain with an
+ * identity domain that has a different configuration for AMDGPU.
+ */
+ if (group->default_domain &&
+ group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+ group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+ return true;
+ return false;
+}
+
/**
* iommu_device_use_default_domain() - Device driver wants to handle device
* DMA through the kernel DMA API.
@@ -3094,8 +3112,7 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->domain != group->default_domain ||
- group->owner) {
+ if (group->owner || !iommu_is_default_domain(group)) {
ret = -EBUSY;
goto unlock_out;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 41f4eb005219..5696314ae69e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -40,7 +40,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return -ENODEV;
+ return driver_deferred_probe_check_state(dev);
if (!try_module_get(ops->owner))
return -ENODEV;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 08eeafc9529f..80151176ba12 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static bool viommu_capable(enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct iommu_ops viommu_ops = {
+ .capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 66b9fa408bf2..eb5ea5b69cfa 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -561,6 +561,11 @@ config IRQ_LOONGARCH_CPU
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select LOONGSON_LIOINTC
+ select LOONGSON_EIOINTC
+ select LOONGSON_PCH_PIC
+ select LOONGSON_PCH_MSI
+ select LOONGSON_PCH_LPC
help
Support for the LoongArch CPU Interrupt Controller. For details of
irq chip hierarchy on LoongArch platforms please read the document
@@ -623,8 +628,9 @@ config LOONGSON_PCH_MSI
config LOONGSON_PCH_LPC
bool "Loongson PCH LPC Controller"
+ depends on LOONGARCH
depends on MACH_LOONGSON64
- default (MACH_LOONGSON64 && LOONGARCH)
+ default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
help
Support for the Loongson PCH LPC Controller.
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5ff09de6c48f..beead1a0191c 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1574,13 +1574,15 @@ static int its_select_cpu(struct irq_data *d,
const struct cpumask *aff_mask)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- cpumask_var_t tmpmask;
+ static DEFINE_RAW_SPINLOCK(tmpmask_lock);
+ static struct cpumask __tmpmask;
+ struct cpumask *tmpmask;
+ unsigned long flags;
int cpu, node;
-
- if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
- return -ENOMEM;
-
node = its_dev->its->numa_node;
+ tmpmask = &__tmpmask;
+
+ raw_spin_lock_irqsave(&tmpmask_lock, flags);
if (!irqd_affinity_is_managed(d)) {
/* First try the NUMA node */
@@ -1634,7 +1636,7 @@ static int its_select_cpu(struct irq_data *d,
cpu = cpumask_pick_least_loaded(d, tmpmask);
}
out:
- free_cpumask_var(tmpmask);
+ raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
return cpu;
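The rework above drops the GFP_ATOMIC cpumask allocation, which could fail and is problematic on PREEMPT_RT, in favour of a single static scratch mask serialized by a raw spinlock. A rough sketch of that pattern with illustrative names:

#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

static int example_pick_cpu(const struct cpumask *aff)
{
	static DEFINE_RAW_SPINLOCK(scratch_lock);
	static struct cpumask scratch;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&scratch_lock, flags);
	cpumask_and(&scratch, aff, cpu_online_mask);
	cpu = cpumask_first(&scratch);
	raw_spin_unlock_irqrestore(&scratch_lock, flags);

	return cpu < nr_cpu_ids ? cpu : -EINVAL;
}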
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 4ea71b28f9f5..a6277dea4c7a 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
{
unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
return fld >= 0x3;
}
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 327f3ab62c03..741612ba6a52 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -129,7 +129,7 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
- cpuintc_handle = irq_domain_alloc_fwnode(NULL);
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 80d8ca6f2d46..16e9af8d8b1e 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -111,11 +111,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
/* Mask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
/* Set route for target vector */
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
/* Unmask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -286,7 +290,7 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
}
}
-struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
int i;
@@ -344,7 +348,8 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
if (!priv)
return -ENOMEM;
- priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
if (!priv->domain_handle) {
pr_err("Unable to allocate domain handle\n");
goto out_free_priv;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index c4f3c886ad61..0da8716f8f24 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -207,7 +207,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
"reg-names", core_reg_names[i]);
if (index < 0)
- return -EINVAL;
+ goto out_iounmap;
priv->core_isr[i] = of_iomap(node, index);
}
@@ -360,7 +360,7 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index d0e8551bebfa..a72ede90ffc6 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -282,7 +282,7 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
int ret;
struct fwnode_handle *domain_handle;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
acpi_pchmsi->count, parent, domain_handle);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index b6f1392964b1..c01b9c257005 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -48,25 +48,6 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-int find_pch_pic(u32 gsi)
-{
- int i;
-
- /* Find the PCH_PIC that manages this GSI. */
- for (i = 0; i < MAX_IO_PICS; i++) {
- struct pch_pic *priv = pch_pic_priv[i];
-
- if (!priv)
- return -1;
-
- if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
- return i;
- }
-
- pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
- return -1;
-}
-
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -325,6 +306,25 @@ IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
#endif
#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
@@ -349,7 +349,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a73763d475f0..6a3f7498ea8e 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,7 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
- if (!host_data->drv_data || !host_data->drv_data->desc_irqs)
+ if (!host_data->drv_data->desc_irqs)
return -EINVAL;
desc_irq = host_data->drv_data->desc_irqs[hwirq];
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index e69c4bf557bf..ae24848af233 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -798,7 +798,7 @@ u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
u16 ret;
if (contr == 0) {
- strlcpy(serial, driver_serial, CAPI_SERIAL_LEN);
+ strscpy(serial, driver_serial, CAPI_SERIAL_LEN);
return CAPI_NOERROR;
}
@@ -806,7 +806,7 @@ u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
- strlcpy(serial, ctr->serial, CAPI_SERIAL_LEN);
+ strscpy(serial, ctr->serial, CAPI_SERIAL_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index 7ea10db20e3a..48133d022812 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -59,6 +59,7 @@ struct l1oip {
int bundle; /* bundle channels in one frm */
int codec; /* codec to use for transmis. */
int limit; /* limit number of bchannels */
+ bool shutdown; /* if card is released */
/* timer */
struct timer_list keep_tl;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 2c40412466e6..a77195e378b7 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
p = frame;
/* restart timer */
- if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
@@ -601,7 +601,9 @@ multiframe:
goto multiframe;
/* restart timer */
- if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
+ if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
+ !hc->timeout_on) &&
+ !hc->shutdown) {
hc->timeout_on = 1;
mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
@@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc)
{
int ch;
- if (timer_pending(&hc->keep_tl))
- del_timer(&hc->keep_tl);
+ hc->shutdown = true;
- if (timer_pending(&hc->timeout_tl))
- del_timer(&hc->timeout_tl);
+ del_timer_sync(&hc->keep_tl);
+ del_timer_sync(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
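The release path above fixes a rearm-after-free race: the new shutdown flag stops the timer rearm paths first, then del_timer_sync() waits out any handler that is already running. A condensed sketch of that ordering with made-up names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_card {
	struct timer_list keepalive;
	bool shutdown;
};

static void example_maybe_rearm(struct example_card *card)
{
	/* Rearm only while the card is live. */
	if (!card->shutdown)
		mod_timer(&card->keepalive, jiffies + 5 * HZ);
}

static void example_release(struct example_card *card)
{
	card->shutdown = true;		  /* no new rearms from here on */
	del_timer_sync(&card->keepalive); /* wait for a running handler */
}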
diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c
index aa3f82be0a9c..bb2249771acb 100644
--- a/drivers/leds/flash/leds-as3645a.c
+++ b/drivers/leds/flash/leds-as3645a.c
@@ -724,7 +724,7 @@ out_put_nodes:
return rval;
}
-static int as3645a_remove(struct i2c_client *client)
+static void as3645a_remove(struct i2c_client *client)
{
struct as3645a *flash = i2c_get_clientdata(client);
@@ -740,8 +740,6 @@ static int as3645a_remove(struct i2c_client *client)
fwnode_handle_put(flash->flash_node);
fwnode_handle_put(flash->indicator_node);
-
- return 0;
}
static const struct i2c_device_id as3645a_id_table[] = {
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index d0e1d4814042..78730e066a73 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -440,15 +440,16 @@ static int lm3601x_probe(struct i2c_client *client)
return lm3601x_register_leds(led, fwnode);
}
-static int lm3601x_remove(struct i2c_client *client)
+static void lm3601x_remove(struct i2c_client *client)
{
struct lm3601x_led *led = i2c_get_clientdata(client);
+ int ret;
- mutex_destroy(&led->lock);
-
- return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
- LM3601X_ENABLE_MASK,
- LM3601X_MODE_STANDBY);
+ ret = regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
+ LM3601X_ENABLE_MASK, LM3601X_MODE_STANDBY);
+ if (ret)
+ dev_warn(&client->dev,
+ "Failed to put into standby (%pe)\n", ERR_PTR(ret));
}
static const struct i2c_device_id lm3601x_id[] = {
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
index ee129ab7255d..e404fe8b0314 100644
--- a/drivers/leds/flash/leds-rt4505.c
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -393,12 +393,11 @@ static int rt4505_probe(struct i2c_client *client)
return 0;
}
-static int rt4505_remove(struct i2c_client *client)
+static void rt4505_remove(struct i2c_client *client)
{
struct rt4505_priv *priv = i2c_get_clientdata(client);
v4l2_flash_release(priv->v4l2_flash);
- return 0;
}
static void rt4505_shutdown(struct i2c_client *client)
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index a0df1fb28774..e072ee5409f7 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -334,13 +334,11 @@ exit:
return err;
}
-static int an30259a_remove(struct i2c_client *client)
+static void an30259a_remove(struct i2c_client *client)
{
struct an30259a *chip = i2c_get_clientdata(client);
mutex_destroy(&chip->mutex);
-
- return 0;
}
static const struct of_device_id an30259a_match_table[] = {
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 80d937454aee..0b52fc9097c6 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -401,15 +401,13 @@ error:
return ret;
}
-static int aw2013_remove(struct i2c_client *client)
+static void aw2013_remove(struct i2c_client *client)
{
struct aw2013 *chip = i2c_get_clientdata(client);
aw2013_chip_disable(chip);
mutex_destroy(&chip->mutex);
-
- return 0;
}
static const struct of_device_id aw2013_match_table[] = {
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 8bbaef5a2986..2b6678f6bd56 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -722,7 +722,7 @@ failed_unregister_dev_file:
return ret;
}
-static int bd2802_remove(struct i2c_client *client)
+static void bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
int i;
@@ -733,8 +733,6 @@ static int bd2802_remove(struct i2c_client *client)
bd2802_disable_adv_conf(led);
for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
device_remove_file(&led->client->dev, bd2802_attributes[i]);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index bd7d0d5cf3b6..3fb6a2fdaefa 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -677,7 +677,7 @@ exit:
return err;
}
-static int blinkm_remove(struct i2c_client *client)
+static void blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
int ret = 0;
@@ -716,7 +716,6 @@ static int blinkm_remove(struct i2c_client *client)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
- return 0;
}
static const struct i2c_device_id blinkm_id[] = {
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index fc63fce38c19..0d219c1ac3b5 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -457,7 +457,7 @@ static int is31fl32xx_probe(struct i2c_client *client,
return 0;
}
-static int is31fl32xx_remove(struct i2c_client *client)
+static void is31fl32xx_remove(struct i2c_client *client)
{
struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
int ret;
@@ -466,8 +466,6 @@ static int is31fl32xx_remove(struct i2c_client *client)
if (ret)
dev_err(&client->dev, "Failed to reset registers on removal (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
/*
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index e72393534b72..ba906c253c7f 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -470,13 +470,12 @@ static int lm3530_probe(struct i2c_client *client,
return 0;
}
-static int lm3530_remove(struct i2c_client *client)
+static void lm3530_remove(struct i2c_client *client)
{
struct lm3530_data *drvdata = i2c_get_clientdata(client);
lm3530_led_disable(drvdata);
led_classdev_unregister(&drvdata->led_dev);
- return 0;
}
static const struct i2c_device_id lm3530_id[] = {
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index beb53040e09e..db64d44bcbbf 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -704,7 +704,7 @@ static int lm3532_probe(struct i2c_client *client,
return ret;
}
-static int lm3532_remove(struct i2c_client *client)
+static void lm3532_remove(struct i2c_client *client)
{
struct lm3532_data *drvdata = i2c_get_clientdata(client);
@@ -712,8 +712,6 @@ static int lm3532_remove(struct i2c_client *client)
if (drvdata->enable_gpio)
gpiod_direction_output(drvdata->enable_gpio, 0);
-
- return 0;
}
static const struct of_device_id of_lm3532_leds_match[] = {
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 2d3e11845ba5..daa35927b301 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -491,7 +491,7 @@ err_out:
return err;
}
-static int lm355x_remove(struct i2c_client *client)
+static void lm355x_remove(struct i2c_client *client)
{
struct lm355x_chip_data *chip = i2c_get_clientdata(client);
struct lm355x_reg_data *preg = chip->regs;
@@ -501,8 +501,6 @@ static int lm355x_remove(struct i2c_client *client)
led_classdev_unregister(&chip->cdev_torch);
led_classdev_unregister(&chip->cdev_flash);
dev_info(&client->dev, "%s is removed\n", lm355x_name[chip->type]);
-
- return 0;
}
static const struct i2c_device_id lm355x_id[] = {
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 435309154e6b..428a5d928150 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -380,7 +380,7 @@ err_out:
return err;
}
-static int lm3642_remove(struct i2c_client *client)
+static void lm3642_remove(struct i2c_client *client)
{
struct lm3642_chip_data *chip = i2c_get_clientdata(client);
@@ -388,7 +388,6 @@ static int lm3642_remove(struct i2c_client *client)
led_classdev_unregister(&chip->cdev_torch);
led_classdev_unregister(&chip->cdev_flash);
regmap_write(chip->regmap, REG_ENABLE, 0);
- return 0;
}
static const struct i2c_device_id lm3642_id[] = {
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 87cd24ce3f95..54b4662bff41 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -491,14 +491,12 @@ static int lm3692x_probe(struct i2c_client *client,
return 0;
}
-static int lm3692x_remove(struct i2c_client *client)
+static void lm3692x_remove(struct i2c_client *client)
{
struct lm3692x_led *led = i2c_get_clientdata(client);
lm3692x_leds_disable(led);
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lm3692x_id[] = {
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index a8c9322558cc..71231a60eebc 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -337,7 +337,7 @@ static int lm3697_probe(struct i2c_client *client,
return lm3697_init(led);
}
-static int lm3697_remove(struct i2c_client *client)
+static void lm3697_remove(struct i2c_client *client)
{
struct lm3697 *led = i2c_get_clientdata(client);
struct device *dev = &led->client->dev;
@@ -345,10 +345,8 @@ static int lm3697_remove(struct i2c_client *client)
ret = regmap_update_bits(led->regmap, LM3697_CTRL_ENABLE,
LM3697_CTRL_A_B_EN, 0);
- if (ret) {
+ if (ret)
dev_err(dev, "Failed to disable the device\n");
- return ret;
- }
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -360,8 +358,6 @@ static int lm3697_remove(struct i2c_client *client)
}
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lm3697_id[] = {
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 437c711b2a27..673ad8c04f41 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -397,7 +397,7 @@ static int lp3944_probe(struct i2c_client *client,
return 0;
}
-static int lp3944_remove(struct i2c_client *client)
+static void lp3944_remove(struct i2c_client *client)
{
struct lp3944_platform_data *pdata = dev_get_platdata(&client->dev);
struct lp3944_data *data = i2c_get_clientdata(client);
@@ -414,8 +414,6 @@ static int lp3944_remove(struct i2c_client *client)
default:
break;
}
-
- return 0;
}
/* lp3944 i2c driver struct */
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index 6ee9131fbf25..bf0ad1b5ce24 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -255,15 +255,13 @@ static int lp3952_probe(struct i2c_client *client,
return 0;
}
-static int lp3952_remove(struct i2c_client *client)
+static void lp3952_remove(struct i2c_client *client)
{
struct lp3952_led_array *priv;
priv = i2c_get_clientdata(client);
lp3952_on_off(priv, LP3952_LED_ALL, false);
gpiod_set_value(priv->enable_gpio, 0);
-
- return 0;
}
static const struct i2c_device_id lp3952_id[] = {
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index e129dcc656b8..28d6b39fa72d 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -563,7 +563,7 @@ static int lp50xx_probe(struct i2c_client *client)
return lp50xx_probe_dt(led);
}
-static int lp50xx_remove(struct i2c_client *client)
+static void lp50xx_remove(struct i2c_client *client)
{
struct lp50xx *led = i2c_get_clientdata(client);
int ret;
@@ -579,8 +579,6 @@ static int lp50xx_remove(struct i2c_client *client)
}
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lp50xx_id[] = {
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index a9e7507c998c..7ff20c260504 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -579,7 +579,7 @@ err_init:
return ret;
}
-static int lp5521_remove(struct i2c_client *client)
+static void lp5521_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -587,8 +587,6 @@ static int lp5521_remove(struct i2c_client *client)
lp5521_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp5521_id[] = {
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index b1590cb4a188..369d40b0b65b 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -947,7 +947,7 @@ err_init:
return ret;
}
-static int lp5523_remove(struct i2c_client *client)
+static void lp5523_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -955,8 +955,6 @@ static int lp5523_remove(struct i2c_client *client)
lp5523_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp5523_id[] = {
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 31c14016d289..0e490085ff35 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -573,7 +573,7 @@ err_init:
return ret;
}
-static int lp5562_remove(struct i2c_client *client)
+static void lp5562_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -582,8 +582,6 @@ static int lp5562_remove(struct i2c_client *client)
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp5562_id[] = {
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index 2d2fda2ab104..ae11a02c0ab2 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -362,7 +362,7 @@ err_init:
return ret;
}
-static int lp8501_remove(struct i2c_client *client)
+static void lp8501_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -370,8 +370,6 @@ static int lp8501_remove(struct i2c_client *client)
lp8501_stop_engine(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp8501_id[] = {
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 3c693d5e3b44..e2b36d3187eb 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -445,7 +445,7 @@ static int lp8860_probe(struct i2c_client *client,
return 0;
}
-static int lp8860_remove(struct i2c_client *client)
+static void lp8860_remove(struct i2c_client *client)
{
struct lp8860_led *led = i2c_get_clientdata(client);
int ret;
@@ -461,8 +461,6 @@ static int lp8860_remove(struct i2c_client *client)
}
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lp8860_id[] = {
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index f72b5d1be3a6..df83d97cb479 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -52,7 +52,7 @@ struct pca9532_data {
static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int pca9532_remove(struct i2c_client *client);
+static void pca9532_remove(struct i2c_client *client);
enum {
pca9530,
@@ -546,13 +546,11 @@ static int pca9532_probe(struct i2c_client *client,
return pca9532_configure(client, data, pca9532_pdata);
}
-static int pca9532_remove(struct i2c_client *client)
+static void pca9532_remove(struct i2c_client *client)
{
struct pca9532_data *data = i2c_get_clientdata(client);
pca9532_destroy_devices(data, data->chip_info->num_leds);
-
- return 0;
}
module_i2c_driver(pca9532_driver);
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 1473ced8664c..161bef65c6b7 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -790,7 +790,7 @@ exit:
return err;
}
-static int tca6507_remove(struct i2c_client *client)
+static void tca6507_remove(struct i2c_client *client)
{
int i;
struct tca6507_chip *tca = i2c_get_clientdata(client);
@@ -802,8 +802,6 @@ static int tca6507_remove(struct i2c_client *client)
}
tca6507_remove_gpio(tca);
cancel_work_sync(&tca->work);
-
- return 0;
}
static struct i2c_driver tca6507_driver = {
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index eac6f4a573b2..c7c9851c894a 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -242,7 +242,7 @@ static int omnia_leds_probe(struct i2c_client *client,
return 0;
}
-static int omnia_leds_remove(struct i2c_client *client)
+static void omnia_leds_remove(struct i2c_client *client)
{
u8 buf[5];
@@ -258,8 +258,6 @@ static int omnia_leds_remove(struct i2c_client *client)
buf[4] = 255;
i2c_master_send(client, buf, 5);
-
- return 0;
}
static const struct of_device_id of_omnia_leds_match[] = {
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
index 4c9e663a90ba..b9eeb8702df0 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
@@ -13,28 +13,45 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
-static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+static struct gpiod_lookup_table *simatic_ipc_led_gpio_table;
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table_127e = {
.dev_id = "leds-gpio",
.table = {
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
},
};
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table_227g = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 0, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 1, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 2, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 3, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 4, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 5, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-3", 6, NULL, 6, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-f7188x-3", 7, NULL, 7, GPIO_ACTIVE_HIGH),
+ }
+};
+
static const struct gpio_led simatic_ipc_gpio_leds[] = {
- { .name = "green:" LED_FUNCTION_STATUS "-3" },
{ .name = "red:" LED_FUNCTION_STATUS "-1" },
{ .name = "green:" LED_FUNCTION_STATUS "-1" },
{ .name = "red:" LED_FUNCTION_STATUS "-2" },
{ .name = "green:" LED_FUNCTION_STATUS "-2" },
{ .name = "red:" LED_FUNCTION_STATUS "-3" },
+ { .name = "green:" LED_FUNCTION_STATUS "-3" },
};
static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
@@ -46,7 +63,7 @@ static struct platform_device *simatic_leds_pdev;
static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
{
- gpiod_remove_lookup_table(&simatic_ipc_led_gpio_table);
+ gpiod_remove_lookup_table(simatic_ipc_led_gpio_table);
platform_device_unregister(simatic_leds_pdev);
return 0;
@@ -54,10 +71,25 @@ static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
{
+ const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
struct gpio_desc *gpiod;
int err;
- gpiod_add_lookup_table(&simatic_ipc_led_gpio_table);
+ switch (plat->devmode) {
+ case SIMATIC_IPC_DEVICE_127E:
+ simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_127e;
+ break;
+ case SIMATIC_IPC_DEVICE_227G:
+ if (!IS_ENABLED(CONFIG_GPIO_F7188X))
+ return -ENODEV;
+ request_module("gpio-f7188x");
+ simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_227g;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ gpiod_add_lookup_table(simatic_ipc_led_gpio_table);
simatic_leds_pdev = platform_device_register_resndata(NULL,
"leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
&simatic_ipc_gpio_leds_pdata,
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index d2f0cde6f9c7..3ded340699fb 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -58,7 +58,7 @@ enum ams_i2c_cmd {
static int ams_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int ams_i2c_remove(struct i2c_client *client);
+static void ams_i2c_remove(struct i2c_client *client);
static const struct i2c_device_id ams_id[] = {
{ "MAC,accelerometer_1", 0 },
@@ -230,7 +230,7 @@ static int ams_i2c_probe(struct i2c_client *client,
return 0;
}
-static int ams_i2c_remove(struct i2c_client *client)
+static void ams_i2c_remove(struct i2c_client *client)
{
if (ams_info.has_device) {
ams_sensor_detach();
@@ -245,8 +245,6 @@ static int ams_i2c_remove(struct i2c_client *client)
ams_info.has_device = 0;
}
-
- return 0;
}
static void ams_i2c_exit(void)
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index e604cbc91763..b004ea2a1102 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -563,7 +563,7 @@ static int probe_thermostat(struct i2c_client *client,
return 0;
}
-static int remove_thermostat(struct i2c_client *client)
+static void remove_thermostat(struct i2c_client *client)
{
struct thermostat *th = i2c_get_clientdata(client);
int i;
@@ -585,8 +585,6 @@ static int remove_thermostat(struct i2c_client *client)
write_both_fan_speed(th, -1);
kfree(th);
-
- return 0;
}
static const struct i2c_device_id therm_adt746x_id[] = {
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 9226b74fa08f..61fe2ab910b8 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -334,7 +334,7 @@ static void do_attach(struct i2c_adapter *adapter)
}
}
-static int
+static void
do_remove(struct i2c_client *client)
{
if (x.running) {
@@ -348,8 +348,6 @@ do_remove(struct i2c_client *client)
x.fan = NULL;
else
printk(KERN_ERR "g4fan: bad client\n");
-
- return 0;
}
static int
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
index 6ad6441abcbc..c5c54a4ce91f 100644
--- a/drivers/macintosh/windfarm_ad7417_sensor.c
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -289,7 +289,7 @@ static int wf_ad7417_probe(struct i2c_client *client,
return 0;
}
-static int wf_ad7417_remove(struct i2c_client *client)
+static void wf_ad7417_remove(struct i2c_client *client)
{
struct wf_ad7417_priv *pv = dev_get_drvdata(&client->dev);
int i;
@@ -302,8 +302,6 @@ static int wf_ad7417_remove(struct i2c_client *client)
wf_unregister_sensor(&pv->sensors[i]);
kref_put(&pv->ref, wf_ad7417_release);
-
- return 0;
}
static const struct i2c_device_id wf_ad7417_id[] = {
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 82e7b2005ae7..c5b1ca5bcd73 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -560,7 +560,7 @@ static int wf_fcu_probe(struct i2c_client *client,
return 0;
}
-static int wf_fcu_remove(struct i2c_client *client)
+static void wf_fcu_remove(struct i2c_client *client)
{
struct wf_fcu_priv *pv = dev_get_drvdata(&client->dev);
struct wf_fcu_fan *fan;
@@ -571,7 +571,6 @@ static int wf_fcu_remove(struct i2c_client *client)
wf_unregister_control(&fan->ctrl);
}
kref_put(&pv->ref, wf_fcu_release);
- return 0;
}
static const struct i2c_device_id wf_fcu_id[] = {
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index eb7e7f0bd219..204661c8e918 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -147,7 +147,7 @@ static int wf_lm75_probe(struct i2c_client *client,
return rc;
}
-static int wf_lm75_remove(struct i2c_client *client)
+static void wf_lm75_remove(struct i2c_client *client)
{
struct wf_lm75_sensor *lm = i2c_get_clientdata(client);
@@ -156,8 +156,6 @@ static int wf_lm75_remove(struct i2c_client *client)
/* release sensor */
wf_unregister_sensor(&lm->sens);
-
- return 0;
}
static const struct i2c_device_id wf_lm75_id[] = {
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index 807efdde86bc..40d25463346e 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -145,7 +145,7 @@ static int wf_lm87_probe(struct i2c_client *client,
return rc;
}
-static int wf_lm87_remove(struct i2c_client *client)
+static void wf_lm87_remove(struct i2c_client *client)
{
struct wf_lm87_sensor *lm = i2c_get_clientdata(client);
@@ -154,8 +154,6 @@ static int wf_lm87_remove(struct i2c_client *client)
/* release sensor */
wf_unregister_sensor(&lm->sens);
-
- return 0;
}
static const struct i2c_device_id wf_lm87_id[] = {
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 55ee417fb878..c0d404ebc792 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -104,14 +104,12 @@ static int wf_max6690_probe(struct i2c_client *client,
return rc;
}
-static int wf_max6690_remove(struct i2c_client *client)
+static void wf_max6690_remove(struct i2c_client *client)
{
struct wf_6690_sensor *max = i2c_get_clientdata(client);
max->i2c = NULL;
wf_unregister_sensor(&max->sens);
-
- return 0;
}
static const struct i2c_device_id wf_max6690_id[] = {
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 5ade627eaa78..be5d4593db93 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -316,7 +316,7 @@ static int wf_sat_probe(struct i2c_client *client,
return 0;
}
-static int wf_sat_remove(struct i2c_client *client)
+static void wf_sat_remove(struct i2c_client *client)
{
struct wf_sat *sat = i2c_get_clientdata(client);
struct wf_sat_sensor *sens;
@@ -330,8 +330,6 @@ static int wf_sat_remove(struct i2c_client *client)
}
sat->i2c = NULL;
kref_put(&sat->ref, wf_sat_release);
-
- return 0;
}
static const struct i2c_device_id wf_sat_id[] = {
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2acda9cea0f9..aebb7ef10e63 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -107,7 +107,7 @@
*
* BTREE NODES:
*
- * Our unit of allocation is a bucket, and we we can't arbitrarily allocate and
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
* free smaller than a bucket - so, that's how big our btree nodes are.
*
* (If buckets are really big we'll only use part of the bucket for a btree node
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 94d38e8a59b3..2bba4d6aaaa2 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1264,7 +1264,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
*
* Don't worry event 'out' is allocated from mempool, it can
* still be swapped here. Because state->pool is a page mempool
- * creaated by by mempool_init_page_pool(), which allocates
+ * created by mempool_init_page_pool(), which allocates
* pages by alloc_pages() indeed.
*/
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index ca4f435f7216..bd3afc856d53 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -54,7 +54,6 @@ void bch_cache_accounting_destroy(struct cache_accounting *acc);
void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass);
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
void bch_mark_cache_miss_collision(struct cache_set *c,
struct bcache_device *d);
void bch_mark_sectors_bypassed(struct cache_set *c,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3f0ff3aab6f2..0285b676e983 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -157,6 +157,53 @@ static void __update_writeback_rate(struct cached_dev *dc)
dc->writeback_rate_target = target;
}
+static bool idle_counter_exceeded(struct cache_set *c)
+{
+ int counter, dev_nr;
+
+ /*
+ * If c->idle_counter overflows (idle for a really long time),
+ * reset it to 0 and do not set the maximum rate this time, for
+ * code simplicity.
+ */
+ counter = atomic_inc_return(&c->idle_counter);
+ if (counter <= 0) {
+ atomic_set(&c->idle_counter, 0);
+ return false;
+ }
+
+ dev_nr = atomic_read(&c->attached_dev_nr);
+ if (dev_nr == 0)
+ return false;
+
+ /*
+ * c->idle_counter is increased by the writeback thread of every
+ * attached backing device; to represent a rough time period, the
+ * counter should be divided by dev_nr. Otherwise the idle time
+ * could not grow larger as more backing devices are attached.
+ * The following calculation is equivalent to checking
+ * (counter / dev_nr) < (dev_nr * 6)
+ * (counter / dev_nr) < (dev_nr * 6)
+ */
+ if (counter < (dev_nr * dev_nr * 6))
+ return false;
+
+ return true;
+}
+
+/*
+ * Idle_counter is increased every time update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it takes about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the maximum writeback
+ * rate is set for each dc->writeback_rate.rate.
+ * In order to avoid the extra locking cost of counting the exact number
+ * of dirty cached devices, c->attached_dev_nr is used to calculate the
+ * idle threshold. It might be bigger if not all cached devices are in
+ * writeback mode, but it still works well with a limited number of
+ * extra rounds of update_writeback_rate().
+ */
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
@@ -167,21 +214,8 @@ static bool set_at_max_writeback_rate(struct cache_set *c,
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
- /*
- * Idle_counter is increased everytime when update_writeback_rate() is
- * called. If all backing devices attached to the same cache set have
- * identical dc->writeback_rate_update_seconds values, it is about 6
- * rounds of update_writeback_rate() on each backing device before
- * c->at_max_writeback_rate is set to 1, and then max wrteback rate set
- * to each dc->writeback_rate.rate.
- * In order to avoid extra locking cost for counting exact dirty cached
- * devices number, c->attached_dev_nr is used to calculate the idle
- * throushold. It might be bigger if not all cached device are in write-
- * back mode, but it still works well with limited extra rounds of
- * update_writeback_rate().
- */
- if (atomic_inc_return(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6)
+
+ if (!idle_counter_exceeded(c))
return false;
if (atomic_read(&c->at_max_writeback_rate) != 1)
@@ -195,13 +229,10 @@ static bool set_at_max_writeback_rate(struct cache_set *c,
dc->writeback_rate_change = 0;
/*
- * Check c->idle_counter and c->at_max_writeback_rate agagain in case
- * new I/O arrives during before set_at_max_writeback_rate() returns.
- * Then the writeback rate is set to 1, and its new value should be
- * decided via __update_writeback_rate().
+ * Bail out in case new I/O arrives before
+ * set_at_max_writeback_rate() returns.
*/
- if ((atomic_read(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6) ||
+ if (!idle_counter_exceeded(c) ||
!atomic_read(&c->at_max_writeback_rate))
return false;
@@ -801,10 +832,9 @@ static int bch_writeback_thread(void *arg)
}
}
- if (dc->writeback_write_wq) {
- flush_workqueue(dc->writeback_write_wq);
+ if (dc->writeback_write_wq)
destroy_workqueue(dc->writeback_write_wq);
- }
+
cached_dev_put(dc);
wait_for_kthread_stop();
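For reference, the threshold arithmetic introduced by idle_counter_exceeded() can be checked in isolation; this is a plain userspace sketch (not kernel code) of the same comparison:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * counter is bumped once per update_writeback_rate() call on every
     * attached backing device, so counter / dev_nr approximates the idle
     * rounds seen per device.  The kernel writes the check as
     *     counter < dev_nr * dev_nr * 6
     * which avoids the division in
     *     counter / dev_nr < dev_nr * 6
     */
    static bool idle_exceeded(int counter, int dev_nr)
    {
            return counter >= dev_nr * dev_nr * 6;
    }

    int main(void)
    {
            printf("%d\n", idle_exceeded(6, 1));   /* 1: one device, 6 rounds */
            printf("%d\n", idle_exceeded(53, 3));  /* 0: 3 devices need 54 */
            printf("%d\n", idle_exceeded(54, 3));  /* 1 */
            return 0;
    }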
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4f49bbcce4f1..3001b10a3fbf 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -292,11 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+ blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
dm_complete_request(tio->orig, error);
+ return RQ_END_IO_NONE;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 332f96b58252..d8034ff0cb24 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1856,9 +1856,7 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_nowait(q);
+ return !bdev_nowait(dev->bdev);
}
static bool dm_table_supports_nowait(struct dm_table *t)
diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c
index 387ec43aef72..4f78cc55c251 100644
--- a/drivers/md/dm-verity-loadpin.c
+++ b/drivers/md/dm-verity-loadpin.c
@@ -14,6 +14,7 @@ LIST_HEAD(dm_verity_loadpin_trusted_root_digests);
static bool is_trusted_verity_target(struct dm_target *ti)
{
+ int verity_mode;
u8 *root_digest;
unsigned int digest_size;
struct dm_verity_loadpin_trusted_root_digest *trd;
@@ -22,6 +23,13 @@ static bool is_trusted_verity_target(struct dm_target *ti)
if (!dm_is_verity_target(ti))
return false;
+ verity_mode = dm_verity_get_mode(ti);
+
+ if ((verity_mode != DM_VERITY_MODE_EIO) &&
+ (verity_mode != DM_VERITY_MODE_RESTART) &&
+ (verity_mode != DM_VERITY_MODE_PANIC))
+ return false;
+
if (dm_verity_get_root_digest(ti, &root_digest, &digest_size))
return false;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 94b6cb599db4..8a00cc42e498 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1447,6 +1447,22 @@ bool dm_is_verity_target(struct dm_target *ti)
}
/*
+ * Get the verity mode (error behavior) of a verity target.
+ *
+ * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
+ * target.
+ */
+int dm_verity_get_mode(struct dm_target *ti)
+{
+ struct dm_verity *v = ti->private;
+
+ if (!dm_is_verity_target(ti))
+ return -EINVAL;
+
+ return v->mode;
+}
+
+/*
* Get the root digest of a verity target.
*
* Returns a copy of the root digest, the caller is responsible for
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 45455de1b4bc..98f306ec6a33 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -134,6 +134,7 @@ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
extern bool dm_is_verity_target(struct dm_target *ti);
+extern int dm_verity_get_mode(struct dm_target *ti);
extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest,
unsigned int *digest_size);
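Taken together, the three dm-verity hunks above let dm-verity-loadpin ask a verity target how it reacts to corruption before trusting it. A condensed sketch of that use of the new helper, simplified from the loadpin hunk and using no API beyond what this patch adds:

    /* Only modes that never hand out corrupted data are trusted here. */
    static bool verity_mode_is_strict(struct dm_target *ti)
    {
            int mode = dm_verity_get_mode(ti);

            if (mode < 0)   /* -EINVAL: not a verity target */
                    return false;

            return mode == DM_VERITY_MODE_EIO ||
                   mode == DM_VERITY_MODE_RESTART ||
                   mode == DM_VERITY_MODE_PANIC;
    }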
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 742b2349fea3..10e0c5381d01 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -876,8 +876,8 @@ static int join(struct mddev *mddev, int nodes)
memset(str, 0, 64);
sprintf(str, "%pU", mddev->uuid);
ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
- DLM_LSFL_FS, LVB_SIZE,
- &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
+ 0, LVB_SIZE, &md_ls_ops, mddev,
+ &ops_rv, &cinfo->lockspace);
if (ret)
goto err;
wait_for_completion(&cinfo->completion);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index afaf36b2f6ab..a467b492d4ad 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5620,6 +5620,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
* removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
+ flush_workqueue(md_rdev_misc_wq);
mutex_lock(&disks_mutex);
mddev = mddev_alloc(dev);
@@ -5844,7 +5845,7 @@ int md_run(struct mddev *mddev)
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
- nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
+ nowait = nowait && bdev_nowait(rdev->bdev);
}
if (!bioset_initialized(&mddev->bio_set)) {
@@ -6238,11 +6239,11 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
+ md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
if (mddev->event_work.func)
flush_workqueue(md_misc_wq);
- md_bitmap_destroy(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
@@ -6260,6 +6261,7 @@ void md_stop(struct mddev *mddev)
/* stop the array and free an attached data structures.
* This is called from dm-raid
*/
+ __md_stop_writes(mddev);
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -6980,7 +6982,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
* If the new disk does not support REQ_NOWAIT,
* disable on the whole MD.
*/
- if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+ if (!bdev_nowait(rdev->bdev)) {
pr_info("%s: Disabling nowait because %pg does not support nowait\n",
mdname(mddev), rdev->bdev);
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
@@ -8154,7 +8156,6 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
- mddev_get(mddev);
if (!mddev_get(mddev))
continue;
spin_unlock(&all_mddevs_lock);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 78addfe4a0c9..857c49399c28 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -47,7 +47,7 @@ static void dump_zones(struct mddev *mddev)
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- len += snprintf(line+len, 200-len, "%s%pg", k?"/":"",
+ len += scnprintf(line+len, 200-len, "%s%pg", k?"/":"",
conf->devlist[j * raid_disks + k]->bdev);
pr_debug("md: zone%d=[%s]\n", j, line);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9117fcdee1be..3aa8b6e11d58 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -79,6 +79,21 @@ static void end_reshape(struct r10conf *conf);
#include "raid1-10.c"
+#define NULL_CMD
+#define cmd_before(conf, cmd) \
+ do { \
+ write_sequnlock_irq(&(conf)->resync_lock); \
+ cmd; \
+ } while (0)
+#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
+
+#define wait_event_barrier_cmd(conf, cond, cmd) \
+ wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
+ cmd_after(conf))
+
+#define wait_event_barrier(conf, cond) \
+ wait_event_barrier_cmd(conf, cond, NULL_CMD)
+
/*
* for resync bio, r10bio pointer can be retrieved from the per-bio
* 'struct resync_pages'.
@@ -274,6 +289,12 @@ static void put_buf(struct r10bio *r10_bio)
lower_barrier(conf);
}
+static void wake_up_barrier(struct r10conf *conf)
+{
+ if (wq_has_sleeper(&conf->wait_barrier))
+ wake_up(&conf->wait_barrier);
+}
+
static void reschedule_retry(struct r10bio *r10_bio)
{
unsigned long flags;
@@ -930,78 +951,101 @@ static void flush_pending_writes(struct r10conf *conf)
static void raise_barrier(struct r10conf *conf, int force)
{
+ write_seqlock_irq(&conf->resync_lock);
BUG_ON(force && !conf->barrier);
- spin_lock_irq(&conf->resync_lock);
/* Wait until no block IO is waiting (unless 'force') */
- wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock);
+ wait_event_barrier(conf, force || !conf->nr_waiting);
/* block any new IO from starting */
- conf->barrier++;
+ WRITE_ONCE(conf->barrier, conf->barrier + 1);
/* Now wait for all pending IO to complete */
- wait_event_lock_irq(conf->wait_barrier,
- !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock);
+ wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
+ conf->barrier < RESYNC_DEPTH);
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
}
static void lower_barrier(struct r10conf *conf)
{
unsigned long flags;
- spin_lock_irqsave(&conf->resync_lock, flags);
- conf->barrier--;
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+
+ write_seqlock_irqsave(&conf->resync_lock, flags);
+ WRITE_ONCE(conf->barrier, conf->barrier - 1);
+ write_sequnlock_irqrestore(&conf->resync_lock, flags);
wake_up(&conf->wait_barrier);
}
+static bool stop_waiting_barrier(struct r10conf *conf)
+{
+ struct bio_list *bio_list = current->bio_list;
+
+ /* barrier is dropped */
+ if (!conf->barrier)
+ return true;
+
+ /*
+ * If there are already pending requests (preventing the barrier from
+ * rising completely), and the pre-process bio queue isn't empty, then
+ * don't wait, as we need to empty that queue to get the nr_pending
+ * count down.
+ */
+ if (atomic_read(&conf->nr_pending) && bio_list &&
+ (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
+ return true;
+
+ /* move on if recovery thread is blocked by us */
+ if (conf->mddev->thread->tsk == current &&
+ test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
+ conf->nr_queued > 0)
+ return true;
+
+ return false;
+}
+
+static bool wait_barrier_nolock(struct r10conf *conf)
+{
+ unsigned int seq = read_seqbegin(&conf->resync_lock);
+
+ if (READ_ONCE(conf->barrier))
+ return false;
+
+ atomic_inc(&conf->nr_pending);
+ if (!read_seqretry(&conf->resync_lock, seq))
+ return true;
+
+ if (atomic_dec_and_test(&conf->nr_pending))
+ wake_up_barrier(conf);
+
+ return false;
+}
+
static bool wait_barrier(struct r10conf *conf, bool nowait)
{
bool ret = true;
- spin_lock_irq(&conf->resync_lock);
+ if (wait_barrier_nolock(conf))
+ return true;
+
+ write_seqlock_irq(&conf->resync_lock);
if (conf->barrier) {
- struct bio_list *bio_list = current->bio_list;
- conf->nr_waiting++;
- /* Wait for the barrier to drop.
- * However if there are already pending
- * requests (preventing the barrier from
- * rising completely), and the
- * pre-process bio queue isn't empty,
- * then don't wait, as we need to empty
- * that queue to get the nr_pending
- * count down.
- */
/* Return false when nowait flag is set */
if (nowait) {
ret = false;
} else {
+ conf->nr_waiting++;
raid10_log(conf->mddev, "wait barrier");
- wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (atomic_read(&conf->nr_pending) &&
- bio_list &&
- (!bio_list_empty(&bio_list[0]) ||
- !bio_list_empty(&bio_list[1]))) ||
- /* move on if recovery thread is
- * blocked by us
- */
- (conf->mddev->thread->tsk == current &&
- test_bit(MD_RECOVERY_RUNNING,
- &conf->mddev->recovery) &&
- conf->nr_queued > 0),
- conf->resync_lock);
+ wait_event_barrier(conf, stop_waiting_barrier(conf));
+ conf->nr_waiting--;
}
- conf->nr_waiting--;
if (!conf->nr_waiting)
wake_up(&conf->wait_barrier);
}
/* Only increment nr_pending when we wait */
if (ret)
atomic_inc(&conf->nr_pending);
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
return ret;
}
@@ -1009,7 +1053,7 @@ static void allow_barrier(struct r10conf *conf)
{
if ((atomic_dec_and_test(&conf->nr_pending)) ||
(conf->array_freeze_pending))
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
}
static void freeze_array(struct r10conf *conf, int extra)
@@ -1026,27 +1070,24 @@ static void freeze_array(struct r10conf *conf, int extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
- spin_lock_irq(&conf->resync_lock);
+ write_seqlock_irq(&conf->resync_lock);
conf->array_freeze_pending++;
- conf->barrier++;
+ WRITE_ONCE(conf->barrier, conf->barrier + 1);
conf->nr_waiting++;
- wait_event_lock_irq_cmd(conf->wait_barrier,
- atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
- conf->resync_lock,
- flush_pending_writes(conf));
-
+ wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
+ conf->nr_queued + extra, flush_pending_writes(conf));
conf->array_freeze_pending--;
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r10conf *conf)
{
/* reverse the effect of the freeze */
- spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
+ write_seqlock_irq(&conf->resync_lock);
+ WRITE_ONCE(conf->barrier, conf->barrier - 1);
conf->nr_waiting--;
wake_up(&conf->wait_barrier);
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
}
static sector_t choose_data_offset(struct r10bio *r10_bio,
@@ -1885,7 +1926,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
__make_request(mddev, bio, sectors);
/* In case raid10d snuck in to freeze_array */
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
return true;
}
@@ -1980,7 +2021,7 @@ static int enough(struct r10conf *conf, int ignore)
* Otherwise, it must be degraded:
* - recovery is interrupted.
* - &mddev->degraded is bumped.
-
+ *
* @rdev is marked as &Faulty excluding case when array is failed and
* &mddev->fail_last_dev is off.
*/
@@ -2639,18 +2680,18 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, enum req_op op)
{
sector_t first_bad;
int bad_sectors;
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
- && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (op == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
@@ -2780,7 +2821,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage, WRITE)
+ s, conf->tmppage, REQ_OP_WRITE)
== 0) {
/* Well, this device is dead */
pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
@@ -2814,8 +2855,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage,
- READ)) {
+ s, conf->tmppage, REQ_OP_READ)) {
case 0:
/* Well, this device is dead */
pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
@@ -4033,7 +4073,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list);
INIT_LIST_HEAD(&conf->bio_end_io_list);
- spin_lock_init(&conf->resync_lock);
+ seqlock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
atomic_set(&conf->nr_pending, 0);
@@ -4352,7 +4392,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
rdev->new_raid_disk = rdev->raid_disk * 2;
rdev->sectors = size;
}
- conf->barrier = 1;
+ WRITE_ONCE(conf->barrier, 1);
}
return conf;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 5c0804d8bb1f..8c072ce0bc54 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -76,7 +76,7 @@ struct r10conf {
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- spinlock_t resync_lock;
+ seqlock_t resync_lock;
atomic_t nr_pending;
int nr_waiting;
int nr_queued;
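The raid10 barrier rework above hinges on the kernel seqlock: raise_barrier()/lower_barrier() now write under write_seqlock_irq(), while wait_barrier_nolock() samples the barrier with read_seqbegin()/read_seqretry() and backs out on a race, so the common I/O path takes no lock at all. A generic, self-contained sketch of that reader/writer pattern (not raid10-specific code):

    #include <linux/seqlock.h>

    static seqlock_t state_lock;    /* seqlock_init(&state_lock) at init time */
    static int barrier_raised;

    /* Writer side: analogous to raise_barrier()/lower_barrier(). */
    static void set_barrier(int val)
    {
            write_seqlock_irq(&state_lock);
            WRITE_ONCE(barrier_raised, val);
            write_sequnlock_irq(&state_lock);
    }

    /* Reader side: lock-free, retried until a consistent snapshot is read. */
    static bool barrier_is_clear(void)
    {
            unsigned int seq;
            bool clear;

            do {
                    seq = read_seqbegin(&state_lock);
                    clear = !READ_ONCE(barrier_raised);
            } while (read_seqretry(&state_lock, seq));

            return clear;
    }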
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index f4e1cc1ece43..79c73330020b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -125,7 +125,7 @@ struct r5l_log {
* reclaimed. if it's 0, reclaim spaces
* used by io_units which are in
* IO_UNIT_STRIPE_END state (eg, reclaim
- * dones't wait for specific io_unit
+ * doesn't wait for specific io_unit
* switching to IO_UNIT_STRIPE_END
* state) */
wait_queue_head_t iounit_wait;
@@ -1327,9 +1327,9 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* superblock is updated to new log tail. Updating superblock (either
* directly call md_update_sb() or depend on md thread) must hold
* reconfig mutex. On the other hand, raid5_quiesce is called with
- * reconfig_mutex hold. The first step of raid5_quiesce() is waitting
- * for all IO finish, hence waitting for reclaim thread, while reclaim
- * thread is calling this function and waitting for reconfig mutex. So
+ * reconfig_mutex hold. The first step of raid5_quiesce() is waiting
+ * for all IO finish, hence waiting for reclaim thread, while reclaim
+ * thread is calling this function and waiting for reconfig mutex. So
* there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex
*/
@@ -1923,7 +1923,8 @@ r5c_recovery_alloc_stripe(
{
struct stripe_head *sh;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
+ sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
+ noblock ? R5_GAS_NOBLOCK : 0);
if (!sh)
return NULL; /* no more stripe available */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31a0cbf63384..7b820b81d8c2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -789,87 +790,80 @@ struct stripe_request_ctx {
*/
static bool is_inactive_blocked(struct r5conf *conf, int hash)
{
- int active = atomic_read(&conf->active_stripes);
-
if (list_empty(conf->inactive_list + hash))
return false;
if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return true;
- return active < (conf->max_nr_stripes * 3 / 4);
+ return (atomic_read(&conf->active_stripes) <
+ (conf->max_nr_stripes * 3 / 4));
}
-static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
struct stripe_request_ctx *ctx, sector_t sector,
- bool previous, bool noblock, bool noquiesce)
+ unsigned int flags)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
+ int previous = !!(flags & R5_GAS_PREVIOUS);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(conf->hash_locks + hash);
-retry:
- if (!noquiesce && conf->quiesce) {
- /*
- * Must release the reference to batch_last before waiting,
- * on quiesce, otherwise the batch_last will hold a reference
- * to a stripe and raid5_quiesce() will deadlock waiting for
- * active_stripes to go to zero.
- */
- if (ctx && ctx->batch_last) {
- raid5_release_stripe(ctx->batch_last);
- ctx->batch_last = NULL;
- }
-
- wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
- *(conf->hash_locks + hash));
- }
+ for (;;) {
+ if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before
+ * waiting on quiesce; otherwise batch_last will
+ * hold a reference to a stripe and raid5_quiesce()
+ * will deadlock waiting for active_stripes to go to
+ * zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
- sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
- if (sh)
- goto out;
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ !conf->quiesce,
+ *(conf->hash_locks + hash));
+ }
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- goto wait_for_stripe;
+ sh = find_get_stripe(conf, sector, conf->generation - previous,
+ hash);
+ if (sh)
+ break;
- sh = get_free_stripe(conf, hash);
- if (sh) {
- r5c_check_stripe_cache_usage(conf);
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- goto out;
- }
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ break;
+ }
- if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
- set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ }
-wait_for_stripe:
- if (noblock)
- goto out;
+ if (flags & R5_GAS_NOBLOCK)
+ break;
- set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(conf->wait_for_stripe,
- is_inactive_blocked(conf, hash),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- goto retry;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ }
-out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
-struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
- sector_t sector, bool previous, bool noblock, bool noquiesce)
-{
- return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
- noquiesce);
-}
-
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
@@ -4047,7 +4041,7 @@ static void handle_stripe_fill(struct stripe_head *sh,
* back cache (prexor with orig_page, and then xor with
* page) in the read path
*/
- if (s->injournal && s->failed) {
+ if (s->to_read && s->injournal && s->failed) {
if (test_bit(STRIPE_R5C_CACHING, &sh->state))
r5c_make_stripe_write_out(sh);
goto out;
@@ -4636,7 +4630,8 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
sector_t bn = raid5_compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
+ sh2 = raid5_get_active_stripe(conf, NULL, s,
+ R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -5273,7 +5268,9 @@ static void handle_stripe(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
- = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
+ = raid5_get_active_stripe(conf, NULL, sh->sector,
+ R5_GAS_PREVIOUS | R5_GAS_NOBLOCK |
+ R5_GAS_NOQUIESCE);
if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
/* sh cannot be written until sh_src has been read.
* so arrange for sh to be delayed a little
@@ -5542,7 +5539,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
&bad_sectors)) {
- bio_put(raid_bio);
rdev_dec_pending(rdev, mddev);
return 0;
}
@@ -5823,7 +5819,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
DEFINE_WAIT(w);
int d;
again:
- sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
@@ -5978,7 +5974,7 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
enum stripe_result ret;
struct stripe_head *sh;
sector_t new_sector;
- int previous = 0;
+ int previous = 0, flags = 0;
int seq, dd_idx;
seq = read_seqcount_begin(&conf->gen_lock);
@@ -6012,8 +6008,11 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
new_sector, logical_sector);
- sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
+ if (previous)
+ flags |= R5_GAS_PREVIOUS;
+ if (bi->bi_opf & REQ_RAHEAD)
+ flags |= R5_GAS_NOBLOCK;
+ sh = raid5_get_active_stripe(conf, ctx, new_sector, flags);
if (unlikely(!sh)) {
/* cannot get stripe, just give-up */
bi->bi_status = BLK_STS_IOERR;
@@ -6362,7 +6361,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
int j;
int skipped_disk = 0;
- sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
+ sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i,
+ R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -6411,7 +6411,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
+ sh = raid5_get_active_stripe(conf, NULL, first_sector,
+ R5_GAS_PREVIOUS | R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -6531,9 +6532,10 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
- sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, NULL, sector_nr,
+ R5_GAS_NOBLOCK);
if (sh == NULL) {
- sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -6596,8 +6598,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
/* already done this stripe */
continue;
- sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
-
+ sh = raid5_get_active_stripe(conf, NULL, sector,
+ R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
if (!sh) {
/* failed to get a stripe - must wait */
conf->retry_read_aligned = raid_bio;
@@ -6781,7 +6783,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+ * Waiting on MD_SB_CHANGE_PENDING below may deadlock,
+ * since md_check_recovery() is needed to clear
+ * the flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a5082bed83c8..e873938a6125 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -803,16 +803,24 @@ raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
}
#endif
-extern void md_raid5_kick_device(struct r5conf *conf);
-extern int raid5_set_cache_size(struct mddev *mddev, int size);
-extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
-extern void raid5_release_stripe(struct stripe_head *sh);
-extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
- int previous, int *dd_idx,
- struct stripe_head *sh);
-extern struct stripe_head *
-raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- bool previous, bool noblock, bool noquiesce);
-extern int raid5_calc_degraded(struct r5conf *conf);
-extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
+void md_raid5_kick_device(struct r5conf *conf);
+int raid5_set_cache_size(struct mddev *mddev, int size);
+sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
+void raid5_release_stripe(struct stripe_head *sh);
+sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+ int previous, int *dd_idx, struct stripe_head *sh);
+
+struct stripe_request_ctx;
+/* get stripe from previous generation (when reshaping) */
+#define R5_GAS_PREVIOUS (1 << 0)
+/* do not block waiting for a free stripe */
+#define R5_GAS_NOBLOCK (1 << 1)
+/* do not block waiting for quiesce to be released */
+#define R5_GAS_NOQUIESCE (1 << 2)
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, sector_t sector,
+ unsigned int flags);
+
+int raid5_calc_degraded(struct r5conf *conf);
+int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif
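The prototype change above replaces the three bools of raid5_get_active_stripe() with a flags argument. For reference, this is how the call sites in this patch map the old boolean arguments onto the new R5_GAS_* flags (fragment only, mirroring conversions already shown in raid5.c):

    /* Old: raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); */
    sh_src = raid5_get_active_stripe(conf, NULL, sh->sector,
                                     R5_GAS_PREVIOUS | R5_GAS_NOBLOCK |
                                     R5_GAS_NOQUIESCE);

    /* Old: raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); */
    sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
                                 noblock ? R5_GAS_NOBLOCK : 0);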
diff --git a/drivers/media/cec/i2c/ch7322.c b/drivers/media/cec/i2c/ch7322.c
index 0814338c43e4..34fad7123704 100644
--- a/drivers/media/cec/i2c/ch7322.c
+++ b/drivers/media/cec/i2c/ch7322.c
@@ -565,7 +565,7 @@ err_mutex:
return ret;
}
-static int ch7322_remove(struct i2c_client *client)
+static void ch7322_remove(struct i2c_client *client)
{
struct ch7322 *ch7322 = i2c_get_clientdata(client);
@@ -578,8 +578,6 @@ static int ch7322_remove(struct i2c_client *client)
mutex_destroy(&ch7322->mutex);
dev_info(&client->dev, "device unregistered\n");
-
- return 0;
}
static const struct of_device_id ch7322_of_match[] = {
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index a1bd6d9c9223..909df82fed33 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -354,6 +354,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
vb2_core_querybuf(&ctx->vb_q, b->index, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
@@ -378,8 +384,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
int ret;
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 57f52c004a23..ba38783b2b4f 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -98,14 +98,13 @@ err:
return ret;
}
-static int a8293_remove(struct i2c_client *client)
+static void a8293_remove(struct i2c_client *client)
{
struct a8293_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(dev);
- return 0;
}
static const struct i2c_device_id a8293_id_table[] = {
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 7d7c341b2bd8..d85929582c3f 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1540,7 +1540,7 @@ err:
return ret;
}
-static int af9013_remove(struct i2c_client *client)
+static void af9013_remove(struct i2c_client *client)
{
struct af9013_state *state = i2c_get_clientdata(client);
@@ -1551,8 +1551,6 @@ static int af9013_remove(struct i2c_client *client)
regmap_exit(state->regmap);
kfree(state);
-
- return 0;
}
static const struct i2c_device_id af9013_id_table[] = {
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 785c49b3d307..808da7a9ffe7 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1163,7 +1163,7 @@ err:
return ret;
}
-static int af9033_remove(struct i2c_client *client)
+static void af9033_remove(struct i2c_client *client)
{
struct af9033_dev *dev = i2c_get_clientdata(client);
@@ -1171,8 +1171,6 @@ static int af9033_remove(struct i2c_client *client)
regmap_exit(dev->regmap);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id af9033_id_table[] = {
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 8cdca051e51b..e4f99bd468cb 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -758,13 +758,12 @@ static int au8522_probe(struct i2c_client *client,
return 0;
}
-static int au8522_remove(struct i2c_client *client)
+static void au8522_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
au8522_release_state(to_state(sd));
- return 0;
}
static const struct i2c_device_id au8522_id[] = {
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index 1c8207ab8988..fbc666fa04ec 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -664,14 +664,12 @@ err:
return ret;
}
-static int cxd2099_remove(struct i2c_client *client)
+static void cxd2099_remove(struct i2c_client *client)
{
struct cxd *ci = i2c_get_clientdata(client);
regmap_exit(ci->regmap);
kfree(ci);
-
- return 0;
}
static const struct i2c_device_id cxd2099_id[] = {
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index b1618339eec0..5d98222f9df0 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -705,7 +705,7 @@ err:
return ret;
}
-static int cxd2820r_remove(struct i2c_client *client)
+static void cxd2820r_remove(struct i2c_client *client)
{
struct cxd2820r_priv *priv = i2c_get_clientdata(client);
@@ -721,8 +721,6 @@ static int cxd2820r_remove(struct i2c_client *client)
regmap_exit(priv->regmap[0]);
kfree(priv);
-
- return 0;
}
static const struct i2c_device_id cxd2820r_id_table[] = {
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index d45b4ddc8f91..baf2a378e565 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -899,14 +899,13 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int dvb_pll_remove(struct i2c_client *client)
+static void dvb_pll_remove(struct i2c_client *client)
{
struct dvb_frontend *fe = i2c_get_clientdata(client);
struct dvb_pll_priv *priv = fe->tuner_priv;
ida_simple_remove(&pll_ida, priv->nr);
dvb_pll_release(fe);
- return 0;
}
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 136b76cb4807..424311afb2bf 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2226,7 +2226,7 @@ fail:
return ret;
}
-static int lgdt3306a_remove(struct i2c_client *client)
+static void lgdt3306a_remove(struct i2c_client *client)
{
struct lgdt3306a_state *state = i2c_get_clientdata(client);
@@ -2237,8 +2237,6 @@ static int lgdt3306a_remove(struct i2c_client *client)
kfree(state->cfg);
kfree(state);
-
- return 0;
}
static const struct i2c_device_id lgdt3306a_id_table[] = {
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index da3a8c5e18d8..ea9ae22fd201 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -974,15 +974,13 @@ static const struct dvb_frontend_ops lgdt3303_ops = {
.release = lgdt330x_release,
};
-static int lgdt330x_remove(struct i2c_client *client)
+static void lgdt330x_remove(struct i2c_client *client)
{
struct lgdt330x_state *state = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(state);
-
- return 0;
}
static const struct i2c_device_id lgdt330x_id_table[] = {
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index bce0f42f3d19..4e844b2ef597 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1914,7 +1914,7 @@ err:
return ret;
}
-static int m88ds3103_remove(struct i2c_client *client)
+static void m88ds3103_remove(struct i2c_client *client)
{
struct m88ds3103_dev *dev = i2c_get_clientdata(client);
@@ -1926,7 +1926,6 @@ static int m88ds3103_remove(struct i2c_client *client)
i2c_mux_del_adapters(dev->muxc);
kfree(dev);
- return 0;
}
static const struct i2c_device_id m88ds3103_id_table[] = {
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index fff212c0bf3b..452571b380b7 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -762,15 +762,13 @@ err_i2c_t:
return ret;
}
-static int mn88443x_remove(struct i2c_client *client)
+static void mn88443x_remove(struct i2c_client *client)
{
struct mn88443x_priv *chip = i2c_get_clientdata(client);
mn88443x_cmn_power_off(chip);
i2c_unregister_device(chip->client_t);
-
- return 0;
}
static const struct mn88443x_spec mn88443x_spec_pri = {
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 73922fc8f39c..2b01cc678f7e 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -691,7 +691,7 @@ err:
return ret;
}
-static int mn88472_remove(struct i2c_client *client)
+static void mn88472_remove(struct i2c_client *client)
{
struct mn88472_dev *dev = i2c_get_clientdata(client);
@@ -706,8 +706,6 @@ static int mn88472_remove(struct i2c_client *client)
regmap_exit(dev->regmap[0]);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id mn88472_id_table[] = {
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 4838969ef735..f0ecf5910c02 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -726,7 +726,7 @@ err:
return ret;
}
-static int mn88473_remove(struct i2c_client *client)
+static void mn88473_remove(struct i2c_client *client)
{
struct mn88473_dev *dev = i2c_get_clientdata(client);
@@ -741,8 +741,6 @@ static int mn88473_remove(struct i2c_client *client)
regmap_exit(dev->regmap[0]);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id mn88473_id_table[] = {
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index dd7954e8f553..129630cbffff 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -1337,15 +1337,13 @@ err:
return -ENODEV;
}
-static int mxl692_remove(struct i2c_client *client)
+static void mxl692_remove(struct i2c_client *client)
{
struct mxl692_dev *dev = i2c_get_clientdata(client);
dev->fe.demodulator_priv = NULL;
i2c_set_clientdata(client, NULL);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id mxl692_id_table[] = {
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index e6b8367c8cce..e0fbf41316ae 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -865,7 +865,7 @@ err:
return ret;
}
-static int rtl2830_remove(struct i2c_client *client)
+static void rtl2830_remove(struct i2c_client *client)
{
struct rtl2830_dev *dev = i2c_get_clientdata(client);
@@ -874,8 +874,6 @@ static int rtl2830_remove(struct i2c_client *client)
i2c_mux_del_adapters(dev->muxc);
regmap_exit(dev->regmap);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id rtl2830_id_table[] = {
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index dcbeb9f5e12a..4fa884eda5d5 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1110,7 +1110,7 @@ err:
return ret;
}
-static int rtl2832_remove(struct i2c_client *client)
+static void rtl2832_remove(struct i2c_client *client)
{
struct rtl2832_dev *dev = i2c_get_clientdata(client);
@@ -1123,8 +1123,6 @@ static int rtl2832_remove(struct i2c_client *client)
regmap_exit(dev->regmap);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id rtl2832_id_table[] = {
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index ebee230afb7b..86b0d59169dd 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1274,14 +1274,13 @@ error:
return ret;
}
-static int si2165_remove(struct i2c_client *client)
+static void si2165_remove(struct i2c_client *client)
{
struct si2165_state *state = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(state);
- return 0;
}
static const struct i2c_device_id si2165_id_table[] = {
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 196e028a6617..8157df4570d1 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -774,7 +774,7 @@ err:
return ret;
}
-static int si2168_remove(struct i2c_client *client)
+static void si2168_remove(struct i2c_client *client)
{
struct si2168_dev *dev = i2c_get_clientdata(client);
@@ -786,8 +786,6 @@ static int si2168_remove(struct i2c_client *client)
dev->fe.demodulator_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id si2168_id_table[] = {
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 992f22167fbe..27e7037e130e 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -398,14 +398,13 @@ err:
return ret;
}
-static int sp2_remove(struct i2c_client *client)
+static void sp2_remove(struct i2c_client *client)
{
struct sp2 *s = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
sp2_exit(client);
kfree(s);
- return 0;
}
static const struct i2c_device_id sp2_id[] = {
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 90d24131d335..0a600c1d7d1b 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5032,12 +5032,11 @@ error:
return ret;
}
-static int stv090x_remove(struct i2c_client *client)
+static void stv090x_remove(struct i2c_client *client)
{
struct stv090x_state *state = i2c_get_clientdata(client);
stv090x_release(&state->frontend);
- return 0;
}
struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index 5012d0231652..fbc4dbd62151 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -436,12 +436,11 @@ static int stv6110x_probe(struct i2c_client *client,
return 0;
}
-static int stv6110x_remove(struct i2c_client *client)
+static void stv6110x_remove(struct i2c_client *client)
{
struct stv6110x_state *stv6110x = i2c_get_clientdata(client);
stv6110x_release(stv6110x->frontend);
- return 0;
}
const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index e83836b29715..c22d2a2b2a45 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -819,14 +819,13 @@ free_state:
return ret;
}
-static int tc90522_remove(struct i2c_client *client)
+static void tc90522_remove(struct i2c_client *client)
{
struct tc90522_state *state;
state = cfg_to_state(i2c_get_clientdata(client));
i2c_del_adapter(&state->tuner_i2c);
kfree(state);
- return 0;
}
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 685c0ac71819..d1098ef20a8b 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -1221,14 +1221,13 @@ err:
return ret;
}
-static int tda10071_remove(struct i2c_client *client)
+static void tda10071_remove(struct i2c_client *client)
{
struct tda10071_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(dev);
- return 0;
}
static const struct i2c_device_id tda10071_id_table[] = {
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 3e383912bcfd..02338256b974 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -696,7 +696,7 @@ err:
return ret;
}
-static int ts2020_remove(struct i2c_client *client)
+static void ts2020_remove(struct i2c_client *client)
{
struct ts2020_priv *dev = i2c_get_clientdata(client);
@@ -708,7 +708,6 @@ static int ts2020_remove(struct i2c_client *client)
regmap_exit(dev->regmap);
kfree(dev);
- return 0;
}
static const struct i2c_device_id ts2020_id_table[] = {
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 2958a4694461..516de278cc49 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -342,7 +342,7 @@ cleanup:
return ret;
}
-static int ad5820_remove(struct i2c_client *client)
+static void ad5820_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ad5820_device *coil = to_ad5820_device(subdev);
@@ -351,7 +351,6 @@ static int ad5820_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&coil->ctrls);
media_entity_cleanup(&coil->subdev.entity);
mutex_destroy(&coil->power_lock);
- return 0;
}
static const struct i2c_device_id ad5820_id_table[] = {
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 8679a44e6413..4a255a492918 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1174,7 +1174,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int ad9389b_remove(struct i2c_client *client)
+static void ad9389b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ad9389b_state *state = get_ad9389b_state(sd);
@@ -1192,7 +1192,6 @@ static int ad9389b_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 522a0b10e415..1f353157df07 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -510,7 +510,7 @@ free_and_quit:
return ret;
}
-static int adp1653_remove(struct i2c_client *client)
+static void adp1653_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct adp1653_flash *flash = to_adp1653_flash(subdev);
@@ -518,8 +518,6 @@ static int adp1653_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&flash->subdev);
v4l2_ctrl_handler_free(&flash->ctrls);
media_entity_cleanup(&flash->subdev.entity);
-
- return 0;
}
static const struct i2c_device_id adp1653_id_table[] = {
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 714e31f993e1..61a2f87d3c62 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -368,12 +368,11 @@ static int adv7170_probe(struct i2c_client *client,
return 0;
}
-static int adv7170_remove(struct i2c_client *client)
+static void adv7170_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 1813f67f0fe1..b58689728243 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -423,12 +423,11 @@ static int adv7175_probe(struct i2c_client *client,
return 0;
}
-static int adv7175_remove(struct i2c_client *client)
+static void adv7175_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 5fde5243722d..216fe396973f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1514,7 +1514,7 @@ err_unregister_csi_client:
return ret;
}
-static int adv7180_remove(struct i2c_client *client)
+static void adv7180_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7180_state *state = to_state(sd);
@@ -1534,8 +1534,6 @@ static int adv7180_remove(struct i2c_client *client)
adv7180_set_power_pin(state, false);
mutex_destroy(&state->mutex);
-
- return 0;
}
static const struct i2c_device_id adv7180_id[] = {
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index ba746a19fd39..313c706e8335 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -613,13 +613,12 @@ static int adv7183_probe(struct i2c_client *client,
return 0;
}
-static int adv7183_remove(struct i2c_client *client)
+static void adv7183_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id adv7183_id[] = {
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 63e94dfcb5d3..7e84869d2434 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -492,15 +492,13 @@ done:
return err;
}
-static int adv7343_remove(struct i2c_client *client)
+static void adv7343_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7343_state *state = to_state(sd);
v4l2_async_unregister_subdev(&state->sd);
v4l2_ctrl_handler_free(&state->hdl);
-
- return 0;
}
static const struct i2c_device_id adv7343_id[] = {
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index b6234c8231c9..fb5fefa83b18 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -437,15 +437,13 @@ static int adv7393_probe(struct i2c_client *client,
return err;
}
-static int adv7393_remove(struct i2c_client *client)
+static void adv7393_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7393_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
-
- return 0;
}
static const struct i2c_device_id adv7393_id[] = {
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index 4e54148147b9..4498d78a2357 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -815,7 +815,7 @@ err_free_mutex:
return ret;
}
-static int adv748x_remove(struct i2c_client *client)
+static void adv748x_remove(struct i2c_client *client)
{
struct adv748x_state *state = i2c_get_clientdata(client);
@@ -828,8 +828,6 @@ static int adv748x_remove(struct i2c_client *client)
adv748x_unregister_clients(state);
adv748x_dt_cleanup(state);
mutex_destroy(&state->mutex);
-
- return 0;
}
static const struct of_device_id adv748x_of_table[] = {
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 202e0cd83f90..49aca579576a 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1923,7 +1923,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv7511_remove(struct i2c_client *client)
+static void adv7511_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7511_state *state = get_adv7511_state(sd);
@@ -1943,7 +1943,6 @@ static int adv7511_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 497419a5cfdd..5988a4fa0c46 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3660,7 +3660,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv76xx_remove(struct i2c_client *client)
+static void adv76xx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv76xx_state *state = to_state(sd);
@@ -3677,7 +3677,6 @@ static int adv76xx_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
adv76xx_unregister_clients(to_state(sd));
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 22caa070273b..a8dd92948df0 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3593,7 +3593,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv7842_remove(struct i2c_client *client)
+static void adv7842_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7842_state *state = to_state(sd);
@@ -3604,7 +3604,6 @@ static int adv7842_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
adv7842_unregister_clients(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
index 40b1a4aa846c..1af9f698eecf 100644
--- a/drivers/media/i2c/ak7375.c
+++ b/drivers/media/i2c/ak7375.c
@@ -169,7 +169,7 @@ err_cleanup:
return ret;
}
-static int ak7375_remove(struct i2c_client *client)
+static void ak7375_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
@@ -177,8 +177,6 @@ static int ak7375_remove(struct i2c_client *client)
ak7375_subdev_cleanup(ak7375_dev);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
/*
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index dc569d5a4d9d..0370ad6b6811 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -297,13 +297,11 @@ static int ak881x_probe(struct i2c_client *client,
return 0;
}
-static int ak881x_remove(struct i2c_client *client)
+static void ak881x_remove(struct i2c_client *client)
{
struct ak881x *ak881x = to_ak881x(client);
v4l2_device_unregister_subdev(&ak881x->subdev);
-
- return 0;
}
static const struct i2c_device_id ak881x_id[] = {
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index c7bdfc69b9be..c6ab531532be 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -1018,7 +1018,7 @@ entity_cleanup:
return ret;
}
-static int ar0521_remove(struct i2c_client *client)
+static void ar0521_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ar0521_dev *sensor = to_ar0521_dev(sd);
@@ -1031,7 +1031,6 @@ static int ar0521_remove(struct i2c_client *client)
ar0521_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&sensor->lock);
- return 0;
}
static const struct dev_pm_ops ar0521_pm_ops = {
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 73bc50c919d7..4d9bb6eb7d65 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -446,14 +446,13 @@ static int bt819_probe(struct i2c_client *client,
return 0;
}
-static int bt819_remove(struct i2c_client *client)
+static void bt819_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct bt819 *decoder = to_bt819(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index c134fda270a1..70443ef1ac46 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -223,12 +223,11 @@ static int bt856_probe(struct i2c_client *client,
return 0;
}
-static int bt856_remove(struct i2c_client *client)
+static void bt856_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id bt856_id[] = {
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index 1a8df9f18ffb..c2508cbafd02 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -190,12 +190,11 @@ static int bt866_probe(struct i2c_client *client,
return 0;
}
-static int bt866_remove(struct i2c_client *client)
+static void bt866_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id bt866_id[] = {
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 7609add2aff4..4a14d7e5d9f2 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3665,7 +3665,7 @@ out_power_off:
return rval;
}
-static int ccs_remove(struct i2c_client *client)
+static void ccs_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -3687,8 +3687,6 @@ static int ccs_remove(struct i2c_client *client)
kfree(sensor->ccs_limits);
kvfree(sensor->sdata.backing);
kvfree(sensor->mdata.backing);
-
- return 0;
}
static const struct ccs_device smia_device = {
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index ebe55e261bff..d901a59883a9 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -99,13 +99,12 @@ static int cs3308_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int cs3308_remove(struct i2c_client *client)
+static void cs3308_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index f6dd5edf77dd..591b1e7b24ee 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -178,14 +178,13 @@ static int cs5345_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int cs5345_remove(struct i2c_client *client)
+static void cs5345_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct cs5345_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 9a411106cfb3..9461589aea30 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -190,14 +190,13 @@ static int cs53l32a_probe(struct i2c_client *client,
return 0;
}
-static int cs53l32a_remove(struct i2c_client *client)
+static void cs53l32a_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct cs53l32a_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id cs53l32a_id[] = {
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index dc31944c7d5b..f1a978af82ef 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -6026,7 +6026,7 @@ static int cx25840_probe(struct i2c_client *client,
return 0;
}
-static int cx25840_remove(struct i2c_client *client)
+static void cx25840_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct cx25840_state *state = to_state(sd);
@@ -6034,7 +6034,6 @@ static int cx25840_remove(struct i2c_client *client)
cx25840_ir_remove(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id cx25840_id[] = {
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 206d74338b9c..af59687383aa 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -190,7 +190,7 @@ err_cleanup:
return rval;
}
-static int dw9714_remove(struct i2c_client *client)
+static void dw9714_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
@@ -206,8 +206,6 @@ static int dw9714_remove(struct i2c_client *client)
}
pm_runtime_set_suspended(&client->dev);
dw9714_subdev_cleanup(dw9714_dev);
-
- return 0;
}
/*
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index c086580efac7..0f47ef015a1d 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -499,7 +499,7 @@ err_free_handler:
return ret;
}
-static int dw9768_remove(struct i2c_client *client)
+static void dw9768_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9768 *dw9768 = sd_to_dw9768(sd);
@@ -511,8 +511,6 @@ static int dw9768_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
dw9768_runtime_suspend(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct of_device_id dw9768_of_table[] = {
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
index 01c372925a80..3599720db7e9 100644
--- a/drivers/media/i2c/dw9807-vcm.c
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -216,7 +216,7 @@ err_cleanup:
return rval;
}
-static int dw9807_remove(struct i2c_client *client)
+static void dw9807_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
@@ -224,8 +224,6 @@ static int dw9807_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
dw9807_subdev_cleanup(dw9807_dev);
-
- return 0;
}
/*
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 873d614339bb..ff9bb9fc97dd 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1460,7 +1460,7 @@ err_mutex:
return ret;
}
-static int __exit et8ek8_remove(struct i2c_client *client)
+static void __exit et8ek8_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
@@ -1477,8 +1477,6 @@ static int __exit et8ek8_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&sensor->subdev);
media_entity_cleanup(&sensor->subdev.entity);
mutex_destroy(&sensor->power_lock);
-
- return 0;
}
static const struct of_device_id et8ek8_of_table[] = {
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 055d1aa8410e..e422ac7609b5 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1101,7 +1101,7 @@ check_hwcfg_error:
return ret;
}
-static int hi556_remove(struct i2c_client *client)
+static void hi556_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi556 *hi556 = to_hi556(sd);
@@ -1111,8 +1111,6 @@ static int hi556_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&hi556->mutex);
-
- return 0;
}
static int hi556_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index ad35c3ff3611..c5b69823f257 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -2143,7 +2143,7 @@ err_mutex:
return ret;
}
-static int hi846_remove(struct i2c_client *client)
+static void hi846_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi846 *hi846 = to_hi846(sd);
@@ -2158,8 +2158,6 @@ static int hi846_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&hi846->mutex);
-
- return 0;
}
static const struct dev_pm_ops hi846_pm_ops = {
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 7e85349e1852..5a82b15a9513 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -2903,7 +2903,7 @@ check_hwcfg_error:
return ret;
}
-static int hi847_remove(struct i2c_client *client)
+static void hi847_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi847 *hi847 = to_hi847(sd);
@@ -2913,8 +2913,6 @@ static int hi847_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&hi847->mutex);
-
- return 0;
}
static int hi847_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index b9516b2f1c15..a0e17bb9d4ca 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -1061,7 +1061,7 @@ error_probe:
return ret;
}
-static int imx208_remove(struct i2c_client *client)
+static void imx208_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx208 *imx208 = to_imx208(sd);
@@ -1075,8 +1075,6 @@ static int imx208_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx208->imx208_mx);
-
- return 0;
}
static const struct dev_pm_ops imx208_pm_ops = {
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 83c1737abeec..710c9fb515fd 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -1080,7 +1080,7 @@ free_ctrl:
return ret;
}
-static int imx214_remove(struct i2c_client *client)
+static void imx214_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx214 *imx214 = to_imx214(sd);
@@ -1093,8 +1093,6 @@ static int imx214_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx214->mutex);
-
- return 0;
}
static const struct of_device_id imx214_of_match[] = {
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index e10af3f74b38..77bd79a5954e 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -1562,7 +1562,7 @@ error_power_off:
return ret;
}
-static int imx219_remove(struct i2c_client *client)
+static void imx219_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx219 *imx219 = to_imx219(sd);
@@ -1575,8 +1575,6 @@ static int imx219_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
imx219_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct of_device_id imx219_dt_ids[] = {
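/*
 * Editorial sketch, not from the patch: several sensor drivers in this series
 * (imx219, imx258, ov2659, ov5693, ov7251, ...) share the same runtime-PM
 * teardown in their now-void remove callback.  Assumes the headers from the
 * sketch above plus <linux/pm_runtime.h>, <media/v4l2-async.h> and
 * <media/media-entity.h>; example_sensor_power_off() is hypothetical.
 */
static void example_sensor_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_async_unregister_subdev(sd);
	media_entity_cleanup(&sd->entity);
	v4l2_ctrl_handler_free(sd->ctrl_handler);

	/*
	 * Stop runtime PM and, if the device is still active, power it off
	 * by hand before marking it suspended.  Any failure can only be
	 * logged now that the callback has no return value.
	 */
	pm_runtime_disable(&client->dev);
	if (!pm_runtime_status_suspended(&client->dev))
		example_sensor_power_off(&client->dev);
	pm_runtime_set_suspended(&client->dev);
}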
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index c249507aa2db..eab5fc1ee2f7 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1338,7 +1338,7 @@ error_identify:
return ret;
}
-static int imx258_remove(struct i2c_client *client)
+static void imx258_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx258 *imx258 = to_imx258(sd);
@@ -1351,8 +1351,6 @@ static int imx258_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
imx258_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops imx258_pm_ops = {
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 7de1f2948e53..a00761b1e18c 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -2142,7 +2142,7 @@ err_regmap:
return ret;
}
-static int imx274_remove(struct i2c_client *client)
+static void imx274_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct stimx274 *imx274 = to_imx274(sd);
@@ -2157,7 +2157,6 @@ static int imx274_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
mutex_destroy(&imx274->lock);
- return 0;
}
static const struct dev_pm_ops imx274_pm_ops = {
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 99f2a50d39a4..1ce64dcdf7f0 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1119,7 +1119,7 @@ free_err:
return ret;
}
-static int imx290_remove(struct i2c_client *client)
+static void imx290_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx290 *imx290 = to_imx290(sd);
@@ -1134,8 +1134,6 @@ static int imx290_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(imx290->dev))
imx290_power_off(imx290->dev);
pm_runtime_set_suspended(imx290->dev);
-
- return 0;
}
static const struct of_device_id imx290_of_match[] = {
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index a2b5a34de76b..245a18fb40ad 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2523,7 +2523,7 @@ error_probe:
return ret;
}
-static int imx319_remove(struct i2c_client *client)
+static void imx319_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx319 *imx319 = to_imx319(sd);
@@ -2536,8 +2536,6 @@ static int imx319_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx319->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx319_pm_ops = {
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 062125501788..7b0a9086447d 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -1089,7 +1089,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_remove(struct i2c_client *client)
+static void imx334_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx334 *imx334 = to_imx334(sd);
@@ -1102,8 +1102,6 @@ static int imx334_remove(struct i2c_client *client)
pm_runtime_suspended(&client->dev);
mutex_destroy(&imx334->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx334_pm_ops = {
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 410d6b86feb5..078ede2b7a00 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -1083,7 +1083,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx335_remove(struct i2c_client *client)
+static void imx335_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx335 *imx335 = to_imx335(sd);
@@ -1098,8 +1098,6 @@ static int imx335_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx335->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx335_pm_ops = {
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 3922b9305978..b46178681c05 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1810,7 +1810,7 @@ error_probe:
return ret;
}
-static int imx355_remove(struct i2c_client *client)
+static void imx355_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx355 *imx355 = to_imx355(sd);
@@ -1823,8 +1823,6 @@ static int imx355_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx355->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx355_pm_ops = {
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index a1394d6c1432..7f6d29e0e7c4 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -1257,7 +1257,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx412_remove(struct i2c_client *client)
+static void imx412_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx412 *imx412 = to_imx412(sd);
@@ -1272,8 +1272,6 @@ static int imx412_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx412->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx412_pm_ops = {
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 56674173524f..ee6bbbb977f7 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -915,7 +915,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
return err;
}
-static int ir_remove(struct i2c_client *client)
+static void ir_remove(struct i2c_client *client)
{
struct IR_i2c *ir = i2c_get_clientdata(client);
@@ -924,8 +924,6 @@ static int ir_remove(struct i2c_client *client)
i2c_unregister_device(ir->tx_c);
rc_unregister_device(ir->rc);
-
- return 0;
}
static const struct i2c_device_id ir_kbd_id[] = {
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index dc3068549dfa..246d8d182a8e 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1544,7 +1544,7 @@ err_entity_cleanup:
return ret;
}
-static int isl7998x_remove(struct i2c_client *client)
+static void isl7998x_remove(struct i2c_client *client)
{
struct isl7998x *isl7998x = i2c_to_isl7998x(client);
@@ -1552,8 +1552,6 @@ static int isl7998x_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&isl7998x->subdev);
isl7998x_remove_controls(isl7998x);
media_entity_cleanup(&isl7998x->subdev.entity);
-
- return 0;
}
static const struct of_device_id isl7998x_of_match[] = {
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index c077f53b9c30..215d9a43b0b9 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -675,14 +675,13 @@ static int ks0127_probe(struct i2c_client *client, const struct i2c_device_id *i
return 0;
}
-static int ks0127_remove(struct i2c_client *client)
+static void ks0127_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
ks0127_write(sd, KS_OFMTA, 0x20); /* tristate */
ks0127_write(sd, KS_CMDA, 0x2c | 0x80); /* power down */
- return 0;
}
static const struct i2c_device_id ks0127_id[] = {
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 9e34ccce4fc3..edad3138cb07 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -443,7 +443,7 @@ static int lm3560_probe(struct i2c_client *client,
return 0;
}
-static int lm3560_remove(struct i2c_client *client)
+static void lm3560_remove(struct i2c_client *client)
{
struct lm3560_flash *flash = i2c_get_clientdata(client);
unsigned int i;
@@ -453,8 +453,6 @@ static int lm3560_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&flash->ctrls_led[i]);
media_entity_cleanup(&flash->subdev_led[i].entity);
}
-
- return 0;
}
static const struct i2c_device_id lm3560_id_table[] = {
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index c76ccf67a909..0aaa963917d8 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -377,15 +377,13 @@ static int lm3646_probe(struct i2c_client *client,
return 0;
}
-static int lm3646_remove(struct i2c_client *client)
+static void lm3646_remove(struct i2c_client *client)
{
struct lm3646_flash *flash = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(&flash->subdev_led);
v4l2_ctrl_handler_free(&flash->ctrls_led);
media_entity_cleanup(&flash->subdev_led.entity);
-
- return 0;
}
static const struct i2c_device_id lm3646_id_table[] = {
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index 0a1efc1417bc..2ab91b993c33 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -154,12 +154,11 @@ static int m52790_probe(struct i2c_client *client,
return 0;
}
-static int m52790_remove(struct i2c_client *client)
+static void m52790_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index c19590389bfe..2201d2a26353 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -1020,15 +1020,13 @@ error:
return ret;
}
-static int m5mols_remove(struct i2c_client *client)
+static void m5mols_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id m5mols_id[] = {
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 0eea200124d2..1019020f3a37 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1403,15 +1403,13 @@ err_reg:
return ret;
}
-static int max2175_remove(struct i2c_client *client)
+static void max2175_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct max2175 *ctx = max2175_from_sd(sd);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
v4l2_async_unregister_subdev(sd);
-
- return 0;
}
static const struct i2c_device_id max2175_id[] = {
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 3684faa72253..9c083cf14231 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1378,7 +1378,7 @@ err_powerdown:
return ret;
}
-static int max9286_remove(struct i2c_client *client)
+static void max9286_remove(struct i2c_client *client)
{
struct max9286_priv *priv = sd_to_max9286(i2c_get_clientdata(client));
@@ -1391,8 +1391,6 @@ static int max9286_remove(struct i2c_client *client)
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
max9286_cleanup_dt(priv);
-
- return 0;
}
static const struct of_device_id max9286_dt_ids[] = {
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 48cc0b0922f4..49ec59b0ca43 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -415,15 +415,13 @@ cleanup:
return ret;
}
-static int ml86v7667_remove(struct i2c_client *client)
+static void ml86v7667_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ml86v7667_priv *priv = to_ml86v7667(sd);
v4l2_ctrl_handler_free(&priv->hdl);
v4l2_device_unregister_subdev(&priv->sd);
-
- return 0;
}
static const struct i2c_device_id ml86v7667_id[] = {
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 39530d43590e..4ce7a15a9884 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -859,7 +859,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int msp_remove(struct i2c_client *client)
+static void msp_remove(struct i2c_client *client)
{
struct msp_state *state = to_state(i2c_get_clientdata(client));
@@ -872,7 +872,6 @@ static int msp_remove(struct i2c_client *client)
msp_reset(client);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index ad13b0c890c0..ebf9cf1e1bce 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -833,7 +833,7 @@ error_hdl_free:
return ret;
}
-static int mt9m001_remove(struct i2c_client *client)
+static void mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
@@ -853,8 +853,6 @@ static int mt9m001_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&mt9m001->hdl);
mutex_destroy(&mt9m001->mutex);
-
- return 0;
}
static const struct i2c_device_id mt9m001_id[] = {
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index ba0c0ea91c95..76b8c9c08c82 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -858,7 +858,7 @@ error_sensor:
return ret;
}
-static int mt9m032_remove(struct i2c_client *client)
+static void mt9m032_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -867,7 +867,6 @@ static int mt9m032_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&sensor->ctrls);
media_entity_cleanup(&subdev->entity);
mutex_destroy(&sensor->lock);
- return 0;
}
static const struct i2c_device_id mt9m032_id_table[] = {
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index afc86efa9e3e..f5fe272d1205 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1359,15 +1359,13 @@ out_hdlfree:
return ret;
}
-static int mt9m111_remove(struct i2c_client *client)
+static void mt9m111_remove(struct i2c_client *client)
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
v4l2_async_unregister_subdev(&mt9m111->subdev);
media_entity_cleanup(&mt9m111->subdev.entity);
v4l2_ctrl_handler_free(&mt9m111->hdl);
-
- return 0;
}
static const struct of_device_id mt9m111_of_match[] = {
{ .compatible = "micron,mt9m111", },
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 1fd4dc6e4726..45f7b5e52bc3 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1209,7 +1209,7 @@ done:
return ret;
}
-static int mt9p031_remove(struct i2c_client *client)
+static void mt9p031_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -1218,8 +1218,6 @@ static int mt9p031_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
mutex_destroy(&mt9p031->power_lock);
-
- return 0;
}
static const struct i2c_device_id mt9p031_id[] = {
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index b651ee4a26e8..d5abe4a7ef07 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -961,7 +961,7 @@ done:
return ret;
}
-static int mt9t001_remove(struct i2c_client *client)
+static void mt9t001_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -969,7 +969,6 @@ static int mt9t001_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&mt9t001->ctrls);
v4l2_device_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
- return 0;
}
static const struct i2c_device_id mt9t001_id[] = {
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index 8d2e3caa9b28..ad564095d0cf 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1102,14 +1102,12 @@ static int mt9t112_probe(struct i2c_client *client,
return v4l2_async_register_subdev(&priv->subdev);
}
-static int mt9t112_remove(struct i2c_client *client)
+static void mt9t112_remove(struct i2c_client *client)
{
struct mt9t112_priv *priv = to_mt9t112(client);
clk_disable_unprepare(priv->clk);
v4l2_async_unregister_subdev(&priv->subdev);
-
- return 0;
}
static const struct i2c_device_id mt9t112_id[] = {
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 7699e64e1127..9952ce06ebb2 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -561,7 +561,7 @@ static int mt9v011_probe(struct i2c_client *c,
return 0;
}
-static int mt9v011_remove(struct i2c_client *c)
+static void mt9v011_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct mt9v011 *core = to_mt9v011(sd);
@@ -572,8 +572,6 @@ static int mt9v011_remove(struct i2c_client *c)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&core->ctrls);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 4cfdd3dfbd42..bc4388ccc2a8 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -1192,7 +1192,7 @@ err:
return ret;
}
-static int mt9v032_remove(struct i2c_client *client)
+static void mt9v032_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -1200,8 +1200,6 @@ static int mt9v032_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
v4l2_ctrl_handler_free(&mt9v032->ctrls);
media_entity_cleanup(&subdev->entity);
-
- return 0;
}
static const struct mt9v032_model_data mt9v032_model_data[] = {
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 2dc4a0f24ce8..fe18e5258d7a 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1238,7 +1238,7 @@ error_free_ctrls:
return ret;
}
-static int mt9v111_remove(struct i2c_client *client)
+static void mt9v111_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
@@ -1253,8 +1253,6 @@ static int mt9v111_remove(struct i2c_client *client)
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
-
- return 0;
}
static const struct of_device_id mt9v111_of_match[] = {
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index bc5187f46365..ecaf5e9057f1 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -789,7 +789,7 @@ np_err:
return ret;
}
-static int noon010_remove(struct i2c_client *client)
+static void noon010_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct noon010_info *info = to_noon010(sd);
@@ -797,8 +797,6 @@ static int noon010_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id noon010_id[] = {
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 87179fc04e00..35663c10fcd9 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1015,7 +1015,7 @@ check_hwcfg_error:
return ret;
}
-static int og01a1b_remove(struct i2c_client *client)
+static void og01a1b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct og01a1b *og01a1b = to_og01a1b(sd);
@@ -1025,8 +1025,6 @@ static int og01a1b_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&og01a1b->mutex);
-
- return 0;
}
static int og01a1b_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 0f08c05333ea..2c1eb724d8e5 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -975,7 +975,7 @@ err_destroy_mutex:
return ret;
}
-static int ov02a10_remove(struct i2c_client *client)
+static void ov02a10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov02a10 *ov02a10 = to_ov02a10(sd);
@@ -988,8 +988,6 @@ static int ov02a10_remove(struct i2c_client *client)
ov02a10_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&ov02a10->mutex);
-
- return 0;
}
static const struct of_device_id ov02a10_of_match[] = {
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index e5ef6466a3ec..c1703596c3dc 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -1415,7 +1415,7 @@ check_hwcfg_error:
return ret;
}
-static int ov08d10_remove(struct i2c_client *client)
+static void ov08d10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov08d10 *ov08d10 = to_ov08d10(sd);
@@ -1425,8 +1425,6 @@ static int ov08d10_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov08d10->mutex);
-
- return 0;
}
static int ov08d10_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index d5fe67c763f7..e618b613e078 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1769,7 +1769,7 @@ error_handler_free:
return ret;
}
-static int ov13858_remove(struct i2c_client *client)
+static void ov13858_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov13858 *ov13858 = to_ov13858(sd);
@@ -1779,8 +1779,6 @@ static int ov13858_remove(struct i2c_client *client)
ov13858_free_controls(ov13858);
pm_runtime_disable(&client->dev);
-
- return 0;
}
static const struct i2c_device_id ov13858_id_table[] = {
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index 7caeae641051..549e5d93e568 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -1447,7 +1447,7 @@ error_handler_free:
return ret;
}
-static int ov13b10_remove(struct i2c_client *client)
+static void ov13b10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov13b10 *ov13b = to_ov13b10(sd);
@@ -1457,8 +1457,6 @@ static int ov13b10_remove(struct i2c_client *client)
ov13b10_free_controls(ov13b);
pm_runtime_disable(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov13b10_pm_ops = {
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 4b75da55b260..29ed0ef8c033 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1271,7 +1271,7 @@ err_clk:
return ret;
}
-static int ov2640_remove(struct i2c_client *client)
+static void ov2640_remove(struct i2c_client *client)
{
struct ov2640_priv *priv = to_ov2640(client);
@@ -1281,7 +1281,6 @@ static int ov2640_remove(struct i2c_client *client)
media_entity_cleanup(&priv->subdev.entity);
v4l2_device_unregister_subdev(&priv->subdev);
clk_disable_unprepare(priv->clk);
- return 0;
}
static const struct i2c_device_id ov2640_id[] = {
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 13ded5b2aa66..42fc64ada08c 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1544,7 +1544,7 @@ error:
return ret;
}
-static int ov2659_remove(struct i2c_client *client)
+static void ov2659_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2659 *ov2659 = to_ov2659(sd);
@@ -1558,8 +1558,6 @@ static int ov2659_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
ov2659_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov2659_pm_ops = {
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 906c711f6821..de66d3395a4d 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1097,7 +1097,7 @@ lock_destroy:
return ret;
}
-static int ov2680_remove(struct i2c_client *client)
+static void ov2680_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -1106,8 +1106,6 @@ static int ov2680_remove(struct i2c_client *client)
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-
- return 0;
}
static int __maybe_unused ov2680_suspend(struct device *dev)
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index b6e010ea3249..a3b524f15d89 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -798,7 +798,7 @@ err_destroy_mutex:
return ret;
}
-static int ov2685_remove(struct i2c_client *client)
+static void ov2685_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -814,8 +814,6 @@ static int ov2685_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
__ov2685_power_off(ov2685);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index d5f0eabf20c6..5d74ad479214 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1053,7 +1053,7 @@ check_hwcfg_error:
return ret;
}
-static int ov2740_remove(struct i2c_client *client)
+static void ov2740_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2740 *ov2740 = to_ov2740(sd);
@@ -1063,8 +1063,6 @@ static int ov2740_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov2740->mutex);
-
- return 0;
}
static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 502f0b62e950..1852e1cfc7df 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3906,7 +3906,7 @@ entity_cleanup:
return ret;
}
-static int ov5640_remove(struct i2c_client *client)
+static void ov5640_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -3915,8 +3915,6 @@ static int ov5640_remove(struct i2c_client *client)
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->lock);
-
- return 0;
}
static const struct i2c_device_id ov5640_id[] = {
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 562c62f192c4..81e4e87e1821 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -1256,7 +1256,7 @@ free_ctrl:
return ret;
}
-static int ov5645_remove(struct i2c_client *client)
+static void ov5645_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -1265,8 +1265,6 @@ static int ov5645_remove(struct i2c_client *client)
media_entity_cleanup(&ov5645->sd.entity);
v4l2_ctrl_handler_free(&ov5645->ctrls);
mutex_destroy(&ov5645->power_lock);
-
- return 0;
}
static const struct i2c_device_id ov5645_id[] = {
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index d346d18ce629..847a7bbb69c5 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1448,7 +1448,7 @@ mutex_destroy:
return ret;
}
-static int ov5647_remove(struct i2c_client *client)
+static void ov5647_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5647 *sensor = to_sensor(sd);
@@ -1459,8 +1459,6 @@ static int ov5647_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
pm_runtime_disable(&client->dev);
mutex_destroy(&sensor->lock);
-
- return 0;
}
static const struct dev_pm_ops ov5647_pm_ops = {
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index dfcd33e9ee13..84604ea7bdf9 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -2587,7 +2587,7 @@ error_endpoint:
return ret;
}
-static int ov5648_remove(struct i2c_client *client)
+static void ov5648_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
@@ -2597,8 +2597,6 @@ static int ov5648_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->mutex);
media_entity_cleanup(&subdev->entity);
-
- return 0;
}
static const struct dev_pm_ops ov5648_pm_ops = {
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 02f75c18e480..bc9fc3bc90c2 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2557,7 +2557,7 @@ error_print:
return ret;
}
-static int ov5670_remove(struct i2c_client *client)
+static void ov5670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5670 *ov5670 = to_ov5670(sd);
@@ -2568,8 +2568,6 @@ static int ov5670_remove(struct i2c_client *client)
mutex_destroy(&ov5670->mutex);
pm_runtime_disable(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov5670_pm_ops = {
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 82ba9f56baec..94dc8cb7a7c0 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1175,7 +1175,7 @@ check_hwcfg_error:
return ret;
}
-static int ov5675_remove(struct i2c_client *client)
+static void ov5675_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5675 *ov5675 = to_ov5675(sd);
@@ -1185,8 +1185,6 @@ static int ov5675_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov5675->mutex);
-
- return 0;
}
static int ov5675_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 82a9b2de7735..a97ec132ba3a 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -1501,7 +1501,7 @@ err_ctrl_handler_free:
return ret;
}
-static int ov5693_remove(struct i2c_client *client)
+static void ov5693_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
@@ -1519,8 +1519,6 @@ static int ov5693_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
ov5693_sensor_powerdown(ov5693);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov5693_pm_ops = {
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 910309783885..61906fc54e37 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1361,7 +1361,7 @@ err_destroy_mutex:
return ret;
}
-static int ov5695_remove(struct i2c_client *client)
+static void ov5695_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -1377,8 +1377,6 @@ static int ov5695_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
__ov5695_power_off(ov5695);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 6458e96d9091..18f041e985b7 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1096,13 +1096,12 @@ ectlhdlfree:
return ret;
}
-static int ov6650_remove(struct i2c_client *client)
+static void ov6650_remove(struct i2c_client *client)
{
struct ov6650 *priv = to_ov6650(client);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- return 0;
}
static const struct i2c_device_id ov6650_id[] = {
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 1bd797c7926b..88e987435285 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1767,7 +1767,7 @@ destroy_mutex:
return ret;
}
-static int ov7251_remove(struct i2c_client *client)
+static void ov7251_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov7251 *ov7251 = to_ov7251(sd);
@@ -1781,8 +1781,6 @@ static int ov7251_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(ov7251->dev))
ov7251_set_power_off(ov7251->dev);
pm_runtime_set_suspended(ov7251->dev);
-
- return 0;
}
static const struct dev_pm_ops ov7251_pm_ops = {
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index 977cd2d8ad33..5e2d67f0f9f2 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -70,13 +70,11 @@ static int ov7640_probe(struct i2c_client *client,
}
-static int ov7640_remove(struct i2c_client *client)
+static void ov7640_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
static const struct i2c_device_id ov7640_id[] = {
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 1be2c0e5bdc1..4b9b156b53c7 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -2009,7 +2009,7 @@ power_off:
return ret;
}
-static int ov7670_remove(struct i2c_client *client)
+static void ov7670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov7670_info *info = to_state(sd);
@@ -2017,7 +2017,6 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&info->sd.entity);
- return 0;
}
static const struct i2c_device_id ov7670_id[] = {
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 78602a2f70b0..4189e3fc3d53 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1521,7 +1521,7 @@ error_mutex_destroy:
return ret;
}
-static int ov772x_remove(struct i2c_client *client)
+static void ov772x_remove(struct i2c_client *client)
{
struct ov772x_priv *priv = to_ov772x(i2c_get_clientdata(client));
@@ -1532,8 +1532,6 @@ static int ov772x_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
mutex_destroy(&priv->lock);
-
- return 0;
}
static const struct i2c_device_id ov772x_id[] = {
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 2539cfee85c8..c9fd9b0bc54a 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1153,7 +1153,7 @@ error_detect:
return ret;
}
-static int ov7740_remove(struct i2c_client *client)
+static void ov7740_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -1170,7 +1170,6 @@ static int ov7740_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
ov7740_set_power(ov7740, 0);
- return 0;
}
static int __maybe_unused ov7740_runtime_suspend(struct device *dev)
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index a9728afc81d4..efa18d026ac3 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -2440,7 +2440,7 @@ check_hwcfg_error:
return ret;
}
-static int ov8856_remove(struct i2c_client *client)
+static void ov8856_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov8856 *ov8856 = to_ov8856(sd);
@@ -2452,8 +2452,6 @@ static int ov8856_remove(struct i2c_client *client)
mutex_destroy(&ov8856->mutex);
__ov8856_power_off(ov8856);
-
- return 0;
}
static int ov8856_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index b8f4f0d3e33d..a233c34b168e 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -3119,7 +3119,7 @@ error_endpoint:
return ret;
}
-static int ov8865_remove(struct i2c_client *client)
+static void ov8865_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
@@ -3131,8 +3131,6 @@ static int ov8865_remove(struct i2c_client *client)
media_entity_cleanup(&subdev->entity);
v4l2_fwnode_endpoint_free(&sensor->endpoint);
-
- return 0;
}
static const struct dev_pm_ops ov8865_pm_ops = {
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 2e0b315801e5..df144a2f6eda 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -1091,7 +1091,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int ov9282_remove(struct i2c_client *client)
+static void ov9282_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9282 *ov9282 = to_ov9282(sd);
@@ -1106,8 +1106,6 @@ static int ov9282_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&ov9282->mutex);
-
- return 0;
}
static const struct dev_pm_ops ov9282_pm_ops = {
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 9f44ed52d164..8b80be33c5f4 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -744,15 +744,13 @@ ectrlinit:
return ret;
}
-static int ov9640_remove(struct i2c_client *client)
+static void ov9640_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9640_priv *priv = to_ov9640_sensor(sd);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
-
- return 0;
}
static const struct i2c_device_id ov9640_id[] = {
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index c313e11a9754..4d458993e6d6 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1584,7 +1584,7 @@ err_mutex:
return ret;
}
-static int ov965x_remove(struct i2c_client *client)
+static void ov965x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov965x *ov965x = to_ov965x(sd);
@@ -1593,8 +1593,6 @@ static int ov965x_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov965x->lock);
-
- return 0;
}
static const struct i2c_device_id ov965x_id[] = {
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index df538ceb71c3..8b0a158cb297 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -930,7 +930,7 @@ check_hwcfg_error:
return ret;
}
-static int ov9734_remove(struct i2c_client *client)
+static void ov9734_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9734 *ov9734 = to_ov9734(sd);
@@ -940,8 +940,6 @@ static int ov9734_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov9734->mutex);
-
- return 0;
}
static int ov9734_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 2615ad154f49..a2263fa825b5 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -646,7 +646,7 @@ error:
return ret;
}
-static int rdacm20_remove(struct i2c_client *client)
+static void rdacm20_remove(struct i2c_client *client)
{
struct rdacm20_device *dev = i2c_to_rdacm20(client);
@@ -655,8 +655,6 @@ static int rdacm20_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->ctrls);
media_entity_cleanup(&dev->sd.entity);
i2c_unregister_device(dev->sensor);
-
- return 0;
}
static void rdacm20_shutdown(struct i2c_client *client)
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index ef31cf5f23ca..9ccc56c30d3b 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -614,7 +614,7 @@ error:
return ret;
}
-static int rdacm21_remove(struct i2c_client *client)
+static void rdacm21_remove(struct i2c_client *client)
{
struct rdacm21_device *dev = sd_to_rdacm21(i2c_get_clientdata(client));
@@ -622,8 +622,6 @@ static int rdacm21_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->ctrls);
i2c_unregister_device(dev->isp);
fwnode_handle_put(dev->sd.fwnode);
-
- return 0;
}
static const struct of_device_id rdacm21_of_ids[] = {
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index 2e4018c26912..1c3502f34cd3 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1398,7 +1398,7 @@ err_free_ctrl:
return ret;
}
-static int rj54n1_remove(struct i2c_client *client)
+static void rj54n1_remove(struct i2c_client *client)
{
struct rj54n1 *rj54n1 = to_rj54n1(client);
@@ -1410,8 +1410,6 @@ static int rj54n1_remove(struct i2c_client *client)
clk_put(rj54n1->clk);
v4l2_ctrl_handler_free(&rj54n1->hdl);
v4l2_async_unregister_subdev(&rj54n1->subdev);
-
- return 0;
}
static const struct i2c_device_id rj54n1_id[] = {
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index e2b88c5e4f98..d96ba58ce1e5 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1770,7 +1770,7 @@ out_err:
return ret;
}
-static int s5c73m3_remove(struct i2c_client *client)
+static void s5c73m3_remove(struct i2c_client *client)
{
struct v4l2_subdev *oif_sd = i2c_get_clientdata(client);
struct s5c73m3 *state = oif_sd_to_s5c73m3(oif_sd);
@@ -1785,8 +1785,6 @@ static int s5c73m3_remove(struct i2c_client *client)
media_entity_cleanup(&sensor_sd->entity);
s5c73m3_unregister_spi_driver(state);
-
- return 0;
}
static const struct i2c_device_id s5c73m3_id[] = {
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index af9a305242cd..3dddcd9dd351 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -996,7 +996,7 @@ out_err1:
return ret;
}
-static int s5k4ecgx_remove(struct i2c_client *client)
+static void s5k4ecgx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
@@ -1006,8 +1006,6 @@ static int s5k4ecgx_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&priv->handler);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id s5k4ecgx_id[] = {
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 6a5dceb699a8..5c2253ab3b6f 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -2018,7 +2018,7 @@ err_me:
return ret;
}
-static int s5k5baf_remove(struct i2c_client *c)
+static void s5k5baf_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct s5k5baf *state = to_s5k5baf(sd);
@@ -2030,8 +2030,6 @@ static int s5k5baf_remove(struct i2c_client *c)
sd = &state->cis_sd;
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id s5k5baf_id[] = {
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index f6ecf6f92bb2..a4efd6d10b43 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -354,14 +354,13 @@ static int s5k6a3_probe(struct i2c_client *client)
return ret;
}
-static int s5k6a3_remove(struct i2c_client *client)
+static void s5k6a3_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
pm_runtime_disable(&client->dev);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
- return 0;
}
static const struct i2c_device_id s5k6a3_ids[] = {
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 105a4b7d8354..059211788a65 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -1621,15 +1621,13 @@ out_err:
return ret;
}
-static int s5k6aa_remove(struct i2c_client *client)
+static void s5k6aa_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id s5k6aa_id[] = {
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index d1e0716bdfff..d6a51beabd02 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -484,7 +484,7 @@ static int saa6588_probe(struct i2c_client *client,
return 0;
}
-static int saa6588_remove(struct i2c_client *client)
+static void saa6588_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa6588 *s = to_saa6588(sd);
@@ -492,8 +492,6 @@ static int saa6588_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
cancel_delayed_work_sync(&s->work);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index a7f043cad149..5928cc6f4595 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -764,13 +764,12 @@ static int saa6752hs_probe(struct i2c_client *client,
return 0;
}
-static int saa6752hs_remove(struct i2c_client *client)
+static void saa6752hs_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- return 0;
}
static const struct i2c_device_id saa6752hs_id[] = {
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 0c7a9ce0a693..5067525d8b11 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -428,14 +428,13 @@ static int saa7110_probe(struct i2c_client *client,
return 0;
}
-static int saa7110_remove(struct i2c_client *client)
+static void saa7110_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa7110 *decoder = to_saa7110(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 15ff80e6301e..86e70a980218 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1927,13 +1927,12 @@ static int saa711x_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int saa711x_remove(struct i2c_client *client)
+static void saa711x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id saa711x_id[] = {
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 891192f6412a..78c9388c2ea1 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -785,14 +785,13 @@ static int saa7127_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int saa7127_remove(struct i2c_client *client)
+static void saa7127_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
/* Turn off TV output */
saa7127_set_video_enable(sd, 0);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index adf905360171..4f3d1b432a4e 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1324,13 +1324,12 @@ static int saa717x_probe(struct i2c_client *client,
return 0;
}
-static int saa717x_remove(struct i2c_client *client)
+static void saa717x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index 7a04422df8c8..266462325d30 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -322,7 +322,7 @@ static int saa7185_probe(struct i2c_client *client,
return 0;
}
-static int saa7185_remove(struct i2c_client *client)
+static void saa7185_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa7185 *encoder = to_saa7185(sd);
@@ -330,7 +330,6 @@ static int saa7185_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
/* SW: output off is active */
saa7185_write(sd, 0x61, (encoder->reg[0x61]) | 0x40);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index ad239280c42e..927a9ec41463 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -357,13 +357,11 @@ static int sony_btf_mpx_probe(struct i2c_client *client,
return 0;
}
-static int sony_btf_mpx_remove(struct i2c_client *client)
+static void sony_btf_mpx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index 19c0252df2f1..ff18693beb5c 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -732,13 +732,12 @@ static int sr030pc30_probe(struct i2c_client *client,
return 0;
}
-static int sr030pc30_remove(struct i2c_client *client)
+static void sr030pc30_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id sr030pc30_id[] = {
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 16cc547976dd..31b89aff0e86 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -1067,7 +1067,7 @@ mutex_cleanup:
return ret;
}
-static int mipid02_remove(struct i2c_client *client)
+static void mipid02_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -1078,8 +1078,6 @@ static int mipid02_remove(struct i2c_client *client)
mipid02_set_power_off(bridge);
media_entity_cleanup(&bridge->sd.entity);
mutex_destroy(&bridge->lock);
-
- return 0;
}
static const struct of_device_id mipid02_dt_ids[] = {
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index e18b8947ad7e..d99eedbdf011 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2169,7 +2169,7 @@ err_hdl:
return err;
}
-static int tc358743_remove(struct i2c_client *client)
+static void tc358743_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tc358743_state *state = to_state(sd);
@@ -2185,8 +2185,6 @@ static int tc358743_remove(struct i2c_client *client)
mutex_destroy(&state->confctl_mutex);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(&state->hdl);
-
- return 0;
}
static const struct i2c_device_id tc358743_id[] = {
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index f66ac14cffad..83931826cf6f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2805,7 +2805,7 @@ err_free_state:
return ret;
}
-static int tda1997x_remove(struct i2c_client *client)
+static void tda1997x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tda1997x_state *state = to_state(sd);
@@ -2827,8 +2827,6 @@ static int tda1997x_remove(struct i2c_client *client)
mutex_destroy(&state->lock);
kfree(state);
-
- return 0;
}
static struct i2c_driver tda1997x_i2c_driver = {
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index cbdc9be0a597..11e918311b13 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -390,7 +390,7 @@ static int tda7432_probe(struct i2c_client *client,
return 0;
}
-static int tda7432_remove(struct i2c_client *client)
+static void tda7432_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tda7432 *t = to_state(sd);
@@ -398,7 +398,6 @@ static int tda7432_remove(struct i2c_client *client)
tda7432_set(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&t->hdl);
- return 0;
}
static const struct i2c_device_id tda7432_id[] = {
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index 8c6dfe746b20..aaa74944fc7c 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -175,12 +175,11 @@ static int tda9840_probe(struct i2c_client *client,
return 0;
}
-static int tda9840_remove(struct i2c_client *client)
+static void tda9840_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id tda9840_id[] = {
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 67378dbcc74b..50e74314f315 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -134,12 +134,11 @@ static int tea6415c_probe(struct i2c_client *client,
return 0;
}
-static int tea6415c_remove(struct i2c_client *client)
+static void tea6415c_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id tea6415c_id[] = {
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 712141b261ed..246f2b10ccc7 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -116,12 +116,11 @@ static int tea6420_probe(struct i2c_client *client,
return 0;
}
-static int tea6420_remove(struct i2c_client *client)
+static void tea6420_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id tea6420_id[] = {
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 8206bf7a5a8f..2a0f9a3d1a66 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -358,13 +358,11 @@ static int ths7303_probe(struct i2c_client *client,
return 0;
}
-static int ths7303_remove(struct i2c_client *client)
+static void ths7303_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
static const struct i2c_device_id ths7303_id[] = {
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index c52fe84cba1b..081ef5a4b950 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -468,7 +468,7 @@ static int ths8200_probe(struct i2c_client *client)
return 0;
}
-static int ths8200_remove(struct i2c_client *client)
+static void ths8200_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ths8200_state *decoder = to_state(sd);
@@ -478,8 +478,6 @@ static int ths8200_remove(struct i2c_client *client)
ths8200_s_power(sd, false);
v4l2_async_unregister_subdev(&decoder->sd);
-
- return 0;
}
static const struct i2c_device_id ths8200_id[] = {
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index e4c21990fea9..937fa1dbaecb 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -177,14 +177,13 @@ static int tlv320aic23b_probe(struct i2c_client *client,
return 0;
}
-static int tlv320aic23b_remove(struct i2c_client *client)
+static void tlv320aic23b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tlv320aic23b_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index e6796e94dadf..9f1ed078b661 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2065,7 +2065,7 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
return 0;
}
-static int tvaudio_remove(struct i2c_client *client)
+static void tvaudio_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct CHIPSTATE *chip = to_state(sd);
@@ -2079,7 +2079,6 @@ static int tvaudio_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&chip->hdl);
- return 0;
}
/* This driver supports many devices and the idea is to let the driver
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index cee60f945036..a746d96875f9 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -1121,7 +1121,7 @@ done:
* Unregister decoder as an i2c client device and V4L2
* device. Complement of tvp514x_probe().
*/
-static int tvp514x_remove(struct i2c_client *client)
+static void tvp514x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tvp514x_decoder *decoder = to_decoder(sd);
@@ -1129,7 +1129,6 @@ static int tvp514x_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&decoder->sd);
media_entity_cleanup(&decoder->sd.entity);
v4l2_ctrl_handler_free(&decoder->hdl);
- return 0;
}
/* TVP5146 Init/Power on Sequence */
static const struct tvp514x_reg tvp5146_init_reg_seq[] = {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 93a980c4e899..859f1cb2fa74 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -2230,7 +2230,7 @@ err:
return res;
}
-static int tvp5150_remove(struct i2c_client *c)
+static void tvp5150_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -2250,8 +2250,6 @@ static int tvp5150_remove(struct i2c_client *c)
v4l2_ctrl_handler_free(&decoder->hdl);
pm_runtime_disable(&c->dev);
pm_runtime_set_suspended(&c->dev);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 2de18833b07b..4ccd218f5584 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1044,7 +1044,7 @@ error:
* Reset the TVP7002 device
* Returns zero.
*/
-static int tvp7002_remove(struct i2c_client *c)
+static void tvp7002_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp7002 *device = to_tvp7002(sd);
@@ -1056,7 +1056,6 @@ static int tvp7002_remove(struct i2c_client *c)
media_entity_cleanup(&device->sd.entity);
#endif
v4l2_ctrl_handler_free(&device->hdl);
- return 0;
}
/* I2C Device ID table */
diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c
index cd05f1ff504d..c7c8dfe8a8a8 100644
--- a/drivers/media/i2c/tw2804.c
+++ b/drivers/media/i2c/tw2804.c
@@ -405,14 +405,13 @@ static int tw2804_probe(struct i2c_client *client,
return 0;
}
-static int tw2804_remove(struct i2c_client *client)
+static void tw2804_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tw2804 *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id tw2804_id[] = {
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index f8e3ab4909d8..d7eef7986b75 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -235,13 +235,12 @@ static int tw9903_probe(struct i2c_client *client,
return 0;
}
-static int tw9903_remove(struct i2c_client *client)
+static void tw9903_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index c528eb01fed0..549ad8f72f12 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -203,13 +203,12 @@ static int tw9906_probe(struct i2c_client *client,
return 0;
}
-static int tw9906_remove(struct i2c_client *client)
+static void tw9906_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 09f5b3986928..853b5acead32 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -993,7 +993,7 @@ error_clk_put:
return ret;
}
-static int tw9910_remove(struct i2c_client *client)
+static void tw9910_remove(struct i2c_client *client)
{
struct tw9910_priv *priv = to_tw9910(client);
@@ -1001,8 +1001,6 @@ static int tw9910_remove(struct i2c_client *client)
gpiod_put(priv->pdn_gpio);
clk_put(priv->clk);
v4l2_async_unregister_subdev(&priv->subdev);
-
- return 0;
}
static const struct i2c_device_id tw9910_id[] = {
diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c
index b0a9c6d7163f..d0659c4392f2 100644
--- a/drivers/media/i2c/uda1342.c
+++ b/drivers/media/i2c/uda1342.c
@@ -72,12 +72,11 @@ static int uda1342_probe(struct i2c_client *client,
return 0;
}
-static int uda1342_remove(struct i2c_client *client)
+static void uda1342_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id uda1342_id[] = {
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index ef35c6574785..4de26ed2ba00 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -210,12 +210,11 @@ static int upd64031a_probe(struct i2c_client *client,
return 0;
}
-static int upd64031a_remove(struct i2c_client *client)
+static void upd64031a_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index d6a1698caa2a..2bfd5443d406 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -181,12 +181,11 @@ static int upd64083_probe(struct i2c_client *client,
return 0;
}
-static int upd64083_remove(struct i2c_client *client)
+static void upd64083_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index e08e3579c0a1..f15ef2d13059 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -895,7 +895,7 @@ error_free_device:
return ret;
}
-static int video_i2c_remove(struct i2c_client *client)
+static void video_i2c_remove(struct i2c_client *client)
{
struct video_i2c_data *data = i2c_get_clientdata(client);
@@ -908,8 +908,6 @@ static int video_i2c_remove(struct i2c_client *client)
data->chip->set_power(data, false);
video_unregister_device(&data->vdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 492af8749fca..c832edad5fa7 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -163,12 +163,11 @@ static int vp27smpx_probe(struct i2c_client *client,
return 0;
}
-static int vp27smpx_remove(struct i2c_client *client)
+static void vp27smpx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 8be03fe5928c..b481ec196b88 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -526,15 +526,13 @@ static int vpx3220_probe(struct i2c_client *client,
return 0;
}
-static int vpx3220_remove(struct i2c_client *client)
+static void vpx3220_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct vpx3220 *decoder = to_vpx3220(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
-
- return 0;
}
static const struct i2c_device_id vpx3220_id[] = {
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 29003dec6f2d..d496bb45f201 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -824,13 +824,12 @@ static int vs6624_probe(struct i2c_client *client,
return ret;
}
-static int vs6624_remove(struct i2c_client *client)
+static void vs6624_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id vs6624_id[] = {
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index ed533834db54..180b35347521 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -234,14 +234,13 @@ static int wm8739_probe(struct i2c_client *client,
return 0;
}
-static int wm8739_remove(struct i2c_client *client)
+static void wm8739_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct wm8739_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id wm8739_id[] = {
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index d4c83c39892a..8ff97867d3cd 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -280,14 +280,13 @@ static int wm8775_probe(struct i2c_client *client,
return 0;
}
-static int wm8775_remove(struct i2c_client *client)
+static void wm8775_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct wm8775_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id wm8775_id[] = {
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index 95e8c29ccc65..d2f5f30582a9 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -228,7 +228,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev;
struct video_device *vfd_enc;
- struct resource *res;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
int ret;
@@ -272,14 +271,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get irq resource");
- ret = -ENOENT;
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ if (dev->enc_irq < 0) {
+ ret = dev->enc_irq;
goto err_res;
}
- dev->enc_irq = platform_get_irq(pdev, 0);
irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->enc_irq,
mtk_vcodec_enc_irq_handler,
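The mtk_vcodec hunk above also switches from platform_get_resource(pdev, IORESOURCE_IRQ, 0) to platform_get_irq(), the preferred way to fetch interrupts on DT platforms: IRQ resources may not exist at device-creation time, and platform_get_irq() resolves the interrupt on demand and returns (and logs) a negative errno on failure. A minimal sketch of the canonical pattern, assuming a probe function whose pdev, priv pointer and example_irq_handler are placeholders and not part of this driver:

	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* failure already reported by platform_get_irq() */

	ret = devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;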
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index 0c2507dc03d6..c6f25200982c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -856,6 +856,8 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
rpf->mem.addr[1] = cfg->mem[1];
rpf->mem.addr[2] = cfg->mem[2];
+ rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0;
+
vsp1->drm->inputs[rpf_index].crop = cfg->src;
vsp1->drm->inputs[rpf_index].compose = cfg->dst;
vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 877a24e5c577..abda40e81612 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -487,7 +487,7 @@ errfr:
return ret;
}
-static int tea5764_i2c_remove(struct i2c_client *client)
+static void tea5764_i2c_remove(struct i2c_client *client)
{
struct tea5764_device *radio = i2c_get_clientdata(client);
@@ -499,7 +499,6 @@ static int tea5764_i2c_remove(struct i2c_client *client)
v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio);
}
- return 0;
}
/* I2C subsystem interface */
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index adb66f869dd2..f9e990a9c3ef 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -384,7 +384,7 @@ err:
return err;
}
-static int saa7706h_remove(struct i2c_client *client)
+static void saa7706h_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa7706h_state *state = to_state(sd);
@@ -393,7 +393,6 @@ static int saa7706h_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
kfree(to_state(sd));
- return 0;
}
static const struct i2c_device_id saa7706h_id[] = {
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 59b3d77e282d..a6ad926c2b4e 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -461,7 +461,7 @@ err_initial:
/*
* si470x_i2c_remove - remove the device
*/
-static int si470x_i2c_remove(struct i2c_client *client)
+static void si470x_i2c_remove(struct i2c_client *client)
{
struct si470x_device *radio = i2c_get_clientdata(client);
@@ -472,7 +472,6 @@ static int si470x_i2c_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&radio->hdl);
v4l2_device_unregister(&radio->v4l2_dev);
- return 0;
}
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index adbf43ff6a21..2aec642133a1 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1623,7 +1623,7 @@ exit:
}
/* si4713_remove - remove the device */
-static int si4713_remove(struct i2c_client *client)
+static void si4713_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct si4713_device *sdev = to_si4713_device(sd);
@@ -1635,8 +1635,6 @@ static int si4713_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
-
- return 0;
}
/* si4713_i2c_driver - i2c driver interface */
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index d8810492db4f..7b0870a9785b 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -165,13 +165,12 @@ static int tef6862_probe(struct i2c_client *client,
return 0;
}
-static int tef6862_remove(struct i2c_client *client)
+static void tef6862_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_state(sd));
- return 0;
}
static const struct i2c_device_id tef6862_id[] = {
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0834d5f866fd..39d2b03e2631 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1416,42 +1416,37 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
struct device *dev = ir->dev;
- char *data;
-
- data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
- if (!data) {
- dev_err(dev, "%s: memory allocation failed!", __func__);
- return;
- }
+ char data[USB_CTRL_MSG_SZ];
/*
* This is a strange one. Windows issues a set address to the device
* on the receive control pipe and expects a certain value pair back
*/
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
- data, USB_CTRL_MSG_SZ, 3000);
+ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, 0, data, USB_CTRL_MSG_SZ, 3000,
+ GFP_KERNEL);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
/* set feature: bit rate 38400 bps */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
- 0xc04e, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
+ 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 4, USB_TYPE_VENDOR,
- 0x0808, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 4, USB_TYPE_VENDOR,
+ 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 2, USB_TYPE_VENDOR,
- 0x0000, 0x0100, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 2, USB_TYPE_VENDOR,
+ 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
@@ -1459,8 +1454,6 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
/* get hw/sw revision? */
mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
- kfree(data);
}
static void mceusb_gen2_init(struct mceusb_dev *ir)
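The mceusb_gen1_init() conversion above relies on two properties of usb_control_msg_recv()/usb_control_msg_send() that differ from raw usb_control_msg(): the wrappers copy the caller's data through an internally allocated DMA-safe bounce buffer, which is why the kzalloc()'d buffer can become a small on-stack array, and they return 0 on success (treating short transfers as errors) rather than a byte count. A minimal sketch of the receive pattern in isolation, with udev and the buffer size as placeholder assumptions:

	u8 buf[2];
	int ret;

	ret = usb_control_msg_recv(udev, 0, USB_REQ_SET_ADDRESS,
				   USB_DIR_IN | USB_TYPE_VENDOR,
				   0, 0, buf, sizeof(buf), 3000, GFP_KERNEL);
	if (ret)
		return ret;	/* 0 on success, negative errno otherwise */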
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index b7823d97b30d..e7959ab1add8 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -438,13 +438,11 @@ static int vidtv_demod_i2c_probe(struct i2c_client *client,
return 0;
}
-static int vidtv_demod_i2c_remove(struct i2c_client *client)
+static void vidtv_demod_i2c_remove(struct i2c_client *client)
{
struct vidtv_demod_state *state = i2c_get_clientdata(client);
kfree(state);
-
- return 0;
}
static struct i2c_driver vidtv_demod_i2c_driver = {
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
index 14b6bc902ee1..aabc97ed736b 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -414,13 +414,11 @@ static int vidtv_tuner_i2c_probe(struct i2c_client *client,
return 0;
}
-static int vidtv_tuner_i2c_remove(struct i2c_client *client)
+static void vidtv_tuner_i2c_remove(struct i2c_client *client)
{
struct vidtv_tuner_dev *tuner_dev = i2c_get_clientdata(client);
kfree(tuner_dev);
-
- return 0;
}
static struct i2c_driver vidtv_tuner_i2c_driver = {
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index a3a8d051dc6c..61ae884ea59a 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -706,7 +706,7 @@ err:
return ret;
}
-static int e4000_remove(struct i2c_client *client)
+static void e4000_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct e4000_dev *dev = container_of(sd, struct e4000_dev, sd);
@@ -717,8 +717,6 @@ static int e4000_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->hdl);
#endif
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id e4000_id_table[] = {
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 1b5961bdf2d5..f30932e1a0f3 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -588,7 +588,7 @@ err:
return ret;
}
-static int fc2580_remove(struct i2c_client *client)
+static void fc2580_remove(struct i2c_client *client)
{
struct fc2580_dev *dev = i2c_get_clientdata(client);
@@ -598,7 +598,6 @@ static int fc2580_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->hdl);
#endif
kfree(dev);
- return 0;
}
static const struct i2c_device_id fc2580_id_table[] = {
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 8647c50b66e5..e32e3e9daa15 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -697,7 +697,7 @@ err:
return ret;
}
-static int m88rs6000t_remove(struct i2c_client *client)
+static void m88rs6000t_remove(struct i2c_client *client)
{
struct m88rs6000t_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->cfg.fe;
@@ -707,8 +707,6 @@ static int m88rs6000t_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id m88rs6000t_id[] = {
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 204e6186bf71..322c806228a5 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -509,11 +509,9 @@ err:
return ret;
}
-static int mt2060_remove(struct i2c_client *client)
+static void mt2060_remove(struct i2c_client *client)
{
dev_dbg(&client->dev, "\n");
-
- return 0;
}
static const struct i2c_device_id mt2060_id_table[] = {
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index c628435a1b06..6422056185a9 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -307,14 +307,13 @@ static int mxl301rf_probe(struct i2c_client *client,
return 0;
}
-static int mxl301rf_remove(struct i2c_client *client)
+static void mxl301rf_remove(struct i2c_client *client)
{
struct mxl301rf_state *state;
state = cfg_to_state(i2c_get_clientdata(client));
state->cfg.fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index 008ad870c00f..9cba0893207c 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -232,14 +232,13 @@ err_mem:
return ret;
}
-static int qm1d1b0004_remove(struct i2c_client *client)
+static void qm1d1b0004_remove(struct i2c_client *client)
{
struct dvb_frontend *fe;
fe = i2c_get_clientdata(client);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 53aa2558f71e..2d60bf501fb5 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -424,14 +424,13 @@ static int qm1d1c0042_probe(struct i2c_client *client,
return 0;
}
-static int qm1d1c0042_remove(struct i2c_client *client)
+static void qm1d1c0042_remove(struct i2c_client *client)
{
struct qm1d1c0042_state *state;
state = cfg_to_state(i2c_get_clientdata(client));
state->cfg.fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 0de587b412d4..476b32c04c20 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -951,7 +951,7 @@ err:
return ret;
}
-static int si2157_remove(struct i2c_client *client)
+static void si2157_remove(struct i2c_client *client)
{
struct si2157_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->fe;
@@ -969,8 +969,6 @@ static int si2157_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
/*
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index bf48f1cd83d2..eb97711c9c68 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -242,7 +242,7 @@ err:
return ret;
}
-static int tda18212_remove(struct i2c_client *client)
+static void tda18212_remove(struct i2c_client *client)
{
struct tda18212_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->cfg.fe;
@@ -252,8 +252,6 @@ static int tda18212_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id tda18212_id[] = {
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 8a5781b966ee..e404a5afad4c 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -856,7 +856,7 @@ err:
return ret;
}
-static int tda18250_remove(struct i2c_client *client)
+static void tda18250_remove(struct i2c_client *client)
{
struct tda18250_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->fe;
@@ -866,8 +866,6 @@ static int tda18250_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id tda18250_id_table[] = {
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index af7d5ea1f77e..d141d000b819 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -227,7 +227,7 @@ err:
return ret;
}
-static int tua9001_remove(struct i2c_client *client)
+static void tua9001_remove(struct i2c_client *client)
{
struct tua9001_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->fe;
@@ -243,7 +243,6 @@ static int tua9001_remove(struct i2c_client *client)
dev_err(&client->dev, "Tuner disable failed (%pe)\n", ERR_PTR(ret));
}
kfree(dev);
- return 0;
}
static const struct i2c_device_id tua9001_id_table[] = {
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 7835bb0f32fc..e012b21c4fd7 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 1fa6f10ee157..2f45188bf9d4 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -601,7 +601,7 @@ fail:
return err;
}
-static int s2250_remove(struct i2c_client *client)
+static void s2250_remove(struct i2c_client *client)
{
struct s2250 *state = to_state(i2c_get_clientdata(client));
@@ -609,7 +609,6 @@ static int s2250_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&state->sd);
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
- return 0;
}
static const struct i2c_device_id s2250_id[] = {
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9c05776f11d1..d509a4a2f08e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2740,7 +2740,7 @@ static const struct usb_device_id uvc_ids[] = {
.idProduct = 0x4034,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 0,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* LogiLink Wireless Webcam */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 2d47c10de062..33162dc1daf6 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -779,7 +779,7 @@ register_client:
* @client: i2c_client descriptor
*/
-static int tuner_remove(struct i2c_client *client)
+static void tuner_remove(struct i2c_client *client)
{
struct tuner *t = to_tuner(i2c_get_clientdata(client));
@@ -789,7 +789,6 @@ static int tuner_remove(struct i2c_client *client)
list_del(&t->list);
kfree(t);
- return 0;
}
/*
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 0f3d6b5667b0..55c26e7d370e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1040,6 +1040,8 @@ int v4l2_compat_get_array_args(struct file *file, void *mbuf,
{
int err = 0;
+ memset(mbuf, 0, array_size);
+
switch (cmd) {
case VIDIOC_G_FMT32:
case VIDIOC_S_FMT32:
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c314025d977e..e6fd355a2e92 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2872,9 +2872,9 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
- IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
@@ -3367,8 +3367,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (array_buf == NULL)
- goto out_array_args;
- err = -EFAULT;
+ goto out;
if (in_compat_syscall())
err = v4l2_compat_get_array_args(file, array_buf,
user_ptr, array_size,
@@ -3377,7 +3376,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
err = copy_from_user(array_buf, user_ptr, array_size) ?
-EFAULT : 0;
if (err)
- goto out_array_args;
+ goto out;
*kernel_ptr = array_buf;
}
@@ -3395,6 +3394,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
trace_v4l2_qbuf(video_devdata(file)->minor, parg);
}
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ */
+ if (err < 0 && !always_copy)
+ goto out;
+
if (has_array_args) {
*kernel_ptr = (void __force *)user_ptr;
if (in_compat_syscall()) {
@@ -3409,16 +3415,8 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
} else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
}
- goto out_array_args;
}
- /*
- * Some ioctls can return an error, but still have valid
- * results that must be returned.
- */
- if (err < 0 && !always_copy)
- goto out;
-out_array_args:
if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
err = -EFAULT;
out:
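Tagging the ext-controls ioctls with INFO_FL_ALWAYS_COPY matters to userspace because VIDIOC_G/S/TRY_EXT_CTRLS report which control failed through error_idx even when the ioctl itself returns an error; without the always-copy behaviour the kernel would skip the copy-back on failure and the caller would read a stale structure. A hedged userspace sketch (fd is an assumed, already-open video node and the brightness control is only an example):

	struct v4l2_ext_control ctrl = {
		.id	= V4L2_CID_BRIGHTNESS,
		.value	= 128,
	};
	struct v4l2_ext_controls ctrls = {
		.which		= V4L2_CTRL_WHICH_CUR_VAL,
		.count		= 1,
		.controls	= &ctrl,
	};

	if (ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) < 0)
		fprintf(stderr, "control rejected at index %u (errno %d)\n",
			ctrls.error_idx, errno);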
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index ac1a411648d8..fac290e48e0b 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -66,6 +66,15 @@ config BRCMSTB_DPFE
for the DRAM's temperature. Slower refresh rate means cooler RAM,
higher refresh rate means hotter RAM.
+config BRCMSTB_MEMC
+ tristate "Broadcom STB MEMC driver"
+ default ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This driver provides a way to configure the Broadcom STB memory
+ controller and specifically control the Self Refresh Power Down
+ (SRPD) inactivity timeout.
+
config BT1_L2_CTL
bool "Baikal-T1 CM2 L2-RAM Cache Control Block"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index bc7663ed1c25..e148f636c082 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
obj-$(CONFIG_BRCMSTB_DPFE) += brcmstb_dpfe.o
+obj-$(CONFIG_BRCMSTB_MEMC) += brcmstb_memc.o
obj-$(CONFIG_BT1_L2_CTL) += bt1-l2-ctl.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
diff --git a/drivers/memory/brcmstb_memc.c b/drivers/memory/brcmstb_memc.c
new file mode 100644
index 000000000000..233a53f5bce1
--- /dev/null
+++ b/drivers/memory/brcmstb_memc.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DDR Self-Refresh Power Down (SRPD) support for Broadcom STB SoCs
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define REG_MEMC_CNTRLR_CONFIG 0x00
+#define CNTRLR_CONFIG_LPDDR4_SHIFT 5
+#define CNTRLR_CONFIG_MASK 0xf
+#define REG_MEMC_SRPD_CFG_21 0x20
+#define REG_MEMC_SRPD_CFG_20 0x34
+#define REG_MEMC_SRPD_CFG_1x 0x3c
+#define INACT_COUNT_SHIFT 0
+#define INACT_COUNT_MASK 0xffff
+#define SRPD_EN_SHIFT 16
+
+struct brcmstb_memc_data {
+ u32 srpd_offset;
+};
+
+struct brcmstb_memc {
+ struct device *dev;
+ void __iomem *ddr_ctrl;
+ unsigned int timeout_cycles;
+ u32 frequency;
+ u32 srpd_offset;
+};
+
+static int brcmstb_memc_uses_lpddr4(struct brcmstb_memc *memc)
+{
+ void __iomem *config = memc->ddr_ctrl + REG_MEMC_CNTRLR_CONFIG;
+ u32 reg;
+
+ reg = readl_relaxed(config) & CNTRLR_CONFIG_MASK;
+
+ return reg == CNTRLR_CONFIG_LPDDR4_SHIFT;
+}
+
+static int brcmstb_memc_srpd_config(struct brcmstb_memc *memc,
+ unsigned int cycles)
+{
+ void __iomem *cfg = memc->ddr_ctrl + memc->srpd_offset;
+ u32 val;
+
+ /* Max timeout supported in HW */
+ if (cycles > INACT_COUNT_MASK)
+ return -EINVAL;
+
+ memc->timeout_cycles = cycles;
+
+ val = (cycles << INACT_COUNT_SHIFT) & INACT_COUNT_MASK;
+ if (cycles)
+ val |= BIT(SRPD_EN_SHIFT);
+
+ writel_relaxed(val, cfg);
+ /* Ensure the write is committed to the controller */
+ (void)readl_relaxed(cfg);
+
+ return 0;
+}
+
+static ssize_t frequency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", memc->frequency);
+}
+
+static ssize_t srpd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", memc->timeout_cycles);
+}
+
+static ssize_t srpd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ /*
+ * Cannot change the inactivity timeout on LPDDR4 chips because the
+ * dynamic tuning process is also affected by the inactivity
+ * timeout, which would make it non-functional.
+ */
+ if (brcmstb_memc_uses_lpddr4(memc))
+ return -EOPNOTSUPP;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = brcmstb_memc_srpd_config(memc, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(frequency);
+static DEVICE_ATTR_RW(srpd);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_frequency.attr,
+ &dev_attr_srpd.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attrs,
+};
+
+static const struct of_device_id brcmstb_memc_of_match[];
+
+static int brcmstb_memc_probe(struct platform_device *pdev)
+{
+ const struct brcmstb_memc_data *memc_data;
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct brcmstb_memc *memc;
+ int ret;
+
+ memc = devm_kzalloc(dev, sizeof(*memc), GFP_KERNEL);
+ if (!memc)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, memc);
+
+ of_id = of_match_device(brcmstb_memc_of_match, dev);
+ memc_data = of_id->data;
+ memc->srpd_offset = memc_data->srpd_offset;
+
+ memc->ddr_ctrl = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(memc->ddr_ctrl))
+ return PTR_ERR(memc->ddr_ctrl);
+
+ of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &memc->frequency);
+
+ ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int brcmstb_memc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ sysfs_remove_group(&dev->kobj, &dev_attr_group);
+
+ return 0;
+}
+
+enum brcmstb_memc_hwtype {
+ BRCMSTB_MEMC_V21,
+ BRCMSTB_MEMC_V20,
+ BRCMSTB_MEMC_V1X,
+};
+
+static const struct brcmstb_memc_data brcmstb_memc_versions[] = {
+ { .srpd_offset = REG_MEMC_SRPD_CFG_21 },
+ { .srpd_offset = REG_MEMC_SRPD_CFG_20 },
+ { .srpd_offset = REG_MEMC_SRPD_CFG_1x },
+};
+
+static const struct of_device_id brcmstb_memc_of_match[] = {
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V1X]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V20]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.5",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.6",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.7",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.8",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.2",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.3",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.4",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ /* default to the original offset */
+ {
+ .compatible = "brcm,brcmstb-memc-ddr",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V1X]
+ },
+ {}
+};
+
+static int brcmstb_memc_suspend(struct device *dev)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+ void __iomem *cfg = memc->ddr_ctrl + memc->srpd_offset;
+ u32 val;
+
+ if (memc->timeout_cycles == 0)
+ return 0;
+
+ /*
+ * Disable SRPD prior to suspending the system since it can
+ * cause issues for other memory clients managed by the ARM
+ * trusted firmware when they access memory.
+ */
+ val = readl_relaxed(cfg);
+ val &= ~BIT(SRPD_EN_SHIFT);
+ writel_relaxed(val, cfg);
+ /* Ensure the write is committed to the controller */
+ (void)readl_relaxed(cfg);
+
+ return 0;
+}
+
+static int brcmstb_memc_resume(struct device *dev)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ if (memc->timeout_cycles == 0)
+ return 0;
+
+ return brcmstb_memc_srpd_config(memc, memc->timeout_cycles);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_memc_pm_ops, brcmstb_memc_suspend,
+ brcmstb_memc_resume);
+
+static struct platform_driver brcmstb_memc_driver = {
+ .probe = brcmstb_memc_probe,
+ .remove = brcmstb_memc_remove,
+ .driver = {
+ .name = "brcmstb_memc",
+ .of_match_table = brcmstb_memc_of_match,
+ .pm = pm_ptr(&brcmstb_memc_pm_ops),
+ },
+};
+module_platform_driver(brcmstb_memc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("DDR SRPD driver for Broadcom STB chips");
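The new srpd attribute takes a raw cycle count clocked at the rate reported by the frequency attribute (the clock-frequency DT property), so a caller who thinks in time units has to scale first. A hedged helper sketch, assuming clock-frequency is expressed in Hz; the function name is illustrative and not part of the driver:

static u32 example_us_to_srpd_cycles(const struct brcmstb_memc *memc,
				     u32 timeout_us)
{
	u64 cycles = div_u64((u64)memc->frequency * timeout_us, USEC_PER_SEC);

	/* Clamp to the 16-bit INACT_COUNT field the hardware supports. */
	return min_t(u64, cycles, INACT_COUNT_MASK);
}

Writing the resulting value to srpd (or 0 to disable SRPD entirely) then goes through brcmstb_memc_srpd_config() above.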
diff --git a/drivers/memory/dfl-emif.c b/drivers/memory/dfl-emif.c
index 3f719816771d..da06cd30a016 100644
--- a/drivers/memory/dfl-emif.c
+++ b/drivers/memory/dfl-emif.c
@@ -24,11 +24,24 @@
#define EMIF_STAT_CLEAR_BUSY_SFT 16
#define EMIF_CTRL 0x10
#define EMIF_CTRL_CLEAR_EN_SFT 0
-#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(3, 0)
+#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(7, 0)
#define EMIF_POLL_INVL 10000 /* us */
#define EMIF_POLL_TIMEOUT 5000000 /* us */
+/*
+ * The Capability Register replaces the Control Register (at the same
+ * offset) for EMIF feature revisions > 0. The bitmask that indicates
+ * the presence of memory channels exists in both the Capability Register
+ * and Control Register definitions. These can be thought of as a C union.
+ * The Capability Register definitions are used to check for the existence
+ * of a memory channel, and the Control Register definitions are used for
+ * managing the memory-clear functionality in revision 0.
+ */
+#define EMIF_CAPABILITY_BASE 0x10
+#define EMIF_CAPABILITY_CHN_MSK_V0 GENMASK_ULL(3, 0)
+#define EMIF_CAPABILITY_CHN_MSK GENMASK_ULL(7, 0)
+
struct dfl_emif {
struct device *dev;
void __iomem *base;
@@ -106,16 +119,30 @@ emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 0);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 1);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 2);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 3);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 4);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 5);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 6);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 7);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 0);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 1);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 2);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 3);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 4);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 5);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 6);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 7);
+
emif_clear_attr(0);
emif_clear_attr(1);
emif_clear_attr(2);
emif_clear_attr(3);
+emif_clear_attr(4);
+emif_clear_attr(5);
+emif_clear_attr(6);
+emif_clear_attr(7);
+
static struct attribute *dfl_emif_attrs[] = {
&emif_attr_inf0_init_done.attr.attr,
@@ -134,6 +161,22 @@ static struct attribute *dfl_emif_attrs[] = {
&emif_attr_inf3_cal_fail.attr.attr,
&emif_attr_inf3_clear.attr.attr,
+ &emif_attr_inf4_init_done.attr.attr,
+ &emif_attr_inf4_cal_fail.attr.attr,
+ &emif_attr_inf4_clear.attr.attr,
+
+ &emif_attr_inf5_init_done.attr.attr,
+ &emif_attr_inf5_cal_fail.attr.attr,
+ &emif_attr_inf5_clear.attr.attr,
+
+ &emif_attr_inf6_init_done.attr.attr,
+ &emif_attr_inf6_cal_fail.attr.attr,
+ &emif_attr_inf6_clear.attr.attr,
+
+ &emif_attr_inf7_init_done.attr.attr,
+ &emif_attr_inf7_cal_fail.attr.attr,
+ &emif_attr_inf7_clear.attr.attr,
+
NULL,
};
@@ -143,15 +186,24 @@ static umode_t dfl_emif_visible(struct kobject *kobj,
struct dfl_emif *de = dev_get_drvdata(kobj_to_dev(kobj));
struct emif_attr *eattr = container_of(attr, struct emif_attr,
attr.attr);
+ struct dfl_device *ddev = to_dfl_dev(de->dev);
u64 val;
/*
- * This device supports upto 4 memory interfaces, but not all
+ * This device supports up to 8 memory interfaces, but not all
* interfaces are used on different platforms. The read out value of
- * CLEAN_EN field (which is a bitmap) could tell how many interfaces
- * are available.
+ * CAPABILITY_CHN_MSK field (which is a bitmap) indicates which
+ * interfaces are available.
*/
- val = FIELD_GET(EMIF_CTRL_CLEAR_EN_MSK, readq(de->base + EMIF_CTRL));
+ if (ddev->revision > 0 && strstr(attr->name, "_clear"))
+ return 0;
+
+ if (ddev->revision == 0)
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK_V0,
+ readq(de->base + EMIF_CAPABILITY_BASE));
+ else
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK,
+ readq(de->base + EMIF_CAPABILITY_BASE));
return (val & BIT_ULL(eattr->index)) ? attr->mode : 0;
}
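The visibility callback above treats the capability readout as a plain per-channel bitmap. A rough worked illustration (the readout value below is hypothetical, not taken from real hardware):

/*
 * If the capability register reads back 0x2f, channels 0-3 and 5 are
 * present, so their init_done/cal_fail attributes stay visible, while
 * the inf4, inf6 and inf7 attributes are hidden (mode 0).
 */
static bool emif_channel_present(u64 chn_msk, unsigned int index)
{
	return chn_msk & BIT_ULL(index);
}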
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index d7cb7ead2ac7..5a9754442bc7 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
@@ -14,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <soc/mediatek/smi.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <dt-bindings/memory/mtk-memory-port.h>
@@ -89,6 +91,7 @@
#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
#define MTK_SMI_FLAG_SW_FLAG BIT(1)
#define MTK_SMI_FLAG_SLEEP_CTL BIT(2)
+#define MTK_SMI_FLAG_CFG_PORT_SEC_CTL BIT(3)
#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
struct mtk_smi_reg_pair {
@@ -127,7 +130,7 @@ struct mtk_smi_common_plat {
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
- void (*config_port)(struct device *dev);
+ int (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
unsigned int flags_general;
const u8 (*ostd)[SMI_LARB_PORT_NR_MAX];
@@ -185,7 +188,7 @@ static const struct component_ops mtk_smi_larb_component_ops = {
.unbind = mtk_smi_larb_unbind,
};
-static void mtk_smi_larb_config_port_gen1(struct device *dev)
+static int mtk_smi_larb_config_port_gen1(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
@@ -214,31 +217,35 @@ static void mtk_smi_larb_config_port_gen1(struct device *dev)
common->smi_ao_base
+ REG_SMI_SECUR_CON_ADDR(m4u_port_id));
}
+ return 0;
}
-static void mtk_smi_larb_config_port_mt8167(struct device *dev)
+static int mtk_smi_larb_config_port_mt8167(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
+ return 0;
}
-static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+static int mtk_smi_larb_config_port_mt8173(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
writel(*larb->mmu, larb->base + MT8173_SMI_LARB_MMU_EN);
+ return 0;
}
-static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
+static int mtk_smi_larb_config_port_gen2_general(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
u32 reg, flags_general = larb->larb_gen->flags_general;
const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL;
+ struct arm_smccc_res res;
int i;
if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
- return;
+ return 0;
if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_THRT_UPDATE)) {
reg = readl_relaxed(larb->base + SMI_LARB_CMD_THRT_CON);
@@ -253,14 +260,78 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
for (i = 0; i < SMI_LARB_PORT_NR_MAX && larbostd && !!larbostd[i]; i++)
writel_relaxed(larbostd[i], larb->base + SMI_LARB_OSTDL_PORTx(i));
+ /*
+ * When the mmu_en bits are controlled in the secure world, bank_sel is
+ * still programmed through LARB_NONSEC_CON below, and the mmu_en bits
+ * of LARB_NONSEC_CON have no effect in that case.
+ */
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_CFG_PORT_SEC_CTL)) {
+ arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL, IOMMU_ATF_CMD_CONFIG_SMI_LARB,
+ larb->larbid, *larb->mmu, 0, 0, 0, 0, &res);
+ if (res.a0 != 0) {
+ dev_err(dev, "Failed to enable iommu, ret %ld\n", res.a0);
+ return -EINVAL;
+ }
+ }
+
for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
reg |= F_MMU_EN;
reg |= BANK_SEL(larb->bank[i]);
writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
}
+ return 0;
}
+static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x02, 0x18, 0x22, 0x22, 0x01, 0x02, 0x0a,},
+ [1] = {0x12, 0x02, 0x14, 0x14, 0x01, 0x18, 0x0a,},
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,},
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a, 0x07, 0x07,},
+ [5] = {0x02, 0x01, 0x04, 0x02, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x01, 0x0a, 0x05, 0x02, 0x03, 0x01, 0x01, 0x14, 0x14,
+ 0x0a, 0x14, 0x1e, 0x01, 0x0c, 0x0a, 0x05, 0x02, 0x02, 0x05,
+ 0x03, 0x01, 0x1e, 0x01, 0x05,},
+ [9] = {0x1e, 0x01, 0x0a, 0x0a, 0x01, 0x01, 0x03, 0x1e, 0x1e, 0x10,
+ 0x07, 0x01, 0x0a, 0x06, 0x03, 0x03, 0x0e, 0x01, 0x04, 0x28,},
+ [10] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [11] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [12] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [13] = {0x07, 0x02, 0x04, 0x02, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x07, 0x02, 0x04, 0x02, 0x05, 0x05,},
+ [14] = {0x02, 0x02, 0x0c, 0x0c, 0x0c, 0x0c, 0x01, 0x01, 0x02, 0x02,
+ 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x01, 0x01,},
+ [15] = {0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x0c, 0x0c,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02,
+ 0x0c, 0x01, 0x01,},
+ [16] = {0x28, 0x28, 0x03, 0x01, 0x01, 0x03, 0x14, 0x14, 0x0a, 0x0d,
+ 0x03, 0x05, 0x0e, 0x01, 0x01, 0x05, 0x06, 0x0d, 0x01,},
+ [17] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [18] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [19] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [20] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [21] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,
+ 0x01,},
+ [23] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x18, 0x01, 0x01,},
+ [24] = {0x12, 0x06, 0x12, 0x06,},
+ [25] = {0x01},
+};
+
static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
[1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
@@ -347,6 +418,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8186 = {
.flags_general = MTK_SMI_FLAG_SLEEP_CTL,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG |
+ MTK_SMI_FLAG_SLEEP_CTL | MTK_SMI_FLAG_CFG_PORT_SEC_CTL,
+ .ostd = mtk_smi_larb_mt8188_ostd,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
};
@@ -367,6 +445,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
{.compatible = "mediatek,mt8186-smi-larb", .data = &mtk_smi_larb_mt8186},
+ {.compatible = "mediatek,mt8188-smi-larb", .data = &mtk_smi_larb_mt8188},
{.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
{.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
@@ -511,9 +590,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
mtk_smi_larb_sleep_ctrl_disable(larb);
/* Configure the basic setting for this larb */
- larb_gen->config_port(dev);
-
- return 0;
+ return larb_gen->config_port(dev);
}
static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
@@ -597,6 +674,18 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8186 = {
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(4) | F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8188_vdo = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(5) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8188_vpp = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -633,6 +722,8 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
{.compatible = "mediatek,mt8186-smi-common", .data = &mtk_smi_common_mt8186},
+ {.compatible = "mediatek,mt8188-smi-common-vdo", .data = &mtk_smi_common_mt8188_vdo},
+ {.compatible = "mediatek,mt8188-smi-common-vpp", .data = &mtk_smi_common_mt8188_vpp},
{.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
{.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
{.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index dbdf87bc0b78..fcd20d85d385 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -134,6 +134,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
for_each_child_of_node(np_ddr, np_tim) {
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_do_get_timings(np_tim, &timings[i])) {
+ of_node_put(np_tim);
devm_kfree(dev, timings);
goto default_timings;
}
@@ -284,6 +285,7 @@ const struct lpddr3_timings
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
devm_kfree(dev, timings);
+ of_node_put(np_tim);
goto default_timings;
}
i++;
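The two of_node_put() additions above follow the usual rule for for_each_child_of_node(): the iterator holds a reference on the node currently being visited, so any early exit from the loop must drop that reference explicitly. A minimal sketch of the pattern (the helper name is hypothetical):

static int scan_children(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (process_child(child)) {	/* hypothetical helper */
			/* leaving the loop early: drop the iterator's reference */
			of_node_put(child);
			return -EINVAL;
		}
	}

	return 0;
}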
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index f84b98278745..d39ee7d06665 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -122,6 +122,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
}
of_platform_device_create(child, NULL, &adev->dev);
+ of_node_put(child);
return 0;
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index eaf9845633b4..a30e47b74327 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -583,7 +583,7 @@ out_init:
return ret;
}
-static int pm800_remove(struct i2c_client *client)
+static void pm800_remove(struct i2c_client *client)
{
struct pm80x_chip *chip = i2c_get_clientdata(client);
@@ -592,8 +592,6 @@ static int pm800_remove(struct i2c_client *client)
pm800_pages_exit(chip);
pm80x_deinit();
-
- return 0;
}
static struct i2c_driver pm800_driver = {
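Most of the MFD and misc hunks that follow are the same mechanical conversion: the I2C .remove() callback now returns void, so the trailing "return 0;" statements go away. In sketch form (driver and helper names are hypothetical):

/* Before: remove() returned an int that the I2C core effectively ignored. */
static int foo_remove_old(struct i2c_client *client)
{
	foo_teardown(i2c_get_clientdata(client));
	return 0;
}

/* After: remove() returns void, so cleanup can no longer "fail" half-way. */
static void foo_remove(struct i2c_client *client)
{
	foo_teardown(i2c_get_clientdata(client));
}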
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index ada6c513302b..10d3637840c8 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -239,7 +239,7 @@ out_init:
return ret;
}
-static int pm805_remove(struct i2c_client *client)
+static void pm805_remove(struct i2c_client *client)
{
struct pm80x_chip *chip = i2c_get_clientdata(client);
@@ -247,8 +247,6 @@ static int pm805_remove(struct i2c_client *client)
device_irq_exit_805(chip);
pm80x_deinit();
-
- return 0;
}
static struct i2c_driver pm805_driver = {
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index b1e829ea909b..5dc86dd66202 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1201,7 +1201,7 @@ static int pm860x_probe(struct i2c_client *client)
return 0;
}
-static int pm860x_remove(struct i2c_client *client)
+static void pm860x_remove(struct i2c_client *client)
{
struct pm860x_chip *chip = i2c_get_clientdata(client);
@@ -1210,7 +1210,6 @@ static int pm860x_remove(struct i2c_client *client)
regmap_exit(chip->regmap_companion);
i2c_unregister_device(chip->companion);
}
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index abb58ab1a1a4..c3dd1fe8d8c9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -963,6 +963,27 @@ config MFD_MENF21BMC
This driver can also be built as a module. If so the module
will be called menf21bmc.
+config MFD_OCELOT
+ tristate "Microsemi Ocelot External Control Support"
+ depends on SPI_MASTER
+ select MFD_CORE
+ select REGMAP_SPI
+ help
+ Ocelot is a family of networking chips that support multiple Ethernet
+ and fibre interfaces. In addition to networking, they contain several
+ other functions, including pinctrl, MDIO, and communication with
+ external chips. While some chips have an internal processor capable of
+ running an OS, others don't. All chips can be controlled externally
+ through different interfaces, including SPI, I2C, and PCIe.
+
+ Say yes here to add support for Ocelot chips (VSC7511, VSC7512,
+ VSC7513, VSC7514) controlled externally.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ocelot-soc.
+
+ If unsure, say N.
+
config EZX_PCAP
bool "Motorola EZXPCAP Support"
depends on SPI_MASTER
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 858cacf659d6..0004b7e86220 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -120,6 +120,9 @@ obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
+ocelot-soc-objs := ocelot-core.o ocelot-spi.o
+obj-$(CONFIG_MFD_OCELOT) += ocelot-soc.o
+
obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o
obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o
diff --git a/drivers/mfd/acer-ec-a500.c b/drivers/mfd/acer-ec-a500.c
index 80c2fdd14fc4..7fd8b9988075 100644
--- a/drivers/mfd/acer-ec-a500.c
+++ b/drivers/mfd/acer-ec-a500.c
@@ -169,7 +169,7 @@ static int a500_ec_probe(struct i2c_client *client)
return 0;
}
-static int a500_ec_remove(struct i2c_client *client)
+static void a500_ec_remove(struct i2c_client *client)
{
if (of_device_is_system_power_controller(client->dev.of_node)) {
if (pm_power_off == a500_ec_poweroff)
@@ -177,8 +177,6 @@ static int a500_ec_remove(struct i2c_client *client)
unregister_restart_handler(&a500_ec_restart_handler);
}
-
- return 0;
}
static const struct of_device_id a500_ec_match[] = {
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 6d83e6b9a692..bfc7cf56ff2c 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -84,13 +84,11 @@ static int arizona_i2c_probe(struct i2c_client *i2c,
return arizona_dev_init(arizona);
}
-static int arizona_i2c_remove(struct i2c_client *i2c)
+static void arizona_i2c_remove(struct i2c_client *i2c)
{
struct arizona *arizona = dev_get_drvdata(&i2c->dev);
arizona_dev_exit(arizona);
-
- return 0;
}
static const struct i2c_device_id arizona_i2c_id[] = {
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index 00ab48018d8d..8fd6727dc30a 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -50,13 +50,11 @@ static int axp20x_i2c_probe(struct i2c_client *i2c,
return axp20x_device_probe(axp20x);
}
-static int axp20x_i2c_remove(struct i2c_client *i2c)
+static void axp20x_i2c_remove(struct i2c_client *i2c)
{
struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
axp20x_device_remove(axp20x);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index a818fbb55988..3f8f6ad3a98c 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -532,12 +532,11 @@ static int da903x_probe(struct i2c_client *client,
return da903x_add_subdevs(chip, pdata);
}
-static int da903x_remove(struct i2c_client *client)
+static void da903x_remove(struct i2c_client *client)
{
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
- return 0;
}
static struct i2c_driver da903x_driver = {
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 8de93db35f3a..5a74696c8704 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -168,12 +168,11 @@ static int da9052_i2c_probe(struct i2c_client *client,
return da9052_device_init(da9052, id->driver_data);
}
-static int da9052_i2c_remove(struct i2c_client *client)
+static void da9052_i2c_remove(struct i2c_client *client)
{
struct da9052 *da9052 = i2c_get_clientdata(client);
da9052_device_exit(da9052);
- return 0;
}
static struct i2c_driver da9052_i2c_driver = {
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index bc60433b68db..276c7d1c509e 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -41,13 +41,11 @@ static int da9055_i2c_probe(struct i2c_client *i2c,
return da9055_device_init(da9055);
}
-static int da9055_i2c_remove(struct i2c_client *i2c)
+static void da9055_i2c_remove(struct i2c_client *i2c)
{
struct da9055 *da9055 = i2c_get_clientdata(i2c);
da9055_device_exit(da9055);
-
- return 0;
}
/*
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 2774b2cbaea6..0a80d82c6858 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -723,14 +723,12 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int da9062_i2c_remove(struct i2c_client *i2c)
+static void da9062_i2c_remove(struct i2c_client *i2c)
{
struct da9062 *chip = i2c_get_clientdata(i2c);
mfd_remove_devices(chip->dev);
regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
-
- return 0;
}
static const struct i2c_device_id da9062_i2c_id[] = {
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 58009c8cb870..6ae56e46d24e 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -471,15 +471,13 @@ regmap_irq_fail:
return ret;
}
-static int da9150_remove(struct i2c_client *client)
+static void da9150_remove(struct i2c_client *client)
{
struct da9150 *da9150 = i2c_get_clientdata(client);
regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
mfd_remove_devices(da9150->dev);
i2c_unregister_device(da9150->core_qif);
-
- return 0;
}
static void da9150_shutdown(struct i2c_client *client)
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 54fb6cbd2aa0..759c59690680 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -375,11 +375,10 @@ static void dm355evm_power_off(void)
dm355evm_command(MSP_COMMAND_POWEROFF);
}
-static int dm355evm_msp_remove(struct i2c_client *client)
+static void dm355evm_msp_remove(struct i2c_client *client)
{
pm_power_off = NULL;
msp430 = NULL;
- return 0;
}
static int
diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
index 1b73318d1f1f..3eff98e26bea 100644
--- a/drivers/mfd/ene-kb3930.c
+++ b/drivers/mfd/ene-kb3930.c
@@ -177,7 +177,7 @@ static int kb3930_probe(struct i2c_client *client)
return 0;
}
-static int kb3930_remove(struct i2c_client *client)
+static void kb3930_remove(struct i2c_client *client)
{
struct kb3930 *ddata = i2c_get_clientdata(client);
@@ -187,8 +187,6 @@ static int kb3930_remove(struct i2c_client *client)
unregister_restart_handler(&kb3930_restart_nb);
}
kb3930_power_off = NULL;
-
- return 0;
}
static const struct of_device_id kb3930_dt_ids[] = {
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
index d87876747b91..9d7d870c44a8 100644
--- a/drivers/mfd/gateworks-gsc.c
+++ b/drivers/mfd/gateworks-gsc.c
@@ -255,11 +255,9 @@ static int gsc_probe(struct i2c_client *client)
return 0;
}
-static int gsc_remove(struct i2c_client *client)
+static void gsc_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &attr_group);
-
- return 0;
}
static struct i2c_driver gsc_driver = {
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 5e8c94e008ed..b824e15f4d22 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -81,7 +81,7 @@ err_del_irq_chip:
return ret;
}
-static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
+static void intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
@@ -91,8 +91,6 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
mfd_remove_devices(&i2c->dev);
-
- return 0;
}
static void intel_soc_pmic_shutdown(struct i2c_client *i2c)
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index 575ab67e243d..1895fce25b06 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -1008,13 +1008,11 @@ static int iqs62x_probe(struct i2c_client *client)
return ret;
}
-static int iqs62x_remove(struct i2c_client *client)
+static void iqs62x_remove(struct i2c_client *client)
{
struct iqs62x_core *iqs62x = i2c_get_clientdata(client);
wait_for_completion(&iqs62x->fw_done);
-
- return 0;
}
static int __maybe_unused iqs62x_suspend(struct device *dev)
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 5690768f3e63..be32ffc5af38 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -607,15 +607,13 @@ static int lm3533_i2c_probe(struct i2c_client *i2c,
return lm3533_device_init(lm3533);
}
-static int lm3533_i2c_remove(struct i2c_client *i2c)
+static void lm3533_i2c_remove(struct i2c_client *i2c)
{
struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
dev_dbg(&i2c->dev, "%s\n", __func__);
lm3533_device_exit(lm3533);
-
- return 0;
}
static const struct i2c_device_id lm3533_i2c_ids[] = {
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index c223d2c6a363..e7c601bca9ef 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -199,13 +199,12 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
}
-static int lp8788_remove(struct i2c_client *cl)
+static void lp8788_remove(struct i2c_client *cl)
{
struct lp8788 *lp = i2c_get_clientdata(cl);
mfd_remove_devices(lp->dev);
lp8788_irq_exit(lp);
- return 0;
}
static const struct i2c_device_id lp8788_ids[] = {
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
index 7df5b9ba5855..915d2f95bad3 100644
--- a/drivers/mfd/madera-i2c.c
+++ b/drivers/mfd/madera-i2c.c
@@ -112,13 +112,11 @@ static int madera_i2c_probe(struct i2c_client *i2c,
return madera_dev_init(madera);
}
-static int madera_i2c_remove(struct i2c_client *i2c)
+static void madera_i2c_remove(struct i2c_client *i2c)
{
struct madera *madera = dev_get_drvdata(&i2c->dev);
madera_dev_exit(madera);
-
- return 0;
}
static const struct i2c_device_id madera_i2c_id[] = {
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6c487fa14e9c..d44ad6f33742 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -463,7 +463,7 @@ err_max77836:
return ret;
}
-static int max14577_i2c_remove(struct i2c_client *i2c)
+static void max14577_i2c_remove(struct i2c_client *i2c)
{
struct max14577 *max14577 = i2c_get_clientdata(i2c);
@@ -471,8 +471,6 @@ static int max14577_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
max77836_remove(max14577);
-
- return 0;
}
static const struct i2c_device_id max14577_i2c_id[] = {
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 4e6244e17559..7088cb6f9174 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -294,7 +294,7 @@ err_i2c_haptic:
return ret;
}
-static int max77693_i2c_remove(struct i2c_client *i2c)
+static void max77693_i2c_remove(struct i2c_client *i2c)
{
struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
@@ -307,8 +307,6 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
i2c_unregister_device(max77693->i2c_muic);
i2c_unregister_device(max77693->i2c_haptic);
-
- return 0;
}
static const struct i2c_device_id max77693_i2c_id[] = {
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 41f566e6a096..c340080971ce 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -282,7 +282,7 @@ err_alloc_drvdata:
return ret;
}
-static int max8907_i2c_remove(struct i2c_client *i2c)
+static void max8907_i2c_remove(struct i2c_client *i2c)
{
struct max8907 *max8907 = i2c_get_clientdata(i2c);
@@ -293,8 +293,6 @@ static int max8907_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_chg);
i2c_unregister_device(max8907->i2c_rtc);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 114e905bef25..04101da42bd3 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -198,14 +198,13 @@ static int max8925_probe(struct i2c_client *client,
return 0;
}
-static int max8925_remove(struct i2c_client *client)
+static void max8925_remove(struct i2c_client *client)
{
struct max8925_chip *chip = i2c_get_clientdata(client);
max8925_device_exit(chip);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index fb937f66277e..eb94f3004cf3 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -85,10 +85,9 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
return mc13xxx_common_init(&client->dev);
}
-static int mc13xxx_i2c_remove(struct i2c_client *client)
+static void mc13xxx_i2c_remove(struct i2c_client *client)
{
mc13xxx_common_exit(&client->dev);
- return 0;
}
static struct i2c_driver mc13xxx_i2c_driver = {
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 07e0ca2e467c..eb08f69001f9 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1222,14 +1222,13 @@ fail:
return err;
}
-static int menelaus_remove(struct i2c_client *client)
+static void menelaus_remove(struct i2c_client *client)
{
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
free_irq(client->irq, menelaus);
flush_work(&menelaus->work);
the_menelaus = NULL;
- return 0;
}
static const struct i2c_device_id menelaus_id[] = {
diff --git a/drivers/mfd/ntxec.c b/drivers/mfd/ntxec.c
index b711e73eedcb..e16a7a82a929 100644
--- a/drivers/mfd/ntxec.c
+++ b/drivers/mfd/ntxec.c
@@ -239,15 +239,13 @@ static int ntxec_probe(struct i2c_client *client)
return res;
}
-static int ntxec_remove(struct i2c_client *client)
+static void ntxec_remove(struct i2c_client *client)
{
if (client == poweroff_restart_client) {
poweroff_restart_client = NULL;
pm_power_off = NULL;
unregister_restart_handler(&ntxec_restart_handler);
}
-
- return 0;
}
static const struct of_device_id of_ntxec_match_table[] = {
diff --git a/drivers/mfd/ocelot-core.c b/drivers/mfd/ocelot-core.c
new file mode 100644
index 000000000000..1816d52c65c5
--- /dev/null
+++ b/drivers/mfd/ocelot-core.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Core driver for the Ocelot chip family.
+ *
+ * The VSC7511, 7512, 7513, and 7514 can be controlled internally via an
+ * on-chip MIPS processor, or externally via SPI, I2C, or PCIe. This core driver is
+ * intended to be the bus-agnostic glue between, for example, the SPI bus and
+ * the child devices.
+ *
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ocelot.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <soc/mscc/ocelot.h>
+
+#include "ocelot.h"
+
+#define REG_GCB_SOFT_RST 0x0008
+
+#define BIT_SOFT_CHIP_RST BIT(0)
+
+#define VSC7512_MIIM0_RES_START 0x7107009c
+#define VSC7512_MIIM1_RES_START 0x710700c0
+#define VSC7512_MIIM_RES_SIZE 0x024
+
+#define VSC7512_PHY_RES_START 0x710700f0
+#define VSC7512_PHY_RES_SIZE 0x004
+
+#define VSC7512_GPIO_RES_START 0x71070034
+#define VSC7512_GPIO_RES_SIZE 0x06c
+
+#define VSC7512_SIO_CTRL_RES_START 0x710700f8
+#define VSC7512_SIO_CTRL_RES_SIZE 0x100
+
+#define VSC7512_GCB_RST_SLEEP_US 100
+#define VSC7512_GCB_RST_TIMEOUT_US 100000
+
+static int ocelot_gcb_chip_rst_status(struct ocelot_ddata *ddata)
+{
+ int val, err;
+
+ err = regmap_read(ddata->gcb_regmap, REG_GCB_SOFT_RST, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+int ocelot_chip_reset(struct device *dev)
+{
+ struct ocelot_ddata *ddata = dev_get_drvdata(dev);
+ int ret, val;
+
+ /*
+ * Reset the entire chip here to put it into a completely known state.
+ * Other drivers may want to reset their own subsystems. The register
+ * self-clears, so a single write is all that is needed; afterwards we
+ * simply wait for it to clear.
+ */
+ ret = regmap_write(ddata->gcb_regmap, REG_GCB_SOFT_RST, BIT_SOFT_CHIP_RST);
+ if (ret)
+ return ret;
+
+ return readx_poll_timeout(ocelot_gcb_chip_rst_status, ddata, val, !val,
+ VSC7512_GCB_RST_SLEEP_US, VSC7512_GCB_RST_TIMEOUT_US);
+}
+EXPORT_SYMBOL_NS(ocelot_chip_reset, MFD_OCELOT);
+
+static const struct resource vsc7512_miim0_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_MIIM0_RES_START, VSC7512_MIIM_RES_SIZE, "gcb_miim0"),
+ DEFINE_RES_REG_NAMED(VSC7512_PHY_RES_START, VSC7512_PHY_RES_SIZE, "gcb_phy"),
+};
+
+static const struct resource vsc7512_miim1_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_MIIM1_RES_START, VSC7512_MIIM_RES_SIZE, "gcb_miim1"),
+};
+
+static const struct resource vsc7512_pinctrl_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_GPIO_RES_START, VSC7512_GPIO_RES_SIZE, "gcb_gpio"),
+};
+
+static const struct resource vsc7512_sgpio_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_SIO_CTRL_RES_START, VSC7512_SIO_CTRL_RES_SIZE, "gcb_sio"),
+};
+
+static const struct mfd_cell vsc7512_devs[] = {
+ {
+ .name = "ocelot-pinctrl",
+ .of_compatible = "mscc,ocelot-pinctrl",
+ .num_resources = ARRAY_SIZE(vsc7512_pinctrl_resources),
+ .resources = vsc7512_pinctrl_resources,
+ }, {
+ .name = "ocelot-sgpio",
+ .of_compatible = "mscc,ocelot-sgpio",
+ .num_resources = ARRAY_SIZE(vsc7512_sgpio_resources),
+ .resources = vsc7512_sgpio_resources,
+ }, {
+ .name = "ocelot-miim0",
+ .of_compatible = "mscc,ocelot-miim",
+ .of_reg = VSC7512_MIIM0_RES_START,
+ .use_of_reg = true,
+ .num_resources = ARRAY_SIZE(vsc7512_miim0_resources),
+ .resources = vsc7512_miim0_resources,
+ }, {
+ .name = "ocelot-miim1",
+ .of_compatible = "mscc,ocelot-miim",
+ .of_reg = VSC7512_MIIM1_RES_START,
+ .use_of_reg = true,
+ .num_resources = ARRAY_SIZE(vsc7512_miim1_resources),
+ .resources = vsc7512_miim1_resources,
+ },
+};
+
+static void ocelot_core_try_add_regmap(struct device *dev,
+ const struct resource *res)
+{
+ if (dev_get_regmap(dev, res->name))
+ return;
+
+ ocelot_spi_init_regmap(dev, res);
+}
+
+static void ocelot_core_try_add_regmaps(struct device *dev,
+ const struct mfd_cell *cell)
+{
+ int i;
+
+ for (i = 0; i < cell->num_resources; i++)
+ ocelot_core_try_add_regmap(dev, &cell->resources[i]);
+}
+
+int ocelot_core_init(struct device *dev)
+{
+ int i, ndevs;
+
+ ndevs = ARRAY_SIZE(vsc7512_devs);
+
+ for (i = 0; i < ndevs; i++)
+ ocelot_core_try_add_regmaps(dev, &vsc7512_devs[i]);
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, vsc7512_devs, ndevs, NULL, 0, NULL);
+}
+EXPORT_SYMBOL_NS(ocelot_core_init, MFD_OCELOT);
+
+MODULE_DESCRIPTION("Externally Controlled Ocelot Chip Driver");
+MODULE_AUTHOR("Colin Foster <colin.foster@in-advantage.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT_SPI);
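ocelot_core_init() above pre-creates one named regmap per child resource, so a child cell can later look its map up by name instead of mapping registers itself. A rough sketch of what that lookup might look like on the child side (consumer code assumed purely for illustration, not part of this patch):

static int ocelot_child_probe(struct platform_device *pdev)
{
	struct regmap *map;

	/* The core registered the regmap under the resource name, e.g. "gcb_gpio". */
	map = dev_get_regmap(pdev->dev.parent, "gcb_gpio");
	if (!map)
		return -ENODEV;

	/* ... regmap_read()/regmap_write() against chip-relative offsets ... */
	return 0;
}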
diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
new file mode 100644
index 000000000000..0f097f4829d1
--- /dev/null
+++ b/drivers/mfd/ocelot-spi.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * SPI core driver for the Ocelot chip family.
+ *
+ * This driver will handle everything necessary to allow for communication over
+ * SPI to the VSC7511, VSC7512, VSC7513 and VSC7514 chips. The main functions
+ * are to prepare the chip's SPI interface for a specific bus speed and the
+ * host processor's endianness. The driver will also create and distribute
+ * regmaps for any children.
+ *
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "ocelot.h"
+
+#define REG_DEV_CPUORG_IF_CTRL 0x0000
+#define REG_DEV_CPUORG_IF_CFGSTAT 0x0004
+
+#define CFGSTAT_IF_NUM_VCORE (0 << 24)
+#define CFGSTAT_IF_NUM_VRAP (1 << 24)
+#define CFGSTAT_IF_NUM_SI (2 << 24)
+#define CFGSTAT_IF_NUM_MIIM (3 << 24)
+
+#define VSC7512_DEVCPU_ORG_RES_START 0x71000000
+#define VSC7512_DEVCPU_ORG_RES_SIZE 0x38
+
+#define VSC7512_CHIP_REGS_RES_START 0x71070000
+#define VSC7512_CHIP_REGS_RES_SIZE 0x14
+
+static const struct resource vsc7512_dev_cpuorg_resource =
+ DEFINE_RES_REG_NAMED(VSC7512_DEVCPU_ORG_RES_START,
+ VSC7512_DEVCPU_ORG_RES_SIZE,
+ "devcpu_org");
+
+static const struct resource vsc7512_gcb_resource =
+ DEFINE_RES_REG_NAMED(VSC7512_CHIP_REGS_RES_START,
+ VSC7512_CHIP_REGS_RES_SIZE,
+ "devcpu_gcb_chip_regs");
+
+static int ocelot_spi_initialize(struct device *dev)
+{
+ struct ocelot_ddata *ddata = dev_get_drvdata(dev);
+ u32 val, check;
+ int err;
+
+ val = OCELOT_SPI_BYTE_ORDER;
+
+ /*
+ * The SPI address must be big-endian, but we want the payload to match
+ * our CPU. The setting is two bits (0 and 1), but they are repeated so
+ * that the write is valid from any current configuration. The four
+ * configurations are:
+ *
+ * 0b00: little-endian, MSB first
+ * | 111111 | 22221111 | 33222222 |
+ * | 76543210 | 54321098 | 32109876 | 10987654 |
+ *
+ * 0b01: big-endian, MSB first
+ * | 33222222 | 22221111 | 111111 | |
+ * | 10987654 | 32109876 | 54321098 | 76543210 |
+ *
+ * 0b10: little-endian, LSB first
+ * | 111111 | 11112222 | 22222233 |
+ * | 01234567 | 89012345 | 67890123 | 45678901 |
+ *
+ * 0b11: big-endian, LSB first
+ * | 22222233 | 11112222 | 111111 | |
+ * | 45678901 | 67890123 | 89012345 | 01234567 |
+ */
+ err = regmap_write(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CTRL, val);
+ if (err)
+ return err;
+
+ /*
+ * Apply the number of padding bytes between a read request and the data
+ * payload. Some registers have access times of up to 1us, so if the
+ * first payload bit is shifted out too quickly, the read will fail.
+ */
+ val = ddata->spi_padding_bytes;
+ err = regmap_write(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CFGSTAT, val);
+ if (err)
+ return err;
+
+ /*
+ * After we write the interface configuration, read it back here. This
+ * will verify several different things. The first is that the number of
+ * padding bytes actually got written correctly. These are found in bits
+ * 0:3.
+ *
+ * The second is that bit 16 is cleared. Bit 16 is IF_CFGSTAT:IF_STAT,
+ * and will be set if the register access is too fast, which would mean
+ * that the number of padding bytes is insufficient for the SPI bus
+ * frequency.
+ *
+ * The last check is for bits 31:24, which define the interface by which
+ * the registers are being accessed. Since we're accessing them via the
+ * serial interface, it must return IF_NUM_SI.
+ */
+ check = val | CFGSTAT_IF_NUM_SI;
+
+ err = regmap_read(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CFGSTAT, &val);
+ if (err)
+ return err;
+
+ if (check != val)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct regmap_config ocelot_spi_regmap_config = {
+ .reg_bits = 24,
+ .reg_stride = 4,
+ .reg_downshift = 2,
+ .val_bits = 32,
+
+ .write_flag_mask = 0x80,
+
+ .use_single_write = true,
+ .can_multi_write = false,
+
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
+static int ocelot_spi_regmap_bus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct spi_transfer xfers[3] = {0};
+ struct device *dev = context;
+ struct ocelot_ddata *ddata;
+ struct spi_device *spi;
+ struct spi_message msg;
+ unsigned int index = 0;
+
+ ddata = dev_get_drvdata(dev);
+ spi = to_spi_device(dev);
+
+ xfers[index].tx_buf = reg;
+ xfers[index].len = reg_size;
+ index++;
+
+ if (ddata->spi_padding_bytes) {
+ xfers[index].len = ddata->spi_padding_bytes;
+ xfers[index].tx_buf = ddata->dummy_buf;
+ xfers[index].dummy_data = 1;
+ index++;
+ }
+
+ xfers[index].rx_buf = val;
+ xfers[index].len = val_size;
+ index++;
+
+ spi_message_init_with_transfers(&msg, xfers, index);
+
+ return spi_sync(spi, &msg);
+}
+
+static int ocelot_spi_regmap_bus_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static const struct regmap_bus ocelot_spi_regmap_bus = {
+ .write = ocelot_spi_regmap_bus_write,
+ .read = ocelot_spi_regmap_bus_read,
+};
+
+struct regmap *ocelot_spi_init_regmap(struct device *dev, const struct resource *res)
+{
+ struct regmap_config regmap_config;
+
+ memcpy(&regmap_config, &ocelot_spi_regmap_config, sizeof(regmap_config));
+
+ regmap_config.name = res->name;
+ regmap_config.max_register = resource_size(res) - 1;
+ regmap_config.reg_base = res->start;
+
+ return devm_regmap_init(dev, &ocelot_spi_regmap_bus, dev, &regmap_config);
+}
+EXPORT_SYMBOL_NS(ocelot_spi_init_regmap, MFD_OCELOT_SPI);
+
+static int ocelot_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ocelot_ddata *ddata;
+ struct regmap *r;
+ int err;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ddata);
+
+ if (spi->max_speed_hz <= 500000) {
+ ddata->spi_padding_bytes = 0;
+ } else {
+ /*
+ * Calculation taken from the manual for IF_CFGSTAT:IF_CFG.
+ * Register access time is 1us, so we need to configure and send
+ * out enough padding bytes between the read request and the data
+ * transmission to cover at least 1 microsecond.
+ */
+ ddata->spi_padding_bytes = 1 + (spi->max_speed_hz / HZ_PER_MHZ + 2) / 8;
+
+ ddata->dummy_buf = devm_kzalloc(dev, ddata->spi_padding_bytes, GFP_KERNEL);
+ if (!ddata->dummy_buf)
+ return -ENOMEM;
+ }
+
+ spi->bits_per_word = 8;
+
+ err = spi_setup(spi);
+ if (err)
+ return dev_err_probe(&spi->dev, err, "Error performing SPI setup\n");
+
+ r = ocelot_spi_init_regmap(dev, &vsc7512_dev_cpuorg_resource);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ddata->cpuorg_regmap = r;
+
+ r = ocelot_spi_init_regmap(dev, &vsc7512_gcb_resource);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ddata->gcb_regmap = r;
+
+ /*
+ * The chip's SPI interface must be set up before the chip can be
+ * initialized and reset. This setup must be done before calling init,
+ * and again after a chip reset has been performed.
+ */
+ err = ocelot_spi_initialize(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing SPI bus\n");
+
+ err = ocelot_chip_reset(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error resetting device\n");
+
+ /*
+ * A chip reset will clear the SPI configuration, so it needs to be done
+ * again before we can access any registers.
+ */
+ err = ocelot_spi_initialize(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing SPI bus after reset\n");
+
+ err = ocelot_core_init(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing Ocelot core\n");
+
+ return 0;
+}
+
+static const struct spi_device_id ocelot_spi_ids[] = {
+ { "vsc7512", 0 },
+ { }
+};
+
+static const struct of_device_id ocelot_spi_of_match[] = {
+ { .compatible = "mscc,vsc7512" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ocelot_spi_of_match);
+
+static struct spi_driver ocelot_spi_driver = {
+ .driver = {
+ .name = "ocelot-soc",
+ .of_match_table = ocelot_spi_of_match,
+ },
+ .id_table = ocelot_spi_ids,
+ .probe = ocelot_spi_probe,
+};
+module_spi_driver(ocelot_spi_driver);
+
+MODULE_DESCRIPTION("SPI Controlled Ocelot Chip Driver");
+MODULE_AUTHOR("Colin Foster <colin.foster@in-advantage.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
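The padding formula in ocelot_spi_probe() amounts to one dummy byte of headroom plus enough extra bytes to keep the dummy phase at or above the 1us register access time. A couple of hedged example values, using the same integer arithmetic as the code (bus speeds chosen purely for illustration):

/*
 * max_speed_hz =  5 MHz -> 1 + (5 + 2) / 8  = 1 padding byte  (8 cycles, 1.6us)
 * max_speed_hz =  8 MHz -> 1 + (8 + 2) / 8  = 2 padding bytes (16 cycles, 2us)
 * max_speed_hz = 25 MHz -> 1 + (25 + 2) / 8 = 4 padding bytes (32 cycles, 1.28us)
 */
static int ocelot_spi_padding_bytes(u32 max_speed_hz)
{
	if (max_speed_hz <= 500000)
		return 0;

	return 1 + (max_speed_hz / HZ_PER_MHZ + 2) / 8;
}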
diff --git a/drivers/mfd/ocelot.h b/drivers/mfd/ocelot.h
new file mode 100644
index 000000000000..b8bc2f1486e2
--- /dev/null
+++ b/drivers/mfd/ocelot.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2021, 2022 Innovative Advantage Inc. */
+
+#ifndef _MFD_OCELOT_H
+#define _MFD_OCELOT_H
+
+#include <linux/kconfig.h>
+
+struct device;
+struct regmap;
+struct resource;
+
+/**
+ * struct ocelot_ddata - Private data for an external Ocelot chip
+ * @gcb_regmap: General Configuration Block regmap. Used for
+ * operations like chip reset.
+ * @cpuorg_regmap: CPU Device Origin Block regmap. Used for operations
+ * like SPI bus configuration.
+ * @spi_padding_bytes: Number of padding bytes that must be thrown out before
+ * read data gets returned. This is calculated during
+ * initialization based on bus speed.
+ * @dummy_buf: Zero-filled buffer of spi_padding_bytes size. The dummy
+ * bytes that will be sent out between the address and
+ * data of a SPI read operation.
+ */
+struct ocelot_ddata {
+ struct regmap *gcb_regmap;
+ struct regmap *cpuorg_regmap;
+ int spi_padding_bytes;
+ void *dummy_buf;
+};
+
+int ocelot_chip_reset(struct device *dev);
+int ocelot_core_init(struct device *dev);
+
+/* SPI-specific routines that won't be necessary for other interfaces */
+struct regmap *ocelot_spi_init_regmap(struct device *dev,
+ const struct resource *res);
+
+#define OCELOT_SPI_BYTE_ORDER_LE 0x00000000
+#define OCELOT_SPI_BYTE_ORDER_BE 0x81818181
+
+#ifdef __LITTLE_ENDIAN
+#define OCELOT_SPI_BYTE_ORDER OCELOT_SPI_BYTE_ORDER_LE
+#else
+#define OCELOT_SPI_BYTE_ORDER OCELOT_SPI_BYTE_ORDER_BE
+#endif
+
+#endif
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index f5b3fa973b13..8b7429bd2e3e 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -700,7 +700,7 @@ err_i2c:
return ret;
}
-static int palmas_i2c_remove(struct i2c_client *i2c)
+static void palmas_i2c_remove(struct i2c_client *i2c)
{
struct palmas *palmas = i2c_get_clientdata(i2c);
int i;
@@ -716,8 +716,6 @@ static int palmas_i2c_remove(struct i2c_client *i2c)
pm_power_off = NULL;
palmas_dev = NULL;
}
-
- return 0;
}
static const struct i2c_device_id palmas_i2c_id[] = {
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index e9c565cf0f54..4ccc2c3e7681 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -273,7 +273,7 @@ err2:
return ret;
}
-static int pcf50633_remove(struct i2c_client *client)
+static void pcf50633_remove(struct i2c_client *client)
{
struct pcf50633 *pcf = i2c_get_clientdata(client);
int i;
@@ -289,8 +289,6 @@ static int pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
-
- return 0;
}
static const struct i2c_device_id pcf50633_id_table[] = {
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index c748fd29a220..3b5acf7ca39c 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -287,7 +287,7 @@ static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return 0;
}
-static int retu_remove(struct i2c_client *i2c)
+static void retu_remove(struct i2c_client *i2c)
{
struct retu_dev *rdev = i2c_get_clientdata(i2c);
@@ -297,8 +297,6 @@ static int retu_remove(struct i2c_client *i2c)
}
mfd_remove_devices(rdev->dev);
regmap_del_irq_chip(i2c->irq, rdev->irq_data);
-
- return 0;
}
static const struct i2c_device_id retu_id[] = {
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 4142b638e5fa..d5d641efa077 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -778,7 +778,7 @@ err_irq:
return ret;
}
-static int rk808_remove(struct i2c_client *client)
+static void rk808_remove(struct i2c_client *client)
{
struct rk808 *rk808 = i2c_get_clientdata(client);
@@ -792,8 +792,6 @@ static int rk808_remove(struct i2c_client *client)
pm_power_off = NULL;
unregister_restart_handler(&rk808_restart_handler);
-
- return 0;
}
static int __maybe_unused rk8xx_suspend(struct device *dev)
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 384acb459427..eb8005b4e58d 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -241,7 +241,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c)
return rn5t618_irq_init(priv);
}
-static int rn5t618_i2c_remove(struct i2c_client *i2c)
+static void rn5t618_i2c_remove(struct i2c_client *i2c)
{
if (i2c == rn5t618_pm_power_off) {
rn5t618_pm_power_off = NULL;
@@ -249,8 +249,6 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
}
unregister_restart_handler(&rn5t618_restart_handler);
-
- return 0;
}
static int __maybe_unused rn5t618_i2c_suspend(struct device *dev)
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index dc001c9791c1..f716ab8039a0 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -146,13 +146,11 @@ static int rsmu_i2c_probe(struct i2c_client *client,
return rsmu_core_init(rsmu);
}
-static int rsmu_i2c_remove(struct i2c_client *client)
+static void rsmu_i2c_remove(struct i2c_client *client)
{
struct rsmu_ddata *rsmu = i2c_get_clientdata(client);
rsmu_core_exit(rsmu);
-
- return 0;
}
static const struct i2c_device_id rsmu_i2c_id[] = {
diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c
index fb3bd788a3eb..c6d34dc2b520 100644
--- a/drivers/mfd/rt4831.c
+++ b/drivers/mfd/rt4831.c
@@ -87,7 +87,7 @@ static int rt4831_probe(struct i2c_client *client)
ARRAY_SIZE(rt4831_subdevs), NULL, 0, NULL);
}
-static int rt4831_remove(struct i2c_client *client)
+static void rt4831_remove(struct i2c_client *client)
{
struct regmap *regmap = dev_get_regmap(&client->dev, NULL);
int ret;
@@ -96,8 +96,6 @@ static int rt4831_remove(struct i2c_client *client)
ret = regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK);
if (ret)
dev_warn(&client->dev, "Failed to disable outputs (%pe)\n", ERR_PTR(ret));
-
- return 0;
}
static const struct of_device_id __maybe_unused rt4831_of_match[] = {
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index a2635c2d9d1a..8166949b725c 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -835,7 +835,7 @@ free_gpio:
return rval;
}
-static int si476x_core_remove(struct i2c_client *client)
+static void si476x_core_remove(struct i2c_client *client)
{
struct si476x_core *core = i2c_get_clientdata(client);
@@ -851,8 +851,6 @@ static int si476x_core_remove(struct i2c_client *client)
if (gpio_is_valid(core->gpio_reset))
gpio_free(core->gpio_reset);
-
- return 0;
}
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 122f96094410..5dd7d9688459 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -467,13 +467,11 @@ err_chip_exit:
return ret;
}
-static int stmfx_remove(struct i2c_client *client)
+static void stmfx_remove(struct i2c_client *client)
{
stmfx_irq_exit(client);
stmfx_chip_exit(client);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index d3eedf3d607e..4d55494a97c4 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -91,13 +91,11 @@ stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return stmpe_probe(&i2c_ci, partnum);
}
-static int stmpe_i2c_remove(struct i2c_client *i2c)
+static void stmpe_i2c_remove(struct i2c_client *i2c)
{
struct stmpe *stmpe = dev_get_drvdata(&i2c->dev);
stmpe_remove(stmpe);
-
- return 0;
}
static const struct i2c_device_id stmpe_i2c_id[] = {
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 13583cdb93b6..d5d0ec117acb 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -429,13 +429,11 @@ static int tc3589x_probe(struct i2c_client *i2c,
return 0;
}
-static int tc3589x_remove(struct i2c_client *client)
+static void tc3589x_remove(struct i2c_client *client)
{
struct tc3589x *tc3589x = i2c_get_clientdata(client);
mfd_remove_devices(tc3589x->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index c906324d293e..b360568ea675 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -179,7 +179,7 @@ static int tps6105x_probe(struct i2c_client *client,
return ret;
}
-static int tps6105x_remove(struct i2c_client *client)
+static void tps6105x_remove(struct i2c_client *client)
{
struct tps6105x *tps6105x = i2c_get_clientdata(client);
@@ -189,8 +189,6 @@ static int tps6105x_remove(struct i2c_client *client)
regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_MODE_MASK,
TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
-
- return 0;
}
static const struct i2c_device_id tps6105x_id[] = {
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 7e7dbee58ca9..c2afa2e69f42 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -501,7 +501,7 @@ static int tps65010_gpio_get(struct gpio_chip *chip, unsigned offset)
static struct tps65010 *the_tps;
-static int tps65010_remove(struct i2c_client *client)
+static void tps65010_remove(struct i2c_client *client)
{
struct tps65010 *tps = i2c_get_clientdata(client);
struct tps65010_board *board = dev_get_platdata(&client->dev);
@@ -517,7 +517,6 @@ static int tps65010_remove(struct i2c_client *client)
cancel_delayed_work_sync(&tps->work);
debugfs_remove(tps->file);
the_tps = NULL;
- return 0;
}
static int tps65010_probe(struct i2c_client *client,
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index cbae9777a24e..81a7360a87bb 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -111,14 +111,12 @@ static int tps65086_probe(struct i2c_client *client,
return ret;
}
-static int tps65086_remove(struct i2c_client *client)
+static void tps65086_remove(struct i2c_client *client)
{
struct tps65086 *tps = i2c_get_clientdata(client);
if (tps->irq > 0)
regmap_del_irq_chip(tps->irq, tps->irq_data);
-
- return 0;
}
static const struct i2c_device_id tps65086_id_table[] = {
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 8e8da204a02e..eebd60601b01 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -374,7 +374,7 @@ static int tps65217_probe(struct i2c_client *client)
return 0;
}
-static int tps65217_remove(struct i2c_client *client)
+static void tps65217_remove(struct i2c_client *client)
{
struct tps65217 *tps = i2c_get_clientdata(client);
unsigned int virq;
@@ -388,8 +388,6 @@ static int tps65217_remove(struct i2c_client *client)
irq_domain_remove(tps->irq_domain);
tps->irq_domain = NULL;
-
- return 0;
}
static const struct i2c_device_id tps65217_id_table[] = {
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index c9303d3d6602..fb340da64bbc 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -579,7 +579,7 @@ err_mfd_add:
return ret;
}
-static int tps6586x_i2c_remove(struct i2c_client *client)
+static void tps6586x_i2c_remove(struct i2c_client *client)
{
struct tps6586x *tps6586x = i2c_get_clientdata(client);
@@ -587,7 +587,6 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
mfd_remove_devices(tps6586x->dev);
if (client->irq)
free_irq(client->irq, tps6586x);
- return 0;
}
static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index afb7f7d97dc0..7e2b19efe867 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -43,13 +43,11 @@ static int tps65912_i2c_probe(struct i2c_client *client,
return tps65912_device_init(tps);
}
-static int tps65912_i2c_remove(struct i2c_client *client)
+static void tps65912_i2c_remove(struct i2c_client *client)
{
struct tps65912 *tps = i2c_get_clientdata(client);
tps65912_device_exit(tps);
-
- return 0;
}
static const struct i2c_device_id tps65912_i2c_id_table[] = {
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 2cb9326f3e61..2679c41232e6 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -727,7 +727,7 @@ static void clocks_init(struct device *dev)
/*----------------------------------------------------------------------*/
-static int twl_remove(struct i2c_client *client)
+static void twl_remove(struct i2c_client *client)
{
unsigned i, num_slaves;
@@ -745,7 +745,6 @@ static int twl_remove(struct i2c_client *client)
twl->client = NULL;
}
twl_priv->ready = false;
- return 0;
}
static struct of_dev_auxdata twl_auxdata_lookup[] = {
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index b9c6d94b4002..f429b8f00db6 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -808,7 +808,7 @@ gpio_err:
return ret;
}
-static int twl6040_remove(struct i2c_client *client)
+static void twl6040_remove(struct i2c_client *client)
{
struct twl6040 *twl6040 = i2c_get_clientdata(client);
@@ -820,8 +820,6 @@ static int twl6040_remove(struct i2c_client *client)
mfd_remove_devices(&client->dev);
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
-
- return 0;
}
static const struct i2c_device_id twl6040_i2c_id[] = {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 7b1d270722ba..7e88f5b0abe6 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -657,13 +657,11 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
return wm8994_device_init(wm8994, i2c->irq);
}
-static int wm8994_i2c_remove(struct i2c_client *i2c)
+static void wm8994_i2c_remove(struct i2c_client *i2c)
{
struct wm8994 *wm8994 = i2c_get_clientdata(i2c);
wm8994_device_exit(wm8994);
-
- return 0;
}
static const struct i2c_device_id wm8994_i2c_id[] = {
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 0ee0c6d808c3..28ffb4377d98 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -67,10 +67,9 @@ static int ad_dpot_i2c_probe(struct i2c_client *client,
return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
-static int ad_dpot_i2c_remove(struct i2c_client *client)
+static void ad_dpot_i2c_remove(struct i2c_client *client)
{
ad_dpot_remove(&client->dev);
- return 0;
}
static const struct i2c_device_id ad_dpot_id[] = {
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 6fff44b952bd..a32431f4b370 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -242,7 +242,7 @@ als_error1:
return res;
}
-static int apds9802als_remove(struct i2c_client *client)
+static void apds9802als_remove(struct i2c_client *client)
{
struct als_data *data = i2c_get_clientdata(client);
@@ -256,7 +256,6 @@ static int apds9802als_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
kfree(data);
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 45f5b997a0e1..e2100cc42ce8 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1185,7 +1185,7 @@ fail1:
return err;
}
-static int apds990x_remove(struct i2c_client *client)
+static void apds990x_remove(struct i2c_client *client)
{
struct apds990x_chip *chip = i2c_get_clientdata(client);
@@ -1205,7 +1205,6 @@ static int apds990x_remove(struct i2c_client *client)
regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
kfree(chip);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 0581bb9cef2e..d0dfa674414c 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1280,7 +1280,7 @@ fail0:
return err;
}
-static int bh1770_remove(struct i2c_client *client)
+static void bh1770_remove(struct i2c_client *client)
{
struct bh1770_chip *chip = i2c_get_clientdata(client);
@@ -1299,8 +1299,6 @@ static int bh1770_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 42f316c2d719..0698ddc5f4d5 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -228,11 +228,10 @@ static int ds1682_probe(struct i2c_client *client,
return rc;
}
-static int ds1682_remove(struct i2c_client *client)
+static void ds1682_remove(struct i2c_client *client)
{
sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
sysfs_remove_group(&client->dev.kobj, &ds1682_group);
- return 0;
}
static const struct i2c_device_id ds1682_id[] = {
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 633e1cf08d6e..938c4f41b98c 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -791,7 +791,7 @@ static int at24_probe(struct i2c_client *client)
return 0;
}
-static int at24_remove(struct i2c_client *client)
+static void at24_remove(struct i2c_client *client)
{
struct at24_data *at24 = i2c_get_clientdata(client);
@@ -801,8 +801,6 @@ static int at24_remove(struct i2c_client *client)
regulator_disable(at24->vcc_reg);
pm_runtime_set_suspended(&client->dev);
}
-
- return 0;
}
static int __maybe_unused at24_suspend(struct device *dev)
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 9fbfe784d710..c8c6deb7ed89 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -219,14 +219,12 @@ static int ee1004_probe(struct i2c_client *client)
return err;
}
-static int ee1004_remove(struct i2c_client *client)
+static void ee1004_remove(struct i2c_client *client)
{
/* Remove page select clients if this is the last device */
mutex_lock(&ee1004_bus_lock);
ee1004_cleanup(EE1004_NUM_PAGES);
mutex_unlock(&ee1004_bus_lock);
-
- return 0;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 34fa385dfd4b..4a9445fea93d 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -183,11 +183,9 @@ static int eeprom_probe(struct i2c_client *client,
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
-static int eeprom_remove(struct i2c_client *client)
+static void eeprom_remove(struct i2c_client *client)
{
sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
-
- return 0;
}
static const struct i2c_device_id eeprom_id[] = {
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 9aec3338e37d..ada2a3af36d7 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1405,7 +1405,7 @@ err_free_pdev:
/*
* idt_remove() - IDT 89HPESx driver remove() callback method
*/
-static int idt_remove(struct i2c_client *client)
+static void idt_remove(struct i2c_client *client)
{
struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);
@@ -1417,8 +1417,6 @@ static int idt_remove(struct i2c_client *client)
/* Discard driver data structure */
idt_free_pdev(pdev);
-
- return 0;
}
/*
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 9da81f6d4a1c..6bd4f4339af4 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -173,7 +173,7 @@ exit_kfree:
return err;
}
-static int max6875_remove(struct i2c_client *client)
+static void max6875_remove(struct i2c_client *client)
{
struct max6875_data *data = i2c_get_clientdata(client);
@@ -181,8 +181,6 @@ static int max6875_remove(struct i2c_client *client)
sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr);
kfree(data);
-
- return 0;
}
static const struct i2c_device_id max6875_id[] = {
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 93ebd174d848..5d9e3483b89d 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -25,7 +25,7 @@
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
-#define FASTRPC_MAX_SESSIONS 13 /*12 compute, 1 cpz*/
+#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
@@ -1943,7 +1943,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
- sess = &cctx->session[cctx->sesscount];
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
+ dev_err(&pdev->dev, "too many sessions\n");
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ return -ENOSPC;
+ }
+ sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
@@ -1956,13 +1961,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
- dup_sess = &cctx->session[cctx->sesscount];
+ dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
- cctx->sesscount++;
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
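The sesscount handling above now checks capacity before a slot is handed out and only then post-increments; a stand-alone sketch of that guard-then-advance pattern (illustrative names, not the fastrpc structures; the real driver does this under cctx->lock):

#define MAX_SLOTS 14

struct slot { bool valid; };

static int reserve_slot(struct slot *pool, int *count)
{
	if (*count >= MAX_SLOTS)
		return -ENOSPC;		/* refuse instead of indexing past the array */

	pool[*count].valid = true;	/* claim the slot ... */
	return (*count)++;		/* ... then advance the counter */
}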
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 572a2ff10f00..42b9adef28a3 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -116,10 +116,9 @@ static int hmc6352_probe(struct i2c_client *client,
return 0;
}
-static int hmc6352_remove(struct i2c_client *client)
+static void hmc6352_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
- return 0;
}
static const struct i2c_device_id hmc6352_id[] = {
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index c0fe3295c330..cbaf6d35e854 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -1039,6 +1039,7 @@ static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
size_t count, loff_t *ppos)
{
+ struct inode *inode;
struct ibmvmc_buffer *vmc_buffer;
struct ibmvmc_file_session *session;
struct crq_server_adapter *adapter;
@@ -1122,8 +1123,9 @@ static ssize_t ibmvmc_write(struct file *file, const char *buffer,
if (p == buffer)
goto out;
- file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
- mark_inode_dirty(file->f_path.dentry->d_inode);
+ inode = file_inode(file);
+ inode->i_mtime = current_time(inode);
+ mark_inode_dirty(inode);
dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
(unsigned long)file, (unsigned long)count);
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 0f9ea75b0b18..2c4bb6d6e1a0 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -93,7 +93,7 @@ static int ics932s401_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int ics932s401_remove(struct i2c_client *client);
+static void ics932s401_remove(struct i2c_client *client);
static const struct i2c_device_id ics932s401_id[] = {
{ "ics932s401", 0 },
@@ -460,13 +460,12 @@ exit:
return err;
}
-static int ics932s401_remove(struct i2c_client *client)
+static void ics932s401_remove(struct i2c_client *client)
{
struct ics932s401_data *data = i2c_get_clientdata(client);
sysfs_remove_group(&client->dev.kobj, &data->attrs);
kfree(data);
- return 0;
}
module_i2c_driver(ics932s401_driver);
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 703d20e83ebd..8ab61be79c76 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -410,12 +410,11 @@ exit_kfree:
return err;
}
-static int isl29003_remove(struct i2c_client *client)
+static void isl29003_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group);
isl29003_set_power_state(client, 0);
kfree(i2c_get_clientdata(client));
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index fc5ff2805b94..c6f2a94f501a 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -171,11 +171,10 @@ static int isl29020_probe(struct i2c_client *client,
return res;
}
-static int isl29020_remove(struct i2c_client *client)
+static void isl29020_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
- return 0;
}
static const struct i2c_device_id isl29020_id[] = {
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 52555d2e824b..d7daa01fe7ca 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -177,7 +177,7 @@ fail:
return ret;
}
-static int lis3lv02d_i2c_remove(struct i2c_client *client)
+static void lis3lv02d_i2c_remove(struct i2c_client *client)
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
@@ -190,7 +190,6 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client)
regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
lis3_dev.regulators);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index 71483cb1e422..5245cf6013c9 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -20,6 +20,13 @@ static noinline int lkdtm_increment_int(int *counter)
return *counter;
}
+
+/* Don't allow the compiler to inline the calls. */
+static noinline void lkdtm_indirect_call(void (*func)(int *))
+{
+ func(&called_count);
+}
+
/*
* This tries to call an indirect function with a mismatched prototype.
*/
@@ -29,15 +36,11 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
* Matches lkdtm_increment_void()'s prototype, but not
* lkdtm_increment_int()'s prototype.
*/
- void (*func)(int *);
-
pr_info("Calling matched prototype ...\n");
- func = lkdtm_increment_void;
- func(&called_count);
+ lkdtm_indirect_call(lkdtm_increment_void);
pr_info("Calling mismatched prototype ...\n");
- func = (void *)lkdtm_increment_int;
- func(&called_count);
+ lkdtm_indirect_call((void *)lkdtm_increment_int);
pr_err("FAIL: survived mismatched prototype function call!\n");
pr_expected_config(CONFIG_CFI_CLANG);
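A hedged note on the helper added above: keeping the indirect call in its own noinline function prevents the optimizer from turning it into a direct call, which would let the mismatched-prototype test pass without ever exercising CFI. The pattern in isolation:

static noinline void call_indirect(void (*fn)(int *), int *arg)
{
	fn(arg);	/* remains an indirect call, so CFI checks the target's type */
}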
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index 080293fa3c52..015927665678 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -10,28 +10,31 @@
static volatile int fortify_scratch_space;
-static void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFY_STR_OBJECT(void)
{
struct target {
char a[10];
- } target[2] = {};
+ int foo;
+ } target[3] = {};
/*
* Using volatile prevents the compiler from determining the value of
* 'size' at compile time. Without that, we would get a compile error
* rather than a runtime error.
*/
- volatile int size = 11;
+ volatile int size = 20;
+
+ pr_info("trying to strcmp() past the end of a struct\n");
- pr_info("trying to read past the end of a struct\n");
+ strncpy(target[0].a, target[1].a, size);
/* Store result to global to prevent the code from being eliminated */
- fortify_scratch_space = memcmp(&target[0], &target[1], size);
+ fortify_scratch_space = target[0].a[3];
- pr_err("FAIL: fortify did not block an object overread!\n");
+ pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
-static void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFY_STR_MEMBER(void)
{
struct target {
char a[10];
@@ -44,7 +47,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
- pr_info("trying to strncpy past the end of a member of a struct\n");
+ pr_info("trying to strncpy() past the end of a struct member...\n");
/*
* strncpy(target.a, src, 20); will hit a compile error because the
@@ -56,7 +59,72 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
- pr_err("FAIL: fortify did not block an sub-object overrun!\n");
+ pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+
+ kfree(src);
+}
+
+static void lkdtm_FORTIFY_MEM_OBJECT(void)
+{
+ int before[10];
+ struct target {
+ char a[10];
+ int foo;
+ } target = {};
+ int after[10];
+ /*
+ * Using volatile prevents the compiler from determining the value of
+ * 'size' at compile time. Without that, we would get a compile error
+ * rather than a runtime error.
+ */
+ volatile int size = 20;
+
+ memset(before, 0, sizeof(before));
+ memset(after, 0, sizeof(after));
+ fortify_scratch_space = before[5];
+ fortify_scratch_space = after[5];
+
+ pr_info("trying to memcpy() past the end of a struct\n");
+
+ pr_info("0: %zu\n", __builtin_object_size(&target, 0));
+ pr_info("1: %zu\n", __builtin_object_size(&target, 1));
+ pr_info("s: %d\n", size);
+ memcpy(&target, &before, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+}
+
+static void lkdtm_FORTIFY_MEM_MEMBER(void)
+{
+ struct target {
+ char a[10];
+ char b[10];
+ } target;
+ volatile int size = 20;
+ char *src;
+
+ src = kmalloc(size, GFP_KERNEL);
+ strscpy(src, "over ten bytes", size);
+ size = strlen(src) + 1;
+
+ pr_info("trying to memcpy() past the end of a struct member...\n");
+
+ /*
+ * memcpy(target.a, src, 20); will hit a compile error because the
+ * compiler knows at build time that target.a < 20 bytes. Use a
+ * volatile to force a runtime error.
+ */
+ memcpy(target.a, src, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
@@ -67,7 +135,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
-static void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFY_STRSCPY(void)
{
char *src;
char dst[5];
@@ -136,9 +204,11 @@ static void lkdtm_FORTIFIED_STRSCPY(void)
}
static struct crashtype crashtypes[] = {
- CRASHTYPE(FORTIFIED_OBJECT),
- CRASHTYPE(FORTIFIED_SUBOBJECT),
- CRASHTYPE(FORTIFIED_STRSCPY),
+ CRASHTYPE(FORTIFY_STR_OBJECT),
+ CRASHTYPE(FORTIFY_STR_MEMBER),
+ CRASHTYPE(FORTIFY_MEM_OBJECT),
+ CRASHTYPE(FORTIFY_MEM_MEMBER),
+ CRASHTYPE(FORTIFY_STRSCPY),
};
struct crashtype_category fortify_crashtypes = {
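All of the renamed tests lean on the trick their comments describe: the copy length is read through a volatile so the compiler cannot prove the bound, forcing FORTIFY_SOURCE to catch the overflow at run time rather than rejecting the call at build time. A minimal sketch (assumes CONFIG_FORTIFY_SOURCE; not part of lkdtm):

static void fortify_runtime_sketch(void)
{
	char dst[10];
	char src[32] = "longer than ten bytes";
	volatile int len = 20;		/* hides the constant from the compiler */

	/* memcpy(dst, src, 20) would fail to build; with a volatile length
	 * the fortified memcpy traps here at run time instead. */
	memcpy(dst, src, len);
}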
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 6215ec995cd3..67db57249a34 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -330,7 +330,7 @@ static void lkdtm_USERCOPY_KERNEL(void)
pr_info("attempting bad copy_to_user from kernel text: %px\n",
vm_mmap);
- if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 59506ba6fc48..79305e4acce2 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -15,6 +15,7 @@
#include "mei_dev.h"
#include "client.h"
+#include "mkhi.h"
#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
@@ -89,20 +90,6 @@ struct mei_os_ver {
u8 reserved2;
} __packed;
-#define MKHI_FEATURE_PTT 0x10
-
-struct mkhi_rule_id {
- __le16 rule_type;
- u8 feature_id;
- u8 reserved;
-} __packed;
-
-struct mkhi_fwcaps {
- struct mkhi_rule_id id;
- u8 len;
- u8 data[];
-} __packed;
-
struct mkhi_fw_ver_block {
u16 minor;
u8 major;
@@ -115,22 +102,6 @@ struct mkhi_fw_ver {
struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS];
} __packed;
-#define MKHI_FWCAPS_GROUP_ID 0x3
-#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
-#define MKHI_GEN_GROUP_ID 0xFF
-#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
-struct mkhi_msg_hdr {
- u8 group_id;
- u8 command;
- u8 reserved;
- u8 result;
-} __packed;
-
-struct mkhi_msg {
- struct mkhi_msg_hdr hdr;
- u8 data[];
-} __packed;
-
#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fwcaps) + \
sizeof(struct mei_os_ver))
@@ -164,7 +135,6 @@ static int mei_osver(struct mei_cl_device *cldev)
sizeof(struct mkhi_fw_ver))
#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fw_ver_block) * (__num))
-#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
static int mei_fwver(struct mei_cl_device *cldev)
{
char buf[MKHI_FWVER_BUF_LEN];
@@ -187,7 +157,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
ret = 0;
bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0,
- MKHI_RCV_TIMEOUT);
+ cldev->bus->timeouts.mkhi_recv);
if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
/*
* Should be at least one version block,
@@ -218,6 +188,19 @@ static int mei_fwver(struct mei_cl_device *cldev)
return ret;
}
+static int mei_gfx_memory_ready(struct mei_cl_device *cldev)
+{
+ struct mkhi_gfx_mem_ready req = {0};
+ unsigned int mode = MEI_CL_IO_TX_INTERNAL;
+
+ req.hdr.group_id = MKHI_GROUP_ID_GFX;
+ req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ;
+ req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED;
+
+ dev_dbg(&cldev->dev, "Sending memory ready command\n");
+ return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode);
+}
+
static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
@@ -264,6 +247,39 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
dev_err(&cldev->dev, "FW version command failed %d\n", ret);
mei_cldev_disable(cldev);
}
+
+static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ /* No need to enable the client if nothing is needed from it */
+ if (!cldev->bus->fw_f_fw_ver_supported &&
+ cldev->bus->pxp_mode != MEI_DEV_PXP_INIT)
+ return;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret)
+ return;
+
+ if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) {
+ ret = mei_gfx_memory_ready(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "memory ready command failed %d\n", ret);
+ else
+ dev_dbg(&cldev->dev, "memory ready command sent\n");
+ /* we go to reset after that */
+ cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP;
+ goto out;
+ }
+
+ ret = mei_fwver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "FW version command failed %d\n",
+ ret);
+out:
+ mei_cldev_disable(cldev);
+}
+
/**
* mei_wd - wd client on the bus, change protocol version
* as the API has changed.
@@ -503,6 +519,26 @@ static void vt_support(struct mei_cl_device *cldev)
cldev->do_match = 1;
}
+/**
+ * pxp_is_ready - enable bus client if pxp is ready
+ *
+ * @cldev: me clients device
+ */
+static void pxp_is_ready(struct mei_cl_device *cldev)
+{
+ struct mei_device *bus = cldev->bus;
+
+ switch (bus->pxp_mode) {
+ case MEI_DEV_PXP_READY:
+ case MEI_DEV_PXP_DEFAULT:
+ cldev->do_match = 1;
+ break;
+ default:
+ cldev->do_match = 0;
+ break;
+ }
+}
+
#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
static struct mei_fixup {
@@ -516,10 +552,10 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
- MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver),
+ MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver),
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
MEI_FIXUP(MEI_UUID_ANY, vt_support),
- MEI_FIXUP(MEI_UUID_PAVP, whitelist),
+ MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready),
};
/**
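With the table change above, the PAVP client is matched on the bus only once the PXP state allows it instead of being unconditionally whitelisted; a hedged sketch of that gating hook (simplified from pxp_is_ready()):

static void example_pxp_gate(struct mei_cl_device *cldev)
{
	/* expose the client only when the firmware side is usable */
	cldev->do_match = (cldev->bus->pxp_mode == MEI_DEV_PXP_READY ||
			   cldev->bus->pxp_mode == MEI_DEV_PXP_DEFAULT);
}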
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 31264ab2eb13..0b2fbe1335a7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -870,7 +870,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
@@ -945,7 +945,7 @@ static int __mei_cl_disconnect(struct mei_cl *cl)
wait_event_timeout(cl->wait,
cl->state == MEI_FILE_DISCONNECT_REPLY ||
cl->state == MEI_FILE_DISCONNECTED,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
rets = cl->status;
@@ -1065,7 +1065,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -1164,7 +1164,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
@@ -1562,7 +1562,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
cl->notify_en == request ||
cl->status ||
!mei_cl_is_connected(cl),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->notify_en != request && !cl->status)
@@ -2336,7 +2336,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!cl->dma_mapped && !cl->status)
@@ -2415,7 +2415,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->dma_mapped && !cl->status)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 1ce61e9e24fc..3b098d4c8e3d 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2016, Intel Corporation. All rights reserved
+ * Copyright (c) 2012-2022, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -86,6 +86,20 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active);
+static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state)
+{
+#define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state
+ switch (state) {
+ MEI_PXP_MODE(DEFAULT);
+ MEI_PXP_MODE(INIT);
+ MEI_PXP_MODE(SETUP);
+ MEI_PXP_MODE(READY);
+ default:
+ return "unknown";
+ }
+#undef MEI_PXP_MODE
+}
+
static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
{
struct mei_device *dev = m->private;
@@ -112,6 +126,9 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
seq_printf(m, "pg: %s, %s\n",
mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
mei_pg_state_str(mei_pg_state(dev)));
+
+ seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode));
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate);
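The new mei_dev_pxp_mode_str() uses a local stringify macro so the debugfs text cannot drift from the enum values; the same pattern with generic, hypothetical names:

static const char *example_state_str(int state)
{
#define CASE_STR(s) case s: return #s
	switch (state) {
	CASE_STR(EXAMPLE_STATE_INIT);
	CASE_STR(EXAMPLE_STATE_READY);
	default:
		return "unknown";
	}
#undef CASE_STR
}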
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index c8145e9b62b6..75765e4df4ed 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -13,6 +13,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
#include "mei_dev.h"
#include "hw-me.h"
@@ -31,6 +32,17 @@ static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val)
return 0;
}
+static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem)
+{
+ u32 low = lower_32_bits(mem->start);
+ u32 hi = upper_32_bits(mem->start);
+ u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID;
+
+ iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG);
+ iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG);
+ iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG);
+}
+
static int mei_gsc_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *aux_dev_id)
{
@@ -47,7 +59,7 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
device = &aux_dev->dev;
- dev = mei_me_dev_init(device, cfg);
+ dev = mei_me_dev_init(device, cfg, adev->slow_firmware);
if (!dev) {
ret = -ENOMEM;
goto err;
@@ -66,13 +78,33 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
dev_set_drvdata(device, dev);
- ret = devm_request_threaded_irq(device, hw->irq,
- mei_me_irq_quick_handler,
- mei_me_irq_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- if (ret) {
- dev_err(device, "irq register failed %d\n", ret);
- goto err;
+ if (adev->ext_op_mem.start) {
+ mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
+ dev->pxp_mode = MEI_DEV_PXP_INIT;
+ }
+
+ /* use polling */
+ if (mei_me_hw_use_polling(hw)) {
+ mei_disable_interrupts(dev);
+ mei_clear_interrupts(dev);
+ init_waitqueue_head(&hw->wait_active);
+ hw->is_active = true; /* start in active mode for initialization */
+ hw->polling_thread = kthread_run(mei_me_polling_thread, dev,
+ "kmegscirqd/%s", dev_name(device));
+ if (IS_ERR(hw->polling_thread)) {
+ ret = PTR_ERR(hw->polling_thread);
+ dev_err(device, "unable to create kernel thread: %d\n", ret);
+ goto err;
+ }
+ } else {
+ ret = devm_request_threaded_irq(device, hw->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(device, "irq register failed %d\n", ret);
+ goto err;
+ }
}
pm_runtime_get_noresume(device);
@@ -98,7 +130,8 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
register_err:
mei_stop(dev);
- devm_free_irq(device, hw->irq, dev);
+ if (!mei_me_hw_use_polling(hw))
+ devm_free_irq(device, hw->irq, dev);
err:
dev_err(device, "probe failed: %d\n", ret);
@@ -119,12 +152,17 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
mei_stop(dev);
+ hw = to_me_hw(dev);
+ if (mei_me_hw_use_polling(hw))
+ kthread_stop(hw->polling_thread);
+
mei_deregister(dev);
pm_runtime_disable(&aux_dev->dev);
mei_disable_interrupts(dev);
- devm_free_irq(&aux_dev->dev, hw->irq, dev);
+ if (!mei_me_hw_use_polling(hw))
+ devm_free_irq(&aux_dev->dev, hw->irq, dev);
}
static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
@@ -144,11 +182,22 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
static int __maybe_unused mei_gsc_pm_resume(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
+ struct auxiliary_device *aux_dev;
+ struct mei_aux_device *adev;
int err;
+ struct mei_me_hw *hw;
if (!dev)
return -ENODEV;
+ hw = to_me_hw(dev);
+ aux_dev = to_auxiliary_dev(device);
+ adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+ if (adev->ext_op_mem.start) {
+ mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
+ dev->pxp_mode = MEI_DEV_PXP_INIT;
+ }
+
err = mei_restart(dev);
if (err)
return err;
@@ -185,6 +234,9 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
if (mei_write_is_idle(dev)) {
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_ON;
+
+ if (mei_me_hw_use_polling(hw))
+ hw->is_active = false;
ret = 0;
} else {
ret = -EAGAIN;
@@ -209,6 +261,11 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_OFF;
+ if (mei_me_hw_use_polling(hw)) {
+ hw->is_active = true;
+ wake_up(&hw->wait_active);
+ }
+
mutex_unlock(&dev->device_lock);
irq_ret = mei_me_irq_thread_handler(1, dev);
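Taken together, the gsc-me.c changes make a negative IRQ number the cue to run interrupt-less: probe starts the poller instead of requesting an IRQ, and the teardown paths free the IRQ only when one was actually requested. A condensed hedged sketch of the probe-time decision (error handling trimmed):

if (mei_me_hw_use_polling(hw)) {	/* i.e. hw->irq < 0 */
	init_waitqueue_head(&hw->wait_active);
	hw->is_active = true;
	hw->polling_thread = kthread_run(mei_me_polling_thread, dev,
					 "kmegscirqd/%s", dev_name(device));
} else {
	ret = devm_request_threaded_irq(device, hw->irq,
					mei_me_irq_quick_handler,
					mei_me_irq_thread_handler,
					IRQF_ONESHOT, KBUILD_MODNAME, dev);
}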
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index cf2b8261da14..de712cbf5d07 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
@@ -232,7 +232,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
ret = wait_event_timeout(dev->wait_hbm_start,
dev->hbm_state != MEI_HBM_STARTING,
- mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
+ dev->timeouts.hbm);
mutex_lock(&dev->device_lock);
if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
@@ -275,7 +275,7 @@ int mei_hbm_start_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_STARTING;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -316,7 +316,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_DR_SETUP;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -351,7 +351,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_CAP_SETUP;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -385,7 +385,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -751,7 +751,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
return ret;
}
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 15e8e2b322b1..99966cd3e7d8 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#ifndef _MEI_HW_MEI_REGS_H_
@@ -127,6 +127,8 @@
# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
+# define GSC_CFG_HFS_5_BOOT_TYPE_MSK 0x00000003
+# define GSC_CFG_HFS_5_BOOT_TYPE_PXP 3
#define PCI_CFG_HFS_6 0x6C
/* MEI registers */
@@ -143,6 +145,11 @@
/* H_D0I3C - D0I3 Control */
#define H_D0I3C 0x800
+#define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100
+#define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104
+#define H_GSC_EXT_OP_MEM_LIMIT_REG 0x108
+#define GSC_EXT_OP_MEM_VALID BIT(31)
+
/* register bits of H_CSR (Host Control Status register) */
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
#define H_CBD 0xFF000000
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3a95fe7d4e33..9e2f781c6ed5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
+#include <linux/delay.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev)
*/
static void mei_me_intr_enable(struct mei_device *dev)
{
- u32 hcsr = mei_hcsr_read(dev);
+ u32 hcsr;
+
+ if (mei_me_hw_use_polling(to_me_hw(dev)))
+ return;
- hcsr |= H_CSR_IE_MASK;
+ hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
@@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ if (mei_me_hw_use_polling(hw))
+ return;
+
synchronize_irq(hw->irq);
}
@@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
+ if (!mei_me_hw_use_polling(to_me_hw(dev)))
+ hcsr |= H_CSR_IE_MASK;
+
+ hcsr |= H_IG | H_RDY;
mei_hcsr_set(dev, hcsr);
}
@@ -424,6 +434,29 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev)
}
/**
+ * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
+ *
+ * @dev: the device structure
+ */
+static void mei_gsc_pxp_check(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 fwsts5 = 0;
+
+ if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
+ return;
+
+ hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+ if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
+ dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
+ dev->pxp_mode = MEI_DEV_PXP_READY;
+ } else {
+ dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
+ }
+}
+
+/**
* mei_me_hw_ready_wait - wait until the me(hw) has turned ready
* or timeout is reached
*
@@ -435,13 +468,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
- mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
+ dev->timeouts.hw_ready);
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
dev_err(dev->dev, "wait hw ready failed\n");
return -ETIME;
}
+ mei_gsc_pxp_check(dev);
+
mei_me_hw_reset_release(dev);
dev->recvd_hw_ready = false;
return 0;
@@ -697,7 +732,6 @@ static void mei_me_pg_unset(struct mei_device *dev)
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
dev->pg_event = MEI_PG_EVENT_WAIT;
@@ -708,7 +742,8 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
@@ -734,7 +769,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
@@ -746,7 +780,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
reply:
@@ -762,7 +797,8 @@ reply:
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
@@ -877,8 +913,6 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev)
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
- unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
u32 reg;
@@ -900,7 +934,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
@@ -920,7 +955,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@ -980,7 +1016,6 @@ on:
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
int ret;
u32 reg;
@@ -1003,7 +1038,8 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@ -1176,7 +1212,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
- if (!intr_enable)
+ if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
hcsr &= ~H_CSR_IE_MASK;
dev->recvd_hw_ready = false;
@@ -1259,7 +1295,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
- dev_warn(dev->dev, "FW not ready: resetting.\n");
+ dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
+ dev->dev_state, dev->pxp_mode);
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN)
mei_cl_all_disconnect(dev);
@@ -1331,6 +1368,66 @@ end:
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
+#define MEI_POLLING_TIMEOUT_ACTIVE 100
+#define MEI_POLLING_TIMEOUT_IDLE 500
+
+/**
+ * mei_me_polling_thread - interrupt register polling thread
+ *
+ * The thread monitors the interrupt source register and calls
+ * mei_me_irq_thread_handler() to handle the firmware
+ * input.
+ *
+ * The thread polls at the MEI_POLLING_TIMEOUT_ACTIVE interval while
+ * events are arriving; when idle, the interval grows by
+ * MEI_POLLING_TIMEOUT_ACTIVE per pass, up to MEI_POLLING_TIMEOUT_IDLE.
+ *
+ * @_dev: mei device
+ *
+ * Return: always 0
+ */
+int mei_me_polling_thread(void *_dev)
+{
+ struct mei_device *dev = _dev;
+ irqreturn_t irq_ret;
+ long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+
+ dev_dbg(dev->dev, "kernel thread is running\n");
+ while (!kthread_should_stop()) {
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr;
+
+ wait_event_timeout(hw->wait_active,
+ hw->is_active || kthread_should_stop(),
+ msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
+
+ if (kthread_should_stop())
+ break;
+
+ hcsr = mei_hcsr_read(dev);
+ if (me_intr_src(hcsr)) {
+ polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+ irq_ret = mei_me_irq_thread_handler(1, dev);
+ if (irq_ret != IRQ_HANDLED)
+ dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+ } else {
+ /*
+ * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
+ * up to MEI_POLLING_TIMEOUT_IDLE
+ */
+ polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
+ MEI_POLLING_TIMEOUT_ACTIVE,
+ MEI_POLLING_TIMEOUT_IDLE);
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_me_polling_thread);
+
static const struct mei_hw_ops mei_me_hw_ops = {
.trc_status = mei_me_trc_status,
@@ -1636,11 +1733,12 @@ EXPORT_SYMBOL_GPL(mei_me_get_cfg);
*
* @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
+ * @slow_fw: configure longer timeouts as FW is slow
*
* Return: The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_me_dev_init(struct device *parent,
- const struct mei_cfg *cfg)
+ const struct mei_cfg *cfg, bool slow_fw)
{
struct mei_device *dev;
struct mei_me_hw *hw;
@@ -1655,7 +1753,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, parent, &mei_me_hw_ops);
+ mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
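The poller's backoff is linear: any event resets the interval to the active rate, and each idle pass grows it by one active step until the idle cap. A self-contained hedged sketch of that policy (hypothetical helper, not in the driver):

#define POLL_ACTIVE_MS	100
#define POLL_IDLE_MS	500

static long next_poll_ms(long cur_ms, bool had_event)
{
	if (had_event)
		return POLL_ACTIVE_MS;	/* snap back to fast polling */

	/* grow by one active step per idle pass, capped at the idle rate */
	return clamp_val(cur_ms + POLL_ACTIVE_MS, POLL_ACTIVE_MS, POLL_IDLE_MS);
}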
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index a071c645e905..95cf830b7c7b 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -51,6 +51,9 @@ struct mei_cfg {
* @d0i3_supported: di03 support
* @hbuf_depth: depth of hardware host/write buffer in slots
* @read_fws: read FW status register handler
+ * @polling_thread: interrupt polling thread
+ * @wait_active: the polling thread activity wait queue
+ * @is_active: the device is active
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
@@ -60,10 +63,19 @@ struct mei_me_hw {
bool d0i3_supported;
u8 hbuf_depth;
int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
+ /* polling */
+ struct task_struct *polling_thread;
+ wait_queue_head_t wait_active;
+ bool is_active;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
+static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw)
+{
+ return hw->irq < 0;
+}
+
/**
* enum mei_cfg_idx - indices to platform specific configurations.
*
@@ -120,12 +132,13 @@ enum mei_cfg_idx {
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
struct mei_device *mei_me_dev_init(struct device *parent,
- const struct mei_cfg *cfg);
+ const struct mei_cfg *cfg, bool slow_fw);
int mei_me_pg_enter_sync(struct mei_device *dev);
int mei_me_pg_exit_sync(struct mei_device *dev);
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+int mei_me_polling_thread(void *_dev);
#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 00652c137cc7..9862c6cd3e32 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1201,7 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
- mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);
+ mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);
hw = to_txe_hw(dev);
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b46077b17114..e7e020dba6b1 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -16,11 +16,16 @@
#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+#define MEI_CL_CONNECT_TIMEOUT_SLOW 30 /* HPS: Client Connect Timeout, slow FW */
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
+#define MEI_HBM_TIMEOUT_SLOW 5 /* 5 seconds, slow FW */
+
+#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
+#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */
/*
* FW page size for DMA allocations
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index eb052005ca86..bac8852aad51 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -218,16 +218,6 @@ int mei_start(struct mei_device *dev)
goto err;
}
- if (!mei_host_is_ready(dev)) {
- dev_err(dev->dev, "host is not ready.\n");
- goto err;
- }
-
- if (!mei_hw_is_ready(dev)) {
- dev_err(dev->dev, "ME is not ready.\n");
- goto err;
- }
-
if (!mei_hbm_version_is_supported(dev)) {
dev_dbg(dev->dev, "MEI start failed.\n");
goto err;
@@ -320,6 +310,8 @@ void mei_stop(struct mei_device *dev)
mei_clear_interrupts(dev);
mei_synchronize_irq(dev);
+ /* to catch HW-initiated reset */
+ mei_cancel_work(dev);
mutex_lock(&dev->device_lock);
@@ -357,14 +349,16 @@ bool mei_write_is_idle(struct mei_device *dev)
EXPORT_SYMBOL_GPL(mei_write_is_idle);
/**
- * mei_device_init -- initialize mei_device structure
+ * mei_device_init - initialize mei_device structure
*
* @dev: the mei device
* @device: the device structure
+ * @slow_fw: configure longer timeouts as FW is slow
* @hw_ops: hw operations
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
+ bool slow_fw,
const struct mei_hw_ops *hw_ops)
{
/* setup our list array */
@@ -393,6 +387,8 @@ void mei_device_init(struct mei_device *dev,
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
+ dev->pxp_mode = MEI_DEV_PXP_DEFAULT;
+
/*
* Reserving the first client ID
* 0: Reserved for MEI Bus Message communications
@@ -402,6 +398,21 @@ void mei_device_init(struct mei_device *dev,
dev->pg_event = MEI_PG_EVENT_IDLE;
dev->ops = hw_ops;
dev->dev = device;
+
+ dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
+ dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
+ dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+ if (slow_fw) {
+ dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW);
+ dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW);
+ dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW);
+ } else {
+ dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+ dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
+ dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
+ }
}
EXPORT_SYMBOL_GPL(mei_device_init);
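Condensed, the slow-firmware path in mei_device_init() just selects between two constant sets once at init time; an equivalent hedged sketch using the hw.h definitions above:

dev->timeouts.cl_connect = mei_secs_to_jiffies(slow_fw ? MEI_CL_CONNECT_TIMEOUT_SLOW
						       : MEI_CL_CONNECT_TIMEOUT);
dev->timeouts.hbm        = mei_secs_to_jiffies(slow_fw ? MEI_HBM_TIMEOUT_SLOW
						       : MEI_HBM_TIMEOUT);
dev->timeouts.mkhi_recv  = msecs_to_jiffies(slow_fw ? MKHI_RCV_TIMEOUT_SLOW
						    : MKHI_RCV_TIMEOUT);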
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 786f7c8f7f61..930887e7e38d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -571,7 +571,7 @@ static int mei_ioctl_connect_vtag(struct file *file,
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 694f866f87ef..6bb3e1ba9ded 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -62,6 +62,21 @@ enum mei_dev_state {
MEI_DEV_POWER_UP
};
+/**
+ * enum mei_dev_pxp_mode - MEI PXP mode state
+ *
+ * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required
+ * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware
+ * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response
+ * @MEI_DEV_PXP_READY: device initialized
+ */
+enum mei_dev_pxp_mode {
+ MEI_DEV_PXP_DEFAULT = 0,
+ MEI_DEV_PXP_INIT = 1,
+ MEI_DEV_PXP_SETUP = 2,
+ MEI_DEV_PXP_READY = 3,
+};
+
const char *mei_dev_state_str(int state);
enum mei_file_transaction_states {
@@ -415,6 +430,17 @@ struct mei_fw_version {
#define MEI_MAX_FW_VER_BLOCKS 3
+struct mei_dev_timeouts {
+ unsigned long hw_ready; /* Timeout on ready message, in jiffies */
+ int connect; /* HPS: at least 2 seconds, in seconds */
+ unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */
+ int client_init; /* HPS: Clients Enumeration Timeout, in seconds */
+ unsigned long pgi; /* PG Isolation time response, in jiffies */
+ unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */
+ unsigned long hbm; /* HBM operation timeout, in jiffies */
+ unsigned long mkhi_recv; /* receive timeout, in jiffies */
+};
+
/**
* struct mei_device - MEI private device struct
*
@@ -443,6 +469,7 @@ struct mei_fw_version {
* @reset_count : number of consecutive resets
* @dev_state : device state
* @hbm_state : state of host bus message protocol
+ * @pxp_mode : PXP device mode
* @init_clients_timer : HBM init handshake timeout
*
* @pg_event : power gating event
@@ -480,6 +507,8 @@ struct mei_fw_version {
* @allow_fixed_address: allow user space to connect a fixed client
* @override_fixed_address: force allow fixed address behavior
*
+ * @timeouts: actual timeout values
+ *
* @reset_work : work item for the device reset
* @bus_rescan_work : work item for the bus rescan
*
@@ -524,6 +553,7 @@ struct mei_device {
unsigned long reset_count;
enum mei_dev_state dev_state;
enum mei_hbm_state hbm_state;
+ enum mei_dev_pxp_mode pxp_mode;
u16 init_clients_timer;
/*
@@ -568,6 +598,8 @@ struct mei_device {
bool allow_fixed_address;
bool override_fixed_address;
+ struct mei_dev_timeouts timeouts;
+
struct work_struct reset_work;
struct work_struct bus_rescan_work;
@@ -632,6 +664,7 @@ static inline u32 mei_slots2data(int slots)
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
+ bool slow_fw,
const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
diff --git a/drivers/misc/mei/mkhi.h b/drivers/misc/mei/mkhi.h
new file mode 100644
index 000000000000..1473ea489666
--- /dev/null
+++ b/drivers/misc/mei/mkhi.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+
+#ifndef _MEI_MKHI_H_
+#define _MEI_MKHI_H_
+
+#include <linux/types.h>
+
+#define MKHI_FEATURE_PTT 0x10
+
+#define MKHI_FWCAPS_GROUP_ID 0x3
+#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+#define MKHI_GEN_GROUP_ID 0xFF
+#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
+
+#define MKHI_GROUP_ID_GFX 0x30
+#define MKHI_GFX_RESET_WARN_CMD_REQ 0x0
+#define MKHI_GFX_MEMORY_READY_CMD_REQ 0x1
+
+/* Allow transition to PXP mode without approval */
+#define MKHI_GFX_MEM_READY_PXP_ALLOWED 0x1
+
+struct mkhi_rule_id {
+ __le16 rule_type;
+ u8 feature_id;
+ u8 reserved;
+} __packed;
+
+struct mkhi_fwcaps {
+ struct mkhi_rule_id id;
+ u8 len;
+ u8 data[];
+} __packed;
+
+struct mkhi_msg_hdr {
+ u8 group_id;
+ u8 command;
+ u8 reserved;
+ u8 result;
+} __packed;
+
+struct mkhi_msg {
+ struct mkhi_msg_hdr hdr;
+ u8 data[];
+} __packed;
+
+struct mkhi_gfx_mem_ready {
+ struct mkhi_msg_hdr hdr;
+ u32 flags;
+} __packed;
+
+#endif /* _MEI_MKHI_H_ */
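With the structures collected in mkhi.h, building the PXP memory-ready request is just a header plus flags; a hedged sketch that mirrors mei_gfx_memory_ready() above:

struct mkhi_gfx_mem_ready req = {0};

req.hdr.group_id = MKHI_GROUP_ID_GFX;
req.hdr.command  = MKHI_GFX_MEMORY_READY_CMD_REQ;
req.flags        = MKHI_GFX_MEM_READY_PXP_ALLOWED;
/* sent via __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, MEI_CL_IO_TX_INTERNAL) */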
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 5435604327a7..704cd0caa172 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -203,7 +203,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(&pdev->dev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg, false);
if (!dev) {
err = -ENOMEM;
goto end;
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 6d71865c8042..1652fb9b3856 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -389,7 +389,7 @@ exit:
return err;
}
-static int tsl2550_remove(struct i2c_client *client)
+static void tsl2550_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
@@ -397,8 +397,6 @@ static int tsl2550_remove(struct i2c_client *client)
tsl2550_set_power_state(client, 0);
kfree(i2c_get_clientdata(client));
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0fd91f749b3a..b89dca1f15e9 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -565,7 +565,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
- INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
+ INIT_WORK(&host->sdio_irq_work, sdio_irq_work);
timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index cee4c0b59f43..3662bf5320ce 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -870,7 +870,8 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -949,16 +950,17 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
-
- /*
- * Fetch switch information from card.
- */
- err = mmc_read_switch(card);
- if (err)
- return err;
}
/*
+ * Fetch switch information from card. Note, sd3_bus_mode can change if
+ * the voltage switch outcome changes, so do this always.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
@@ -1480,26 +1482,15 @@ retry:
if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
- /*
- * Re-read switch information in case it has changed since
- * oldcard was initialized.
- */
- if (oldcard) {
- err = mmc_read_switch(card);
- if (err)
- goto free_card;
- }
- if (mmc_sd_card_using_v18(card)) {
- if (mmc_host_set_uhs_voltage(host) ||
- mmc_sd_init_uhs_card(card)) {
- v18_fixup_failed = true;
- mmc_power_cycle(host, ocr);
- if (!oldcard)
- mmc_remove_card(card);
- goto retry;
- }
- goto done;
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
}
+ goto cont;
}
/* Initialization sequence for UHS-I cards */
@@ -1534,7 +1525,7 @@ retry:
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+cont:
if (!oldcard) {
/* Read/parse the extension registers. */
err = sd_read_ext_regs(card);
@@ -1566,7 +1557,7 @@ retry:
err = -EINVAL;
goto free_card;
}
-done:
+
host->card = card;
return 0;
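The first sd.c hunk tightens the UHS entry condition: both the host's request (S18R set in the OCR it sent) and the card's acknowledgement (S18A in the returned OCR) must be present before the 1.8V switch is attempted. Hedged restatement of that check:

/* switch signalling voltage only if the host asked for it and the card agreed */
if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
    rocr && (*rocr & SD_ROCR_S18A))
	err = mmc_set_uhs_voltage(host, pocr);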
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 0b682a31cd3e..f64b9ac76a5c 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1043,7 +1043,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
/* Prevent processing of SDIO IRQs in suspended state. */
mmc_card_set_suspended(host->card);
- cancel_delayed_work_sync(&host->sdio_irq_work);
+ cancel_work_sync(&host->sdio_irq_work);
mmc_claim_host(host);
@@ -1103,7 +1103,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
wake_up_process(host->sdio_irq_thread);
else if (host->caps & MMC_CAP_SDIO_IRQ)
- queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ schedule_work(&host->sdio_irq_work);
}
out:
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 4b1f7c966ec8..2b24bdf38296 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -124,7 +124,7 @@ static void sdio_run_irqs(struct mmc_host *host)
void sdio_irq_work(struct work_struct *work)
{
struct mmc_host *host =
- container_of(work, struct mmc_host, sdio_irq_work.work);
+ container_of(work, struct mmc_host, sdio_irq_work);
sdio_run_irqs(host);
}
@@ -132,7 +132,7 @@ void sdio_irq_work(struct work_struct *work)
void sdio_signal_irq(struct mmc_host *host)
{
host->sdio_irq_pending = true;
- queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ schedule_work(&host->sdio_irq_work);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
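The sdio_irq_work hunks above replace a delayed_work that was only ever queued with a zero delay by a plain work_struct, which drops the embedded timer and the extra .work indirection in container_of(). A minimal sketch of the two idioms side by side, outside any driver (struct and function names are invented for illustration):

#include <linux/workqueue.h>

struct demo_ctx {
        struct work_struct irq_work;        /* plain work item, no timer */
        struct delayed_work poll_work;      /* timer + work item bundled together */
};

static void demo_irq_fn(struct work_struct *work)
{
        /* plain work: container_of() on the work_struct member itself */
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, irq_work);

        (void)ctx;
}

static void demo_poll_fn(struct work_struct *work)
{
        /* delayed work: the handler still receives the inner work_struct */
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, poll_work.work);

        (void)ctx;
}

static void demo_setup(struct demo_ctx *ctx)
{
        INIT_WORK(&ctx->irq_work, demo_irq_fn);
        INIT_DELAYED_WORK(&ctx->poll_work, demo_poll_fn);

        schedule_work(&ctx->irq_work);                      /* run as soon as possible */
        queue_delayed_work(system_wq, &ctx->poll_work, 0);  /* same effect, heavier object */

        cancel_work_sync(&ctx->irq_work);
        cancel_delayed_work_sync(&ctx->poll_work);
}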
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 10c563999d3d..f324daadaf70 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -157,6 +157,7 @@ config MMC_SDHCI_OF_ARASAN
config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
select MMC_SDHCI_IO_ACCESSORS
@@ -171,6 +172,7 @@ config MMC_SDHCI_OF_ASPEED
config MMC_SDHCI_OF_ASPEED_TEST
bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index a9a0837153d8..c88b039dc9fb 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1097,8 +1097,9 @@ out5:
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
-out_clk:
+
clk_disable_unprepare(host->clk);
+out_clk:
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b1d563b2ed1b..dc2db9c185ea 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -298,7 +298,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
{
struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
enum dma_data_direction dir = mmc_get_dma_dir(data);
- int sg_count;
+ unsigned int sg_count;
if (data->host_cookie == COOKIE_PREMAPPED)
return data->sg_count;
@@ -308,7 +308,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
data->sg_len,
dir);
- if (sg_count <= 0) {
+ if (!sg_count) {
dev_err(mmc_dev(host->mmc),
"Failed to map scatterlist for DMA operation\n");
return -EINVAL;
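This hunk, like the meson-mx-sdhc one further down, makes the scatterlist count unsigned and tests it for zero: dma_map_sg() signals failure by returning 0 rather than a negative number, so the old signed "<= 0" comparison only obscured that. A hedged sketch of the pattern (the device and scatterlist come from the caller; the error code is illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int demo_map_for_dma(struct device *dev, struct scatterlist *sgl,
                            int nents, enum dma_data_direction dir)
{
        unsigned int mapped;

        mapped = dma_map_sg(dev, sgl, nents, dir);
        if (!mapped)                            /* 0 == mapping failed */
                return -ENOMEM;

        /* ... program the controller with 'mapped' segments ... */

        dma_unmap_sg(dev, sgl, nents, dir);     /* always unmap with the original nents */
        return 0;
}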
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2f08d442e557..df05e60bed9a 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -41,14 +41,17 @@
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
+#define CLK_V2_IRQ_SDIO_SLEEP BIT(25)
#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
+#define CLK_V3_IRQ_SDIO_SLEEP BIT(29)
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
+#define CLK_IRQ_SDIO_SLEEP(h) (h->data->irq_sdio_sleep)
#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
@@ -101,8 +104,7 @@
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
- (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
- IRQ_SDIO)
+ (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN)
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
@@ -136,6 +138,7 @@ struct meson_mmc_data {
unsigned int rx_delay_mask;
unsigned int always_on;
unsigned int adjust;
+ unsigned int irq_sdio_sleep;
};
struct sd_emmc_desc {
@@ -175,6 +178,7 @@ struct meson_host {
bool vqmmc_enabled;
bool needs_pre_post_req;
+ spinlock_t lock;
};
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -431,6 +435,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
+ clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
/* get the mux parents */
@@ -929,33 +934,54 @@ static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
}
}
+static void __meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 reg_irqen = IRQ_EN_MASK;
+
+ if (enable)
+ reg_irqen |= IRQ_SDIO;
+ writel(reg_irqen, host->regs + SD_EMMC_IRQ_EN);
+}
+
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_command *cmd;
- struct mmc_data *data;
- u32 irq_en, status, raw_status;
+ u32 status, raw_status;
irqreturn_t ret = IRQ_NONE;
- irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
raw_status = readl(host->regs + SD_EMMC_STATUS);
- status = raw_status & irq_en;
+ status = raw_status & (IRQ_EN_MASK | IRQ_SDIO);
if (!status) {
dev_dbg(host->dev,
- "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
- irq_en, raw_status);
+ "Unexpected IRQ! irq_en 0x%08lx - status 0x%08x\n",
+ IRQ_EN_MASK | IRQ_SDIO, raw_status);
return IRQ_NONE;
}
- if (WARN_ON(!host) || WARN_ON(!host->cmd))
+ if (WARN_ON(!host))
return IRQ_NONE;
/* ack all raised interrupts */
writel(status, host->regs + SD_EMMC_STATUS);
cmd = host->cmd;
- data = cmd->data;
+
+ if (status & IRQ_SDIO) {
+ spin_lock(&host->lock);
+ __meson_mmc_enable_sdio_irq(host->mmc, 0);
+ sdio_signal_irq(host->mmc);
+ spin_unlock(&host->lock);
+ status &= ~IRQ_SDIO;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (WARN_ON(!cmd))
+ return IRQ_NONE;
+
cmd->error = 0;
if (status & IRQ_CRC_ERR) {
dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
@@ -973,12 +999,9 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
meson_mmc_read_resp(host->mmc, cmd);
- if (status & IRQ_SDIO) {
- dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
- ret = IRQ_HANDLED;
- }
-
if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
+ struct mmc_data *data = cmd->data;
+
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
if (meson_mmc_bounce_buf_read(data) ||
@@ -1121,6 +1144,21 @@ static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return -EINVAL;
}
+static void meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ __meson_mmc_enable_sdio_irq(mmc, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void meson_mmc_ack_sdio_irq(struct mmc_host *mmc)
+{
+ meson_mmc_enable_sdio_irq(mmc, 1);
+}
+
static const struct mmc_host_ops meson_mmc_ops = {
.request = meson_mmc_request,
.set_ios = meson_mmc_set_ios,
@@ -1130,6 +1168,8 @@ static const struct mmc_host_ops meson_mmc_ops = {
.execute_tuning = meson_mmc_resampling_tuning,
.card_busy = meson_mmc_card_busy,
.start_signal_voltage_switch = meson_mmc_voltage_switch,
+ .enable_sdio_irq = meson_mmc_enable_sdio_irq,
+ .ack_sdio_irq = meson_mmc_ack_sdio_irq,
};
static int meson_mmc_probe(struct platform_device *pdev)
@@ -1172,8 +1212,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -1224,10 +1266,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* clear, ack and enable interrupts */
writel(0, host->regs + SD_EMMC_IRQ_EN);
- writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
- host->regs + SD_EMMC_STATUS);
- writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
- host->regs + SD_EMMC_IRQ_EN);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
ret = request_threaded_irq(host->irq, meson_mmc_irq,
meson_mmc_irq_thread, IRQF_ONESHOT,
@@ -1235,7 +1275,13 @@ static int meson_mmc_probe(struct platform_device *pdev)
if (ret)
goto err_init_clk;
+ spin_lock_init(&host->lock);
+
mmc->caps |= MMC_CAP_CMD23;
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
if (host->dram_access_quirk) {
/* Limit segments to 1 due to low available sram memory */
mmc->max_segs = 1;
@@ -1326,6 +1372,7 @@ static const struct meson_mmc_data meson_gx_data = {
.rx_delay_mask = CLK_V2_RX_DELAY_MASK,
.always_on = CLK_V2_ALWAYS_ON,
.adjust = SD_EMMC_ADJUST,
+ .irq_sdio_sleep = CLK_V2_IRQ_SDIO_SLEEP,
};
static const struct meson_mmc_data meson_axg_data = {
@@ -1333,6 +1380,7 @@ static const struct meson_mmc_data meson_axg_data = {
.rx_delay_mask = CLK_V3_RX_DELAY_MASK,
.always_on = CLK_V3_ALWAYS_ON,
.adjust = SD_EMMC_V3_ADJUST,
+ .irq_sdio_sleep = CLK_V3_IRQ_SDIO_SLEEP,
};
static const struct of_device_id meson_mmc_of_match[] = {
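Taken together, the meson-gx hunks wire the controller into the MMC_CAP2_SDIO_IRQ_NOTHREAD scheme: the hard IRQ handler masks the SDIO interrupt and calls sdio_signal_irq(), the core then runs the card's handler from sdio_irq_work, and .ack_sdio_irq re-enables the interrupt once that handler is done. A stripped-down sketch of that flow for a hypothetical host (demo_* names and the register write are invented; only the plumbing follows the patch):

#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/spinlock.h>

struct demo_host {
        struct mmc_host *mmc;
        spinlock_t lock;                        /* serialises interrupt-enable updates */
};

static void __demo_enable_sdio_irq(struct demo_host *host, int enable)
{
        /* write the controller's interrupt-enable register here */
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        struct demo_host *host = dev_id;

        spin_lock(&host->lock);
        __demo_enable_sdio_irq(host, 0);        /* mask until the card handler has run */
        sdio_signal_irq(host->mmc);             /* core queues sdio_irq_work */
        spin_unlock(&host->lock);

        return IRQ_HANDLED;
}

static void demo_ack_sdio_irq(struct mmc_host *mmc)
{
        struct demo_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        __demo_enable_sdio_irq(host, 1);        /* card handler finished: unmask again */
        spin_unlock_irqrestore(&host->lock, flags);
}

The probe path would then set mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD and install demo_ack_sdio_irq as .ack_sdio_irq, as the meson_mmc_ops hunk does.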
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index e92e63cb5641..da85c2f2acb8 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -381,14 +381,14 @@ static void meson_mx_sdhc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int meson_mx_sdhc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- int dma_len;
+ unsigned int dma_len;
if (!data)
return 0;
dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
- if (dma_len <= 0) {
+ if (!dma_len) {
dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
return -ENOMEM;
}
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index a5e05ed0fda3..9d35453e7371 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -34,7 +34,7 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
- if (hsq->mrq) {
+ if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b6eb75f4bbfc..dfc3ffd5b1f8 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -524,9 +524,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -651,16 +648,8 @@ static int moxart_probe(struct platform_device *pdev)
dmaengine_slave_config(host->dma_chan_rx, &cfg);
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4ff73d1883de..df941438aef5 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -474,33 +474,20 @@ struct msdc_host {
struct cqhci_host *cq_host;
};
-static const struct mtk_mmc_compatible mt8135_compat = {
- .clk_div_bits = 8,
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE,
- .async_fifo = false,
- .data_tune = false,
- .busy_check = false,
- .stop_clk_fix = false,
- .enhance_rx = false,
- .support_64g = false,
-};
-
-static const struct mtk_mmc_compatible mt8173_compat = {
- .clk_div_bits = 8,
- .recheck_sdio_irq = true,
- .hs400_tune = true,
- .pad_tune_reg = MSDC_PAD_TUNE,
- .async_fifo = false,
- .data_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt8183_compat = {
+static const struct mtk_mmc_compatible mt2712_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
@@ -513,30 +500,43 @@ static const struct mtk_mmc_compatible mt8183_compat = {
.support_64g = true,
};
-static const struct mtk_mmc_compatible mt2701_compat = {
+static const struct mtk_mmc_compatible mt6779_compat = {
.clk_div_bits = 12,
- .recheck_sdio_irq = true,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+ .support_64g = true,
+};
+
+static const struct mtk_mmc_compatible mt6795_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = false,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt2712_compat = {
- .clk_div_bits = 12,
- .recheck_sdio_irq = false,
+static const struct mtk_mmc_compatible mt7620_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE0,
- .async_fifo = true,
- .data_tune = true,
- .busy_check = true,
- .stop_clk_fix = true,
- .enhance_rx = true,
- .support_64g = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .use_internal_cd = true,
};
static const struct mtk_mmc_compatible mt7622_compat = {
@@ -552,31 +552,33 @@ static const struct mtk_mmc_compatible mt7622_compat = {
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt8516_compat = {
- .clk_div_bits = 12,
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
.recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE0,
- .async_fifo = true,
- .data_tune = true,
- .busy_check = true,
- .stop_clk_fix = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .support_64g = false,
};
-static const struct mtk_mmc_compatible mt7620_compat = {
+static const struct mtk_mmc_compatible mt8173_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = true,
- .hs400_tune = false,
+ .hs400_tune = true,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
.data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
- .use_internal_cd = true,
+ .support_64g = false,
};
-static const struct mtk_mmc_compatible mt6779_compat = {
+static const struct mtk_mmc_compatible mt8183_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
@@ -589,16 +591,29 @@ static const struct mtk_mmc_compatible mt6779_compat = {
.support_64g = true,
};
+static const struct mtk_mmc_compatible mt8516_compat = {
+ .clk_div_bits = 12,
+ .recheck_sdio_irq = true,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+};
+
static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
- { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
- { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
+ { .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
+ { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
- { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
- { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
+
{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
@@ -2446,6 +2461,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2932,11 +2950,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
/*
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0db9490dc659..e4003f6058eb 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 5fe4528e296e..5798aee06653 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1042,7 +1042,6 @@ static int sd_set_timing(struct rtsx_usb_sdmmc *host,
unsigned char timing, bool *ddr_mode)
{
struct rtsx_ucr *ucr = host->ucr;
- int err;
*ddr_mode = false;
@@ -1097,9 +1096,7 @@ static int sd_set_timing(struct rtsx_usb_sdmmc *host,
break;
}
- err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
-
- return err;
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
}
static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index dc2991422a87..3a091a387ecb 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2441,6 +2441,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
*/
{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
+ {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4e904850973c..a7343d4bc50e 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -349,6 +349,15 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
.ops = &sdhci_dwcmshc_rk35xx_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -431,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
#endif
@@ -447,7 +459,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
int err;
u32 extra;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
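Switching from of_device_get_match_data() to device_get_match_data() is what lets the new ACPI table entry carry its own platform data: the fwnode-generic helper returns of_device_id.data on DT systems and acpi_device_id.driver_data on ACPI systems. A minimal sketch of the lookup in a probe path (struct and function names are placeholders):

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_pdata {
        unsigned int quirks;
};

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_pdata *pdata;

        /* covers both of_device_id.data and acpi_device_id.driver_data */
        pdata = device_get_match_data(&pdev->dev);
        if (!pdata)
                return -ENODEV;

        /* ... apply pdata->quirks ... */
        return 0;
}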
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 622b7de96c7f..169b84761041 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -297,6 +297,27 @@ static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
SDHCI_QUIRK_MISSING_CAPS
};
+static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_set_ios(mmc, ios);
+
+ /*
+ * Some (ENE) controllers misbehave on some ios operations,
+ * signalling timeout and CRC errors even on CMD0. Resetting
+ * it on each ios seems to solve the problem.
+ */
+ if (!(host->flags & SDHCI_DEVICE_DEAD))
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+}
+
+static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
+ return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_ene_712 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_BROKEN_DMA,
@@ -304,8 +325,8 @@ static const struct sdhci_pci_fixes sdhci_ene_712 = {
static const struct sdhci_pci_fixes sdhci_ene_714 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
SDHCI_QUIRK_BROKEN_DMA,
+ .probe_slot = ene_714_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_cafe = {
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 0d4d343dbb77..ad457cd9cbaa 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -317,11 +317,12 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
u32 reg_val;
/*
- * This handler only implements the eMMC tuning that is specific to
+ * This handler implements the hardware tuning that is specific to
* this controller. Fall back to the standard method for other TIMING.
*/
if ((host->timing != MMC_TIMING_MMC_HS200) &&
- (host->timing != MMC_TIMING_UHS_SDR104))
+ (host->timing != MMC_TIMING_UHS_SDR104) &&
+ (host->timing != MMC_TIMING_UHS_SDR50))
return sdhci_execute_tuning(mmc, opcode);
if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
@@ -631,6 +632,8 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (reg & 0x1)
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_DDR50;
+
sdhci_pci_o2_enable_msi(chip, host);
if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) {
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index f33e9349e4e6..46c55ab4884c 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -205,14 +205,14 @@ static inline u32 sdhci_sprd_calc_div(u32 base_clk, u32 clk)
if ((base_clk / div) > (clk * 2))
div++;
- if (div > SDHCI_SPRD_CLK_MAX_DIV)
- div = SDHCI_SPRD_CLK_MAX_DIV;
-
if (div % 2)
div = (div + 1) / 2;
else
div = div / 2;
+ if (div > SDHCI_SPRD_CLK_MAX_DIV)
+ div = SDHCI_SPRD_CLK_MAX_DIV;
+
return div;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7689ffec5ad1..fef03de85b99 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -233,28 +233,62 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
}
EXPORT_SYMBOL_GPL(sdhci_reset);
-static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
+static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
struct mmc_host *mmc = host->mmc;
if (!mmc->ops->get_cd(mmc))
- return;
+ return false;
}
host->ops->reset(host, mask);
- if (mask & SDHCI_RESET_ALL) {
+ return true;
+}
+
+static void sdhci_reset_for_all(struct sdhci_host *host)
+{
+ if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
}
-
/* Resetting the controller clears many */
host->preset_enabled = false;
}
}
+enum sdhci_reset_reason {
+ SDHCI_RESET_FOR_INIT,
+ SDHCI_RESET_FOR_REQUEST_ERROR,
+ SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
+ SDHCI_RESET_FOR_TUNING_ABORT,
+ SDHCI_RESET_FOR_CARD_REMOVED,
+ SDHCI_RESET_FOR_CQE_RECOVERY,
+};
+
+static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
+{
+ switch (reason) {
+ case SDHCI_RESET_FOR_INIT:
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ break;
+ case SDHCI_RESET_FOR_REQUEST_ERROR:
+ case SDHCI_RESET_FOR_TUNING_ABORT:
+ case SDHCI_RESET_FOR_CARD_REMOVED:
+ case SDHCI_RESET_FOR_CQE_RECOVERY:
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ break;
+ case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ break;
+ }
+}
+
+#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
+
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
@@ -323,9 +357,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
unsigned long flags;
if (soft)
- sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ sdhci_reset_for(host, INIT);
else
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
@@ -1538,8 +1572,9 @@ static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
*/
if (data->error) {
if (!host->cmd || host->cmd == data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, REQUEST_ERROR);
+ else
+ sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
}
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
@@ -2403,14 +2438,6 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_clock(host, host->clock);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-
- /*
- * Some (ENE) controllers go apeshit on some ios operation,
- * signalling timeout and CRC errors even on CMD0. Resetting
- * it on each ios seems to solve the problem.
- */
- if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
- sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
@@ -2718,8 +2745,7 @@ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
sdhci_reset_tuning(host);
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, TUNING_ABORT);
sdhci_end_tuning(host);
@@ -2987,8 +3013,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
pr_err("%s: Resetting controller.\n",
mmc_hostname(mmc));
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, CARD_REMOVED);
sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
@@ -3059,12 +3084,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
- /*
- * Spec says we should do both at the same time, but Ricoh
- * controllers do not like that.
- */
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, REQUEST_ERROR);
host->pending_reset = false;
}
@@ -3905,10 +3925,8 @@ void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
host->cqe_on = false;
- if (recovery) {
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
+ if (recovery)
+ sdhci_reset_for(host, CQE_RECOVERY);
pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
mmc_hostname(mmc), host->ier,
@@ -3928,7 +3946,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, CMD_CRC);
} else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
@@ -3938,7 +3956,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
@@ -4066,7 +4084,7 @@ void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
if (debug_quirks2)
host->quirks2 = debug_quirks2;
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
@@ -4807,7 +4825,7 @@ int __sdhci_add_host(struct sdhci_host *host)
unled:
sdhci_led_unregister(host);
unirq:
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
@@ -4865,7 +4883,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
sdhci_led_unregister(host);
if (!dead)
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
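The new sdhci_reset_for() wrapper leans on the preprocessor's token-paste operator, so call sites name only the suffix of the reason constant. A tiny standalone illustration of the same trick (plain user-space C, all names invented):

#include <stdio.h>

enum reset_reason { RESET_FOR_INIT, RESET_FOR_TUNING_ABORT };

static void reset_for_reason(enum reset_reason r)
{
        printf("reason %d\n", r);
}

/* paste the short name onto the enum prefix, as sdhci_reset_for() does */
#define reset_for(r)    reset_for_reason(RESET_FOR_##r)

int main(void)
{
        reset_for(TUNING_ABORT);        /* expands to reset_for_reason(RESET_FOR_TUNING_ABORT) */
        return 0;
}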
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 95a08f09df30..d750c464bd1e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -379,8 +379,6 @@ struct sdhci_host {
#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
/* Controller has an unusable ADMA engine */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index e7ced1496a07..8f1023480e12 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -554,7 +554,6 @@ static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
{
struct cqhci_host *cq_host;
- int ret;
cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(struct cqhci_host),
GFP_KERNEL);
@@ -568,9 +567,7 @@ static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
host->mmc->caps2 |= MMC_CAP2_CQE;
- ret = cqhci_init(cq_host, host->mmc, 1);
-
- return ret;
+ return cqhci_init(cq_host, host->mmc, 1);
}
static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 163ac9df8cca..9b5c503e3a3f 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -846,7 +846,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_sdmmc)) {
dev_err(&pdev->dev, "Error getting clock\n");
ret = PTR_ERR(priv->clk_sdmmc);
- goto fail5;
+ goto fail5_and_a_half;
}
ret = clk_prepare_enable(priv->clk_sdmmc);
@@ -863,6 +863,9 @@ static int wmt_mci_probe(struct platform_device *pdev)
return 0;
fail6:
clk_put(priv->clk_sdmmc);
+fail5_and_a_half:
+ dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
+ priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
free_irq(dma_irq, priv);
fail4:
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 946ba80f9758..5fcefcd0baca 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -195,7 +195,7 @@ static void pismo_add_one(struct pismo_data *pismo, int i,
}
}
-static int pismo_remove(struct i2c_client *client)
+static void pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
@@ -204,8 +204,6 @@ static int pismo_remove(struct i2c_client *client)
platform_device_unregister(pismo->dev[i]);
kfree(pismo);
-
- return 0;
}
static int pismo_probe(struct i2c_client *client,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index b43df73927a0..d6db655a1d24 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -69,8 +69,8 @@ config MTD_OF_PARTS
config MTD_OF_PARTS_BCM4908
bool "BCM4908 partitioning support"
- depends on MTD_OF_PARTS && (ARCH_BCM4908 || COMPILE_TEST)
- default ARCH_BCM4908
+ depends on MTD_OF_PARTS && (ARCH_BCMBCA || COMPILE_TEST)
+ default ARCH_BCMBCA
help
This provides partitions parser for BCM4908 family devices
that can have multiple "firmware" partitions. It takes care of
@@ -78,7 +78,7 @@ config MTD_OF_PARTS_BCM4908
config MTD_OF_PARTS_LINKSYS_NS
bool "Linksys Northstar partitioning support"
- depends on MTD_OF_PARTS && (ARCH_BCM_5301X || ARCH_BCM4908 || COMPILE_TEST)
+ depends on MTD_OF_PARTS && (ARCH_BCM_5301X || ARCH_BCMBCA || COMPILE_TEST)
default ARCH_BCM_5301X
help
This provides partitions parser for Linksys devices based on Broadcom
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 94c889802566..15d4a38b1351 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -500,6 +500,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/pse-pd/Kconfig"
+
source "drivers/net/can/Kconfig"
source "drivers/net/mctp/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3f1192d3c52d..6ce076462dbf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET) += loopback.o
obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += pse-pd/
obj-y += mdio/
obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index f475eef14390..83214e2e70ab 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -68,7 +68,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
- strlcpy(s[i].name, name, IFNAMSIZ);
+ strscpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
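strscpy() is preferred over strlcpy() here because it never reads past the end of an unterminated source string and its return value makes truncation trivial to detect (strlcpy() instead returns the full source length). A short sketch of checking that return value (buffer names and the error mapping are illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t ret;

        ret = strscpy(dst, src, dst_size);
        if (ret == -E2BIG)
                return -EINVAL;         /* truncated; dst still holds a NUL-terminated prefix */

        /* ret is the number of characters copied, excluding the trailing NUL */
        return 0;
}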
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 9a247eb7679c..2d20be6ffb7e 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -2894,8 +2894,7 @@ static void amt_event_work(struct work_struct *work)
amt_event_send_request(amt);
break;
default:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
break;
}
}
@@ -3033,8 +3032,7 @@ static int amt_dev_stop(struct net_device *dev)
cancel_work_sync(&amt->event_wq);
for (i = 0; i < AMT_MAX_EVENTS; i++) {
skb = amt->events[i].skb;
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
amt->events[i].event = AMT_EVENT_NONE;
amt->events[i].skb = NULL;
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d7fb33c078e8..e58a1e0cadd2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -84,11 +84,13 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -2001,36 +2003,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 50e60843020c..24bb50dfd362 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -2081,7 +2080,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2166,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2447,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -3167,6 +3165,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
found:
if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
dst_release(dst);
kfree(tags);
}
@@ -3198,12 +3199,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
return ret;
}
-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
struct in6_addr *saddr, struct in6_addr *daddr)
{
int i;
- if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+ /* Ignore NAs that:
+ * 1. Source address is unspecified address.
+ * 2. Dest address is neither all-nodes multicast address nor
+ * exist on bond interface.
+ */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
__func__, saddr, daddr);
return;
@@ -3246,14 +3254,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
* see bond_arp_rcv().
*/
if (bond_is_active_slave(slave))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
out:
return RX_HANDLER_ANOTHER;
@@ -4174,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -4211,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4222,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -4229,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
@@ -5619,7 +5650,7 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
@@ -6218,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
-
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
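The bond_main.c changes tie slave address programming to the bond's IFF_UP state: unicast/multicast addresses (including the LACPDU group address now exported from bond_3ad.c) are synced to slaves only while the bond is up, and bond_close() flushes them again so nothing lingers after the bond is taken down. The general add-on-open / delete-on-stop pairing, reduced to a helper (demo_* names are invented; the address is the LACPDU group address from the patch):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static const u8 demo_lacpdu_mcast[ETH_ALEN] = {
        0x01, 0x80, 0xc2, 0x00, 0x00, 0x02
};

/* called with 'up == true' from ndo_open and 'up == false' from ndo_stop */
static int demo_sync_lacpdu_addr(struct net_device *lower, bool up)
{
        if (up)
                return dev_mc_add(lower, demo_lacpdu_mcast);

        dev_mc_del(lower, demo_lacpdu_mcast);
        return 0;
}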
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 9b5a5df23d21..8996bd0a194a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -47,10 +47,10 @@ static ssize_t bonding_show_bonds(struct class *cls,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", bond->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -178,10 +178,10 @@ static ssize_t bonding_show_slaves(struct device *d,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", slave->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
rtnl_unlock();
@@ -203,7 +203,7 @@ static ssize_t bonding_show_mode(struct device *d,
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
+ return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
@@ -217,7 +217,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
- return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
@@ -233,7 +233,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
- return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
@@ -248,7 +248,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
- return sprintf(buf, "%s %d\n",
+ return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
@@ -265,7 +265,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
- return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
@@ -277,7 +277,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.arp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
@@ -292,8 +292,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
- res += sprintf(buf + res, "%pI4 ",
- &bond->params.arp_targets[i]);
+ res += sysfs_emit_at(buf, res, "%pI4 ",
+ &bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -310,7 +310,7 @@ static ssize_t bonding_show_missed_max(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.missed_max);
+ return sysfs_emit(buf, "%u\n", bond->params.missed_max);
}
static DEVICE_ATTR(arp_missed_max, 0644,
bonding_show_missed_max, bonding_sysfs_store_option);
@@ -322,7 +322,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
@@ -333,7 +333,7 @@ static ssize_t bonding_show_updelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
@@ -345,8 +345,8 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n",
- bond->params.peer_notif_delay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
@@ -361,7 +361,7 @@ static ssize_t bonding_show_lacp_active(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
@@ -375,7 +375,7 @@ static ssize_t bonding_show_lacp_rate(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
@@ -386,7 +386,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.min_links);
+ return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
@@ -400,7 +400,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
- return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
@@ -412,7 +412,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.num_peer_notif);
+ return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
@@ -426,7 +426,7 @@ static ssize_t bonding_show_miimon(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
@@ -443,7 +443,7 @@ static ssize_t bonding_show_primary(struct device *d,
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
- count = sprintf(buf, "%s\n", primary->dev->name);
+ count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
@@ -462,8 +462,8 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
- return sprintf(buf, "%s %d\n",
- val->string, bond->params.primary_reselect);
+ return sysfs_emit(buf, "%s %d\n",
+ val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
@@ -475,7 +475,7 @@ static ssize_t bonding_show_carrier(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -493,7 +493,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
- count = sprintf(buf, "%s\n", slave_dev->name);
+ count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -509,7 +509,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
- return sprintf(buf, "%s\n", active ? "up" : "down");
+ return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
@@ -524,9 +524,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.aggregator_id);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -545,9 +545,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.ports);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.ports);
}
return count;
@@ -566,9 +566,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.actor_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -587,9 +587,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.partner_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -609,7 +609,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
- count = sprintf(buf, "%pM\n", ad_info.partner_system);
+ count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
@@ -634,11 +634,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ res += sysfs_emit_at(buf, res, "%s:%d ",
+ slave->dev->name, slave->queue_id);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -658,7 +658,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+ return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
@@ -670,7 +670,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.resend_igmp);
+ return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -682,7 +682,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.lp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
@@ -693,7 +693,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
+ return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
@@ -705,7 +705,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
+ return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
@@ -717,7 +717,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
@@ -731,7 +731,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+ return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
@@ -746,7 +746,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
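
The bond_sysfs.c hunks above convert sprintf()/buffer-offset arithmetic to sysfs_emit() and sysfs_emit_at(), which clamp the output to PAGE_SIZE and warn when the buffer is not the page-sized sysfs buffer. A minimal sketch of the pattern, with a hypothetical attribute name (example) that is not part of the patch:

/* Sketch only: a read-only sysfs attribute built on sysfs_emit().
 * sysfs_emit(buf, fmt, ...) writes at offset 0; sysfs_emit_at(buf, at, ...)
 * appends at a caller-tracked offset, both capped at PAGE_SIZE.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int len;

	len = sysfs_emit(buf, "%s\n", "first line");
	len += sysfs_emit_at(buf, len, "%s\n", "second line");

	return len;
}
static DEVICE_ATTR_RO(example);

Since sysfs_emit() returns the number of characters written, the show() callbacks need no separate count bookkeeping beyond what is shown above.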
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 69b0a3751dff..313866f2c0e4 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -22,30 +22,30 @@ static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
- return sprintf(buf, "backup\n");
+ return sysfs_emit(buf, "backup\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+ return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->link_failure_count);
+ return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%*phC\n",
+ return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
@@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", slave->queue_id);
}
static SLAVE_ATTR_RO(queue_id);
@@ -64,11 +64,11 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
- return sprintf(buf, "%d\n",
- agg->aggregator_identifier);
+ return sysfs_emit(buf, "%d\n",
+ agg->aggregator_identifier);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
@@ -79,11 +79,11 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
@@ -94,11 +94,11 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index f23a03300a81..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+ /* This is not a FIFO. C/D_CAN sends out the buffers
+ * prioritized. The lowest buffer number wins.
+ */
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
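
The reworked c_can_get_tx_free() separates the two IP cores: D_CAN can treat the mailboxes as a plain ring, while C_CAN transmits buffers in ascending buffer-number order, so only the buffers above the current masked head are usable and none are free once the head wraps below the tail. As a worked example (illustrative numbers) with obj_num = 16, head = 5 and tail = 2: a D_CAN core reports 16 - (5 - 2) = 13 free buffers, a C_CAN core reports 16 - 5 = 11, and once the masked head wraps below the masked tail the C_CAN variant reports 0 until c_can_do_tx() advances the tail.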
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index dc8132862f33..d6605dbb7737 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_packets += pkts;
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 3c18d028bd8c..b8da15ea6ad9 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -657,7 +657,6 @@ static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *c
cf->can_id = (idw >> 18) & CAN_SFF_MASK;
/* BRS, ESI, RTR Flags */
- cf->flags = 0;
if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
cf->flags |= CANFD_BRS;
@@ -1425,7 +1424,7 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->can.clock.freq = can_clk_rate;
- netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);
ret = register_candev(ndev);
if (ret) {
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index 89d54c2151e1..f83684f006ea 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -58,7 +58,6 @@ static int ctucan_platform_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr)) {
- dev_err(dev, "Cannot remap address.\n");
ret = PTR_ERR(addr);
goto err;
}
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index a32a01c172d4..81ebf0562c89 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -247,7 +247,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
@@ -329,7 +329,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
{
offload->dev = dev;
- /* Limit queue len to 4x the weight (rounted to next power of two) */
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
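
For reference, with the corrected comment above: fls() returns the 1-based position of the most significant set bit, so for the default NAPI weight of 64 the computation is fls(64) = 7, skb_queue_len_max = 2 << 7 = 256, then multiplied by 4, giving a queue limit of 1024 skbs.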
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 07e0feac8629..791a51e2f5d6 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -91,8 +91,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
- unsigned int *frame_len_ptr)
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
@@ -108,16 +108,12 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
+ *len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
@@ -147,7 +143,7 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
@@ -191,6 +187,20 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -204,16 +214,8 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
@@ -235,23 +237,51 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
}
skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -291,6 +321,14 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
}
return true;
@@ -299,18 +337,25 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct can_priv *priv = netdev_priv(dev);
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
goto inval_skb;
- } else {
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
goto inval_skb;
}
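
With the skb.c changes above, can_dropped_invalid_skb() dispatches on skb->protocol via the can_is_can_skb()/can_is_canfd_skb()/can_is_canxl_skb() helpers, and alloc_canfd_skb()/alloc_canxl_skb() pre-set CANFD_FDF/CANXL_XLF so the flags alone identify the frame type. A sketch of how a transmit path might use these helpers; example_xmit() is hypothetical, only the helper calls are taken from this series:

#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* frees and counts the skb itself if it is malformed */
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (can_is_canxl_skb(skb)) {
		const struct canxl_frame *cxl = (void *)skb->data;

		netdev_dbg(dev, "CAN XL frame, %u data bytes\n", cxl->len);
	} else if (can_is_canfd_skb(skb)) {
		const struct canfd_frame *cfd = (void *)skb->data;

		netdev_dbg(dev, "CAN FD frame, flags 0x%x\n", cfd->flags);
	}

	/* a real driver would hand the frame to the hardware here */
	consume_skb(skb);
	return NETDEV_TX_OK;
}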
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index f857968efed7..5ee38e586fd8 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -295,45 +295,45 @@ static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -341,23 +341,23 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
@@ -365,8 +365,8 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
@@ -2085,20 +2085,20 @@ static int flexcan_probe(struct platform_device *pdev)
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!((devtype_data->quirks &
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) ==
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) {
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) {
dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
return -EINVAL;
}
if ((devtype_data->quirks &
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) {
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) {
dev_err(&pdev->dev,
"Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n",
devtype_data->quirks);
@@ -2177,8 +2177,7 @@ static int flexcan_probe(struct platform_device *pdev)
err = flexcan_setup_stop_mode(pdev);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "setup stop mode failed\n");
+ dev_err_probe(&pdev->dev, err, "setup stop mode failed\n");
goto failed_setup_stop_mode;
}
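
The flexcan and rcar_canfd hunks replace the "if (err != -EPROBE_DEFER) dev_err(...)" idiom with dev_err_probe(), which suppresses the message for -EPROBE_DEFER, records the deferral reason, and returns the error so it can be used directly in the return path. A minimal sketch of the pattern; example_probe() and the clock name are placeholders:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* dev_err_probe() logs (except for -EPROBE_DEFER) and returns err */
	clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "cannot get peripheral clock\n");

	return 0;
}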
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 8621a8ea1dea..025c3417031f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -63,11 +63,11 @@
/* Setup 16 mailboxes */
#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
/* Device supports RX via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX BIT(14)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14)
/* Device supports RTR reception via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR BIT(15)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_FIFO BIT(16)
+#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -121,7 +121,7 @@ flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX;
}
static inline bool
@@ -129,10 +129,10 @@ flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return (quirks & (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR);
+ return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR);
}
static inline bool
@@ -140,7 +140,7 @@ flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_FIFO;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO;
}
static inline bool
@@ -149,7 +149,7 @@ flexcan_active_rx_rtr(const struct flexcan_priv *priv)
const u32 quirks = priv->devtype_data.quirks;
if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
- if (quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)
+ if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)
return true;
} else {
/* RX-FIFO is always RTR capable */
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index ad7a89b95da7..8d42b7e6661f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -973,7 +973,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->base = addr;
- netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index ed54c0b3c7d4..4e9680c8eb34 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -329,12 +329,9 @@ MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
u32 res;
- int ret;
-
- ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
- res, res & msk, 0, 10);
- return ret;
+ return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
}
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 4709c012b1dc..dcb582563d5e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1467,8 +1467,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
}
if (!cdev->is_peripheral)
- netif_napi_add(dev, &cdev->napi,
- m_can_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
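
The ctucanfd, ifi_canfd and m_can hunks drop the weight argument from netif_napi_add(): the three-argument form uses the default NAPI weight, and a driver that genuinely needs a different weight is expected to call netif_napi_add_weight() instead. A minimal sketch with placeholder names:

#include <linux/netdevice.h>

/* Sketch: registering a NAPI poll handler with the 3-argument API used above.
 * example_poll() and example_setup() are placeholders, not driver code.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* process up to 'budget' packets here */
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

static void example_setup(struct net_device *ndev, struct napi_struct *napi)
{
	netif_napi_add(ndev, napi, example_poll);
}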
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 27085b796e75..567620d215f8 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1880,10 +1880,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Global controller context */
gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
- if (!gpriv) {
- err = -ENOMEM;
- goto fail_dev;
- }
+ if (!gpriv)
+ return -ENOMEM;
+
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
@@ -1904,12 +1903,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Peripheral clock */
gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(gpriv->clkp)) {
- err = PTR_ERR(gpriv->clkp);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->clkp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ "cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
@@ -1917,12 +1913,10 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
- if (IS_ERR(gpriv->can_clk)) {
- err = PTR_ERR(gpriv->can_clk);
- dev_err(&pdev->dev,
- "cannot get canfd clock, error %d\n", err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->can_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ "cannot get canfd clock\n");
+
gpriv->fcan = RCANFD_CANFDCLK;
} else {
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 131a084c3535..ebd5941c3f53 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -478,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
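
The strlcpy() to strscpy() switch above is the usual hardening conversion: strscpy() always NUL-terminates and returns the number of bytes copied, or -E2BIG when the source was truncated, instead of the would-be source length that strlcpy() reports. A short sketch; example_copy_name() is a placeholder:

#include <linux/netdevice.h>
#include <linux/string.h>

static void example_copy_name(const struct net_device *netdev)
{
	char name[IFNAMSIZ];
	ssize_t n;

	/* strscpy() always NUL-terminates; -E2BIG signals truncation */
	n = strscpy(name, netdev->name, sizeof(name));
	if (n == -E2BIG)
		pr_warn("interface name %s truncated\n", name);
}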
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 98dfd5f295a7..1bb1129b0450 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -661,8 +661,6 @@ static const struct ethtool_ops sja1000_ethtool_ops = {
int register_sja1000dev(struct net_device *dev)
{
- int ret;
-
if (!sja1000_probe_chip(dev))
return -ENODEV;
@@ -673,9 +671,7 @@ int register_sja1000dev(struct net_device *dev)
set_reset_mode(dev);
chipset_init(dev);
- ret = register_candev(dev);
-
- return ret;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 81bc741905fd..6779d5357069 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -103,6 +104,11 @@ static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *o
spin_lock_init(&tp->io_lock);
}
+static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ priv->flags = SJA1000_QUIRK_NO_CDR_REG;
+}
+
static void sp_populate(struct sja1000_priv *priv,
struct sja1000_platform_data *pdata,
unsigned long resource_mem_flags)
@@ -153,11 +159,13 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->write_reg = sp_write_reg8;
}
- err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ if (!priv->can.clock.freq) {
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ }
err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
if (!err)
@@ -192,8 +200,13 @@ static struct sja1000_of_data technologic_data = {
.init = sp_technologic_init,
};
+static struct sja1000_of_data renesas_data = {
+ .init = sp_rzn1_init,
+};
+
static const struct of_device_id sp_of_table[] = {
{ .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, },
{ .compatible = "technologic,sja1000", .data = &technologic_data, },
{ /* sentinel */ },
};
@@ -210,6 +223,7 @@ static int sp_probe(struct platform_device *pdev)
struct device_node *of = pdev->dev.of_node;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
+ struct clk *clk;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -234,6 +248,11 @@ static int sp_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
+
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "CAN clk operation failed");
} else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
@@ -262,6 +281,15 @@ static int sp_probe(struct platform_device *pdev)
priv->reg_base = addr;
if (of) {
+ if (clk) {
+ priv->can.clock.freq = clk_get_rate(clk) / 2;
+ if (!priv->can.clock.freq) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Zero CAN clk rate");
+ goto exit_free;
+ }
+ }
+
sp_populate_of(priv, of);
if (of_data && of_data->init)
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index d769bdf740b7..640fe0a1df63 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -222,7 +222,7 @@ union es58x_urb_cmd {
u8 cmd_type;
u8 cmd_id;
} __packed;
- u8 raw_cmd[0];
+ DECLARE_FLEX_ARRAY(u8, raw_cmd);
};
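
The es58x_core.h change replaces the zero-length raw_cmd[0] member with DECLARE_FLEX_ARRAY(), which wraps a flexible-array member in an anonymous struct so it can legally appear as the sole member of a union or struct. A sketch with a hypothetical message layout, not the ES58x wire format:

#include <linux/stddef.h>
#include <linux/types.h>

/* Sketch: a union whose members overlay the same buffer; the raw view is a
 * flexible array, which needs DECLARE_FLEX_ARRAY() to be valid in a union.
 */
union example_msg {
	struct {
		__le16 len;
		u8 cmd;
	} __packed hdr;
	DECLARE_FLEX_ARRAY(u8, raw);
};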
/**
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index baf749c8cda3..f0065d40eb24 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -10,20 +10,24 @@
*/
#include <linux/bitfield.h>
+#include <linux/clocksource.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
+#include <linux/timecounter.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GS_USB_1_VENDOR_ID 0x1d50
+#define USB_GS_USB_1_PRODUCT_ID 0x606f
#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
@@ -34,8 +38,16 @@
#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define GS_USB_ENDPOINT_IN 1
+#define GS_USB_ENDPOINT_OUT 2
+
+/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
+ * for timer overflow (will be after ~71 minutes)
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
/* Device specific constants */
enum gs_usb_breq {
@@ -52,6 +64,8 @@ enum gs_usb_breq {
GS_USB_BREQ_SET_USER_ID,
GS_USB_BREQ_DATA_BITTIMING,
GS_USB_BREQ_BT_CONST_EXT,
+ GS_USB_BREQ_SET_TERMINATION,
+ GS_USB_BREQ_GET_TERMINATION,
};
enum gs_can_mode {
@@ -75,6 +89,14 @@ enum gs_can_identify_mode {
GS_CAN_IDENTIFY_ON
};
+enum gs_can_termination_state {
+ GS_CAN_TERMINATION_STATE_OFF = 0,
+ GS_CAN_TERMINATION_STATE_ON
+};
+
+#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define GS_USB_TERMINATION_ENABLED 120
+
/* data types passed between host and device */
/* The firmware on the original USB2CAN by Geschwister Schneider
@@ -111,6 +133,7 @@ struct gs_device_config {
#define GS_CAN_MODE_FD BIT(8)
/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
+/* GS_CAN_FEATURE_TERMINATION BIT(11) */
struct gs_device_mode {
__le32 mode;
@@ -135,6 +158,10 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
+struct gs_device_termination_state {
+ __le32 state;
+} __packed;
+
#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
@@ -146,7 +173,8 @@ struct gs_identify_mode {
#define GS_CAN_FEATURE_FD BIT(8)
#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
-#define GS_CAN_FEATURE_MASK GENMASK(10, 0)
+#define GS_CAN_FEATURE_TERMINATION BIT(11)
+#define GS_CAN_FEATURE_MASK GENMASK(11, 0)
/* internal quirks - keep in GS_CAN_FEATURE space for now */
@@ -199,6 +227,11 @@ struct classic_can {
u8 data[8];
} __packed;
+struct classic_can_ts {
+ u8 data[8];
+ __le32 timestamp_us;
+} __packed;
+
struct classic_can_quirk {
u8 data[8];
u8 quirk;
@@ -208,6 +241,11 @@ struct canfd {
u8 data[64];
} __packed;
+struct canfd_ts {
+ u8 data[64];
+ __le32 timestamp_us;
+} __packed;
+
struct canfd_quirk {
u8 data[64];
u8 quirk;
@@ -224,8 +262,10 @@ struct gs_host_frame {
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
};
} __packed;
@@ -259,6 +299,12 @@ struct gs_can {
struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ /* time counter for hardware timestamps */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t tc_lock; /* spinlock to guard access to tc->cycle_last */
+ spinlock_t tc_lock; /* spinlock to guard access to tc->cycle_last */
+ struct delayed_work timestamp;
+
u32 feature;
unsigned int hf_size_tx;
@@ -268,8 +314,6 @@ struct gs_can {
struct usb_anchor tx_submitted;
atomic_t active_tx_urbs;
- void *rxbuf[GS_MAX_RX_URBS];
- dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
};
/* usb interface struct */
@@ -328,27 +372,109 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *dev)
+{
+ struct gs_device_mode dm = {
+ .mode = GS_CAN_MODE_RESET,
+ };
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+}
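
gs_cmd_reset() above is rebuilt around usb_control_msg_send(): unlike usb_control_msg(), it may be handed data on the stack (the USB core copies it into its own buffer), it treats a short transfer as an error, and it returns 0 or a negative errno, which removes the kzalloc()/kfree() boilerplate. A sketch of the same pattern; EXAMPLE_BREQ and example_send_mode() are placeholders, not part of the gs_usb protocol:

#include <linux/usb.h>

#define EXAMPLE_BREQ	42	/* placeholder vendor request number */

static int example_send_mode(struct usb_device *udev, u16 channel, u32 mode)
{
	__le32 mode_le = cpu_to_le32(mode);

	/* on-stack data is fine with usb_control_msg_send() */
	return usb_control_msg_send(udev, 0, EXAMPLE_BREQ,
				    USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				    channel, 0, &mode_le, sizeof(mode_le), 1000,
				    GFP_KERNEL);
}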
+
+static inline int gs_usb_get_timestamp(const struct gs_can *dev,
+ u32 *timestamp_p)
{
- struct gs_device_mode *dm;
- struct usb_interface *intf = gsdev->iface;
+ __le32 timestamp;
int rc;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_TIMESTAMP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &timestamp, sizeof(timestamp),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
- dm->mode = GS_CAN_MODE_RESET;
+ *timestamp_p = le32_to_cpu(timestamp);
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel, 0, dm, sizeof(*dm), 1000);
+ return 0;
+}
- kfree(dm);
+static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+{
+ struct gs_can *dev = container_of(cc, struct gs_can, cc);
+ u32 timestamp = 0;
+ int err;
+
+ lockdep_assert_held(&dev->tc_lock);
+
+ /* drop lock for synchronous USB transfer */
+ spin_unlock_bh(&dev->tc_lock);
+ err = gs_usb_get_timestamp(dev, &timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ if (err)
+ netdev_err(dev->netdev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
- return rc;
+static void gs_usb_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gs_can *dev;
+
+ dev = container_of(delayed_work, struct gs_can, timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_read(&dev->tc);
+ spin_unlock_bh(&dev->tc_lock);
+
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_skb_set_timestamp(struct gs_can *dev,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ spin_lock_bh(&dev->tc_lock);
+ ns = timecounter_cyc2time(&dev->tc, timestamp);
+ spin_unlock_bh(&dev->tc_lock);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void gs_usb_timestamp_init(struct gs_can *dev)
+{
+ struct cyclecounter *cc = &dev->cc;
+
+ cc->read = gs_usb_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
+ cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
+
+ spin_lock_init(&dev->tc_lock);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns());
+ spin_unlock_bh(&dev->tc_lock);
+
+ INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work);
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_timestamp_stop(struct gs_can *dev)
+{
+ cancel_delayed_work_sync(&dev->timestamp);
}
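
The timestamp support above feeds the device's free-running 32-bit, 1 MHz counter into a cyclecounter/timecounter pair: timecounter_cyc2time() converts raw device timestamps to nanoseconds, and the delayed work calls timecounter_read() every 1800 s, well below half of the roughly 71.6 minute wrap period of a 32-bit microsecond counter, so each overflow is observed at most once. A stripped-down sketch of the same plumbing; example_ts, read_hw_counter() and the shift value are illustrative only:

#include <linux/clocksource.h>
#include <linux/ktime.h>
#include <linux/timecounter.h>

/* Sketch: converting a free-running 32-bit 1 MHz hardware counter to ns.
 * read_hw_counter() stands in for the device-specific register read.
 */
struct example_ts {
	struct cyclecounter cc;
	struct timecounter tc;
};

static u64 read_hw_counter(const struct cyclecounter *cc)
{
	return 0;	/* placeholder: read the 32-bit device counter here */
}

static void example_ts_init(struct example_ts *ts)
{
	ts->cc.read = read_hw_counter;
	ts->cc.mask = CYCLECOUNTER_MASK(32);
	/* pick shift so mult = (NSEC_PER_SEC << shift) / 1 MHz fits in 32 bit */
	ts->cc.shift = 22;
	ts->cc.mult = clocksource_hz2mult(1000000, ts->cc.shift);

	timecounter_init(&ts->tc, &ts->cc, ktime_get_real_ns());
}

static ktime_t example_ts_to_ktime(struct example_ts *ts, u32 raw)
{
	return ns_to_ktime(timecounter_cyc2time(&ts->tc, raw));
}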
static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
@@ -376,6 +502,24 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
+static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ u32 timestamp;
+
+ if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP))
+ return;
+
+ if (hf->flags & GS_CAN_FLAG_FD)
+ timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
+ else
+ timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);
+
+ gs_usb_skb_set_timestamp(dev, skb, timestamp);
+
+ return;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
@@ -443,6 +587,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_update_state(dev, cf);
}
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -465,6 +611,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
+ skb = dev->can.echo_skb[hf->echo_id];
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
NULL);
@@ -491,7 +640,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, usbcan->udev,
- usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+ usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN),
hf, dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, usbcan);
@@ -511,72 +660,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
- struct usb_interface *intf = dev->iface;
- int rc;
- struct gs_device_bittiming *dbt;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
/* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BITTIMING,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dbt, sizeof(*dbt), 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
- rc);
-
- return (rc > 0) ? 0 : rc;
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.data_bittiming;
- struct usb_interface *intf = dev->iface;
- struct gs_device_bittiming *dbt;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
u8 request = GS_USB_BREQ_DATA_BITTIMING;
- int rc;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
- /* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dbt, sizeof(*dbt), 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent,
- "Couldn't set data bittimings (err=%d)", rc);
-
- return (rc > 0) ? 0 : rc;
+ /* request data bit timings */
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static void gs_usb_xmit_callback(struct urb *urb)
@@ -587,9 +708,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
-
- usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -618,8 +736,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC,
- &urb->transfer_dma);
+ hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
goto nomem_hf;
@@ -659,11 +776,11 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
}
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+ usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
hf, dev->hf_size_tx,
gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0);
@@ -678,8 +795,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -699,8 +814,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ kfree(hf);
nomem_hf:
usb_free_urb(urb);
@@ -715,11 +829,13 @@ static int gs_can_open(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- int rc, i;
- struct gs_device_mode *dm;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_START),
+ };
struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
+ int rc, i;
rc = open_candev(netdev);
if (rc)
@@ -744,7 +860,6 @@ static int gs_can_open(struct net_device *netdev)
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
- dma_addr_t buf_dma;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -752,10 +867,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* alloc rx buffer */
- buf = usb_alloc_coherent(dev->udev,
- dev->parent->hf_size_rx,
- GFP_KERNEL,
- &buf_dma);
+ buf = kmalloc(dev->parent->hf_size_rx,
+ GFP_KERNEL);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -763,17 +876,15 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
}
- urb->transfer_dma = buf_dma;
-
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
usb_rcvbulkpipe(dev->udev,
- GSUSB_ENDPOINT_IN),
+ GS_USB_ENDPOINT_IN),
buf,
dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -786,17 +897,10 @@ static int gs_can_open(struct net_device *netdev)
"usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- buf,
- buf_dma);
usb_free_urb(urb);
break;
}
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
/* Drop reference,
* USB core will take care of freeing it
*/
@@ -804,10 +908,6 @@ static int gs_can_open(struct net_device *netdev)
}
}
- dm = kmalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
-
/* flags */
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
@@ -823,25 +923,30 @@ static int gs_can_open(struct net_device *netdev)
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ /* if hardware supports timestamps, enable it */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+
+ /* start polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_init(dev);
+
/* finally start device */
- dm->mode = cpu_to_le32(GS_CAN_MODE_START);
- dm->flags = cpu_to_le32(flags);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dm, sizeof(*dm), 1000);
-
- if (rc < 0) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm.flags = cpu_to_le32(flags);
+ rc = usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+ if (rc) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
- kfree(dm);
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
- kfree(dm);
-
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -854,19 +959,17 @@ static int gs_can_close(struct net_device *netdev)
int rc;
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- unsigned int i;
netif_stop_queue(netdev);
+ /* stop polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+
/* Stop polling */
parent->active_channels--;
if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
- for (i = 0; i < GS_MAX_RX_URBS; i++)
- usb_free_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- dev->rxbuf[i],
- dev->rxbuf_dma[i]);
}
/* Stop sending URBs */
@@ -890,52 +993,57 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
+static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_eth_ioctl_hwts(netdev, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
.ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = gs_can_eth_ioctl,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode *imode;
- int rc;
-
- imode = kmalloc(sizeof(*imode), GFP_KERNEL);
-
- if (!imode)
- return -ENOMEM;
+ struct gs_identify_mode imode;
if (do_identify)
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
-
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, imode, sizeof(*imode), 100);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
- kfree(imode);
-
- return (rc > 0) ? 0 : rc;
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &imode, sizeof(imode), 100,
+ GFP_KERNEL);
}
/* blink LEDs for finding this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -944,9 +1052,67 @@ static int gs_usb_set_phys_id(struct net_device *dev,
return rc;
}
+static int gs_usb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+
+ /* report if device supports HW timestamps */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_ethtool_op_get_ts_info_hwts(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gs_usb_get_ts_info,
+};
+
+static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+ int rc;
+
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_GET_TERMINATION,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
+ *term = GS_USB_TERMINATION_ENABLED;
+ else
+ *term = GS_USB_TERMINATION_DISABLED;
+
+ return 0;
+}
+
+static int gs_usb_set_termination(struct net_device *netdev, u16 term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+
+ if (term == GS_USB_TERMINATION_ENABLED)
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
+ else
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_SET_TERMINATION,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+}
+
+static const u16 gs_usb_termination_const[] = {
+ GS_USB_TERMINATION_DISABLED,
+ GS_USB_TERMINATION_ENABLED
};
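
The termination handling above maps the device's on/off termination switch onto the CAN core's termination API: gs_usb_termination_const[] advertises the selectable resistor values (CAN_TERMINATION_DISABLED, i.e. 0, and 120 ohm), and the core calls do_set_termination() when the value is changed over netlink, for example with a recent iproute2 via "ip link set can0 type can termination 120". If the initial GET request fails, the feature bit is simply cleared and the channel keeps working without termination control.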
static struct gs_can *gs_make_candev(unsigned int channel,
@@ -956,26 +1122,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct gs_can *dev;
struct net_device *netdev;
int rc;
- struct gs_device_bt_const *bt_const;
- struct gs_device_bt_const_extended *bt_const_extended;
+ struct gs_device_bt_const_extended bt_const_extended;
+ struct gs_device_bt_const bt_const;
u32 feature;
- bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
- if (!bt_const)
- return ERR_PTR(-ENOMEM);
-
/* fetch bit timing constants */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel, 0, bt_const, sizeof(*bt_const), 1000);
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const, sizeof(bt_const), 1000,
+ GFP_KERNEL);
- if (rc < 0) {
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const);
+ "Couldn't get bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
return ERR_PTR(rc);
}
@@ -983,7 +1144,6 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Couldn't allocate candev\n");
- kfree(bt_const);
return ERR_PTR(-ENOMEM);
}
@@ -996,14 +1156,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
- dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
- dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
- dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
- dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
- dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
- dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
- dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
- dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@@ -1020,13 +1180,13 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
- feature = le32_to_cpu(bt_const->feature);
+ feature = le32_to_cpu(bt_const.feature);
dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -1049,6 +1209,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
+ rc = gs_usb_get_termination(netdev, &dev->can.termination);
+ if (rc) {
+ dev->feature &= ~GS_CAN_FEATURE_TERMINATION;
+
+ dev_info(&intf->dev,
+ "Disabling termination support for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ } else {
+ dev->can.termination_const = gs_usb_termination_const;
+ dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
+ dev->can.do_set_termination = gs_usb_set_termination;
+ }
+ }
+
/* The CANtact Pro from LinkLayer Labs is based on the
* LPC54616 µC, which is affected by the NXP LPC USB transfer
* erratum. However, the current firmware (version 2) doesn't
@@ -1063,8 +1238,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
* GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
* issue.
*/
- if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) &&
- dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) &&
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
dev->udev->manufacturer && dev->udev->product &&
!strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
!strcmp(dev->udev->product, "CANtact Pro") &&
@@ -1072,61 +1247,57 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
-
- kfree(bt_const);
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
/* fetch extended bit timing constants if device has feature
* GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
*/
if (feature & GS_CAN_FEATURE_FD &&
feature & GS_CAN_FEATURE_BT_CONST_EXT) {
- bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL);
- if (!bt_const_extended)
- return ERR_PTR(-ENOMEM);
-
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST_EXT,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel, 0, bt_const_extended,
- sizeof(*bt_const_extended),
- 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const_extended,
+ sizeof(bt_const_extended),
+ 1000, GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get extended bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const_extended);
- return ERR_PTR(rc);
+ "Couldn't get extended bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
- dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
- dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
- dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
- dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max);
- dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max);
- dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min);
- dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max);
- dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
dev->can.data_bittiming_const = &dev->data_bt_const;
-
- kfree(bt_const_extended);
}
SET_NETDEV_DEV(netdev, &intf->dev);
rc = register_candev(dev->netdev);
if (rc) {
- free_candev(dev->netdev);
- dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
- return ERR_PTR(rc);
+ dev_err(&intf->dev,
+ "Couldn't register candev for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
return dev;
+
+ out_free_candev:
+ free_candev(dev->netdev);
+ return ERR_PTR(rc);
}
static void gs_destroy_candev(struct gs_can *dev)
@@ -1142,76 +1313,61 @@ static int gs_usb_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct gs_host_frame *hf;
struct gs_usb *dev;
- int rc = -ENOMEM;
+ struct gs_host_config hconf = {
+ .byte_order = cpu_to_le32(0x0000beef),
+ };
+ struct gs_device_config dconf;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = cpu_to_le32(0x0000beef);
+ int rc;
/* send host config */
- rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- GS_USB_BREQ_HOST_FORMAT,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1, intf->cur_altsetting->desc.bInterfaceNumber,
- hconf, sizeof(*hconf), 1000);
-
- kfree(hconf);
-
- if (rc < 0) {
+ rc = usb_control_msg_send(udev, 0,
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &hconf, sizeof(hconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
- rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- GS_USB_BREQ_DEVICE_CONFIG,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1, intf->cur_altsetting->desc.bInterfaceNumber,
- dconf, sizeof(*dconf), 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(udev, 0,
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &dconf, sizeof(dconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
- kfree(dconf);
return rc;
}
- icount = dconf->icount + 1;
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
- kfree(dconf);
return -EINVAL;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- kfree(dconf);
+ if (!dev)
return -ENOMEM;
- }
init_usb_anchor(&dev->rx_submitted);
- /* default to classic CAN, switch to CAN-FD if at least one of
- * our channels support CAN-FD.
- */
- dev->hf_size_rx = struct_size(hf, classic_can, 1);
usb_set_intfdata(intf, dev);
dev->udev = udev;
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf, dconf);
+ unsigned int hf_size_rx = 0;
+
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@@ -1222,18 +1378,28 @@ static int gs_usb_probe(struct usb_interface *intf,
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dconf);
kfree(dev);
return rc;
}
dev->canch[i]->parent = dev;
- if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD)
- dev->hf_size_rx = struct_size(hf, canfd, 1);
+ /* set RX packet size based on whether CAN FD and hardware
+ * timestamps are supported.
+ */
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, canfd_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, classic_can_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, classic_can, 1);
+ }
+ dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx);
}
- kfree(dconf);
-
return 0;
}
@@ -1258,8 +1424,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
- USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID,
+ USB_GS_USB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index dd65c101bfb8..6871d474dabf 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -534,7 +534,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -573,7 +573,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
@@ -694,7 +694,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -735,7 +735,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
int err;
int i;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1394,7 +1394,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
u32 kcan_id;
u32 kcan_header;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1468,7 +1468,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1533,7 +1533,7 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
int sjw = bt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1567,7 +1567,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
int sjw = dbt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1711,7 +1711,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1851,7 +1851,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -EINVAL;
}
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 8c9d53f6e24c..225697d70a9a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -962,7 +962,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 36b6310a2e5b..285635c23443 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -71,11 +71,10 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += can_skb_get_data_len(skb);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
@@ -86,14 +85,14 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
- int loop, len;
+ unsigned int len;
+ int loop;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
stats->tx_packets++;
stats->tx_bytes += len;
@@ -137,7 +136,8 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index cffd107d8b28..26a472d2ea58 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -38,10 +38,9 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
@@ -70,7 +69,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
@@ -132,7 +131,8 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index d8ae0e8af2a0..07507b4820d7 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -76,7 +76,7 @@ config NET_DSA_SMSC_LAN9303
select NET_DSA_TAG_LAN9303
select REGMAP
help
- This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
config NET_DSA_SMSC_LAN9303_I2C
@@ -90,11 +90,11 @@ config NET_DSA_SMSC_LAN9303_I2C
for I2C managed mode.
config NET_DSA_SMSC_LAN9303_MDIO
- tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
depends on VLAN_8021Q || VLAN_8021Q=n
help
- Enable access functions if the SMSC/Microchip LAN9303 is configured
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 48cf344750ff..59cdfc51ce06 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -972,7 +972,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a7aeb3c132c9..6ddc03b58b28 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -356,8 +356,6 @@ static void b53_mdio_remove(struct mdio_device *mdiodev)
return;
b53_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..e968322dfbf0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,8 +316,6 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index da0b889880f6..bcb44034404d 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -667,8 +667,6 @@ static int b53_srab_remove(struct platform_device *pdev)
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index be0edfa093d0..cde253d27bd0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -94,6 +94,24 @@ static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -141,7 +159,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
- u32 reg, offset;
+ u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -167,21 +185,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM4908_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
- else
- reg &= ~GMII_SPEED_UP_2G;
- core_writel(priv, reg, offset);
-
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
@@ -812,17 +815,10 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
if (priv->wol_ports_mask & BIT(port))
return;
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- reg = core_readl(priv, offset);
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
- }
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
@@ -836,56 +832,56 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ u32 reg_rgmii_ctrl = 0;
+ u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl = 0;
- u32 reg, offset;
+ offset = bcm_sf2_port_override_offset(priv, port);
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- if (interface == PHY_INTERFACE_MODE_RGMII ||
- interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- interface == PHY_INTERFACE_MODE_MII ||
- interface == PHY_INTERFACE_MODE_REVMII) {
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
- reg = reg_readl(priv, reg_rgmii_ctrl);
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
- if (tx_pause)
- reg |= TX_PAUSE_EN;
- if (rx_pause)
- reg |= RX_PAUSE_EN;
-
- reg_writel(priv, reg, reg_rgmii_ctrl);
- }
-
- reg = SW_OVERRIDE | LINK_STS;
- switch (speed) {
- case SPEED_1000:
- reg |= SPDSTS_1000 << SPEED_SHIFT;
- break;
- case SPEED_100:
- reg |= SPDSTS_100 << SPEED_SHIFT;
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
if (tx_pause)
- reg |= TXFLOW_CNTL;
+ reg |= TX_PAUSE_EN;
if (rx_pause)
- reg |= RXFLOW_CNTL;
+ reg |= RX_PAUSE_EN;
- core_writel(priv, reg, offset);
+ reg_writel(priv, reg, reg_rgmii_ctrl);
}
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
if (mode == MLO_AN_PHY && phydev)
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
@@ -987,7 +983,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -1011,7 +1007,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
@@ -1555,8 +1551,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index edbe5e7f1cb6..c4010b7bf089 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1296,7 +1296,7 @@ void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
"CFP%03d_%sCntr",
i, bcm_sf2_cfp_stats[j].name);
iter = (i - 1) * s + j;
- strlcpy(data + iter * ETH_GSTRING_LEN,
+ strscpy(data + iter * ETH_GSTRING_LEN,
buf, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 263e41191c29..b9107fe40023 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -351,8 +351,6 @@ static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(ds);
dev_put(ps->netdev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 01f90994dedd..951f7935c872 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -128,6 +128,16 @@ static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
hellcreek_write(hellcreek, val, HR_PSEL);
}
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
@@ -288,7 +298,7 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
counter->name, ETH_GSTRING_LEN);
}
}
@@ -1537,6 +1547,45 @@ out:
return ret;
}
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
@@ -1720,7 +1769,10 @@ static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
- /* Then select port */
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
@@ -1772,7 +1824,10 @@ static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
hellcreek_port->current_schedule = NULL;
}
- /* Then select port */
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
@@ -1809,22 +1864,43 @@ static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
return true;
}
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
struct hellcreek *hellcreek = ds->priv;
- if (type != TC_SETUP_QDISC_TAPRIO)
- return -EOPNOTSUPP;
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
- if (!hellcreek_validate_schedule(hellcreek, taprio))
- return -EOPNOTSUPP;
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
- if (taprio->enable)
- return hellcreek_port_set_schedule(ds, port, taprio);
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
- return hellcreek_port_del_schedule(ds, port);
+ return hellcreek_port_del_schedule(ds, port);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
@@ -1996,7 +2072,6 @@ static int hellcreek_remove(struct platform_device *pdev)
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9e303b8ab13c..4a678f7d61ae 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -37,6 +37,7 @@
#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
/* Register definitions */
#define HR_MODID_C (0 * 2)
@@ -72,6 +73,12 @@
#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
#define HR_CSEL (0x8d * 2)
#define HR_CSEL_SHIFT 0
#define HR_CSEL_MASK GENMASK(7, 0)
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index e03ff1f267bb..438e46af03e9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -22,6 +22,10 @@
*/
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
@@ -32,6 +36,7 @@
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
@@ -851,15 +856,12 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
- if (!chip->reset_gpio) {
- dev_dbg(chip->dev,
- "hint: maybe failed due to missing reset GPIO\n");
- }
return ret;
}
- if ((reg >> 16) != LAN9303_CHIP_ID) {
- dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
@@ -875,7 +877,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
- dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
@@ -1090,7 +1092,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
if (!dsa_port_is_user(dp))
return 0;
- vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+ vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
return lan9303_enable_processing_port(chip, port);
}
@@ -1103,7 +1105,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return;
- vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
@@ -1349,6 +1351,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
+ u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
@@ -1359,6 +1362,19 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
lan9303_handle_reset(chip);
+ /* First read to the device. This is a dummy read to ensure MDIO
+ * access is in 32-bit sync.
+ */
+ ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 8ca4713310fa..7d746cd9ca1b 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -65,18 +65,14 @@ static int lan9303_i2c_probe(struct i2c_client *client,
return 0;
}
-static int lan9303_i2c_remove(struct i2c_client *client)
+static void lan9303_i2c_remove(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return 0;
+ return;
lan9303_remove(&sw_dev->chip);
-
- i2c_set_clientdata(client, NULL);
-
- return 0;
}
static void lan9303_i2c_shutdown(struct i2c_client *client)
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index bbb7032409ba..4f33369a2de5 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,8 +138,6 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
@@ -158,6 +156,7 @@ static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index e531b93f3cb2..05ecaa007ab1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1989,11 +1989,9 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset)) {
- if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(gphy_fw->reset);
- }
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
return gswip_gphy_fw_load(priv, gphy_fw);
}
@@ -2231,8 +2229,6 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 42c50cc4d853..8582b4b67d98 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -17,8 +17,8 @@ u32 ksz8_get_port_addr(int port, int offset);
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
-void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
-void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c79a5128235f..bd3b133e7085 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -552,7 +552,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 restart, speed, ctrl, link;
int processed = true;
@@ -560,14 +560,24 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
if (restart & PORT_PHY_LOOPBACK)
data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
@@ -597,7 +607,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
data = BMSR_100FULL |
BMSR_100HALF |
BMSR_10FULL |
@@ -618,7 +631,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data = KSZ8795_ID_LO;
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= ADVERTISE_PAUSE_CAP;
@@ -632,7 +648,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= ADVERTISE_10HALF;
break;
case MII_LPA:
- ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= LPA_PAUSE_CAP;
@@ -648,8 +667,14 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= LPA_LPACK;
break;
case PHY_REG_LINK_MD:
- ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
- ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
if (val1 & PORT_START_CABLE_DIAG)
data |= PHY_START_CABLE_DIAG;
@@ -664,7 +689,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
break;
case PHY_REG_PHY_CTRL:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
if (link & PORT_MDIX_STATUS)
data |= KSZ886X_CTRL_MDIX_STAT;
break;
@@ -674,13 +702,16 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
}
if (processed)
*val = data;
+
+ return 0;
}
-void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
u8 restart, speed, ctrl, data;
const u16 *regs;
u8 p = phy;
+ int ret;
regs = dev->info->regs;
@@ -690,15 +721,26 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
/* Do not support PHY reset function. */
if (val & BMCR_RESET)
break;
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
data = speed;
if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
- if (data != speed)
- ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+
+ if (data != speed) {
+ ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
if ((val & BMCR_ANENABLE))
@@ -724,9 +766,17 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
data = restart;
if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
@@ -756,11 +806,19 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
- if (data != restart)
- ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
+
+ if (data != restart) {
+ ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
+ data);
+ if (ret)
+ return ret;
+ }
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
@@ -777,8 +835,12 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_10BT_FD;
if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
break;
case PHY_REG_LINK_MD:
if (val & PHY_START_CABLE_DIAG)
@@ -787,6 +849,8 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
default:
break;
}
+
+ return 0;
}
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
@@ -1187,7 +1251,6 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
if (i == dev->phy_port_cnt)
break;
p->on = 1;
- p->phy = 1;
}
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5247fdfb964d..ddb40838181e 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -180,8 +180,6 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 4b14d80d27ed..a6a0321a8931 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -193,6 +193,11 @@ int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ return 0;
+
data8 = SW_ENABLE_REFCLKO;
if (dev->synclko_disable)
data8 = 0;
@@ -264,9 +269,20 @@ void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
mutex_unlock(&mib->cnt_mutex);
}
-void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
+{
+ /* KSZ8563R does not have extended registers, but the BMSR_ESTATEN and
+ * BMSR_ERCAP bits are set.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
+}
+
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
u16 val = 0xffff;
+ int ret;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be setup in the device tree, but this function is
@@ -274,7 +290,7 @@ void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used. For SGMII PHY the supporting code will be added later.
*/
- if (addr >= dev->phy_port_cnt) {
+ if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
@@ -307,23 +323,25 @@ void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
break;
}
} else {
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
+
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
}
*data = val;
+
+ return 0;
}
-void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
/* No real PHY after this. */
- if (addr >= dev->phy_port_cnt)
- return;
+ if (!dev->info->internal_phy[addr])
+ return 0;
- /* No gigabit support. Do not write to this register. */
- if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return;
-
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
@@ -613,6 +631,9 @@ int ksz9477_fdb_dump(struct ksz_device *dev, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
@@ -866,7 +887,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
phy_interface_t interface;
bool gbit;
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
return PHY_INTERFACE_MODE_NA;
gbit = ksz_get_gbit(dev, port);
@@ -911,7 +932,7 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
/* Energy Efficient Ethernet (EEE) feature select must
* be manually disabled (except on KSZ8565 which is 100Mbit)
*/
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
/* Register settings are required to meet data sheet
@@ -938,10 +959,35 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
MAC_SYM_PAUSE;
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
}
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u8 value;
+ u8 data;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ if (ret < 0)
+ return ret;
+
+ data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+
+ return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+}
+
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
@@ -973,7 +1019,7 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
- if (port < dev->phy_port_cnt) {
+ if (dev->info->internal_phy[port]) {
/* do not force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
@@ -996,7 +1042,7 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
@@ -1048,25 +1094,13 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->on = 1;
}
}
for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
- p = &dev->ports[i];
-
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- p->on = 1;
- if (i < dev->phy_port_cnt)
- p->phy = 1;
- if (dev->chip_id == 0x00947700 && i == 6) {
- p->sgmii = 1;
-
- /* SGMII PHY detection code is not implemented yet. */
- p->phy = 0;
- }
}
}
@@ -1155,29 +1189,6 @@ int ksz9477_switch_init(struct ksz_device *dev)
if (ret)
return ret;
- ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
- if (ret)
- return ret;
-
- /* Number of ports can be reduced depending on chip. */
- dev->phy_port_cnt = 5;
-
- /* Default capability is gigabit capable. */
- dev->features = GBIT_SUPPORT;
-
- if (dev->chip_id == KSZ9893_CHIP_ID) {
- dev->features |= IS_9893;
-
- /* Chip does not support gigabit. */
- if (data8 & SW_QW_ABLE)
- dev->features &= ~GBIT_SUPPORT;
- dev->phy_port_cnt = 2;
- } else {
- /* Chip does not support gigabit. */
- if (!(data8 & SW_GIGABIT_ABLE))
- dev->features &= ~GBIT_SUPPORT;
- }
-
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index cd278b307b3c..00862c4cfb7f 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -16,8 +16,9 @@ u32 ksz9477_get_port_addr(int port, int offset);
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
-void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
-void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 99966514d444..3763930dc6fc 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -52,16 +52,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int ksz9477_i2c_remove(struct i2c_client *i2c)
+static void ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (dev)
ksz_switch_remove(dev);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
@@ -92,6 +88,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.data = &ksz_switch_chips[KSZ9477]
},
{
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
},
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index ddf99d1e4bbd..53c68d286dd3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -189,8 +189,9 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
+#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -225,6 +226,7 @@
#define SW_PRIO_LOWEST_DA_SA 3
#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
#define REG_SW_LUE_INT_STATUS 0x0314
#define REG_SW_LUE_INT_ENABLE 0x0315
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index ed7d137cba99..d612181b3226 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -14,6 +14,9 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
@@ -170,12 +173,20 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.exit = ksz8_switch_exit,
};
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
.r_phy = ksz9477_r_phy,
.w_phy = ksz9477_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -196,6 +207,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
@@ -205,10 +217,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
static const struct ksz_dev_ops lan937x_dev_ops = {
.setup = lan937x_setup,
+ .teardown = lan937x_teardown,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
.r_phy = lan937x_r_phy,
.w_phy = lan937x_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -230,6 +244,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
@@ -412,7 +427,636 @@ static const u8 lan937x_shifts[] = {
[ALU_STAT_INDEX] = 8,
};
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7203),
+ regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6122, 0x6127),
+ regmap_reg_range(0x612a, 0x612b),
+ regmap_reg_range(0x6136, 0x6139),
+ regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
+
const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
[KSZ8795] = {
.chip_id = KSZ8795_CHIP_ID,
.dev_name = "KSZ8795",
@@ -527,6 +1171,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -545,6 +1190,41 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, false},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
},
[KSZ9897] = {
@@ -555,6 +1235,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -573,6 +1254,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
},
[KSZ9893] = {
@@ -583,6 +1265,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -596,6 +1279,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
},
[KSZ9567] = {
@@ -606,6 +1290,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -624,6 +1309,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
},
[LAN9370] = {
@@ -634,6 +1320,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -657,6 +1344,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -680,6 +1368,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -707,6 +1396,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -734,6 +1424,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -803,9 +1494,15 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->supports_rgmii[port])
phy_interface_set_rgmii(config->supported_interfaces);
- if (dev->info->internal_phy[port])
+ if (dev->info->internal_phy[port]) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
@@ -959,9 +1656,281 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
+
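+/* Point each internal PHY's entry in the slave MII bus at the
+ * PORT_SRC_PHY_INT line of that port's interrupt domain, so phylib can use
+ * link-change interrupts instead of polling.
+ */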
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ ds->slave_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+}
+
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret) {
+ of_node_put(mdio_np);
+ return ret;
+ }
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
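+/* The interrupt mask register sits behind a slow bus (SPI here), so mask and
+ * unmask only update a cached copy; irq_bus_sync_unlock() writes it back to
+ * the hardware.
+ */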
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
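+ /* Dispatch each pending status bit to its nested interrupt. */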
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
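+/* Two-level interrupt hierarchy: the global switch interrupt (girq) is
+ * demultiplexed into one line per port, and each port interrupt (pirq) is
+ * demultiplexed again into per-source lines such as the PHY interrupt.
+ */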
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (pirq->irq_num < 0)
+ return pirq->irq_num;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+ struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1001,11 +1970,63 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ /* Start with learning disabled on standalone user ports, and enabled
+ * on the CPU port. In the absence of finer-grained mechanisms, learning
+ * on the CPU port will avoid flooding bridge local addresses on the
+ * network in some cases.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto out_girq;
+ }
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the MDIO bus\n");
+ goto out_pirq;
+ }
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
return 0;
+
+out_pirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+out_girq:
+ if (dev->irq > 0)
+ ksz_irq_free(&dev->girq);
+
+ return ret;
+}
+
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
}
static void port_r_cnt(struct ksz_device *dev, int port)
@@ -1089,8 +2110,11 @@ static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
- dev->dev_ops->r_phy(dev, addr, reg, &val);
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
return val;
}
@@ -1098,8 +2122,11 @@ static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
+ int ret;
- dev->dev_ops->w_phy(dev, addr, reg, val);
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
return 0;
}
@@ -1188,6 +2215,16 @@ static void ksz_port_fast_age(struct dsa_switch *ds, int port)
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->set_ageing_time(dev, msecs);
+}
+
static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
@@ -1277,6 +2314,8 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ p = &dev->ports[port];
+
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
@@ -1286,9 +2325,13 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
@@ -1300,12 +2343,38 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
- p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
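+
+/* Only the BR_LEARNING bridge port flag is offloaded: .port_pre_bridge_flags
+ * rejects any other flag, and .port_bridge_flags applies the learning change
+ * by re-writing the current STP state.
+ */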
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -1319,10 +2388,12 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9477;
@@ -1437,7 +2508,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
case PHY_INTERFACE_MODE_RGMII_RXID:
data8 |= bitval[P_RGMII_SEL];
/* On KSZ9893 and KSZ8563, disable RGMII in-band status support */
- if (dev->features & IS_9893)
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -1609,13 +2681,13 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct ksz_device *dev = ds->priv;
struct ksz_port *p;
p = &dev->ports[port];
@@ -1629,6 +2701,15 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
ksz_port_set_xmii_speed(dev, port, speed);
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
if (dev->dev_ops->phylink_mac_link_up)
dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
@@ -1638,7 +2719,7 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
static int ksz_switch_detect(struct ksz_device *dev)
{
- u8 id1, id2;
+ u8 id1, id2, id4;
u16 id16;
u32 id32;
int ret;
@@ -1683,8 +2764,8 @@ static int ksz_switch_detect(struct ksz_device *dev)
switch (id32) {
case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
- case KSZ9893_CHIP_ID:
case KSZ9567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
@@ -1693,6 +2774,18 @@ static int ksz_switch_detect(struct ksz_device *dev)
case LAN9374_CHIP_ID:
dev->chip_id = id32;
break;
+ case KSZ9893_CHIP_ID:
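+ /* KSZ8563 reports the KSZ9893 chip ID; the SKU byte in
+ * REG_CHIP_ID4 tells the two apart.
+ */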
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
+ if (ret)
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
+ break;
default:
dev_err(dev->dev,
"unsupported switch detected %x\n", id32);
@@ -1706,6 +2799,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.get_phy_flags = ksz_get_phy_flags,
.setup = ksz_setup,
+ .teardown = ksz_teardown,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
@@ -1713,12 +2807,15 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phylink_mac_link_up = ksz_phylink_mac_link_up,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
+ .set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
.port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz_port_vlan_filtering,
.port_vlan_add = ksz_port_vlan_add,
@@ -1868,6 +2965,9 @@ int ksz_switch_register(struct ksz_device *dev)
GFP_KERNEL);
if (!dev->ports[i].mib.counters)
return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
}
/* set the real number of ports */
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 764ada3a0f42..9cfa179575ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -13,9 +13,12 @@
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
+#include <linux/irq.h>
#define KSZ_MAX_NUM_PORTS 8
+struct ksz_device;
+
struct vlan_table {
u32 table[3];
};
@@ -42,6 +45,7 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ u8 port_nirqs;
const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
@@ -61,17 +65,30 @@ struct ksz_chip_data {
bool supports_rmii[KSZ_MAX_NUM_PORTS];
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
};
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
@@ -81,6 +98,9 @@ struct ksz_port {
u16 max_frame;
u32 rgmii_tx_val;
u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ struct ksz_irq pirq;
+ u8 num;
};
struct ksz_device {
@@ -98,6 +118,7 @@ struct ksz_device {
struct regmap *regmap[3];
void *priv;
+ int irq;
struct gpio_desc *reset_gpio; /* Optional reset GPIO */
@@ -117,17 +138,20 @@ struct ksz_device {
unsigned long mib_read_interval;
u16 mirror_rx;
u16 mirror_tx;
- u32 features; /* chip specific features */
u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
};
/* List of supported models */
enum ksz_model {
+ KSZ8563,
KSZ8795,
KSZ8794,
KSZ8765,
KSZ8830,
KSZ9477,
+ KSZ9896,
KSZ9897,
KSZ9893,
KSZ9567,
@@ -139,11 +163,13 @@ enum ksz_model {
};
enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
KSZ8795_CHIP_ID = 0x8795,
KSZ8794_CHIP_ID = 0x8794,
KSZ8765_CHIP_ID = 0x8765,
KSZ8830_CHIP_ID = 0x8830,
KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
KSZ9897_CHIP_ID = 0x00989700,
KSZ9893_CHIP_ID = 0x00989300,
KSZ9567_CHIP_ID = 0x00956700,
@@ -253,13 +279,15 @@ struct alu_struct {
struct ksz_dev_ops {
int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
@@ -329,6 +357,10 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[0], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -338,6 +370,10 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[1], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -347,6 +383,10 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[2], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -357,7 +397,10 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret)
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
*val = (u64)value[0] << 32 | value[1];
return ret;
@@ -365,17 +408,38 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- return regmap_write(dev->regmap[0], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[0], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- return regmap_write(dev->regmap[1], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[1], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- return regmap_write(dev->regmap[2], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[2], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
@@ -390,40 +454,42 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
return regmap_bulk_write(dev->regmap[2], reg, val, 2);
}
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
{
- ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
{
- ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
{
- ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
{
- ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
{
- ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
{
- ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
@@ -482,6 +548,10 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_REV_ID_M GENMASK(7, 4)
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROT_RATE 10
@@ -496,10 +566,6 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_START 0x01
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define IS_9893 BIT(2)
-
/* xMII configuration */
#define P_MII_DUPLEX_M BIT(6)
#define P_MII_100MBIT_M BIT(4)
@@ -510,6 +576,15 @@ static inline int is_lan937x(struct ksz_device *dev)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 05bd089795f8..1b6ab891b986 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -66,7 +66,10 @@ static int ksz_spi_probe(struct spi_device *spi)
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
rc = regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
@@ -85,6 +88,8 @@ static int ksz_spi_probe(struct spi_device *spi)
if (ret)
return ret;
+ dev->irq = spi->irq;
+
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
@@ -102,8 +107,6 @@ static void ksz_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
}
static void ksz_spi_shutdown(struct spi_device *spi)
@@ -147,6 +150,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ9477]
},
{
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
},
@@ -160,7 +167,7 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
@@ -197,6 +204,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8863" },
{ "ksz8873" },
{ "ksz9477" },
+ { "ksz9896" },
{ "ksz9897" },
{ "ksz9893" },
{ "ksz9563" },
@@ -226,6 +234,7 @@ static struct spi_driver ksz_spi_driver = {
module_spi_driver(ksz_spi_driver);
MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9896");
MODULE_ALIAS("spi:ksz9897");
MODULE_ALIAS("spi:ksz9893");
MODULE_ALIAS("spi:ksz9563");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 4e0b1dccec27..8e9e66d6728d 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -8,14 +8,16 @@
int lan937x_reset_switch(struct ksz_device *dev);
int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
void lan937x_config_cpu_port(struct dsa_switch *ds);
int lan937x_switch_init(struct ksz_device *dev);
void lan937x_switch_exit(struct ksz_device *dev);
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index daedd2bf20c1..7e4f307a0387 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -7,7 +7,6 @@
#include <linux/iopoll.h>
#include <linux/phy.h>
#include <linux/of_net.h>
-#include <linux/of_mdio.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/math.h>
@@ -128,81 +127,14 @@ static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
}
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- lan937x_internal_phy_read(dev, addr, reg, data);
+ return lan937x_internal_phy_read(dev, addr, reg, data);
}
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- lan937x_internal_phy_write(dev, addr, reg, val);
-}
-
-static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct ksz_device *dev = bus->priv;
- u16 val;
- int ret;
-
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
- ret = lan937x_internal_phy_read(dev, addr, regnum, &val);
- if (ret < 0)
- return ret;
-
- return val;
-}
-
-static int lan937x_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct ksz_device *dev = bus->priv;
-
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
- return lan937x_internal_phy_write(dev, addr, regnum, val);
-}
-
-static int lan937x_mdio_register(struct ksz_device *dev)
-{
- struct dsa_switch *ds = dev->ds;
- struct device_node *mdio_np;
- struct mii_bus *bus;
- int ret;
-
- mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
- if (!mdio_np) {
- dev_err(ds->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- bus = devm_mdiobus_alloc(ds->dev);
- if (!bus) {
- of_node_put(mdio_np);
- return -ENOMEM;
- }
-
- bus->priv = dev;
- bus->read = lan937x_sw_mdio_read;
- bus->write = lan937x_sw_mdio_write;
- bus->name = "lan937x slave smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
-
- ds->slave_mii_bus = bus;
-
- ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
- if (ret) {
- dev_err(ds->dev, "unable to register MDIO bus %s\n",
- bus->id);
- }
-
- of_node_put(mdio_np);
-
- return ret;
+ return lan937x_internal_phy_write(dev, addr, reg, val);
}
int lan937x_reset_switch(struct ksz_device *dev)
@@ -225,6 +157,10 @@ int lan937x_reset_switch(struct ksz_device *dev)
if (ret < 0)
return ret;
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
if (ret < 0)
return ret;
@@ -244,10 +180,6 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
- /* disable frame check length field */
- lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
- false);
-
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
@@ -315,6 +247,23 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
return 0;
}
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u32 value;
+ int ret;
+
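+ /* The ageing period is programmed in seconds as a 20-bit value split
+ * across two registers: bits 7:0 and bits 19:8.
+ */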
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
+
static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
u16 reg, u8 val)
{
@@ -383,6 +332,13 @@ void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
}
}
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
int lan937x_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -395,12 +351,6 @@ int lan937x_setup(struct dsa_switch *ds)
return ret;
}
- ret = lan937x_mdio_register(dev);
- if (ret < 0) {
- dev_err(dev->dev, "failed to register the mdio");
- return ret;
- }
-
/* VLAN awareness is a global setting. Mixed VLAN
* filtering is not supported.
*/
@@ -426,11 +376,9 @@ int lan937x_setup(struct dsa_switch *ds)
return 0;
}
-int lan937x_switch_init(struct ksz_device *dev)
+void lan937x_teardown(struct dsa_switch *ds)
{
- dev->port_mask = (1 << dev->info->port_cnt) - 1;
- return 0;
}
void lan937x_switch_exit(struct ksz_device *dev)
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index ba4adaddb3ec..5bc16a4c4441 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -62,6 +62,12 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
#define REG_SW_MAC_CTRL_0 0x0330
#define SW_NEW_BACKOFF BIT(7)
#define SW_PAUSE_UNH_MODE BIT(1)
@@ -118,6 +124,18 @@
/* Port Registers */
/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
#define REG_PORT_CTRL_0 0x0020
#define PORT_MAC_LOOPBACK BIT(7)
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 835807911be0..e74c6b406172 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -506,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -592,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -2326,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ /* all MACs must be forced link-down before sw reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -2699,9 +2708,6 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
return mt7531_sgmii_setup_mode_force(priv, port, interface);
default:
return -EINVAL;
@@ -2777,13 +2783,6 @@ unsupported:
return;
}
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
@@ -2887,8 +2886,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
@@ -2922,6 +2919,9 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port))
+ config->mac_capabilities |= MAC_2500FD;
+
/* This driver does not make use of the speed, duplex, pause or the
* advertisement in its mac_config, so it is safe to mark this driver
* as non-legacy.
@@ -2987,6 +2987,7 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE);
if (state->interface == PHY_INTERFACE_MODE_SGMII &&
(status & MT7531_SGMII_AN_ENABLE)) {
val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
@@ -3017,16 +3018,44 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
+static void
+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ unsigned int val;
+
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(val & MT7531_SGMII_LINK_STATUS);
+ if (!state->link)
+ return;
+
+ state->an_complete = state->link;
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+
+ state->duplex = DUPLEX_FULL;
+ state->pause = MLO_PAUSE_NONE;
+}
+
static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
int port = pcs_to_mt753x_pcs(pcs)->port;
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
mt7531_sgmii_pcs_get_state_an(priv, port, state);
- else
- state->link = false;
+ return;
+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) ||
+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ mt7531_sgmii_pcs_get_state_inband(priv, port, state);
+ return;
+ }
+
+ state->link = false;
}
static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
@@ -3067,6 +3096,8 @@ mt753x_setup(struct dsa_switch *ds)
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
+ if (mt753x_is_mac_port(i))
+ priv->pcs[i].pcs.poll = 1;
}
ret = priv->info->sw_setup(ds);
@@ -3300,8 +3331,6 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mt7530_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index e509af95c354..e8d966435350 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -373,6 +373,7 @@ enum mt7530_vlan_port_acc_frm {
#define MT7531_SGMII_LINK_STATUS BIT(18)
#define MT7531_SGMII_AN_ENABLE BIT(12)
#define MT7531_SGMII_AN_RESTART BIT(9)
+#define MT7531_SGMII_AN_COMPLETE BIT(21)
/* Register for SGMII PCS_SPPED_ABILITY */
#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..fdda62d6eb16 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
@@ -294,8 +297,6 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
return;
dsa_unregister_switch(ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 07e9a4da924c..2479be3a1e35 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -816,6 +816,14 @@ static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_10000FD;
}
}
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
}
static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
@@ -1128,7 +1136,7 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mv88e6xxx_atu_vtu_stats_strings[i],
ETH_GSTRING_LEN);
}
@@ -6585,14 +6593,17 @@ out:
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
int members = 0;
- if (!mv88e6xxx_has_lag(chip))
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
+ }
if (!lag.id)
return false;
@@ -6601,14 +6612,20 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
/* Includes the port joining the LAG */
members++;
- if (members > 8)
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
return false;
+ }
/* We could potentially relax this to include active
* backup in the future.
*/
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
@@ -6761,12 +6778,13 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
/* DSA LAG IDs are one-based */
@@ -6819,12 +6837,13 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -7166,8 +7185,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 807aeaad9830..7536b8b0ad01 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -298,7 +298,7 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
-#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 90c55f23b7c9..5c4195c635b0 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -517,6 +517,12 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_RMII:
cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -634,6 +640,19 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
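Taken together, the mv88e6393x hunks above restrict the RMII/RGMII interface family to port 0: chip.c advertises those modes in phylink_get_caps only for port 0, while port.c rejects them on the SERDES ports 9 and 10. The standalone sketch below condenses that policy; the function name, the simplified mode enum and the return codes are illustrative, not part of the driver.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative subset of phy_interface_t; values are arbitrary here. */
enum phy_mode { MODE_RMII, MODE_RGMII, MODE_RGMII_ID, MODE_RGMII_RXID,
		MODE_RGMII_TXID, MODE_SGMII, MODE_10GBASER };

static bool is_rgmii_family(enum phy_mode mode)
{
	return mode == MODE_RMII || mode == MODE_RGMII ||
	       mode == MODE_RGMII_ID || mode == MODE_RGMII_RXID ||
	       mode == MODE_RGMII_TXID;
}

/* Sketch of the mv88e6393x policy: only ports 0, 9 and 10 have a
 * configurable cmode, and of those only port 0 may use RMII/RGMII.
 */
static int mv88e6393x_cmode_allowed(int port, enum phy_mode mode)
{
	if (port != 0 && port != 9 && port != 10)
		return -1;			/* -EOPNOTSUPP in the driver */
	if ((port == 9 || port == 10) && is_rgmii_family(mode))
		return -2;			/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("port 0, RGMII: %d\n", mv88e6393x_cmode_allowed(0, MODE_RGMII));
	printf("port 9, RGMII: %d\n", mv88e6393x_cmode_allowed(9, MODE_RGMII));
	printf("port 9, SGMII: %d\n", mv88e6393x_cmode_allowed(9, MODE_SGMII));
	return 0;
}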
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index aadb0bd7c24f..dd3a18cc89dd 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -42,6 +42,25 @@ static struct net_device *felix_classify_db(struct dsa_db db)
}
}
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
@@ -422,6 +441,40 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
return BIT(ocelot->num_phys_ports);
}
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
+
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
+}
+
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
@@ -433,6 +486,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
};
static int felix_tag_8021q_setup(struct dsa_switch *ds)
@@ -445,6 +499,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
if (err)
return err;
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
dp->cpu_dp->index);
@@ -493,6 +550,9 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
dsa_tag_8021q_unregister(ds);
}
@@ -501,10 +561,24 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
return dsa_cpu_ports(ds);
}
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
+
+ return felix_update_trapping_destinations(ds, true);
+}
+
static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
};
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
@@ -667,6 +741,16 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
!!felix->host_flood_mc_mask, true);
}
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -855,11 +939,21 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
static int felix_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
- return ocelot_port_lag_join(ocelot, port, lag.dev, info);
+ return felix_port_change_master(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
@@ -869,7 +963,11 @@ static int felix_lag_leave(struct dsa_switch *ds, int port,
ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -1007,6 +1105,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
felix->info->port_sched_speed_set(ocelot, port, speed);
}
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -1028,6 +1147,55 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
}
}
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -1144,11 +1312,55 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
return err;
}
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
- struct resource res;
+ struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -1182,20 +1394,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
}
for (i = 0; i < TARGET_MAX; i++) {
- struct regmap *target;
-
- if (!felix->info->target_io_res[i].name)
- continue;
-
- memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map device memory space\n");
+ "Failed to map device memory space: %pe\n",
+ target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1212,7 +1415,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
- struct regmap *target;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -1224,16 +1426,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map memory space for port %d\n",
- port);
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1842,6 +2039,12 @@ const struct dsa_switch_ops felix_switch_ops = {
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
@@ -1851,6 +2054,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
@@ -1906,6 +2110,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index deb8dde1fc19..c9c29999c336 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -16,9 +16,13 @@
/* Platform-specific information */
struct felix_info {
- const struct resource *target_io_res;
- const struct resource *port_io_res;
- const struct resource *imdio_res;
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -56,8 +60,6 @@ struct felix_info {
void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
- struct regmap *(*init_regmap)(struct ocelot *ocelot,
- struct resource *res);
};
/* Methods for initializing the hardware resources specific to a tagging
@@ -71,6 +73,9 @@ struct felix_tag_proto_ops {
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -83,7 +88,6 @@ struct felix {
struct mii_bus *imdio;
struct phylink_pcs **pcs;
resource_size_t switch_base;
- resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index b4034b78c0ca..26a35ae322d1 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -22,6 +22,7 @@
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
@@ -274,27 +275,102 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
- REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -316,7 +392,6 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -402,100 +477,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9959_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static const struct resource vsc9959_imdio_res = {
- .start = 0x8030,
- .end = 0x8040,
- .name = "imdio",
-};
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
@@ -547,100 +565,8 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -1020,9 +946,11 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
@@ -1038,10 +966,11 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return -ENOMEM;
}
- memcpy(&res, felix->info->imdio_res, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->imdio_base;
- res.end += felix->imdio_base;
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
@@ -1128,6 +1057,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot for which egress scheduling is blocked, but we still need to keep 33 ns
+ * available for one packet to be transmitted, otherwise the port tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
+
/* Extract shortest continuous gate open intervals in ns for each traffic class
* of a cyclic tc-taprio schedule. If a gate is always open, the duration is
* considered U64_MAX. If the gate is always closed, it is considered 0.
@@ -1189,6 +1135,73 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
min_gate_len[tc] = 0;
}
+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
+
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
* switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
* values (the default value is 1518). Also, for traffic class windows smaller
@@ -1198,6 +1211,7 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct tc_taprio_qopt_offload *taprio;
u64 min_gate_len[OCELOT_NUM_TC];
int speed, picos_per_byte;
u64 needed_bit_time_ps;
@@ -1207,6 +1221,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
lockdep_assert_held(&ocelot->tas_lock);
+ taprio = ocelot_port->taprio;
+
val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
@@ -1243,17 +1259,23 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
"port %d: max frame size %d needs %llu ps at speed %d\n",
port, maxlen, needed_bit_time_ps, speed);
- vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
+
+ mutex_lock(&ocelot->fwd_domain_lock);
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
+ u64 remaining_gate_len_ps;
u32 max_sdu;
- if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
- min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
/* Setting QMAXSDU_CFG to 0 disables oversized frame
* dropping.
*/
- max_sdu = 0;
+ max_sdu = requested_max_sdu;
dev_dbg(ocelot->dev,
"port %d tc %d min gate len %llu"
", sending all frames\n",
@@ -1262,9 +1284,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
/* If traffic class doesn't support a full MTU sized
* frame, make sure to enable oversize frame dropping
* for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
*/
- max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
- picos_per_byte);
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
* special case where all packets are oversized.
* Any limit smaller than 64 octets accomplishes this
@@ -1278,6 +1306,10 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
*/
if (max_sdu > 20)
max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
dev_info(ocelot->dev,
"port %d tc %d min gate length %llu"
" ns not enough for max frame size %d at %d"
@@ -1287,47 +1319,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
max_sdu);
}
- /* ocelot_write_rix is a macro that concatenates
- * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
- * the writes to each traffic class
- */
- switch (tc) {
- case 0:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
- port);
- break;
- case 1:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
- port);
- break;
- case 2:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
- port);
- break;
- case 3:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
- port);
- break;
- case 4:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
- port);
- break;
- case 5:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
- port);
- break;
- case 6:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
- port);
- break;
- case 7:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
- port);
- break;
- }
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
}
ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1354,13 +1353,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->tas_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
- mutex_lock(&ocelot->tas_lock);
-
if (ocelot_port->taprio)
vsc9959_tas_guard_bands_update(ocelot, port);
@@ -1600,6 +1599,21 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
return 0;
}
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1607,6 +1621,8 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
@@ -1638,7 +1654,15 @@ struct felix_stream {
u32 ssid;
};
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
struct list_head list;
refcount_t refcount;
u32 index;
@@ -1653,13 +1677,6 @@ struct felix_stream_filter {
u32 maxsdu;
};
-struct felix_stream_filter_counters {
- u32 match;
- u32 not_pass_gate;
- u32 not_pass_sdu;
- u32 red;
-};
-
struct felix_stream_gate {
u32 index;
u8 enable;
@@ -2163,29 +2180,6 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
}
}
-static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
- struct felix_stream_filter_counters *counters)
-{
- mutex_lock(&ocelot->stats_lock);
-
- ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
- SYS_STAT_CFG_STAT_VIEW_M,
- SYS_STAT_CFG);
-
- counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
- counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
- counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
- counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
-
- /* Clear the PSFP counter. */
- ocelot_write(ocelot,
- SYS_STAT_CFG_STAT_VIEW(index) |
- SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
- SYS_STAT_CFG);
-
- mutex_unlock(&ocelot->stats_lock);
-}
-
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
struct flow_cls_offload *f)
{
@@ -2210,6 +2204,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
return ret;
}
+ mutex_lock(&psfp->lock);
+
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_GATE:
@@ -2251,6 +2247,7 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
sfi.maxsdu = a->police.mtu;
break;
default:
+ mutex_unlock(&psfp->lock);
return -EOPNOTSUPP;
}
}
@@ -2320,6 +2317,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
goto err;
}
+ mutex_unlock(&psfp->lock);
+
return 0;
err:
@@ -2329,6 +2328,8 @@ err:
if (sfi.fm_valid)
ocelot_vcap_policer_del(ocelot, sfi.fmid);
+ mutex_unlock(&psfp->lock);
+
return ret;
}
@@ -2336,18 +2337,22 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
struct flow_cls_offload *f)
{
struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
static struct felix_stream_filter *sfi;
- struct ocelot_psfp_list *psfp;
- psfp = &ocelot->psfp;
+ mutex_lock(&psfp->lock);
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
- if (!stream)
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
- if (!sfi)
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
if (sfi->sg_valid)
vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
@@ -2373,27 +2378,83 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
stream_entry->ports);
}
+ mutex_unlock(&psfp->lock);
+
return 0;
}
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
struct flow_cls_offload *f,
struct flow_stats *stats)
{
- struct felix_stream_filter_counters counters;
- struct ocelot_psfp_list *psfp;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ static struct felix_stream_filter *sfi;
struct felix_stream *stream;
- psfp = &ocelot->psfp;
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
if (!stream)
return -ENOMEM;
- vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
- stats->pkts = counters.match;
- stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
- counters.red;
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
return 0;
}
@@ -2405,6 +2466,7 @@ static void vsc9959_psfp_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&psfp->stream_list);
INIT_LIST_HEAD(&psfp->sfi_list);
INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
}
/* When using cut-through forwarding and the egress port runs at a higher data
@@ -2420,7 +2482,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
- int port, other_port;
+ int tc, port, other_port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -2464,19 +2526,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
min_speed = other_ocelot_port->speed;
}
- /* Enable cut-through forwarding for all traffic classes. */
- if (ocelot_port->speed == min_speed)
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode.
+ */
+ if (ocelot_port->speed == min_speed) {
val = GENMASK(7, 0);
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
set:
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
if (tmp == val)
continue;
dev_dbg(ocelot->dev,
- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
port, mask, ocelot_port->speed, min_speed,
- val ? "enabling" : "disabling");
+ val ? "enabling" : "disabling", val);
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
@@ -2495,12 +2565,13 @@ static const struct ocelot_ops vsc9959_ops = {
.psfp_stats_get = vsc9959_psfp_stats_get,
.cut_through_fwd = vsc9959_cut_through_fwd,
.tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
};
static const struct felix_info felix_info_vsc9959 = {
- .target_io_res = vsc9959_target_io_res,
- .port_io_res = vsc9959_port_io_res,
- .imdio_res = &vsc9959_imdio_res,
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
@@ -2522,7 +2593,6 @@ static const struct felix_info felix_info_vsc9959 = {
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
.tas_guard_bands_update = vsc9959_tas_guard_bands_update,
- .init_regmap = ocelot_regmap_init,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -2574,7 +2644,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
pci_set_master(pdev);
@@ -2635,8 +2704,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
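The guard-band rework in felix_vsc9959.c above comes down to a small amount of arithmetic: reserve VSC9959_TAS_MIN_GATE_LEN_NS (33 ns) of the shortest gate interval for one egress frame, turn the remainder into a QMAXSDU octet limit via the per-byte transmission time, and cap that by any user-requested per-queue max SDU after adding L2 overhead. The standalone sketch below reproduces that calculation for a sample 1 Gbps port and a 10 us gate. Only the 33 ns constant, the 20-octet adjustment and the ETH_HLEN/VLAN_HLEN/ETH_FCS_LEN overhead mirror the diff; the speed-to-picoseconds conversion and the sample numbers are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define VSC9959_TAS_MIN_GATE_LEN_NS	33
#define PSEC_PER_NSEC			1000ULL
#define ETH_HLEN			14
#define VLAN_HLEN			4
#define ETH_FCS_LEN			4

int main(void)
{
	uint64_t gate_len_ns = 10000;	/* 10 us gate for this traffic class */
	uint64_t speed_mbps = 1000;	/* 1 Gbps port */
	/* One byte takes 8 ns at 1 Gbps, i.e. 8000 ps (illustrative formula) */
	uint64_t picos_per_byte = 8ULL * 1000000 / speed_mbps;
	uint64_t remaining_ps, max_sdu;

	/* Keep 33 ns of the gate so one frame can still start transmitting. */
	remaining_ps = (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) *
		       PSEC_PER_NSEC;

	/* Largest frame that still fits in what is left of the gate. */
	max_sdu = remaining_ps / picos_per_byte;
	if (max_sdu > 20)
		max_sdu -= 20;		/* further adjustment made by the driver */

	printf("QMAXSDU for this tc: %llu octets\n",
	       (unsigned long long)max_sdu);

	/* A tc-taprio per-queue max-sdu request is padded with L2 overhead
	 * (as in vsc9959_tas_tc_max_sdu) and then only lowers the limit.
	 */
	uint64_t requested = 800;	/* hypothetical queue max-sdu */
	uint64_t requested_max_sdu = requested + ETH_HLEN + 2 * VLAN_HLEN +
				     ETH_FCS_LEN;
	if (requested_max_sdu && requested_max_sdu < max_sdu)
		max_sdu = requested_max_sdu;

	printf("after honouring requested max-sdu: %llu octets\n",
	       (unsigned long long)max_sdu);
	return 0;
}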
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ea0649211356..7af33b2c685d 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -312,7 +383,6 @@ static const u32 vsc9953_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
@@ -388,110 +458,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the device's base address */
-static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9953_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
- {
- .start = 0x0160000,
- .end = 0x016ffff,
- .name = "port6",
- },
- {
- .start = 0x0170000,
- .end = 0x017ffff,
- .name = "port7",
- },
- {
- .start = 0x0180000,
- .end = 0x018ffff,
- .name = "port8",
- },
- {
- .start = 0x0190000,
- .end = 0x019ffff,
- .name = "port9",
- },
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
@@ -543,101 +543,8 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
@@ -1083,8 +990,9 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
}
static const struct felix_info seville_info_vsc9953 = {
- .target_io_res = vsc9953_target_io_res,
- .port_io_res = vsc9953_port_io_res,
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
@@ -1101,7 +1009,6 @@ static const struct felix_info seville_info_vsc9953 = {
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.port_modes = vsc9953_port_modes,
- .init_regmap = ocelot_regmap_init,
};
static int seville_probe(struct platform_device *pdev)
@@ -1176,8 +1083,6 @@ static int seville_remove(struct platform_device *pdev)
kfree(felix->ds);
kfree(felix);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 0796b7cf8cae..e7b98b864fa1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1099,8 +1099,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 1d3e7782a71f..5669c92c93f7 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1889,9 +1889,9 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
@@ -1957,8 +1957,6 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index bba95613e218..fb45b598847b 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -1017,7 +1017,8 @@ int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
static bool qca8k_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp;
int members = 0;
@@ -1029,15 +1030,24 @@ static bool qca8k_lag_can_offload(struct dsa_switch *ds,
/* Includes the port joining the LAG */
members++;
- if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
return false;
+ }
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
return false;
+ }
return true;
}
@@ -1160,11 +1170,12 @@ static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
int ret;
- if (!qca8k_lag_can_offload(ds, lag, info))
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
ret = qca8k_lag_setup_hash(ds, lag, info);
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index e36ecc9777f4..0b7a5cb12321 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -512,7 +512,8 @@ int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
/* Common port LAG function */
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info);
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag);
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index c58f49d558d2..3e54fac5f902 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -245,8 +245,6 @@ static void realtek_mdio_remove(struct mdio_device *mdiodev)
/* leave the device reset asserted */
if (priv->reset)
gpiod_set_value(priv->reset, 1);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 45992f79ec8d..1b447d96b9c4 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -522,8 +522,6 @@ static int realtek_smi_remove(struct platform_device *pdev)
if (priv->reset)
gpiod_set_value(priv->reset, 1);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 0744e8162e1d..ed413d555bec 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1025,8 +1025,6 @@ static int a5psw_remove(struct platform_device *pdev)
clk_disable_unprepare(a5psw->hclk);
clk_disable_unprepare(a5psw->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b03d0d0c3dbf..412666111b0c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3351,8 +3351,6 @@ static void sja1105_remove(struct spi_device *spi)
return;
dsa_unregister_switch(priv->ds);
-
- spi_set_drvdata(spi, NULL);
}
static void sja1105_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index fe4b154a0a57..bd4206e8f9af 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -121,8 +121,6 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
vsc73xx_remove(&vsc_platform->vsc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 97a92e6da60d..85b9a0f51dd8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -167,8 +167,6 @@ static void vsc73xx_spi_remove(struct spi_device *spi)
return;
vsc73xx_remove(&vsc_spi->vsc);
-
- spi_set_drvdata(spi, NULL);
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 3887ed33c5fe..fa622639d640 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -109,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -138,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 6deae388a0d6..54065cdedd35 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -105,18 +105,14 @@ static int xrs700x_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int xrs700x_i2c_remove(struct i2c_client *i2c)
+static void xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
- return 0;
+ return;
xrs700x_switch_remove(priv);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 127a677d1f39..5f7d344b5d73 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -140,8 +140,6 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
return;
xrs700x_switch_remove(priv);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index f82ad7419508..aa0fc00faecb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -102,7 +102,7 @@ static const struct net_device_ops dummy_netdev_ops = {
static void dummy_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static const struct ethtool_ops dummy_ethtool_ops = {
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 846fa3af4504..fb68339e1511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1135,7 +1135,7 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1d124b0f65e7..d2f4358cc550 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1527,7 +1527,7 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4673bc1604e7..82f94b1635bf 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -480,7 +480,7 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ccf07667aa5e..082388bb6169 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2959,13 +2959,13 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strlcpy(info->bus_info, dev_name(vp->gendev),
+ strscpy(info->bus_info, dev_name(vp->gendev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index cad4f354cc76..aaaff3ba43ef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -969,12 +969,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if (tp->card_state == Sleeping) {
- strlcpy(info->fw_version, "Sleep image",
+ strscpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strlcpy(info->fw_version, "Unknown runtime",
+ strscpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
@@ -984,8 +984,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1f8acbba5b6b..af603256b724 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -579,9 +579,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 ax_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index e7b879123bb1..05d39ecb97ff 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -555,9 +555,9 @@ static int __init etherm_addr(char *addr)
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 21047ae1bc3d..8a7918d33419 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -450,8 +450,7 @@ static int mcf8390_remove(struct platform_device *pdev)
unregister_netdev(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a55c1d5a0a1..1917da784191 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -121,6 +121,7 @@ config LANTIQ_XRX200
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
+source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c06e75ed4231..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 1cfdd01b4c2e..cd4d71b83c33 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1576,7 +1576,7 @@ static int owl_emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
netdev->netdev_ops = &owl_emac_netdev_ops;
netdev->ethtool_ops = &owl_emac_ethtool_ops;
- netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll);
ret = devm_register_netdev(dev, netdev);
if (ret) {
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8f0a6b9c518e..857361c74f5d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1844,8 +1844,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..da3bdd302502
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Analog Devices device configuration
+#
+
+config NET_VENDOR_ADI
+ bool "Analog Devices devices"
+ default y
+ depends on SPI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ADI devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ADI
+
+config ADIN1110
+ tristate "Analog Devices ADIN1110 MAC-PHY"
+ depends on SPI && NET_SWITCHDEV
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices ADIN1110
+ Low Power 10BASE-T1L Ethernet MAC-PHY.
+
+endif # NET_VENDOR_ADI
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..d0383d94303c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Makefile for the Analog Devices network device drivers.
+#
+
+obj-$(CONFIG_ADIN1110) += adin1110.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
new file mode 100644
index 000000000000..aaee7c4248e6
--- /dev/null
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY
+ * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/crc8.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <net/switchdev.h>
+
+#include <asm/unaligned.h>
+
+#define ADIN1110_PHY_ID 0x1
+
+#define ADIN1110_RESET 0x03
+#define ADIN1110_SWRESET BIT(0)
+
+#define ADIN1110_CONFIG1 0x04
+#define ADIN1110_CONFIG1_SYNC BIT(15)
+
+#define ADIN1110_CONFIG2 0x06
+#define ADIN2111_P2_FWD_UNK2HOST BIT(12)
+#define ADIN2111_PORT_CUT_THRU_EN BIT(11)
+#define ADIN1110_CRC_APPEND BIT(5)
+#define ADIN1110_FWD_UNK2HOST BIT(2)
+
+#define ADIN1110_STATUS0 0x08
+
+#define ADIN1110_STATUS1 0x09
+#define ADIN2111_P2_RX_RDY BIT(17)
+#define ADIN1110_SPI_ERR BIT(10)
+#define ADIN1110_RX_RDY BIT(4)
+
+#define ADIN1110_IMASK1 0x0D
+#define ADIN2111_RX_RDY_IRQ BIT(17)
+#define ADIN1110_SPI_ERR_IRQ BIT(10)
+#define ADIN1110_RX_RDY_IRQ BIT(4)
+#define ADIN1110_TX_RDY_IRQ BIT(3)
+
+#define ADIN1110_MDIOACC 0x20
+#define ADIN1110_MDIO_TRDONE BIT(31)
+#define ADIN1110_MDIO_ST GENMASK(29, 28)
+#define ADIN1110_MDIO_OP GENMASK(27, 26)
+#define ADIN1110_MDIO_PRTAD GENMASK(25, 21)
+#define ADIN1110_MDIO_DEVAD GENMASK(20, 16)
+#define ADIN1110_MDIO_DATA GENMASK(15, 0)
+
+#define ADIN1110_TX_FSIZE 0x30
+#define ADIN1110_TX 0x31
+#define ADIN1110_TX_SPACE 0x32
+
+#define ADIN1110_MAC_ADDR_FILTER_UPR 0x50
+#define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31)
+#define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30)
+#define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17)
+#define ADIN1110_MAC_ADDR_TO_HOST BIT(16)
+
+#define ADIN1110_MAC_ADDR_FILTER_LWR 0x51
+
+#define ADIN1110_MAC_ADDR_MASK_UPR 0x70
+#define ADIN1110_MAC_ADDR_MASK_LWR 0x71
+
+#define ADIN1110_RX_FSIZE 0x90
+#define ADIN1110_RX 0x91
+
+#define ADIN2111_RX_P2_FSIZE 0xC0
+#define ADIN2111_RX_P2 0xC1
+
+#define ADIN1110_CLEAR_STATUS0 0xFFF
+
+/* MDIO_OP codes */
+#define ADIN1110_MDIO_OP_WR 0x1
+#define ADIN1110_MDIO_OP_RD 0x3
+
+#define ADIN1110_CD BIT(7)
+#define ADIN1110_WRITE BIT(5)
+
+#define ADIN1110_MAX_BUFF 2048
+#define ADIN1110_MAX_FRAMES_READ 64
+#define ADIN1110_WR_HEADER_LEN 2
+#define ADIN1110_FRAME_HEADER_LEN 2
+#define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2
+#define ADIN1110_RD_HEADER_LEN 3
+#define ADIN1110_REG_LEN 4
+#define ADIN1110_FEC_LEN 4
+
+#define ADIN1110_PHY_ID_VAL 0x0283BC91
+#define ADIN2111_PHY_ID_VAL 0x0283BCA1
+
+#define ADIN_MAC_MAX_PORTS 2
+#define ADIN_MAC_MAX_ADDR_SLOTS 16
+
+#define ADIN_MAC_MULTICAST_ADDR_SLOT 0
+#define ADIN_MAC_BROADCAST_ADDR_SLOT 1
+#define ADIN_MAC_P1_ADDR_SLOT 2
+#define ADIN_MAC_P2_ADDR_SLOT 3
+#define ADIN_MAC_FDB_ADDR_SLOT 4
+
+DECLARE_CRC8_TABLE(adin1110_crc_table);
+
+enum adin1110_chips_id {
+ ADIN1110_MAC = 0,
+ ADIN2111_MAC,
+};
+
+struct adin1110_cfg {
+ enum adin1110_chips_id id;
+ char name[MDIO_NAME_SIZE];
+ u32 phy_ids[PHY_MAX_ADDR];
+ u32 ports_nr;
+ u32 phy_id_val;
+};
+
+struct adin1110_port_priv {
+ struct adin1110_priv *priv;
+ struct net_device *netdev;
+ struct net_device *bridge;
+ struct phy_device *phydev;
+ struct work_struct tx_work;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct work_struct rx_mode_work;
+ u32 flags;
+ struct sk_buff_head txq;
+ u32 nr;
+ u32 state;
+ struct adin1110_cfg *cfg;
+};
+
+struct adin1110_priv {
+ struct mutex lock; /* protect spi */
+ spinlock_t state_lock; /* protect RX mode */
+ struct mii_bus *mii_bus;
+ struct spi_device *spidev;
+ bool append_crc;
+ struct adin1110_cfg *cfg;
+ u32 tx_space;
+ u32 irq_mask;
+ bool forwarding;
+ int irq;
+ struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS];
+ char mii_bus_name[MII_BUS_ID_SIZE];
+ u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned;
+};
+
+struct adin1110_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct adin1110_port_priv *port_priv;
+ unsigned long event;
+};
+
+static struct adin1110_cfg adin1110_cfgs[] = {
+ {
+ .id = ADIN1110_MAC,
+ .name = "adin1110",
+ .phy_ids = {1},
+ .ports_nr = 1,
+ .phy_id_val = ADIN1110_PHY_ID_VAL,
+ },
+ {
+ .id = ADIN2111_MAC,
+ .name = "adin2111",
+ .phy_ids = {1, 2},
+ .ports_nr = 2,
+ .phy_id_val = ADIN2111_PHY_ID_VAL,
+ },
+};
+
+static u8 adin1110_crc_data(u8 *data, u32 len)
+{
+ return crc8(adin1110_crc_table, data, len, 0);
+}
+
+static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+{
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+ struct spi_transfer t[2] = {0};
+ int ret;
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+ priv->data[2] = 0x00;
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ priv->data[3] = 0x00;
+ header_len++;
+ }
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+ t[1].rx_buf = &priv->data[header_len];
+ t[1].len = read_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret)
+ return ret;
+
+ if (priv->append_crc) {
+ u8 recv_crc;
+ u8 crc;
+
+ crc = adin1110_crc_data(&priv->data[header_len],
+ ADIN1110_REG_LEN);
+ recv_crc = priv->data[header_len + ADIN1110_REG_LEN];
+
+ if (crc != recv_crc) {
+ dev_err_ratelimited(&priv->spidev->dev, "CRC error.");
+ return -EBADMSG;
+ }
+ }
+
+ *val = get_unaligned_be32(&priv->data[header_len]);
+
+ return ret;
+}
+
+static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val)
+{
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ u32 write_len = ADIN1110_REG_LEN;
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], header_len);
+ header_len++;
+ }
+
+ put_unaligned_be32(val, &priv->data[header_len]);
+ if (priv->append_crc) {
+ priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len],
+ write_len);
+ write_len++;
+ }
+
+ return spi_write(priv->spidev, &priv->data[0], header_len + write_len);
+}
+
+static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg,
+ unsigned long mask, unsigned long val)
+{
+ u32 write_val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, reg, &write_val);
+ if (ret < 0)
+ return ret;
+
+ set_mask_bits(&write_val, mask, val);
+
+ return adin1110_write_reg(priv, reg, write_val);
+}
+
+static int adin1110_round_len(int len)
+{
+ /* can read/write only multiples of 4 bytes of payload */
+ len = ALIGN(len, 4);
+
+ /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. */
+ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF)
+ return -EINVAL;
+
+ return len;
+}
+
+static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ struct spi_transfer t[2] = {0};
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+ int round_len;
+ u16 reg;
+ int ret;
+
+ if (!port_priv->nr) {
+ reg = ADIN1110_RX;
+ ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size);
+ } else {
+ reg = ADIN2111_RX_P2;
+ ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE,
+ &frame_size);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* The read frame size includes the extra 2 bytes
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+ return ret;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+ return ret;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len);
+ if (!rxb)
+ return -ENOMEM;
+
+ memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ t[1].rx_buf = &rxb->data[0];
+ t[1].len = round_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+ skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+ rxb->offload_fwd_mark = 1;
+
+ netif_rx(rxb);
+
+ port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN;
+ port_priv->rx_packets++;
+
+ return 0;
+}
+
+static int adin1110_write_fifo(struct adin1110_port_priv *port_priv,
+ struct sk_buff *txb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ __be16 frame_header;
+ int padding = 0;
+ int padded_len;
+ int round_len;
+ int ret;
+
+ /* Pad the frame to 64 bytes:
+ * neither the MAC nor the PHY will
+ * otherwise add the required padding.
+ * The FEC will be added by the MAC internally.
+ */
+ if (txb->len + ADIN1110_FEC_LEN < 64)
+ padding = 64 - (txb->len + ADIN1110_FEC_LEN);
+
+ padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN;
+
+ round_len = adin1110_round_len(padded_len);
+ if (round_len < 0)
+ return round_len;
+
+ ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len);
+ if (ret < 0)
+ return ret;
+
+ memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE;
+ priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX);
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ /* mention the port on which to send the frame in the frame header */
+ frame_header = cpu_to_be16(port_priv->nr);
+ memcpy(&priv->data[header_len], &frame_header,
+ ADIN1110_FRAME_HEADER_LEN);
+
+ memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN],
+ txb->data, txb->len);
+
+ ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len);
+ if (ret < 0)
+ return ret;
+
+ port_priv->tx_bytes += txb->len;
+ port_priv->tx_packets++;
+
+ return 0;
+}
+
+static int adin1110_read_mdio_acc(struct adin1110_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return 0;
+
+ return val;
+}
+
+static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+
+ /* write the clause 22 read command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ /* ADIN1110_MDIO_TRDONE BIT of the ADIN1110_MDIOACC
+ * register is set when the read is done.
+ * After the transaction is done, ADIN1110_MDIO_DATA
+ * bitfield of ADIN1110_MDIOACC register will contain
+ * the requested register value.
+ */
+ ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ if (ret < 0)
+ return ret;
+
+ return (val & ADIN1110_MDIO_DATA);
+}
+
+static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 reg_val)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+ val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val);
+
+ /* write the clause 22 write command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+}
+
+/* ADIN1110 MAC-PHY contains an ADIN1100 PHY.
+ * ADIN2111 MAC-PHY contains two ADIN1100 PHYs.
+ * By registering a new MDIO bus we allow the PAL to discover
+ * the encapsulated PHY and probe the ADIN1100 driver.
+ */
+static int adin1110_register_mdiobus(struct adin1110_priv *priv,
+ struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u",
+ priv->cfg->name, priv->spidev->chip_select);
+
+ mii_bus->name = priv->mii_bus_name;
+ mii_bus->read = adin1110_mdio_read;
+ mii_bus->write = adin1110_mdio_write;
+ mii_bus->priv = priv;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
+ mii_bus->probe_capabilities = MDIOBUS_C22;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mii_bus);
+ if (ret)
+ return ret;
+
+ priv->mii_bus = mii_bus;
+
+ return 0;
+}
+
+static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv,
+ u32 status)
+{
+ if (!netif_oper_up(port_priv->netdev))
+ return false;
+
+ if (!port_priv->nr)
+ return !!(status & ADIN1110_RX_RDY);
+ else
+ return !!(status & ADIN2111_P2_RX_RDY);
+}
+
+static void adin1110_read_frames(struct adin1110_port_priv *port_priv,
+ unsigned int budget)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 status1;
+ int ret;
+
+ while (budget) {
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ return;
+
+ if (!adin1110_port_rx_ready(port_priv, status1))
+ break;
+
+ ret = adin1110_read_fifo(port_priv);
+ if (ret < 0)
+ return;
+
+ budget--;
+ }
+}
+
+static void adin1110_wake_queues(struct adin1110_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++)
+ netif_wake_queue(priv->ports[i]->netdev);
+}
+
+static irqreturn_t adin1110_irq(int irq, void *p)
+{
+ struct adin1110_priv *priv = p;
+ u32 status1;
+ u32 val;
+ int ret;
+ int i;
+
+ mutex_lock(&priv->lock);
+
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ goto out;
+
+ if (priv->append_crc && (status1 & ADIN1110_SPI_ERR))
+ dev_warn_ratelimited(&priv->spidev->dev,
+ "SPI CRC error on write.\n");
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0)
+ goto out;
+
+ /* TX FIFO space is expressed in half-words */
+ priv->tx_space = 2 * val;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (adin1110_port_rx_ready(priv->ports[i], status1))
+ adin1110_read_frames(priv->ports[i],
+ ADIN1110_MAX_FRAMES_READ);
+ }
+
+ /* clear IRQ sources */
+ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0);
+ adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (priv->tx_space > 0 && ret >= 0)
+ adin1110_wake_queues(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* ADIN1110 can filter up to 16 MAC addresses; mac_nr here is the slot used */
+static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv,
+ int mac_nr, const u8 *addr,
+ u8 *mask, u32 port_rules)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 offset = mac_nr * 2;
+ u32 port_rules_mask;
+ int ret;
+ u32 val;
+
+ if (!port_priv->nr)
+ port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (port_rules & port_rules_mask)
+ port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ port_rules_mask |= GENMASK(15, 0);
+ val = port_rules | get_unaligned_be16(&addr[0]);
+ ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset,
+ port_rules_mask, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&addr[2]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_FILTER_LWR + offset, val);
+ if (ret < 0)
+ return ret;
+
+ /* Only the first two MAC address slots support masking. */
+ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) {
+ val = get_unaligned_be16(&mask[0]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_UPR + offset,
+ val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&mask[2]);
+ return adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_LWR + offset,
+ val);
+ }
+
+ return 0;
+}
+
+static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr)
+{
+ u32 offset = mac_nr * 2;
+ int ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ /* only the first two MAC address slots are maskable */
+ if (mac_nr <= 1) {
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0);
+ }
+
+ return ret;
+}
+
+static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv,
+ bool fw_to_host,
+ bool fw_to_other_port)
+{
+ u32 port_rules = 0;
+
+ if (!port_priv->nr)
+ port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (fw_to_host)
+ port_rules |= ADIN1110_MAC_ADDR_TO_HOST;
+
+ if (fw_to_other_port && port_priv->priv->forwarding)
+ port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ return port_rules;
+}
+
+static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_multicast)
+{
+ u8 mask[ETH_ALEN] = {0};
+ u8 mac[ETH_ALEN] = {0};
+ u32 port_rules = 0;
+
+ mask[0] = BIT(0);
+ mac[0] = BIT(0);
+
+ if (accept_multicast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mac,
+ mask, port_rules);
+}
+
+static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_broadcast)
+{
+ u32 port_rules = 0;
+ u8 mask[ETH_ALEN];
+
+ memset(mask, 0xFF, ETH_ALEN);
+
+ if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mask,
+ mask, port_rules);
+}
+
+static int adin1110_set_mac_address(struct net_device *netdev,
+ const unsigned char *dev_addr)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ u32 mac_slot;
+
+ if (!is_valid_ether_addr(dev_addr))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, dev_addr);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ port_rules = adin1110_port_rules(port_priv, true, false);
+
+ return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr,
+ mask, port_rules);
+}
+
+static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_mac_address(netdev, sa->sa_data);
+}
+
+static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_do_ioctl(netdev, rq, cmd);
+}
+
+static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv,
+ bool promisc)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+
+ if (port_priv->state != BR_STATE_FORWARDING)
+ promisc = false;
+
+ if (!port_priv->nr)
+ mask = ADIN1110_FWD_UNK2HOST;
+ else
+ mask = ADIN2111_P2_FWD_UNK2HOST;
+
+ return adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ mask, promisc ? mask : 0);
+}
+
+static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv)
+{
+ int ret;
+
+ ret = adin1110_set_promisc_mode(port_priv,
+ !!(port_priv->flags & IFF_PROMISC));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_ALLMULTI));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_broadcasts_filter(port_priv,
+ ADIN_MAC_BROADCAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_BROADCAST));
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1,
+ ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC);
+}
+
+static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv)
+{
+ int i;
+
+ if (priv->cfg->id != ADIN2111_MAC)
+ return false;
+
+ /* Can't enable forwarding if ports do not belong to the same bridge */
+ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge)
+ return false;
+
+ /* Can't enable forwarding if there is a port
+ * that has been blocked by STP.
+ */
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (priv->ports[i]->state != BR_STATE_FORWARDING)
+ return false;
+ }
+
+ return true;
+}
+
+static void adin1110_rx_mode_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+
+ port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+ adin1110_setup_rx_mode(port_priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void adin1110_set_rx_mode(struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ spin_lock(&priv->state_lock);
+
+ port_priv->flags = dev->flags;
+ schedule_work(&port_priv->rx_mode_work);
+
+ spin_unlock(&priv->state_lock);
+}
+
+static int adin1110_net_open(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /* Configure MAC to compute and append the FCS itself. */
+ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND);
+ if (ret < 0)
+ goto out;
+
+ val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ;
+ if (priv->cfg->id == ADIN2111_MAC)
+ val |= ADIN2111_RX_RDY_IRQ;
+
+ priv->irq_mask = val;
+ ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret);
+ goto out;
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret);
+ goto out;
+ }
+
+ priv->tx_space = 2 * val;
+
+ port_priv->state = BR_STATE_FORWARDING;
+ ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr);
+ if (ret < 0) {
+ netdev_err(net_dev, "Could not set MAC address: %pM, %d\n",
+ net_dev->dev_addr, ret);
+ goto out;
+ }
+
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC,
+ ADIN1110_CONFIG1_SYNC);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+
+ phy_start(port_priv->phydev);
+
+ netif_start_queue(net_dev);
+
+ return 0;
+}
+
+static int adin1110_net_stop(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+ int ret;
+
+ mask = !port_priv->nr ? ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ;
+
+ /* Disable RX RDY IRQs */
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ netif_stop_queue(port_priv->netdev);
+ flush_work(&port_priv->tx_work);
+ phy_stop(port_priv->phydev);
+
+ return 0;
+}
+
+static void adin1110_tx_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+ struct sk_buff *txb;
+ int ret;
+
+ port_priv = container_of(work, struct adin1110_port_priv, tx_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+
+ while ((txb = skb_dequeue(&port_priv->txq))) {
+ ret = adin1110_write_fifo(port_priv, txb);
+ if (ret < 0)
+ dev_err_ratelimited(&priv->spidev->dev,
+ "Frame write error: %d\n", ret);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ netdev_tx_t netdev_ret = NETDEV_TX_OK;
+ u32 tx_space_needed;
+
+ tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN;
+ if (tx_space_needed > priv->tx_space) {
+ netif_stop_queue(dev);
+ netdev_ret = NETDEV_TX_BUSY;
+ } else {
+ priv->tx_space -= tx_space_needed;
+ skb_queue_tail(&port_priv->txq, skb);
+ }
+
+ schedule_work(&port_priv->tx_work);
+
+ return netdev_ret;
+}
+
+static void adin1110_ndo_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ storage->rx_packets = port_priv->rx_packets;
+ storage->tx_packets = port_priv->tx_packets;
+
+ storage->rx_bytes = port_priv->rx_bytes;
+ storage->tx_bytes = port_priv->tx_bytes;
+}
+
+static int adin1110_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN);
+ memcpy(ppid->id, priv->mii_bus_name, ppid->id_len);
+
+ return 0;
+}
+
+static int adin1110_ndo_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->nr);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct net_device_ops adin1110_netdev_ops = {
+ .ndo_open = adin1110_net_open,
+ .ndo_stop = adin1110_net_stop,
+ .ndo_eth_ioctl = adin1110_ioctl,
+ .ndo_start_xmit = adin1110_start_xmit,
+ .ndo_set_mac_address = adin1110_ndo_set_mac_address,
+ .ndo_set_rx_mode = adin1110_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = adin1110_ndo_get_stats64,
+ .ndo_get_port_parent_id = adin1110_port_get_port_parent_id,
+ .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name,
+};
+
+static void adin1110_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, "ADIN1110", sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static const struct ethtool_ops adin1110_ethtool_ops = {
+ .get_drvinfo = adin1110_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void adin1110_adjust_link(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev->link)
+ phy_print_status(phydev);
+}
+
+/* PHY ID is stored in the MAC registers too,
+ * check the SPI connection by reading it.
+ */
+static int adin1110_check_spi(struct adin1110_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != priv->cfg->phy_id_val) {
+ dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n",
+ priv->cfg->phy_id_val, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
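+/* Enable or disable hardware forwarding between the two ports. When
+ * disabling, drop all learned FDB entries; cut-through mode is only
+ * enabled while forwarding, and each port's RX mode is reapplied.
+ */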
+static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable)
+{
+ int ret;
+ int i;
+
+ priv->forwarding = enable;
+
+ if (!priv->forwarding) {
+ for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) {
+ ret = adin1110_clear_mac_address(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Forwarding is optimised when MAC runs in Cut Through mode. */
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ ADIN2111_PORT_CUT_THRU_EN,
+ priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = adin1110_setup_rx_mode(priv->ports[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = bridge;
+
+ if (adin1110_can_offload_forwarding(priv)) {
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, true);
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr);
+}
+
+static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = NULL;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, false);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ int ret = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = adin1110_port_bridge_join(port_priv, info->upper_dev);
+ else
+ ret = adin1110_port_bridge_leave(port_priv, info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block adin1110_netdevice_nb = {
+ .notifier_call = adin1110_netdevice_event,
+};
+
+static void adin1110_disconnect_phy(void *data)
+{
+ phy_disconnect(data);
+}
+
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
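+/* Bridge port moved to forwarding: restore the port MAC filter and,
+ * if both ports are bridged and offloading is possible, enable HW
+ * forwarding; otherwise just reprogram the RX mode for this port.
+ */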
+static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->state = BR_STATE_FORWARDING;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_mac_address(port_priv->netdev,
+ port_priv->netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+
+ if (adin1110_can_offload_forwarding(priv))
+ ret = adin1110_hw_forwarding(priv, true);
+ else
+ ret = adin1110_setup_rx_mode(port_priv);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
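+/* Bridge port moved to a non-forwarding state: drop the port MAC
+ * filter, disable HW forwarding and accept only BPDUs on this port.
+ */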
+static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv)
+{
+ u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_slot;
+ int ret;
+
+ port_priv->state = BR_STATE_BLOCKING;
+
+ mutex_lock(&priv->lock);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ ret = adin1110_clear_mac_address(priv, mac_slot);
+ if (ret < 0)
+ goto out;
+
+ ret = adin1110_hw_forwarding(priv, false);
+ if (ret < 0)
+ goto out;
+
+ /* Allow only BPDUs to be passed to the CPU */
+ memset(mask, 0xFF, ETH_ALEN);
+ port_rules = adin1110_port_rules(port_priv, true, false);
+ ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ mask, port_rules);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/* ADIN1110/2111 does not have any native STP support.
+ * Listen for bridge core state changes and either
+ * allow all frames to pass or only BPDUs.
+ */
+static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv,
+ u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return adin1110_port_set_forwarding_state(port_priv);
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return adin1110_port_set_blocking_state(port_priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adin1110_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return adin1110_port_attr_stp_state_set(port_priv,
+ attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin1110_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(netdev, ptr,
+ adin1110_port_dev_check,
+ adin1110_port_attr_set);
+
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block adin1110_switchdev_blocking_notifier = {
+ .notifier_call = adin1110_switchdev_blocking_event,
+};
+
+static void adin1110_fdb_offload_notify(struct net_device *netdev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ netdev, &info.info, NULL);
+}
+
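+/* Offload a bridge FDB entry by programming it into a free MAC filter
+ * slot, so frames for this address are forwarded in hardware.
+ */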
+static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ struct adin1110_port_priv *other_port;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_nr;
+ u32 val;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (!priv->forwarding)
+ return 0;
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ /* Find free FDB slot on device. */
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+ if (!val)
+ break;
+ }
+
+ if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS)
+ return -ENOMEM;
+
+ other_port = priv->ports[!port_priv->nr];
+ port_rules = adin1110_port_rules(port_priv, false, true);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ mask, port_rules);
+}
+
+static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr)
+{
+ u32 val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be16(val, addr);
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be32(val, addr + 2);
+
+ return 0;
+}
+
+static int adin1110_fdb_del(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 addr[ETH_ALEN];
+ int mac_nr;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_mac(priv, mac_nr, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(addr, fdb->addr)) {
+ ret = adin1110_clear_mac_address(priv, mac_nr);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
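+/* Deferred handler for switchdev FDB add/del events; runs from a
+ * workqueue so the SPI registers can be accessed with the lock held.
+ */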
+static void adin1110_switchdev_event_work(struct work_struct *work)
+{
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct adin1110_port_priv *port_priv;
+ int ret;
+
+ switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
+ port_priv = switchdev_work->port_priv;
+
+ mutex_lock(&port_priv->priv->lock);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info);
+ if (!ret)
+ adin1110_fdb_offload_notify(port_priv->netdev,
+ &switchdev_work->fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ adin1110_fdb_del(port_priv, &switchdev_work->fdb_info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&port_priv->priv->lock);
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port_priv->netdev);
+}
+
+/* called under rcu_read_lock() */
+static int adin1110_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!adin1110_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
+ switchdev_work->port_priv = port_priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(netdev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block adin1110_switchdev_notifier = {
+ .notifier_call = adin1110_switchdev_event,
+};
+
+static void adin1110_unregister_notifiers(void *data)
+{
+ unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+}
+
+static int adin1110_setup_notifiers(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ int ret;
+
+ ret = register_netdevice_notifier(&adin1110_netdevice_nb);
+ if (ret < 0)
+ return ret;
+
+ ret = register_switchdev_notifier(&adin1110_switchdev_notifier);
+ if (ret < 0)
+ goto err_netdev;
+
+ ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ if (ret < 0)
+ goto err_sdev;
+
+ return devm_add_action_or_reset(dev, adin1110_unregister_notifiers, NULL);
+
+err_sdev:
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+
+err_netdev:
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+ return ret;
+}
+
+static int adin1110_probe_netdevs(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct adin1110_port_priv *port_priv;
+ struct net_device *netdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ netdev = devm_alloc_etherdev(dev, sizeof(*port_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ port_priv = netdev_priv(netdev);
+ port_priv->netdev = netdev;
+ port_priv->priv = priv;
+ port_priv->cfg = priv->cfg;
+ port_priv->nr = i;
+ priv->ports[i] = port_priv;
+ SET_NETDEV_DEV(netdev, dev);
+
+ ret = device_get_ethdev_address(dev, netdev);
+ if (ret < 0)
+ return ret;
+
+ netdev->irq = priv->spidev->irq;
+ INIT_WORK(&port_priv->tx_work, adin1110_tx_work);
+ INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work);
+ skb_queue_head_init(&port_priv->txq);
+
+ netif_carrier_off(netdev);
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->netdev_ops = &adin1110_netdev_ops;
+ netdev->ethtool_ops = &adin1110_ethtool_ops;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not find PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ port_priv->phydev = phy_connect(netdev,
+ phydev_name(port_priv->phydev),
+ adin1110_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy,
+ port_priv->phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* ADIN1110 INT_N pin will be used to signal the host */
+ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL,
+ adin1110_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_setup_notifiers(priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = devm_register_netdev(dev, priv->ports[i]->netdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register network device.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adin1110_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct adin1110_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spidev = spi;
+ priv->cfg = &adin1110_cfgs[dev_id->driver_data];
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+
+ mutex_init(&priv->lock);
+ spin_lock_init(&priv->state_lock);
+
+ /* use of CRC on control and data transactions is pin dependent */
+ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc");
+ if (priv->append_crc)
+ crc8_populate_msb(adin1110_crc_table, 0x7);
+
+ ret = adin1110_check_spi(priv);
+ if (ret < 0) {
+ dev_err(dev, "Probe SPI Read check failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_register_mdiobus(priv, dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not register MDIO bus: %d\n", ret);
+ return ret;
+ }
+
+ return adin1110_probe_netdevs(priv);
+}
+
+static const struct of_device_id adin1110_match_table[] = {
+ { .compatible = "adi,adin1110" },
+ { .compatible = "adi,adin2111" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adin1110_match_table);
+
+static const struct spi_device_id adin1110_spi_id[] = {
+ { .name = "adin1110", .driver_data = ADIN1110_MAC },
+ { .name = "adin2111", .driver_data = ADIN2111_MAC },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adin1110_spi_id);
+
+static struct spi_driver adin1110_driver = {
+ .driver = {
+ .name = "adin1110",
+ .of_match_table = adin1110_match_table,
+ },
+ .probe = adin1110_probe,
+ .id_table = adin1110_spi_id,
+};
+module_spi_driver(adin1110_driver);
+
+MODULE_DESCRIPTION("ADIN1110 Network driver");
+MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 447dc64a17e5..e104fb02817d 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1112,9 +1112,9 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strlcpy(info->driver, dev_driver_string(greth->dev),
+ strscpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
@@ -1507,7 +1507,7 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* setup NAPI */
- netif_napi_add(dev, &greth->napi, greth_poll, 64);
+ netif_napi_add(dev, &greth->napi, greth_poll);
return 0;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index d19d1579c415..5fab589b3ddf 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2952,8 +2952,8 @@ static void et131x_get_drvinfo(struct net_device *netdev,
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -3969,7 +3969,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
et131x_init_send(adapter);
- netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, et131x_poll);
eth_hw_addr_set(netdev, adapter->addr);
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index ce353b0c02a3..a30d0f172986 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1531,8 +1531,8 @@ static void slic_get_drvinfo(struct net_device *dev,
{
struct slic_device *sdev = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops slic_ethtool_ops = {
@@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 621ce742ad21..a94c62956eed 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -331,8 +331,8 @@ prepare_err:
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static u32 emac_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 22fe98555b24..d7762da8b2c0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2691,12 +2691,12 @@ static void ace_get_drvinfo(struct net_device *dev,
{
struct ace_private *ap = netdev_priv(dev);
- strlcpy(info->driver, "acenic", sizeof(info->driver));
+ strscpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strscpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 914e56b91467..dd7fd41ccde5 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -3,6 +3,8 @@ config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
depends on HAS_DMA
select PHYLIB
+ select PHYLINK
+ select PCS_ALTERA_TSE
help
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index f17acfb579a0..db5eed06e92d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
@@ -109,17 +110,6 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
-/* SGMII PCS register addresses
- */
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
-#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_1 0x13
-#define SGMII_PCS_IF_MODE 0x14
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
-#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
-
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -423,6 +413,9 @@ struct altera_tse_private {
void __iomem *tx_dma_csr;
void __iomem *tx_dma_desc;
+ /* SGMII PCS address space */
+ void __iomem *pcs_base;
+
/* Rx buffers queue */
struct tse_buffer *rx_ring;
u32 rx_cons;
@@ -480,6 +473,10 @@ struct altera_tse_private {
u32 msg_enable;
struct altera_dmaops *dmaops;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
};
/* Function prototypes
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 4299f1301149..81313c85833e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -199,9 +199,9 @@ static int tse_reglen(struct net_device *dev)
static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *regbuf)
{
- int i;
struct altera_tse_private *priv = netdev_priv(dev);
u32 *buf = regbuf;
+ int i;
/* Set version to a known value, so ethtool knows
* how to do any special formatting of this data.
@@ -221,6 +221,22 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
+static int tse_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static int tse_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
@@ -231,8 +247,9 @@ static const struct ethtool_ops tse_ethtool_ops = {
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = tse_ethtool_get_link_ksettings,
+ .set_link_ksettings = tse_ethtool_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 8c5828582c21..7633b227b2ca 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/pcs-altera-tse.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
@@ -86,27 +87,6 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
-/* PCS Register read/write functions
- */
-static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
-{
- return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
-}
-
-static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
- u16 value)
-{
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
-}
-
-/* Check PCS scratch memory */
-static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
-{
- sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
- return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
-}
-
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -141,10 +121,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
struct device_node *mdio_node = NULL;
- struct mii_bus *mdio = NULL;
struct device_node *child_node = NULL;
+ struct mii_bus *mdio = NULL;
+ int ret;
for_each_child_of_node(priv->device->of_node, child_node) {
if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
@@ -236,8 +216,8 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
static void tse_free_rx_buffer(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct sk_buff *skb = rxbuffer->skb;
dma_addr_t dma_addr = rxbuffer->dma_addr;
+ struct sk_buff *skb = rxbuffer->skb;
if (skb != NULL) {
if (dma_addr)
@@ -358,6 +338,7 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
u16 vid;
+
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
!__vlan_get_tag(skb, &vid)) {
eth_hdr = (struct ethhdr *)skb->data;
@@ -371,10 +352,10 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static int tse_rx(struct altera_tse_private *priv, int limit)
{
- unsigned int count = 0;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
unsigned int next_entry;
+ unsigned int count = 0;
struct sk_buff *skb;
- unsigned int entry = priv->rx_cons % priv->rx_ring_size;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
@@ -448,10 +429,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
static int tse_tx_complete(struct altera_tse_private *priv)
{
unsigned int txsize = priv->tx_ring_size;
- u32 ready;
- unsigned int entry;
struct tse_buffer *tx_buff;
+ unsigned int entry;
int txcomplete = 0;
+ u32 ready;
spin_lock(&priv->tx_lock);
@@ -497,8 +478,8 @@ static int tse_poll(struct napi_struct *napi, int budget)
{
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
- int rxcomplete = 0;
unsigned long int flags;
+ int rxcomplete = 0;
tse_tx_complete(priv);
@@ -561,13 +542,13 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
unsigned int txsize = priv->tx_ring_size;
- unsigned int entry;
- struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int nopaged_len = skb_headlen(skb);
+ struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
+ unsigned int entry;
spin_lock_bh(&priv->tx_lock);
@@ -619,117 +600,6 @@ out:
return ret;
}
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void altera_tse_adjust_link(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- /* only change config if there is a link */
- spin_lock(&priv->mac_cfg_lock);
- if (phydev->link) {
- /* Read old config */
- u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
-
- /* Check duplex */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- cfg_reg |= MAC_CMDCFG_HD_ENA;
- else
- cfg_reg &= ~MAC_CMDCFG_HD_ENA;
-
- netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
- dev->name, phydev->duplex);
-
- priv->oldduplex = phydev->duplex;
- }
-
- /* Check speed */
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- cfg_reg |= MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 100:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 10:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg |= MAC_CMDCFG_ENA_10;
- break;
- default:
- if (netif_msg_link(priv))
- netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
- priv->oldspeed = phydev->speed;
- }
- iowrite32(cfg_reg, &priv->mac_dev->command_config);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
-
- spin_unlock(&priv->mac_cfg_lock);
-}
-static struct phy_device *connect_local_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = NULL;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-
- if (priv->phy_addr != POLL_PHY) {
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
-
- netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
-
- phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
- priv->phy_iface);
- if (IS_ERR(phydev)) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
-
- } else {
- int ret;
- phydev = phy_find_first(priv->mdio);
- if (phydev == NULL) {
- netdev_err(dev, "No PHY found\n");
- return phydev;
- }
-
- ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
- priv->phy_iface);
- if (ret != 0) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
- }
- return phydev;
-}
-
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
@@ -768,91 +638,6 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
return 0;
}
-/* Initialize driver's PHY state, and attach to the PHY
- */
-static int init_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *phynode;
- bool fixed_link = false;
- int rc = 0;
-
- /* Avoid init phy in case of no phy present */
- if (!priv->phy_iface)
- return 0;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
- phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
-
- if (!phynode) {
- /* check if a fixed-link is defined in device-tree */
- if (of_phy_is_fixed_link(priv->device->of_node)) {
- rc = of_phy_register_fixed_link(priv->device->of_node);
- if (rc < 0) {
- netdev_err(dev, "cannot register fixed PHY\n");
- return rc;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phynode = of_node_get(priv->device->of_node);
- fixed_link = true;
-
- netdev_dbg(dev, "fixed-link detected\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link,
- 0, priv->phy_iface);
- } else {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev, "No phy-handle nor local mdio specified\n");
- return -ENODEV;
- }
- phydev = connect_local_phy(dev);
- }
- } else {
- netdev_dbg(dev, "phy-handle found\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link, 0, priv->phy_iface);
- }
- of_node_put(phynode);
-
- if (!phydev) {
- netdev_err(dev, "Could not find the PHY\n");
- if (fixed_link)
- of_phy_deregister_fixed_link(priv->device->of_node);
- return -ENODEV;
- }
-
- /* Stop Advertising 1000BASE Capability if interface is not GMII
- */
- if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
- (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phy_set_max_speed(phydev, SPEED_100);
-
- /* Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well. If a fixed-link is used the phy_id is always 0.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if ((phydev->phy_id == 0) && !fixed_link) {
- netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
- phy_disconnect(phydev);
- return -ENODEV;
- }
-
- netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
- phydev->mdio.addr, phydev->phy_id, phydev->link);
-
- return 0;
-}
-
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
@@ -1012,8 +797,8 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int i;
struct netdev_hw_addr *ha;
+ int i;
/* clear the hash filter */
for (i = 0; i < 64; i++)
@@ -1087,74 +872,14 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Initialise (if necessary) the SGMII PCS component
- */
-static int init_sgmii_pcs(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- int n;
- unsigned int tmp_reg = 0;
-
- if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
- return 0; /* Nothing to do, not in SGMII mode */
-
- /* The TSE SGMII PCS block looks a little like a PHY, it is
- * mapped into the zeroth MDIO space of the MAC and it has
- * ID registers like a PHY would. Sadly this is often
- * configured to zeroes, so don't be surprised if it does
- * show 0x00000000.
- */
-
- if (sgmii_pcs_scratch_test(priv, 0x0000) &&
- sgmii_pcs_scratch_test(priv, 0xffff) &&
- sgmii_pcs_scratch_test(priv, 0xa5a5) &&
- sgmii_pcs_scratch_test(priv, 0x5a5a)) {
- netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
- sgmii_pcs_read(priv, MII_PHYSID1),
- sgmii_pcs_read(priv, MII_PHYSID2));
- } else {
- netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
- return -ENOMEM;
- }
-
- /* Starting on page 5-29 of the MegaCore Function User Guide
- * Set SGMII Link timer to 1.6ms
- */
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
-
- /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
- sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
-
- /* Enable Autonegotiation */
- tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
- tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
-
- /* Reset PCS block */
- tmp_reg |= BMCR_RESET;
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
- for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
- if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
- netdev_info(dev, "SGMII PCS block initialised OK\n");
- return 0;
- }
- udelay(1);
- }
-
- /* We failed to reset the block, return a timeout */
- netdev_err(dev, "SGMII PCS block reset failed.\n");
- return -ETIMEDOUT;
-}
-
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned long flags;
int ret = 0;
int i;
- unsigned long int flags;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv);
@@ -1171,14 +896,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "TSE revision %x\n", priv->revision);
spin_lock(&priv->mac_cfg_lock);
- /* no-op if MAC not operating in SGMII mode*/
- ret = init_sgmii_pcs(dev);
- if (ret) {
- netdev_err(dev,
- "Cannot init the SGMII PCS (error: %d)\n", ret);
- spin_unlock(&priv->mac_cfg_lock);
- goto phy_error;
- }
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
@@ -1236,8 +953,12 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (dev->phydev)
- phy_start(dev->phydev);
+ ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
+ if (ret) {
+ netdev_err(dev, "could not connect phylink (%d)\n", ret);
+ goto tx_request_irq_error;
+ }
+ phylink_start(priv->phylink);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1265,13 +986,10 @@ phy_error:
static int tse_shutdown(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
unsigned long int flags;
+ int ret;
- /* Stop the PHY */
- if (dev->phydev)
- phy_stop(dev->phydev);
-
+ phylink_stop(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1317,11 +1035,79 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static void alt_tse_mac_an_restart(struct phylink_config *config)
+{
+}
+
+static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->mac_cfg_lock);
+ reset_mac(priv);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static void alt_tse_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void alt_tse_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+ ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= MAC_CMDCFG_HD_ENA;
+
+ if (speed == SPEED_1000)
+ ctrl |= MAC_CMDCFG_ETH_SPEED;
+ else if (speed == SPEED_10)
+ ctrl |= MAC_CMDCFG_ENA_10;
+
+ spin_lock(&priv->mac_cfg_lock);
+ csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX)
+ return priv->pcs;
+ else
+ return NULL;
+}
+
+static const struct phylink_mac_ops alt_tse_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_an_restart = alt_tse_mac_an_restart,
+ .mac_config = alt_tse_mac_config,
+ .mac_link_down = alt_tse_mac_link_down,
+ .mac_link_up = alt_tse_mac_link_up,
+ .mac_select_pcs = alt_tse_select_pcs,
+};
+
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
- struct resource *region;
struct device *device = &pdev->dev;
+ struct resource *region;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
@@ -1350,13 +1136,15 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- struct net_device *ndev;
- int ret = -ENODEV;
+ const struct of_device_id *of_id = NULL;
+ struct altera_tse_private *priv;
struct resource *control_port;
struct resource *dma_res;
- struct altera_tse_private *priv;
+ struct resource *pcs_res;
+ struct net_device *ndev;
void __iomem *descmap;
- const struct of_device_id *of_id = NULL;
+ int pcs_reg_width = 2;
+ int ret = -ENODEV;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1467,6 +1255,17 @@ static int altera_tse_probe(struct platform_device *pdev)
if (ret)
goto err_free_netdev;
+ /* SGMII PCS address space. The location can vary depending on how the
+ * IP is integrated. We can have a resource dedicated to it at a specific
+ * address space, but if it's not the case, we fall back to the mdiophy0
+ * from the MAC's address space.
+ */
+ ret = request_and_map(pdev, "pcs", &pcs_res,
+ &priv->pcs_base);
+ if (ret) {
+ priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
+ pcs_reg_width = 4;
+ }
/* Rx IRQ */
priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
@@ -1566,7 +1365,7 @@ static int altera_tse_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
- netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, tse_poll);
spin_lock_init(&priv->mac_cfg_lock);
spin_lock_init(&priv->tx_lock);
@@ -1590,11 +1389,32 @@ static int altera_tse_probe(struct platform_device *pdev)
(unsigned long) control_port->start, priv->rx_irq,
priv->tx_irq);
- ret = init_phy(ndev);
- if (ret != 0) {
- netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ priv->pcs = alt_tse_pcs_create(ndev, priv->pcs_base, pcs_reg_width);
+
+ priv->phylink_config.dev = &ndev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(priv->device->of_node),
+ priv->phy_iface, &alt_tse_phylink_ops);
+ if (IS_ERR(priv->phylink)) {
+ dev_err(&pdev->dev, "failed to create phylink\n");
+ ret = PTR_ERR(priv->phylink);
goto err_init_phy;
}
+
return 0;
err_init_phy:
@@ -1614,16 +1434,10 @@ static int altera_tse_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
-
- if (of_phy_is_fixed_link(priv->device->of_node))
- of_phy_deregister_fixed_link(priv->device->of_node);
- }
-
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 39242c5a1729..98d6386b7f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -462,8 +462,8 @@ static void ena_get_drvinfo(struct net_device *dev,
{
struct ena_adapter *adapter = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 6a356a6cee15..d350eeec8bad 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2265,10 +2265,8 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
- netif_napi_add(adapter->netdev,
- &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &napi->napi,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
if (!ENA_IS_XDP_INDEX(adapter, i)) {
napi->rx_ring = &adapter->rx_ring[i];
@@ -3166,7 +3164,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strlcpy(host_info->kernel_ver_str, utsname()->version,
+ strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3a351d3396bf..68983b717145 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -695,7 +695,7 @@ static int a2065_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct lance_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct lance_regs));
release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 5d1baa01360f..ea6cfc2095e1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -43,7 +43,7 @@ Revision History:
3.0.4 12/09/2003
1. Added set_mac_address routine for bonding driver support.
2. Tested the driver for bonding support
- 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth
+ 3. Bug fix: Fixed mismach in actual receive buffer length and length
indicated to the h/w.
4. Modified amd8111e_rx() routine to receive all the received packets
in the first interrupt.
@@ -185,24 +185,23 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option) {
-
- default:
- case SPEED_AUTONEG: /* advertise all values */
- tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- break;
- case SPEED10_HALF:
- tmp |= ADVERTISE_10HALF;
- break;
- case SPEED10_FULL:
- tmp |= ADVERTISE_10FULL;
- break;
- case SPEED100_HALF:
- tmp |= ADVERTISE_100HALF;
- break;
- case SPEED100_FULL:
- tmp |= ADVERTISE_100FULL;
- break;
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
}
if(advert != tmp)
@@ -237,7 +236,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff != NULL) {
+ if (rx_skbuff) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -1084,7 +1083,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if (unlikely(dev == NULL))
+ if (unlikely(!dev))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1109,7 +1108,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
if (napi_schedule_prep(&lp->napi)) {
- /* Disable receive interupts */
+ /* Disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
@@ -1364,10 +1363,10 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1554,7 +1553,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
- /* Adapter is already stoped/suspended/interrupt-disabled */
+ /* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 37da79da5f5e..9d570adb295b 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -600,7 +600,7 @@ typedef enum {
#define CSTATE 1
#define SSTATE 2
-/* Assume contoller gets data 10 times the maximum processing time */
+/* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
/* amd8111e descriptor flag definitions */
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4ea7b9f3c424..38153e633231 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,7 @@ static int ariadne_rx(struct net_device *dev)
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
@@ -731,7 +731,7 @@ static int ariadne_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct Am79C960));
release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 27869164c6e6..3222c48ce6ae 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -581,15 +581,15 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* Get the ethernet address */
switch( lp->cardtype ) {
- case OLD_RIEBL:
+ case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
- case NEW_RIEBL:
+ case NEW_RIEBL:
lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
eth_hw_addr_set(dev, addr);
break;
- case PAM_CARD:
+ case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
addr[i] =
@@ -854,7 +854,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
int csr0, boguscnt = 10;
int handled = 0;
- if (dev == NULL) {
+ if (!dev) {
DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
return IRQ_NONE;
}
@@ -995,7 +995,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d5f2c6989221..c5cec4e79489 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -650,7 +650,7 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -786,7 +786,7 @@ static int au1000_rx(struct net_device *dev)
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
continue;
}
@@ -1199,7 +1199,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL) {
+ if (!aup->mii_bus) {
dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
@@ -1284,7 +1284,7 @@ static int au1000_probe(struct platform_device *pdev)
return 0;
err_out:
- if (aup->mii_bus != NULL)
+ if (aup->mii_bus)
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 462016666752..fb8686214a32 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -880,7 +880,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
rx_buff = skb->data;
else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
- if (rx_buff == NULL)
+ if (!rx_buff)
lp->rx_ring[i].base = 0;
else
lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
@@ -1186,7 +1186,7 @@ lance_rx(struct net_device *dev)
else
{
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
+ if (!skb)
{
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
for (i=0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 30ee5329bd7c..823a329a921f 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -485,10 +485,10 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
data = inb(ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -512,10 +512,10 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -567,13 +567,13 @@ static int mace_init(mace_private *lp, unsigned int ioaddr,
* Or just set ASEL in PHYCC below!
*/
switch (if_port) {
- case 1:
+ case 1:
mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
break;
- case 2:
+ case 2:
mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
break;
- default:
+ default:
mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
/* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
and the MACE device will automatically select the operating media
@@ -815,7 +815,7 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -918,7 +918,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
- if (dev == NULL) {
+ if (!dev) {
pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
irq);
return IRQ_NONE;
@@ -1102,7 +1102,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index b5ff47283cfe..72db9f9e7bee 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -488,7 +488,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_tx_ring == NULL)
+ if (!new_tx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -547,7 +547,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_rx_ring == NULL)
+ if (!new_rx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -797,9 +797,9 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (lp->pci_dev)
- strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ strscpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
@@ -1249,7 +1249,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
return;
}
@@ -2018,7 +2018,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
&lp->tx_ring_dma_addr, GFP_KERNEL);
- if (lp->tx_ring == NULL) {
+ if (!lp->tx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2026,7 +2026,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
&lp->rx_ring_dma_addr, GFP_KERNEL);
- if (lp->rx_ring == NULL) {
+ if (!lp->rx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2365,7 +2365,7 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
+ if (!rx_skbuff) {
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 007bd7787291..246f34c43765 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -341,7 +341,7 @@ static int __init lance_probe( struct net_device *dev)
/* XXX - leak? */
MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
- if (MEM == NULL) {
+ if (!MEM) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -796,7 +796,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 22d609563af8..68ca1225eedc 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -530,7 +530,7 @@ static void lance_rx_dvma(struct net_device *dev)
len = (rd->mblength & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -700,7 +700,7 @@ static void lance_rx_pio(struct net_device *dev)
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1276,7 +1276,7 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strscpy(info->driver, "sunlance", sizeof(info->driver));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f342bb853189..7b666106feee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -952,14 +952,14 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
+ xgbe_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
+ xgbe_all_poll);
napi_enable(&pdata->napi);
}
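The netif_napi_add() changes above, and the similar ones in the drivers that follow, track the API dropping its weight argument: the plain call now implies the default NAPI_POLL_WEIGHT of 64, and a driver that genuinely wants a different weight calls netif_napi_add_weight() instead, as the octeon_mgmt hunk later in this patch does. A minimal sketch with illustrative names (ndev, priv and my_poll are not taken from any driver here):

	/* was: netif_napi_add(ndev, &priv->napi, my_poll, 64); */
	netif_napi_add(ndev, &priv->napi, my_poll);

	/* only when a non-default weight is really needed */
	netif_napi_add_weight(ndev, &priv->napi, my_poll, 16);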
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6ceb1cdf6eba..6e83ff59172a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -402,8 +402,8 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index d022b6db9e06..379d19d18dbe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -672,7 +672,7 @@ static int xge_probe(struct platform_device *pdev)
if (ret)
goto err;
- netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &pdata->napi, xge_napi);
ret = register_netdev(ndev);
if (ret) {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 53dc8d5fede8..d6cfea65a714 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1977,14 +1977,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
napi = &pdata->rx_ring[i]->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
for (i = 0; i < pdata->cq_cnt; i++) {
napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 1daecd483b8d..a08f221e30d4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -238,7 +238,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
"%u.%u.%u", firmware_version >> 24,
(firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
- strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 02058fe79f52..3d0e16791e1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -292,9 +292,6 @@ static int aq_mdo_dev_open(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
@@ -306,9 +303,6 @@ static int aq_mdo_dev_stop(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
@@ -466,9 +460,6 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
@@ -492,9 +483,6 @@ static int aq_mdo_upd_secy(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
@@ -543,9 +531,6 @@ static int aq_mdo_del_secy(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
@@ -601,9 +586,6 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
@@ -631,9 +613,6 @@ static int aq_mdo_upd_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
@@ -681,9 +660,6 @@ static int aq_mdo_del_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -780,9 +756,6 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
@@ -809,9 +782,6 @@ static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
@@ -876,9 +846,6 @@ static int aq_mdo_del_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
@@ -948,9 +915,6 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
@@ -978,9 +942,6 @@ static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
@@ -1029,9 +990,6 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -1044,9 +1002,6 @@ static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
@@ -1073,9 +1028,6 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
@@ -1106,9 +1058,6 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
@@ -1147,9 +1096,6 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
@@ -1196,9 +1142,6 @@ static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
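The deleted "if (ctx->prepare) return 0;" early returns above all stem from the same MACsec offload API change: the separate prepare pass appears to have been removed, so each mdo_* callback now runs once and acts on the context directly. A hedged sketch of the resulting callback shape, with illustrative names (example_nic and example_apply_secy_cfg are not real symbols):

	static int example_mdo_dev_open(struct macsec_context *ctx)
	{
		struct example_nic *nic = netdev_priv(ctx->netdev);

		if (netif_carrier_ok(ctx->netdev))
			return example_apply_secy_cfg(nic, ctx->secy);

		return 0;
	}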
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 88595863d8bc..8a0af371e7dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -94,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev)
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 275324c9e51e..80b44043e6c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1217,8 +1217,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
- netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
aq_ptp->idx_vector = idx_vec;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f0fdf20f01c1..f5db1c44e9b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -119,8 +119,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->tx_rings = 0;
self->rx_rings = 0;
- netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
err_exit:
return self;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 288e2961823e..ba0646b3b122 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -91,7 +91,7 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index 6ba5b024a7be..8b7cdf015a16 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -293,7 +293,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
skb_put(skb, padlen);
/* EOP header */
- memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+ skb_put_data(skb, &info.eop, TX_EOP_SIZE);
skb_unlink(skb, q);
@@ -381,7 +381,7 @@ static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
return 1;
}
-static int
+static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
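Two of the ax88796c changes above are patterns that repeat elsewhere in the patch: skb_put_data() replaces the open-coded memcpy(skb_put(skb, len), src, len), and .ndo_start_xmit implementations are typed as returning netdev_tx_t rather than int (bcm4908_enet gets the same treatment below). A brief sketch with illustrative names:

	/* equivalent to: memcpy(skb_put(skb, len), src, len) */
	skb_put_data(skb, src, len);

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
	{
		/* ...hand the skb to the hardware ring... */
		return NETDEV_TX_OK;
	}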
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index e461f4764066..cc932b3cf873 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -451,8 +451,8 @@ static void ag71xx_get_drvinfo(struct net_device *ndev,
{
struct ag71xx *ag = netdev_priv(ndev);
- strlcpy(info->driver, "ag71xx", sizeof(info->driver));
- strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ strscpy(info->driver, "ag71xx", sizeof(info->driver));
+ strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index a89b93cb4e26..d30d11872719 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -752,7 +752,7 @@ static int alx_alloc_napis(struct alx_priv *alx)
goto err_out;
np->alx = alx;
- netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ netif_napi_add(alx->dev, &np->napi, alx_poll);
alx->qnapi[i] = np;
}
@@ -1912,11 +1912,14 @@ static int alx_suspend(struct device *dev)
if (!netif_running(alx->dev))
return 0;
+
+ rtnl_lock();
netif_device_detach(alx->dev);
mutex_lock(&alx->mtx);
__alx_stop(alx);
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return 0;
}
@@ -1927,6 +1930,7 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ rtnl_lock();
mutex_lock(&alx->mtx);
alx_reset_phy(hw);
@@ -1943,6 +1947,7 @@ static int alx_resume(struct device *dev)
unlock:
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index e2eb7b8c63a0..0bce122c68f1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -220,8 +220,8 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be4b1f8eef29..40c781695d58 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2732,7 +2732,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_set_threaded(netdev, true);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
- atl1c_clean_rx, 64);
+ atl1c_clean_rx);
for (i = 0; i < adapter->tx_queue_count; ++i)
netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
atl1c_clean_tx);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 0cbde352d1ba..68f1832a198d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -306,9 +306,9 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 57a51fb7746c..5db0f3495a32 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2354,7 +2354,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
- netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean);
timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index ff1fe09abf9f..c8444bcdf527 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2977,7 +2977,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl1_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
netdev->ethtool_ops = &atl1_ethtool_ops;
adapter->bd_number = cards_found;
@@ -3340,8 +3340,8 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bbc4d7b08a49..1b487c071cb6 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1980,9 +1980,9 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 56e0fb07aec7..f4e1ca68d831 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -53,8 +53,8 @@ config B44_PCI
config BCM4908_ENET
tristate "Broadcom BCM4908 internal mac support"
- depends on ARCH_BCM4908 || COMPILE_TEST
- default y if ARCH_BCM4908
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ default y if ARCH_BCMBCA
help
This driver supports Ethernet controller integrated into Broadcom
BCM4908 family SoCs.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e5857e88c207..7f876721596c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1790,13 +1790,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
- strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
- strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
@@ -2375,7 +2375,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
- netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ netif_napi_add(dev, &bp->napi, b44_poll);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index c131d8118489..93ccf549e2ed 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -507,7 +507,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
return 0;
}
-static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
@@ -716,6 +716,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
err = of_get_ethdev_address(dev->of_node, netdev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma_free;
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
@@ -723,17 +725,20 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
- netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
err = register_netdev(netdev);
- if (err) {
- bcm4908_enet_dma_free(enet);
- return err;
- }
+ if (err)
+ goto err_dma_free;
platform_set_drvdata(pdev, enet);
return 0;
+
+err_dma_free:
+ bcm4908_enet_dma_free(enet);
+
+ return err;
}
static int bcm4908_enet_remove(struct platform_device *pdev)
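The bcm4908_enet probe rework above is the usual consolidation of error handling behind a single label, so every failure after the DMA rings exist unwinds through err_dma_free. The general idiom, sketched with illustrative helpers (example_alloc_dma, example_register and example_free_dma are not real functions):

	static int example_probe(struct platform_device *pdev)
	{
		int err;

		err = example_alloc_dma(pdev);
		if (err)
			return err;

		err = example_register(pdev);
		if (err)
			goto err_dma_free;

		return 0;

	err_dma_free:
		example_free_dma(pdev);

		return err;
	}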
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 1c6aea12db72..d91fdb0c2649 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1321,8 +1321,8 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 47fc8e6963d5..867f14c30e09 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -308,8 +308,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -2564,7 +2564,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
dev->netdev_ops = &bcm_sysport_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 93580484a3f4..5fb3af5670ec 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,7 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
@@ -1395,8 +1395,8 @@ static void bgmac_get_ethtool_stats(struct net_device *dev,
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
@@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);
err = bgmac_phy_connect(bgmac);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b97ed9b5f685..fec57f1982c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -176,12 +176,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
- "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ "Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
- "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ "Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@@ -7042,9 +7042,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -8522,7 +8522,7 @@ bnx2_init_napi(struct bnx2 *bp)
else
poll = bnx2_poll_msix;
- netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
bnapi->bp = bp;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 712b5595bc39..16c490692f42 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -44,8 +44,7 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -55,8 +54,7 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -150,7 +148,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(buf, bp->fw_ver, buf_len);
+ strscpy(buf, bp->fw_ver, buf_len);
snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
@@ -789,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 0e319ac7799f..bda3ccc28eca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,7 +1112,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
int ext_dev_info_offset;
u32 mbi;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -1126,7 +1126,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
(mbi & 0xff000000) >> 24,
(mbi & 0x00ff0000) >> 16,
(mbi & 0x0000ff00) >> 8);
- strlcpy(info->fw_version, version,
+ strscpy(info->fw_version, version,
sizeof(info->fw_version));
}
}
@@ -1135,7 +1135,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
strlcat(info->fw_version, version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7071604f9984..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 962253db25b8..51b1690fd045 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3385,7 +3385,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 2dac704dc346..02a4e557e176 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -518,7 +518,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c9129b9ba446..0657a0f5170f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -380,7 +380,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ba0f1ffac507..eed98c10ca9d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -659,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -668,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -688,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
+ skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -698,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
@@ -9366,16 +9366,16 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
+ bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
}
}
@@ -11178,10 +11178,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
- features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
-
- if (!(bp->flags & BNXT_FLAG_TPA))
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 075c6206325c..b1b17f911300 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2130,6 +2130,7 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
u8 ptp_all_rx_tstamp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 059f96f7a96f..a36803e79e92 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1306,6 +1306,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 87eb5362ad70..f57e524c7e30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1371,9 +1371,9 @@ static void bnxt_get_drvinfo(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
@@ -3876,7 +3876,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strscpy(str, fw_str, ETH_GSTRING_LEN);
strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
if (test_info->offline_mask & (1 << i))
strncat(str, " (offline)",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 7f3c0875b6f5..2132ce63193c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
}
@@ -505,9 +505,13 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
ptp->tstamp_filters = flags;
if (netif_running(bp->dev)) {
- rc = bnxt_close_nic(bp, false, false);
- if (!rc)
- rc = bnxt_open_nic(bp, false, false);
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
if (!rc && !ptp->tstamp_filters)
rc = -EIO;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 730febd19330..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index eb4803b11c0e..fcc65890820a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -222,7 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f53387ed0167..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
- xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
}
@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 8309fb993cdb..25c450606985 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1146,7 +1146,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -2707,8 +2707,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
bcmgenet_init_rx_coalesce(ring);
/* Initialize Rx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index db1e9d810b41..4179a12fc881 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7380,9 +7380,9 @@ static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -12302,9 +12302,9 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 29dd0f93d6c0..d6d90f9722a7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1891,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, NAPI_POLL_WEIGHT);
+ bnad_napi_poll_rx);
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 8aca768571b2..df10edff5603 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -114,7 +114,7 @@ static const char *bnad_net_stats_strings[] = {
"mac_tx_deferral",
"mac_tx_excessive_deferral",
"mac_tx_single_collision",
- "mac_tx_muliple_collision",
+ "mac_tx_multiple_collision",
"mac_tx_late_collision",
"mac_tx_excessive_collision",
"mac_tx_total_collision",
@@ -283,7 +283,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -291,12 +291,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66c7d08d376a..51c9fd6f68a4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -3977,8 +3978,8 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
spin_lock_init(&queue->tx_ptr_lock);
- netif_napi_add(dev, &queue->napi_rx, macb_rx_poll, NAPI_POLL_WEIGHT);
- netif_napi_add(dev, &queue->napi_tx, macb_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -4621,6 +4622,25 @@ static int init_reset_optional(struct platform_device *pdev)
"failed to init SGMII PHY\n");
}
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
/* Fully reset controller at hardware level if mapped in device tree */
ret = device_reset_optional(&pdev->dev);
if (ret) {
@@ -4629,6 +4649,8 @@ static int init_reset_optional(struct platform_device *pdev)
}
ret = macb_init(pdev);
+
+err_out_phy_exit:
if (ret)
phy_exit(bp->sgmii_phy);
@@ -5109,6 +5131,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -5198,6 +5221,9 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
phylink_start(bp->phylink);
rtnl_unlock();
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 1281d1565ef8..f4f87dfa9687 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1792,7 +1792,7 @@ static int xgmac_probe(struct platform_device *pdev)
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
- netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ netif_napi_add(ndev, &priv->napi, xgmac_poll);
ret = register_netdev(ndev);
if (ret)
goto err_reg;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 3f1c189646f4..a0fd32476225 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -87,8 +87,8 @@
*/
#define CN23XX_SLI_PKT_IN_JABBER 0x29170
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
index d33dd8f4226f..e956109415cd 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -36,8 +36,8 @@
#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..882b2be06ea0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -851,7 +851,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
napi = &droq->napi;
dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
(u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index bee35ce60171..d312bd594935 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,11 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct lio_trusted_vf_ctx {
- struct completion complete;
- int status;
-};
-
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 103591dcea1c..edde0b8fa49c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1342,7 +1342,7 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1396,8 +1396,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
p = netdev_priv(netdev);
- netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
- OCTEON_MGMT_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
p->netdev = netdev;
p->dev = &pdev->dev;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5a9fad61e9ea..e5c71f907852 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -191,8 +191,8 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768ea426d49f..98f3dc460ca7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1472,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
}
cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
- netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
napi_enable(&cq_poll->napi);
nic->napi[qidx] = cq_poll;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index f4054d2553ea..d2286adf09fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,8 +429,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -1053,7 +1053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
- netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, t1_poll);
netdev->ethtool_ops = &t1_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 174b1e156669..a52e6b6e2876 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -609,8 +609,7 @@ static void init_napi(struct adapter *adap)
struct sge_qset *qs = &adap->sge.qs[i];
if (qs->adap)
- netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
- 64);
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
}
/*
@@ -1627,8 +1626,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
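
For reference (not part of the patch): the unwind pattern cudbg_collect_qdesc() moves to above, where uld_mutex and the mqprio mutex each get their own unlock label so a failure under one lock never drops the other. A minimal sketch with hypothetical lock_a/lock_b and do_work_*() helpers:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(lock_a);
    static DEFINE_MUTEX(lock_b);

    static int do_work_a(void) { return 0; }   /* hypothetical */
    static int do_work_b(void) { return 0; }   /* hypothetical */

    static int two_lock_collect(void)
    {
            int rc;

            mutex_lock(&lock_a);
            rc = do_work_a();
            if (rc)
                    goto out_unlock_a;
            mutex_unlock(&lock_a);

            mutex_lock(&lock_b);
            rc = do_work_b();
            mutex_unlock(&lock_b);
            return rc;

    out_unlock_a:
            mutex_unlock(&lock_a);
            return rc;
    }
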
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 77897edd2bc0..8477a93cee6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -199,8 +199,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = netdev2adap(dev);
u32 exprom_vers;
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d0061921529f..9cbce1faab26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3903,8 +3903,8 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ee52e3b1d74f..46809e2d94ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -4467,7 +4467,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
if (ret)
goto err;
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &iq->napi, napi_rx_handler);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 26433a62d7f0..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd[3];
+ __be32 rsvd;
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index c2822e635f89..54db79f4dcfe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1553,8 +1553,8 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 43b2ceb6aa32..2d0cf76fb3c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2336,7 +2336,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
if (ret)
goto err;
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler);
rspq->cur_desc = rspq->desc;
rspq->cidx = 0;
rspq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index ddfe9208529a..f90bfba4b303 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1069,8 +1069,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
-static void inet_inherit_port(struct inet_hashinfo *hash_info,
- struct sock *lsk, struct sock *newsk)
+static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
local_bh_disable();
__inet_inherit_port(lsk, newsk);
@@ -1240,7 +1239,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
- inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ inet_inherit_port(lsk, newsk);
csk_set_flag(csk, CSK_CONN_INLINE);
bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 9098b3eed4da..1e55b12fee51 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -193,7 +193,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
{
struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 21ba6e893072..8627ab19d470 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -689,7 +689,7 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
@@ -812,7 +812,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 60d8c0fbc037..08b7cc0a1809 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,10 +131,10 @@ static void enic_get_drvinfo(struct net_device *netdev,
if (err == -ENOMEM)
return;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 372fb7b3a282..29500d32e362 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2633,16 +2633,17 @@ static int enic_dev_init(struct enic *enic)
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
- netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ netif_napi_add(netdev, &enic->napi[0], enic_poll);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
netif_napi_add(netdev, &enic->napi[i],
- enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+ enic_poll_msix_rq);
}
for (i = 0; i < enic->wq_count; i++)
- netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
- enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev,
+ &enic->napi[enic_cq_wq(enic, i)],
+ enic_poll_msix_wq);
break;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9e6de2f968fa..fdf10318758b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
@@ -2471,7 +2471,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
ret = of_get_mac_address(np, mac);
if (!ret) {
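
For reference (not part of the patch): the reader side of the u64_stats pattern gemini uses above; the _irq fetch helpers match writers that update the counters from (soft)irq context, as the "Racing with ..." comments note. A minimal sketch with a hypothetical my_stats structure:

    #include <linux/u64_stats_sync.h>

    struct my_stats {
            struct u64_stats_sync syncp;
            u64 rx_packets;
    };

    static u64 my_read_rx_packets(struct my_stats *s)
    {
            unsigned int start;
            u64 packets;

            /* retry the snapshot if a writer updated the counters while we
             * were reading them
             */
            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    packets = s->rx_packets;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));

            return packets;
    }
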
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0985ab216566..b21e56de6167 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -28,8 +28,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -540,8 +539,8 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
struct board_info *dm = to_dm9000_board(dev);
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
@@ -1012,7 +1011,7 @@ static void dm9000_send_packet(struct net_device *dev,
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int
+static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
@@ -1421,8 +1420,7 @@ dm9000_probe(struct platform_device *pdev)
int iosize;
int i;
u32 id_val;
- int reset_gpios;
- enum of_gpio_flags flags;
+ struct gpio_desc *reset_gpio;
struct regulator *power;
bool inv_mac_addr = false;
u8 addr[ETH_ALEN];
@@ -1442,20 +1440,24 @@ dm9000_probe(struct platform_device *pdev)
dev_dbg(dev, "regulator enabled\n");
}
- reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
- &flags);
- if (gpio_is_valid(reset_gpios)) {
- ret = devm_gpio_request_one(dev, reset_gpios, flags,
- "dm9000_reset");
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(reset_gpio);
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio: %d\n", ret);
+ goto out_regulator_disable;
+ }
+
+ if (reset_gpio) {
+ ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
if (ret) {
- dev_err(dev, "failed to request reset gpio %d: %d\n",
- reset_gpios, ret);
+ dev_err(dev, "failed to set reset gpio name: %d\n",
+ ret);
goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
msleep(2);
- gpio_set_value(reset_gpios, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
/* Needs 3ms to read eeprom when PWRST is deasserted */
msleep(4);
}
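
For reference (not part of the patch): the descriptor-based GPIO request the dm9000 conversion above uses. A minimal sketch; the "reset" con_id and the delays mirror the driver, everything else is hypothetical:

    #include <linux/gpio/consumer.h>
    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_hw_reset(struct device *dev)
    {
            struct gpio_desc *reset;

            /* NULL if no reset-gpios property exists, ERR_PTR on failure;
             * GPIOD_OUT_HIGH drives the line to its logically active state
             * (polarity comes from the firmware description), i.e. reset is
             * asserted immediately
             */
            reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            if (reset) {
                    msleep(2);                          /* hold reset */
                    gpiod_set_value_cansleep(reset, 0); /* deassert */
                    msleep(4);                          /* let the chip wake up */
            }

            return 0;
    }
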
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d51b3d24a0c8..cd3dc4b89518 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1606,8 +1606,8 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83f1727d1423..3188ba7b450f 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1074,8 +1074,8 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int dmfe_ethtool_set_wol(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index b8e46c4849ef..ecfad43df45a 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -858,8 +858,8 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 77d9058431e3..ff080ab0f116 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -971,8 +971,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1db19463fd46..37fba39c0056 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1374,8 +1374,8 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a301f7e6a440..2c67a857a42f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1235,8 +1235,8 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "dl2k", sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 8dd7bf9014ec..43def191f26f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1644,8 +1644,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 92462ed87bc4..08184f20f510 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,8 +725,8 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, "0", sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -788,7 +788,7 @@ static int dnet_probe(struct platform_device *pdev)
}
dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ netif_napi_add(dev, &bp->napi, dnet_poll);
dev->ethtool_ops = &dnet_ethtool_ops;
dev->base_addr = (unsigned long)bp->regs;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index b4f5e57d0285..08ec84cd21c0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1878,9 +1878,9 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
sizeof(adapter->fw_ver));
- strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
sizeof(adapter->fw_on_flash));
}
err:
@@ -2373,7 +2373,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2442,9 +2442,9 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
0, PAGE_DATA_LEN, page_data);
if (!status) {
- strlcpy(adapter->phy.vendor_name, page_data +
+ strscpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
- strlcpy(adapter->phy.vendor_pn,
+ strscpy(adapter->phy.vendor_pn,
page_data + SFP_VENDOR_PN_OFFSET,
SFP_VENDOR_NAME_LEN - 1);
}
@@ -2473,7 +2473,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bd0df189d871..77edc3d9b505 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -220,15 +220,15 @@ static void be_get_drvinfo(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
- strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 414362febbb9..a92a74761546 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2982,8 +2982,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
}
return 0;
}
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
index f4e2b1102d8f..3df6bf476ae7 100644
--- a/drivers/net/ethernet/engleder/Kconfig
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -21,6 +21,7 @@ config TSNEP
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
+ select PAGE_POOL
help
Support for the Engleder TSN endpoint Ethernet MAC IP Core.
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index cce2191cb889..b6e3b16623de 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_TSNEP) += tsnep.o
tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
- $(tsnep-y)
+ tsnep_rxnfc.o $(tsnep-y)
tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 23bbece6b7de..09a723b827c7 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -21,8 +21,6 @@
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
-#define TSNEP_QUEUES 1
-
struct tsnep_gcl {
void __iomem *addr;
@@ -39,6 +37,24 @@ struct tsnep_gcl {
bool change;
};
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
struct tsnep_tx_entry {
struct tsnep_tx_desc *desc;
struct tsnep_tx_desc_wb *desc_wb;
@@ -55,6 +71,7 @@ struct tsnep_tx_entry {
struct tsnep_tx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -79,14 +96,15 @@ struct tsnep_rx_entry {
u32 properties;
- struct sk_buff *skb;
+ struct page *page;
size_t len;
- DEFINE_DMA_UNMAP_ADDR(dma);
+ dma_addr_t dma;
};
struct tsnep_rx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -95,6 +113,7 @@ struct tsnep_rx {
int read;
u32 owner_counter;
int increment_owner_counter;
+ struct page_pool *page_pool;
u32 packets;
u32 bytes;
@@ -104,12 +123,14 @@ struct tsnep_rx {
struct tsnep_queue {
struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 9];
struct tsnep_tx *tx;
struct tsnep_rx *rx;
struct napi_struct napi;
+ int irq;
u32 irq_mask;
};
@@ -125,7 +146,6 @@ struct tsnep_adapter {
struct platform_device *pdev;
struct device *dmadev;
void __iomem *addr;
- int irq;
bool gate_control;
/* gate control lock */
@@ -140,6 +160,12 @@ struct tsnep_adapter {
/* ptp clock lock */
spinlock_t ptp_lock;
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
int num_tx_queues;
struct tsnep_tx tx[TSNEP_MAX_QUEUES];
int num_rx_queues;
@@ -160,6 +186,18 @@ void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
int tsnep_ethtool_get_test_count(void);
void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index e6760dc68ddd..a713a126b227 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -250,6 +250,44 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int tsnep_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int tsnep_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -287,6 +325,8 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_strings = tsnep_ethtool_get_strings,
.get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
.get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
index 916ceac3ada2..315dada75323 100644
--- a/drivers/net/ethernet/engleder/tsnep_hw.h
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -34,6 +34,7 @@
#define ECM_INT_LINK 0x00000020
#define ECM_INT_TX_0 0x00000100
#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
#define ECM_INT_ALL 0x7FFFFFFF
#define ECM_INT_DISABLE 0x80000000
@@ -92,8 +93,7 @@
/* tsnep register */
#define TSNEP_INFO 0x0100
-#define TSNEP_INFO_RX_ASSIGN 0x00010000
-#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_INFO_TX_TIME 0x00010000
#define TSNEP_CONTROL 0x0108
#define TSNEP_CONTROL_TX_RESET 0x00000001
#define TSNEP_CONTROL_TX_ENABLE 0x00000002
@@ -122,10 +122,6 @@
#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
-#define TSNEP_RX_ASSIGN 0x01A0
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
#define TSNEP_MAC_ADDRESS_LOW 0x0800
#define TSNEP_MAC_ADDRESS_HIGH 0x0804
#define TSNEP_RX_FILTER 0x0806
@@ -152,6 +148,14 @@
#define TSNEP_GCL_A 0x2000
#define TSNEP_GCL_B 0x2800
#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
/* tsnep gate control list operation */
struct tsnep_gcl_operation {
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index a5f7152a1716..48fb391951dd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -27,10 +27,10 @@
#include <linux/phy.h>
#include <linux/iopoll.h>
-#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
- TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
-#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
-#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
@@ -60,22 +60,29 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
/* handle link interrupt */
- if ((active & ECM_INT_LINK) != 0) {
- if (adapter->netdev->phydev)
- phy_mac_interrupt(adapter->netdev->phydev);
- }
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- if (adapter->netdev) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
- }
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
}
return IRQ_HANDLED;
}
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(&queue->napi);
+
+ return IRQ_HANDLED;
+}
+
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
struct tsnep_adapter *adapter = bus->priv;
@@ -124,30 +131,51 @@ static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
return 0;
}
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
static void tsnep_phy_link_status_change(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
- u32 mode;
- if (phydev->link) {
- switch (phydev->speed) {
- case SPEED_100:
- mode = ECM_LINK_MODE_100;
- break;
- case SPEED_1000:
- mode = ECM_LINK_MODE_1000;
- break;
- default:
- mode = ECM_LINK_MODE_OFF;
- break;
- }
- iowrite32(mode, adapter->addr + ECM_STATUS);
- }
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
phy_print_status(netdev->phydev);
}
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int retval;
+
+ retval = phy_loopback(adapter->phydev, enable);
+
+ /* PHY link state change is not signaled if loopback is enabled, it
+ * would delay a working loopback anyway, let's ensure that loopback
+ * is working immediately by setting link mode directly
+ */
+ if (!retval && enable)
+ tsnep_set_link_mode(adapter);
+
+ return retval;
+}
+
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
@@ -241,14 +269,14 @@ alloc_failed:
return retval;
}
-static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
{
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
if (entry->skb) {
- entry->properties =
- skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
@@ -313,6 +341,7 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
struct tsnep_tx_entry *entry;
unsigned int len;
dma_addr_t dma;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -335,15 +364,18 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
dma_unmap_addr_set(entry, dma, dma);
entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
}
- return 0;
+ return map_len;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -360,9 +392,12 @@ static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
+ map_len += entry->len;
entry->len = 0;
}
}
+
+ return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
@@ -371,6 +406,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
+ int length;
int i;
int retval;
@@ -394,7 +430,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry->skb = skb;
retval = tsnep_tx_map(skb, tx, count);
- if (retval != 0) {
+ if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -407,12 +443,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ length = retval;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
for (i = 0; i < count; i++)
- tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
i == (count - 1));
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
@@ -428,9 +465,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_queue(tx->adapter->netdev);
}
- tx->packets++;
- tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
-
spin_unlock_irqrestore(&tx->lock, flags);
return NETDEV_TX_OK;
@@ -442,6 +476,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
int budget = 128;
struct tsnep_tx_entry *entry;
int count;
+ int length;
spin_lock_irqsave(&tx->lock, flags);
@@ -464,7 +499,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, tx->read, count);
+ length = tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -491,6 +526,9 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
budget--;
} while (likely(budget));
@@ -505,7 +543,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
}
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_tx *tx)
+ int queue_index, struct tsnep_tx *tx)
{
dma_addr_t dma;
int retval;
@@ -513,6 +551,7 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(tx, 0, sizeof(*tx));
tx->adapter = adapter;
tx->addr = addr;
+ tx->queue_index = queue_index;
retval = tsnep_tx_ring_init(tx);
if (retval)
@@ -548,14 +587,15 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
- if (dma_unmap_addr(entry, dma))
- dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
- dma_unmap_len(entry, len),
- DMA_FROM_DEVICE);
- if (entry->skb)
- dev_kfree_skb(entry->skb);
+ if (entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ entry->page = NULL;
}
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
memset(rx->entry, 0, sizeof(rx->entry));
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
@@ -568,31 +608,19 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
}
}
-static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
- struct tsnep_rx_entry *entry)
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
{
- struct device *dmadev = rx->adapter->dmadev;
- struct sk_buff *skb;
- dma_addr_t dma;
+ struct page *page;
- skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
- GFP_ATOMIC | GFP_DMA);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- skb_reserve(skb, RX_SKB_RESERVE);
-
- dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dmadev, dma)) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- entry->skb = skb;
- entry->len = RX_SKB_LENGTH;
- dma_unmap_addr_set(entry, dma, dma);
- entry->desc->rx = __cpu_to_le64(dma);
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
return 0;
}
@@ -601,6 +629,7 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
struct tsnep_rx_entry *next_entry;
int i, j;
int retval;
@@ -622,12 +651,28 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
}
}
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_SKB_PAD;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (retval)
goto failed;
}
@@ -643,7 +688,7 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
- /* RX_SKB_LENGTH is a multiple of 4 */
+ /* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (index == rx->increment_owner_counter) {
@@ -666,19 +711,52 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
entry->desc->properties = __cpu_to_le32(entry->properties);
}
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_SKB_PAD);
+
+ skb_shinfo(skb)->tx_flags |=
+ SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
int done = 0;
+ enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
+ struct page *page;
struct sk_buff *skb;
- size_t len;
- dma_addr_t dma;
int length;
bool enable = false;
int retval;
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+
while (likely(done < budget)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
@@ -691,42 +769,34 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
*/
dma_rmb();
- skb = entry->skb;
- len = dma_unmap_len(entry, len);
- dma = dma_unmap_addr(entry, dma);
+ prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
+ length, dma_dir);
+ page = entry->page;
/* forward skb only if allocation is successful, otherwise
- * skb is reused and frame dropped
+ * page is reused and frame dropped
*/
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (!retval) {
- dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
-
- length = __le32_to_cpu(entry->desc_wb->properties) &
- TSNEP_DESC_LENGTH_MASK;
- skb_put(skb, length - ETH_FCS_LEN);
- if (rx->adapter->hwtstamp_config.rx_filter ==
- HWTSTAMP_FILTER_ALL) {
- struct skb_shared_hwtstamps *hwtstamps =
- skb_hwtstamps(skb);
- struct tsnep_rx_inline *rx_inline =
- (struct tsnep_rx_inline *)skb->data;
-
- skb_shinfo(skb)->tx_flags |=
- SKBTX_HW_TSTAMP_NETDEV;
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->netdev_data = rx_inline;
- }
- skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
- skb->protocol = eth_type_trans(skb,
- rx->adapter->netdev);
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ page_pool_release_page(rx->page_pool, page);
- rx->packets++;
- rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
- if (skb->pkt_type == PACKET_MULTICAST)
- rx->multicast++;
+ rx->packets++;
+ rx->bytes += length -
+ TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
- napi_gro_receive(napi, skb);
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ rx->dropped++;
+ }
done++;
} else {
rx->dropped++;
@@ -752,7 +822,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
}
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_rx *rx)
+ int queue_index, struct tsnep_rx *rx)
{
dma_addr_t dma;
int i;
@@ -761,6 +831,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(rx, 0, sizeof(*rx));
rx->adapter = adapter;
rx->addr = addr;
+ rx->queue_index = queue_index;
retval = tsnep_rx_ring_init(rx);
if (retval)
@@ -821,6 +892,56 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
return min(done, budget - 1);
}
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ sprintf(queue->name, "%s-txrx-%d", name,
+ queue->rx->queue_index);
+ else if (queue->tx)
+ sprintf(queue->name, "%s-tx-%d", name,
+ queue->tx->queue_index);
+ else
+ sprintf(queue->name, "%s-rx-%d", name,
+ queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -830,15 +951,11 @@ static int tsnep_netdev_open(struct net_device *netdev)
int rx_queue_index = 0;
int retval;
- retval = tsnep_phy_open(adapter);
- if (retval)
- return retval;
-
for (i = 0; i < adapter->num_queues; i++) {
adapter->queue[i].adapter = adapter;
if (adapter->queue[i].tx) {
addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
- retval = tsnep_tx_open(adapter, addr,
+ retval = tsnep_tx_open(adapter, addr, tx_queue_index,
adapter->queue[i].tx);
if (retval)
goto failed;
@@ -847,11 +964,20 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (adapter->queue[i].rx) {
addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
retval = tsnep_rx_open(adapter, addr,
+ rx_queue_index,
adapter->queue[i].rx);
if (retval)
goto failed;
rx_queue_index++;
}
+
+ retval = tsnep_request_irq(&adapter->queue[i], i == 0);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n",
+ adapter->queue[i].irq);
+ goto failed;
+ }
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
@@ -863,9 +989,14 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (retval)
goto failed;
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
for (i = 0; i < adapter->num_queues; i++) {
netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
- tsnep_poll, 64);
+ tsnep_poll);
napi_enable(&adapter->queue[i].napi);
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -873,14 +1004,18 @@ static int tsnep_netdev_open(struct net_device *netdev)
return 0;
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
failed:
for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
return retval;
}
@@ -889,20 +1024,23 @@ static int tsnep_netdev_close(struct net_device *netdev)
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i;
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
for (i = 0; i < adapter->num_queues; i++) {
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
napi_disable(&adapter->queue[i].napi);
netif_napi_del(&adapter->queue[i].napi);
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
-
return 0;
}
@@ -1017,6 +1155,22 @@ static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles)
@@ -1038,9 +1192,9 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_start_xmit = tsnep_netdev_xmit_frame,
.ndo_eth_ioctl = tsnep_netdev_ioctl,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
-
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
};
@@ -1141,6 +1295,52 @@ static int tsnep_phy_init(struct tsnep_adapter *adapter)
return 0;
}
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = irq_mask;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ }
+
+ return 0;
+}
+
static int tsnep_probe(struct platform_device *pdev)
{
struct tsnep_adapter *adapter;
@@ -1149,6 +1349,7 @@ static int tsnep_probe(struct platform_device *pdev)
u32 type;
int revision;
int version;
+ int queue_count;
int retval;
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
@@ -1170,41 +1371,39 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
adapter->addr = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
- adapter->irq = platform_get_irq(pdev, 0);
netdev->mem_start = io->start;
netdev->mem_end = io->end;
- netdev->irq = adapter->irq;
type = ioread32(adapter->addr + ECM_TYPE);
revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
adapter->gate_control = type & ECM_GATE_CONTROL;
-
- adapter->num_tx_queues = TSNEP_QUEUES;
- adapter->num_rx_queues = TSNEP_QUEUES;
- adapter->num_queues = TSNEP_QUEUES;
- adapter->queue[0].tx = &adapter->tx[0];
- adapter->queue[0].rx = &adapter->rx[0];
- adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
tsnep_disable_irq(adapter, ECM_INT_ALL);
- retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
- 0, TSNEP, adapter);
- if (retval != 0) {
- dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
- adapter->irq);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
return retval;
}
- tsnep_enable_irq(adapter, ECM_INT_LINK);
retval = tsnep_mac_init(adapter);
if (retval)
- goto mac_init_failed;
+ return retval;
retval = tsnep_mdio_init(adapter);
if (retval)
@@ -1222,10 +1421,14 @@ static int tsnep_probe(struct platform_device *pdev)
if (retval)
goto tc_init_failed;
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
netdev->netdev_ops = &tsnep_netdev_ops;
netdev->ethtool_ops = &tsnep_ethtool_ops;
netdev->features = NETIF_F_SG;
- netdev->hw_features = netdev->features;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -1242,6 +1445,8 @@ static int tsnep_probe(struct platform_device *pdev)
return 0;
register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
tsnep_tc_cleanup(adapter);
tc_init_failed:
tsnep_ptp_cleanup(adapter);
@@ -1250,8 +1455,6 @@ phy_init_failed:
if (adapter->mdiobus)
mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
-mac_init_failed:
- tsnep_disable_irq(adapter, ECM_INT_ALL);
return retval;
}
@@ -1261,6 +1464,8 @@ static int tsnep_remove(struct platform_device *pdev)
unregister_netdev(adapter->netdev);
+ tsnep_rxnfc_cleanup(adapter);
+
tsnep_tc_cleanup(adapter);
tsnep_ptp_cleanup(adapter);
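Editor's note: the hunks above only gather one interrupt per TX/RX queue pair (the first pair from the unnamed or "mac" interrupt, extra pairs from optional "txrx-<i>" entries); the request side of those interrupts is not part of this hunk. The sketch below is an illustration of how such per-queue interrupts are typically requested; the handler name tsnep_irq_txrx and the use of dev_name() as the IRQ label are assumptions, not code from this patch.

	/* Hypothetical sketch, not taken from this patch: requesting the
	 * per-queue interrupts collected by tsnep_queue_init().
	 */
	static int example_request_queue_irqs(struct tsnep_adapter *adapter)
	{
		int i, retval;

		for (i = 0; i < adapter->num_queues; i++) {
			retval = devm_request_irq(&adapter->pdev->dev,
						  adapter->queue[i].irq,
						  tsnep_irq_txrx, 0,
						  dev_name(&adapter->pdev->dev),
						  &adapter->queue[i]);
			if (retval)
				return retval;
		}

		return 0;
	}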
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 000000000000..9ac2a0cf3833
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
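Editor's note: tsnep_rxnfc.c implements only the rule bookkeeping and register programming; the ethtool entry points that dispatch to it live in tsnep_ethtool.c, which is not shown in this hunk. A minimal sketch of what that set_rxnfc dispatch is expected to look like follows (the function name is hypothetical). From userspace such a rule would typically be installed with something like "ethtool -N <dev> flow-type ether proto 0x88f7 action 1".

	/* Hypothetical sketch (the real dispatch lives in tsnep_ethtool.c):
	 * routing ethtool's set_rxnfc op to the helpers above.
	 */
	static int example_set_rxnfc(struct net_device *netdev,
				     struct ethtool_rxnfc *cmd)
	{
		struct tsnep_adapter *adapter = netdev_priv(netdev);

		switch (cmd->cmd) {
		case ETHTOOL_SRXCLSRLINS:	/* insert or replace a rule */
			return tsnep_rxnfc_add_rule(adapter, cmd);
		case ETHTOOL_SRXCLSRLDEL:	/* delete a rule */
			return tsnep_rxnfc_del_rule(adapter, cmd);
		default:
			return -EOPNOTSUPP;
		}
	}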
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 437c5acfe222..95cbad198b4b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1224,7 +1224,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
- netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ethoc_poll);
spin_lock_init(&priv->lock);
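Editor's note: the netif_napi_add() conversions in this and the following drivers reflect the API change that dropped the explicit weight argument; the default weight (NAPI_POLL_WEIGHT, 64) is now implied. A driver that genuinely needs a different budget would use netif_napi_add_weight() instead. Illustrative fragment only; 'priv' and 'my_poll' are placeholders, not from this patch.

	/* default weight (NAPI_POLL_WEIGHT) is implied */
	netif_napi_add(netdev, &priv->napi, my_poll);

	/* or, only if a non-default budget is really required: */
	netif_napi_add_weight(netdev, &priv->napi, my_poll, 16);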
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c03663785a8d..a03879a27b04 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1063,8 +1063,8 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static void
@@ -1506,7 +1506,7 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
/* Initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Grab our interrupt */
err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
@@ -1701,10 +1701,14 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
if (!netdev->phydev)
return;
phy_disconnect(netdev->phydev);
+ if (of_phy_is_fixed_link(priv->dev->of_node))
+ of_phy_deregister_fixed_link(priv->dev->of_node);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1867,6 +1871,26 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+ } else if (np && of_phy_is_fixed_link(np)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(np);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register fixed PHY\n");
+ goto err_phy_connect;
+ }
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
+ of_phy_deregister_fixed_link(np);
+ err = -EINVAL;
+ goto err_phy_connect;
+ }
+
+ /* Display what we found */
+ phy_attached_info(phy);
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8a341e2d5833..d95d78230828 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -807,8 +807,8 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_link_ksettings(struct net_device *netdev,
@@ -1075,6 +1075,11 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
+ netdev->max_mtu = MAX_PKT_SIZE;
+
+ err = platform_get_ethdev_address(&pdev->dev, netdev);
+ if (err == -EPROBE_DEFER)
+ goto defer_get_mac;
platform_set_drvdata(pdev, netdev);
@@ -1086,7 +1091,7 @@ static int ftmac100_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1137,6 +1142,7 @@ err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
+defer_get_mac:
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index fe986f1673fc..8af32f9070f4 100644
--- a/drivers/net/ethernet/faraday/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -122,9 +122,9 @@
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* TXBUF_BADR */
+ __le32 txdes0;
+ __le32 txdes1;
+ __le32 txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
@@ -143,9 +143,9 @@ struct ftmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* RXBUF_BADR */
+ __le32 rxdes0;
+ __le32 rxdes1;
+ __le32 rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
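Editor's note: switching the hardware-owned descriptor words to __le32 lets sparse flag any access that skips an endianness conversion. The helper below is illustrative, not part of this patch; the OWN-bit macro name is assumed to match the existing definitions in ftmac100.h.

	/* Illustrative helper: hardware descriptor words are little-endian and
	 * must go through cpu_to_le32()/le32_to_cpu().
	 */
	static bool example_txdes_owned_by_dma(const struct ftmac100_txdes *txdes)
	{
		return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
	}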
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b3939a5f7b03..ed18450fd2cc 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1809,8 +1809,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
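Editor's note: the strlcpy() to strscpy() conversions in these get_drvinfo() callbacks keep the same truncation behaviour but drop strlcpy()'s problematic return value (the untruncated source length, which can require reading past the end of an unterminated source). strscpy() returns the number of bytes copied, or -E2BIG on truncation, so callers that care can check it, as in this illustrative fragment.

	/* Illustrative only: strscpy() reports truncation, strlcpy() did not. */
	ssize_t n = strscpy(info->driver, DRV_NAME, sizeof(info->driver));

	if (n == -E2BIG)
		pr_warn("driver name truncated\n");	/* still NUL-terminated */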
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index e04e1c5cb013..ce866ae3df03 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE || COMPILE_TEST
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,15 +23,16 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select PAGE_POOL
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 45634579adb6..31cfa121333d 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -197,12 +197,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
struct dpaa_percpu_priv *percpu_priv;
const u8 *mac_addr;
int i, err;
@@ -216,10 +219,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
}
net_dev->netdev_ops = dpaa_ops;
- mac_addr = priv->mac_dev->addr;
+ mac_addr = mac_dev->addr;
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
+ net_dev->mem_start = (unsigned long)mac_dev->vaddr;
+ net_dev->mem_end = (unsigned long)mac_dev->vaddr_end;
net_dev->min_mtu = ETH_MIN_MTU;
net_dev->max_mtu = dpaa_get_max_mtu();
@@ -246,7 +249,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
- err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ err = mac_dev->change_addr(mac_dev->fman_mac,
(const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
@@ -261,6 +264,9 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+ mac_dev->net_dev = net_dev;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
@@ -288,10 +294,9 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+ mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
error = fman_port_disable(mac_dev->port[i]);
@@ -826,10 +831,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
+ /* Set different thresholds based on the configured MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
+ * callback.
*/
if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
cs_th = DPAA_CS_THRESHOLD_10G;
@@ -858,6 +863,31 @@ out_error:
return err;
}
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = mac_dev->net_dev;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
@@ -2886,6 +2916,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
static int dpaa_phy_init(struct net_device *net_dev)
{
@@ -2893,6 +2924,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
+ u32 phy_vendor;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -2905,9 +2937,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
/* Unless the PHY is capable of rate adaptation */
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
/* remove any features not supported by the controller */
ethtool_convert_legacy_u32_to_link_mode(mask,
mac_dev->if_support);
@@ -2942,11 +2976,12 @@ static int dpaa_open(struct net_device *net_dev)
goto mac_start_failed;
}
- err = priv->mac_dev->start(mac_dev);
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
+ phy_start(priv->mac_dev->phy_dev);
netif_tx_start_all_queues(net_dev);
@@ -3148,8 +3183,7 @@ static int dpaa_napi_add(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_add(net_dev, &percpu_priv->np.napi,
- dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
}
return 0;
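Editor's note: dpaa_eth_cgr_set_speed() is exposed to the MAC layer through the new mac_dev->update_speed hook so the congestion threshold can follow the negotiated link speed rather than the configured maximum. The fman/mac side that invokes the hook is not part of this hunk; the sketch below only illustrates the expected call direction (the function name is hypothetical).

	/* Hypothetical sketch of the MAC-side adjust_link path calling back
	 * into the Ethernet driver; the real caller lives in the fman/mac code.
	 */
	static void example_adjust_link_notify(struct mac_device *mac_dev, int speed)
	{
		if (mac_dev->update_speed)
			mac_dev->update_speed(mac_dev, speed);
	}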
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..258eb6c8f4c0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -18,7 +18,7 @@ static ssize_t dpaa_eth_show_addr(struct device *dev,
if (mac_dev)
return sprintf(buf, "%llx",
- (unsigned long long)mac_dev->res->start);
+ (unsigned long long)mac_dev->vaddr);
else
return sprintf(buf, "none");
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 73f07881ce2d..769e936a263c 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -80,9 +80,9 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 75d51572693d..8d029addddad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4565,8 +4565,7 @@ static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
- netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index c9bee9a0c9b2..49ff85633783 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -549,7 +549,7 @@ void dpaa2_mac_get_strings(u8 *data)
int i;
for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index e507e9065214..2b5909fa93cf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -3373,9 +3373,8 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
* different queues for each switch port.
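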
*/
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
- netif_napi_add(ethsw->ports[0]->netdev,
- &ethsw->fq[i].napi, dpaa2_switch_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..e0e8dfd13793 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 4470a4a3e4c3..54bc92fc6bf0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2116,13 +2116,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@@ -2155,13 +2156,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_clear_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_clear_rxbdr(hw, priv->rx_ring[i]);
udelay(1);
}
@@ -2169,13 +2171,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
@@ -2263,13 +2265,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
@@ -2432,10 +2435,11 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
u8 num_tc;
@@ -2452,7 +2456,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ enetc_set_bdr_prio(hw, tx_ring->index, 0);
}
return 0;
@@ -2471,7 +2475,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ enetc_set_bdr_prio(hw, tx_ring->index, i);
}
/* Reset the number of netdev queues based on the TC count */
@@ -2486,25 +2490,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2600,52 +2585,29 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2657,11 +2619,6 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
@@ -2808,8 +2765,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
v->rx_dim_en = true;
}
INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
v->count_tx_rings = v_tx_rings;
for (j = 0; j < v_tx_rings; j++) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 29922c20531f..161930a65f61 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -455,7 +453,11 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
data, *dma);
}
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
@@ -465,22 +467,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -521,6 +525,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
}
#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
@@ -540,4 +545,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
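Editor's note: with PSFP handling moved out of the common enetc_set_features() and into the PF's ndo_set_features, the common helper no longer needs a return value. The corresponding VF-side callback is not in this hunk; the sketch below is an assumption about what it reduces to, not quoted code from enetc_vf.c.

	/* Assumed shape of the VF callback after this change; see enetc_vf.c
	 * for the authoritative version.
	 */
	static int example_vf_set_features(struct net_device *ndev,
					   netdev_features_t features)
	{
		enetc_set_features(ndev, features);

		return 0;
	}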
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index ff872e40ce85..c8369e3752b0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -125,68 +125,68 @@ static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
- { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
- { ENETC_PM0_RALN, "MAC rx alignment errors" },
- { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
- { ENETC_PM0_RFRM, "MAC rx valid frames" },
- { ENETC_PM0_RFCS, "MAC rx fcs errors" },
- { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
- { ENETC_PM0_RERR, "MAC rx frame errors" },
- { ENETC_PM0_RUCA, "MAC rx unicast frames" },
- { ENETC_PM0_RMCA, "MAC rx multicast frames" },
- { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
- { ENETC_PM0_RDRP, "MAC rx dropped packets" },
- { ENETC_PM0_RPKT, "MAC rx packets" },
- { ENETC_PM0_RUND, "MAC rx undersized packets" },
- { ENETC_PM0_R64, "MAC rx 64 byte packets" },
- { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
- { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
- { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
- { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
- { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
- { ENETC_PM0_ROVR, "MAC rx oversized packets" },
- { ENETC_PM0_RJBR, "MAC rx jabber packets" },
- { ENETC_PM0_RFRG, "MAC rx fragment packets" },
- { ENETC_PM0_RCNP, "MAC rx control packets" },
- { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
- { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
- { ENETC_PM0_TOCT, "MAC tx octets" },
- { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
- { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
- { ENETC_PM0_TFRM, "MAC tx frames" },
- { ENETC_PM0_TFCS, "MAC tx fcs errors" },
- { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frame errors" },
- { ENETC_PM0_TUCA, "MAC tx unicast frames" },
- { ENETC_PM0_TMCA, "MAC tx multicast frames" },
- { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
- { ENETC_PM0_TPKT, "MAC tx packets" },
- { ENETC_PM0_TUND, "MAC tx undersized packets" },
- { ENETC_PM0_T64, "MAC tx 64 byte packets" },
- { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
- { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
- { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
- { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
- { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
- { ENETC_PM0_TCNP, "MAC tx control packets" },
- { ENETC_PM0_TDFR, "MAC tx deferred packets" },
- { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
- { ENETC_PM0_TSCOL, "MAC tx single collisions" },
- { ENETC_PM0_TLCOL, "MAC tx late collisions" },
- { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
- { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
- { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
- { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
- { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
- { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
- { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
- { ENETC_PFDMSAPR, "SI pruning discarded frames" },
- { ENETC_PICDR(0), "ICM DR0 discarded frames" },
- { ENETC_PICDR(1), "ICM DR1 discarded frames" },
- { ENETC_PICDR(2), "ICM DR2 discarded frames" },
- { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -236,7 +236,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -258,7 +258,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strlcpy(p, enetc_port_counters[i].name,
+ strscpy(p, enetc_port_counters[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -301,6 +301,113 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+
+ *ranges = enetc_rmon_ranges;
+}
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_mac_stats(hw, 0, mac_stats);
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+}
+
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -766,6 +873,10 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 647c87f73bf7..18ca1f42b1f7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -276,58 +276,60 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFMCAPR 0x1b38
#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
-/* MAC counters */
-#define ENETC_PM0_REOCT 0x8100
-#define ENETC_PM0_RALN 0x8110
-#define ENETC_PM0_RXPF 0x8118
-#define ENETC_PM0_RFRM 0x8120
-#define ENETC_PM0_RFCS 0x8128
-#define ENETC_PM0_RVLAN 0x8130
-#define ENETC_PM0_RERR 0x8138
-#define ENETC_PM0_RUCA 0x8140
-#define ENETC_PM0_RMCA 0x8148
-#define ENETC_PM0_RBCA 0x8150
-#define ENETC_PM0_RDRP 0x8158
-#define ENETC_PM0_RPKT 0x8160
-#define ENETC_PM0_RUND 0x8168
-#define ENETC_PM0_R64 0x8170
-#define ENETC_PM0_R127 0x8178
-#define ENETC_PM0_R255 0x8180
-#define ENETC_PM0_R511 0x8188
-#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1522 0x8198
-#define ENETC_PM0_R1523X 0x81A0
-#define ENETC_PM0_ROVR 0x81A8
-#define ENETC_PM0_RJBR 0x81B0
-#define ENETC_PM0_RFRG 0x81B8
-#define ENETC_PM0_RCNP 0x81C0
-#define ENETC_PM0_RDRNTP 0x81C8
-#define ENETC_PM0_TEOCT 0x8200
-#define ENETC_PM0_TOCT 0x8208
-#define ENETC_PM0_TCRSE 0x8210
-#define ENETC_PM0_TXPF 0x8218
-#define ENETC_PM0_TFRM 0x8220
-#define ENETC_PM0_TFCS 0x8228
-#define ENETC_PM0_TVLAN 0x8230
-#define ENETC_PM0_TERR 0x8238
-#define ENETC_PM0_TUCA 0x8240
-#define ENETC_PM0_TMCA 0x8248
-#define ENETC_PM0_TBCA 0x8250
-#define ENETC_PM0_TPKT 0x8260
-#define ENETC_PM0_TUND 0x8268
-#define ENETC_PM0_T64 0x8270
-#define ENETC_PM0_T127 0x8278
-#define ENETC_PM0_T255 0x8280
-#define ENETC_PM0_T511 0x8288
-#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1522 0x8298
-#define ENETC_PM0_T1523X 0x82A0
-#define ENETC_PM0_TCNP 0x82C0
-#define ENETC_PM0_TDFR 0x82D0
-#define ENETC_PM0_TMCOL 0x82D8
-#define ENETC_PM0_TSCOL 0x82E0
-#define ENETC_PM0_TLCOL 0x82E8
-#define ENETC_PM0_TECOL 0x82F0
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
@@ -943,13 +945,13 @@ static inline u32 enetc_usecs_to_cycles(u32 usecs)
}
/* port time gating control register */
-#define ENETC_QBV_PTGCR_OFFSET 0x11a00
-#define ENETC_QBV_TGE BIT(31)
-#define ENETC_QBV_TGPE BIT(30)
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
/* Port time gating capability register */
-#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
-#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
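Editor's note: parameterising the former ENETC_PM0_* counter offsets by MAC index prepares for reading the preemptible MAC (pMAC, index 1) at a fixed 0x1000 stride next to the express MAC (eMAC, index 0). This patch only ever passes 0; the fragment below merely illustrates how a pMAC counter would be read once such support exists, and is an assumption rather than current driver behaviour.

	/* Illustration only: same counter from the eMAC and, assuming the SoC
	 * exposes one, from the pMAC.
	 */
	u32 emac_tx_frames = enetc_port_rd(hw, ENETC_PM_TFRM(0));
	u32 pmac_tx_frames = enetc_port_rd(hw, ENETC_PM_TFRM(1));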
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c4a0e836d4f0..bdf94335ee99 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -516,15 +516,34 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
int tc;
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
for (tc = 0; tc < 8; tc++)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_reset_ptcmsdur(hw);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -709,6 +728,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -722,7 +748,30 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -739,7 +788,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 582a663ed0ba..e6416332ec79 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -7,18 +7,19 @@
#include <linux/math64.h>
#include <linux/refcount.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
- return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
- & ENETC_QBV_MAX_GCL_LEN_MASK;
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
}
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,16 +40,15 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
static int enetc_setup_taprio(struct net_device *ndev,
struct tc_taprio_qopt_offload *admin_conf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
@@ -61,15 +61,14 @@ static int enetc_setup_taprio(struct net_device *ndev,
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
return -EINVAL;
gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ tge = enetc_rd(hw, ENETC_PTGCR);
if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
priv->active_offloads &= ~ENETC_F_QBV;
@@ -117,27 +116,28 @@ static int enetc_setup_taprio(struct net_device *ndev,
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
- if (!err)
- priv->active_offloads |= ENETC_F_QBV;
+ if (err)
+ return err;
- return err;
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
}
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int err;
int i;
@@ -147,16 +147,14 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
return -EBUSY;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? i : 0);
err = enetc_setup_taprio(ndev, taprio);
if (err)
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? 0 : i);
return err;
@@ -178,7 +176,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -199,15 +197,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -224,13 +222,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -239,7 +237,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interference_size is maxSizedFrame.
*
@@ -259,8 +257,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -280,10 +278,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -293,6 +291,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -304,12 +303,11 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
return -EINVAL;
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -1517,6 +1515,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
@@ -1578,3 +1599,23 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..dfcaac302e24 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..33f84a30e167 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,8 +16,12 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
+#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
@@ -343,8 +347,11 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
+
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
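
(Aside, not part of the patch: the new FEC_ENET_RX_FRSIZE is no longer a fixed 2048 bytes but the largest payload that still fits in one page_pool page after the XDP headroom and the trailing skb_shared_info. A rough sketch of the arithmetic, assuming 4 KiB pages and an aligned skb_shared_info of about 320 bytes; both values vary with architecture and config:)

	/* Illustrative arithmetic only; the real constants come from kernel headers. */
	enum {
		PAGE_SIZE_ASSUMED      = 4096, /* typical PAGE_SIZE */
		XDP_HEADROOM_ASSUMED   = 256,  /* XDP_PACKET_HEADROOM */
		SHINFO_ALIGNED_ASSUMED = 320,  /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), approx. */
		/* FEC_ENET_RX_FRSIZE would come out to 4096 - 256 - 320 = 3520 bytes per RX buffer */
		RX_FRSIZE_APPROX = PAGE_SIZE_ASSUMED - XDP_HEADROOM_ASSUMED - SHINFO_ALIGNED_ASSUMED,
	};
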
@@ -498,6 +505,9 @@ struct bufdesc_ex {
/* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -511,6 +521,12 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
+struct fec_enet_priv_txrx_info {
+ int offset;
+ struct page *page;
+ struct sk_buff *skb;
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -526,7 +542,14 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
};
struct fec_stop_mode_gpr {
@@ -579,6 +602,7 @@ struct fec_enet_private {
struct device_node *phy_node;
bool rgmii_txc_dly;
bool rgmii_rxc_dly;
+ bool rpm_active;
int link;
int full_duplex;
int speed;
@@ -608,6 +632,7 @@ struct fec_enet_private {
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
unsigned int rx_align;
@@ -634,6 +659,8 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
+ struct imx_sc_ipc *ipc_handle;
+
u64 ethtool_stats[];
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e8e2aa1e7f01..98d5cd313fdd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -66,6 +66,8 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
#include <asm/cacheflush.h>
@@ -111,7 +113,8 @@ static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -155,6 +158,13 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_DELAYED_CLKS_SUPPORT,
};
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -188,6 +198,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx8qm-fec",
.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
}, {
+ .name = "s32v234-fec",
+ .driver_data = (kernel_ulong_t)&fec_s32v234_info,
+ }, {
/* sentinel */
}
};
@@ -203,6 +216,7 @@ enum imx_fec_type {
IMX6UL_FEC,
IMX8MQ_FEC,
IMX8QM_FEC,
+ S32V234_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -215,6 +229,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -409,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = FEC_ENET_RX_FRSIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
@@ -1167,6 +1224,34 @@ fec_restart(struct net_device *ndev)
}
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
+}
+
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -1182,6 +1267,8 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
BIT(stop_gpr->bit), 0);
} else if (pdata && pdata->sleep_mode_enable) {
pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
}
}
@@ -1407,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev)
fec_enet_tx_queue(ndev, i);
}
-static int
+static int __maybe_unused
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1427,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
return 0;
}
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1453,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
return true;
}
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
+{
+ struct page *new_page;
+ dma_addr_t phys_addr;
+
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ WARN_ON(!new_page);
+ rxq->rx_skb_info[index].page = new_page;
+
+ rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -1465,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb_new = NULL;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
@@ -1474,8 +1576,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
bool vlan_packet_rcvd = false;
u16 vlan_tag;
int index = 0;
- bool is_copybreak;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct page *page;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1527,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- skb = rxq->rx_skbuff[index];
+ page = rxq->rx_skb_info[index].page;
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+ fec_enet_update_cbd(rxq, bdp, index);
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
- need_swap);
- if (!is_copybreak) {
- skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (unlikely(!skb_new)) {
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- }
-
- prefetch(skb->data - NET_IP_ALIGN);
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
skb_put(skb, pkt_len - 4);
+ skb_mark_for_recycle(skb);
data = skb->data;
- if (!is_copybreak && need_swap)
+ if (need_swap)
swap_buffer(data, pkt_len);
#if !defined(CONFIG_M5272)
@@ -1606,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
- if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
- rxq->rx_skbuff[index] = skb_new;
- fec_enet_new_rxbdp(ndev, bdp, skb_new);
- }
-
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -2104,13 +2190,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2294,9 +2380,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name,
+ strscpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2959,26 +3045,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->bd.base;
- for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
- }
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3068,24 +3147,31 @@ static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
goto err_alloc;
- if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
- dev_kfree_skb(skb);
- goto err_alloc;
- }
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skbuff[i] = skb;
+ rxq->rx_skb_info[i].page = page;
+ rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3210,6 +3296,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3251,6 +3340,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3559,7 +3651,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
@@ -3817,6 +3909,10 @@ fec_probe(struct platform_device *pdev)
!of_property_read_bool(np, "fsl,err006687-workaround-present"))
fep->quirks |= FEC_QUIRK_ERR006687;
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@ -4014,6 +4110,7 @@ failed_rgmii_delay:
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
failed_stop_mode:
+failed_ipc_init:
failed_phy:
dev_id--;
failed_ioremap:
@@ -4058,6 +4155,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
rtnl_lock();
if (netif_running(ndev)) {
@@ -4082,6 +4180,15 @@ static int __maybe_unused fec_suspend(struct device *dev)
}
/* It's safe to disable clocks since interrupts are masked */
fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
}
rtnl_unlock();
@@ -4112,6 +4219,9 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
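
(Aside, not part of the patch: the suspend hunk above records in rpm_active whether the device was runtime-active when system sleep began, forces a runtime suspend in that case, and undoes it symmetrically on resume. A minimal sketch of the same pattern with hypothetical driver names, not FEC code:)

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	struct foo_priv {
		bool rpm_active; /* device was runtime-active when system sleep began */
	};

	static int foo_suspend(struct device *dev)
	{
		struct foo_priv *p = dev_get_drvdata(dev);

		/* Only force a runtime suspend if the device is not already suspended. */
		p->rpm_active = !pm_runtime_status_suspended(dev);
		if (p->rpm_active)
			return pm_runtime_force_suspend(dev); /* balanced by force_resume below */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_priv *p = dev_get_drvdata(dev);

		/* Undo the forced suspend only if one was actually forced. */
		if (p->rpm_active)
			pm_runtime_force_resume(dev);
		return 0;
	}
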
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28215f3..cffd9ad499dd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -582,7 +578,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
int ret;
fep->ptp_caps.owner = THIS_MODULE;
- strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 8f0db61cb1f6..9d85fb136e34 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f2ede1360f03..2ea575a46675 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1950a8936bc0..6617932fd3fd 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_dtsec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -327,7 +301,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Number of individual addresses in registers for this station */
@@ -840,73 +814,45 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->maximum_frame = new_val;
-
- return 0;
-}
-
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
-
- return 0;
-}
-
-static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) &
- ~TCTRL_GTS, &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) &
- ~RCTRL_GRS, &regs->rctrl);
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}
-static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_stop(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
/* Graceful stop - Assert the graceful Rx stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
- if (dtsec->fm_rev_info.major == 2) {
- /* Workaround for dTSEC Errata A002 */
- usleep_range(100, 200);
- } else {
- /* Workaround for dTSEC Errata A004839 */
- usleep_range(10, 50);
- }
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
}
/* Graceful stop - Assert the graceful Tx stop bit */
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2) {
- /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
- pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
- } else {
- tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
- iowrite32be(tmp, &regs->tctrl);
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
- /* Workaround for dTSEC Errata A0012, A0014 */
- usleep_range(10, 50);
- }
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
}
}
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+static int dtsec_enable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -916,58 +862,42 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
/* Enable */
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp |= MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= MACCFG1_TX_EN;
-
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
iowrite32be(tmp, &regs->maccfg1);
/* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+static void dtsec_disable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
/* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp &= ~MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~MACCFG1_TX_EN;
-
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
iowrite32be(tmp, &regs->maccfg1);
-
- return 0;
}
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
- u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
@@ -989,26 +919,20 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
if (en)
@@ -1017,25 +941,18 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
-
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
* Station address have to be swapped (big endian to little endian
@@ -1043,12 +960,13 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_add
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct eth_hash_entry *hash_entry;
@@ -1114,7 +1032,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -1133,7 +1051,7 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
@@ -1158,7 +1076,8 @@ int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct list_head *pos;
@@ -1229,7 +1148,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -1258,21 +1177,15 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg2);
@@ -1293,12 +1206,12 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
tmp &= ~DTSEC_ECNTRL_R100M;
iowrite32be(tmp, &regs->ecntrl);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_restart_autoneg(struct fman_mac *dtsec)
+static int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
u16 tmp_reg16;
@@ -1316,20 +1229,31 @@ int dtsec_restart_autoneg(struct fman_mac *dtsec)
return 0;
}
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+static void adjust_link_dtsec(struct mac_device *mac_dev)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
- *mac_version = ioread32be(&regs->tsec_id);
+ return;
+ }
- return 0;
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable)
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
@@ -1382,7 +1306,7 @@ int dtsec_set_exception(struct fman_mac *dtsec,
return 0;
}
-int dtsec_init(struct fman_mac *dtsec)
+static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
@@ -1476,7 +1400,7 @@ int dtsec_init(struct fman_mac *dtsec)
return 0;
}
-int dtsec_free(struct fman_mac *dtsec)
+static int dtsec_free(struct fman_mac *dtsec)
{
free_init_resources(dtsec);
@@ -1487,13 +1411,11 @@ int dtsec_free(struct fman_mac *dtsec)
return 0;
}
-struct fman_mac *dtsec_config(struct fman_mac_params *params)
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *dtsec;
struct dtsec_cfg *dtsec_drv_param;
- void __iomem *base_addr;
-
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
@@ -1510,10 +1432,10 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = base_addr;
- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
dtsec->max_speed = params->max_speed;
- dtsec->phy_if = params->phy_if;
+ dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
DTSEC_IMASK_RXCEN |
@@ -1530,34 +1452,87 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
- dtsec->dev_id = params->dev_id;
+ dtsec->dev_id = mac_dev;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
- if (!params->internal_phy_node) {
+ /* Save FMan revision */
+ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+ return dtsec;
+
+err_dtsec:
+ kfree(dtsec);
+ return NULL;
+}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_dtsec;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node) {
pr_err("TBI PHY node is not available\n");
- goto err_dtsec_drv_param;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
+ dtsec->tbiphy = of_phy_find_device(phy_node);
if (!dtsec->tbiphy) {
pr_err("of_phy_find_device (TBI PHY) failed\n");
- goto err_dtsec_drv_param;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
-
put_device(&dtsec->tbiphy->mdio.dev);
- /* Save FMan revision */
- fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
- return dtsec;
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
-err_dtsec_drv_param:
- kfree(dtsec_drv_param);
-err_dtsec:
- kfree(dtsec);
- return NULL;
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 68512c3bd6e5..8c72d280c51a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __DTSEC_H
@@ -35,27 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *dtsec_config(struct fman_mac_params *params);
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
-int dtsec_adjust_link(struct fman_mac *dtsec,
- u16 speed);
-int dtsec_restart_autoneg(struct fman_mac *dtsec);
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_init(struct fman_mac *dtsec);
-int dtsec_free(struct fman_mac *dtsec);
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable);
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index e1bdfed16134..e73f6ef3c6ee 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
index c4640de3f4cb..2cb0df453074 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.h
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __KEYGEN_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 19f327efdaff..65887a3160d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -41,6 +41,7 @@
#include <linux/if_ether.h>
struct fman_mac;
+struct mac_device;
/* Ethernet Address */
typedef u8 enet_addr_t[ETH_ALEN];
@@ -75,16 +76,6 @@ typedef u8 enet_addr_t[ETH_ALEN];
#define ETH_HASH_ENTRY_OBJ(ptr) \
hlist_entry_safe(ptr, struct eth_hash_entry, node)
-/* Enumeration (bit flags) of communication modes (Transmit,
- * receive or both).
- */
-enum comm_mode {
- COMM_MODE_NONE = 0, /* No transmit/receive communication */
- COMM_MODE_RX = 1, /* Only receive communication */
- COMM_MODE_TX = 2, /* Only transmit communication */
- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
-};
-
/* FM MAC Exceptions */
enum fman_mac_exceptions {
FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
@@ -168,30 +159,23 @@ struct eth_hash_entry {
struct list_head node;
};
-typedef void (fman_mac_exception_cb)(void *dev_id,
- enum fman_mac_exceptions exceptions);
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
/* FMan MAC config input */
struct fman_mac_params {
- /* Base of memory mapped FM MAC registers */
- void __iomem *base_addr;
- /* MAC address of device; First octet is sent first */
- enet_addr_t addr;
/* MAC ID; numbering of dTSEC and 1G-mEMAC:
* 0 - FM_MAX_NUM_OF_1G_MACS;
* numbering of 10G-MAC (TGEC) and 10G-mEMAC:
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* PHY interface */
- phy_interface_t phy_if;
/* Note that the speed should indicate the maximum rate that
* this MAC should support rather than the actual speed;
*/
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
/* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
@@ -200,8 +184,6 @@ struct fman_mac_params {
* synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
*/
bool basex_if;
- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
- struct device_node *internal_phy_node;
};
struct eth_hash_t {
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 2216b7f51d26..32d26cf17843 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_memac.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/io.h>
@@ -337,7 +311,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Pointer to driver's global address hash table */
@@ -712,7 +686,7 @@ static bool is_init_done(struct memac_cfg *memac_drv_params)
return false;
}
-int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_enable(struct fman_mac *memac)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -721,36 +695,26 @@ int memac_enable(struct fman_mac *memac, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
-
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+static void memac_disable(struct fman_mac *memac)
+
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
-
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -769,7 +733,7 @@ int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
return 0;
}
-int memac_adjust_link(struct fman_mac *memac, u16 speed)
+static int memac_adjust_link(struct fman_mac *memac, u16 speed)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -809,39 +773,26 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
return 0;
}
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->max_frame_length = new_val;
-
- return 0;
-}
-
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->reset_on_init = enable;
-
- return 0;
-}
-
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
+static void adjust_link_memac(struct mac_device *mac_dev)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- memac->memac_drv_param->fixed_link = fixed_link;
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
- return 0;
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time)
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -878,7 +829,7 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
return 0;
}
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -897,7 +848,8 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
@@ -907,7 +859,8 @@ int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_add
return 0;
}
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry;
@@ -940,7 +893,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_allmulti(struct fman_mac *memac, bool enable)
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
@@ -963,12 +916,13 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
-int memac_set_tstamp(struct fman_mac *memac, bool enable)
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
{
return 0; /* Always enabled. */
}
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -1001,8 +955,8 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable)
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
{
u32 bit_mask = 0;
@@ -1024,13 +978,13 @@ int memac_set_exception(struct fman_mac *memac,
return 0;
}
-int memac_init(struct fman_mac *memac)
+static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
u8 i;
enet_addr_t eth_addr;
bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link;
+ struct fixed_phy_status *fixed_link = NULL;
int err;
u32 reg32 = 0;
@@ -1141,7 +1095,7 @@ int memac_init(struct fman_mac *memac)
return 0;
}
-int memac_free(struct fman_mac *memac)
+static int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
@@ -1154,13 +1108,12 @@ int memac_free(struct fman_mac *memac)
return 0;
}
-struct fman_mac *memac_config(struct fman_mac_params *params)
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *memac;
struct memac_cfg *memac_drv_param;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the m_emac data structure */
memac = kzalloc(sizeof(*memac), GFP_KERNEL);
if (!memac)
@@ -1178,38 +1131,121 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
set_dflts(memac_drv_param);
- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- memac->regs = base_addr;
+ memac->regs = mac_dev->vaddr;
memac->max_speed = params->max_speed;
- memac->phy_if = params->phy_if;
+ memac->phy_if = mac_dev->phy_if;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
- memac->dev_id = params->dev_id;
+ memac->dev_id = mac_dev;
memac->fm = params->fm;
memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
+ return memac;
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct device_node *phy_node;
+	struct fixed_phy_status *fixed_link = NULL;
+ struct fman_mac *memac;
+
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_memac;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+
+ if (params->max_speed == SPEED_10000)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- if (!params->internal_phy_node) {
+ phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
+ if (!phy_node) {
pr_err("PCS PHY node is not available\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ memac->pcsphy = of_phy_find_device(phy_node);
if (!memac->pcsphy) {
pr_err("of_phy_find_device (PCS PHY) failed\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
}
- return memac;
+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_fm_mac_free;
+
+ fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
+ if (!fixed_link) {
+ err = -ENOMEM;
+ goto _return_fm_mac_free;
+ }
+
+ mac_dev->phy_node = of_node_get(mac_node);
+ phy = of_phy_find_device(mac_dev->phy_node);
+ if (!phy) {
+ err = -EINVAL;
+ of_node_put(mac_dev->phy_node);
+ goto _return_fixed_link_free;
+ }
+
+ fixed_link->link = phy->link;
+ fixed_link->speed = phy->speed;
+ fixed_link->duplex = phy->duplex;
+ fixed_link->pause = phy->pause;
+ fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
+ memac->memac_drv_param->fixed_link = fixed_link;
+ }
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fixed_link_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fixed_link_free:
+ kfree(fixed_link);
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+_return:
+ return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 3820f7a22983..5a3a14f9684f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
@@ -38,26 +11,10 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
-struct fman_mac *memac_config(struct fman_mac_params *params);
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
-int memac_adjust_link(struct fman_mac *memac, u16 speed);
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link);
-int memac_enable(struct fman_mac *memac, enum comm_mode mode);
-int memac_disable(struct fman_mac *memac, enum comm_mode mode);
-int memac_init(struct fman_mac *memac);
-int memac_free(struct fman_mac *memac);
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time);
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable);
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_set_allmulti(struct fman_mac *memac, bool enable);
-int memac_set_tstamp(struct fman_mac *memac, bool enable);
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 7ad317e622bc..f557d68e5b76 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#include "fman_muram.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 453bf849eee1..3643af61bae2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
+
#ifndef __FM_MURAM_EXT
#define __FM_MURAM_EXT
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 4c9d05c45c03..ab90fe2bee5e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 82f12661a46d..4917fe8f0617 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_PORT_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index 248f5bcca468..0fac60aa5283 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fman_sp.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
index 820b7f63088f..a62dd21c81f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.h
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -1,32 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_SP_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 311c1906e044..5a4be54ad459 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_tgec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -206,7 +180,7 @@ struct fman_mac {
/* MAC address of device; */
u64 addr;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* pointer to driver's global address hash table */
@@ -419,7 +393,7 @@ static bool is_init_done(struct tgec_cfg *cfg)
return false;
}
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_enable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -428,34 +402,25 @@ int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+static void tgec_disable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(tgec->cfg));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -473,18 +438,9 @@ int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
return 0;
}
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
-{
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
- tgec->cfg->max_frame_length = new_val;
-
- return 0;
-}
-
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct tgec_regs __iomem *regs = tgec->regs;
@@ -496,7 +452,7 @@ int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
return 0;
}
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -514,7 +470,8 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
@@ -525,7 +482,8 @@ int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_add
return 0;
}
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
@@ -562,7 +520,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
@@ -585,7 +543,7 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -605,7 +563,8 @@ int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -642,20 +601,15 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+static void tgec_adjust_link(struct mac_device *mac_dev)
{
- struct tgec_regs __iomem *regs = tgec->regs;
-
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
- *mac_version = ioread32be(&regs->tgec_id);
-
- return 0;
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
}
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable)
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
@@ -681,7 +635,7 @@ int tgec_set_exception(struct fman_mac *tgec,
return 0;
}
-int tgec_init(struct fman_mac *tgec)
+static int tgec_init(struct fman_mac *tgec)
{
struct tgec_cfg *cfg;
enet_addr_t eth_addr;
@@ -764,7 +718,7 @@ int tgec_init(struct fman_mac *tgec)
return 0;
}
-int tgec_free(struct fman_mac *tgec)
+static int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
@@ -774,13 +728,12 @@ int tgec_free(struct fman_mac *tgec)
return 0;
}
-struct fman_mac *tgec_config(struct fman_mac_params *params)
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *tgec;
struct tgec_cfg *cfg;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
if (!tgec)
@@ -798,8 +751,8 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = base_addr;
- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
@@ -819,7 +772,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
- tgec->dev_id = params->dev_id;
+ tgec->dev_id = mac_dev;
tgec->fm = params->fm;
/* Save FMan revision */
@@ -827,3 +780,52 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
return tgec;
}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = tgec_adjust_link;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index b28b20b26148..768b8d165e05 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __TGEC_H
@@ -35,23 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *tgec_config(struct fman_mac_params *params);
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_init(struct fman_mac *tgec);
-int tgec_free(struct fman_mac *tgec);
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable);
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 39ae965cd4f6..7b7526fd7da3 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -54,20 +28,12 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
u8 cell_index;
struct fman *fman;
- struct device_node *internal_phy_node;
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
- struct fixed_phy_status *fixed_link;
u16 speed;
- u16 max_speed;
-
- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};
struct mac_address {
@@ -75,222 +41,21 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
{
- struct mac_device *mac_dev;
- struct mac_priv_s *priv;
-
- mac_dev = handle;
- priv = mac_dev->priv;
-
if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
}
- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
__func__, ex);
}
-static int set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!params->base_addr)
- return -ENOMEM;
-
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
- params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
- params->exception_cb = mac_exception;
- params->event_cb = mac_exception;
- params->dev_id = mac_dev;
- params->internal_phy_node = priv->internal_phy_node;
-
- return 0;
-}
-
-static int tgec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = tgec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 10G MAC, disable Tx ECC exception */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_10G_TX_ECC_ER, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- tgec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int dtsec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = dtsec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- dtsec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int memac_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- if (priv->max_speed == SPEED_10000)
- params.phy_if = PHY_INTERFACE_MODE_XGMII;
-
- mac_dev->fman_mac = memac_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan MEMAC\n");
-
- goto _return;
-
-_return_fm_mac_free:
- memac_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int start(struct mac_device *mac_dev)
-{
- int err;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
- if (!err && phy_dev)
- phy_start(phy_dev);
-
- return err;
-}
-
-static int stop(struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
-
- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-}
-
-static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
{
struct mac_priv_s *priv;
struct mac_address *old_addr, *tmp;
@@ -424,109 +189,6 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
}
EXPORT_SYMBOL(fman_get_pause_cfg);
-static void adjust_link_void(struct mac_device *mac_dev)
-{
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void setup_dtsec(struct mac_device *mac_dev)
-{
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
- mac_dev->set_allmulti = dtsec_set_allmulti;
- mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
-}
-
-static void setup_tgec(struct mac_device *mac_dev)
-{
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
- mac_dev->set_allmulti = tgec_set_allmulti;
- mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
-}
-
-static void setup_memac(struct mac_device *mac_dev)
-{
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
- mac_dev->set_allmulti = memac_set_allmulti;
- mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
-}
-
#define DTSEC_SUPPORTED \
(SUPPORTED_10baseT_Half \
| SUPPORTED_10baseT_Full \
@@ -577,7 +239,7 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
- pdev->dev.parent = priv->dev;
+ pdev->dev.parent = mac_dev->dev;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
@@ -601,9 +263,9 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec" },
- { .compatible = "fsl,fman-xgec" },
- { .compatible = "fsl,fman-memac" },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
MODULE_DEVICE_TABLE(of, mac_match);
@@ -611,50 +273,33 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
struct platform_device *of_dev;
- struct resource res;
+ struct resource *res;
struct mac_priv_s *priv;
+ struct fman_mac_params params;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!mac_dev)
+ return -ENOMEM;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!priv)
+ return -ENOMEM;
/* Save private information */
mac_dev->priv = priv;
- priv->dev = dev;
-
- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
- setup_dtsec(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "tbi-handle", 0);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
- setup_tgec(mac_dev);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
- setup_memac(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "pcsphy-handle", 0);
- } else {
- dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
- mac_node);
- err = -EINVAL;
- goto _return;
- }
+ mac_dev->dev = dev;
INIT_LIST_HEAD(&priv->mc_addr_list);
@@ -663,8 +308,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -694,42 +338,33 @@ static int mac_probe(struct platform_device *_of_dev)
of_node_put(dev_node);
/* Get the address of the memory mapped registers */
- err = of_address_to_resource(mac_node, 0, &res);
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
- goto _return_of_get_parent;
+ res = platform_get_mem_or_io(_of_dev, 0);
+ if (!res) {
+ dev_err(dev, "could not get registers\n");
+ return -EINVAL;
}
- mac_dev->res = __devm_request_region(dev,
- fman_get_mem_region(priv->fman),
- res.start, resource_size(&res),
- "mac");
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
- goto _return_of_get_parent;
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman), res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ return err;
}
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!priv->vaddr) {
+ mac_dev->vaddr = devm_ioremap(dev, res->start, resource_size(res));
+ if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- err = -EIO;
- goto _return_of_get_parent;
+ return -EIO;
}
+ mac_dev->vaddr_end = mac_dev->vaddr + resource_size(res);
- if (!of_device_is_available(mac_node)) {
- err = -ENODEV;
- goto _return_of_get_parent;
- }
+ if (!of_device_is_available(mac_node))
+ return -ENODEV;
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
priv->cell_index = (u8)val;
@@ -743,15 +378,13 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = nph;
- goto _return_of_get_parent;
+ return nph;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -760,8 +393,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_node_put;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -793,7 +425,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->phy_if = phy_if;
priv->speed = phy2speed[mac_dev->phy_if];
- priv->max_speed = priv->speed;
+ params.max_speed = priv->speed;
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
@@ -801,7 +433,7 @@ static int mac_probe(struct platform_device *_of_dev)
SUPPORTED_100baseT_Half);
/* Gigabit support (no half-duplex) */
- if (priv->max_speed == 1000)
+ if (params.max_speed == 1000)
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
@@ -810,42 +442,18 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the rest of the PHY information */
mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
-
- err = of_phy_register_fixed_link(mac_node);
- if (err)
- goto _return_of_get_parent;
-
- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
- GFP_KERNEL);
- if (!priv->fixed_link) {
- err = -ENOMEM;
- goto _return_of_get_parent;
- }
-
- mac_dev->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
- of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
- }
- priv->fixed_link->link = phy->link;
- priv->fixed_link->speed = phy->speed;
- priv->fixed_link->duplex = phy->duplex;
- priv->fixed_link->pause = phy->pause;
- priv->fixed_link->asym_pause = phy->asym_pause;
+ params.basex_if = false;
+ params.mac_id = priv->cell_index;
+ params.fm = (void *)priv->fman;
+ params.exception_cb = mac_exception;
+ params.event_cb = mac_exception;
- put_device(&phy->mdio.dev);
- }
-
- err = mac_dev->init(mac_dev);
+ err = init(mac_dev, mac_node, &params);
if (err < 0) {
dev_err(dev, "mac_dev->init() = %d\n", err);
of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
+ return err;
}
/* pause frame autonegotiation enabled */
@@ -872,13 +480,10 @@ static int mac_probe(struct platform_device *_of_dev)
priv->eth_dev = NULL;
}
- goto _return;
+ return err;
_return_of_node_put:
of_node_put(dev_node);
-_return_of_get_parent:
- kfree(priv->fixed_link);
-_return:
return err;
}
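
The mac_probe() rework above is a common modernisation pattern: the compatible-string if/else ladder becomes a per-compatible init callback carried in the of_device_id match data, and the hand-rolled of_address_to_resource()/__devm_request_region()/devm_ioremap() sequence becomes platform_get_mem_or_io() plus devm-managed mapping, so every error path collapses to a plain return. Below is a minimal sketch of that shape, not the driver itself: everything prefixed my_ (and the "vendor," compatibles) is invented for illustration, and the real driver additionally claims the window against the FMan parent resource with devm_request_resource().

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

typedef int (*my_init_fn)(struct platform_device *pdev, void __iomem *regs);

static int my_mac_a_init(struct platform_device *pdev, void __iomem *regs)
{
	dev_info(&pdev->dev, "MAC A mapped at %p\n", regs);
	return 0;
}

static int my_mac_b_init(struct platform_device *pdev, void __iomem *regs)
{
	dev_info(&pdev->dev, "MAC B mapped at %p\n", regs);
	return 0;
}

/* One init callback per compatible, looked up through the match data. */
static const struct of_device_id my_mac_match[] = {
	{ .compatible = "vendor,mac-a", .data = my_mac_a_init },
	{ .compatible = "vendor,mac-b", .data = my_mac_b_init },
	{ }
};
MODULE_DEVICE_TABLE(of, my_mac_match);

static int my_mac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	my_init_fn init = of_device_get_match_data(dev);
	struct resource *res;
	void __iomem *regs;

	if (!init)
		return -ENODEV;

	/* Register window comes from the platform device rather than a
	 * re-parse of the DT node; devm_* keeps every error path a
	 * plain return with no manual unwinding.
	 */
	res = platform_get_mem_or_io(pdev, 0);
	if (!res)
		return -EINVAL;

	regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!regs)
		return -EIO;

	return init(pdev, regs);
}

The match-data approach also means adding a new MAC flavour touches only the table and its init routine, not the probe body.
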
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index daa285a9b8b2..b95d384271bd 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MAC_H
@@ -45,13 +19,16 @@ struct fman_mac;
struct mac_priv_s;
struct mac_device {
- struct resource *res;
+ void __iomem *vaddr;
+ void __iomem *vaddr_end;
+ struct device *dev;
u8 addr[ETH_ALEN];
struct fman_port *port[2];
u32 if_support;
struct phy_device *phy_dev;
phy_interface_t phy_if;
struct device_node *phy_node;
+ struct net_device *net_dev;
bool autoneg_pause;
bool rx_pause_req;
@@ -61,9 +38,8 @@ struct mac_device {
bool promisc;
bool allmulti;
- int (*init)(struct mac_device *mac_dev);
- int (*start)(struct mac_device *mac_dev);
- int (*stop)(struct mac_device *mac_dev);
+ int (*enable)(struct fman_mac *mac_dev);
+ void (*disable)(struct fman_mac *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
@@ -81,6 +57,8 @@ struct mac_device {
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*update_speed)(struct mac_device *mac_dev, int speed);
+
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
};
@@ -97,5 +75,6 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index b3dae17e067e..8844a9a04fcf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -791,7 +791,7 @@ static int fs_enet_close(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int fs_get_regs_len(struct net_device *dev)
@@ -883,9 +883,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-extern int fs_mii_connect(struct net_device *dev);
-extern void fs_mii_disconnect(struct net_device *dev);
-
/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
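
The strlcpy() to strscpy() conversions in this hunk (and in the other get_drvinfo hunks below) are mechanical: both helpers NUL-terminate, but strscpy() returns -E2BIG on truncation rather than the would-be source length, so truncation can be detected without re-measuring the source. A tiny hedged example of the only caller-visible difference, with the wrapper name invented:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void my_copy_name(char *dst, size_t len, const char *src)
{
	/* strscpy() reports truncation directly; strlcpy() returned
	 * strlen(src), which callers rarely checked correctly.
	 */
	if (strscpy(dst, src, len) == -E2BIG)
		pr_warn("name '%s' truncated\n", src);
}
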
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c210d0f..61f4b6e50d29 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bf1524b68e..b2def295523a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3233,7 +3233,7 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, NAPI_POLL_WEIGHT);
+ gfar_poll_rx_sq);
netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx_sq, 2);
}
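
The netif_napi_add() changes repeated through the rest of this series drop the weight argument: the core now applies the default NAPI budget (NAPI_POLL_WEIGHT, 64), drivers that genuinely need another budget call netif_napi_add_weight(), and TX-only instances use netif_napi_add_tx_weight() as gianfar does above. A minimal sketch, with the poll handler and private struct invented for illustration:

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

/* Stub poll handler: processes nothing and immediately completes. */
static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void my_register_napi(struct net_device *ndev, struct my_priv *priv)
{
	/* No weight argument any more; the core default is
	 * NAPI_POLL_WEIGHT unless netif_napi_add_weight() is used.
	 */
	netif_napi_add(ndev, &priv->napi, my_poll);
}
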
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 81fb68730138..b2b0d3c26fcc 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -163,7 +163,7 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}
/* Return the length of the register structure */
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 823221c912ab..7a4cb4f07c32 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3712,7 +3712,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
- netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll);
dev->mtu = 1500;
dev->max_mtu = 1518;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 69b2b98b1525..601beb93d3b3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -337,8 +337,8 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index ec90da1de030..d7d39a58cd80 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -355,7 +355,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
if (ret)
return ret;
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
else if (is_acpi_node(fwnode))
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index b0d733e9a7c6..4859493471db 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1046,8 +1046,8 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index f247b7ad3a88..095f51c4d9d9 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -339,8 +339,7 @@ static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
return PTR_ERR(irq);
fp->num_rx_irqs++;
- netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll);
}
netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
@@ -1802,16 +1801,14 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
if (rc)
goto unreg_devlink;
- if (fp->dl_port.devlink)
- devlink_port_type_eth_set(&fp->dl_port, netdev);
+ devlink_port_type_eth_set(&fp->dl_port, netdev);
return 0;
unreg_devlink:
ed->netdevs[portid] = NULL;
fun_ktls_cleanup(fp);
- if (fp->dl_port.devlink)
- devlink_port_unregister(&fp->dl_port);
+ devlink_port_unregister(&fp->dl_port);
free_stats:
fun_free_stats_area(fp);
free_rss:
@@ -1830,11 +1827,9 @@ static void fun_destroy_netdev(struct net_device *netdev)
struct funeth_priv *fp;
fp = netdev_priv(netdev);
- if (fp->dl_port.devlink) {
- devlink_port_type_clear(&fp->dl_port);
- devlink_port_unregister(&fp->dl_port);
- }
+ devlink_port_type_clear(&fp->dl_port);
unregister_netdev(netdev);
+ devlink_port_unregister(&fp->dl_port);
fun_ktls_cleanup(fp);
fun_free_stats_area(fp);
fun_free_rss(fp);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 53b7e95213a8..671f51135c26 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -206,9 +206,9 @@ struct funeth_rxq {
#define FUN_QSTAT_READ(q, seq, stats_copy) \
do { \
- seq = u64_stats_fetch_begin(&(q)->syncp); \
+ seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
stats_copy = (q)->stats; \
- } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+ } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
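
The funeth and gve hunks move the statistics snapshot loops to the _irq flavour of the u64_stats helpers. The read loop itself is unchanged; the _irq variants only differ on 32-bit builds, where the reader briefly keeps interrupts off so it cannot spin indefinitely against a counter update running in IRQ/softirq context on the same CPU. A sketch of the snapshot pattern, with the stats struct invented:

#include <linux/u64_stats_sync.h>

struct my_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void my_stats_snapshot(const struct my_ring_stats *s,
			      u64 *pkts, u64 *bytes)
{
	unsigned int start;

	/* Retry until no writer updated the counters mid-read. */
	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
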
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 50b384910c83..7b9a2d9d9624 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6cafee55efc3..d3e3ac242bfc 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -526,8 +526,7 @@ static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
}
static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
@@ -1274,9 +1273,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
do {
- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8c939628e2d8..2e6461b0ea8b 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
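
The GFP_KERNEL to GFP_ATOMIC change in gve_alloc_page_dqo() is the usual no-sleeping-context fix: the DQO RX buffer refill can run where sleeping is not allowed, so a sleeping allocation flag is unsafe and an atomic one that fails fast is used instead. A one-line illustration (helper name invented):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative RX refill helper assumed to run from NAPI (softirq) context. */
static struct page *my_rx_refill_page(void)
{
	/* GFP_KERNEL may sleep and is therefore invalid here;
	 * GFP_ATOMIC fails fast instead of blocking.
	 */
	return alloc_page(GFP_ATOMIC);
}
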
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84ef494bd60..50c3f5d6611f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -830,8 +830,8 @@ static int hip04_set_coalesce(struct net_device *netdev,
static void hip04_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
@@ -990,7 +990,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
- netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll);
hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index d7e62eca050f..ffcf797dfa90 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1243,7 +1243,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto out_phy_node;
- netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll);
if (HAS_CAP_TSO(priv->hw_cap)) {
ret = hix5hd2_init_sg_desc_queue(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d94cc8c6681f..7cf10d1e2b31 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2109,8 +2109,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
hns_nic_tx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2122,8 +2121,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
hns_nic_rx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 7d4ae467f3ad..abcd7877f7d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -233,6 +233,17 @@ struct hclgevf_mbx_arq_ring {
__le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
+};
+
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 94f80e1c4020..0179fc288f5f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -97,13 +97,15 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
};
-#define hnae3_dev_fd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
-#define hnae3_dev_gro_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
@@ -159,6 +161,12 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_cq_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -187,6 +195,7 @@ struct hns3_mac_stats {
/* hnae3 loop mode */
enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
@@ -223,6 +232,8 @@ enum hnae3_fec_mode {
HNAE3_FEC_AUTO = 0,
HNAE3_FEC_BASER,
HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
HNAE3_FEC_USER_DEF,
};
@@ -270,6 +281,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TC_SCH_INFO,
HNAE3_DBG_CMD_QOS_PAUSE_CFG,
HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
HNAE3_DBG_CMD_QOS_BUF_CFG,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
@@ -308,6 +320,11 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -560,14 +577,17 @@ struct hnae3_ae_ops {
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex);
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
- u8 duplex);
+ u8 duplex, u8 lane_num);
void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode);
int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
@@ -737,6 +757,8 @@ struct hnae3_ae_ops {
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
};
struct hnae3_dcb_ops {
@@ -745,6 +767,8 @@ struct hnae3_dcb_ops {
int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
/* DCBX configuration */
u8 (*getdcbx)(struct hnae3_handle *);
@@ -774,6 +798,8 @@ struct hnae3_tc_info {
bool mqprio_active;
};
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -784,6 +810,9 @@ struct hnae3_knic_private_info {
u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
@@ -815,6 +844,7 @@ struct hnae3_roce_private_info {
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c8b151d29f53..f671a63cecde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -52,9 +52,9 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
{
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
}
@@ -91,6 +91,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
@@ -150,6 +151,10 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -162,6 +167,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
static void
@@ -220,8 +226,10 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= ae_dev->pdev->revision;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
hclge_comm_parse_capability(ae_dev, is_pf, resp);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 7a7d4cf9bf35..b1f9383b418f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -20,6 +20,7 @@
#define HCLGE_COMM_PHY_IMP_EN_B 2
#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
#define hclge_comm_dev_phy_imp_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
@@ -102,6 +103,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
@@ -339,6 +341,10 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index d2ec4c573bf8..3b6dbf158b98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -56,6 +56,32 @@ static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
@@ -83,6 +109,8 @@ static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
.ieee_setets = hns3_dcbnl_ieee_setets,
.ieee_getpfc = hns3_dcbnl_ieee_getpfc,
.ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
.getdcbx = hns3_dcbnl_getdcbx,
.setdcbx = hns3_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 93aeb615191d..66feb23f7b7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -106,6 +106,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.init = hns3_dbg_common_file_init,
},
{
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
@@ -395,6 +402,12 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support modify vlan filter state",
.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
}
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 35d70041b9e8..4cb2421e71a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2963,6 +2963,48 @@ static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -2988,6 +3030,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -3271,12 +3314,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (hnae3_ae_dev_gro_supported(ae_dev))
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->features |= NETIF_F_NTUPLE;
- }
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4650,7 +4692,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
- hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ hns3_nic_common_poll);
}
return 0;
@@ -5782,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev,
return 0;
}
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
static const struct hns3_hw_error_info hns3_hw_err[] = {
{ .type = HNAE3_PPU_POISON_ERROR,
.msg = "PPU poison" },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 4a3253692dcc..133a054af6b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -744,4 +744,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
enum dim_cq_period_mode tx_mode,
enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 4c7988e308a2..cdf76fb58d45 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -69,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -95,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -304,6 +304,10 @@ out:
static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -322,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
}
-static void hns3_selftest_prepare(struct net_device *ndev,
- bool if_running, int (*st_param)[2])
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test start\n");
-
- hns3_set_selftest_param(h, st_param);
-
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -371,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
-
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test end\n");
}
static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
struct ethtool_test *eth_test, u64 *data)
{
- int test_index = 0;
+ int test_index = HNAE3_LOOP_APP;
u32 i;
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
@@ -401,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
}
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
/**
* hns3_self_test - self test
* @ndev: net device
@@ -410,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
if (hns3_nic_resetting(ndev)) {
@@ -418,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev,
return;
}
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
return;
- hns3_selftest_prepare(ndev, if_running, st_param);
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* external loopback test requires that the link is up and the duplex is
+ * full, do external test first to reduce the whole test time
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
hns3_do_selftest(ndev, st_param, eth_test, data);
hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -712,7 +739,8 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
- &cmd->base.duplex);
+ &cmd->base.duplex,
+ &cmd->lanes);
/* 2.get link mode */
if (ops->get_link_mode)
@@ -794,6 +822,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
u8 autoneg;
u32 speed;
u8 duplex;
@@ -806,9 +835,9 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
return 0;
if (ops->get_ksettings_an_result) {
- ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
- cmd->base.duplex == duplex)
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
return 0;
}
@@ -845,10 +874,14 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
netif_dbg(handle, drv, netdev,
- "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
netdev->phydev ? "phy" : "mac",
- cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev) {
@@ -886,7 +919,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (ops->cfg_mac_speed_dup_h)
ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, (u8)(cmd->lanes));
return ret;
}
@@ -1612,6 +1645,19 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
h->msg_enable = msg_level;
}
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
+
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
@@ -1621,12 +1667,12 @@ static unsigned int loc_to_eth_fec(u8 loc_fec)
eth_fec |= ETHTOOL_FEC_AUTO;
if (loc_fec & BIT(HNAE3_FEC_RS))
eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
if (loc_fec & BIT(HNAE3_FEC_BASER))
eth_fec |= ETHTOOL_FEC_BASER;
-
- /* if nothing is set, then FEC is off */
- if (!eth_fec)
- eth_fec = ETHTOOL_FEC_OFF;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
return eth_fec;
}
@@ -1637,12 +1683,13 @@ static unsigned int eth_to_loc_fec(unsigned int eth_fec)
u32 loc_fec = 0;
if (eth_fec & ETHTOOL_FEC_OFF)
- return loc_fec;
-
+ loc_fec |= BIT(HNAE3_FEC_NONE);
if (eth_fec & ETHTOOL_FEC_AUTO)
loc_fec |= BIT(HNAE3_FEC_AUTO);
if (eth_fec & ETHTOOL_FEC_RS)
loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
if (eth_fec & ETHTOOL_FEC_BASER)
loc_fec |= BIT(HNAE3_FEC_BASER);
@@ -1668,6 +1715,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
fec->fec = loc_to_eth_fec(fec_ability);
fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
return 0;
}
@@ -2051,6 +2100,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
@@ -2081,6 +2131,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
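
The self-test rework teaches hns3_self_test() ethtool's external-loopback contract: the extra leg only runs when userspace sets ETH_TEST_FL_EXTERNAL_LB (for example, ethtool -t <dev> external_lb), and the driver confirms that the corresponding result slot is valid by setting ETH_TEST_FL_EXTERNAL_LB_DONE. A hedged sketch of that contract with the actual traffic test stubbed out:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Stub: run traffic through an externally looped-back port, 0 on success. */
static u64 my_run_external_lb(struct net_device *ndev)
{
	return 0;
}

static void my_self_test(struct net_device *ndev,
			 struct ethtool_test *etest, u64 *data)
{
	if (!(etest->flags & ETH_TEST_FL_OFFLINE))
		return;

	/* Only attempt external loopback when userspace asked for it,
	 * and report that the result slot is meaningful via ..._LB_DONE.
	 */
	if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
		data[0] = my_run_external_lb(ndev);
		if (data[0])
			etest->flags |= ETH_TEST_FL_FAILED;
		etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
	}
}
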
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f9d89511eb32..43cada51d8cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -321,7 +321,9 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
#define HCLGE_TQP_ENABLE_B 0
@@ -347,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -359,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 69b8673436ca..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -359,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -543,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
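
hclge_ieee_setapp()/hclge_ieee_delapp() above plug the standard dcbnl APP table into the DSCP-to-TC map: only the IEEE_8021QAZ_APP_SEL_DSCP selector is accepted, the mapping is mirrored into kinfo.dscp_prio[], and the superseded entry is dropped from the core table once the hardware map is committed. A small sketch of just the validation step; the 64 and 8 limits written out here correspond to HNAE3_MAX_DSCP and HNAE3_MAX_USER_PRIO in the driver, and the helper name is invented:

#include <linux/errno.h>
#include <net/dcbnl.h>

/* Accept only DSCP-selector APP entries within the valid ranges. */
static int my_validate_dscp_app(const struct dcb_app *app)
{
	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
		return -EINVAL;
	if (app->protocol >= 64)	/* DSCP is a 6-bit field */
		return -EINVAL;
	if (app->priority >= 8)		/* 802.1p priorities 0..7 */
		return -EINVAL;
	return 0;
}
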
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9b870e79c290..142415c84c6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,6 +14,8 @@ static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -1115,10 +1117,11 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
return 0;
}
+#define HCLGE_DBG_TC_MASK 0x0F
+
static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
int len)
{
-#define HCLGE_DBG_TC_MASK 0x0F
#define HCLGE_DBG_TC_BIT_WIDTH 4
struct hclge_qos_pri_map_cmd *pri_map;
@@ -1152,6 +1155,58 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
return 0;
}
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting has 4 bits, so each byte saves two dscp
+ * setting
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
+
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
@@ -1517,7 +1572,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
char *tcam_buf;
int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
return -EOPNOTSUPP;
@@ -1585,6 +1640,9 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
u64 cnt;
u8 i;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
pos += scnprintf(buf + pos, len - pos,
"func_id\thit_times\n");
@@ -2374,6 +2432,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.dbg_dump = hclge_dbg_dump_qos_pri_map,
},
{
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fae79764dc44..6962a9d69cf8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -71,6 +71,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -148,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -679,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -715,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -770,6 +778,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size, p);
p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -1003,6 +1016,27 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
@@ -1101,34 +1135,36 @@ static void hclge_convert_setting_kr(u16 speed_ability,
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If firmware has reported fec_ability, no need to convert by speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
@@ -1574,7 +1610,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1617,7 +1653,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -2589,7 +2625,7 @@ static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2613,6 +2649,7 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2624,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (!lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2730,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
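hclge_parse_fec_stats_lanes() above flattens per-lane counters that span several command descriptors: each descriptor carries a fixed number of 32-bit data words (HCLGE_DESC_DATA_LEN), and the walk moves to the next descriptor once the current one is exhausted. A minimal userspace sketch of the same index walk follows; the structure and constant names are illustrative only.

/* Sketch (not driver code) of accumulating per-lane counters spread over
 * several fixed-size descriptors.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_WORDS_PER_DESC	6	/* stands in for HCLGE_DESC_DATA_LEN */
#define DEMO_MAX_LANES		8

struct demo_desc {
	uint32_t data[DEMO_WORDS_PER_DESC];
};

static void demo_accumulate_lanes(uint64_t *per_lane, const struct demo_desc *desc,
				  unsigned int desc_cnt, unsigned int lanes)
{
	unsigned int desc_index = 0, data_index = 0, i;

	for (i = 0; i < lanes; i++) {
		if (data_index >= DEMO_WORDS_PER_DESC) {
			desc_index++;
			data_index = 0;
		}
		if (desc_index >= desc_cnt)
			return;	/* ran out of descriptors, keep what we have */

		per_lane[i] += desc[desc_index].data[data_index++];
	}
}

int main(void)
{
	struct demo_desc desc[2] = {
		{ .data = { 1, 2, 3, 4, 5, 6 } },
		{ .data = { 7, 8 } },
	};
	uint64_t per_lane[DEMO_MAX_LANES] = { 0 };
	unsigned int i;

	demo_accumulate_lanes(per_lane, desc, 2, DEMO_MAX_LANES);
	for (i = 0; i < DEMO_MAX_LANES; i++)
		printf("lane %u: %llu\n", i, (unsigned long long)per_lane[i]);
	return 0;
}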
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
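hclge_update_fec_stats() above relies on test_and_set_bit() on HCLGE_STATE_FEC_STATS_UPDATING as a non-blocking guard: the first caller runs the hardware query, any concurrent caller sees the bit already set and simply returns. A rough userspace analogue of that guard, using a C11 atomic flag in place of the kernel bitops (names illustrative only), is:

/* Sketch (not driver code): first caller does the slow update, concurrent
 * callers skip it instead of blocking.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag demo_updating = ATOMIC_FLAG_INIT;

static void demo_update_stats(void)
{
	/* atomic_flag_test_and_set() returns the previous value; if it was
	 * already set, another caller is mid-update, so bail out.
	 */
	if (atomic_flag_test_and_set(&demo_updating)) {
		printf("update already in progress, skipping\n");
		return;
	}

	printf("querying hardware counters...\n");	/* slow work here */

	atomic_flag_clear(&demo_updating);
}

int main(void)
{
	demo_update_stats();			/* runs the query */
	atomic_flag_test_and_set(&demo_updating);
	demo_update_stats();			/* sees the guard, skips */
	return 0;
}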
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2744,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2796,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2988,6 +3181,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3037,7 +3233,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3119,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3302,13 +3499,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -5334,7 +5531,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -6339,7 +6536,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6395,7 +6592,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6431,9 +6628,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6458,6 +6652,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6473,7 +6670,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6497,7 +6694,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6715,7 +6912,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6778,7 +6975,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6878,7 +7075,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when there is already fd rule existed add by user,
@@ -7167,6 +7364,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7220,6 +7423,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7282,6 +7488,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7705,7 +7914,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7732,6 +7941,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -10793,7 +11004,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -10804,6 +11015,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -11443,6 +11656,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11510,6 +11727,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -12763,6 +12981,21 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
}
}
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = h->kinfo.tc_map_mode;
+ if (priority)
+ *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ h->kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12786,6 +13019,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_comm_get_rss_key_size,
@@ -12865,6 +13099,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
.clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12872,7 +13107,7 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
@@ -12887,7 +13122,7 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 18caddd541f8..495b639b0dc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -216,6 +216,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -258,6 +259,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -488,6 +490,26 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
@@ -826,6 +848,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -1070,7 +1093,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index e1012f7f9b73..a7b06c63143c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -779,17 +779,284 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+ /* PF driver should not reply to IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
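The mailbox refactor above replaces a long switch statement with a table of handler functions indexed by the message code, so supporting a new opcode becomes a single table entry plus a small handler. The sketch below shows the same dispatch-table pattern in isolation, including the bounds/NULL check that unknown codes fall back to; all names are illustrative and not the driver's API.

/* Sketch (not driver code) of an opcode-indexed handler table. */
#include <stdio.h>

enum demo_opcode {
	DEMO_OP_RESET,
	DEMO_OP_GET_INFO,
	DEMO_OP_MAX,
};

struct demo_request {
	unsigned int code;
	int payload;
};

typedef int (*demo_handler_fn)(const struct demo_request *req);

static int demo_handle_reset(const struct demo_request *req)
{
	printf("reset requested (payload %d)\n", req->payload);
	return 0;
}

static int demo_handle_get_info(const struct demo_request *req)
{
	printf("info requested (payload %d)\n", req->payload);
	return 0;
}

static const demo_handler_fn demo_handlers[DEMO_OP_MAX] = {
	[DEMO_OP_RESET]    = demo_handle_reset,
	[DEMO_OP_GET_INFO] = demo_handle_get_info,
};

static int demo_dispatch(const struct demo_request *req)
{
	if (req->code >= DEMO_OP_MAX || !demo_handlers[req->code]) {
		fprintf(stderr, "unsupported message, code = %u\n", req->code);
		return -1;
	}
	return demo_handlers[req->code](req);
}

int main(void)
{
	struct demo_request req = { .code = DEMO_OP_GET_INFO, .payload = 42 };

	return demo_dispatch(&req);
}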
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
- bool is_del = false;
unsigned int flag;
- int ret = 0;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
@@ -814,152 +1081,16 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
trace_hclge_pf_mbx_get(hdev, req);
/* clear the resp_msg before processing every mailbox message */
memset(&resp_msg, 0, sizeof(resp_msg));
-
- switch (req->msg.code) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_GET_RING_VECTOR_MAP:
- ret = hclge_get_vf_ring_vector_map(vport, req,
- &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get VF ring vector map\n",
- ret);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- hclge_set_vf_promisc_mode(vport, req);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- hclge_get_vf_queue_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- hclge_get_vf_queue_depth(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_BASIC_INFO:
- hclge_get_basic_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_push_vf_link_status(vport);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "failed to inform link stat to VF, ret = %d\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_RESET:
- ret = hclge_reset_vf(vport);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, is_del);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- hclge_get_vf_media_type(vport, &resp_msg);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- hclge_get_vf_mac_addr(vport, &resp_msg);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- case HCLGE_MBX_HANDLE_VF_TBL:
- hclge_handle_vf_tbl(vport, req);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg.code);
- break;
- }
-
- /* PF driver should not reply IMP */
- if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
- req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
- resp_msg.status = ret;
- if (time_is_before_jiffies(hdev->last_mbx_scheduled +
- HCLGE_MBX_SCHED_TIMEOUT))
- dev_warn(&hdev->pdev->dev,
- "resp vport%u mbx(%u,%u) late\n",
- req->mbx_src_vfid,
- req->msg.code,
- req->msg.subcode);
-
- hclge_gen_resp_to_vf(vport, req, &resp_msg);
- }
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
-
- /* reinitialize ret after complete the mbx message processing */
- ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03d63b6a9b2b..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 2f33b036a47a..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -248,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -266,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+ /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each dscp setting has 4 bits, so each byte holds two dscp
+ * settings
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -1275,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1646,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index d943943912f7..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -30,6 +30,9 @@ enum hclge_opcode_type;
#define HCLGE_TM_PF_MAX_PRI_NUM 8
#define HCLGE_TM_PF_MAX_QSET_NUM 8
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -262,4 +265,6 @@ int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 26f87330173e..db6f7cdba958 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2125,7 +2125,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
@@ -3177,7 +3177,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle)
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed,
- u8 *duplex)
+ u8 *duplex, u32 *lane_num)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3429,7 +3429,7 @@ static struct hnae3_ae_algo ae_algovf = {
.pdev_id_table = ae_algovf_pci_tbl,
};
-static int hclgevf_init(void)
+static int __init hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
@@ -3444,7 +3444,7 @@ static int hclgevf_init(void)
return 0;
}
-static void hclgevf_exit(void)
+static void __exit hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
index e9e00cfa1329..e10f739d8339 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -12,7 +12,6 @@
#define TBL_ID_FUNC_CFG_SM_INST 1
#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
-#define HINIC_FUNCTION_CONFIGURE_TABLE 1
struct hinic_cmd_lt_rd {
u8 status;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 93192f58ac88..f4b680286911 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -55,7 +55,6 @@
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
-#define OBJ_STR_MAX_LEN 32
struct hw2ethtool_link_mode {
enum ethtool_link_mode_bit_indices link_mode_bit;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index a627237f694b..78190e88cd75 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -82,11 +82,6 @@
struct hinic_func_to_io, \
cmdqs)
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
enum completion_format {
COMPLETE_DIRECT = 0,
COMPLETE_SGE = 1,
@@ -509,8 +504,8 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
*
* Return 0 - Success, negative - Failure
**/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
+static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
+ enum hinic_set_arm_qtype q_type, u32 q_id)
{
struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
struct hinic_hwif *hwif = cmdqs->hwif;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 9c413e963a04..ff09cf0ed52b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -177,9 +177,6 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
enum hinic_mod_type mod, u8 cmd,
struct hinic_cmdq_buf *buf_in, u64 *out_param);
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
void __iomem **db_area);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index 7e84e4e33fff..d56e7413ace0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -22,7 +22,6 @@
(HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
(HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 2127a48749a8..94f470556295 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -29,7 +29,6 @@
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"
-#define IO_STATUS_TIMEOUT 100
#define OUTBOUND_STATE_TIMEOUT 100
#define DB_STATE_TIMEOUT 100
@@ -42,11 +41,6 @@ enum intr_type {
INTR_MSIX_TYPE,
};
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
/**
* parse_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -837,8 +831,8 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
return 0;
}
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
{
u16 out_size = sizeof(*interrupt_info);
struct hinic_pfhwdev *pfhwdev;
@@ -1041,13 +1035,6 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
hinic_free_hwif(hwdev->hwif);
}
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->max_qps;
-}
-
/**
* hinic_hwdev_num_qps - return the number QPs available for use
* @hwdev: the NIC HW device
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 416492e48274..d2d89b0a5ef0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -566,8 +566,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devli
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev);
-
int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
@@ -587,9 +585,6 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
enum hinic_msix_state flag);
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info);
-
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
struct hinic_msix_config *interrupt_info);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 0428faa68e80..88567305d06e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -58,39 +58,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
}
/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
* hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
* @hwif: the HW interface of a pci function device
* @msix_index: msix_index
@@ -115,8 +82,6 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
* hinic_set_pf_action - set action on pf channel
* @hwif: the HW interface of a pci function device
* @action: action on pf channel
- *
- * Return 0 - Success, negative - Failure
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c06f2253151e..3d588896a367 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -131,10 +131,6 @@
(((u32)(val) & HINIC_MSIX_##member##_MASK) << \
HINIC_MSIX_##member##_SHIFT)
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
@@ -269,11 +265,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
u8 lli_timer_cfg, u8 lli_credit_limit,
u8 resend_timer);
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
enum hinic_msix_state flag);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 5078c0c73863..3f9c31d29215 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -117,7 +117,6 @@ enum hinic_mbox_tx_status {
#define MBOX_WB_STATUS_MASK 0xFF
#define MBOX_WB_ERROR_CODE_MASK 0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
-#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
#define MBOX_WB_STATUS_NOT_FINISHED 0x00
#define MBOX_STATUS_FINISHED(wb) \
@@ -130,11 +129,8 @@ enum hinic_mbox_tx_status {
#define SEQ_ID_START_VAL 0
#define SEQ_ID_MAX_VAL 42
-#define DST_AEQ_IDX_DEFAULT_VAL 0
-#define SRC_AEQ_IDX_DEFAULT_VAL 0
#define NO_DMA_ATTRIBUTE_VAL 0
-#define HINIC_MGMT_RSP_AEQN 0
#define HINIC_MBOX_RSP_AEQN 2
#define HINIC_MBOX_RECV_AEQN 0
@@ -146,7 +142,6 @@ enum hinic_mbox_tx_status {
#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
-#define MBOX_RESPONSE_ERROR 0x1
#define MBOX_MSG_ID_MASK 0xFF
#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
@@ -621,7 +616,7 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
return false;
}
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
u64 mbox_header = *((u64 *)header);
@@ -649,7 +644,7 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_send_mbox *send_mbox;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
index 46953190d29e..33ac7814d3b3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -150,10 +150,6 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod);
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
-
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
-
int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 336248aa2e48..537a8098bc4e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -472,8 +472,7 @@ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
return atomic_read(&wq->delta) - 1;
}
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
+static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
{
u32 ctrl_size, task_size, bufdesc_size;
@@ -588,18 +587,16 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
/**
* hinic_sq_prepare_wqe - prepare wqe before insert to the queue
* @sq: send queue
- * @prod_idx: pi value
* @sq_wqe: wqe to prepare
* @sges: sges for use by the wqe for send for buf addresses
* @nr_sges: number of sges
**/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
+ struct hinic_sge *sges, int nr_sges)
{
int i;
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
+ sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
sq_prepare_task(&sq_wqe->task);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 0dfa51ad5855..178dcc874370 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -175,9 +175,8 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
u32 l4_len,
u32 offset, u32 ip_ident, u32 mss);
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
+ struct hinic_sge *sges, int nr_sges);
void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
unsigned int cos);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 4daf6bf291ec..e1a1735c00c1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -175,8 +175,6 @@ static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
/**
* cmdq_free_page - free page from cmdq
* @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
**/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index f4b6d2c1061f..c6bdeed5606e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -261,23 +261,6 @@
#define HINIC_RSS_TYPE_GET(val, member) \
(((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
enum hinic_l3_offload_type {
L3TYPE_UNKNOWN = 0,
IPV6_PKT = 1,
@@ -305,18 +288,10 @@ enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
};
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
enum hinic_l2type {
HINIC_L2TYPE_ETH = 0,
};
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
struct hinic_cmdq_header {
u32 header_info;
u32 saved_data;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index c23ee2ddbce3..e1f54a2f28b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -960,8 +960,6 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @in_size: input size
* @buf_out: output buffer
* @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
**/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
@@ -1382,8 +1380,6 @@ err_pci_regions:
return err;
}
-#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
-
static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
{
struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index a866bea65110..d649c6e323c8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -50,7 +50,7 @@
* hinic_rxq_clean_stats - Clean the statistics of specific queue
* @rxq: Logical Rx Queue
**/
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
/**
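The stats hunks above (and the matching hinic_tx.c ones further down) only swap the readers over to the _irq variants of the u64_stats helpers; the pattern itself is unchanged. A minimal sketch of the writer/reader pairing this relies on — the struct and function names here are illustrative, not taken from the driver:

	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64 pkts;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	/* writer side, e.g. in the NAPI poll loop */
	static void demo_stats_update(struct demo_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->pkts++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side: retry until a consistent snapshot was observed */
	static void demo_stats_read(struct demo_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*pkts = s->pkts;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}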
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 507dcbae9085..8f7bd6a049bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -41,8 +41,6 @@ struct hinic_rxq {
struct napi_struct napi;
};
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
-
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index df555847afb5..a5f08b969e3f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -24,6 +24,7 @@ MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto,
#define HINIC_VLAN_PRIORITY_SHIFT 13
#define HINIC_ADD_VLAN_IN_MAC 0x8000
#define HINIC_TX_RATE_TABLE_FULL 12
+#define HINIC_MAX_QOS 7
static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
u16 vlan_id, u16 func_id)
@@ -774,7 +775,7 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u16 vlanprio, cur_vlanprio;
sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
@@ -820,7 +821,7 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
cur_trust = nic_io->vf_infos[vf].trust;
/* same request, so just return success */
- if ((setting && cur_trust) || (!setting && !cur_trust))
+ if (setting == cur_trust)
return 0;
err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
@@ -940,7 +941,7 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
/* same request, so just return success */
- if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ if (setting == cur_spoofchk)
return 0;
err = hinic_set_vf_spoofchk(sriov_info->hwdev,
@@ -1131,8 +1132,8 @@ static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
}
-static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
- u16 start_vf_id, u16 end_vf_id)
+static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
{
struct hinic_dev *nic_dev;
u16 func_idx, idx;
@@ -1145,8 +1146,6 @@ static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
HINIC_HW_WQ_PAGE_SIZE);
hinic_clear_vf_infos(nic_dev, idx);
}
-
- return 0;
}
int hinic_vf_func_init(struct hinic_hwdev *hwdev)
@@ -1293,7 +1292,7 @@ int hinic_pci_sriov_disable(struct pci_dev *pdev)
return 0;
}
-int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct hinic_sriov_info *sriov_info;
int err;
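For readers unfamiliar with the constants introduced above: VLAN_N_VID is 4096, so "vlan >= VLAN_N_VID" preserves the old "vlan > 4095" bound, and HINIC_MAX_QOS (7) simply names the 3-bit 802.1p priority limit. The trust and spoofchk hunks are likewise pure simplifications: for two booleans, "(a && b) || (!a && !b)" is exactly "a == b".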
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index ba627a362f9a..d4d4e63d31ea 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -98,8 +98,6 @@ void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
int hinic_pci_sriov_disable(struct pci_dev *dev);
-int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
-
int hinic_vf_func_init(struct hinic_hwdev *hwdev);
void hinic_vf_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 5051cdff2384..e91476c8ff8b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -74,7 +74,7 @@ enum hinic_offload_type {
* hinic_txq_clean_stats - Clean the statistics of specific queue
* @txq: Logical Tx Queue
**/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}
/**
@@ -530,7 +530,7 @@ netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
@@ -614,7 +614,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
if (err)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index b3c8657774a7..91dc778362f3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -40,8 +40,6 @@ struct hinic_txq {
struct napi_struct napi;
};
-void hinic_txq_clean_stats(struct hinic_txq *txq);
-
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6cb86032ce46..1db5b6790a41 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -159,8 +159,8 @@ static int ehea_nway_reset(struct net_device *dev)
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 ehea_get_msglevel(struct net_device *dev)
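This drvinfo hunk (and the many similar strlcpy conversions below in e100, e1000, e1000e, i40e and iavf) moves to strscpy(), whose behavioural differences are the return value and that it never reads further into the source than it copies. A hedged usage sketch — "label" and "src" are placeholder names, not driver fields:

	char label[8];
	ssize_t n;

	n = strscpy(label, src, sizeof(label));
	if (n == -E2BIG)
		pr_debug("source did not fit, copy was truncated\n");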
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 5dc302880f5f..294bdbbeacc3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1546,7 +1546,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
kfree(init_attr);
- netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
+ netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);
ret = 0;
goto out;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index fbea9f7efe8c..9b08e41ccc29 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2284,8 +2284,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
dev->cell_index, dev->ofdev->dev.of_node);
}
@@ -2979,11 +2979,9 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->ofdev->dev, err,
+ "Can't get valid [local-]mac-address from OF !\n");
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
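dev_err_probe() folds the -EPROBE_DEFER special case into a single call: it logs at error level for real failures, logs at debug level (and records the deferral reason) for -EPROBE_DEFER, and returns the error it was given, which is why the open-coded "if (err != -EPROBE_DEFER) dev_err(...)" block above collapses to one statement. A minimal sketch of the idiom in an unrelated, hypothetical probe path:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");
		return 0;
	}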
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5c6a04d29f5b..3b14dc93f59d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -141,6 +141,13 @@ static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
+static unsigned int ibmveth_real_max_tx_queues(void)
+{
+ unsigned int n_cpu = num_online_cpus();
+
+ return min(n_cpu, IBMVETH_MAX_QUEUES);
+}
+
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
u32 pool_index, u32 pool_size,
@@ -456,6 +463,38 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
+static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+}
+
+static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
+ GFP_KERNEL);
+ if (!adapter->tx_ltb_ptr[idx]) {
+ netdev_err(adapter->netdev,
+ "unable to allocate tx long term buffer\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
+ adapter->tx_ltb_ptr[idx],
+ adapter->tx_ltb_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
+ netdev_err(adapter->netdev,
+ "unable to DMA map tx long term buffer\n");
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -538,6 +577,11 @@ static int ibmveth_open(struct net_device *netdev)
goto out_unmap_buffer_list;
}
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ if (ibmveth_allocate_tx_ltb(adapter, i))
+ goto out_free_tx_ltb;
+ }
+
adapter->rx_queue.index = 0;
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
@@ -595,25 +639,15 @@ static int ibmveth_open(struct net_device *netdev)
rc = -ENOMEM;
- adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
- netdev->mtu + IBMVETH_BUFF_OH,
- &adapter->bounce_buffer_dma, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to alloc bounce buffer\n");
- goto out_free_irq;
- }
-
netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
netdev_dbg(netdev, "open complete\n");
return 0;
-out_free_irq:
- free_irq(netdev->irq, netdev);
out_free_buffer_pools:
while (--i >= 0) {
if (adapter->rx_buff_pool[i].active)
@@ -623,6 +657,12 @@ out_free_buffer_pools:
out_unmap_filter_list:
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
+
+out_free_tx_ltb:
+ while (--i >= 0) {
+ ibmveth_free_tx_ltb(adapter, i);
+ }
+
out_unmap_buffer_list:
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -651,7 +691,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
if (!adapter->pool_config)
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -685,9 +725,8 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_free_coherent(&adapter->vdev->dev,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- adapter->bounce_buffer, adapter->bounce_buffer_dma);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ ibmveth_free_tx_ltb(adapter, i);
netdev_dbg(netdev, "close complete\n");
@@ -727,8 +766,8 @@ static void ibmveth_init_link_settings(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
- strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
+ strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
@@ -953,6 +992,69 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
+static void ibmveth_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ channels->max_tx = ibmveth_real_max_tx_queues();
+ channels->tx_count = netdev->real_num_tx_queues;
+
+ channels->max_rx = netdev->real_num_rx_queues;
+ channels->rx_count = netdev->real_num_rx_queues;
+}
+
+static int ibmveth_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int old = netdev->real_num_tx_queues,
+ goal = channels->tx_count;
+ int rc, i;
+
+ /* If ndo_open has not been called yet then don't allocate, just set
+ * desired netdev_queue's and return
+ */
+ if (!(netdev->flags & IFF_UP))
+ return netif_set_real_num_tx_queues(netdev, goal);
+
+ /* We have IBMVETH_MAX_QUEUES netdev_queue's allocated
+ * but we may need to alloc/free the ltb's.
+ */
+ netif_tx_stop_all_queues(netdev);
+
+ /* Allocate any queue that we need */
+ for (i = old; i < goal; i++) {
+ if (adapter->tx_ltb_ptr[i])
+ continue;
+
+ rc = ibmveth_allocate_tx_ltb(adapter, i);
+ if (!rc)
+ continue;
+
+ /* if something goes wrong, free everything we just allocated */
+ netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ break;
+ }
+ rc = netif_set_real_num_tx_queues(netdev, goal);
+ if (rc) {
+ netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ }
+ /* Free any that are no longer needed */
+ for (i = old; i > goal; i--) {
+ if (adapter->tx_ltb_ptr[i - 1])
+ ibmveth_free_tx_ltb(adapter, i - 1);
+ }
+
+ netif_tx_wake_all_queues(netdev);
+
+ return rc;
+}
+
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -961,6 +1063,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ethtool_stats = ibmveth_get_ethtool_stats,
.get_link_ksettings = ibmveth_get_link_ksettings,
.set_link_ksettings = ibmveth_set_link_ksettings,
+ .get_channels = ibmveth_get_channels,
+ .set_channels = ibmveth_set_channels
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -969,7 +1073,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs, unsigned long mss)
+ unsigned long desc, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -982,12 +1086,9 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
retry_count = 1024;
correlator = 0;
do {
- ret = h_send_logical_lan(adapter->vdev->unit_address,
- descs[0].desc, descs[1].desc,
- descs[2].desc, descs[3].desc,
- descs[4].desc, descs[5].desc,
- correlator, &correlator, mss,
- adapter->fw_large_send_support);
+ ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -1020,34 +1121,13 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- unsigned int desc_flags;
- union ibmveth_buf_desc descs[6];
- int last, i;
- int force_bounce = 0;
- dma_addr_t dma_addr;
+ unsigned int desc_flags, total_bytes;
+ union ibmveth_buf_desc desc;
+ int i, queue_num = skb_get_queue_mapping(skb);
unsigned long mss = 0;
if (ibmveth_is_packet_unsupported(skb, netdev))
goto out;
-
- /* veth doesn't handle frag_list, so linearize the skb.
- * When GRO is enabled SKB's can have frag_list.
- */
- if (adapter->is_active_trunk &&
- skb_has_frag_list(skb) && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
- /*
- * veth handles a maximum of 6 segments including the header, so
- * we have to linearize the skb if there are more than this.
- */
- if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
/* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
((skb->protocol == htons(ETH_P_IP) &&
@@ -1077,56 +1157,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags |= IBMVETH_BUF_LRG_SND;
}
-retry_bounce:
- memset(descs, 0, sizeof(descs));
-
- /*
- * If a linear packet is below the rx threshold then
- * copy it into the static bounce buffer. This avoids the
- * cost of a TCE insert and remove.
- */
- if (force_bounce || (!skb_is_nonlinear(skb) &&
- (skb->len < tx_copybreak))) {
- skb_copy_from_linear_data(skb, adapter->bounce_buffer,
- skb->len);
-
- descs[0].fields.flags_len = desc_flags | skb->len;
- descs[0].fields.address = adapter->bounce_buffer_dma;
-
- if (ibmveth_send(adapter, descs, 0)) {
- adapter->tx_send_failed++;
- netdev->stats.tx_dropped++;
- } else {
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- }
-
- goto out;
- }
-
- /* Map the header */
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed;
-
- descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
- descs[0].fields.address = dma_addr;
-
- /* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed_frags;
-
- descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
- descs[i+1].fields.address = dma_addr;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
@@ -1143,7 +1173,36 @@ retry_bounce:
}
}
- if (ibmveth_send(adapter, descs, mss)) {
+ /* Copy header into mapped buffer */
+ if (unlikely(skb->len > adapter->tx_ltb_size)) {
+ netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
+ skb->len, adapter->tx_ltb_size);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
+ total_bytes = skb_headlen(skb);
+ /* Copy frags into mapped buffers */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
+ skb_frag_address_safe(frag), skb_frag_size(frag));
+ total_bytes += skb_frag_size(frag);
+ }
+
+ if (unlikely(total_bytes != skb->len)) {
+ netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
+ skb->len, total_bytes);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ desc.fields.flags_len = desc_flags | skb->len;
+ desc.fields.address = adapter->tx_ltb_dma[queue_num];
+ /* finish writing to long_term_buff before VIOS accessing it */
+ dma_wmb();
+
+ if (ibmveth_send(adapter, desc.desc, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1151,41 +1210,11 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
out:
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
-map_failed_frags:
- last = i+1;
- for (i = 1; i < last; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-map_failed:
- if (!firmware_has_feature(FW_FEATURE_CMO))
- netdev_err(netdev, "tx: unable to map xmit buffer\n");
- adapter->tx_map_failed++;
- if (skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
- force_bounce = 1;
- goto retry_bounce;
}
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
@@ -1568,6 +1597,8 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
+ /* add size of mapped tx buffers */
+ ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1660,8 +1691,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
-
+ netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
if (!netdev)
return -ENOMEM;
@@ -1727,6 +1757,17 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
+ rc = netif_set_real_num_tx_queues(netdev, ibmveth_real_max_tx_queues());
+ if (rc) {
+ netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
+ rc);
+ free_netdev(netdev);
+ return rc;
+ }
+ adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
+ adapter->tx_ltb_ptr[i] = NULL;
+
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
netdev_dbg(netdev, "registering netdev...\n");
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 27dfff200166..daf6f615c03f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -46,23 +46,23 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+/* FW allows us to send 6 descriptors but we only use one so mark
+ * the other 5 as unused (0)
+ */
static inline long h_send_logical_lan(unsigned long unit_address,
- unsigned long desc1, unsigned long desc2, unsigned long desc3,
- unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out,
- unsigned long mss, unsigned long large_send_support)
+ unsigned long desc, unsigned long corellator_in,
+ unsigned long *corellator_out, unsigned long mss,
+ unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
if (large_send_support)
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in, mss);
+ desc, 0, 0, 0, 0, 0, corellator_in, mss);
else
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in);
+ desc, 0, 0, 0, 0, 0, corellator_in);
*corellator_out = retbuf[0];
@@ -98,6 +98,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
+#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
+#define IBMVETH_MAX_QUEUES 16U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -137,6 +139,9 @@ struct ibmveth_adapter {
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
@@ -145,8 +150,6 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
- void *bounce_buffer;
- dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ab7c0f81e9a..65dbfbec487a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1262,7 +1262,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
netif_napi_add(adapter->netdev, &adapter->napi[i],
- ibmvnic_poll, NAPI_POLL_WEIGHT);
+ ibmvnic_poll);
}
adapter->num_active_rx_napi = adapter->req_rx_queues;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 11a884aa5082..560d1d442232 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2431,8 +2431,8 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 32803b0cf1e8..d06d29c6c037 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -531,10 +531,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000_driver_name,
+ strscpy(drvinfo->driver, e1000_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 23299fc56199..61e60e4de600 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1012,7 +1012,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, e1000_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index b80ae9a82224..51a5afe9df2f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -639,7 +639,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
@@ -650,7 +650,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 321f2a95ae3a..49e926959ad3 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7267,7 +7267,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
+ strscpy((char *)pba_str, "Unknown", sizeof(pba_str));
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -7479,8 +7479,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -7676,7 +7676,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_pch_cnp)
adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
- strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index fd07c3679bb1..060b263348ce 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2697,9 +2697,14 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg &= ~BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
}
@@ -2715,9 +2720,14 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg |= BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
@@ -3037,7 +3047,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, MII_BMCR, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
+ if (ret_val) {
+ e_dbg("Error reading PHY register\n");
+ return ret_val;
+ }
if (data & BMCR_LOOPBACK)
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3362f26d7f99..4a6630586ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1595,8 +1595,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(interface->netdev, &q_vector->napi,
- fm10k_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);
/* tie q_vector and interface together */
interface->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d86b6d349ea9..9a60d6b207f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,20 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+
struct i40e_flex_pit {
struct list_head list;
u16 src_offset;
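The two macros added here exist purely to collapse the repeated register-value construction used when chaining queues onto an interrupt vector; the token-pasted nextq_type argument selects the matching I40E_QUEUE_TYPE_* constant. Their use from the MSI/legacy path rewritten later in this series looks like:

	/* RX queue 0 feeds vector 0; next element in the list is a TX queue */
	wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));

	/* last TX queue: terminate the linked list */
	wr32(hw, I40E_QINT_TQCTL(0),
	     I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));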
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..10d7a982a5b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 2819e261a126..4f01e2a6b6bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -4974,6 +4975,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -5012,6 +5014,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 2610338002fe..d9c51a238dcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -24,8 +24,10 @@
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
#define I40E_IS_X710TL_DEVICE(d) \
- (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 156e92c43780..7e75706f76db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2001,10 +2001,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+ strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b36bf9c3e1e4..2c07fa8ecfc8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
@@ -384,7 +385,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -3876,7 +3879,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
- /* Linked list for the queuepairs assigned to this vector */
+ /* begin of linked list for RX queue assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
@@ -3892,6 +3895,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) {
+ /* TX queue with next queue set to TX */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3901,7 +3905,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
-
+ /* TX queue with next RX or end of linked list */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3970,7 +3974,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- u32 val;
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
@@ -3987,28 +3990,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_RQCTL(0), val);
+ /* Associate the queue pair to the vector and enable the queue
+ * interrupt RX queue in linked list with next queue set to TX
+ */
+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
if (i40e_enabled_xdp_vsi(vsi)) {
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ /* TX queue in linked list with next queue set to TX */
+ wr32(hw, I40E_QINT_TQCTL(nextqp),
+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
}
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(0), val);
+ /* last TX queue so the next RX queue doesn't matter */
+ wr32(hw, I40E_QINT_TQCTL(0),
+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
i40e_flush(hw);
}
@@ -5907,6 +5902,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5928,10 +5943,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -6657,6 +6672,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
vsi->tc_seid_map[i] = ch->seid;
}
}
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
return ret;
err_free:
@@ -8219,9 +8237,9 @@ config_tc:
if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -10699,7 +10717,7 @@ static void i40e_send_version(struct i40e_pf *pf)
dv.minor_version = 0xff;
dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
+ strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
@@ -10966,10 +10984,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
@@ -11923,8 +11941,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
@@ -16047,23 +16064,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strlcpy(width, "8", PCI_WIDTH_SIZE); break;
+ strscpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strlcpy(width, "4", PCI_WIDTH_SIZE); break;
+ strscpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strlcpy(width, "2", PCI_WIDTH_SIZE); break;
+ strscpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strlcpy(width, "1", PCI_WIDTH_SIZE); break;
+ strscpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
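On the i40e_bw_bytes_to_mbits() helper introduced above: mqprio hands the driver max_rate in bytes per second, while the firmware takes the limit in Mbit/s and later divides it into 50 Mbit/s credits (see the "Tx rate credits" comment in i40e_set_bw_limit). A purely illustrative worked example:

	/*
	 * 12,500,000 B/s * 8 = 100,000,000 bit/s = 100 Mbit/s
	 * 100 Mbit/s / 50    = 2 BW credits programmed to firmware
	 *
	 * Anything that converts to less than 50 Mbit/s is clamped up to
	 * the minimum usable value, which is exactly the branch
	 * i40e_bw_bytes_to_mbits() adds.
	 */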
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 2d3533f38d7b..ffea0c9c82f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1390,7 +1390,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strlcpy(pf->ptp_caps.name, i40e_driver_name,
+ strscpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6ba97a0166e..69e67eb6aea7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* set the tx_flags to indicate the IP protocol type. this is
* required so that checksum header computation below is accurate.
@@ -3686,7 +3688,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
u8 prio;
/* is DCB enabled at all? */
- if (vsi->tc_config.numtc == 1)
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4f184c50f6e8..7e9f6a69eb10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
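A concrete, illustrative reading of i40e_vc_get_max_frame_size(): a port VLAN tag is VLAN_HLEN (4) bytes that the VF never sees. If the port reports a max frame size of, say, 9728 bytes and the VF sits in a port VLAN, the PF advertises 9728 - 4 = 9724 in max_mtu so the VF's largest frame still fits once the PF inserts the tag.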
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
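The adminq hunks above fix a leak on the error path by adding a new unwind label ahead of the existing one, so cleanup runs in reverse order of allocation: a register-configuration failure now frees the per-entry DMA buffers before falling through to the ring free. The general shape of the idiom, with hypothetical names standing in for the iavf helpers:

	static int demo_init_queue(struct demo_hw *hw)	/* hypothetical */
	{
		int err;

		err = demo_alloc_bufs(hw);	/* like iavf_alloc_asq_bufs() */
		if (err)
			goto free_ring;

		err = demo_config_regs(hw);	/* like iavf_config_asq_regs() */
		if (err)
			goto free_bufs;		/* new label: undo the buffers too */

		return 0;

	free_bufs:
		demo_free_bufs(hw);
	free_ring:
		demo_free_ring(hw);
		return err;
	}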
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index e535d4c3da49..a056e1545615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -581,9 +581,9 @@ static void iavf_get_drvinfo(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strscpy(drvinfo->driver, iavf_driver_name, 32);
+ strscpy(drvinfo->fw_version, "N/A", 4);
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 45d097a164ad..3fc572341781 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
int ret;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (handle_mac)
- goto done;
-
- ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+ iavf_is_mac_set_handled(netdev, addr->sa_data),
+ msecs_to_jiffies(2500));
/* If ret < 0 then it means wait was interrupted.
* If ret == 0 then it means we got a timeout.
@@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (!ret)
return -EAGAIN;
-done:
if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
return -EACCES;
@@ -1270,66 +1267,138 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
}
/**
- * iavf_down - Shutdown the connection processing
+ * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
+ * yet and mark other to be removed.
* @adapter: board private structure
- *
- * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
-void iavf_down(struct iavf_adapter *adapter)
+static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct iavf_vlan_filter *vlf;
- struct iavf_cloud_filter *cf;
- struct iavf_fdir_fltr *fdir;
- struct iavf_mac_filter *f;
- struct iavf_adv_rss *rss;
-
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- adapter->link_up = false;
- iavf_napi_disable_all(adapter);
- iavf_irq_disable(adapter);
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
/* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */
- list_for_each_entry(f, &adapter->mac_filter_list, list) {
- f->remove = true;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
+ list) {
+ if (f->add) {
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->remove = true;
+ }
}
/* remove all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->remove = true;
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list) {
+ if (vlf->add) {
+ list_del(&vlf->list);
+ kfree(vlf);
+ } else {
+ vlf->remove = true;
+ }
}
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
+ * mark other to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */
spin_lock_bh(&adapter->cloud_filter_list_lock);
- list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
- cf->del = true;
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->add) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ } else {
+ cf->del = true;
+ }
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
+ * other to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ }
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
+ * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
+ * other to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
+{
+ struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */
spin_lock_bh(&adapter->adv_rss_lock);
- list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
- rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ list_del(&rss->list);
+ kfree(rss);
+ } else {
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ }
+ }
spin_unlock_bh(&adapter->adv_rss_lock);
+}
+
+/**
+ * iavf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
+ **/
+void iavf_down(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->state <= __IAVF_DOWN_PENDING)
+ return;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ adapter->link_up = false;
+ iavf_napi_disable_all(adapter);
+ iavf_irq_disable(adapter);
+
+ iavf_clear_mac_vlan_filters(adapter);
+ iavf_clear_cloud_filters(adapter);
+ iavf_clear_fdir_filters(adapter);
+ iavf_clear_adv_rss_conf(adapter);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
@@ -1338,11 +1407,16 @@ void iavf_down(struct iavf_adapter *adapter)
* here for this to complete. The watchdog is still running
* and it will take care of this.
*/
- adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ if (!list_empty(&adapter->mac_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!list_empty(&adapter->vlan_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!list_empty(&adapter->cloud_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ if (!list_empty(&adapter->fdir_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ if (!list_empty(&adapter->adv_rss_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
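The new iavf_clear_*() helpers above switch to list_for_each_entry_safe() because filters that were never announced to the PF are freed on the spot; the _safe variant keeps a lookahead cursor so the walk survives the list_del(). A generic sketch of that pattern — names are hypothetical, the shape mirrors the hunks above:

	struct demo_filter *f, *ftmp;

	spin_lock_bh(&lock);
	list_for_each_entry_safe(f, ftmp, &filter_list, list) {
		if (f->add) {			/* never sent to the PF: drop it now */
			list_del(&f->list);
			kfree(f);
		} else {
			f->remove = true;	/* let the watchdog task remove it */
		}
	}
	spin_unlock_bh(&lock);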
@@ -1757,7 +1831,7 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll, NAPI_POLL_WEIGHT);
+ iavf_napi_poll);
}
return 0;
@@ -2367,7 +2441,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
+ goto err;
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -2877,6 +2951,11 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
+ /* Detach interface to avoid subsequent NDO callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
@@ -2884,7 +2963,7 @@ static void iavf_reset_task(struct work_struct *work)
if (adapter->state != __IAVF_REMOVE)
queue_work(iavf_wq, &adapter->reset_task);
- return;
+ goto reset_finish;
}
while (!mutex_trylock(&adapter->client_lock))
@@ -2954,7 +3033,6 @@ continue_reset:
if (running) {
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@@ -3084,14 +3162,21 @@ continue_reset:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- return;
+ goto reset_finish;
reset_err:
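+ /* reset failed: free traffic IRQs if the interface was running and disable the VF */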
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
- iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
+reset_finish:
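+ /* re-attach the interface so NDO callbacks are allowed again */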
+ rtnl_lock();
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
/**
@@ -4085,8 +4170,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in the __IAVF_INIT_CONFIG_ADAPTER state, the crit_lock
+ * is already taken and iavf_open is being called from an upper
+ * device's notifier reacting to the NETDEV_REGISTER event.
+ * Bail out here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
@@ -4161,6 +4255,7 @@ err_unlock:
static int iavf_close(struct net_device *netdev)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ u64 aq_to_restore;
int status;
mutex_lock(&adapter->crit_lock);
@@ -4173,6 +4268,29 @@ static int iavf_close(struct net_device *netdev)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
+ /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
+ * IAVF_FLAG_AQ_DISABLE_QUEUES because doing so causes an rtnl
+ * deadlock with adminq_task() until iavf_close() times out. We must
+ * send IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to
+ * make disabling the queues possible for the VF. Give only the
+ * necessary flags to iavf_down() and save the others to be set right
+ * before iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has
+ * already been sent and iavf is in the DOWN state.
+ */
+ aq_to_restore = adapter->aq_required;
+ adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
+
+ /* Remove the flags that we do not want to send after close, or that we
+ * want to send before the queues are disabled.
+ */
+ aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
+ IAVF_FLAG_AQ_ENABLE_QUEUES |
+ IAVF_FLAG_AQ_CONFIGURE_QUEUES |
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER |
+ IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
+ IAVF_FLAG_AQ_ADD_FDIR_FILTER |
+ IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter);
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
@@ -4196,6 +4314,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+
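+ /* restore the deferred AQ request flags now that the queues have been disabled */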
+ mutex_lock(&adapter->crit_lock);
+ adapter->aq_required |= aq_to_restore;
+ mutex_unlock(&adapter->crit_lock);
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d18797d25a..18b6a702a1d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or always return
+ * 0 for the head/tail registers so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15ee85dc33bd..5a9e6563923e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
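+ /* fall back to the Rx buffer limit if the PF reports a zero or oversized max_mtu */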
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cc5b85afd437..001500afc4a6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets the XSK buff pool pointer on the XDP ring.
+ *
+ * The XDP ring is picked from the Rx ring, and the Rx ring is picked based
+ * on the provided queue id. The reason is that a queue vector might have
+ * more than one XDP ring assigned, e.g. when the user reduced the queue
+ * count on the netdev; the Rx ring carries a pointer to one of these XDP
+ * rings for its own purposes, such as handling the XDP_TX action, so we can
+ * piggyback here on the rx_ring->xdp_ring assignment done during XDP ring
+ * initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->alloc_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
@@ -854,6 +864,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 9939238573a4..1bdc70aa979d 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1423,6 +1423,56 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX 16
+
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+
+ u8 pending_port_option_status;
+#define ICE_AQC_PENDING_PORT_OPT_IDX_M GENMASK(3, 0)
+#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7)
+
+ u8 rsvd[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M GENMASK(3, 0)
+
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+
+ u8 global_scid[2];
+ u8 phy_scid[2];
+ u8 pf2port_cid[2];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
struct ice_aqc_gpio {
__le16 gpio_ctrl_handle;
@@ -1489,6 +1539,12 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16-bit value, so these flags are
+ * all offset by 8 bits.
+ */
+#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@@ -2082,6 +2138,8 @@ struct ice_aq_desc {
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
@@ -2243,6 +2301,8 @@ enum ice_adminq_opc {
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 136d7911adb4..9e36f01dfa4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -7,18 +7,6 @@
#include "ice_dcb_lib.h"
#include "ice_sriov.h"
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
- return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- return !!rx_ring->rx_buf;
-}
-
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -142,8 +130,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
out:
/* tie q_vector and VSI together */
@@ -417,7 +404,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Strip the Ethernet CRC bytes before the packet is posted to host
* memory.
*/
- rlan_ctx.crcstrip = 1;
+ rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
* and it needs to remain 1 for non-DVM capable configurations to not
@@ -519,11 +506,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
- kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- if (!ice_alloc_rx_buf_zc(ring))
- return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -538,8 +522,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!ice_alloc_rx_buf(ring))
- return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 27d0cbbd29da..039342a0ed15 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -8,6 +8,108 @@
#define ICE_PF_RESET_WAIT_COUNT 300
+static const char * const ice_link_mode_str_low[] = {
+ [0] = "100BASE_TX",
+ [1] = "100M_SGMII",
+ [2] = "1000BASE_T",
+ [3] = "1000BASE_SX",
+ [4] = "1000BASE_LX",
+ [5] = "1000BASE_KX",
+ [6] = "1G_SGMII",
+ [7] = "2500BASE_T",
+ [8] = "2500BASE_X",
+ [9] = "2500BASE_KX",
+ [10] = "5GBASE_T",
+ [11] = "5GBASE_KR",
+ [12] = "10GBASE_T",
+ [13] = "10G_SFI_DA",
+ [14] = "10GBASE_SR",
+ [15] = "10GBASE_LR",
+ [16] = "10GBASE_KR_CR1",
+ [17] = "10G_SFI_AOC_ACC",
+ [18] = "10G_SFI_C2C",
+ [19] = "25GBASE_T",
+ [20] = "25GBASE_CR",
+ [21] = "25GBASE_CR_S",
+ [22] = "25GBASE_CR1",
+ [23] = "25GBASE_SR",
+ [24] = "25GBASE_LR",
+ [25] = "25GBASE_KR",
+ [26] = "25GBASE_KR_S",
+ [27] = "25GBASE_KR1",
+ [28] = "25G_AUI_AOC_ACC",
+ [29] = "25G_AUI_C2C",
+ [30] = "40GBASE_CR4",
+ [31] = "40GBASE_SR4",
+ [32] = "40GBASE_LR4",
+ [33] = "40GBASE_KR4",
+ [34] = "40G_XLAUI_AOC_ACC",
+ [35] = "40G_XLAUI",
+ [36] = "50GBASE_CR2",
+ [37] = "50GBASE_SR2",
+ [38] = "50GBASE_LR2",
+ [39] = "50GBASE_KR2",
+ [40] = "50G_LAUI2_AOC_ACC",
+ [41] = "50G_LAUI2",
+ [42] = "50G_AUI2_AOC_ACC",
+ [43] = "50G_AUI2",
+ [44] = "50GBASE_CP",
+ [45] = "50GBASE_SR",
+ [46] = "50GBASE_FR",
+ [47] = "50GBASE_LR",
+ [48] = "50GBASE_KR_PAM4",
+ [49] = "50G_AUI1_AOC_ACC",
+ [50] = "50G_AUI1",
+ [51] = "100GBASE_CR4",
+ [52] = "100GBASE_SR4",
+ [53] = "100GBASE_LR4",
+ [54] = "100GBASE_KR4",
+ [55] = "100G_CAUI4_AOC_ACC",
+ [56] = "100G_CAUI4",
+ [57] = "100G_AUI4_AOC_ACC",
+ [58] = "100G_AUI4",
+ [59] = "100GBASE_CR_PAM4",
+ [60] = "100GBASE_KR_PAM4",
+ [61] = "100GBASE_CP2",
+ [62] = "100GBASE_SR2",
+ [63] = "100GBASE_DR",
+};
+
+static const char * const ice_link_mode_str_high[] = {
+ [0] = "100GBASE_KR2_PAM4",
+ [1] = "100G_CAUI2_AOC_ACC",
+ [2] = "100G_CAUI2",
+ [3] = "100G_AUI2_AOC_ACC",
+ [4] = "100G_AUI2",
+};
+
+/**
+ * ice_dump_phy_type - helper function to dump phy_type
+ * @hw: pointer to the HW structure
+ * @low: 64 bit value for phy_type_low
+ * @high: 64 bit value for phy_type_high
+ * @prefix: prefix string to differentiate multiple dumps
+ */
+static void
+ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
+{
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
+ if (low & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_low[i]);
+ }
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
+ if (high & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_high[i]);
+ }
+}
+
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -80,9 +182,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T6:
+ case ICE_SUBDEV_ID_E810T7:
+ return true;
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T5:
return true;
+ }
break;
default:
break;
@@ -183,6 +299,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
+ const char *prefix;
struct ice_hw *hw;
int status;
@@ -204,29 +321,48 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
- ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
- report_mode);
- ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
- ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
+
+ switch (report_mode) {
+ case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
+ prefix = "phy_caps_media";
+ break;
+ case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
+ prefix = "phy_caps_no_media";
+ break;
+ case ICE_AQC_REPORT_ACTIVE_CFG:
+ prefix = "phy_caps_active";
+ break;
+ case ICE_AQC_REPORT_DFLT_CFG:
+ prefix = "phy_caps_default";
+ break;
+ default:
+ prefix = "phy_caps_invalid";
+ }
+
+ ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
+ le64_to_cpu(pcaps->phy_type_high), prefix);
+
+ ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
+ prefix, report_mode);
+ ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
pcaps->low_power_ctrl_an);
- ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
+ pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
pcaps->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
pcaps->link_fec_options);
- ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
- pcaps->module_compliance_enforcement);
- ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
- pcaps->extended_compliance_code);
- ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
+ prefix, pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
+ prefix, pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
pcaps->module_type[0]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
pcaps->module_type[1]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
@@ -2397,6 +2533,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+ info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2414,6 +2552,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
+ info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2776,6 +2916,26 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
}
/**
+ * ice_is_100m_speed_supported
+ * @hw: pointer to the HW struct
+ *
+ * Returns true if 100M speeds are supported by the device, false otherwise.
+ */
+bool ice_is_100m_speed_supported(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
@@ -3535,6 +3695,121 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
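+ /* map each Rx ring to an XDP Tx ring and bind its XSK buffer pool */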
/**
+ * ice_aq_get_port_options
+ * @hw: pointer to the HW struct
+ * @options: buffer for the resultant port options
+ * @option_count: input - size of the buffer in port options structures,
+ * output - number of returned port options
+ * @lport: logical port to call the command with (optional)
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ * lport; must be true when the PF owns more than one port
+ * @active_option_idx: index of active port option in returned buffer
+ * @active_option_valid: active option in returned buffer is valid
+ * @pending_option_idx: index of pending port option in returned buffer
+ * @pending_option_valid: pending option in returned buffer is valid
+ *
+ * Calls Get Port Options AQC (0x06ea) and verifies result.
+ */
+int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid)
+{
+ struct ice_aqc_get_port_options *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 i;
+
+ /* options buffer shall be able to hold max returned options */
+ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.get_port_options;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+ cmd->lport_num_valid = lport_valid;
+
+ status = ice_aq_send_cmd(hw, &desc, options,
+ *option_count * sizeof(*options), NULL);
+ if (status)
+ return status;
+
+ /* verify direct FW response & set output parameters */
+ *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
+ cmd->port_options_count);
+ ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
+ *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
+ cmd->port_options);
+ if (*active_option_valid) {
+ *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
+ cmd->port_options);
+ if (*active_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
+ *active_option_idx);
+ }
+
+ *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
+ cmd->pending_port_option_status);
+ if (*pending_option_valid) {
+ *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
+ cmd->pending_port_option_status);
+ if (*pending_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
+ *pending_option_idx);
+ }
+
+ /* mask output options fields */
+ for (i = 0; i < *option_count; i++) {
+ options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
+ options[i].pmd);
+ options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
+ options[i].max_lane_speed);
+ ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
+ options[i].pmd, options[i].max_lane_speed);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_port_option
+ * @hw: pointer to the HW struct
+ * @lport: logical port to call the command with
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ * lport; must be true when the PF owns more than one port
+ * @new_option: new port option to be written
+ *
+ * Calls Set Port Options AQC (0x06eb).
+ */
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option)
+{
+ struct ice_aqc_set_port_option *cmd;
+ struct ice_aq_desc desc;
+
+ if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.set_port_option;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+
+ cmd->lport_num_valid = lport_valid;
+ cmd->selected_port_option = new_option;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -5029,20 +5304,22 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
}
/**
- * ice_fw_supports_link_override
+ * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
*
- * Checks if the firmware supports link override
+ * Checks if the firmware API is minimum version
*/
-bool ice_fw_supports_link_override(struct ice_hw *hw)
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ } else if (hw->api_maj_ver > maj) {
return true;
}
@@ -5050,6 +5327,19 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
}
/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
+}
+
+/**
* ice_get_link_default_override
* @ldo: pointer to the link default override struct
* @pi: pointer to the port info struct
@@ -5179,16 +5469,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
if (hw->mac_type != ICE_MAC_E810)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -5225,14 +5508,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 61b7c60db689..8b6712b92e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -151,6 +151,15 @@ int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid);
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option);
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -204,6 +213,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index b41bc3dc1745..6d560d1c74a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -24,6 +24,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 3337314a7b35..e6ec20079ced 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -9,6 +9,8 @@
#include "ice_eswitch.h"
#include "ice_fw_update.h"
+static int ice_active_port_option = -1;
+
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -466,12 +468,259 @@ ice_devlink_reload_empr_finish(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
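+ /* options for logical port i start at offset i * ICE_AQC_PORT_OPT_MAX in the flat buffer */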
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to set the port option for
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * Note that an FW option with a split count of 1 might not be available.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
/* The ice driver currently does not support driver reinit */
.reload_down = ice_devlink_reload_empr_start,
.reload_up = ice_devlink_reload_empr_finish,
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
@@ -695,6 +944,39 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
}
/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
*
@@ -722,6 +1004,12 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pf->hw.bus.func;
+ /* As FW supports port split options only for the whole device,
+ * set the port split options only on the first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index e35371e61e07..f9f15acae90a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -292,8 +292,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (max_vsi_num < vsi->vsi_num)
max_vsi_num = vsi->vsi_num;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
+ ice_napi_poll);
netif_keep_dst(vf->repr->netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a6fff8ebaf9d..b7be84bbe72d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -136,6 +136,11 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
+ ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
+ ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
+ ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
+ ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
+ ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};
static const u32 ice_regs_dump_list[] = {
@@ -1284,10 +1289,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- ice_down(vsi);
- ice_up(vsi);
- }
+ ice_down_up(vsi);
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
@@ -1468,20 +1470,22 @@ ice_get_ethtool_stats(struct net_device *netdev,
/**
* ice_mask_min_supported_speeds
+ * @hw: pointer to the HW structure
* @phy_types_high: PHY type high
* @phy_types_low: PHY type low to apply minimum supported speeds mask
*
* Apply minimum supported speeds mask to PHY type low. These are the speeds
* for ethtool supported link mode.
*/
-static
-void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+static void
+ice_mask_min_supported_speeds(struct ice_hw *hw,
+ u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
- else
+ else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}
@@ -1531,7 +1535,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
- ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+ ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
+ &phy_types_low);
/* determine advertised modes based on link override only
* if it's supported and if the FW doesn't abstract the
* driver from having to account for link overrides
@@ -2826,6 +2831,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i--)
@@ -2884,6 +2890,7 @@ process_rx:
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
+ rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
rx_rings[i].rx_buf = NULL;
/* this is to allow wr32 to have something to write to
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index c9f7393b783d..ee5b36941ba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -61,13 +61,13 @@ static void ice_lag_set_backup(struct ice_lag *lag)
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *master;
+ const char *name, *peer, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
- master = lag->master ? "TRUE" : "FALSE";
+ primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
switch (lag->role) {
@@ -87,8 +87,8 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
- bonded, peer, upper, role, master);
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
+ bonded, peer, upper, role, primary);
}
/**
@@ -119,7 +119,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
- netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
goto lag_out;
}
@@ -164,8 +164,8 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
- /* if this is the first element in an LAG mark as master */
- lag->master = !!(peers == 1);
+ /* if this is the first element in an LAG mark as primary */
+ lag->primary = !!(peers == 1);
}
/**
@@ -264,7 +264,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
return;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index c2e3688dd8fd..51b5cf467ce2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -24,7 +24,7 @@ struct ice_lag {
struct net_device *upper_netdev; /* upper bonding netdev */
struct notifier_block notif_block;
u8 bonded:1; /* currently bonded */
- u8 master:1; /* this is a master */
+ u8 primary:1; /* this is primary */
u8 handler:1; /* did we register a rx_netdev_handler */
/* each thing blocking bonding will increment this value by one.
* If this value is zero, then bonding is allowed.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f9aed0..938ba8c215cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
*/
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
@@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* at least 1)
*/
if (offset)
- vsi->num_rxq = offset;
+ rx_count = offset;
else
- vsi->num_rxq = num_rxq_per_tc;
+ rx_count = num_rxq_per_tc;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ if (rx_count > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ rx_count, vsi->alloc_rxq);
return -EINVAL;
}
- vsi->num_txq = tx_count;
- if (vsi->num_txq > vsi->alloc_txq) {
+ if (tx_count > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ tx_count, vsi->alloc_txq);
return -EINVAL;
}
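+ /* commit the new queue counts only after both checks have passed */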
+ vsi->num_txq = tx_count;
+ vsi->num_rxq = rx_count;
+
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
@@ -1522,6 +1524,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
+ ring->cached_phctime = pf->ptp.cached_phc_time;
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1562,6 +1565,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
+ * @vsi: VSI to be configured
+ * @disable: set to true to have FCS / CRC in the frame data
+ */
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i)
+ if (disable)
+ vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+}
+
+/**
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
@@ -1986,8 +2005,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
return ret;
}
@@ -2969,9 +2988,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- if (vsi->type == ICE_VSI_PF)
- ice_devlink_destroy_pf_port(pf);
-
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -3029,6 +3045,9 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
@@ -3181,7 +3200,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
ice_vsi_init_vlan_ops(vsi);
@@ -3276,6 +3295,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
*/
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
+
+ /* disable or enable CRC stripping */
+ if (vsi->netdev)
+ ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
+ NETIF_F_RXFCS));
+
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3490,6 +3515,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u16 new_txq, new_rxq;
u8 netdev_tc = 0;
int i;
@@ -3530,21 +3556,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
}
}
- /* Set actual Tx/Rx queue pairs */
- vsi->num_txq = offset + qcount_tx;
- if (vsi->num_txq > vsi->alloc_txq) {
+ new_txq = offset + qcount_tx;
+ if (new_txq > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ new_txq, vsi->alloc_txq);
return -EINVAL;
}
- vsi->num_rxq = offset + qcount_rx;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ new_rxq = offset + qcount_rx;
+ if (new_rxq > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ new_rxq, vsi->alloc_rxq);
return -EINVAL;
}
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = new_txq;
+ vsi->num_rxq = new_rxq;
+
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
@@ -3576,6 +3605,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
+ struct ice_tc_cfg old_tc_cfg;
struct ice_vsi_ctx *ctx;
struct device *dev;
int i, ret = 0;
@@ -3600,6 +3630,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs[i] = vsi->num_txq;
}
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
@@ -3616,8 +3647,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
else
ret = ice_vsi_setup_q_map(vsi, ctx);
- if (ret)
+ if (ret) {
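+ /* restore the previous TC configuration if the queue map setup failed */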
+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
goto out;
+ }
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -4062,7 +4095,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
if (err && err != -EEXIST)
return err;
- return 0;
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 8712b1d2ceec..ec4bf0c89857 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -89,6 +89,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
+
void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index eb40526ee179..0f6718719453 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- return status;
+ return 0;
}
/**
@@ -2397,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
- ice_unplug_aux_dev(pf);
-
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2579,7 +2579,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2586,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
@@ -2683,6 +2675,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
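As a standalone illustration of the Rx-to-XDP mapping that the hunk above moves into ice_prepare_xdp_rings(), here is a minimal user-space sketch of the two strategies: round-robin sharing when the locking static key is enabled (fewer XDP rings than Rx rings) and a per-vector 1:1 assignment otherwise. The helper name and queue counts below are illustrative, not driver code.

#include <stdio.h>

/* Simplified model: pick an XDP Tx ring for each Rx queue. When XDP rings are
 * shared (the "locking" case) they are handed out round-robin; otherwise each
 * Rx queue uses the XDP ring that belongs to its own interrupt vector.
 */
static int map_rx_to_xdp(int rxq, int num_xdp_txq, int shared)
{
	if (shared)
		return rxq % num_xdp_txq;	/* round-robin over shared rings */
	return rxq;				/* 1:1 with the ring of the same vector */
}

int main(void)
{
	int num_rxq = 8, num_xdp_txq = 4;

	for (int i = 0; i < num_rxq; i++)
		printf("rxq %d -> xdp ring %d\n", i,
		       map_rx_to_xdp(i, num_xdp_txq, num_rxq > num_xdp_txq));
	return 0;
}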
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -2887,10 +2896,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ /* reallocate Rx queues that are used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
} else {
/* safe to call even when prog == vsi->xdp_prog as
* dev_xdp_install in net/core/dev.c incremented prog's
@@ -3076,7 +3093,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- ice_ptp_process_ts(pf);
+ if (!hw->reset_ongoing)
+ ret = IRQ_WAKE_THREAD;
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3111,7 +3129,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ice_service_task_schedule(pf);
}
}
- ret = IRQ_HANDLED;
+ if (!ret)
+ ret = IRQ_HANDLED;
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -3120,6 +3139,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
+ * ice_misc_intr_thread_fn - misc interrupt thread function
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+{
+ irqreturn_t ret = IRQ_HANDLED;
+ struct ice_pf *pf = data;
+ bool irq_handled;
+
+ irq_handled = ice_ptp_process_ts(pf);
+ if (!irq_handled)
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
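The hunks above convert the miscellaneous interrupt to a hard handler plus IRQ thread: the hard handler only flags that Tx timestamp work is pending, and the slow PHY register reads move into ice_misc_intr_thread_fn(). Below is a hedged, user-space model of just the hard-handler return-value decision; the enum values mirror irqreturn_t numbering, but everything else is illustrative.

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD };

/* Model of the hard-handler decision: wake the IRQ thread only when the Tx
 * timestamp cause fired and no reset is in progress, otherwise report the
 * interrupt as fully handled in hard-IRQ context.
 */
static enum irqreturn misc_intr_model(int tsyn_tx_cause, int reset_ongoing)
{
	enum irqreturn ret = IRQ_NONE;

	if (tsyn_tx_cause && !reset_ongoing)
		ret = IRQ_WAKE_THREAD;

	if (!ret)
		ret = IRQ_HANDLED;
	return ret;
}

int main(void)
{
	printf("%d %d %d\n",
	       misc_intr_model(1, 0),	/* 2: defer to the IRQ thread */
	       misc_intr_model(1, 1),	/* 1: reset ongoing, handle in place */
	       misc_intr_model(0, 0));	/* 1: nothing to defer */
	return 0;
}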
+/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @hw: pointer to HW structure
*/
@@ -3231,10 +3268,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
- ice_misc_intr, 0, pf->int_name, pf);
+ err = devm_request_threaded_irq(dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, ice_misc_intr_thread_fn,
+ 0, pf->int_name, pf);
if (err) {
- dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
@@ -3271,7 +3310,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll, NAPI_POLL_WEIGHT);
+ ice_napi_poll);
}
/**
@@ -3374,6 +3413,11 @@ static void ice_set_netdev_features(struct net_device *netdev)
if (is_dvm_ena)
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_TX;
+
+ /* Leave CRC / FCS stripping enabled by default, but allow the value to
+ * be changed at runtime
+ */
+ netdev->hw_features |= NETIF_F_RXFCS;
}
/**
@@ -3573,6 +3617,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
@@ -3886,7 +3938,7 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
@@ -3898,87 +3950,134 @@ static int ice_init_pf(struct ice_pf *pf)
}
/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when the entire request cannot be fulfilled.
+ * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
+ * remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+ /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
+
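To see how the ice_reduce_msix_usage() split behaves for different vector counts, here is a small self-contained model of the same arithmetic. The MIN_* and AEQ constants are illustrative stand-ins for ICE_MIN_LAN_TXRX_MSIX, ICE_MIN_RDMA_MSIX and ICE_RDMA_NUM_AEQ_MSIX, so the printed numbers only demonstrate the shape of the policy, not the driver's exact values.

#include <stdio.h>

#define MIN_LAN_TXRX_MSIX	1	/* assumed minimum LAN traffic vectors */
#define MIN_RDMA_MSIX		2	/* assumed minimum RDMA vectors */
#define RDMA_NUM_AEQ_MSIX	4	/* assumed RDMA AEQ vectors */

/* Same three-way policy as the function above: drop RDMA, keep RDMA at its
 * minimum, or split the remainder between LAN and RDMA after the AEQ share.
 */
static void reduce_msix(int v_remain, int rdma_ena, int *lan, int *rdma)
{
	int v_rdma = RDMA_NUM_AEQ_MSIX + 1;	/* AEQ plus one extra vector */

	if (!rdma_ena) {
		*lan = v_remain;
		*rdma = 0;
	} else if (v_remain < MIN_LAN_TXRX_MSIX + MIN_RDMA_MSIX) {
		*rdma = 0;
		*lan = MIN_LAN_TXRX_MSIX;
	} else if (v_remain < MIN_LAN_TXRX_MSIX + v_rdma ||
		   v_remain - v_rdma < v_rdma) {
		*rdma = MIN_RDMA_MSIX;
		*lan = v_remain - MIN_RDMA_MSIX;
	} else {
		*rdma = (v_remain - RDMA_NUM_AEQ_MSIX) / 2 + RDMA_NUM_AEQ_MSIX;
		*lan = v_remain - *rdma;
	}
}

int main(void)
{
	for (int v = 2; v <= 16; v += 2) {
		int lan, rdma;

		reduce_msix(v, 1, &lan, &rdma);
		printf("v_remain=%2d -> lan=%2d rdma=%2d\n", v, lan, rdma);
	}
	return 0;
}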
+/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure
*
- * compute the number of MSIX vectors required (v_budget) and request from
- * the OS. Return the number of vectors reserved or negative on failure
+ * Compute the number of MSIX vectors wanted and request from the OS. Adjust
+ * device usage if there are not enough vectors. Return the number of vectors
+ * reserved or negative on failure.
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int num_cpus, v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
struct device *dev = ice_pf_to_dev(pf);
- int needed, err, i;
+ int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */
- needed = ICE_MIN_LAN_OICR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
- /* reserve for flow director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
- needed = ICE_FDIR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
- }
-
- /* reserve for switchdev */
- needed = ICE_ESWITCH_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
-
- /* total used for non-traffic vectors */
- v_other = v_budget;
-
- /* reserve vectors for LAN traffic */
- needed = num_cpus;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
-
- /* reserve vectors for RDMA auxiliary driver */
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
+
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
+
+ v_wanted = v_other;
+
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
+
+ /* RDMA auxiliary driver */
if (ice_is_rdma_ena(pf)) {
- needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_rdma_msix = needed;
- v_budget += needed;
- v_left -= needed;
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
+ }
+
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
}
- pf->msix_entries = devm_kcalloc(dev, v_budget,
+ pf->msix_entries = devm_kcalloc(dev, v_wanted,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
- for (i = 0; i < v_budget; i++)
+ for (i = 0; i < v_wanted; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
- ICE_MIN_MSIX, v_budget);
+ ICE_MIN_MSIX, v_wanted);
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
- if (v_actual < v_budget) {
+ if (v_actual < v_wanted) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_budget, v_actual);
+ v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
@@ -3987,38 +4086,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
goto msix_err;
} else {
int v_remain = v_actual - v_other;
- int v_rdma = 0, v_min_rdma = 0;
- if (ice_is_rdma_ena(pf)) {
- /* Need at least 1 interrupt in addition to
- * AEQ MSIX
- */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
- v_min_rdma = ICE_MIN_RDMA_MSIX;
- }
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX ||
- v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
- dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining
- * vectors to LAN MSIX
- */
- pf->num_rdma_msix = v_min_rdma;
- pf->num_lan_msix = v_remain - v_min_rdma;
- } else {
- /* Split remaining MSIX with RDMA after
- * accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
+ ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
@@ -4033,12 +4105,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
msix_err:
devm_kfree(dev, pf->msix_entries);
- goto exit_err;
-no_hw_vecs_left_err:
- dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
- needed, v_left);
- err = -ERANGE;
exit_err:
pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
@@ -4532,6 +4599,10 @@ static int ice_register_netdev(struct ice_pf *pf)
if (!vsi || !vsi->netdev)
return -EIO;
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create;
+
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
@@ -4539,17 +4610,13 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
-err_devlink_create:
- unregister_netdev(vsi->netdev);
- clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
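The reordering above creates the devlink PF port before register_netdev() and tears things down in reverse on failure. A minimal sketch of that acquire-in-order / release-in-reverse pattern, using placeholder setup_port()/setup_netdev() helpers rather than the real ice or devlink calls:

#include <stdio.h>

static int setup_port(void)	{ puts("create devlink port"); return 0; }
static void teardown_port(void)	{ puts("destroy devlink port"); }
static int setup_netdev(void)	{ puts("register netdev"); return -1; /* simulate failure */ }

/* Error labels unwind in the reverse order of setup, so the port created
 * first is destroyed last.
 */
static int bringup(void)
{
	int err;

	err = setup_port();
	if (err)
		goto err_port;

	err = setup_netdev();
	if (err)
		goto err_netdev;

	return 0;

err_netdev:
	teardown_port();
err_port:
	return err;
}

int main(void)
{
	return bringup() ? 1 : 0;
}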
@@ -4657,8 +4724,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw);
}
- hw->ucast_shared = true;
-
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5717,6 +5782,9 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
NETIF_F_HW_VLAN_STAG_RX | \
NETIF_F_HW_VLAN_STAG_TX)
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_STAG_FILTER)
@@ -5803,6 +5871,14 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
NETIF_F_HW_VLAN_STAG_TX);
}
+ if (!(netdev->features & NETIF_F_RXFCS) &&
+ (features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+ !ice_vsi_has_non_zero_vlans(np->vsi)) {
+ netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ }
+
return features;
}
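To make the feature fix-up above easier to follow, here is a tiny user-space model of the rule it enforces: a request to keep the FCS/CRC (RXFCS) while also requesting VLAN stripping is resolved in favour of RXFCS when no VLANs are configured. The F_* bit values are arbitrary stand-ins for the NETIF_F_* feature flags.

#include <stdio.h>

#define F_RXFCS		0x1
#define F_CTAG_RX	0x2
#define F_STAG_RX	0x4
#define F_VLAN_STRIP	(F_CTAG_RX | F_STAG_RX)

/* Mirror of the check in ice_fix_features(): when RXFCS is newly requested
 * together with VLAN stripping and no VLAN exists, drop the stripping bits.
 */
static unsigned int fix_features(unsigned int cur, unsigned int req, int has_vlans)
{
	if (!(cur & F_RXFCS) && (req & F_RXFCS) &&
	    (req & F_VLAN_STRIP) && !has_vlans)
		req &= ~F_VLAN_STRIP;
	return req;
}

int main(void)
{
	printf("%#x\n", fix_features(0, F_RXFCS | F_VLAN_STRIP, 0));	/* 0x1 */
	printf("%#x\n", fix_features(0, F_RXFCS | F_VLAN_STRIP, 1));	/* 0x7 */
	return 0;
}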
@@ -5896,6 +5972,13 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
if (current_vlan_features ^ requested_vlan_features) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+ return -EIO;
+ }
+
err = ice_set_vlan_offload_features(vsi, features);
if (err)
return err;
@@ -5977,6 +6060,23 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (ret)
return ret;
+ /* Turn on receive of FCS aka CRC, and after setting this
+ * flag the packet data will have the 4 byte CRC appended
+ */
+ if (changed & NETIF_F_RXFCS) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+ return -EIO;
+ }
+
+ ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+ ret = ice_down_up(vsi);
+ if (ret)
+ return ret;
+ }
+
if (changed & NETIF_F_NTUPLE) {
bool ena = !!(features & NETIF_F_NTUPLE);
@@ -6624,7 +6724,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+ int i, tx_err, rx_err, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
@@ -6658,20 +6758,13 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- link_err = ice_force_phys_link_state(vsi, false);
- if (link_err)
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
- }
-
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err || vlan_err) {
+ if (tx_err || rx_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6681,6 +6774,31 @@ int ice_down(struct ice_vsi *vsi)
}
/**
+ * ice_down_up - shutdown the VSI connection and bring it up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+ int ret;
+
+ /* if DOWN already set, nothing to do */
+ if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+ return 0;
+
+ ret = ice_down(vsi);
+ if (ret)
+ return ret;
+
+ ret = ice_up(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @vsi: VSI having resources allocated
*
@@ -6833,6 +6951,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8865,6 +8985,16 @@ int ice_stop(struct net_device *netdev)
return -EBUSY;
}
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ return -EIO;
+ }
+ }
+
ice_vsi_close(vsi);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 13cdb5ea594d..c262dc886e6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -1114,14 +1114,18 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
*
- * cmd_flags controls which banks to activate, and the preservation level to
- * use when activating the NVM bank.
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16bit cmd_flags value is split between two separate 1 byte
+ * flag values in the descriptor.
*
* On successful return of the firmware command, the response_flags variable
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
@@ -1130,7 +1134,8 @@ int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = cmd_flags;
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!err && response_flags)
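Since the kernel-doc above notes that the 16-bit cmd_flags word is split across two 1-byte descriptor fields, here is a short standalone sketch of that packing. struct nvm_cmd_model is a simplified stand-in, not the real struct ice_aqc_nvm layout.

#include <stdio.h>
#include <stdint.h>

struct nvm_cmd_model {
	uint8_t cmd_flags;	/* low 8 bits of the 16-bit flag word */
	uint8_t offset_high;	/* high 8 bits of the 16-bit flag word */
};

/* Pack a 16-bit flag word into the two byte-sized descriptor fields, the same
 * way ice_nvm_write_activate() does above.
 */
static void pack_flags(struct nvm_cmd_model *cmd, uint16_t cmd_flags)
{
	cmd->cmd_flags = (uint8_t)(cmd_flags & 0xFF);
	cmd->offset_high = (uint8_t)((cmd_flags >> 8) & 0xFF);
}

int main(void)
{
	struct nvm_cmd_model cmd;

	pack_flags(&cmd, 0x0142);	/* arbitrary example flag value */
	printf("cmd_flags=0x%02x offset_high=0x%02x\n",
	       cmd.cmd_flags, cmd.offset_high);
	return 0;
}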
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 856d1ad4398b..774c2317967d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -34,7 +34,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
int ice_nvm_validate_checksum(struct ice_hw *hw);
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags);
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
int ice_aq_nvm_update_empr(struct ice_hw *hw);
int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 560efc7654c7..02a4e1cf624e 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -44,6 +44,7 @@ enum ice_protocol_type {
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PPPOE,
+ ICE_L2TPV3,
ICE_VLAN_EX,
ICE_VLAN_IN,
ICE_VXLAN_GPE,
@@ -111,6 +112,7 @@ enum ice_prot_id {
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
#define ICE_PPPOE_HW 103
+#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
@@ -217,6 +219,11 @@ struct ice_pppoe_hdr {
__be16 ppp_prot_id; /* control and data only */
};
+struct ice_l2tpv3_sess_hdr {
+ __be32 session_id;
+ __be64 cookie;
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -235,6 +242,7 @@ union ice_prot_hdr {
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
+ struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 72b663108a4a..011b727ab190 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -491,56 +491,6 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
}
/**
- * ice_ptp_update_cached_phctime - Update the cached PHC time values
- * @pf: Board specific private structure
- *
- * This function updates the system time values which are cached in the PF
- * structure and the Rx rings.
- *
- * This function must be called periodically to ensure that the cached value
- * is never more than 2 seconds old. It must also be called whenever the PHC
- * time has been changed.
- *
- * Return:
- * * 0 - OK, successfully updated
- * * -EAGAIN - PF was busy, need to reschedule the update
- */
-static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
-{
- u64 systime;
- int i;
-
- if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
- return -EAGAIN;
-
- /* Read the current PHC time */
- systime = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* Update the cached PHC time stored in the PF structure */
- WRITE_ONCE(pf->ptp.cached_phc_time, systime);
-
- ice_for_each_vsi(pf, i) {
- struct ice_vsi *vsi = pf->vsi[i];
- int j;
-
- if (!vsi)
- continue;
-
- if (vsi->type != ICE_VSI_PF)
- continue;
-
- ice_for_each_rxq(vsi, j) {
- if (!vsi->rx_rings[j])
- continue;
- WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
- }
- }
- clear_bit(ICE_CFG_BUSY, pf->state);
-
- return 0;
-}
-
-/**
* ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
* @cached_phc_time: recently cached copy of PHC time
* @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
@@ -636,12 +586,400 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
const u64 mask = GENMASK_ULL(31, 0);
+ unsigned long discard_time;
+
+ /* Discard the hardware timestamp if the cached PHC time is too old */
+ discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (time_is_before_jiffies(discard_time)) {
+ pf->ptp.tx_hwtstamp_discarded++;
+ return 0;
+ }
return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
(in_tstamp >> 8) & mask);
}
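The new discard check in ice_ptp_extend_40b_ts() drops a hardware timestamp when the cached PHC time that would be used to extend it is more than roughly two seconds old, since the 32-bit extension would then be ambiguous. A hedged, user-space model of that window check, using plain millisecond counters instead of kernel jiffies:

#include <stdio.h>

/* Return 1 if the cached PHC snapshot is still recent enough to extend a
 * 32/40-bit hardware timestamp, 0 if the timestamp should be discarded.
 */
static int tstamp_usable(unsigned long now_ms, unsigned long cached_ms)
{
	unsigned long discard_time = cached_ms + 2000;	/* ~2 s validity window */

	return now_ms < discard_time;
}

int main(void)
{
	printf("%d\n", tstamp_usable(1500, 0));	/* 1: cache still fresh */
	printf("%d\n", tstamp_usable(2500, 0));	/* 0: cache too old, discard */
	return 0;
}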
/**
+ * ice_ptp_tx_tstamp - Process Tx timestamps for a port
+ * @tx: the PTP Tx timestamp tracker
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) copy the timestamp out of the PHY register
+ * 2) clear the timestamp valid bit in the PHY register
+ * 3) unlock the index by clearing the associated in_use bit.
+ * 4) extend the 40b timestamp value to get a 64bit timestamp
+ * 5) send that timestamp to the stack
+ *
+ * After looping, if we still have waiting SKBs, return true. This may cause us
+ * to effectively poll even when not strictly necessary. We do this because it's
+ * possible a new timestamp was requested around the same time as the interrupt.
+ * In some cases hardware might not interrupt us again when the timestamp is
+ * captured.
+ *
+ * Note that we only take the tracking lock when clearing the bit and when
+ * checking if we need to re-queue this task. The only place where bits can be
+ * set is the hard xmit routine where an SKB has a request flag set. The only
+ * places where we clear bits are this work function, or the periodic cleanup
+ * thread. If the cleanup thread clears a bit we're processing we catch it
+ * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
+ * starts a new timestamp, we might not begin processing it right away but we
+ * will notice it at the end when we re-queue the task. If a Tx thread starts
+ * a new timestamp just after this function exits without re-queuing,
+ * the interrupt when the timestamp finishes should trigger. Avoiding holding
+ * the lock for the entire function is important in order to ensure that Tx
+ * threads do not get blocked while waiting for the lock.
+ */
+static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+{
+ struct ice_ptp_port *ptp_port;
+ bool ts_handled = true;
+ struct ice_pf *pf;
+ u8 idx;
+
+ if (!tx->init)
+ return false;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->quad_offset;
+ u64 raw_tstamp, tstamp;
+ struct sk_buff *skb;
+ int err;
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ err = ice_read_phy_tstamp(&pf->hw, tx->quad, phy_idx,
+ &raw_tstamp);
+ if (err)
+ continue;
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ continue;
+
+ /* The timestamp is valid, so we'll go ahead and clear this
+ * index and then send the timestamp up to the stack.
+ */
+ spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ spin_unlock(&tx->lock);
+
+ /* it's (unlikely but) possible we raced with the cleanup
+ * thread for discarding old timestamp requests.
+ */
+ if (!skb)
+ continue;
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ if (!bitmap_empty(tx->in_use, tx->len))
+ ts_handled = false;
+ spin_unlock(&tx->lock);
+
+ return ts_handled;
+}
+
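The locking notes in the ice_ptp_tx_tstamp() kernel-doc describe a deliberately lock-light pattern: the spinlock is only held while claiming a slot (clearing its in_use bit and taking the skb pointer), never across the slow PHY reads or the call into the stack. A simplified, self-contained model of that discipline; the bitmap, mutex and placeholder "skb" strings are illustrative only.

#include <pthread.h>
#include <stdio.h>

#define SLOTS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long in_use = 0x5;			/* slots 0 and 2 pending */
static const char *skbs[SLOTS] = { "skb0", NULL, "skb2", NULL };

static void process_pending(void)
{
	for (int idx = 0; idx < SLOTS; idx++) {
		const char *skb;

		if (!(in_use & (1UL << idx)))
			continue;

		pthread_mutex_lock(&lock);
		in_use &= ~(1UL << idx);		/* release the slot */
		skb = skbs[idx];
		pthread_mutex_unlock(&lock);

		if (!skb)				/* raced with cleanup */
			continue;

		/* the slow work happens outside the lock */
		printf("report timestamp for %s\n", skb);
	}
}

int main(void)
{
	process_pending();
	return 0;
}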
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call directly,
+ * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
+ if (!tx->tstamps)
+ return -ENOMEM;
+
+ tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ if (!tx->in_use) {
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&tx->lock);
+
+ tx->init = 1;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ for (idx = 0; idx < tx->len; idx++) {
+ u8 phy_idx = idx + tx->quad_offset;
+
+ spin_lock(&tx->lock);
+ if (tx->tstamps[idx].skb) {
+ dev_kfree_skb_any(tx->tstamps[idx].skb);
+ tx->tstamps[idx].skb = NULL;
+ pf->ptp.tx_hwtstamp_flushed++;
+ }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->init = 0;
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+ bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->quad = pf->hw.port_info->lport;
+ tx->quad_offset = 0;
+ tx->len = INDEX_PER_QUAD;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @pf: pointer to the PF struct
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+ * waiting for a long time. Discard any SKBs that have been waiting for more
+ * than 2 seconds. This is long enough to be reasonably sure that the
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ struct ice_hw *hw = &pf->hw;
+ u8 idx;
+
+ if (!tx->init)
+ return;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
+ u64 raw_tstamp;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
+ /* Read tstamp to be able to use this register again */
+ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+ &raw_tstamp);
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Count the number of Tx timestamps which have timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Note that the cached copy in the PF PTP structure is always updated, even
+ * if we can't update the copy in the Rx rings.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ unsigned long update_before;
+ u64 systime;
+ int i;
+
+ update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (pf->ptp.cached_phc_time &&
+ time_is_before_jiffies(update_before)) {
+ unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
+
+ dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
+ jiffies_to_msecs(time_taken));
+ pf->ptp.late_cached_phc_updates++;
+ }
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+ WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
+
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
+ * @pf: Board specific private structure
+ *
+ * This function must be called when the cached PHC time is no longer valid,
+ * such as after a time adjustment. It discards any outstanding Tx timestamps,
+ * and updates the cached PHC time for both the PF and Rx rings. If updating
+ * the PHC time cannot be done immediately, a warning message is logged and
+ * the work item is scheduled.
+ *
+ * These steps are required in order to ensure that we do not accidentally
+ * report a timestamp extended by the wrong PHC cached copy. Note that we
+ * do not directly update the cached timestamp here because it is possible
+ * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we
+ * would have to try again. During that time window, timestamps might be
+ * requested and returned with an invalid extension. Thus, on failure to
+ * immediately update the cached PHC time we would need to zero the value
+ * anyways. For this reason, we just zero the value immediately and queue the
+ * update work item.
+ */
+static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Update the cached PHC time immediately if possible, otherwise
+ * schedule the work item to execute soon.
+ */
+ err = ice_ptp_update_cached_phctime(pf);
+ if (err) {
+ /* If another thread is updating the Rx rings, we won't
+ * properly reset them here. This could lead to reporting of
+ * invalid timestamps, but there isn't much we can do.
+ */
+ dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
+ __func__);
+
+ /* Queue the work item to update the Rx rings when possible */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
+ msecs_to_jiffies(10));
+ }
+
+ /* Flush any outstanding Tx timestamps */
+ ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
+}
+
+/**
* ice_ptp_read_time - Read the time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
@@ -900,6 +1238,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
+ if (ice_is_reset_in_progress(pf->state))
+ return;
+
if (ice_ptp_check_offset_valid(port)) {
/* Offsets not ready yet, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1509,7 +1850,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_unlock(hw);
if (!err)
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
@@ -1588,7 +1929,7 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return err;
}
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
return 0;
}
@@ -1796,26 +2137,31 @@ void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
+ struct skb_shared_hwtstamps *hwtstamps;
+ u64 ts_ns, cached_time;
u32 ts_high;
- u64 ts_ns;
- /* Populate timesync data into skb */
- if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
- struct skb_shared_hwtstamps *hwtstamps;
+ if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
+ return;
- /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
- * cached PHC value, rather than accessing the PF. This also
- * allows us to simply pass the upper 32bits of nanoseconds
- * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
- * it would just discard these bits itself.
- */
- ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
- ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+ cached_time = READ_ONCE(rx_ring->cached_phctime);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
- }
+ /* Do not report a timestamp if we don't have a cached PHC time */
+ if (!cached_time)
+ return;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
+ * PHC value, rather than accessing the PF. This also allows us to
+ * simply pass the upper 32bits of nanoseconds directly. Calling
+ * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
+ * bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}
/**
@@ -1871,49 +2217,26 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
static void
-ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- /* Check if SMA controller is in the netlist */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
- !ice_is_pca9575_present(&pf->hw))
- ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
-
- if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
- info->n_per_out = N_PER_OUT_E810T_NO_SMA;
- return;
- }
+ info->n_per_out = N_PER_OUT_E810;
- info->n_per_out = N_PER_OUT_E810T;
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ info->n_ext_ts = N_EXT_TS_E810;
- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
info->n_ext_ts = N_EXT_TS_E810;
info->n_pins = NUM_PTP_PINS_E810T;
info->verify = ice_verify_pin_e810t;
- }
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
-}
-
-/**
- * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- info->n_per_out = N_PER_OUT_E810;
-
- if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
- return;
-
- info->n_ext_ts = N_EXT_TS_E810;
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
}
/**
@@ -1950,11 +2273,7 @@ static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
-
- if (ice_is_e810t(&pf->hw))
- ice_ptp_setup_pins_e810t(pf, info);
- else
- ice_ptp_setup_pins_e810(pf, info);
+ ice_ptp_setup_pins_e810(pf, info);
}
/**
@@ -2016,112 +2335,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
}
/**
- * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
- * @work: pointer to the kthread_work struct
- *
- * Process timestamps captured by the PHY associated with this port. To do
- * this, loop over each index with a waiting skb.
- *
- * If a given index has a valid timestamp, perform the following steps:
- *
- * 1) copy the timestamp out of the PHY register
- * 4) clear the timestamp valid bit in the PHY register
- * 5) unlock the index by clearing the associated in_use bit.
- * 2) extend the 40b timestamp value to get a 64bit timestamp
- * 3) send that timestamp to the stack
- *
- * After looping, if we still have waiting SKBs, then re-queue the work. This
- * may cause us effectively poll even when not strictly necessary. We do this
- * because it's possible a new timestamp was requested around the same time as
- * the interrupt. In some cases hardware might not interrupt us again when the
- * timestamp is captured.
- *
- * Note that we only take the tracking lock when clearing the bit and when
- * checking if we need to re-queue this task. The only place where bits can be
- * set is the hard xmit routine where an SKB has a request flag set. The only
- * places where we clear bits are this work function, or the periodic cleanup
- * thread. If the cleanup thread clears a bit we're processing we catch it
- * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
- * starts a new timestamp, we might not begin processing it right away but we
- * will notice it at the end when we re-queue the work item. If a Tx thread
- * starts a new timestamp just after this function exits without re-queuing,
- * the interrupt when the timestamp finishes should trigger. Avoiding holding
- * the lock for the entire function is important in order to ensure that Tx
- * threads do not get blocked while waiting for the lock.
- */
-static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
-{
- struct ice_ptp_port *ptp_port;
- struct ice_ptp_tx *tx;
- struct ice_pf *pf;
- struct ice_hw *hw;
- u8 idx;
-
- tx = container_of(work, struct ice_ptp_tx, work);
- if (!tx->init)
- return;
-
- ptp_port = container_of(tx, struct ice_ptp_port, tx);
- pf = ptp_port_to_pf(ptp_port);
- hw = &pf->hw;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct skb_shared_hwtstamps shhwtstamps = {};
- u8 phy_idx = idx + tx->quad_offset;
- u64 raw_tstamp, tstamp;
- struct sk_buff *skb;
- int err;
-
- ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
-
- err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
- &raw_tstamp);
- if (err)
- continue;
-
- ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
-
- /* Check if the timestamp is invalid or stale */
- if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
- continue;
-
- /* The timestamp is valid, so we'll go ahead and clear this
- * index and then send the timestamp up to the stack.
- */
- spin_lock(&tx->lock);
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
- clear_bit(idx, tx->in_use);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- spin_unlock(&tx->lock);
-
- /* it's (unlikely but) possible we raced with the cleanup
- * thread for discarding old timestamp requests.
- */
- if (!skb)
- continue;
-
- /* Extend the timestamp using cached PHC time */
- tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
- shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
-
- ice_trace(tx_tstamp_complete, skb, idx);
-
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
- }
-
- /* Check if we still have work to do. If so, re-queue this task to
- * poll for remaining timestamps.
- */
- spin_lock(&tx->lock);
- if (!bitmap_empty(tx->in_use, tx->len))
- kthread_queue_work(pf->ptp.kworker, &tx->work);
- spin_unlock(&tx->lock);
-}
-
-/**
* ice_ptp_request_ts - Request an available Tx timestamp index
* @tx: the PTP Tx timestamp tracker to request from
* @skb: the SKB to associate with this timestamp request
@@ -2161,177 +2374,17 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
}
/**
- * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * ice_ptp_process_ts - Process the PTP Tx timestamps
* @pf: Board private structure
*
- * Queue work required to process the PTP Tx timestamps outside of interrupt
- * context.
+ * Returns true if timestamps are processed.
*/
-void ice_ptp_process_ts(struct ice_pf *pf)
+bool ice_ptp_process_ts(struct ice_pf *pf)
{
if (pf->ptp.port.tx.init)
- kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
-}
-
-/**
- * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
- * @tx: Tx tracking structure to initialize
- *
- * Assumes that the length has already been initialized. Do not call directly,
- * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
- */
-static int
-ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
-{
- tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
- if (!tx->tstamps)
- return -ENOMEM;
-
- tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
- if (!tx->in_use) {
- kfree(tx->tstamps);
- tx->tstamps = NULL;
- return -ENOMEM;
- }
-
- spin_lock_init(&tx->lock);
- kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
-
- tx->init = 1;
-
- return 0;
-}
-
-/**
- * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
- * @pf: Board private structure
- * @tx: the tracker to flush
- */
-static void
-ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- for (idx = 0; idx < tx->len; idx++) {
- u8 phy_idx = idx + tx->quad_offset;
-
- spin_lock(&tx->lock);
- if (tx->tstamps[idx].skb) {
- dev_kfree_skb_any(tx->tstamps[idx].skb);
- tx->tstamps[idx].skb = NULL;
- }
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
- }
-}
-
-/**
- * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
- * @pf: Board private structure
- * @tx: Tx tracking structure to release
- *
- * Free memory associated with the Tx timestamp tracker.
- */
-static void
-ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->init = 0;
-
- kthread_cancel_work_sync(&tx->work);
-
- ice_ptp_flush_tx_tracker(pf, tx);
-
- kfree(tx->tstamps);
- tx->tstamps = NULL;
-
- bitmap_free(tx->in_use);
- tx->in_use = NULL;
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
- tx->len = 0;
-}
-
-/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
- * the timestamp block is shared for all ports in the same quad. To avoid
- * ports using the same timestamp index, logically break the block of
- * registers into chunks based on the port number.
- */
-static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
-{
- tx->quad = port / ICE_PORTS_PER_QUAD;
- tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
- tx->len = INDEX_PER_PORT;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- *
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
- */
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->quad = pf->hw.port_info->lport;
- tx->quad_offset = 0;
- tx->len = INDEX_PER_QUAD;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
- * @hw: pointer to the hw struct
- * @tx: PTP Tx tracker to clean up
- *
- * Loop through the Tx timestamp requests and see if any of them have been
- * waiting for a long time. Discard any SKBs that have been waiting for more
- * than 2 seconds. This is long enough to be reasonably sure that the
- * timestamp will never be captured. This might happen if the packet gets
- * discarded before it reaches the PHY timestamping block.
- */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- if (!tx->init)
- return;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct sk_buff *skb;
- u64 raw_tstamp;
-
- /* Check if this SKB has been waiting for too long */
- if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
- continue;
-
- /* Read tstamp to be able to use this register again */
- ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
- &raw_tstamp);
-
- spin_lock(&tx->lock);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Free the SKB after we've cleared the bit */
- dev_kfree_skb_any(skb);
- }
+ return false;
}
static void ice_ptp_periodic_work(struct kthread_work *work)
@@ -2345,7 +2398,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
err = ice_ptp_update_cached_phctime(pf);
- ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
+ ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 10e396abf130..028349295b71 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -105,7 +105,6 @@ struct ice_tx_tstamp {
/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
- * @work: work function to handle processing of Tx timestamps
* @lock: lock to prevent concurrent write to in_use bitmap
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
@@ -117,7 +116,6 @@ struct ice_tx_tstamp {
* window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
- struct kthread_work work;
spinlock_t lock; /* lock protecting in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
@@ -163,6 +161,7 @@ struct ice_ptp_port {
* @work: delayed work function for periodic tasks
* @extts_work: work function for handling external Tx timestamps
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
* @ext_ts_chan: the external timestamp channel in use
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
@@ -171,12 +170,19 @@ struct ice_ptp_port {
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
+ * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
+ * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
+ * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time
+ * being too old to correctly extend timestamp
+ * @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
struct kthread_work extts_work;
u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
u8 ext_ts_chan;
u8 ext_ts_irq;
struct kthread_worker *kworker;
@@ -185,6 +191,11 @@ struct ice_ptp {
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
u64 reset_time;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_flushed;
+ u32 tx_hwtstamp_discarded;
+ u32 late_cached_phc_updates;
};
#define __ptp_port_to_ptp(p) \
@@ -224,8 +235,8 @@ struct ice_ptp {
#define N_EXT_TS_E810 3
#define N_PER_OUT_E810 4
#define N_PER_OUT_E810T 3
-#define N_PER_OUT_E810T_NO_SMA 2
-#define N_EXT_TS_E810_NO_SMA 2
+#define N_PER_OUT_NO_SMA_E810T 2
+#define N_EXT_TS_NO_SMA_E810T 2
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
@@ -236,7 +247,7 @@ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
-void ice_ptp_process_ts(struct ice_pf *pf);
+bool ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
@@ -269,7 +280,10 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
-static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline bool ice_ptp_process_ts(struct ice_pf *pf)
+{
+ return true;
+}
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 6dff97d53d81..772b1f566d6e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
+#include <linux/delay.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -2587,38 +2588,113 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
}
/**
- * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read an 8 bit timestamp high value and 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the low latency
+ * timestamp read.
+ */
+static int
+ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
+{
+ u32 val;
+ u8 i;
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
+ wr32(hw, PF_SB_ATQBAL, val);
+
+ /* Read the register repeatedly until the FW provides us the TS */
+ for (i = TS_LL_READ_RETRIES; i > 0; i--) {
+ val = rd32(hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (!(FIELD_GET(TS_LL_READ_TS, val))) {
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ /* FW failed to provide the TS in time */
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ return -EINVAL;
+}
+
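The low latency read above follows a common poll-with-retries shape: write the timestamp index to PF_SB_ATQBAL, then spin until firmware clears the busy bit and exposes the high byte of the timestamp in the same register. The sketch below models only that polling loop in user space; the mask values mirror the TS_LL_READ_* definitions added in ice_ptp_hw.h, while fake_read() is a purely illustrative stand-in for the register read.

#include <stdio.h>
#include <stdint.h>

#define LL_TS_HIGH_MASK	0x00FF0000u	/* bits 23:16, high 8 bits of the TS */
#define LL_TS_BUSY	0x80000000u	/* bit 31, set until FW completes */
#define LL_RETRIES	200

static uint32_t fake_read(int attempt)
{
	/* Pretend firmware needs three polls before the timestamp is ready */
	return attempt < 3 ? LL_TS_BUSY : (0xABu << 16);
}

/* Poll until the busy bit clears, then extract the high timestamp byte */
static int read_ts_high(uint8_t *hi)
{
	for (int i = 0; i < LL_RETRIES; i++) {
		uint32_t val = fake_read(i);

		if (!(val & LL_TS_BUSY)) {
			*hi = (uint8_t)((val & LL_TS_HIGH_MASK) >> 16);
			return 0;
		}
		/* the driver waits udelay(10) between polls here */
	}
	return -1;	/* firmware never answered in time */
}

int main(void)
{
	uint8_t hi;

	if (!read_ts_high(&hi))
		printf("ts high byte: 0x%02x\n", hi);
	return 0;
}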
+/**
+ * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
* @hw: pointer to the HW struct
* @lport: the lport to read from
* @idx: the timestamp index to read
- * @tstamp: on return, the 40bit timestamp value
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
*
- * Read a 40bit timestamp value out of the timestamp block of the external PHY
- * on the E810 device.
+ * Read an 8 bit timestamp high value and 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using sideband queue.
*/
static int
-ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
+ u32 *lo)
{
- u32 lo_addr, hi_addr, lo, hi;
+ u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_val, hi_val;
int err;
- lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
- hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
-
- err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
return err;
}
+ *lo = lo_val;
+ *hi = (u8)hi_val;
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static int
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+ u32 lo = 0;
+ u8 hi = 0;
+ int err;
+
+ if (hw->dev_caps.ts_dev_info.ts_ll_read)
+ err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
+ else
+ err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
+
+ if (err)
+ return err;
+
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
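For reference, a minimal sketch of how the truncated tail of ice_read_phy_tstamp_e810() presumably assembles the 40-bit value from the two halves, using the TS_HIGH_S and TS_LOW_M macros from ice_ptp_hw.h; the actual assignment sits beyond this hunk, so treat this as an illustration only:

	/* upper 8 bits land above bit 32, lower 32 bits fill the rest */
	*tstamp = ((u64)hi << TS_HIGH_S) | ((u64)lo & TS_LOW_M);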
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1246e4ee4b5d..2bda64c76abc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -402,6 +402,7 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
+#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
@@ -413,6 +414,12 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
+/* Tx timestamp low latency read definitions */
+#define TS_LL_READ_RETRIES 200
+#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
+#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS BIT(31)
+
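A short sketch of how these bits are used against PF_SB_ATQBAL, mirroring the request/response handling in ice_read_phy_tstamp_ll_e810() above (shown only for clarity, not part of the patch):

	/* request: select the timestamp index and set the busy bit */
	wr32(hw, PF_SB_ATQBAL, FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS);

	/* response: once TS_LL_READ_TS clears, bits 23:16 carry the high byte
	 * and PF_SB_ATQBAH carries the low 32 bits
	 */
	val = rd32(hw, PF_SB_ATQBAL);
	hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);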
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 0dac67cd9c77..bd31748aae1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -377,10 +377,10 @@ static void ice_repr_rem(struct ice_vf *vf)
if (!vf->repr)
return;
- ice_devlink_destroy_vf_port(vf);
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
unregister_netdev(vf->repr->netdev);
+ ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7947223536e3..118595763bba 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1290,7 +1290,7 @@ err_init_port:
pi->root = NULL;
}
- devm_kfree(ice_hw_to_dev(hw), buf);
+ kfree(buf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 262e553e3b58..9b762f7972ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -42,6 +42,7 @@ enum {
ICE_PKT_GTP_NOPAY = BIT(8),
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
+ ICE_PKT_L2TPV3 = BIT(11),
};
struct ice_dummy_pkt_offsets {
@@ -1258,6 +1259,65 @@ ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
+ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_L2TPV3, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_L2TPV3, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
+ 0x00, 0x0c, 0x73, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
ICE_PKT_GTP_NOPAY),
@@ -1297,6 +1357,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6 |
ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6),
@@ -2274,9 +2336,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
int status;
u16 i;
- rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
- GFP_KERNEL);
-
+ rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
if (!rbuf)
return -ENOMEM;
@@ -2324,7 +2384,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), rbuf);
+ kfree(rbuf);
return status;
}
@@ -3449,31 +3509,15 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
- *
- * IMPORTANT: When the ucast_shared flag is set to false and m_list has
- * multiple unicast addresses, the function assumes that all the
- * addresses are unique in a given add_mac call. It doesn't
- * check for duplicates in this case, removing duplicates from a given
- * list should be taken care of in the caller of this function.
*/
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
- struct list_head *rule_head;
- u16 total_elem_left, s_rule_size;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- u16 num_unicast = 0;
int status = 0;
- u8 elem_sent;
if (!m_list || !hw)
return -EINVAL;
- s_rule = NULL;
- sw = hw->switch_info;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
@@ -3492,106 +3536,13 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
return -EINVAL;
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't overwrite the unicast address */
- mutex_lock(rule_lock);
- if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
- &m_list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -EEXIST;
- }
- mutex_unlock(rule_lock);
- num_unicast++;
- } else if (is_multicast_ether_addr(add) ||
- (is_unicast_ether_addr(add) && hw->ucast_shared)) {
- m_list_itr->status =
- ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
- m_list_itr);
- if (m_list_itr->status)
- return m_list_itr->status;
- }
- }
-
- mutex_lock(rule_lock);
- /* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast) {
- status = 0;
- goto ice_add_mac_exit;
- }
-
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_unicast_ether_addr(mac_addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
- ice_aqc_opc_add_sw_rules);
- r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
- }
+ m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
-
- elem_sent = min_t(u8, total_elem_left,
- (ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_add_sw_rules,
- NULL);
- if (status)
- goto ice_add_mac_exit;
- r_iter = (typeof(s_rule))
- ((u8 *)r_iter + (elem_sent * s_rule_size));
- }
-
- /* Fill up rule ID based on the value returned from FW */
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_fltr_mgmt_list_entry *fm_entry;
-
- if (is_unicast_ether_addr(mac_addr)) {
- f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
- f_info->fltr_act = ICE_FWD_TO_VSI;
- /* Create an entry to track this MAC address */
- fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fm_entry), GFP_KERNEL);
- if (!fm_entry) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
- fm_entry->fltr_info = *f_info;
- fm_entry->vsi_count = 1;
- /* The book keeping entries will get removed when
- * base driver calls remove filter AQ command
- */
-
- list_add(&fm_entry->list_entry, rule_head);
- r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
- }
- }
-
-ice_add_mac_exit:
- mutex_unlock(rule_lock);
- if (s_rule)
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -3979,38 +3930,6 @@ ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
- * @hw: pointer to the hardware structure
- * @recp_id: lookup type for which the specified rule needs to be searched
- * @f_info: rule information
- *
- * Helper function to search for a unicast rule entry - this is to be used
- * to remove unicast MAC filter that is not shared with other VSIs on the
- * PF switch.
- *
- * Returns pointer to entry storing the rule if found
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
- struct ice_fltr_info *f_info)
-{
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *list_head;
-
- list_head = &sw->recp_list[recp_id].filt_rules;
- list_for_each_entry(list_itr, list_head, list_entry) {
- if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
- sizeof(f_info->l_data)) &&
- f_info->fwd_id.hw_vsi_id ==
- list_itr->fltr_info.fwd_id.hw_vsi_id &&
- f_info->flag == list_itr->fltr_info.flag)
- return list_itr;
- }
- return NULL;
-}
-
-/**
* ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -4026,15 +3945,12 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return -EINVAL;
- rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
- u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
@@ -4046,19 +3962,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't remove the unicast address that belongs to
- * another VSI on the switch, since it is not being
- * shared...
- */
- mutex_lock(rule_lock);
- if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
- &list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -ENOENT;
- }
- mutex_unlock(rule_lock);
- }
+
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
@@ -4445,6 +4349,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4363,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -4641,6 +4552,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
{ ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
{ ICE_VLAN_EX, { 2, 0 } },
{ ICE_VLAN_IN, { 2, 0 } },
};
@@ -4664,6 +4576,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
{ ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
@@ -5747,7 +5660,8 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
htons(PPP_IPV6))
match |= ICE_PKT_OUTER_IPV6;
- }
+ } else if (lkups[i].type == ICE_L2TPV3)
+ match |= ICE_PKT_L2TPV3;
}
while (ret->match && (match & ret->match) != ret->match)
@@ -5848,6 +5762,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
+ case ICE_L2TPV3:
+ len = sizeof(struct ice_l2tpv3_sess_hdr);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index a298862857a8..f68c555be4e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -36,6 +36,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))
+ lkups_cnt++;
+
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
@@ -47,11 +51,11 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
lkups_cnt++;
/* is VLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
/* is CVLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
/* are PPPoE options specified? */
@@ -64,6 +68,13 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
+ lkups_cnt++;
+
+ /* are L2TPv3 options specified? */
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
+ lkups_cnt++;
+
/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
@@ -257,6 +268,50 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ hdr->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ hdr->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ hdr_h->hop_limit = hdr->l3_key.ttl;
+ hdr_m->hop_limit = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
hdr->l3_key.ip_proto == IPPROTO_UDP) {
list[i].type = ICE_UDP_OF;
@@ -334,7 +389,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
}
/* copy VLAN info */
- if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
@@ -343,15 +398,45 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
list[i].type = ICE_VLAN_EX;
else
list[i].type = ICE_VLAN_OFOS;
- list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->vlan_hdr.vlan_prio;
+ }
+
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
list[i].type = ICE_VLAN_IN;
- list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->cvlan_hdr.vlan_prio;
+ }
+
i++;
}
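The new masks follow the 802.1Q TCI layout: PCP in bits 15:13, DEI in bit 12, VID in bits 11:0. A brief illustration of what each constant selects (annotation only, not part of the patch):

	/* TCI layout: | PCP (15:13) | DEI (12) | VID (11:0) |
	 * 0x0FFF - match the VLAN ID only
	 * 0xE000 - match the priority (PCP) only
	 * 0xEFFF - match both PCP and VID while leaving DEI out of the match
	 */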
@@ -420,6 +505,61 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
+ if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live =
+ headers->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live =
+ headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ headers->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ headers->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ hdr_h->hop_limit = headers->l3_key.ttl;
+ hdr_m->hop_limit = headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
+ list[i].type = ICE_L2TPV3;
+
+ list[i].h_u.l2tpv3_sess_hdr.session_id =
+ headers->l2tpv3_hdr.session_id;
+ list[i].m_u.l2tpv3_sess_hdr.session_id =
+ cpu_to_be32(0xFFFFFFFF);
+
+ i++;
+ }
+
/* copy L4 (src, dest) port */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
@@ -839,6 +979,40 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
}
/**
+ * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel
+ */
+static void
+ice_tc_set_tos_ttl(struct flow_match_ip *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ bool is_encap)
+{
+ if (match->mask->tos) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
+
+ headers->l3_key.tos = match->key->tos;
+ headers->l3_mask.tos = match->mask->tos;
+ }
+
+ if (match->mask->ttl) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
+
+ headers->l3_key.ttl = match->key->ttl;
+ headers->l3_mask.ttl = match->mask->ttl;
+ }
+}
+
+/**
* ice_tc_set_port - Parse ports from TC flower filter
* @match: Flow match structure
* @fltr: Pointer to filter structure
@@ -967,10 +1141,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- headers->l3_key.tos = match.key->tos;
- headers->l3_key.ttl = match.key->ttl;
- headers->l3_mask.tos = match.mask->tos;
- headers->l3_mask.ttl = match.mask->ttl;
+ ice_tc_set_tos_ttl(&match, fltr, headers, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
@@ -1039,9 +1210,11 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
+ BIT(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
@@ -1137,16 +1310,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
return -EINVAL;
}
}
- headers->vlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ headers->vlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
+
if (match.mask->vlan_tpid)
headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
}
@@ -1164,6 +1343,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Bad CVLAN mask");
@@ -1171,10 +1353,12 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
}
}
- headers->cvlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ headers->cvlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
@@ -1217,6 +1401,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ ice_tc_set_tos_ttl(&match, fltr, headers, false);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
+ struct flow_match_l2tpv3 match;
+
+ flow_rule_match_l2tpv3(rule, &match);
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
+ headers->l2tpv3_hdr.session_id = match.key->session_id;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 91cd3d3778c7..92642faad595 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -26,9 +26,18 @@
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
+#define ICE_TC_FLWR_FIELD_IP_TOS BIT(22)
+#define ICE_TC_FLWR_FIELD_IP_TTL BIT(23)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TOS BIT(24)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TTL BIT(25)
+#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
+#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
+#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+#define ICE_IPV6_HDR_TC_MASK 0xFF00000
+
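A minimal sketch of where this mask points in the IPv6 header: the first 32-bit word packs Version (bits 31:28), Traffic Class (bits 27:20) and Flow Label (bits 19:0), so 0xFF00000 selects the Traffic Class field that the ice_tc_lib.c hunks above rewrite via be32p_replace_bits() (illustration only, variable names assumed):

	/* write the ToS byte into the Traffic Class bits of ver/tc/flow */
	be32p_replace_bits(&hdr_h->be_ver_tc_flow, tos, ICE_IPV6_HDR_TC_MASK);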
struct ice_indr_block_priv {
struct net_device *netdev;
struct ice_netdev_priv *np;
@@ -42,7 +51,7 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
- u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
__be16 vlan_tpid;
};
@@ -80,6 +89,10 @@ struct ice_tc_l3_hdr {
u8 ttl;
};
+struct ice_tc_l2tpv3_hdr {
+ __be32 session_id;
+};
+
struct ice_tc_l4_hdr {
__be16 dst_port;
__be16 src_port;
@@ -92,6 +105,7 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_vlan_hdr vlan_hdr;
struct ice_tc_vlan_hdr cvlan_hdr;
struct ice_tc_pppoe_hdr pppoe_hdr;
+ struct ice_tc_l2tpv3_hdr l2tpv3_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 836dce840712..dbe80e5053a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
spin_lock(&xdp_ring->tx_lock);
} else {
+ /* Generally, should not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
xdp_ring = vsi->xdp_rings[queue_index];
}
@@ -1464,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
+ wd = ice_xmit_zc(tx_ring);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -2255,8 +2258,10 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
/* Grab an open timestamp slot */
idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
- if (idx < 0)
+ if (idx < 0) {
+ tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
return;
+ }
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ca902af54bb4..932b5661ec4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -295,10 +295,11 @@ struct ice_rx_ring {
struct xsk_buff_pool *xsk_pool;
struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
u64 cached_phctime;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 861b64322959..e1abfcee96dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -347,6 +347,7 @@ struct ice_ts_func_info {
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
+#define ICE_TS_LL_TX_TS_READ_M BIT(28)
struct ice_ts_dev_info {
/* Device specific info */
@@ -359,6 +360,7 @@ struct ice_ts_dev_info {
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
+ u8 ts_ll_read;
};
/* Function specific capabilities */
@@ -564,6 +566,8 @@ enum ice_rl_type {
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
+#define ICE_MAX_PORT_PER_PCI_DEV 8
+
/* Data structure for saving BW information */
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
@@ -885,8 +889,6 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
- u8 ucast_shared; /* true if VSIs can share unicast addr */
-
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e37f5e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
- if (WARN_ON(!vsi))
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
+ }
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops;
- int err;
+ int err = 0;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_tx_filtering(vsi);
- if (err)
- return err;
+ /* Allow a VF that has only VLAN 0 to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
return ice_cfg_mac_antispoof(vsi, true);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 094e3c97a1ea..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN filtering on first non-zero VLAN */
if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
/* Disable VLAN filtering when only VLAN 0 is left */
- if (!ice_vsi_has_non_zero_vlans(vsi))
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
- ice_vf_dis_vlan_promisc(vsi, &vlan);
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
}
}
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (err)
return err;
}
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
- if (err)
- return err;
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..056c904b83cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
+ ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
@@ -243,7 +244,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -317,6 +318,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
}
/**
+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate memory; return -ENOMEM if the allocation fails.
+ * If the allocation succeeds, substitute the buffer with the allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+ sizeof(*rx_ring->rx_buf);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ rx_ring->xdp_buf = sw_ring;
+ } else {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ rx_ring->rx_buf = sw_ring;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffers for the rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+ struct ice_rx_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+ rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
* @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -329,9 +386,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
- if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
- !is_power_of_2(vsi->tx_rings[qid]->count)) {
- netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
pool_failure = -EINVAL;
goto failure;
}
@@ -339,11 +395,17 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_pool_if_up;
}
+
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+ if (ret)
+ goto xsk_pool_if_up;
}
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
@@ -353,7 +415,7 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
@@ -465,11 +527,10 @@ exit:
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
- u16 batched, leftover, i, tail_bumps;
+ u16 leftover, i, tail_bumps;
- batched = ALIGN_DOWN(count, rx_thresh);
- tail_bumps = batched / rx_thresh;
- leftover = count & (rx_thresh - 1);
+ tail_bumps = count / rx_thresh;
+ leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
@@ -719,69 +780,57 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
}
/**
- * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
- * @xdp_ring: XDP ring to clean
- * @napi_budget: amount of descriptors that NAPI allows us to clean
- *
- * Returns count of cleaned descriptors
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
*/
-static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- int budget = napi_budget / tx_thresh;
- u16 next_dd = xdp_ring->next_dd;
- u16 ntc, cleared_dds = 0;
-
- do {
- struct ice_tx_desc *next_dd_desc;
- u16 desc_cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames;
- u16 i;
-
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
- cleared_dds++;
- xsk_frames = 0;
- if (likely(!xdp_ring->xdp_tx_active)) {
- xsk_frames = tx_thresh;
- goto skip;
- }
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if ((tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ if (last_rs >= ntc)
+ xsk_frames = last_rs - ntc + 1;
+ else
+ xsk_frames = last_rs + cnt - ntc + 1;
+ }
- ntc = xdp_ring->next_to_clean;
+ if (!xsk_frames)
+ return;
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ if (likely(!xdp_ring->xdp_tx_active))
+ goto skip;
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < xsk_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
}
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
skip:
- xdp_ring->next_to_clean += tx_thresh;
- if (xdp_ring->next_to_clean >= desc_cnt)
- xdp_ring->next_to_clean -= desc_cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- next_dd_desc->cmd_type_offset_bsz = 0;
- next_dd = next_dd + tx_thresh;
- if (next_dd >= desc_cnt)
- next_dd = tx_thresh - 1;
- } while (--budget);
-
- xdp_ring->next_dd = next_dd;
-
- return cleared_dds * tx_thresh;
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += xsk_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
/**
@@ -816,7 +865,6 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
u32 i;
@@ -836,13 +884,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
}
xdp_ring->next_to_use = ntu;
-
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
}
/**
@@ -855,7 +896,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
u32 nb_pkts, unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
@@ -864,54 +904,54 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- struct ice_tx_desc *tx_desc;
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
- * @budget: number of free descriptors on HW Tx ring that can be used
- * @napi_budget: amount of descriptors that NAPI allows us to clean
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
+ int budget;
+
+ ice_clean_xdp_irq_zc(xdp_ring);
- if (budget < tx_thresh)
- budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
- struct ice_tx_desc *tx_desc;
-
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
xdp_ring->next_to_use = 0;
}
ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
&total_bytes);
+ ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
@@ -944,13 +984,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (!ice_is_xdp_ena_vsi(vsi))
return -EINVAL;
- if (queue_id >= vsi->num_txq)
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -EINVAL;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
@@ -989,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 21faec8e97db..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -26,12 +26,10 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool
-ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
- u32 __always_unused budget,
- int __always_unused napi_budget)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
{
return false;
}
@@ -72,5 +70,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+ bool __always_unused zc)
+{
+ return 0;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c14fc871dd41..e5f3e7680dc6 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -850,14 +850,14 @@ static void igb_get_drvinfo(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers
*/
- strlcpy(drvinfo->fw_version, adapter->fw_version,
+ strscpy(drvinfo->fw_version, adapter->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d8b836a85cc3..f8e32833226c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1211,8 +1211,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igb_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -3138,7 +3137,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
adapter->i2c_algo.data = adapter;
adapter->i2c_adap.algo_data = &adapter->i2c_algo;
adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
+ strscpy(adapter->i2c_adap.name, "igb BB",
sizeof(adapter->i2c_adap.name));
status = i2c_bit_add_bus(&adapter->i2c_adap);
return status;
@@ -3637,6 +3636,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3649,12 +3649,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3814,7 +3815,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3974,6 +3977,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init the spinlock that protects VF resources from concurrent access */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -7958,8 +7964,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7981,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 9d4322b74163..83b97989a6bd 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -169,8 +169,8 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f4e91db89fe5..3a32809510fc 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1109,7 +1109,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
return -ENOMEM;
}
- netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5c66b97c0cfa..4f9d7f013a95 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -610,7 +610,6 @@
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
-#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ebff0e04045d..34889be63e78 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
return ok;
}
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
- struct xdp_frame *xdpf,
- struct igc_ring *ring)
-{
- dma_addr_t dma;
-
- dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
- return -ENOMEM;
- }
-
- buffer->type = IGC_TX_BUFFER_TYPE_XDP;
- buffer->xdpf = xdpf;
- buffer->protocol = 0;
- buffer->bytecount = xdpf->len;
- buffer->gso_segs = 1;
- buffer->time_stamp = jiffies;
- dma_unmap_len_set(buffer, len, xdpf->len);
- dma_unmap_addr_set(buffer, dma, dma);
- return 0;
-}
-
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
- struct igc_tx_buffer *buffer;
- union igc_adv_tx_desc *desc;
- u32 cmd_type, olinfo_status;
- int err;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, index = ring->next_to_use;
+ struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+ struct igc_tx_buffer *buffer = head;
+ union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+ u32 olinfo_status, len = xdpf->len, cmd_type;
+ void *data = xdpf->data;
+ u16 i;
- if (!igc_desc_unused(ring))
- return -EBUSY;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- buffer = &ring->tx_buffer_info[ring->next_to_use];
- err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
- if (err)
- return err;
+ if (igc_maybe_stop_tx(ring, count + 3)) {
+ /* this is a hard error */
+ return -EBUSY;
+ }
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- buffer->bytecount;
- olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+ i = 0;
+ head->bytecount = xdp_get_frame_len(xdpf);
+ head->type = IGC_TX_BUFFER_TYPE_XDP;
+ head->gso_segs = 1;
+ head->xdpf = xdpf;
- desc = IGC_TX_DESC(ring, ring->next_to_use);
- desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
- netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+ for (;;) {
+ dma_addr_t dma;
- buffer->next_to_watch = desc;
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev,
+ "Failed to map DMA for TX\n");
+ goto unmap;
+ }
- ring->next_to_use++;
- if (ring->next_to_use == ring->count)
- ring->next_to_use = 0;
+ dma_unmap_len_set(buffer, len, len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | len;
+
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.buffer_addr = cpu_to_le64(dma);
+
+ buffer->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ buffer = &ring->tx_buffer_info[index];
+ desc = IGC_TX_DESC(ring, index);
+ desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+ /* set the timestamp */
+ head->time_stamp = jiffies;
+ /* set next_to_watch value indicating a packet is present */
+ head->next_to_watch = desc;
+ ring->next_to_use = index;
return 0;
+
+unmap:
+ for (;;) {
+ buffer = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(buffer, len))
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(buffer, dma),
+ dma_unmap_len(buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(buffer, len, 0);
+ if (buffer == head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return -ENOMEM;
}
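The unmap path above walks the ring backwards from the slot whose DMA mapping failed to the head buffer, wrapping at index 0. A minimal stand-alone sketch of that wrap-around walk-back (hypothetical ring size and indices, not igc code):

#include <stdio.h>

int main(void)
{
	unsigned int count = 8;            /* hypothetical ring size */
	unsigned int head = 6;             /* first slot used by the frame */
	unsigned int index = 1;            /* slot where mapping failed, after wrapping */

	for (;;) {
		printf("clean slot %u\n", index);  /* here: unmap if a length was recorded */
		if (index == head)
			break;
		if (!index)
			index += count;    /* step below slot 0 wraps to the ring end */
		index--;
	}
	return 0;                          /* visits slots 1, 0, 7, 6 */
}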
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
+ xdp_buff_clear_frags_flag(&xdp);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -4356,8 +4394,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 46efcfab7234..efa980514944 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -456,9 +456,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgb_driver_name,
+ strscpy(drvinfo->driver, ixgb_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 45be9a1ab6af..b4d47e7a76c8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -414,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgb_netdev_ops;
ixgb_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, ixgb_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 04f453eabef6..e88e3dfac8c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1106,12 +1106,12 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ strscpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
@@ -1964,15 +1964,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
frame_size >>= 1;
- data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+ data = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
- kunmap(rx_buffer->page);
-
return match;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 0fcd82036d4e..7311bd545acf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1004,7 +1004,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
UTS_RELEASE);
/* Firmware Version */
- strlcpy(info->firmware_version, adapter->eeprom_id,
+ strscpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
/* Model */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 86b11164655e..f8156fe4b1dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -874,8 +874,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d1e430b8c8aa..298cfbfcb7b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10849,7 +10849,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
@@ -11140,7 +11140,7 @@ skip_sriov:
err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
if (err)
- strlcpy(part_str, "Unknown", sizeof(part_str));
+ strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9f06896a049b..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1293,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7f7ea468ffa9..2b00db92b08f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3712,7 +3712,9 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3722,6 +3724,7 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 35c2b9b8bd19..aa4bf6c9a2f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1721,9 +1721,59 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return IXGBE_ERR_LINK_SETUP;
}
- status = mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* change mode enforcement rules to hybrid */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x0400;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* manually control the config */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20002240;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* move the AN base page values */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x1;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* set the AN37 over CB mode */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20000000;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* restart AN manually */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index fed46872af2b..ccfa6b91aac6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -213,8 +213,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2f12fbe229c1..99933e89717a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2733,7 +2733,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f43d6616bc0d..1732ec3c3dbd 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2332,9 +2332,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -3009,7 +3009,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc);
}
- netif_napi_add(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &jme->napi, jme_poll);
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index df9a8eefa007..2b9335cb4bb3 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -416,7 +416,8 @@ static void korina_abort_rx(struct net_device *dev)
}
/* transmit packet */
-static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t korina_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
u32 chain_prev, chain_next;
@@ -938,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1354,7 +1355,7 @@ static int korina_probe(struct platform_device *pdev)
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lp->napi, korina_poll);
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = korina_mdio_read;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7cedbe1fdfd7..59aab4086dcc 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -470,7 +470,7 @@ ltq_etop_stop(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
int queue = skb_get_queue_mapping(skb);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 5edb68a8aab1..8d646c7f8c82 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
}
skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
continue;
if (ret != XRX200_DMA_PACKET_COMPLETE)
- return ret;
+ break;
rx++;
} else {
break;
@@ -613,8 +620,7 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
xrx200_tx_housekeeping);
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index fdd99f0de424..35f24e0f0934 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -152,7 +152,8 @@ static int liteeth_stop(struct net_device *netdev)
return 0;
}
-static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct liteeth *priv = netdev_priv(netdev);
void __iomem *txbuffer;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b6be0552a6c1..707993b445d1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1603,12 +1603,12 @@ mv643xx_eth_set_link_ksettings(struct net_device *dev,
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ strscpy(drvinfo->driver, mv643xx_eth_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ strscpy(drvinfo->version, mv643xx_eth_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int mv643xx_eth_get_coalesce(struct net_device *dev,
@@ -3183,7 +3183,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0caa2df87c04..ff3e361e06e7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4656,11 +4656,11 @@ mvneta_ethtool_get_coalesce(struct net_device *dev,
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5600,14 +5600,13 @@ static int mvneta_probe(struct platform_device *pdev)
* operation, so only single NAPI should be initialized.
*/
if (pp->neta_armada3700) {
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pp->napi, mvneta_poll);
} else {
for_each_present_cpu(cpu) {
struct mvneta_pcpu_port *port =
per_cpu_ptr(pp->ports, cpu);
- netif_napi_add(dev, &port->napi, mvneta_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, mvneta_poll);
port->pp = pp;
}
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ad73a488fc5f..11e603686a27 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..75e83ea2a926 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b84128b549b4..eb0fb8128096 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5425,11 +5425,11 @@ mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5770,8 +5770,7 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
v->irq = irq_of_parse_and_map(port_node, 0);
if (v->irq <= 0)
return -EINVAL;
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
port->nqvecs = 1;
@@ -5831,8 +5830,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
goto err;
}
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
}
return 0;
@@ -7706,7 +7704,18 @@ static struct platform_driver mvpp2_driver = {
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
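A brief reading of the change above: module_platform_driver() offers no hook for dropping the module-wide debugfs directory that mvpp2_dbgfs_init() now caches in the static mvpp2_root, so the macro is expanded into explicit init/exit functions and the root is removed only after the platform driver is unregistered. A minimal sketch of the same pattern under hypothetical names (not mvpp2 code):

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical example: one debugfs root shared by all probed devices,
 * created lazily on first use and removed only when the module goes away.
 */
static struct dentry *example_root;

static struct platform_driver example_driver = {
	.driver = { .name = "example" },
};

struct dentry *example_debugfs_root(void)
{
	if (!example_root)
		example_root = debugfs_create_dir("example", NULL);
	return example_root;
}

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
	debugfs_remove(example_root);	/* NULL-safe */
}
module_exit(example_exit);

MODULE_LICENSE("GPL");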
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 97f080c66dd4..9089adcb75f9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -410,7 +410,7 @@ static void octep_napi_add(struct octep_device *oct)
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
- octep_napi_poll, 64);
+ octep_napi_poll);
oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index d9ae0937d17a..392d9b0da0d7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -158,8 +158,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
goto desc_dma_alloc_err;
}
- oq->buff_info = (struct octep_rx_buffer *)
- vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
if (unlikely(!oq->buff_info)) {
dev_err(&oct->pdev->dev,
"Failed to allocate buffer info for OQ-%d\n", q_no);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 40203560b291..3cf4c8285c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o rvu_npc_hash.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d7762577e285..8d5d5a0f68c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -293,20 +293,74 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
- nix_bandprof_get_hwinfo_rsp)
-
-/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+ nix_bandprof_get_hwinfo_rsp) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
};
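The MCS mailbox IDs added above plug into the driver's X-macro scheme: a single M(name, id, handler, req, rsp) list is expanded once here into the MBOX_MSG_* enum and again elsewhere into handler and message-format declarations. A minimal stand-alone sketch of that expansion technique (hypothetical names, not the octeontx2 list):

#include <stdio.h>

/* One list, expanded with different definitions of M(). */
#define EXAMPLE_MESSAGES      \
M(PING,  0xa100)              \
M(STATS, 0xa101)

enum {
#define M(_name, _id) MSG_ ## _name = _id,
EXAMPLE_MESSAGES
#undef M
};

int main(void)
{
	printf("MSG_PING=0x%x MSG_STATS=0x%x\n", MSG_PING, MSG_STATS);
	return 0;
}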
@@ -1471,6 +1525,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1478,6 +1533,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
+ int extts_on;
};
struct ptp_rsp {
@@ -1655,4 +1711,415 @@ enum cgx_af_status {
LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
};
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u8 all; /* Allocate one of each resource type */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Index of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* No of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Support 2 SA policy */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3; sc_id + an is used as the SA_MEM_MAP index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u8 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rule_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+ u8 id; /* for type = PORT; if id == 0xFF (invalid), the port number is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* All resources stats mapped to PF are cleared */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000000..5ba618aed6ad
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+ return 0;
+}
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
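mcs_alloc_ctrlpktrule() above is a thin wrapper around the bitmap-backed resource allocator: find the first free bit at or after the requested offset, mark it used, and record the owning pcifunc. A user-space sketch of the same idea (plain C, not the kernel rsrc_bmap/bitmap_* API):

#include <stdio.h>

#define MAX_RSRC 16

static unsigned long used;               /* bit i set => slot i allocated */
static unsigned short owner[MAX_RSRC];   /* pcifunc that owns each slot */

/* Return the first free slot at or after 'offset', or -1 if none. */
static int alloc_rsrc(unsigned int offset, unsigned short pcifunc)
{
	for (unsigned int i = offset; i < MAX_RSRC; i++) {
		if (!(used & (1UL << i))) {
			used |= 1UL << i;
			owner[i] = pcifunc;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	printf("first=%d second=%d\n", alloc_rsrc(0, 0x100), alloc_rsrc(0, 0x100));
	return 0;
}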
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the cam resources mapped to pf */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+	for (id = 0; id < map->sc.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ /* TX SA interrupt is raised only if autorekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
+ * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies
+ * SA in SA_index1 got expired else SA in SA_index0 got expired.
+ */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+		/* Skip if auto rekey is not enabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+ /* Check if tx_sa_active status had changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+ /* SA_index0 is expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+ /* Bit high in *PN_THRESH_REACHEDX implies
+ * corresponding SAs are expired.
+ */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+		/* Report the lmac_id which hit the BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+		/* Report the lmac_id which hit the PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable and clear the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+
+	/* Check which block has interrupt */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
+
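+/* Allocate a device-managed array of 'n' u16 entries */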
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+	/* Enable MCS IP interrupts */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ /* Enable CPM Rx/Tx interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+
+	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+	if (!mcs->tx_sa_active) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+ return ret;
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+ /* Check MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
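+		/* Same FIFO skid value is programmed in both 16-bit halves */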
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode 25GB */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
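+		/* Each LMAC gets a contiguous range of 16 channels from 'base' */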
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
+ return err;
+ }
+ }
+
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibrate */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+ /* Set MCS to perform standard IEEE802.1AE macsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
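+	/* Program BBE RX calendar entries (multi-block MCS only) */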
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 5;	/* x2p calibration intf */
+ hw->mcs_blks = 1; /* MCS blocks */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+	/* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+ /* Set mcs tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+ /* Set mcs rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* per port config */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000000..64dc2b80e15d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector Enumeration */
+enum mcs_int_vec_e {
+ MCS_INT_VEC_MIL_RX_GBL = 0x0,
+ MCS_INT_VEC_MIL_RX_LMACX = 0x1,
+ MCS_INT_VEC_MIL_TX_LMACX = 0x5,
+ MCS_INT_VEC_HIL_RX_GBL = 0x9,
+ MCS_INT_VEC_HIL_RX_LMACX = 0xa,
+ MCS_INT_VEC_HIL_TX_GBL = 0xe,
+ MCS_INT_VEC_HIL_TX_LMACX = 0xf,
+ MCS_INT_VEC_IP = 0x13,
+ MCS_INT_VEC_CNT = 0x14,
+};
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 4ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
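+	/* Enable the rule after programming its match data */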
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+	u16 *flowid2secy_map;	/* flowid to secy mapping */
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+};
+
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+};
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000000..7b6205414428
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 1;	/* x2p calibration intf */
+ hw->mcs_blks = 7; /* MCS blocks */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN Ctag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+	/* Enable custom tags 0 and 1 and sectag */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
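+	/* secy index in bits[5:0], ctrl_pkt flag in bit 6; TX also packs SC in bits[12:7] */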
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* TX SA interrupt is raised only if autorekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
+ * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies
+ * SA in SA_index1 got expired else SA in SA_index0 got expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+	struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+		/* Skip if auto rekey is not enabled */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+ /* Check if tx_sa_active status had changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+ /* SA_index0 is expired */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000000..c95a8b8f5eaf
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
+#define MCSX_IP_MODE 0x900c8ull
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BBE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x37d8; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a)({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000000..fa8029a94068
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "lmac_common.h"
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
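+/* MBOX_UP_MCS_MESSAGES (defined with the other mailbox message lists in
+ * mbox.h) expands the M() macro above once per AF-to-PF "up" message,
+ * generating an otx2_mbox_alloc_msg_<name>() helper for each one, e.g. the
+ * otx2_mbox_alloc_msg_mcs_intr_notify() call used by mcs_notify_pfvf() below.
+ */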
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+ /* Check PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
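+
+/* Events queued above are drained by mcs_intr_handler_task(), which forwards
+ * each one to the owning PF over the AF->PF "up" mailbox via mcs_notify_pfvf().
+ * A rough caller sketch (field values are illustrative only):
+ *
+ * struct mcs_intr_event event = { 0 };
+ *
+ * event.mcs_id = mcs->mcs_id;
+ * event.pcifunc = pcifunc;
+ * event.intr_mask = intr_cause_bit;
+ * mcs_add_intr_wq_entry(mcs, &event);
+ */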
+
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ /* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
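+/* Intended to be called from the RVU FLR (function level reset) path: when a
+ * PF/VF is reset, all MACsec resources (flow IDs, SecYs, SCs, SAs) it owns on
+ * every MCS block are returned to the free pool.
+ */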
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+ /* CNF10K-B mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+ unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+ /* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 67a6821d2dff..3411e2e47d46 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
@@ -50,12 +52,23 @@
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+{
+ return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false;
+}
+
+static bool is_ptp_dev_cn10k(struct ptp *ptp)
+{
+ return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false;
+}
+
static bool cn10k_ptp_errata(struct ptp *ptp)
{
if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
@@ -72,6 +85,43 @@ static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
return false;
}
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the ptp clock value has crossed 0.5 seconds, it is too
+ * late to update the pps threshold value, so update the
+ * threshold after 1 second.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
u64 sec, sec1, nsec;
@@ -246,6 +296,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* sclk is in MHz */
ptp->clock_rate = sclk * 1000000;
+ /* Program the seconds rollover value to 1 second */
+ if (is_ptp_dev_cnf10kb(ptp))
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
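+ /* (0x3b9aca00 == 1,000,000,000 ns, i.e. the nanosecond counter
+ * wraps exactly at the one second boundary)
+ */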
+
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -270,6 +324,18 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* ptp_clock_hi rolls over to zero one clock cycle before it
+ * reaches the one second boundary, so program pps_lo_incr in
+ * such a way that the pps threshold comparison at the one
+ * second boundary succeeds and the pps edge changes. After
+ * each one second boundary, the hrtimer handler is invoked
+ * and reprograms the pps threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
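+ /* Worked example (illustrative, not from the original patch): with
+ * a 1 GHz sclk, clock_period is 1 ns and the value written above is
+ * (500000000 - 1) << 32, i.e. a half period one clock cycle short of
+ * 0.5 s, so the threshold comparison still matches at the boundary.
+ */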
+ }
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
@@ -282,14 +348,39 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
- *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10k(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
return 0;
}
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
- writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
return 0;
}
@@ -329,6 +420,11 @@ static int ptp_probe(struct pci_dev *pdev,
else
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
return 0;
error_free:
@@ -353,6 +449,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
if (IS_ERR_OR_NULL(ptp))
return;
@@ -420,6 +519,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 95a955159f40..b9d92abc3844 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -17,7 +17,10 @@ struct ptp {
void __iomem *reg_base;
u64 (*read_ptp_tstmp)(struct ptp *ptp);
spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
u32 clock_rate;
+ u32 clock_period;
};
struct ptp *ptp_get(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index ef59de43b11e..a70e1153fa04 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -415,11 +415,26 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
return;
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
- if (enable)
+ if (enable) {
cfg |= RPMX_RX_TS_PREPEND;
- else
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
}
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index c2bd6e54ea51..77f2ef9e1425 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -16,6 +16,7 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -72,6 +73,10 @@
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 7282a826d81e..3f5e09b77d4b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -16,6 +16,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
+#include "mcs.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
@@ -23,8 +24,6 @@
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
-
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -418,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
*hwvf = cfg & 0xFFF;
}
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
int pf, func;
u64 cfg;
@@ -1159,6 +1158,12 @@ cpt:
rvu_program_channels(rvu);
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
return 0;
nix_err:
@@ -3293,6 +3298,7 @@ err_mbox:
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
@@ -3319,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void)
if (err < 0)
goto ptp_err;
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
goto rvu_err;
return 0;
rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
@@ -3370,6 +3383,7 @@ ptp_err:
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d15bc443335d..76474385a602 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -25,6 +25,8 @@
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
struct dentry *nix;
struct dentry *npc;
struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -497,6 +503,8 @@ struct rvu {
struct ptp *ptp;
+ int mcs_blk_cnt;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -504,6 +512,12 @@ struct rvu {
/* RVU switch implementation over NPC with DMAC rules */
struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -868,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_exit(struct rvu *rvu);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index f42a09f04b25..a1970ebedf95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -19,6 +19,7 @@
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
+#include "mcs.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -227,6 +228,350 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+ seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+ seq_puts(filp, "\n MCS secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+ seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+ seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
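+
+/* Resulting debugfs layout (root directory name from DEBUGFS_DIR_NAME):
+ *
+ * <debugfs>/octeontx2/mcs/mcs<N>/rx_stats/{flowid,secy,sc,sa,port}
+ * <debugfs>/octeontx2/mcs/mcs<N>/tx_stats/{flowid,secy,sc,sa,port}
+ */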
+
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
@@ -3053,6 +3398,7 @@ create:
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0879a48411f3..7646bb2ec89b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4296,8 +4296,14 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
/* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
- rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 77a9ade91f3e..0e0d536645ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -266,6 +266,7 @@
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index d463dc72d80a..73fdb8798614 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index fd4f083c699e..826f691de259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000000..64f3acd7f67b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include <net/macsec.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
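+/* SecTAG TCI bits as defined by IEEE 802.1AE; the TX policy carries them
+ * shifted right by two, i.e. without the two AN bits.
+ */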
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB 0x10 /* epon */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+	}
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+	}
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
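+	/* TCAM mask bits set to 1 are ignored during the lookup; only the
+	 * EtherType field is left unmasked, so this RX flow matches any
+	 * frame carrying the MACsec EtherType (ETH_P_MACSEC).
+	 */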
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[0] = ~0ULL;
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+		ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ unsigned char *src = rxsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg],
+ (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+	/* Insert SecTag after 12 bytes (DA + SA) */
+ u8 tag_offset = 12;
+ u8 sectag_tci = 0;
+ u64 policy;
+ int ret;
+
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+	/* Write SecTag excluding AN bits (1..0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+	/* If the encoding SA does not exist or is not active and protect is
+	 * not set, then frames can be sent out as-is. Hence enable the
+	 * policy irrespective of secy operational state when !protect.
+	 */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
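+	/* The TX flow is keyed on the interface's source MAC address, which
+	 * straddles TCAM data words 0 and 1: the low 16 bits of the MAC go in
+	 * bits 63:48 of word 0 and the upper 32 bits in bits 31:0 of word 1.
+	 */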
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+	/* This can be enabled since the stack transmits packets only when the interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ /* Link the encoding_sa only to SC out of all SAs */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ unsigned char *src = txsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+		ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
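+	/* Tell the MACsec core the PN is about to wrap so it can deactivate
+	 * the SA instead of transmitting with an exhausted packet number.
+	 */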
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+	/* For each SecY, one TX secy and one RX secy HW resource are needed */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free Tx SC and its SAs (if any) resources back to the AF */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free Rx SC and its SAs (if any) resources back to the AF */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+ /* When updating secy, change RX secy also */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn_halves.lower);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+	/* Some stats share hardware counters, so when updating the secy
+	 * policy take a snapshot of the current stats and reset them.
+	 * The stats below are the ones affected by these shared counters.
+	 */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->protect_frames == txsc->last_protect_frames)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_protect_frames)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+ /* Stick to 16 bytes key len until XPN support is added */
+ if (secy->key_len != 16)
+ return -EOPNOTSUPP;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->protect_frames)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
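+	/* Not fatal: MACsec offload still works, only the PN-wrap interrupt
+	 * notification is unavailable.
+	 */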
+	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+	mutex_unlock(&mbox->lock);
+	return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index d686c7b6252f..9ac9e6615ae7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -586,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
@@ -602,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->lvl = lvl;
req->num_regs = 1;
- schq = hw->txschq_list[lvl][0];
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -611,7 +618,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
(0x2ULL << 36);
req->num_regs++;
/* MDQ config */
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
req->regval[1] = parent << 16;
req->num_regs++;
@@ -619,14 +626,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
@@ -635,11 +642,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
@@ -650,8 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
@@ -676,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
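+	/* Bit 49 of NIX_AF_SMQX_CFG requests a flush of packets pending in
+	 * this SMQ.
+	 */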
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
@@ -806,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
@@ -1792,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index b28029cc4316..282db6fe3b08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,6 +19,7 @@
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@@ -33,6 +34,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -40,6 +42,11 @@
#define NAME_SIZE 32
+#ifdef CONFIG_DCB
+/* Max priority supported for PFC */
+#define NIX_PF_PFC_PRIO_MAX 8
+#endif
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -196,7 +203,7 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
- u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -238,6 +245,8 @@ struct otx2_hw {
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+#define CN10K_HW_MACSEC 5
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -271,6 +280,13 @@ struct refill_work {
struct otx2_nic *pf;
};
+/* PTPv2 originTimestamp structure */
+struct ptpv2_tstamp {
+ __be16 seconds_msb; /* 16 bits + */
+ __be32 seconds_lsb; /* 32 bits = 48 bits*/
+ __be32 nanoseconds;
+} __packed;
+
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -286,6 +302,9 @@ struct otx2_ptp {
struct ptp_pin_desc extts_config;
u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+ u32 base_ns;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -335,6 +354,66 @@ struct dev_hw_ops {
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
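+/* 802.1AE association numbers are 2 bits wide, so at most four SAs per SC */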
+#define CN10K_MCS_SA_PER_SC 4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+ u64 InPktsUntagged;
+ u64 InPktsNoTag;
+ u64 InPktsBadTag;
+ u64 InPktsUnknownSCI;
+ u64 InPktsNoSCI;
+ u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+ u64 InOctetsValidated;
+ u64 InOctetsDecrypted;
+ u64 InPktsUnchecked;
+ u64 InPktsDelayed;
+ u64 InPktsOK;
+ u64 InPktsInvalid;
+ u64 InPktsLate;
+ u64 InPktsNotValid;
+ u64 InPktsNotUsingSA;
+ u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+ struct macsec_secy *sw_secy;
+ struct cn10k_txsc_stats stats;
+ struct list_head entry;
+ enum macsec_validation_type last_validate_frames;
+ bool last_protect_frames;
+ u16 hw_secy_id_tx;
+ u16 hw_secy_id_rx;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+ struct macsec_secy *sw_secy;
+ struct macsec_rx_sc *sw_rxsc;
+ struct cn10k_rxsc_stats stats;
+ struct list_head entry;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+ struct list_head txsc_list;
+ struct list_head rxsc_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -358,6 +437,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -415,10 +495,16 @@ struct otx2_nic {
/* PFC */
u8 pfc_en;
u8 *queue_to_pfc_map;
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
/* napi event count. It is needed for adaptive irq coalescing. */
u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct cn10k_mcs_cfg *macsec_cfg;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -458,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -487,7 +578,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
+
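+	/* Only CN10KB silicon has the MCS (MACsec) block */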
+ if (is_dev_cn10kb(pfvf->pdev))
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -743,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@@ -785,6 +881,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
dir, DMA_ATTR_SKIP_CPU_SYNC);
}
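+/* Pick the SMQ for a send queue: use the dedicated per-priority SMQ if PFC
+ * has allocated one for this queue index, otherwise fall back to the default
+ * SMQ shared by all SQs.
+ */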
+static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+{
+#ifdef CONFIG_DCB
+ if (pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+#endif
+
+ return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -807,7 +913,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
@@ -888,6 +994,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -907,5 +1015,24 @@ void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
+/* PFC support */
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 723d2506d309..ccaf97bb1ce0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -7,6 +7,289 @@
#include "otx2_common.h"
+static int otx2_check_pfc_config(struct otx2_nic *pfvf)
+{
+ u8 tx_queues = pfvf->hw.tx_queues, prio;
+ u8 pfc_en = pfvf->pfc_en;
+
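+	/* PFC priorities map 1:1 to TX queue indices, so each enabled
+	 * priority needs its own TX queue.
+	 */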
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ if ((pfc_en & (1 << prio)) &&
+ prio > tx_queues - 1) {
+ dev_warn(pfvf->dev,
+ "Increase number of tx queues from %d to %d to support PFC.\n",
+ tx_queues, prio + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, lvl, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* Either PFC bit is not set
+ * or tx scheduler is not allocated for the priority
+ */
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+		/* Configure the schedulers at each TL (transmit level) */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pfvf, lvl, prio, true);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
+ __func__, lvl, prio);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+	/* Request one schq per level up to the configured link config
+	 * level. The rest of the schedulers can be the same as
+	 * hw.txschq_list.
+	 */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ req->schq[lvl] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ if (!rsp->schq[lvl])
+ return -ENOSPC;
+
+ pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
+ }
+
+ /* Set the Tx schedulers for rest of the levels same as
+ * hw.txschq_list as those will be common for all.
+ */
+ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];
+
+ pfvf->pfc_alloc_status[prio] = true;
+ return 0;
+}
+
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+{
+ u8 pfc_en = pfvf->pfc_en;
+ u8 pfc_bit_set;
+ int err, prio;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_free_req *free_req;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* free PFC TLx nodes */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ pfvf->pfc_alloc_status[prio] = false;
+ return 0;
+}
+
+static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct net_device *dev = pfvf->netdev;
+ bool if_up = netif_running(dev);
+ struct nix_aq_enq_req *sq_aq;
+
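+	/* Stop the affected TX queue(s) while the SQ is remapped to a new SMQ */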
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_stop_all_queues(pfvf->netdev);
+ else
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
+ }
+
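+	/* CN10K and OcteonTx2 SQ contexts use different SMQ field widths, hence the different masks below */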
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ cn10k_sq_aq->qidx = prio;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ cn10k_sq_aq->sq.ena = 1;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
+ cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ sq_aq->qidx = prio;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ sq_aq->sq.ena = 1;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->sq_mask.smq = GENMASK(8, 0);
+ sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_start_all_queues(pfvf->netdev);
+ else
+ netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
+ struct mbox *mbox = &pfvf->mbox;
+ int err, prio;
+
+ mutex_lock(&mbox->lock);
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+		/* A tx scheduler was created for this priority but the user now wants it disabled */
+ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
+ mutex_unlock(&mbox->lock);
+ if (if_up)
+ netif_tx_stop_all_queues(pfvf->netdev);
+
+ otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
+ if (if_up)
+ netif_tx_start_all_queues(pfvf->netdev);
+
+ /* delete the schq */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s failed to stop PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+ mutex_lock(&mbox->lock);
+ goto update_sq_smq_map;
+ }
+
+ /* Either PFC bit is not set
+ * or Tx scheduler is already mapped for the priority
+ */
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev,
+ "%s failed to allocate PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+update_sq_smq_map:
+ err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+			dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping\n", __func__, prio);
+ return err;
+ }
+ }
+
+ err = otx2_pfc_txschq_config(pfvf);
+ mutex_unlock(&mbox->lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Delete the existing scheduler */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
struct cgx_pfc_cfg *req;
@@ -128,6 +411,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
/* Save PFC configuration to interface */
pfvf->pfc_en = pfc->pfc_en;
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+ goto process_pfc;
+
+ /* Check if the PFC configuration can be
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+ if (err)
+ return err;
+
+process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
return err;
@@ -136,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 3f60a80e34c8..0eb74e8c553d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -76,8 +76,8 @@ static void otx2_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
@@ -963,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -1313,8 +1315,8 @@ static void otx2vf_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *vf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9376d0e62914..5803d7f9137c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -858,6 +858,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+ struct mcs_intr_info *event,
+ struct msg_rsp *rsp)
+{
+ cn10k_handle_mcs_event(pf, event);
+
+ return 0;
+}
+
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@@ -917,6 +926,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
return err; \
}
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
break;
default:
@@ -1389,18 +1399,40 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
goto err_free_sq_ptrs;
}
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+ }
+#endif
+
err = otx2_config_nix_queues(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
+
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl);
+ err = otx2_txschq_config(pf, lvl, 0, false);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_config(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
+#endif
+
mutex_unlock(&mbox->lock);
return err;
@@ -1455,6 +1487,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+#ifdef CONFIG_DCB
+ if (pf->pfc_en)
+ otx2_pfc_txschq_stop(pf);
+#endif
+
mutex_lock(&mbox->lock);
/* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
@@ -1634,8 +1671,7 @@ int otx2_open(struct net_device *netdev)
cq_poll->dev = (void *)pf;
cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
- netif_napi_add(netdev, &cq_poll->napi,
- otx2_napi_handler, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
napi_enable(&cq_poll->napi);
}
@@ -1853,6 +1889,30 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+#ifdef CONFIG_DCB
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u8 vlan_prio;
+#endif
+
+#ifdef CONFIG_DCB
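+	/* With PFC, steer the packet to the TX queue matching its VLAN priority when a dedicated scheduler exists */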
+ if (!skb->vlan_present)
+ goto pick_tx;
+
+ vlan_prio = skb->vlan_tci >> 13;
+ if ((vlan_prio > pf->hw.tx_queues - 1) ||
+ !pf->pfc_alloc_status[vlan_prio])
+ goto pick_tx;
+
+ return vlan_prio;
+
+pick_tx:
+#endif
+ return netdev_pick_tx(netdev, skb, NULL);
+}
+
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1987,8 +2047,19 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ return -ERANGE;
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ fallthrough;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -2447,6 +2518,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_select_queue = otx2_select_queue,
.ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
@@ -2702,6 +2774,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ err = cn10k_mcs_init(pf);
+ if (err)
+ goto err_del_mcam_entries;
+
if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2916,6 +2992,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_config_pause_frm(pf);
}
+ cn10k_mcs_free(pf);
+
#ifdef CONFIG_DCB
/* Disable PFC config */
if (pf->pfc_en) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index fdc2c9315b91..896b2f9bac34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,33 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
- struct ptp_rsp *rsp;
- int err;
if (!ptp->nic)
- return 0;
+ return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
- return 0;
+ return -ENOMEM;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
- &req->hdr);
- if (IS_ERR(rsp))
- return 0;
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
- return rsp->clk;
+ return otx2_ptp_get_clock(ptp);
}
static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
@@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
+{
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ *tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
- struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(ptp, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
- ptp->last_extts = tstmp;
-
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
+ ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
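+/* Periodically snapshot the PHC time; one-step PTP Sync uses it to build the origin timestamp */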
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 tstamp;
+
+ mutex_lock(&pfvf->mbox.lock);
+ tstamp = otx2_ptp_get_clock(ptp);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+ ptp->base_ns = tstamp % NSEC_PER_SEC;
+
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
@@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on)
+ if (on) {
+ ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- else
+ } else {
+ ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
+ }
return 0;
default:
break;
@@ -302,6 +349,8 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
}
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
@@ -316,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index 4bbd12ff26e6..aa205a0d158f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -236,8 +236,15 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_51_16 : 36;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index a18e8efd0f1e..5ec11d71bf60 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -19,6 +19,12 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT 0x13F
+/* The PTPv2 Original Timestamp starts at byte offset 34 and consists
+ * of a 6-byte seconds field and a 4-byte nanoseconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET 34
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
@@ -686,7 +692,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -696,6 +703,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
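+	/* For one-step PTP, tell the HW where the packet's correction field starts and pass the cached base time */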
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = udp_csum;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -952,16 +966,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
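+	/* PTP over IPv4/IPv6 uses UDP with source and destination port 319 (0x13F) for event messages */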
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ u8 *data = skb->data, *msgtype;
+ __be16 proto = eth->h_proto;
+ int network_depth = 0;
+
+	/* NIX is programmed to offload the outer VLAN header.
+	 * For a single VLAN, the protocol field holds the network
+	 * header type (ETH_P_IP/IPV6); for stacked VLANs it holds
+	 * the inner VLAN type (0x8100).
+	 */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+			/* SKB APIs like skb_transport_offset() do not
+			 * include the offloaded VLAN header length, so
+			 * add it explicitly.
+			 */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *udp_csum = 1;
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ }
+
+ msgtype = data + *offset;
+
+	/* Check whether the PTP messageType is Sync (0) */
+ return (*msgtype & 0xf) == 0;
+}
+
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
+ struct ptpv2_tstamp *origin_tstamp;
+ int ptp_offset = 0, udp_csum = 0;
+ struct timespec64 ts;
u64 iova;
- if (!skb_shinfo(skb)->gso_size &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (unlikely(!skb_shinfo(skb)->gso_size &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+ if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+ origin_tstamp = (struct ptpv2_tstamp *)
+ ((u8 *)skb->data + ptp_offset +
+ PTP_SYNC_SEC_OFFSET);
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+ origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+ origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+ }
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, pfvf->ptp->base_ns, udp_csum);
} else {
skb_tx_timestamp(skb);
}
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index d395f4131648..df14cee80153 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -4,6 +4,6 @@ prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
prestera_switchdev.o prestera_acl.o prestera_flow.o \
prestera_flower.o prestera_span.o prestera_counter.o \
- prestera_router.o prestera_router_hw.o
+ prestera_router.o prestera_router_hw.o prestera_matchall.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2f84d0fb4094..35554ee805cd 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -306,17 +306,27 @@ struct prestera_switch {
struct prestera_counter *counter;
u8 lag_member_max;
u8 lag_max;
+ u32 size_tbl_router_nexthop;
};
struct prestera_router {
struct prestera_switch *sw;
struct list_head vr_list;
struct list_head rif_entry_list;
+ struct rhashtable nh_neigh_ht;
+ struct rhashtable nexthop_group_ht;
struct rhashtable fib_ht;
+ struct rhashtable kern_neigh_cache_ht;
struct rhashtable kern_fib_cache_ht;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+	u8 *nhgrp_hw_state_cache; /* Bitmap cache of nexthop HW state */
+ unsigned long nhgrp_hw_cache_kick; /* jiffies */
+ struct {
+ struct delayed_work dw;
+ } neighs_update;
};
struct prestera_rxtx_params {
@@ -362,11 +372,15 @@ int prestera_port_cfg_mac_write(struct prestera_port *port,
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
void prestera_queue_work(struct work_struct *work);
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay);
+void prestera_queue_drain(void);
int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 3d4b85f2d541..cba89fda504b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -54,6 +54,10 @@ struct prestera_acl_ruleset {
struct prestera_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
struct prestera_acl *acl;
+ struct {
+ u32 min;
+ u32 max;
+ } prio;
unsigned long rule_count;
refcount_t refcount;
void *keymask;
@@ -162,6 +166,9 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
ruleset->index = uid;
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
if (err)
@@ -178,10 +185,14 @@ err_rhashtable_init:
return ERR_PTR(err);
}
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
- void *keymask)
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
{
ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
+ if (!ruleset->keymask)
+ return -ENOMEM;
+
+ return 0;
}
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
@@ -365,6 +376,26 @@ prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
block->ruleset_zero = NULL;
}
+static void
+prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl,
+ struct prestera_acl_ruleset *ruleset)
+{
+ struct prestera_acl_rule *rule;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ list_for_each_entry(rule, &acl->rules, list) {
+ if (ruleset->ingress != rule->ruleset->ingress)
+ continue;
+ if (ruleset->ht_key.chain_index != rule->chain_index)
+ continue;
+
+ ruleset->prio.min = min(ruleset->prio.min, rule->priority);
+ ruleset->prio.max = max(ruleset->prio.max, rule->priority);
+ }
+}
+
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
{
@@ -389,6 +420,13 @@ u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
return ruleset->index;
}
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max)
+{
+ *prio_min = ruleset->prio.min;
+ *prio_max = ruleset->prio.max;
+}
+
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
return ruleset->offload;
@@ -429,6 +467,13 @@ void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
kfree(rule);
}
+static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset,
+ u32 prio)
+{
+ ruleset->prio.min = min(ruleset->prio.min, prio);
+ ruleset->prio.max = max(ruleset->prio.max, prio);
+}
+
int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule)
{
@@ -468,6 +513,7 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
list_add_tail(&rule->list, &sw->acl->rules);
ruleset->rule_count++;
+ prestera_acl_ruleset_prio_update(ruleset, rule->priority);
return 0;
err_acl_block_bind:
@@ -492,6 +538,7 @@ void prestera_acl_rule_del(struct prestera_switch *sw,
list_del(&rule->list);
prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+ prestera_acl_ruleset_prio_refresh(sw->acl, ruleset);
/* unbind block (all ports) */
if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 03fc5b9dc925..a35cc0609a1d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -185,8 +185,8 @@ struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
struct prestera_flow_block *block,
u32 chain_index);
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
- void *keymask);
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
@@ -195,6 +195,8 @@ int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max);
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 1da7ff889417..2f52daba58e6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -300,8 +300,8 @@ static void prestera_ethtool_get_drvinfo(struct net_device *dev,
struct prestera_port *port = netdev_priv(dev);
struct prestera_switch *sw = port->sw;
- strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index 2262693bd5cf..9f4267f326b0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -7,8 +7,9 @@
#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
-#include "prestera_span.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
static LIST_HEAD(prestera_block_cb_list);
@@ -17,9 +18,9 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return prestera_span_replace(block, f);
+ return prestera_mall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
return 0;
default:
return -EOPNOTSUPP;
@@ -89,6 +90,9 @@ prestera_flow_block_create(struct prestera_switch *sw,
INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
block->ingress = ingress;
return block;
@@ -263,7 +267,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
block = flow_block_cb_priv(block_cb);
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
err = prestera_flow_block_unbind(block, port);
if (err)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 0c9e13263261..a85a3eb40279 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -22,6 +22,11 @@ struct prestera_flow_block {
struct prestera_acl_ruleset *ruleset_zero;
struct flow_block_cb *block_cb;
struct list_head template_list;
+ struct {
+ u32 prio_min;
+ u32 prio_max;
+ bool bound;
+ } mall;
unsigned int rule_count;
bool ingress;
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 19d3b55c578e..91a478b75cbf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -5,6 +5,7 @@
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
struct prestera_flower_template {
struct prestera_acl_ruleset *ruleset;
@@ -360,6 +361,49 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
f->common.extack);
}
+static int prestera_flower_prio_check(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ u32 mall_prio_min;
+ u32 mall_prio_max;
+ int err;
+
+ err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+
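+	/* On ingress, matchall mirror rules must keep higher precedence (lower prio) than flower rules; on egress the ordering is reversed */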
+ if (f->common.prio <= mall_prio_max && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= mall_prio_min && !block->ingress) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
+ return 0;
+}
+
int prestera_flower_replace(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
@@ -368,6 +412,10 @@ int prestera_flower_replace(struct prestera_flow_block *block,
struct prestera_acl_rule *rule;
int err;
+ err = prestera_flower_prio_check(block, f);
+ if (err)
+ return err;
+
ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
@@ -452,7 +500,9 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
}
/* preserve keymask/template to this ruleset */
- prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ if (err)
+ goto err_ruleset_keymask_set;
/* skip error, as it is not possible to reject template operation,
* so, keep the reference to the ruleset for rules to be added
@@ -468,6 +518,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
list_add_rcu(&template->list, &block->template_list);
return 0;
+err_ruleset_keymask_set:
+ prestera_acl_ruleset_put(ruleset);
err_ruleset_get:
kfree(template);
err_malloc:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index 495f151e6fa9..1181115fe6fa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -19,5 +19,7 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f);
void prestera_flower_template_cleanup(struct prestera_flow_block *block);
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max);
#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 962d7e0c0cb5..fc6f7d2746e8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -10,11 +10,14 @@
#include "prestera_hw.h"
#include "prestera_acl.h"
#include "prestera_counter.h"
+#include "prestera_router_hw.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
#define PRESTERA_MIN_MTU 64
+#define PRESTERA_MSG_CHUNK_SIZE 1024
+
enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
@@ -57,6 +60,10 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
@@ -78,9 +85,11 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
- PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101,
- PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102,
PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105,
PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
@@ -101,6 +110,7 @@ enum {
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_LOCKED = 10,
PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
@@ -285,6 +295,7 @@ union prestera_msg_port_param {
u8 duplex;
u8 fec;
u8 fc;
+ u8 br_locked;
union {
struct {
u8 admin;
@@ -538,6 +549,14 @@ struct prestera_msg_ip_addr {
u8 __pad[3];
};
+struct prestera_msg_nh {
+ struct prestera_msg_iface oif;
+ __le32 hw_id;
+ u8 mac[ETH_ALEN];
+ u8 is_active;
+ u8 pad;
+};
+
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
@@ -563,6 +582,34 @@ struct prestera_msg_lpm_req {
u8 __pad[2];
};
+struct prestera_msg_nh_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX];
+ __le32 size;
+ __le32 grp_id;
+};
+
+struct prestera_msg_nh_chunk_req {
+ struct prestera_msg_cmd cmd;
+ __le32 offset;
+};
+
+struct prestera_msg_nh_chunk_resp {
+ struct prestera_msg_ret ret;
+ u8 hw_state[PRESTERA_MSG_CHUNK_SIZE];
+};
+
+struct prestera_msg_nh_grp_req {
+ struct prestera_msg_cmd cmd;
+ __le32 grp_id;
+ __le32 size;
+};
+
+struct prestera_msg_nh_grp_resp {
+ struct prestera_msg_ret ret;
+ __le32 grp_id;
+};
+
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
@@ -725,11 +772,15 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -745,6 +796,9 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
@@ -1022,6 +1076,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
+ sw->size_tbl_router_nexthop =
+ __le32_to_cpu(resp.size_tbl_router_nexthop);
return 0;
}
@@ -1431,27 +1487,39 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
return 0;
}
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
+ enum prestera_cmd_type_t cmd_type;
+
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND,
- &req.cmd, sizeof(req));
}
-int prestera_hw_span_unbind(const struct prestera_port *port)
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
+ enum prestera_cmd_type_t cmd_type;
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
- &req.cmd, sizeof(req));
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
}
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
@@ -1639,6 +1707,22 @@ int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .br_locked = br_locked,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -2004,6 +2088,85 @@ int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
sizeof(req));
}
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id)
+{
+ struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count),
+ .grp_id = __cpu_to_le32(grp_id) };
+ int i, err;
+
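+	/* Translate each resolved neighbour into a FW nexthop entry for this group */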
+ for (i = 0; i < count; i++) {
+ req.nh[i].is_active = nhs[i].connected;
+ memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN);
+ err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif);
+ if (err)
+ return err;
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */)
+{
+ static struct prestera_msg_nh_chunk_resp resp;
+ struct prestera_msg_nh_chunk_req req;
+ u32 buf_offset;
+ int err;
+
+ memset(&hw_state[0], 0, buf_size);
+ buf_offset = 0;
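+	/* Pull the nexthop active-state bitmap from FW in PRESTERA_MSG_CHUNK_SIZE byte chunks */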
+ while (1) {
+ if (buf_offset >= buf_size)
+ break;
+
+ memset(&req, 0, sizeof(req));
+ req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */
+ err = prestera_cmd_ret(sw,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET,
+ &req.cmd, sizeof(req), &resp.ret,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ memcpy(&hw_state[buf_offset], &resp.hw_state[0],
+ buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
+ buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE);
+ buf_offset += PRESTERA_MSG_CHUNK_SIZE;
+ }
+
+ return 0;
+}
+
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id)
+{
+ struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) };
+ struct prestera_msg_nh_grp_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *grp_id = __le32_to_cpu(resp.grp_id);
+ return err;
+}
+
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id)
+{
+ struct prestera_msg_nh_grp_req req = {
+ .grp_id = __cpu_to_le32(grp_id),
+ .size = __cpu_to_le32(nh_count)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 56e043146dd2..0a929279e1ce 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -146,6 +146,7 @@ struct prestera_counter_stats;
struct prestera_iface;
struct prestera_flood_domain;
struct prestera_mdb_entry;
+struct prestera_neigh_info;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -183,6 +184,8 @@ int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -243,8 +246,9 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
/* SPAN API */
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
-int prestera_hw_span_unbind(const struct prestera_port *port);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress);
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress);
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
/* Router API */
@@ -263,6 +267,16 @@ int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
__be32 dst, u32 dst_len);
+/* NH API */
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id);
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */);
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id);
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index ede3e53b9790..24f9d6024745 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -36,6 +36,17 @@ void prestera_queue_work(struct work_struct *work)
queue_work(prestera_owq, work);
}
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay)
+{
+ queue_delayed_work(prestera_wq, work, delay);
+}
+
+void prestera_queue_drain(void)
+{
+ drain_workqueue(prestera_wq);
+ drain_workqueue(prestera_owq);
+}
+
int prestera_port_learning_set(struct prestera_port *port, bool learn)
{
return prestera_hw_port_learning_set(port, learn);
@@ -51,6 +62,11 @@ int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
return prestera_hw_port_mc_flood_set(port, flood);
}
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked)
+{
+ return prestera_hw_port_br_locked_set(port, br_locked);
+}
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
enum prestera_accept_frm_type frm_type;
@@ -368,6 +384,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
if (!sw->np)
return 0;
+ of_node_get(sw->np);
ports = of_find_node_by_name(sw->np, "ports");
for_each_child_of_node(ports, node) {
@@ -417,6 +434,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
}
out:
+ of_node_put(node);
of_node_put(ports);
return err;
}
@@ -797,32 +815,30 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
caching_dw = &port->cached_hw_stats.caching_dw;
- if (port->phy_link) {
- memset(&smac, 0, sizeof(smac));
- smac.valid = true;
- smac.oper = pevt->data.mac.oper;
- if (smac.oper) {
- smac.mode = pevt->data.mac.mode;
- smac.speed = pevt->data.mac.speed;
- smac.duplex = pevt->data.mac.duplex;
- smac.fc = pevt->data.mac.fc;
- smac.fec = pevt->data.mac.fec;
- phylink_mac_change(port->phy_link, true);
- } else {
- phylink_mac_change(port->phy_link, false);
- }
- prestera_port_mac_state_cache_write(port, &smac);
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
}
+ prestera_port_mac_state_cache_write(port, &smac);
if (port->state_mac.oper) {
- if (!port->phy_link)
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, true);
+ else
netif_carrier_on(port->dev);
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
- } else if (netif_running(port->dev) &&
- netif_carrier_ok(port->dev)) {
- if (!port->phy_link)
+ } else {
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, false);
+ else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
netif_carrier_off(port->dev);
if (delayed_work_pending(caching_dw))
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
new file mode 100644
index 000000000000..6f2b95a5263e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
+
+static int prestera_mall_prio_check(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ u32 flower_prio_min;
+ u32 flower_prio_max;
+ int err;
+
+ err = prestera_flower_prio_get(block, f->common.chain_index,
+ &flower_prio_min, &flower_prio_max);
+ if (err == -ENOENT)
+ /* No flower filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+
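+	/* Counterpart of the flower-side check: ingress matchall must stay in front of flower rules, egress matchall behind them */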
+ if (f->common.prio <= flower_prio_max && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= flower_prio_min && block->ingress) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max)
+{
+ if (!block->mall.bound)
+ return -ENOENT;
+
+ *prio_min = block->mall.prio_min;
+ *prio_max = block->mall.prio_max;
+ return 0;
+}
+
+static void prestera_mall_prio_update(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ block->mall.prio_min = min(block->mall.prio_min, f->common.prio);
+ block->mall.prio_max = max(block->mall.prio_max, f->common.prio);
+}
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ err = prestera_mall_prio_check(block, f);
+ if (err)
+ return err;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err)
+ goto rollback;
+ }
+
+ prestera_mall_prio_update(block, f);
+
+ block->mall.bound = true;
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+ return err;
+}
+
+void prestera_mall_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.h b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
new file mode 100644
index 000000000000..fed08be80257
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_MATCHALL_H_
+#define _PRESTERA_MATCHALL_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_mall_destroy(struct prestera_flow_block *block);
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max);
+
+#endif /* _PRESTERA_MATCHALL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index f538a749ebd4..59470d99f522 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id prestera_pci_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 58f4e44d5ad7..4046be0e86ff 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -7,10 +7,35 @@
#include <net/inet_dscp.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>
+#include <net/nexthop.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
+#include <net/netevent.h>
#include "prestera.h"
#include "prestera_router_hw.h"
+#define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */
+
+struct prestera_kern_neigh_cache_key {
+ struct prestera_ip_addr addr;
+ struct net_device *dev;
+};
+
+struct prestera_kern_neigh_cache {
+ struct prestera_kern_neigh_cache_key key;
+ struct rhash_head ht_node;
+ struct list_head kern_fib_cache_list;
+	/* Holds prepared nh_neigh info while the neighbour is in_kernel */
+ struct prestera_neigh_info nh_neigh_info;
+ /* Indicate if neighbour is reachable by direct route */
+ bool reachable;
+ /* Lock cache if neigh is present in kernel */
+ bool in_kernel;
+};
+
struct prestera_kern_fib_cache_key {
struct prestera_ip_addr addr;
u32 prefix_len;
@@ -23,15 +48,29 @@ struct prestera_kern_fib_cache {
struct {
struct prestera_fib_key fib_key;
enum prestera_fib_type fib_type;
+ struct prestera_nexthop_group_key nh_grp_key;
} lpm_info; /* hold prepared lpm info */
/* Indicate if route is not overlapped by another table */
struct rhash_head ht_node; /* node of prestera_router */
- struct fib_info *fi;
- dscp_t kern_dscp;
- u8 kern_type;
+ struct prestera_kern_neigh_cache_head {
+ struct prestera_kern_fib_cache *this;
+ struct list_head head;
+ struct prestera_kern_neigh_cache *n_cache;
+ } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
+ union {
+ struct fib_notifier_info info; /* point to any of 4/6 */
+ struct fib_entry_notifier_info fen4_info;
+ };
bool reachable;
};
+static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_neigh_cache, key),
+ .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_neigh_cache_key),
+ .automatic_shrinking = true,
+};
+
static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
.key_offset = offsetof(struct prestera_kern_fib_cache, key),
.head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
@@ -51,15 +90,450 @@ static u32 prestera_fix_tb_id(u32 tb_id)
}
static void
-prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
+prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
struct prestera_kern_fib_cache_key *key)
{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
memset(key, 0, sizeof(*key));
+ key->addr.v = PRESTERA_IPV4;
key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
key->prefix_len = fen_info->dst_len;
key->kern_tb_id = fen_info->tb_id;
}
+static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ if (nhc->nhc_gw_family == AF_INET) {
+ nk->addr.v = PRESTERA_IPV4;
+ nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
+ } else {
+ nk->addr.v = PRESTERA_IPV6;
+ nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
+ }
+
+ nk->dev = nhc->nhc_dev;
+ return 0;
+}
+
+static void
+prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
+ struct prestera_nh_neigh_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ nk->addr = ck->addr;
+ nk->rif = (void *)ck->dev;
+}
+
+static bool
+prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ struct prestera_kern_neigh_cache_key tk;
+ int err;
+
+ err = prestera_util_nhc2nc_key(sw, nhc, &tk);
+ if (err)
+ return false;
+
+ if (memcmp(&tk, nk, sizeof(tk)))
+ return false;
+
+ return true;
+}
+
+static int
+prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ if (n->tbl->family == AF_INET) {
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = *(__be32 *)n->primary_key;
+ } else {
+ return -ENOENT;
+ }
+
+ key->dev = n->dev;
+
+ return 0;
+}
+
+static bool __prestera_fi_is_direct(struct fib_info *fi)
+{
+ struct fib_nh *fib_nh;
+
+ if (fib_info_num_path(fi) == 1) {
+ fib_nh = fib_info_nh(fi, 0);
+ if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
+ return true;
+ }
+
+ return false;
+}
+
+static bool prestera_fi_is_direct(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi_is_direct(fi);
+}
+
+static bool prestera_fi_is_nh(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi_is_direct(fi);
+}
+
+static bool __prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (!fi->fib6_nh->nh_common.nhc_gw_family)
+ return true;
+
+ return false;
+}
+
+static bool prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fi6_is_nh(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_direct(fen_info->fi);
+ else
+ return prestera_fi6_is_direct(fen6_info->rt);
+}
+
+static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_nh(fen_info->fi);
+ else
+ return prestera_fi6_is_nh(fen6_info->rt);
+}
+
+/* must be called with rcu_read_lock() */
+static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
+ __be32 *addr)
+{
+ struct flowi4 fl4;
+
+ /* TODO: walk through the appropriate kernel tables
+ * to learn whether the same prefix exists in several tables
+ */
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = *addr;
+ return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
+}
+
+static bool
+__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ struct net_device *dev)
+{
+ struct fib_nh *fib_nh;
+ struct fib_result res;
+ bool reachable;
+
+ reachable = false;
+
+ if (!prestera_util_kern_get_route(&res, tb_id, addr))
+ if (prestera_fi_is_direct(res.fi)) {
+ fib_nh = fib_info_nh(res.fi, 0);
+ if (dev == fib_nh->fib_nh_dev)
+ reachable = true;
+ }
+
+ return reachable;
+}
+
+/* Check if neigh route is reachable */
+static bool
+prestera_util_kern_n_is_reachable(u32 tb_id,
+ struct prestera_ip_addr *addr,
+ struct net_device *dev)
+{
+ if (addr->v == PRESTERA_IPV4)
+ return __prestera_util_kern_n_is_reachable_v4(tb_id,
+ &addr->u.ipv4,
+ dev);
+ else
+ return false;
+}
+
+static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
+ bool offloaded)
+{
+ if (offloaded)
+ n->flags |= NTF_OFFLOADED;
+ else
+ n->flags &= ~NTF_OFFLOADED;
+}
+
+static void
+prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
+{
+ if (offloaded)
+ nhc->nhc_flags |= RTNH_F_OFFLOAD;
+ else
+ nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
+
+ if (trap)
+ nhc->nhc_flags |= RTNH_F_TRAP;
+ else
+ nhc->nhc_flags &= ~RTNH_F_TRAP;
+}
+
+static struct fib_nh_common *
+prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+ struct fib6_info *iter;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return &fib_info_nh(fen4_info->fi, n)->nh_common;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ if (!n)
+ return &fen6_info->rt->fib6_nh->nh_common;
+
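+ /* Index 0 is the route's own fib6_nh; the remaining nexthops come from the siblings list. */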
+ list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
+ fib6_siblings) {
+ if (!--n)
+ return &iter->fib6_nh->nh_common;
+ }
+ }
+
+ /* If the family is incorrect, the callers have a BUG. */
+ /* If the requested index is not found, that is also a bug, because
+ * a valid index must be produced by nhs, which checks the list length.
+ */
+ WARN(1, "Invalid parameters passed to %s n=%d i=%p",
+ __func__, n, info);
+ return NULL;
+}
+
+static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fib_info_num_path(fen4_info->fi);
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ return fen6_info->rt->fib6_nsiblings + 1;
+ }
+
+ return 0;
+}
+
+static unsigned char
+prestera_kern_fib_info_type(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fen4_info->fi->fib_type;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ /* TODO: ECMP in ipv6 is expressed as several routes,
+ * each with a single nh.
+ */
+ return fen6_info->rt->fib6_type;
+ }
+
+ return RTN_UNSPEC;
+}
+
+/* A uc_nh route whose key equals its single nexthop is treated as a neighbour route */
+static bool
+prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
+{
+ if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
+ return false;
+
+ if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
+ return false;
+
+ if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
+ return false;
+
+ if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
+ &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
+ return false;
+
+ return true;
+}
+
+static int prestera_dev_if_type(const struct net_device *dev)
+{
+ struct macvlan_dev *vlan;
+
+ if (is_vlan_dev(dev) &&
+ netif_is_bridge_master(vlan_dev_real_dev(dev))) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_bridge_master(dev)) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_lag_master(dev)) {
+ return PRESTERA_IF_LAG_E;
+ } else if (netif_is_macvlan(dev)) {
+ vlan = netdev_priv(dev);
+ return prestera_dev_if_type(vlan->lowerdev);
+ } else {
+ return PRESTERA_IF_PORT_E;
+ }
+}
+
+static int
+prestera_neigh_iface_init(struct prestera_switch *sw,
+ struct prestera_iface *iface,
+ struct neighbour *n)
+{
+ struct prestera_port *port;
+
+ iface->vlan_id = 0; /* TODO: vlan egress */
+ iface->type = prestera_dev_if_type(n->dev);
+ if (iface->type != PRESTERA_IF_PORT_E)
+ return -EINVAL;
+
+ if (!prestera_netdev_check(n->dev))
+ return -EINVAL;
+
+ port = netdev_priv(n->dev);
+ iface->dev_port.hw_dev_num = port->dev_id;
+ iface->dev_port.port_num = port->hw_id;
+
+ return 0;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache =
+ rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
+ __prestera_kern_neigh_cache_ht_params);
+ return IS_ERR(n_cache) ? NULL : n_cache;
+}
+
+static void
+__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ dev_put(n_cache->key.dev);
+}
+
+static void
+__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static struct prestera_kern_neigh_cache *
+__prestera_kern_neigh_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
+ if (!n_cache)
+ goto err_kzalloc;
+
+ memcpy(&n_cache->key, key, sizeof(*key));
+ dev_hold(n_cache->key.dev);
+
+ INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
+ err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return n_cache;
+
+err_ht_insert:
+ dev_put(n_cache->key.dev);
+ kfree(n_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_get(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, key);
+ if (!n_cache)
+ n_cache = __prestera_kern_neigh_cache_create(sw, key);
+
+ return n_cache;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_put(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ if (!n_cache->in_kernel &&
+ list_empty(&n_cache->kern_fib_cache_list)) {
+ __prestera_kern_neigh_cache_destroy(sw, n_cache);
+ return NULL;
+ }
+
+ return n_cache;
+}
+
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_find(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key)
@@ -73,24 +547,79 @@ prestera_kern_fib_cache_find(struct prestera_switch *sw,
}
static void
+__prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int i;
+
+ for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
+ n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
+ if (n_cache) {
+ list_del(&fib_cache->kern_neigh_cache_head[i].head);
+ prestera_kern_neigh_cache_put(sw, n_cache);
+ }
+ }
+
+ fib_info_put(fib_cache->fen4_info.fi);
+}
+
+static void
prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
struct prestera_kern_fib_cache *fib_cache)
{
- fib_info_put(fib_cache->fi);
rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
&fib_cache->ht_node,
__prestera_kern_fib_cache_ht_params);
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
kfree(fib_cache);
}
+static int
+__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_neigh_cache_key nc_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ struct fib_nh_common *nhc;
+ int i, nhs, err;
+
+ if (!prestera_fib_info_is_nh(&fc->info))
+ return 0;
+
+ nhs = prestera_kern_fib_info_nhs(&fc->info);
+ if (nhs > PRESTERA_NHGR_SIZE_MAX)
+ return 0;
+
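+ /* Link each kernel nexthop to its neighbour cache entry so that later neigh events can update this route. */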
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
+ err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
+ if (err)
+ return 0;
+
+ n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
+ if (!n_cache)
+ return 0;
+
+ fc->kern_neigh_cache_head[i].this = fc;
+ fc->kern_neigh_cache_head[i].n_cache = n_cache;
+ list_add(&fc->kern_neigh_cache_head[i].head,
+ &n_cache->kern_fib_cache_list);
+ }
+
+ return 0;
+}
+
/* Operations on fi (offload, etc) must be wrapped in utils.
* This function just creates storage.
*/
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key,
- struct fib_info *fi, dscp_t dscp, u8 type)
+ struct fib_notifier_info *info)
{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
struct prestera_kern_fib_cache *fib_cache;
int err;
@@ -99,10 +628,8 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
goto err_kzalloc;
memcpy(&fib_cache->key, key, sizeof(*key));
- fib_info_hold(fi);
- fib_cache->fi = fi;
- fib_cache->kern_dscp = dscp;
- fib_cache->kern_type = type;
+ fib_info_hold(fen_info->fi);
+ memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
&fib_cache->ht_node,
@@ -110,48 +637,270 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
if (err)
goto err_ht_insert;
+ /* Handle nexthops */
+ err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache);
+ if (err)
+ goto out; /* Not critical */
+
+out:
return fib_cache;
err_ht_insert:
- fib_info_put(fi);
+ fib_info_put(fen_info->fi);
kfree(fib_cache);
err_kzalloc:
return NULL;
}
static void
+__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fibc,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded, bool trap)
+{
+ struct fib_nh_common *nhc;
+ int i, nhs;
+
+ nhs = prestera_kern_fib_info_nhs(&fibc->info);
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
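+ /* A NULL nc means the flags apply to every nexthop of the route. */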
+ if (!nc) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ continue;
+ }
+
+ if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ break;
+ }
+ }
+}
+
+static void
+__prestera_k_arb_n_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded)
+{
+ struct neighbour *n;
+
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ return;
+
+ prestera_util_kern_set_neigh_offload(n, offloaded);
+ neigh_release(n);
+}
+
+static void
__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
struct prestera_kern_fib_cache *fc,
bool fail, bool offload, bool trap)
{
struct fib_rt_info fri;
- if (fc->key.addr.v != PRESTERA_IPV4)
+ switch (fc->key.addr.v) {
+ case PRESTERA_IPV4:
+ fri.fi = fc->fen4_info.fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.dscp = fc->fen4_info.dscp;
+ fri.type = fc->fen4_info.type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+ return;
+ case PRESTERA_IPV6:
+ /* TODO */
return;
+ }
+}
+
+static void
+__prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache,
+ bool enabled)
+{
+ struct prestera_nexthop_group_key nh_grp_key;
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ struct prestera_fib_node *fib_node;
+ struct prestera_fib_key fib_key;
+
+ /* Exception for fc with prefix 32: LPM entry is already used by fib */
+ memset(&fc_key, 0, sizeof(fc_key));
+ fc_key.addr = n_cache->key.addr;
+ fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ /* It would be better to use the tb_id of the route that points to this
+ * neighbour. We take it from the rif instead, although the rif is
+ * inconsistent here: in_rif and out_rif should be kept separate.
+ * Also note: each fib pointing to this neigh should get a separate
+ * neigh lpm entry (one per ingress vr).
+ */
+ fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ memset(&fib_key, 0, sizeof(fib_key));
+ fib_key.addr = n_cache->key.addr;
+ fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
+ fib_node = prestera_fib_node_find(sw, &fib_key);
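+ /* No reachable fib cache for this host prefix: the LPM entry exists only for the neighbour, so drop it when the neighbour is disabled. */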
+ if (!fib_cache || !fib_cache->reachable) {
+ if (!enabled && fib_node) {
+ if (prestera_fib_node_util_is_neighbour(fib_node))
+ prestera_fib_node_destroy(sw, fib_node);
+ return;
+ }
+ }
+
+ if (enabled && !fib_node) {
+ memset(&nh_grp_key, 0, sizeof(nh_grp_key));
+ prestera_util_nc_key2nh_key(&n_cache->key,
+ &nh_grp_key.neigh[0]);
+ fib_node = prestera_fib_node_create(sw, &fib_key,
+ PRESTERA_FIB_TYPE_UC_NH,
+ &nh_grp_key);
+ if (!fib_node)
+ pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
+ &fib_key.addr.u.ipv4);
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
+ &nc->key.addr, nc->key.dev))
+ nc->reachable = true;
+ else
+ nc->reachable = false;
+}
+
+/* Kernel neighbour -> neigh_cache info */
+static void
+__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct neighbour *n;
+ int err;
+
+ memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
+ if (!n)
+ goto out;
+
+ read_lock_bh(&n->lock);
+ if (n->nud_state & NUD_VALID && !n->dead) {
+ err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
+ n);
+ if (err)
+ goto n_read_out;
- fri.fi = fc->fi;
- fri.tb_id = fc->key.kern_tb_id;
- fri.dst = fc->key.addr.u.ipv4;
- fri.dst_len = fc->key.prefix_len;
- fri.dscp = fc->kern_dscp;
- fri.type = fc->kern_type;
- /* flags begin */
- fri.offload = offload;
- fri.trap = trap;
- fri.offload_failed = fail;
- /* flags end */
- fib_alias_hw_flags_set(&init_net, &fri);
+ memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
+ nc->nh_neigh_info.connected = true;
+ }
+n_read_out:
+ read_unlock_bh(&n->lock);
+out:
+ nc->in_kernel = nc->nh_neigh_info.connected;
+ if (n)
+ neigh_release(n);
+}
+
+/* neigh_cache info -> lpm update */
+static void
+__prestera_k_arb_nc_apply(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_kern_neigh_cache_head *nhead;
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ int err;
+
+ __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
+ __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh)
+ goto out;
+
+ /* Do hw update only if something changed to prevent nh flap */
+ if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
+ sizeof(nh_neigh->info))) {
+ memcpy(&nh_neigh->info, &nc->nh_neigh_info,
+ sizeof(nh_neigh->info));
+ err = prestera_nh_neigh_set(sw, nh_neigh);
+ if (err) {
+ pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
+ "prestera_nh_neigh_set", err,
+ &nh_neigh->key.addr.u.ipv4,
+ &nh_neigh->info.ha[0]);
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
+ __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
+ nc->in_kernel,
+ !nc->in_kernel);
+ }
}
static int
__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
struct prestera_kern_fib_cache *fc)
{
+ struct fib_nh_common *nhc;
+ int nh_cnt;
+
memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
- switch (fc->fi->fib_type) {
+ switch (prestera_kern_fib_info_type(&fc->info)) {
case RTN_UNICAST:
- fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ if (prestera_fib_info_is_direct(&fc->info) &&
+ fc->key.prefix_len ==
+ PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
+ /* Special case for a host prefix (/32): a direct route
+ * would conflict in the lpm - once a TRAP entry is added,
+ * there is no room left for the neighbour entry. So
+ * represent a direct host route as NH, and the neighbour
+ * will be resolved as the nexthop of this route.
+ */
+ nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
+ fc->lpm_info.nh_grp_key.neigh[0].addr =
+ fc->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[0].rif =
+ nhc->nhc_dev;
+
+ break;
+ }
+
+ /* We could also derive nh_grp_key from fi, which would be more
+ * correct because the cache does not always reflect what is
+ * actually written to the lpm. For now we use the nh cache here
+ * as well (for this case).
+ */
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
+ break;
+
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
+ }
+
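+ /* Without any linked neighbour caches there is nothing to program as a nexthop group, so fall back to trapping to CPU. */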
+ fc->lpm_info.fib_type = nh_cnt ?
+ PRESTERA_FIB_TYPE_UC_NH :
+ PRESTERA_FIB_TYPE_TRAP;
break;
/* Unsupported. Leave it for kernel: */
case RTN_BROADCAST:
@@ -191,7 +940,8 @@ static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
return 0;
fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
- fc->lpm_info.fib_type);
+ fc->lpm_info.fib_type,
+ &fc->lpm_info.nh_grp_key);
if (!fib_node) {
dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
@@ -220,6 +970,10 @@ static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
}
switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ fc->reachable, false);
+ break;
case PRESTERA_FIB_TYPE_TRAP:
__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
false, fc->reachable);
@@ -271,17 +1025,140 @@ __prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
return rfc;
}
+static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ struct neighbour *n;
+ bool hw_active;
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh) {
+ pr_err("Cannot find nh_neigh for cached %pI4n",
+ &nc->key.addr.u.ipv4);
+ return;
+ }
+
+ hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh);
+
+#ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+ if (!hw_active && nc->in_kernel)
+ goto out;
+#else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+ if (!hw_active)
+ goto out;
+#endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+
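+ /* Poke the kernel neighbour so it is (re)resolved and kept alive while hw keeps forwarding through it. */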
+ if (nc->key.addr.v == PRESTERA_IPV4) {
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ } else {
+ n = NULL;
+ }
+
+ if (!IS_ERR(n) && n) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ } else {
+ pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4);
+ }
+
+out:
+ return;
+}
+
+/* Propagate hw state to kernel */
+static void prestera_k_arb_hw_evt(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
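+ /* Pause the walk while calling out of the RCU-protected iteration. */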
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_hw_state_upd(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+/* Propagate kernel event to hw */
+static void prestera_k_arb_n_evt(struct prestera_switch *sw,
+ struct neighbour *n)
+{
+ struct prestera_kern_neigh_cache_key n_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ err = prestera_util_neigh2nc_key(sw, n, &n_key);
+ if (err)
+ return;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
+ if (!n_cache) {
+ n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
+ if (!n_cache)
+ return;
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ }
+
+ __prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+
+ prestera_kern_neigh_cache_put(sw, n_cache);
+}
+
+static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
static int
prestera_k_arb_fib_evt(struct prestera_switch *sw,
bool replace, /* replace or del */
- struct fib_entry_notifier_info *fen_info)
+ struct fib_notifier_info *info)
{
struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
struct prestera_kern_fib_cache_key fc_key;
struct prestera_kern_fib_cache *fib_cache;
int err;
- prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
+ prestera_util_fen_info2fib_cache_key(info, &fc_key);
fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
if (fib_cache) {
fib_cache->reachable = false;
@@ -304,10 +1181,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
}
if (replace) {
- fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
- fen_info->fi,
- fen_info->dscp,
- fen_info->type);
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
if (!fib_cache) {
dev_err(sw->dev->dev, "fib_cache == NULL");
return -ENOENT;
@@ -331,9 +1205,65 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
dev_err(sw->dev->dev, "Applying fib_cache failed");
}
+ /* Update all neighs to resolve overlapped and apply related */
+ __prestera_k_arb_fib_evt2nc(sw);
+
return 0;
}
+static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_neigh_cache *n_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ if (!list_empty(&n_cache->kern_fib_cache_list)) {
+ WARN_ON(1); /* BUG */
+ return;
+ }
+ __prestera_k_arb_n_offload_set(sw, n_cache, false);
+ n_cache->in_kernel = false;
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_fib_cache *fib_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
+ false, false,
+ false);
+ __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
+ false, false);
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static void prestera_k_arb_abort(struct prestera_switch *sw)
+{
+ /* Remove all arbiter entries and the related hw objects. */
+ /* Sequence:
+ * 1) Clear the arbiter tables, but don't touch hw
+ * 2) Clear hw
+ * This approach is used because an arbiter object is not directly mapped
+ * to a hw object, so deleting one arbiter object may even lead to the
+ * creation of a hw object (e.g. in the case of overlapped routes).
+ */
+ rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
+ __prestera_k_arb_abort_fib_ht_cb,
+ sw);
+ rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
+ __prestera_k_arb_abort_neigh_ht_cb,
+ sw);
+}
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -469,13 +1399,15 @@ static void __prestera_router_fib_event_work(struct work_struct *work)
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
+ err = prestera_k_arb_fib_evt(sw, true,
+ &fib_work->fen_info.info);
if (err)
goto err_out;
break;
case FIB_EVENT_ENTRY_DEL:
- err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
+ err = prestera_k_arb_fib_evt(sw, false,
+ &fib_work->fen_info.info);
if (err)
goto err_out;
@@ -534,10 +1466,89 @@ static int __prestera_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct prestera_netevent_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct neighbour *n;
+};
+
+static void prestera_router_neigh_event_work(struct work_struct *work)
+{
+ struct prestera_netevent_work *net_work =
+ container_of(work, struct prestera_netevent_work, work);
+ struct prestera_switch *sw = net_work->sw;
+ struct neighbour *n = net_work->n;
+
+ /* A neigh is not a hw-related object; it is stored only in the kernel, hence the rtnl lock. */
+ rtnl_lock();
+
+ prestera_k_arb_n_evt(sw, n);
+
+ neigh_release(n);
+ rtnl_unlock();
+ kfree(net_work);
+}
+
+static int prestera_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_netevent_work *net_work;
+ struct prestera_router *router;
+ struct neighbour *n = ptr;
+
+ router = container_of(nb, struct prestera_router, netevent_nb);
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl->family != AF_INET)
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (WARN_ON(!net_work))
+ return NOTIFY_BAD;
+
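+ /* Hold a reference on the neighbour; it is released in the work handler. */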
+ neigh_clone(n);
+ net_work->n = n;
+ net_work->sw = router->sw;
+ INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
+ prestera_queue_work(&net_work->work);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void prestera_router_update_neighs_work(struct work_struct *work)
+{
+ struct prestera_router *router;
+
+ router = container_of(work, struct prestera_router,
+ neighs_update.dw.work);
+ rtnl_lock();
+
+ prestera_k_arb_hw_evt(router->sw);
+
+ rtnl_unlock();
+ prestera_queue_delayed_work(&router->neighs_update.dw,
+ msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
+}
+
+static int prestera_neigh_work_init(struct prestera_switch *sw)
+{
+ INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
+ prestera_router_update_neighs_work);
+ prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
+ return 0;
+}
+
+static void prestera_neigh_work_fini(struct prestera_switch *sw)
+{
+ cancel_delayed_work_sync(&sw->router->neighs_update.dw);
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
struct prestera_router *router;
- int err;
+ int err, nhgrp_cache_bytes;
router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
if (!router)
@@ -555,6 +1566,22 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_kern_fib_cache_ht_init;
+ err = rhashtable_init(&router->kern_neigh_cache_ht,
+ &__prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_kern_neigh_cache_ht_init;
+
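+ /* One hw-state bit per nexthop group id. */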
+ nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
+ router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
+ if (!router->nhgrp_hw_state_cache) {
+ err = -ENOMEM;
+ goto err_nh_state_cache_alloc;
+ }
+
+ err = prestera_neigh_work_init(sw);
+ if (err)
+ goto err_neigh_work_init;
+
router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
if (err)
@@ -565,6 +1592,11 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_register_inetaddr_notifier;
+ router->netevent_nb.notifier_call = prestera_router_netevent_event;
+ err = register_netevent_notifier(&router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
router->fib_nb.notifier_call = __prestera_router_fib_event;
err = register_fib_notifier(&init_net, &router->fib_nb,
/* TODO: flush fib entries */ NULL, NULL);
@@ -574,10 +1606,18 @@ int prestera_router_init(struct prestera_switch *sw)
return 0;
err_register_fib_notifier:
+ unregister_netevent_notifier(&router->netevent_nb);
+err_register_netevent_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
+ prestera_neigh_work_fini(sw);
+err_neigh_work_init:
+ kfree(router->nhgrp_hw_state_cache);
+err_nh_state_cache_alloc:
+ rhashtable_destroy(&router->kern_neigh_cache_ht);
+err_kern_neigh_cache_ht_init:
rhashtable_destroy(&router->kern_fib_cache_ht);
err_kern_fib_cache_ht_init:
prestera_router_hw_fini(sw);
@@ -589,8 +1629,15 @@ err_router_lib_init:
void prestera_router_fini(struct prestera_switch *sw)
{
unregister_fib_notifier(&init_net, &sw->router->fib_nb);
+ unregister_netevent_notifier(&sw->router->netevent_nb);
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ prestera_neigh_work_fini(sw);
+ prestera_queue_drain();
+
+ prestera_k_arb_abort(sw);
+
+ kfree(sw->router->nhgrp_hw_state_cache);
rhashtable_destroy(&sw->router->kern_fib_cache_ht);
prestera_router_hw_fini(sw);
kfree(sw->router);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index 5b0cf3be9a9e..4f65df0ae5e8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -8,10 +8,16 @@
#include "prestera_router_hw.h"
#include "prestera_acl.h"
-/* +--+
- * +------->|vr|<-+
- * | +--+ |
- * | |
+/* Nexthop is pointed
+ * to port (not rif)
+ * +-------+
+ * +>|nexthop|
+ * | +-------+
+ * |
+ * +--+ +-----++
+ * +------->|vr|<-+ +>|nh_grp|
+ * | +--+ | | +------+
+ * | | |
* +-+-------+ +--+---+-+
* |rif_entry| |fib_node|
* +---------+ +--------+
@@ -23,6 +29,8 @@
#define PRESTERA_NHGR_UNUSED (0)
#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+/* Need to merge it with router_manager */
+#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */
static const struct rhashtable_params __prestera_fib_ht_params = {
.key_offset = offsetof(struct prestera_fib_node, key),
@@ -31,10 +39,45 @@ static const struct rhashtable_params __prestera_fib_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params __prestera_nh_neigh_ht_params = {
+ .key_offset = offsetof(struct prestera_nh_neigh, key),
+ .key_len = sizeof(struct prestera_nh_neigh_key),
+ .head_offset = offsetof(struct prestera_nh_neigh, ht_node),
+};
+
+static const struct rhashtable_params __prestera_nexthop_group_ht_params = {
+ .key_offset = offsetof(struct prestera_nexthop_group, key),
+ .key_len = sizeof(struct prestera_nexthop_group_key),
+ .head_offset = offsetof(struct prestera_nexthop_group, ht_node),
+};
+
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg);
+
+/* TODO: move to router.h as macros */
+static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key)
+{
+ return memchr_inv(key, 0, sizeof(*key)) ? true : false;
+}
+
int prestera_router_hw_init(struct prestera_switch *sw)
{
int err;
+ err = rhashtable_init(&sw->router->nh_neigh_ht,
+ &__prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_nh_neigh_ht_init;
+
+ err = rhashtable_init(&sw->router->nexthop_group_ht,
+ &__prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_grp_ht_init;
+
err = rhashtable_init(&sw->router->fib_ht,
&__prestera_fib_ht_params);
if (err)
@@ -43,15 +86,25 @@ int prestera_router_hw_init(struct prestera_switch *sw)
INIT_LIST_HEAD(&sw->router->vr_list);
INIT_LIST_HEAD(&sw->router->rif_entry_list);
+ return 0;
+
err_fib_ht_init:
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+err_nexthop_grp_ht_init:
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+err_nh_neigh_ht_init:
return 0;
}
void prestera_router_hw_fini(struct prestera_switch *sw)
{
+ rhashtable_free_and_destroy(&sw->router->fib_ht,
+ prestera_fib_node_destroy_ht_cb, sw);
WARN_ON(!list_empty(&sw->router->vr_list));
WARN_ON(!list_empty(&sw->router->rif_entry_list));
rhashtable_destroy(&sw->router->fib_ht);
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
}
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
@@ -232,6 +285,286 @@ err_kzalloc:
return NULL;
}
+static void __prestera_nh_neigh_destroy(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ rhashtable_remove_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ kfree(neigh);
+}
+
+static struct prestera_nh_neigh *
+__prestera_nh_neigh_create(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+ int err;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_KERNEL);
+ if (!neigh)
+ goto err_kzalloc;
+
+ memcpy(&neigh->key, key, sizeof(*key));
+ neigh->info.connected = false;
+ INIT_LIST_HEAD(&neigh->nexthop_group_list);
+ err = rhashtable_insert_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return neigh;
+
+err_rhashtable_insert:
+ kfree(neigh);
+err_kzalloc:
+ return NULL;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *nh_neigh;
+
+ nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,
+ key, __prestera_nh_neigh_ht_params);
+ return IS_ERR(nh_neigh) ? NULL : nh_neigh;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+
+ neigh = prestera_nh_neigh_find(sw, key);
+ if (!neigh)
+ return __prestera_nh_neigh_create(sw, key);
+
+ return neigh;
+}
+
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ if (list_empty(&neigh->nexthop_group_list))
+ __prestera_nh_neigh_destroy(sw, neigh);
+}
+
+/* Propagate the updated prestera_neigh_info to every nexthop group using this neighbour */
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ struct prestera_nh_neigh_head *nh_head;
+ struct prestera_nexthop_group *nh_grp;
+ int err;
+
+ list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) {
+ nh_grp = nh_head->this;
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh)
+{
+ bool state;
+ struct prestera_nh_neigh_head *nh_head, *tmp;
+
+ state = false;
+ list_for_each_entry_safe(nh_head, tmp,
+ &nh_neigh->nexthop_group_list, head) {
+ state = prestera_nexthop_group_util_hw_state(sw, nh_head->this);
+ if (state)
+ goto out;
+ }
+
+out:
+ return state;
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_create(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt, err, gid;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ goto err_kzalloc;
+
+ memcpy(&nh_grp->key, key, sizeof(*key));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt]))
+ break;
+
+ nh_neigh = prestera_nh_neigh_get(sw,
+ &nh_grp->key.neigh[nh_cnt]);
+ if (!nh_neigh)
+ goto err_nh_neigh_get;
+
+ nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh;
+ nh_grp->nh_neigh_head[nh_cnt].this = nh_grp;
+ list_add(&nh_grp->nh_neigh_head[nh_cnt].head,
+ &nh_neigh->nexthop_group_list);
+ }
+
+ err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id);
+ if (err)
+ goto err_nh_group_create;
+
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ goto err_nexthop_group_set;
+
+ err = rhashtable_insert_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* reset cache for created group */
+ gid = nh_grp->grp_id;
+ sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8);
+
+ return nh_grp;
+
+err_ht_insert:
+err_nexthop_group_set:
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+err_nh_group_create:
+err_nh_neigh_get:
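+ /* Unlink and release the neighbours picked up so far. */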
+ for (nh_cnt--; nh_cnt >= 0; nh_cnt--) {
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh);
+ }
+
+ kfree(nh_grp);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_nexthop_group_destroy(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt;
+
+ rhashtable_remove_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!nh_neigh)
+ break;
+
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_neigh);
+ }
+
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+ kfree(nh_grp);
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_find(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
+ key, __prestera_nexthop_group_ht_params);
+ return IS_ERR(nh_grp) ? NULL : nh_grp;
+}
+
+static struct prestera_nexthop_group *
+prestera_nexthop_group_get(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = __prestera_nexthop_group_find(sw, key);
+ if (nh_grp) {
+ refcount_inc(&nh_grp->refcount);
+ } else {
+ nh_grp = __prestera_nexthop_group_create(sw, key);
+ if (IS_ERR(nh_grp))
+ return ERR_CAST(nh_grp);
+
+ refcount_set(&nh_grp->refcount, 1);
+ }
+
+ return nh_grp;
+}
+
+static void prestera_nexthop_group_put(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ if (refcount_dec_and_test(&nh_grp->refcount))
+ __prestera_nexthop_group_destroy(sw, nh_grp);
+}
+
+/* Re-program the group in hw with the current info of each member nh_neigh */
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX];
+ struct prestera_nh_neigh *neigh;
+ int nh_cnt;
+
+ memset(&info[0], 0, sizeof(info));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!neigh)
+ break;
+
+ memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info));
+ }
+
+ return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id);
+}
+
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ int err;
+ u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1;
+ u32 gid = nh_grp->grp_id;
+ u8 *cache = sw->router->nhgrp_hw_state_cache;
+
+ /* Anti-jitter: prevent the case where the nh_grp state is read twice
+ * within a short time and the state bit is already cleared on the
+ * second read. Keep the active state sticky for
+ * PRESTERA_NH_ACTIVE_JIFFER_FILTER ms after the last refresh.
+ */
+ if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick +
+ msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) {
+ err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size);
+ if (err) {
+ pr_err("Failed to get hw state nh_grp's");
+ return false;
+ }
+
+ sw->router->nhgrp_hw_cache_kick = jiffies;
+ }
+
+ if (cache[gid / 8] & BIT(gid % 8))
+ return true;
+
+ return false;
+}
+
struct prestera_fib_node *
prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
{
@@ -251,6 +584,9 @@ static void __prestera_fib_node_destruct(struct prestera_switch *sw,
prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
fib_node->key.prefix_len);
switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+ break;
case PRESTERA_FIB_TYPE_TRAP:
break;
case PRESTERA_FIB_TYPE_DROP:
@@ -272,10 +608,20 @@ void prestera_fib_node_destroy(struct prestera_switch *sw,
kfree(fib_node);
}
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_fib_node *node = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_fib_node_destruct(sw, node);
+ kfree(node);
+}
+
struct prestera_fib_node *
prestera_fib_node_create(struct prestera_switch *sw,
struct prestera_fib_key *key,
- enum prestera_fib_type fib_type)
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key)
{
struct prestera_fib_node *fib_node;
u32 grp_id;
@@ -302,6 +648,14 @@ prestera_fib_node_create(struct prestera_switch *sw,
case PRESTERA_FIB_TYPE_DROP:
grp_id = PRESTERA_NHGR_DROP;
break;
+ case PRESTERA_FIB_TYPE_UC_NH:
+ fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
+ nh_grp_key);
+ if (!fib_node->info.nh_grp)
+ goto err_nh_grp_get;
+
+ grp_id = fib_node->info.nh_grp->grp_id;
+ break;
default:
pr_err("Unsupported fib_type %d", fib_type);
goto err_nh_grp_get;
@@ -323,6 +677,8 @@ err_ht_insert:
prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
key->prefix_len);
err_lpm_add:
+ if (fib_type == PRESTERA_FIB_TYPE_UC_NH)
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
err_nh_grp_get:
prestera_vr_put(sw, vr);
err_vr_get:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
index 67dbb49c8bd4..9ca97919c863 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -31,6 +31,63 @@ struct prestera_ip_addr {
PRESTERA_IPV4 = 0,
PRESTERA_IPV6
} v;
+#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \
+ /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */)
+};
+
+struct prestera_nh_neigh_key {
+ struct prestera_ip_addr addr;
+ /* The rif may look obsolete here, since info already carries an iface.
+ * However, a key can contain functional fields, or fields used to
+ * filter out duplicate objects at the logical level (before passing
+ * them to HW); a key can also be used to cover hardware restrictions.
+ * In our case the rif is a logical interface (it may even be a VLAN)
+ * which, combined with the IP address (also unrelated to the hardware
+ * nexthop), provides logical compression of the created nexthops.
+ * rif + IP address can be thought of as just a cookie.
+ */
+ /* struct prestera_rif *rif; */
+ /* Used just as a cookie, together with addr, to separate ARP domains */
+ void *rif;
+};
+
+/* Used for hw call */
+struct prestera_neigh_info {
+ struct prestera_iface iface;
+ unsigned char ha[ETH_ALEN];
+ u8 connected; /* bool: indicates whether mac/oif are valid */
+ u8 __pad[1];
+};
+
+/* Used to notify nexthop groups about neighbour changes */
+struct prestera_nh_neigh {
+ struct prestera_nh_neigh_key key;
+ struct prestera_neigh_info info;
+ struct rhash_head ht_node; /* node of prestera_vr */
+ struct list_head nexthop_group_list;
+};
+
+#define PRESTERA_NHGR_SIZE_MAX 4
+
+struct prestera_nexthop_group {
+ struct prestera_nexthop_group_key {
+ struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX];
+ } key;
+ /* Store the intermediate object here.
+ * This avoids the overhead of an extra kzalloc call.
+ */
+ /* nh_neigh is used only to notify nexthop_group */
+ struct prestera_nh_neigh_head {
+ struct prestera_nexthop_group *this;
+ struct list_head head;
+ /* The ptr to neigh is not strictly necessary; it is kept to avoid
+ * looking up the nh_neigh by key (n) on destroy.
+ */
+ struct prestera_nh_neigh *neigh;
+ } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX];
+ struct rhash_head ht_node; /* node of prestera_vr */
+ refcount_t refcount;
+ u32 grp_id; /* hw */
};
struct prestera_fib_key {
@@ -44,12 +101,16 @@ struct prestera_fib_info {
struct list_head vr_node;
enum prestera_fib_type {
PRESTERA_FIB_TYPE_INVALID = 0,
+ /* must be pointer to nh_grp id */
+ PRESTERA_FIB_TYPE_UC_NH,
/* It can be connected route
* and will be overlapped with neighbours
*/
PRESTERA_FIB_TYPE_TRAP,
PRESTERA_FIB_TYPE_DROP
} type;
+ /* Valid only if type == UC_NH */
+ struct prestera_nexthop_group *nh_grp;
};
struct prestera_fib_node {
@@ -67,6 +128,18 @@ struct prestera_rif_entry *
prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh);
struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
struct prestera_fib_key *key);
void prestera_fib_node_destroy(struct prestera_switch *sw,
@@ -74,7 +147,8 @@ void prestera_fib_node_destroy(struct prestera_switch *sw,
struct prestera_fib_node *
prestera_fib_node_create(struct prestera_switch *sw,
struct prestera_fib_key *key,
- enum prestera_fib_type fib_type);
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key);
int prestera_router_hw_init(struct prestera_switch *sw);
void prestera_router_hw_fini(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index dc3e3ddc60bf..42ee963e9f75 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -659,7 +659,7 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
init_dummy_netdev(&sdma->napi_dev);
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index 845e9d8c8cc7..f0e9d6ea88c5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -120,8 +120,9 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
return 0;
}
-static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
- struct prestera_port *to_port)
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress)
{
struct prestera_switch *sw = binding->port->sw;
u8 span_id;
@@ -135,7 +136,7 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
if (err)
return err;
- err = prestera_hw_span_bind(binding->port, span_id);
+ err = prestera_hw_span_bind(binding->port, span_id, ingress);
if (err) {
prestera_span_put(sw, span_id);
return err;
@@ -145,11 +146,12 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
return 0;
}
-static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress)
{
int err;
- err = prestera_hw_span_unbind(binding->port);
+ err = prestera_hw_span_unbind(binding->port, ingress);
if (err)
return err;
@@ -161,60 +163,6 @@ static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
return 0;
}
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f)
-{
- struct prestera_flow_block_binding *binding;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- struct prestera_port *port;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only singular actions are supported");
- return -EOPNOTSUPP;
- }
-
- act = &f->rule->action.entries[0];
-
- if (!prestera_netdev_check(act->dev)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only Marvell Prestera port is supported");
- return -EINVAL;
- }
- if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
- return -EOPNOTSUPP;
- if (act->id != FLOW_ACTION_MIRRED)
- return -EOPNOTSUPP;
- if (protocol != htons(ETH_P_ALL))
- return -EOPNOTSUPP;
-
- port = netdev_priv(act->dev);
-
- list_for_each_entry(binding, &block->binding_list, list) {
- err = prestera_span_rule_add(binding, port);
- if (err)
- goto rollback;
- }
-
- return 0;
-
-rollback:
- list_for_each_entry_continue_reverse(binding,
- &block->binding_list, list)
- prestera_span_rule_del(binding);
- return err;
-}
-
-void prestera_span_destroy(struct prestera_flow_block *block)
-{
- struct prestera_flow_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- prestera_span_rule_del(binding);
-}
-
int prestera_span_init(struct prestera_switch *sw)
{
struct prestera_span *span;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
index f0644521f78a..493b68524bcb 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -8,13 +8,17 @@
#define PRESTERA_SPAN_INVALID_ID -1
+struct prestera_port;
struct prestera_switch;
-struct prestera_flow_block;
+struct prestera_flow_block_binding;
int prestera_span_init(struct prestera_switch *sw);
void prestera_span_fini(struct prestera_switch *sw);
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f);
-void prestera_span_destroy(struct prestera_flow_block *block);
+
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress);
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress);
#endif /* _PRESTERA_SPAN_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 71cde97d85c8..e548cd32582e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -143,6 +143,7 @@ prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
prestera_port_uc_flood_set(port, false);
prestera_port_mc_flood_set(port, false);
prestera_port_learning_set(port, false);
+ prestera_port_br_locked_set(port, false);
}
static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
@@ -162,6 +163,11 @@ static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
if (err)
goto err_out;
+ err = prestera_port_br_locked_set(port,
+ br_port->flags & BR_PORT_LOCKED);
+ if (err)
+ goto err_out;
+
return 0;
err_out:
@@ -1163,7 +1169,7 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags.mask &
- ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED))
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 349b8a94e939..cf456d62677f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1354,10 +1354,10 @@ static void pxa168_eth_netpoll(struct net_device *dev)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c1e985416c0e..1b43704baceb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,9 +394,9 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(skge->hw->pdev),
sizeof(info->bus_info));
}
@@ -3832,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_HIGHDMA;
skge = netdev_priv(dev);
- netif_napi_add(dev, &skge->napi, skge_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &skge->napi, skge_poll);
skge->netdev = dev;
skge->hw = hw;
skge->msg_enable = netif_msg_init(debug, default_msg);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index bbea5458000b..ab33ba1c3023 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3687,9 +3687,9 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sky2->hw->pdev),
sizeof(info->bus_info));
}
@@ -4937,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &hw->napi, sky2_poll);
err = register_netdev(dev);
if (err) {
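
The ethtool drvinfo hunks in the pxa168/skge/sky2 diffs above all switch strlcpy() to strscpy(). Below is a minimal user-space model of the strscpy() contract those conversions rely on (copy at most size - 1 bytes, always NUL-terminate, return bytes copied or -E2BIG on truncation); it is an illustrative sketch, not the kernel implementation, and the test strings are placeholders.

/* Minimal model of the strscpy() return contract: unlike strlcpy(), which
 * returns strlen(src), this returns the number of bytes actually copied or
 * -E2BIG when the destination was too small.
 */
#include <stdio.h>
#include <errno.h>
#include <sys/types.h>

static ssize_t model_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -E2BIG;

	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';

	return src[i] ? -E2BIG : (ssize_t)i;
}

int main(void)
{
	char buf[8];

	printf("%zd\n", model_strscpy(buf, "skge", sizeof(buf)));           /* 4 */
	printf("%zd\n", model_strscpy(buf, "very-long-name", sizeof(buf))); /* -E2BIG */
	return 0;
}
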
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d9426b01f462..4fba7cb0144b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -73,6 +73,12 @@ static const struct mtk_reg_map mtk_reg_map = {
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
+ .gdma_to_ppe = 0x4444,
+ .ppe_base = 0x0c00,
+ .wdma_base = {
+ [0] = 0x2800,
+ [1] = 0x2c00,
+ },
};
static const struct mtk_reg_map mt7628_reg_map = {
@@ -126,6 +132,12 @@ static const struct mtk_reg_map mt7986_reg_map = {
.fq_blen = 0x472c,
},
.gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
};
/* strings used by ethtool */
@@ -1458,7 +1470,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return !eth->hwlro;
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@@ -1573,8 +1585,8 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
.last = !xdp_frame_has_frags(xdpf),
};
int err, index = 0, n_desc = 1, nr_frags;
- struct mtk_tx_dma *htxd, *txd, *txd_pdma;
struct mtk_tx_buf *htx_buf, *tx_buf;
+ struct mtk_tx_dma *htxd, *txd;
void *data = xdpf->data;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
@@ -1608,7 +1620,6 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
- txd_pdma = qdma_to_pdma(ring, txd);
if (txd == ring->last_free)
goto unmap;
@@ -1629,7 +1640,8 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- txd_pdma = qdma_to_pdma(ring, txd);
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
+
if (index & 1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1660,13 +1672,15 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- txd_pdma = qdma_to_pdma(ring, htxd);
tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
+
txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+ }
htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
}
@@ -1732,7 +1746,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
case XDP_TX: {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
- if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
act = XDP_DROP;
break;
@@ -1891,10 +1905,21 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
- else
+ } else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd4;
+ }
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1902,16 +1927,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- }
-
- reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb,
- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -2974,21 +2991,25 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- u32 gdm_config = MTK_GDMA_TO_PDMA;
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config;
+ int i;
err = mtk_start_dma(eth);
if (err)
return err;
- if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
- gdm_config = MTK_GDMA_TO_PPE;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3026,6 +3047,7 @@ static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -3053,8 +3075,8 @@ static int mtk_stop(struct net_device *dev)
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
return 0;
}
@@ -3554,8 +3576,8 @@ static void mtk_get_drvinfo(struct net_device *dev,
{
struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
@@ -3923,6 +3945,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
static int mtk_probe(struct platform_device *pdev)
{
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -4003,20 +4026,31 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- for (i = 0;; i++) {
- struct device_node *np = of_parse_phandle(pdev->dev.of_node,
- "mediatek,wed", i);
- static const u32 wdma_regs[] = {
- MTK_WDMA0_BASE,
- MTK_WDMA1_BASE
- };
- void __iomem *wdma;
-
- if (!np || i >= ARRAY_SIZE(wdma_regs))
- break;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ }
- wdma = eth->base + wdma_regs[i];
- mtk_wed_add_hw(np, eth, wdma, i);
+ if (eth->soc->offload_version) {
+ for (i = 0;; i++) {
+ struct device_node *np;
+ phys_addr_t wdma_phy;
+ u32 wdma_base;
+
+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+ np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ if (!np)
+ break;
+
+ wdma_base = eth->soc->reg_map->wdma_base[i];
+ wdma_phy = res ? res->start + wdma_base : 0;
+ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+ wdma_phy, i);
+ }
}
for (i = 0; i < 3; i++) {
@@ -4094,10 +4128,19 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
- if (!eth->ppe) {
- err = -ENOMEM;
- goto err_free_dev;
+ u32 num_ppe;
+
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+ eth->soc->offload_version, i);
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
}
err = mtk_eth_offload_init(eth);
@@ -4123,10 +4166,8 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
- NAPI_POLL_WEIGHT);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
@@ -4190,6 +4231,8 @@ static const struct mtk_soc_data mt7621_data = {
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4208,6 +4251,8 @@ static const struct mtk_soc_data mt7622_data = {
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4225,6 +4270,8 @@ static const struct mtk_soc_data mt7623_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4256,8 +4303,11 @@ static const struct mtk_soc_data mt7986_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 7405c97cda66..b52f3b0177ef 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -105,7 +105,6 @@
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_TO_PDMA 0x0
-#define MTK_GDMA_TO_PPE 0x4444
#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
@@ -269,9 +268,6 @@
#define TX_DMA_FPORT_MASK_V2 0xf
#define TX_DMA_SWC_V2 BIT(30)
-#define MTK_WDMA0_BASE 0x2800
-#define MTK_WDMA1_BASE 0x2c00
-
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
#define TX_DMA_TSO BIT(28)
@@ -314,8 +310,13 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
-#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
-#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2 BIT(0)
@@ -950,6 +951,9 @@ struct mtk_reg_map {
u32 fq_blen; /* fq free page buffer length */
} qdma;
u32 gdm1_cnt;
+ u32 gdma_to_ppe;
+ u32 ppe_base;
+ u32 wdma_base[2];
};
/* struct mtk_eth_data - This is the structure holding all differences
@@ -963,6 +967,8 @@ struct mtk_reg_map {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
+ * @foe_entry_size Foe table entry size.
* @txd_size Tx DMA descriptor size.
* @rxd_size Rx DMA descriptor size.
* @rx_irq_done_mask Rx irq done register mask.
@@ -977,6 +983,8 @@ struct mtk_soc_data {
u32 required_clks;
bool required_pctl;
u8 offload_version;
+ u8 hash_offset;
+ u16 foe_entry_size;
netdev_features_t hw_features;
struct {
u32 txd_size;
@@ -1106,7 +1114,7 @@ struct mtk_eth {
int ip_align;
- struct mtk_ppe *ppe;
+ struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
@@ -1137,6 +1145,86 @@ struct mtk_mac {
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+
+ return ppe->foe_table + hash * soc->foe_entry_size;
+}
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
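
The new mtk_foe_get_entry() helper above keeps the FoE table as an untyped buffer and locates entries at hash * soc->foe_entry_size, so the same code can walk the differently sized v1 and v2 entry layouts. A small user-space sketch of that access pattern follows; the 64-byte entry size and the lone ib1 field are placeholders, not the real MTK FoE layout.

/* Variable-record-size table access: index by hash * per-SoC entry size. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct foe_hdr {		/* common prefix shared by both layouts */
	uint32_t ib1;
};

static inline struct foe_hdr *
foe_get(void *table, size_t entry_size, unsigned int hash)
{
	return (struct foe_hdr *)((uint8_t *)table + (size_t)hash * entry_size);
}

int main(void)
{
	size_t entry_size = 64;		/* placeholder per-SoC size */
	unsigned int nentries = 8;
	void *table = calloc(nentries, entry_size);

	if (!table)
		return 1;

	foe_get(table, entry_size, 3)->ib1 = 0x1234;
	printf("ib1[3] = 0x%x\n", (unsigned int)foe_get(table, entry_size, 3)->ib1);

	free(table);
	return 0;
}
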
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index dab8f3f771f8..ae00e572390d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -56,7 +56,7 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
@@ -88,12 +88,12 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
enable * MTK_PPE_CACHE_CTL_EN);
}
-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -122,16 +122,16 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
- hash <<= 1;
+ hash <<= (ffs(eth->soc->hash_offset) - 1);
hash &= MTK_PPE_ENTRIES - 1;
return hash;
}
static inline struct mtk_foe_mac_info *
-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.l2;
@@ -143,9 +143,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
}
static inline u32 *
-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.ib2;
@@ -156,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
return &entry->ipv4.ib2;
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac)
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac)
{
struct mtk_foe_mac_info *l2;
u32 ports_pad, val;
memset(entry, 0, sizeof(*entry));
- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
- MTK_FOE_IB1_BIND_TTL |
- MTK_FOE_IB1_BIND_CACHE;
- entry->ib1 = val;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+ entry->ib1 = val;
- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+ } else {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+ }
if (is_multicast_ether_addr(dest_mac))
- val |= MTK_FOE_IB2_MULTICAST;
+ val |= mtk_get_ib2_multicast_mask(eth);
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
@@ -210,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
return 0;
}
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port)
{
- u32 *ib2 = mtk_foe_entry_ib2(entry);
- u32 val;
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ u32 val = *ib2;
- val = *ib2;
- val &= ~MTK_FOE_IB2_DEST_PORT;
- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+ } else {
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ }
*ib2 = val;
return 0;
}
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool egress,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
struct mtk_ipv4_tuple *t;
switch (type) {
@@ -262,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
return 0;
}
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
int i;
@@ -297,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
return 0;
}
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
l2->etype = BIT(port);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
else
l2->etype |= BIT(8);
- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
return 0;
}
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
case 0:
- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+ mtk_prep_ib1_vlan_layer(eth, 1);
l2->vlan1 = vid;
return 0;
case 1:
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
l2->vlan1 = vid;
l2->etype |= BIT(8);
} else {
l2->vlan2 = vid;
- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
}
return 0;
default:
@@ -337,34 +357,42 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
}
}
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
l2->etype = ETH_P_PPP_SES;
- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
l2->pppoe_id = sid;
return 0;
}
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid)
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
- u32 *ib2 = mtk_foe_entry_ib2(entry);
-
- *ib2 &= ~MTK_FOE_IB2_PORT_MG;
- *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
- if (wdma_idx)
- *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
- FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
- FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ }
return 0;
}
@@ -376,14 +404,15 @@ static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
}
static bool
-mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+ struct mtk_foe_entry *data)
{
int type, len;
if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
return false;
- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
len = offsetof(struct mtk_foe_entry, ipv6._rsv);
else
@@ -410,9 +439,10 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
hlist_del_init(&entry->list);
if (entry->hash != 0xffff) {
- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_BIND);
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
}
entry->hash = 0xffff;
@@ -426,14 +456,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
- u16 timestamp;
- u16 now;
-
- now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
- timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u16 now = mtk_eth_timestamp(ppe->eth);
+ u16 timestamp = ib1 & ib1_ts_mask;
if (timestamp > now)
- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ return ib1_ts_mask + 1 - timestamp + now;
else
return now - timestamp;
}
@@ -441,6 +469,7 @@ static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
struct hlist_node *tmp;
@@ -451,7 +480,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
int cur_idle;
u32 ib1;
- hwe = &ppe->foe_table[cur->hash];
+ hwe = mtk_foe_get_entry(ppe, cur->hash);
ib1 = READ_ONCE(hwe->ib1);
if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
@@ -465,16 +494,16 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
continue;
idle = cur_idle;
- entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 &= ~ib1_ts_mask;
+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
}
}
static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ struct mtk_foe_entry foe = {};
struct mtk_foe_entry *hwe;
- struct mtk_foe_entry foe;
spin_lock_bh(&ppe_lock);
@@ -486,9 +515,9 @@ mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
if (entry->hash == 0xffff)
goto out;
- hwe = &ppe->foe_table[entry->hash];
- memcpy(&foe, hwe, sizeof(foe));
- if (!mtk_flow_entry_match(entry, &foe)) {
+ hwe = mtk_foe_get_entry(ppe, entry->hash);
+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
entry->hash = 0xffff;
goto out;
}
@@ -503,16 +532,22 @@ static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
u16 hash)
{
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
- u16 timestamp;
- timestamp = mtk_eth_timestamp(ppe->eth);
- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+ timestamp);
+ } else {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+ timestamp);
+ }
- hwe = &ppe->foe_table[hash];
- memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
wmb();
hwe->ib1 = entry->ib1;
@@ -539,16 +574,17 @@ mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
u32 hash;
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return mtk_foe_entry_commit_l2(ppe, entry);
- hash = mtk_ppe_hash_entry(&entry->data);
+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
entry->hash = 0xffff;
spin_lock_bh(&ppe_lock);
- hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
spin_unlock_bh(&ppe_lock);
return 0;
@@ -558,10 +594,11 @@ static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u16 hash)
{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
- struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_entry foe = {}, *hwe;
struct mtk_foe_mac_info *l2;
- u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
@@ -572,32 +609,34 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
flow_info->l2_data.base_flow = entry;
flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
flow_info->hash = hash;
- hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
- hwe = &ppe->foe_table[hash];
- memcpy(&foe, hwe, sizeof(foe));
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&foe, hwe, soc->foe_entry_size);
foe.ib1 &= ib1_mask;
foe.ib1 |= entry->data.ib1 & ~ib1_mask;
- l2 = mtk_foe_entry_l2(&foe);
+ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
l2->etype = ETH_P_IPV6;
- *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
__mtk_foe_entry_commit(ppe, &foe, hash);
}
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- struct hlist_head *head = &ppe->foe_flow[hash / 2];
- struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
struct hlist_node *n;
@@ -621,7 +660,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
continue;
}
- if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
if (entry->hash != 0xffff)
entry->hash = 0xffff;
continue;
@@ -678,11 +717,13 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
}
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
- int version)
+ int version, int index)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct device *dev = eth->dev;
- struct mtk_foe_entry *foe;
struct mtk_ppe *ppe;
+ u32 foe_flow_size;
+ void *foe;
ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
if (!ppe)
@@ -698,14 +739,21 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
ppe->dev = dev;
ppe->version = version;
- foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
+ foe = dmam_alloc_coherent(ppe->dev,
+ MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
return NULL;
ppe->foe_table = foe;
- mtk_ppe_debugfs_init(ppe);
+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+ sizeof(*ppe->foe_flow);
+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return NULL;
+
+ mtk_ppe_debugfs_init(ppe, index);
return ppe;
}
@@ -715,21 +763,30 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+ memset(ppe->foe_table, 0,
+ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
/* skip all entries that cross the 1024 byte boundary */
- for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
- for (k = 0; k < ARRAY_SIZE(skip); k++)
- ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
+ for (k = 0; k < ARRAY_SIZE(skip); k++) {
+ struct mtk_foe_entry *hwe;
+
+ hwe = mtk_foe_get_entry(ppe, i + skip[k]);
+ hwe->ib1 |= MTK_FOE_IB1_STATIC;
+ }
+ }
}
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
+ if (!ppe)
+ return;
+
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
@@ -748,6 +805,8 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -755,15 +814,21 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, true);
- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_6RD |
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+ MTK_PPE_MD_TOAP_BYP_CRSN1 |
+ MTK_PPE_MD_TOAP_BYP_CRSN2 |
+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+ else
+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
@@ -798,7 +863,10 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- return 0;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+ }
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -806,9 +874,15 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
u32 val;
int i;
- for (i = 0; i < MTK_PPE_ENTRIES; i++)
- ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_INVALID);
+ if (!ppe)
+ return 0;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
+
+ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ }
mtk_ppe_cache_enable(ppe, false);
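
The reworked hash computation above shifts by ffs(hash_offset) - 1 and the flow lists are bucketed by hash / hash_offset, so hash_offset 2 keeps the old shift-by-1 pairing while hash_offset 4 groups four entries per bucket. A quick user-space check of that arithmetic (this uses POSIX ffs(), not the kernel helper):

/* hash <<= ffs(hash_offset) - 1: offset 2 -> shift 1, offset 4 -> shift 2 */
#include <stdio.h>
#include <strings.h>

int main(void)
{
	int offsets[] = { 2, 4 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("hash_offset=%d -> shift=%d\n",
		       offsets[i], ffs(offsets[i]) - 1);
	return 0;
}
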
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 1f5cf1c9a947..0b7a67a958e4 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -8,8 +8,6 @@
#include <linux/bitfield.h>
#include <linux/rhashtable.h>
-#define MTK_ETH_PPE_BASE 0xc00
-
#define MTK_PPE_ENTRIES_SHIFT 3
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
@@ -34,6 +32,15 @@
#define MTK_FOE_IB1_UDP BIT(30)
#define MTK_FOE_IB1_STATIC BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
+
enum {
MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
@@ -55,14 +62,25 @@ enum {
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
+
#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
+
enum {
MTK_FOE_STATE_INVALID,
MTK_FOE_STATE_UNBIND,
@@ -83,6 +101,9 @@ struct mtk_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u16 minfo;
+ u16 winfo;
};
/* software-only entry type */
@@ -200,7 +221,7 @@ struct mtk_foe_entry {
struct mtk_foe_ipv4_dslite dslite;
struct mtk_foe_ipv6 ipv6;
struct mtk_foe_ipv6_6rd ipv6_6rd;
- u32 data[19];
+ u32 data[23];
};
};
@@ -249,6 +270,7 @@ struct mtk_flow_entry {
};
u8 type;
s8 wed_index;
+ u8 ppe_index;
u16 hash;
union {
struct mtk_foe_entry data;
@@ -267,20 +289,22 @@ struct mtk_ppe {
struct device *dev;
void __iomem *base;
int version;
+ char dirname[5];
- struct mtk_foe_entry *foe_table;
+ void *foe_table;
dma_addr_t foe_phys;
u16 foe_check_time[MTK_PPE_ENTRIES];
- struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+ struct hlist_head *foe_flow;
struct rhashtable l2_flows;
void *acct_table;
};
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
@@ -293,6 +317,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
if (!ppe)
return;
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
+
now = (u16)jiffies;
diff = now - ppe->foe_check_time[hash];
if (diff < HZ / 10)
@@ -302,34 +329,30 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
__mtk_ppe_check_skb(ppe, skb, hash);
}
-static inline int
-mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
-{
- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
-
- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
- return -1;
-
- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
-}
-
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac);
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool orig,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
#endif
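
For the new bounds check in mtk_ppe_check_skb() above, the relevant constants come straight from this header: MTK_PPE_ENTRIES is 1024 << 3 = 8192, so MTK_PPE_HASH_MASK is 8191 (0x1fff) and any larger hash from the RX descriptor is rejected. A trivial check of that arithmetic:

/* 1024 << 3 = 8192 entries, mask = 8191 = 0x1fff */
#include <stdio.h>

#define MTK_PPE_ENTRIES_SHIFT	3
#define MTK_PPE_ENTRIES		(1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK	(MTK_PPE_ENTRIES - 1)

int main(void)
{
	printf("entries=%d mask=0x%x\n", MTK_PPE_ENTRIES, MTK_PPE_HASH_MASK);
	return 0;
}
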
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index eb0b598f14e4..391b071bcff3 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -79,7 +79,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
int i;
for (i = 0; i < MTK_PPE_ENTRIES; i++) {
- struct mtk_foe_entry *entry = &ppe->foe_table[i];
+ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
struct mtk_foe_mac_info *l2;
struct mtk_flow_addr_info ai = {};
unsigned char h_source[ETH_ALEN];
@@ -162,52 +162,28 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
}
static int
-mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, false);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all);
static int
-mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, true);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind);
-static int
-mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
- return single_open(file, mtk_ppe_debugfs_foe_show_all,
- inode->i_private);
-}
-
-static int
-mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
-{
- return single_open(file, mtk_ppe_debugfs_foe_show_bind,
- inode->i_private);
-}
-
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
-{
- static const struct file_operations fops_all = {
- .open = mtk_ppe_debugfs_foe_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
- static const struct file_operations fops_bind = {
- .open = mtk_ppe_debugfs_foe_open_bind,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
struct dentry *root;
- root = debugfs_create_dir("mtk_ppe", NULL);
- debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
- debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+ root = debugfs_create_dir(ppe->dirname, NULL);
+ debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops);
return 0;
}
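
The debugfs hunk above replaces the hand-rolled single_open()/seq_read()/single_release() file_operations with DEFINE_SHOW_ATTRIBUTE(), which pastes a <name>_show function into generated <name>_open and <name>_fops symbols. Below is a user-space analogue of that token-pasting pattern; struct fake_fops is a stand-in for illustration only, not the kernel's struct file_operations, and the generated macro body is simplified.

/* Analogue of the DEFINE_SHOW_ATTRIBUTE() naming convention: a _show
 * function is wrapped into a generated _open and a _fops table.
 */
#include <stdio.h>

struct fake_fops {
	int (*open)(void *priv);
};

#define DEFINE_SHOW_ATTRIBUTE(name) \
static int name ## _open(void *priv) { return name ## _show(priv); } \
static const struct fake_fops name ## _fops = { .open = name ## _open }

static int foe_all_show(void *priv)
{
	printf("show called, priv=%p\n", priv);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foe_all);

int main(void)
{
	return foe_all_fops.open(NULL);
}
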
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 25dc3c3aa31d..28bbd1df3e30 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -52,18 +52,19 @@ static const struct rhashtable_params mtk_flow_ht_params = {
};
static int
-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
- bool egress)
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data, bool egress)
{
- return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
data->v4.src_addr, data->src_port,
data->v4.dst_addr, data->dst_port);
}
static int
-mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data)
{
- return mtk_foe_entry_set_ipv6_tuple(foe,
+ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
data->v6.src_addr.s6_addr32, data->src_port,
data->v6.dst_addr.s6_addr32, data->dst_port);
}
@@ -173,7 +174,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
- *dev = dp->cpu_dp->master;
+ *dev = dsa_port_to_master(dp);
return dp->index;
#else
@@ -190,16 +191,29 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
int pse_port, dsa_port;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
- mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
- info.wcid);
- pse_port = 3;
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+ info.bss, info.wcid);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ switch (info.wdma_idx) {
+ case 0:
+ pse_port = 8;
+ break;
+ case 1:
+ pse_port = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pse_port = 3;
+ }
*wed_index = info.wdma_idx;
goto out;
}
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(foe, dsa_port);
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
if (dev == eth->netdev[0])
pse_port = 1;
@@ -209,7 +223,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
return -EOPNOTSUPP;
out:
- mtk_foe_entry_set_pse_port(foe, pse_port);
+ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
return 0;
}
@@ -333,9 +347,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
- data.eth.h_source,
- data.eth.h_dest);
+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+ data.eth.h_source, data.eth.h_dest);
if (err)
return err;
@@ -360,7 +373,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv4_addr(&foe, &data, false);
+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -371,7 +384,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
data.v6.src_addr = addrs.key->src;
data.v6.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv6_addr(&foe, &data);
+ mtk_flow_set_ipv6_addr(eth, &foe, &data);
}
flow_action_for_each(i, act, &rule->action) {
@@ -401,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
if (err)
return err;
}
@@ -413,10 +426,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
}
if (data.pppoe.num == 1)
- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
&wed_index);
@@ -434,7 +447,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
- err = mtk_foe_entry_commit(eth->ppe, entry);
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
goto free;
@@ -446,7 +459,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return 0;
clear:
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
if (wed_index >= 0)
@@ -464,7 +477,7 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (entry->wed_index >= 0)
@@ -485,7 +498,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
@@ -537,7 +550,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe || !eth->ppe->foe_table)
+ if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -589,8 +602,5 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe || !eth->ppe->foe_table)
- return 0;
-
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 0c45ea0900f1..59596d823d8b 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -21,6 +21,9 @@
#define MTK_PPE_GLO_CFG_BUSY BIT(31)
#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
@@ -54,6 +57,7 @@
#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
enum {
MTK_PPE_SCAN_MODE_DISABLED,
@@ -112,6 +116,8 @@ enum {
#define MTK_PPE_DEFAULT_CPU_PORT 0x248
#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
+
#define MTK_PPE_MTU_DROP 0x308
#define MTK_PPE_VLAN_MTU0 0x30c
@@ -141,4 +147,6 @@ enum {
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+#define MTK_PPE_SBW_CTRL 0x374
+
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 3f0e5e64de50..7e890f81148e 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1255,7 +1255,7 @@ static const struct net_device_ops mtk_star_netdev_ops = {
static void mtk_star_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+ strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
/* TODO Add ethtool stats. */
@@ -1651,8 +1651,7 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 29be2fcafea3..099b6e0df619 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -25,6 +25,11 @@
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
+#define MTK_WED_MAX_GROUP_SIZE 0x100
+#define MTK_WED_VLD_GROUP_SIZE 0x40
+#define MTK_WED_PER_GROUP_PKT 128
+
+#define MTK_WED_FBUF_SIZE 128
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);
@@ -80,11 +85,31 @@ static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
struct mtk_wed_hw *hw;
+ int i;
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw)
+ return NULL;
+
+ if (!hw->wed_dev)
+ goto out;
+
+ if (hw->version == 1)
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+ }
+ /* MT7986 PCIE or AXI */
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = hw_list[i];
+ if (hw && !hw->wed_dev)
+ goto out;
+ }
- hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
- if (!hw || hw->wed_dev)
- return NULL;
+ return NULL;
+out:
hw->wed_dev = dev;
return hw;
}
@@ -150,10 +175,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
desc->buf0 = cpu_to_le32(buf_phys);
desc->buf1 = cpu_to_le32(buf_phys + txd_size);
- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG1;
+
+ if (dev->hw->version == 1)
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ else
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG0;
desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
desc++;
@@ -209,7 +241,7 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
if (!ring->desc)
return;
- dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
+ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
ring->desc, ring->desc_phys);
}
@@ -229,6 +261,14 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ if (dev->hw->version == 1)
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
@@ -237,9 +277,54 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
}
static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+{
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+ } else {
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ }
+}
+
+static void
+mtk_wed_dma_disable(struct mtk_wed_device *dev)
+{
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
+
+ if (dev->hw->version == 1) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
+ } else {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ mtk_wed_set_512_support(dev, false);
+ }
+}
+
+static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
- regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_clr(dev, MTK_WED_CTRL,
@@ -252,21 +337,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
-
- wed_clr(dev, MTK_WED_GLO_CFG,
- MTK_WED_GLO_CFG_TX_DMA_EN |
- MTK_WED_GLO_CFG_RX_DMA_EN);
- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
- wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
- MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
}
static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
- struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
struct mtk_wed_hw *hw = dev->hw;
mutex_lock(&hw_lock);
@@ -281,9 +356,14 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mtk_wed_free_buffer(dev);
mtk_wed_free_tx_rings(dev);
- if (of_dma_is_coherent(wlan_node))
- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
- BIT(hw->index), BIT(hw->index));
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ struct device_node *wlan_node;
+
+ wlan_node = dev->wlan.pci_dev->dev.of_node;
+ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+ }
if (!hw_list[!hw->index]->wed_dev &&
hw->eth->dma_dev != hw->eth->dev)
@@ -296,14 +376,76 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mutex_unlock(&hw_lock);
}
+#define PCIE_BASE_ADDR0 0x11280000
+static void
+mtk_wed_bus_init(struct mtk_wed_device *dev)
+{
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
+
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+ /* pola setting */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+{
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ }
+}
+
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
u32 mask, set;
- u32 offset;
mtk_wed_stop(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
@@ -313,14 +455,33 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
- wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
+ if (dev->hw->version == 1) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
- offset = dev->hw->index ? 0x04000400 : 0;
- wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
- wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ MTK_PCIE_BASE(dev->hw->index));
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0,
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
+ MTK_WDMA_INT_STATUS) |
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
+ MTK_WDMA_GLO_CFG));
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1,
+ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
+ MTK_WDMA_RING_TX(0)) |
+ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
+ MTK_WDMA_RING_RX(0)));
+ }
}
static void
@@ -340,37 +501,65 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
- wed_w32(dev, MTK_WED_TX_BM_TKID,
- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
- dev->wlan.token_start) |
- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
- dev->wlan.token_start + dev->wlan.nbuf - 1));
-
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
- MTK_WED_TX_BM_DYN_THR_HI);
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+ wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+ dev->buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->buf_ring.size / 128));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
- wed_set(dev, MTK_WED_CTRL,
- MTK_WED_CTRL_WED_TX_BM_EN |
- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ if (dev->hw->version == 1)
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ else
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}
static void
-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
{
+ void *head = (void *)ring->desc;
int i;
for (i = 0; i < size; i++) {
- desc[i].buf0 = 0;
- desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
- desc[i].buf1 = 0;
- desc[i].info = 0;
+ struct mtk_wdma_desc *desc;
+
+ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = 0;
}
}
@@ -421,12 +610,10 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
int i;
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
- struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
-
- if (!desc)
+ if (!dev->tx_ring[i].desc)
continue;
- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
}
if (mtk_wed_poll_busy(dev))
@@ -483,16 +670,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
- int size)
+ int size, u32 desc_size)
{
- ring->desc = dma_alloc_coherent(dev->hw->dev,
- size * sizeof(*ring->desc),
+ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
+ ring->desc_size = desc_size;
ring->size = size;
- mtk_wed_ring_reset(ring->desc, size);
+ mtk_wed_ring_reset(ring, size);
return 0;
}
@@ -500,9 +687,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -520,43 +708,63 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
}
static void
-mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
- u32 wdma_mask;
- u32 val;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
- if (!dev->tx_wdma[i].desc)
- mtk_wed_wdma_ring_setup(dev, i, 16);
-
- wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
-
- mtk_wed_hw_init(dev);
+ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+ /* wed control cr set */
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
- wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
- MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
- MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+ /* initial tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
+ dev->wlan.tx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
+ dev->wlan.tx_tbit[1]));
+
+ /* initial txfree interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
+ dev->wdma_idx));
+ }
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
- wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
-
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+}
+
+static void
+mtk_wed_dma_enable(struct mtk_wed_device *dev)
+{
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
@@ -567,16 +775,54 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ wed_set(dev, MTK_WED_WPDMA_CTRL,
+ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ }
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ if (!dev->tx_wdma[i].desc)
+ mtk_wed_wdma_ring_setup(dev, i, 16);
+
+ mtk_wed_hw_init(dev);
+ mtk_wed_configure_irq(dev, irq_mask);
+
mtk_wed_set_ext_int(dev, true);
- val = dev->wlan.wpdma_phys |
- MTK_PCIE_MIRROR_MAP_EN |
- FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
- if (dev->hw->index)
- val |= BIT(1);
- val |= BIT(0);
- regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ if (dev->hw->version == 1) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ } else {
+ mtk_wed_set_512_support(dev, true);
+ }
+ mtk_wed_dma_enable(dev);
dev->running = true;
}
@@ -585,12 +831,14 @@ mtk_wed_attach(struct mtk_wed_device *dev)
__releases(RCU)
{
struct mtk_wed_hw *hw;
+ struct device *device;
int ret = 0;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"mtk_wed_attach without holding the RCU read lock");
- if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
+ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
+ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
!try_module_get(THIS_MODULE))
ret = -ENODEV;
@@ -608,7 +856,11 @@ mtk_wed_attach(struct mtk_wed_device *dev)
goto out;
}
- dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+ ? &dev->wlan.pci_dev->dev
+ : &dev->wlan.platform_dev->dev;
+ dev_info(device, "attaching wed device %d version %d\n",
+ hw->index, hw->version);
dev->hw = hw;
dev->dev = hw->dev;
@@ -626,7 +878,9 @@ mtk_wed_attach(struct mtk_wed_device *dev)
}
mtk_wed_hw_init_early(dev);
- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
+ if (hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
out:
mutex_unlock(&hw_lock);
@@ -653,7 +907,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc)))
return -ENOMEM;
if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
@@ -680,21 +935,21 @@ static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->txfree_ring;
- int i;
+ int i, index = dev->hw->version == 1;
/*
* For txfree event handling, the same DMA ring is shared between WED
* and WLAN. The WLAN driver accesses the ring index registers through
* WED
*/
- ring->reg_base = MTK_WED_RING_RX(1);
+ ring->reg_base = MTK_WED_RING_RX(index);
ring->wpdma = regs;
for (i = 0; i < 12; i += 4) {
u32 val = readl(regs + i);
- wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
- wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
+ wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
}
return 0;
@@ -703,11 +958,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
- u32 val;
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ val &= ext_mask;
if (!dev->hw->num_flows)
val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
if (val && net_ratelimit())
@@ -782,7 +1045,8 @@ out:
}
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, int index)
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
{
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
@@ -829,26 +1093,33 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
goto unlock;
+
hw->node = np;
hw->regs = regs;
hw->eth = eth;
hw->dev = &pdev->dev;
+ hw->wdma_phy = wdma_phy;
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
- "mediatek,pcie-mirror");
- hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
- "mediatek,hifsys");
- if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
- kfree(hw);
- goto unlock;
- }
+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+
+ if (hw->version == 1) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
- if (!index) {
- regmap_write(hw->mirror, 0, 0);
- regmap_write(hw->mirror, 4, 0);
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
}
+
mtk_wed_hw_add_debugfs(hw);
hw_list[index] = hw;
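
The mtk_wed_ring_alloc()/mtk_wed_ring_reset() changes above stop hard-coding sizeof(struct mtk_wdma_desc) and instead carry a per-ring desc_size, so the same walk handles the v1 layout (one descriptor per entry) and the v2 layout (doubled stride). Below is a minimal user-space sketch of that variable-stride iteration, not the driver itself: the struct, the DMA_DONE bit position and the calloc() allocation are stand-ins (the real code uses dma_alloc_coherent() and cpu_to_le32()).

#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-in for struct mtk_wdma_desc; field names follow the driver. */
struct wdma_desc {
	uint32_t buf0;
	uint32_t ctrl;
	uint32_t buf1;
	uint32_t info;
};

#define DESC_CTRL_DMA_DONE (1u << 31)	/* assumed bit position, for the sketch only */

struct wed_ring {
	void *desc;		/* descriptor array laid out with a runtime stride */
	int size;		/* number of entries */
	uint32_t desc_size;	/* bytes per entry: sizeof(desc) * hw version */
};

/* Mirrors the reworked mtk_wed_ring_reset(): index by desc_size, not sizeof(*desc). */
static void ring_reset(struct wed_ring *ring)
{
	for (int i = 0; i < ring->size; i++) {
		struct wdma_desc *desc = (void *)((char *)ring->desc + i * ring->desc_size);

		desc->buf0 = 0;
		desc->ctrl = DESC_CTRL_DMA_DONE;
		desc->buf1 = 0;
		desc->info = 0;
	}
}

/* Mirrors mtk_wed_ring_alloc(): the stride is chosen by the caller per hw version. */
static int ring_alloc(struct wed_ring *ring, int size, uint32_t desc_size)
{
	ring->desc = calloc(size, desc_size);	/* driver uses dma_alloc_coherent() */
	if (!ring->desc)
		return -1;
	ring->size = size;
	ring->desc_size = desc_size;
	ring_reset(ring);
	return 0;
}

int main(void)
{
	struct wed_ring ring;
	int version = 2;	/* v2 hardware uses a doubled per-entry stride */

	return ring_alloc(&ring, 16, sizeof(struct wdma_desc) * version);
}
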
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 981ec613f4b0..ae420ca01a48 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -18,11 +18,13 @@ struct mtk_wed_hw {
struct regmap *hifsys;
struct device *dev;
void __iomem *wdma;
+ phys_addr_t wdma_phy;
struct regmap *mirror;
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
u32 debugfs_reg;
u32 num_flows;
+ u8 version;
char dirname[5];
int irq;
int index;
@@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
}
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, int index);
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
void mtk_wed_exit(void);
int mtk_wed_flow_add(int index);
void mtk_wed_flow_remove(int index);
#else
static inline void
mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, int index)
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
{
}
static inline void
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index a81d3fd1a439..f420f187e837 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void *data)
DUMP_WDMA(WDMA_GLO_CFG),
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+ DUMP_STR("TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 0a0465ea58b4..e270fb336143 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -5,6 +5,7 @@
#define __MTK_WED_REGS_H
#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
@@ -41,6 +42,7 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_RESERVE_EN BIT(12)
#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
#define MTK_WED_EXT_INT_STATUS 0x020
@@ -57,7 +59,8 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
-#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
@@ -65,8 +68,7 @@ struct mtk_wdma_desc {
MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
- MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
- MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
#define MTK_WED_EXT_INT_MASK 0x028
@@ -81,6 +83,7 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_BM_BASE 0x084
#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_V2 0x0c8
#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
@@ -94,7 +97,25 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_BM_DYN_THR 0x0a0
#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
+
+#define MTK_WED_TX_TKID_CTRL 0x0c0
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_TKID_DYN_THR 0x0e0
+#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_TXP_DW0 0x120
+#define MTK_WED_TXP_DW1 0x124
+#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
+#define MTK_WED_TXDP_CTRL 0x130
+#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
+#define MTK_WED_RX_BM_TKID_MIB 0x1cc
#define MTK_WED_INT_STATUS 0x200
#define MTK_WED_INT_MASK 0x204
@@ -125,6 +146,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
@@ -155,21 +177,64 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+
#define MTK_WED_WPDMA_RESET_IDX 0x50c
#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WPDMA_CTRL 0x518
+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
+
#define MTK_WED_WPDMA_INT_CTRL 0x520
#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22)
+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_WPDMA_INT_MASK 0x524
+#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
+
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
+
#define MTK_WED_PCIE_CFG_BASE 0x560
+#define MTK_WED_PCIE_CFG_INTM 0x564
+#define MTK_WED_PCIE_CFG_MSIS 0x568
#define MTK_WED_PCIE_INT_TRIGGER 0x570
#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+#define MTK_WED_PCIE_INT_CTRL 0x57c
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+
#define MTK_WED_WPDMA_CFG_BASE 0x580
+#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+#define MTK_WED_WPDMA_CFG_TX 0x588
+#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
@@ -203,15 +268,24 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+#define MTK_WED_WDMA_INT_CLR 0xa24
+#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
+
#define MTK_WED_WDMA_INT_TRIGGER 0xa28
#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
#define MTK_WED_WDMA_INT_CTRL 0xa2c
#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_WDMA_CFG_BASE 0xaa0
#define MTK_WED_WDMA_OFFSET0 0xaa4
#define MTK_WED_WDMA_OFFSET1 0xaa8
+#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
+#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
+
#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
@@ -221,15 +295,22 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_OFS_CPU_IDX 0x08
#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
#define MTK_WDMA_GLO_CFG 0x204
-#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
+#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
#define MTK_WDMA_RESET_IDX 0x208
#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WDMA_INT_STATUS 0x220
+
#define MTK_WDMA_INT_MASK 0x228
#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
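
The register fields added above are declared with GENMASK()/BIT() and packed with FIELD_PREP() in the mtk_wed.c hunks. The following stand-alone sketch shows that packing for the WDMA_OFFSET0 fields from this header; the GENMASK/FIELD_PREP definitions are simplified user-space stand-ins (the real macros live in linux/bits.h and linux/bitfield.h) and the sketch assumes GCC/Clang builtins.

#include <stdint.h>
#include <stdio.h>

/* Minimal 32-bit stand-ins for the kernel's GENMASK()/FIELD_PREP(). */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

/* Field layout copied from the header above. */
#define MTK_WED_WDMA_OFST0_GLO_INTS	GENMASK(15, 0)
#define MTK_WED_WDMA_OFST0_GLO_CFG	GENMASK(31, 16)

/* Register offsets the fields carry (values taken from the same header). */
#define MTK_WDMA_INT_STATUS		0x220
#define MTK_WDMA_GLO_CFG		0x204

int main(void)
{
	/* Same composition as the v2 branch of mtk_wed_hw_init_early(). */
	uint32_t ofst0 = FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, MTK_WDMA_INT_STATUS) |
			 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, MTK_WDMA_GLO_CFG);

	printf("WDMA_OFFSET0 = 0x%08x\n", ofst0);	/* prints 0x02040220 */
	return 0;
}
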
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 6affbd241264..1184ac5751e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -152,7 +152,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
- netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
napi_enable(&cq->napi);
break;
case TX_XDP:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 6400a827173c..7d45f1d55f79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -89,15 +89,15 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
+ strscpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dcb9eb1899ce..fe48d20d6118 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1779,7 +1779,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index d89a3da89e5a..59b8b3c73582 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -208,7 +208,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->sg, chunk->npages,
DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
@@ -222,7 +222,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
chunk->npages, DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 78c5f40382c9..d3fc86cd3c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3071,6 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
+ devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
@@ -3093,6 +3094,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
@@ -3109,6 +3111,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index bfc0cd5ec423..26685fd0fdaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -139,6 +139,14 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
+config MLX5_EN_MACSEC
+ bool "Connect-X support for MACSec offload"
+ depends on MLX5_CORE_EN
+ depends on MACSEC
+ default n
+ help
+ Build support for MACsec cryptography-offload acceleration in the NIC.
+
config MLX5_EN_IPSEC
bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a3773a8177ed..a22c32aabf11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -92,6 +92,9 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
+mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
+ en_accel/macsec_stats.o
+
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
en_accel/ipsec_offload.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a560df446bac..26a23047f1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -93,29 +93,26 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_LOG_WQE_SZ 18
-#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
- MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
-#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-
-#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
-#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
-#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
-/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
- * WQEs, This page will absorb write overflow by the hardware, when
- * receiving packets larger than MTU. These oversize packets are
- * dropped by the driver at a later stage.
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+
+/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
+ * These are theoretical maximums, which can be further restricted by
+ * capabilities. These values are used for static resource allocations and
+ * sanity checks.
+ * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
+ * size actually used at runtime, but it's not a problem when calculating static
+ * array sizes.
*/
-#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
-#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5_UMR_MAX_MTT_SPACE \
+ (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
+ MLX5_UMR_MTT_ALIGNMENT))
+#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
+ rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+
#define MLX5E_MAX_RQ_NUM_MTTS \
- (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
+#define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
- (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
-#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
- (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
- (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
@@ -127,8 +124,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
- MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
@@ -150,13 +146,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_UMR_WQE_INLINE_SZ \
- (sizeof(struct mlx5e_umr_wqe) + \
- ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
- MLX5_UMR_MTT_ALIGNMENT))
-#define MLX5E_UMR_WQEBBS \
- (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-
#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
(sizeof(struct mlx5e_umr_wqe) +\
(sizeof(struct mlx5_klm) * (sgl_len)))
@@ -174,8 +163,7 @@ struct page_pool;
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
- mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -189,12 +177,6 @@ do { \
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
-enum mlx5e_rq_group {
- MLX5E_RQ_GROUP_REGULAR,
- MLX5E_RQ_GROUP_XSK,
-#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
-};
-
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
if (mlx5_lag_is_lacp_owner(mdev))
@@ -227,13 +209,15 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
* bytes units. Driver hardens the limitation to 1KB (16
* WQEBBs), unless firmware capability is stricter.
*/
-static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
- return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
- MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+ BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX);
+
+ return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}
-static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
+static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
* Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,8 +226,9 @@ static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
* than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
* cache-aligned.
*/
- u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+ u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
#if L1_CACHE_BYTES >= 128
wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
@@ -272,6 +257,7 @@ struct mlx5e_umr_wqe {
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
+ DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
@@ -476,15 +462,11 @@ struct mlx5e_txqsq {
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
cqe_ts_to_ns ptp_cyc2time;
- u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
-struct mlx5e_dma_info {
- dma_addr_t addr;
- union {
- struct page *page;
- struct xdp_buff *xsk;
- };
+union mlx5e_alloc_unit {
+ struct page *page;
+ struct xdp_buff *xsk;
};
/* XDP packets can be transmitted in different ways. On completion, we need to
@@ -580,7 +562,6 @@ struct mlx5e_xdpsq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
- u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_ktls_resync_resp;
@@ -609,25 +590,20 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
- u16 max_sq_wqebbs;
struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *au;
u32 offset;
bool last_in_page;
};
-struct mlx5e_umr_dma_info {
- struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
struct mlx5e_mpw_info {
- struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
- DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
+ union mlx5e_alloc_unit alloc_units[];
};
#define MLX5E_MAX_RX_FRAGS 4
@@ -635,13 +611,13 @@ struct mlx5e_mpw_info {
/* a single cache unit is capable of serving one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
-#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
- MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
u32 head;
u32 tail;
- struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+ struct page *page_cache[MLX5E_CACHE_SIZE];
};
struct mlx5e_rq;
@@ -674,6 +650,12 @@ struct mlx5e_rq_frags_info {
u8 num_frags;
u8 log_num_frags;
u8 wqe_bulk;
+ u8 wqe_index_mask;
+};
+
+struct mlx5e_dma_info {
+ dma_addr_t addr;
+ struct page *page;
};
struct mlx5e_shampo_hd {
@@ -695,13 +677,20 @@ struct mlx5e_hw_gro_data {
int second_ip_id;
};
+enum mlx5e_mpwrq_umr_mode {
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_OVERSIZED,
+ MLX5E_MPWRQ_UMR_MODE_TRIPLE,
+};
+
struct mlx5e_rq {
/* data path */
union {
struct {
struct mlx5_wq_cyc wq;
struct mlx5e_wqe_frag_info *frags;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *alloc_units;
struct mlx5e_rq_frags_info info;
mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
@@ -710,6 +699,7 @@ struct mlx5e_rq {
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
+ __be32 umr_mkey_be;
u16 num_strides;
u16 actual_wq_head;
u8 log_stride_sz;
@@ -717,6 +707,11 @@ struct mlx5e_rq {
u8 umr_last_bulk;
u8 umr_completed;
u8 min_wqe_bulk;
+ u8 page_shift;
+ u8 pages_per_wqe;
+ u8 umr_wqebbs;
+ u8 mtts_per_wqe;
+ u8 umr_mode;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
@@ -767,7 +762,6 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5e_channel *channel;
- u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@ -856,11 +850,6 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
-enum {
- MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
-};
-
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
@@ -959,6 +948,9 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
+#ifdef CONFIG_MLX5_EN_MACSEC
+ struct mlx5e_macsec *macsec;
+#endif
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
@@ -1010,7 +1002,6 @@ struct mlx5e_profile {
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
- u8 rq_groups;
u32 features;
};
@@ -1019,7 +1010,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
@@ -1047,6 +1039,7 @@ struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
+#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
@@ -1101,7 +1094,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
@@ -1136,6 +1129,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
@@ -1148,8 +1142,6 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
-void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
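
The en.h hunk replaces mlx5e_get_sw_max_sq_mpw_wqebbs() with mlx5e_get_max_sq_aligned_wqebbs(): the firmware max_wqe_sz_sq value is hardened to 16 WQEBBs, then reduced by one and rounded down to an even count so a full MPWQE session WQE stays cacheline-aligned on 128-byte cachelines. A hedged stand-alone sketch of that arithmetic follows; the 64-byte WQEBB size, the 16-WQEBB cap and the 128-byte cacheline are assumptions taken from the driver comments, not the kernel headers.

#include <stdint.h>
#include <stdio.h>

#define SEND_WQE_BB		64	/* one WQE basic block, bytes (assumed) */
#define SEND_WQE_MAX_WQEBBS	16	/* 1KB hard cap from the driver comment */
#define L1_CACHE_BYTES		128	/* assume a 128-byte cacheline for the sketch */

#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define ALIGN_DOWN(x, a)	((x) - ((x) % (a)))

/* Mirrors mlx5e_get_max_sq_wqebbs(): firmware cap in bytes, hardened to 16 WQEBBs. */
static uint8_t max_sq_wqebbs(uint16_t fw_max_wqe_sz_sq)
{
	return (uint8_t)MIN(SEND_WQE_MAX_WQEBBS, fw_max_wqe_sz_sq / SEND_WQE_BB);
}

/* Mirrors mlx5e_get_max_sq_aligned_wqebbs(): leave one WQEBB of slack and round
 * down to an even count on 128-byte cachelines so a full session WQE can be
 * cacheline-aligned.
 */
static uint8_t max_sq_aligned_wqebbs(uint16_t fw_max_wqe_sz_sq)
{
	uint8_t wqebbs = max_sq_wqebbs(fw_max_wqe_sz_sq);

	wqebbs = MIN(wqebbs, SEND_WQE_MAX_WQEBBS - 1);
#if L1_CACHE_BYTES >= 128
	wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
	return wqebbs;
}

int main(void)
{
	/* A firmware report of 2048 bytes is capped at 16 WQEBBs, then 15, then 14. */
	printf("%u\n", max_sq_aligned_wqebbs(2048));	/* prints 14 */
	return 0;
}
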
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index e7c14c0de0a7..48581ea3adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -10,28 +10,33 @@ unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
return chs->num;
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+static struct mlx5e_channel *mlx5e_channels_get(struct mlx5e_channels *chs, unsigned int ix)
{
- struct mlx5e_channel *c;
+ WARN_ON_ONCE(ix >= mlx5e_channels_get_num(chs));
+ return chs->c[ix];
+}
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- *rqn = c->rq.rqn;
+ return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
{
- struct mlx5e_channel *c;
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+ *rqn = c->rq.rqn;
+}
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
- return false;
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
+
+ WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
- return true;
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index ca00cbc827cb..637ca90daaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -9,8 +9,9 @@
struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 9b8cdf2e68ad..bf2741eb7f9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -8,6 +8,7 @@
#include "lib/fs_ttc.h"
struct mlx5e_post_act;
+struct mlx5e_tc_table;
enum {
MLX5E_TC_FT_LEVEL = 0,
@@ -15,6 +16,11 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
+};
+
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
@@ -83,54 +89,28 @@ enum {
#endif
};
-struct mlx5e_priv;
-
-#ifdef CONFIG_MLX5_EN_RXNFC
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-#else
-static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
-static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
-{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{ return -EOPNOTSUPP; }
-#endif /* CONFIG_MLX5_EN_RXNFC */
+struct mlx5e_flow_steering;
+struct mlx5e_rx_res;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables;
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple);
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple);
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs);
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#else
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
+{ return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
#endif
#ifdef CONFIG_MLX5_EN_TLS
@@ -142,54 +122,63 @@ struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
-struct mlx5e_flow_steering {
- bool state_destroy;
- bool vlan_strip_disable;
- struct mlx5_core_dev *mdev;
- struct mlx5_flow_namespace *ns;
-#ifdef CONFIG_MLX5_EN_RXNFC
- struct mlx5e_ethtool_steering ethtool;
-#endif
- struct mlx5e_tc_table *tc;
- struct mlx5e_promisc_table promisc;
- struct mlx5e_vlan_table *vlan;
- struct mlx5e_l2_table l2;
- struct mlx5_ttc_table *ttc;
- struct mlx5_ttc_table *inner_ttc;
-#ifdef CONFIG_MLX5_EN_ARFS
- struct mlx5e_arfs_tables *arfs;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5e_accel_fs_tcp *accel_tcp;
-#endif
- struct mlx5e_fs_udp *udp;
- struct mlx5e_fs_any *any;
- struct mlx5e_ptp_fs *ptp_fs;
-};
-
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev);
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile);
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
-
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
+#ifdef CONFIG_MLX5_EN_RXNFC
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
+#endif
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner);
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner);
+#ifdef CONFIG_MLX5_EN_ARFS
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs);
+#endif
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs);
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any);
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp);
+#ifdef CONFIG_MLX5_EN_TLS
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp);
+#endif
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy);
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs, bool vlan_strip_disable);
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs);
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs);
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
@@ -198,5 +187,18 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
__be16 proto, u16 vid);
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+
+#define fs_err(fs, fmt, ...) \
+ mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_dbg(fs, fmt, ...) \
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn(fs, fmt, ...) \
+ mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn_once(fs, fmt, ...) \
+ mlx5_core_warn_once(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
new file mode 100644
index 000000000000..9e276fd3c0cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5E_FS_ETHTOOL_H__
+#define __MLX5E_FS_ETHTOOL_H__
+
+struct mlx5e_priv;
+struct mlx5e_ethtool_steering;
+#ifdef CONFIG_MLX5_EN_RXNFC
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
+{ return 0; }
+static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
+#endif
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index e153d6119e02..03cb79adf912 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
#include "en/fs_tt_redirect.h"
#include "fs_core.h"
+#include "mlx5_core.h"
enum fs_udp_type {
FS_IPV4_UDP,
@@ -74,17 +74,17 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type
}
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port)
{
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
enum fs_udp_type type = tt2fs_udp(ttc_type);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_fs_udp *fs_udp;
int err;
if (type == FS_UDP_NUM_TYPES)
@@ -94,7 +94,6 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -106,31 +105,30 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n",
- __func__, fs_udp_type2str(type), err);
+ fs_err(fs, "%s: add %s rule failed, err %d\n",
+ __func__, fs_udp_type2str(type), err);
}
return rule;
}
-static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type)
+static int fs_udp_add_default_rule(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5e_flow_table *fs_udp_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
- struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -206,33 +204,36 @@ out:
return err;
}
-static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
+static int fs_udp_create_table(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_flow_table *ft;
int err;
+ ft = &fs_udp->tables[type];
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n",
- fs_udp_type2str(type), ft->t->id, ft->t->level);
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs %s table id %u level %u\n",
+ fs_udp_type2str(type), ft->t->id, ft->t->level);
err = fs_udp_create_groups(ft, type);
if (err)
goto err;
- err = fs_udp_add_default_rule(priv, type);
+ err = fs_udp_add_default_rule(fs, type);
if (err)
goto err;
@@ -253,17 +254,17 @@ static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
fs_udp->tables[i].t = NULL;
}
-static int fs_udp_disable(struct mlx5e_priv *priv)
+static int fs_udp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_udp2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
return err;
}
}
@@ -271,30 +272,31 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int fs_udp_enable(struct mlx5e_priv *priv)
+static int fs_udp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs->udp->tables[i].t;
+ dest.ft = udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_udp2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_udp2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
return err;
}
}
return 0;
}
-void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
int i;
if (!fs_udp)
@@ -303,48 +305,50 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
if (--fs_udp->ref_cnt)
return;
- fs_udp_disable(priv);
+ fs_udp_disable(fs);
for (i = 0; i < FS_UDP_NUM_TYPES; i++)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs->udp = NULL;
+ mlx5e_fs_set_udp(fs, NULL);
}
-int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
int i, err;
- if (priv->fs->udp) {
- priv->fs->udp->ref_cnt++;
+ if (udp) {
+ udp->ref_cnt++;
return 0;
}
- priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
- if (!priv->fs->udp)
+ udp = kzalloc(sizeof(*udp), GFP_KERNEL);
+ if (!udp)
return -ENOMEM;
+ mlx5e_fs_set_udp(fs, udp);
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- err = fs_udp_create_table(priv, i);
+ err = fs_udp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = fs_udp_enable(priv);
+ err = fs_udp_enable(fs);
if (err)
goto err_destroy_tables;
- priv->fs->udp->ref_cnt = 1;
+ udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs->udp, i);
+ fs_udp_destroy_table(udp, i);
- kfree(priv->fs->udp);
- priv->fs->udp = NULL;
+ kfree(udp);
+ mlx5e_fs_set_udp(fs, NULL);
return err;
}
@@ -356,22 +360,21 @@ static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_typ
}
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type)
{
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_fs_any *fs_any;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -383,31 +386,29 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n",
- __func__, err);
+ fs_err(fs, "%s: add ANY rule failed, err %d\n",
+ __func__, err);
}
return rule;
}
-static int fs_any_add_default_rule(struct mlx5e_priv *priv)
+static int fs_any_add_default_rule(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5e_flow_table *fs_any_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
- struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
-
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, fs type=ANY, err %d\n",
- __func__, err);
+ fs_err(fs, "%s: add default rule failed, fs type=ANY, err %d\n",
+ __func__, err);
return err;
}
@@ -472,9 +473,11 @@ err:
return err;
}
-static int fs_any_create_table(struct mlx5e_priv *priv)
+static int fs_any_create_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs->any->table;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
+ struct mlx5e_flow_table *ft = &fs_any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,21 +487,21 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n",
- ft->t->id, ft->t->level);
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs ANY table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = fs_any_create_groups(ft);
if (err)
goto err;
- err = fs_any_add_default_rule(priv);
+ err = fs_any_add_default_rule(fs);
if (err)
goto err;
@@ -509,35 +512,38 @@ err:
return err;
}
-static int fs_any_disable(struct mlx5e_priv *priv)
+static int fs_any_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_ANY);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, MLX5_TT_ANY, err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
}
-static int fs_any_enable(struct mlx5e_priv *priv)
+static int fs_any_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_any *any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs->any->table.t;
+ dest.ft = any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(ttc, MLX5_TT_ANY, &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, MLX5_TT_ANY, err);
+ fs_err(fs,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
@@ -553,9 +559,9 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
fs_any->table.t = NULL;
}
-void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_fs_any *fs_any = priv->fs->any;
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
if (!fs_any)
return;
@@ -563,43 +569,45 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
if (--fs_any->ref_cnt)
return;
- fs_any_disable(priv);
+ fs_any_disable(fs);
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs->any = NULL;
+ mlx5e_fs_set_any(fs, NULL);
}
-int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
int err;
- if (priv->fs->any) {
- priv->fs->any->ref_cnt++;
+ if (fs_any) {
+ fs_any->ref_cnt++;
return 0;
}
- priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
- if (!priv->fs->any)
+ fs_any = kzalloc(sizeof(*fs_any), GFP_KERNEL);
+ if (!fs_any)
return -ENOMEM;
+ mlx5e_fs_set_any(fs, fs_any);
- err = fs_any_create_table(priv);
+ err = fs_any_create_table(fs);
if (err)
return err;
- err = fs_any_enable(priv);
+ err = fs_any_enable(fs);
if (err)
goto err_destroy_table;
- priv->fs->any->ref_cnt = 1;
+ fs_any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs->any);
+ fs_any_destroy_table(fs_any);
- kfree(priv->fs->any);
- priv->fs->any = NULL;
+ kfree(fs_any);
+ mlx5e_fs_set_any(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
index 7a70c4f38fda..5780fd7ad507 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -4,23 +4,22 @@
#ifndef __MLX5E_FS_TT_REDIRECT_H__
#define __MLX5E_FS_TT_REDIRECT_H__
-#include "en.h"
#include "en/fs.h"
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port);
-void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
-int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs);
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs);
/* ANY traffic type redirect*/
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type);
-void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
-int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs);
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index e025040350ba..29dd3a04c154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,11 +6,212 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include <net/xdp_sock_drv.h>
-static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
+{
+ u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
+
+ return min_page_shift ? : 12;
+}
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+ u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
+
+ /* Regular RQ uses order-0 pages, the NIC must be able to map them. */
+ if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
+ min_page_shift = req_page_shift;
+
+ return max(req_page_shift, min_page_shift);
+}
+
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+ /* Different memory management schemes use different mechanisms to map
+ * user-mode memory. The stricter guarantees we have, the faster
+ * mechanisms we use:
+ * 1. MTT - direct mapping in page granularity.
+ * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
+ * all mappings have the same size.
+ * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
+ * mappings can have different sizes.
+ */
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ bool unaligned = xsk ? xsk->unaligned : false;
+ bool oversized = false;
+
+ if (xsk) {
+ oversized = xsk->chunk_size < (1 << page_shift);
+ WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
+ }
+
+ /* XSK frame size doesn't match the UMR page size, either because the
+ * frame size is not a power of two, or it's smaller than the minimal
+ * page size supported by the firmware.
+ * It's possible to receive packets bigger than MTU in certain setups.
+ * To avoid writing over the XSK frame boundary, the top region of each
+ * stride is mapped to a garbage page, resulting in two mappings of
+ * different sizes per frame.
+ */
+ if (oversized) {
+ /* An optimization for frame sizes equal to 3 * power_of_two.
+ * 3 KSMs point to the frame, and one KSM points to the garbage
+ * page, which works faster than KLM.
+ */
+ if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
+ return MLX5E_MPWRQ_UMR_MODE_TRIPLE;
+
+ return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
+ }
+
+ /* XSK frames can start at arbitrary unaligned locations, but they all
+ * have the same size which is a power of two. It allows to optimize to
+ * one KSM per frame.
+ */
+ if (unaligned)
+ return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;
+
+ /* XSK: frames are naturally aligned, MTT can be used.
+ * Non-XSK: Allocations happen in units of CPU pages, therefore, the
+ * mappings are naturally aligned.
+ */
+ return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
+}
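
For orientation, here is a minimal standalone sketch of the mode-selection logic above. It is illustrative only: the enum, the umr_mode_for() helper and the sample chunk sizes are made-up stand-ins for mlx5e_mpwrq_umr_mode() and are not part of the driver's API.

/* Illustrative model of the UMR mode decision; not the driver's code. */
#include <stdbool.h>
#include <stdio.h>

enum umr_mode { MODE_ALIGNED, MODE_UNALIGNED, MODE_OVERSIZED, MODE_TRIPLE };

static bool is_pow2(unsigned int x)
{
	return x && !(x & (x - 1));
}

static enum umr_mode umr_mode_for(unsigned int chunk, unsigned int page_shift,
				  bool unaligned)
{
	unsigned int page = 1u << page_shift;

	if (chunk < page) {			/* frame smaller than the UMR page */
		if (chunk % 3 == 0 && is_pow2(chunk / 3))
			return MODE_TRIPLE;	/* 3 KSMs + 1 garbage KSM */
		return MODE_OVERSIZED;		/* 2 KLMs per frame */
	}
	if (unaligned)
		return MODE_UNALIGNED;		/* 1 KSM per frame */
	return MODE_ALIGNED;			/* plain MTT, fastest path */
}

int main(void)
{
	/* Assuming a 4K minimum firmware page (page_shift = 12):
	 * 2048-byte frames -> OVERSIZED, 3072 = 3 * 1024 -> TRIPLE,
	 * 4096 unaligned -> UNALIGNED, 4096 aligned -> ALIGNED.
	 */
	printf("%d %d %d %d\n",
	       umr_mode_for(2048, 12, false), umr_mode_for(3072, 12, false),
	       umr_mode_for(4096, 12, true), umr_mode_for(4096, 12, false));
	return 0;
}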
+
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
+{
+ switch (mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return sizeof(struct mlx5_mtt);
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return sizeof(struct mlx5_ksm);
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ return sizeof(struct mlx5_klm) * 2;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ return sizeof(struct mlx5_ksm) * 4;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
+ return 0;
+}
+
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u16 max_wqe_size;
+
+ /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
+ max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
+ MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+ max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+
+ return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+}
+
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- return params->xdp_prog || xsk;
+ u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+ u8 pages_per_wqe;
+
+ pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
+
+ /* Two MTTs are needed to form an octword. The number of MTTs is encoded
+ * in octwords in a UMR WQE, so we need at least two to avoid mapping
+ * garbage addresses.
+ */
+ if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ pages_per_wqe = 2;
+
+ /* Sanity check for further calculations to succeed. */
+ BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
+ if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
+ return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
+
+ return pages_per_wqe;
+}
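
A back-of-envelope model of the WQE-size and pages-per-WQE arithmetic above. The constants (64-byte WQEBB, 16 WQEBBs per WQE, a 96-byte UMR header, 64-byte entry alignment, 8-byte MTT entries) are assumptions for illustration, not values queried from firmware, and the clamping against MLX5_MPWRQ_MAX_LOG_WQE_SZ and MLX5_MPWRQ_MAX_PAGES_PER_WQE is left out.

/* Illustration only; constants are assumed, not read from device caps. */
#include <stdio.h>

#define WQE_BB		64u	/* assumed send WQE basic block, bytes */
#define MAX_WQEBBS	16u	/* assumed max aligned WQEBBs per WQE */
#define UMR_HDR		96u	/* assumed UMR WQE header size, bytes */
#define ENTRY_ALIGN	64u	/* assumed alignment of the entry list */

static unsigned int align_down(unsigned int v, unsigned int a)
{
	return v - v % a;
}

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int entry_sz = 8;	/* aligned (MTT) mode entries */
	unsigned int page_shift = 12;
	unsigned int max_wqe = MAX_WQEBBS * WQE_BB;
	unsigned int max_entries, log_wqe, pages_per_wqe;

	max_entries = align_down(max_wqe - UMR_HDR, ENTRY_ALIGN) / entry_sz;
	log_wqe = ilog2_u(max_entries) + page_shift;	/* rounded down */
	pages_per_wqe = 1u << (log_wqe - page_shift);

	/* max_entries = 896 / 8 = 112 -> log_wqe = 6 + 12 = 18 (a 256 KB WQE
	 * before the MLX5_MPWRQ_MAX_LOG_WQE_SZ clamp), pages_per_wqe = 64,
	 * which the real code also checks against MLX5_MPWRQ_MAX_PAGES_PER_WQE.
	 */
	printf("log_wqe_sz=%u pages_per_wqe=%u\n", log_wqe, pages_per_wqe);
	return 0;
}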
+
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u16 umr_wqe_sz;
+
+ umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
+ ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+
+ WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
+
+ return umr_wqe_sz;
+}
+
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
+ MLX5_SEND_WQE_BB);
+}
+
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+
+ /* Add another page as a buffer between WQEs. This page will absorb
+ * write overflow by the hardware, when receiving packets larger than
+ * MTU. These oversize packets are dropped by the driver at a later
+ * stage.
+ */
+ return ALIGN(pages_per_wqe + 1,
+ MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
+}
+
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ /* Same limits apply to KSMs and KLMs. */
+ u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return MLX5E_MAX_RQ_NUM_MTTS;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return klm_limit;
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ /* Each entry is two KLMs. */
+ return klm_limit / 2;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ /* Each entry is four KSMs. */
+ return klm_limit / 4;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+ return 0;
+}
+
+static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
+ u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
+
+ return ilog2(max_entries / mtts_per_wqe);
+}
+
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
+ mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
+ MLX5E_ORDER2_MAX_PACKET_MTU;
}
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
@@ -22,7 +223,7 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return xsk->headroom;
headroom = NET_IP_ALIGN;
- if (mlx5e_rx_is_xdp(params, xsk))
+ if (params->xdp_prog)
headroom += XDP_PACKET_HEADROOM;
else
headroom += MLX5_RX_HEADROOM;
@@ -30,70 +231,80 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return headroom;
}
-u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
- return linear_rq_headroom + hw_mtu;
+ return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
{
- u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
-
- /* AF_XDP doesn't build SKBs in place. */
- if (!xsk)
- frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
+ /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
+ u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
+ u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
- * special case. It can run with frames smaller than a page, as it
- * doesn't allocate pages dynamically. However, here we pretend that
- * fragments are page-sized: it allows to treat XSK frames like pages
- * by redirecting alloc and free operations to XSK rings and by using
- * the fact there are no multiple packets per "page" (which is a frame).
- * The latter is important, because frames may come in a random order,
- * and we will have trouble assemblying a real page of multiple frames.
- */
- if (mlx5e_rx_is_xdp(params, xsk))
- frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
+ return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
+}
- /* Even if we can go with a smaller fragment size, we must not put
- * multiple packets into a single frame.
+static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ bool mpwqe)
+{
+ /* XSK frames are mapped as individual pages, because frames may come in
+ * an arbitrary order from random locations in the UMEM.
*/
if (xsk)
- frag_sz = max_t(u32, frag_sz, xsk->chunk_size);
+ return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
+
+ /* XDP in mlx5e doesn't support multiple packets per page. */
+ if (params->xdp_prog)
+ return PAGE_SIZE;
- return frag_sz;
+ return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
}
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
+ u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
- return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+ return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
+ order_base_2(linear_stride_sz);
}
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- /* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
- * than one page. For this, check both with and without xsk.
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
+ return false;
+
+ /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ * must fit into a CPU page.
*/
- u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
- mlx5e_rx_get_linear_frag_sz(params, NULL));
+ if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
+ return false;
+
+ /* XSK frames must be big enough to hold the packet data. */
+ if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
+ return false;
- return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
- linear_frag_sz <= PAGE_SIZE;
+ return true;
}
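
As a rough illustration of the two conditions above, a self-contained sketch with made-up sizes: the 18-byte L2 overhead and the 320-byte tail standing in for the skb_shared_info reservation are assumptions, and the packet-merge (LRO/SHAMPO) early return is omitted.

/* Illustration of the linear-SKB conditions; sizes are assumptions. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ		4096u
#define L2_OVERHEAD	18u	/* assumed SW->HW MTU overhead */
#define SKB_TAIL	320u	/* assumed skb_shared_info reservation */

static bool rx_is_linear(unsigned int sw_mtu, unsigned int headroom,
			 bool xsk, unsigned int chunk)
{
	unsigned int pkt = sw_mtu + L2_OVERHEAD;

	/* SKBs built on XDP_PASS must fit into one CPU page
	 * (XSK-built SKBs carry no headroom).
	 */
	if ((xsk ? 0 : headroom) + pkt + SKB_TAIL > PAGE_SZ)
		return false;

	/* For XSK, headroom + packet must also fit into one frame. */
	if (xsk && headroom + pkt > chunk)
		return false;

	return true;
}

int main(void)
{
	/* MTU 1500 in a 2K XSK frame is linear; MTU 3500 is not. */
	printf("%d %d\n",
	       rx_is_linear(1500, 256, true, 2048),
	       rx_is_linear(3500, 256, true, 2048));
	return 0;
}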
-bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
- u8 log_stride_sz, u8 log_num_strides)
+static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides,
+ u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+ if (log_stride_sz + log_num_strides !=
+ mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
return false;
if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
@@ -113,28 +324,53 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- s8 log_num_strides;
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_num_strides;
u8 log_stride_sz;
+ u8 log_wqe_sz;
- if (!mlx5e_rx_is_linear_skb(params, xsk))
+ if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
return false;
- log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+ log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
- return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
+ if (log_wqe_sz < log_stride_sz)
+ return false;
+
+ log_num_strides = log_wqe_sz - log_stride_sz;
+
+ return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
+ log_num_strides, page_shift,
+ umr_mode);
}
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
+
+ log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
+ page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
/* Numbers are unsigned, don't subtract to avoid underflow. */
if (params->log_rq_mtu_frames <
log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ /* Ethtool's rx_max_pending is calculated for the regular RQ, which uses
+ * pages of PAGE_SIZE. The max length of an XSK RQ might differ if it uses
+ * a frame size not equal to PAGE_SIZE.
+ * A stricter condition is checked in mlx5e_mpwrq_validate_xsk; WARN on
+ * unexpected failure.
+ */
+ if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
+ return max_log_rq_size;
+
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
@@ -164,7 +400,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk)
{
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
- return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
+ return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
@@ -173,7 +409,10 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- return MLX5_MPWRQ_LOG_WQE_SZ -
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+ return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
@@ -209,11 +448,11 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
stop_room = mlx5e_ktls_get_stop_room(mdev, params);
stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
- /* A MPWQE can take up to the maximum-sized WQE + all the normal
- * stop room can be taken if a new packet breaks the active
- * MPWQE session and allocates its WQEs right away.
+ /* An MPWQE can take up to the maximum cacheline-aligned WQE, and on
+ * top of that all the normal stop room can be taken if a new packet
+ * breaks the active MPWQE session and allocates its WQEs right away.
*/
- stop_room += mlx5e_stop_room_for_max_wqe(mdev);
+ stop_room += mlx5e_stop_room_for_mpwqe(mdev);
return stop_room;
}
@@ -320,22 +559,46 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return false;
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
- if (params->xdp_prog) {
- /* XSK params are not considered here. If striding RQ is in use,
- * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
- * be called with the known XSK params.
- */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- return false;
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ return -EOPNOTSUPP;
+
+ if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u16 max_mtu_pkts;
+
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ return -EOPNOTSUPP;
+
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return -EINVAL;
+
+ /* The current RQ length is too big for the given frame size: the
+ * needed number of WQEs would exceed the maximum.
+ */
+ max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+ mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
+ if (params->log_rq_mtu_frames > max_mtu_pkts) {
+ mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
+ 1 << params->log_rq_mtu_frames, xsk->chunk_size);
+ return -EINVAL;
}
- return true;
+ return 0;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
@@ -348,7 +611,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
+ BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
BIT(params->log_rq_mtu_frames),
BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
@@ -356,8 +619,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_CYCLIC;
}
@@ -374,9 +636,9 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
*/
if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
- mlx5e_striding_rq_possible(mdev, params) &&
+ !mlx5e_mpwrq_validate_regular(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
- !mlx5e_rx_is_linear_skb(params, NULL)))
+ !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -419,16 +681,22 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
- if (mlx5e_rx_is_linear_skb(params, xsk)) {
+ if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
int frag_stride;
- frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
- frag_stride = roundup_pow_of_two(frag_stride);
+ frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
info->arr[0].frag_size = byte_count;
info->arr[0].frag_stride = frag_stride;
info->num_frags = 1;
- info->wqe_bulk = PAGE_SIZE / frag_stride;
+
+ /* N WQEs share the same page, where N = PAGE_SIZE / frag_stride. The
+ * first WQE in the page is responsible for allocating this page, and
+ * its index is k*N. If WQEs [k*N+1; k*N+N-1] are still not completed,
+ * the allocation must stop before k*N.
+ */
+ info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
+
goto out;
}
@@ -477,11 +745,40 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
i++;
}
info->num_frags = i;
- /* number of different wqes sharing a page */
- info->wqe_bulk = 1 + (info->num_frags % 2);
+
+ /* The last fragment of WQE with index 2*N may share the page with the
+ * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
+ * is not completed yet, WQE 2*N must not be allocated, as it's
+ * responsible for allocating a new page.
+ */
+ if (frag_size_max == PAGE_SIZE) {
+ /* No WQE can start in the middle of a page. */
+ info->wqe_index_mask = 0;
+ } else {
+ /* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
+ * because there would be more than MLX5E_MAX_RX_FRAGS of them.
+ */
+ WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);
+
+ /* Odd number of fragments allows to pack the last fragment of
+ * the previous WQE and the first fragment of the next WQE into
+ * the same page.
+ * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
+ * is 4, the last fragment can be bigger than the rest only if
+ * it's the fourth one, so WQEs consisting of 3 fragments will
+ * always share a page.
+ * When a page is shared, WQE bulk size is 2, otherwise just 1.
+ */
+ info->wqe_index_mask = info->num_frags % 2;
+ }
out:
- info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+ /* Bulking optimization to skip allocation until at least 8 WQEs can be
+ * allocated in a row. At the same time, never start allocation when
+ * the page is still used by older WQEs.
+ */
+ info->wqe_bulk = max_t(u8, info->wqe_index_mask + 1, 8);
+
info->log_num_frags = order_base_2(info->num_frags);
return 0;
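
A toy model of the wqe_index_mask / wqe_bulk derivation above; the fragment layout used (one 2K stride in the linear case, three 2K fragments in the non-linear case) is an assumption for illustration, not a configuration read from a device.

/* Toy model of wqe_index_mask / wqe_bulk; layout is assumed. */
#include <stdio.h>

#define PAGE_SZ	4096u

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int frag_stride = 2048, num_frags = 3, mask, bulk;

	/* Linear RQ, one 2K stride per WQE: two WQEs share a page, so only
	 * even-indexed WQEs allocate -> mask = 1, bulk = max(mask + 1, 8) = 8.
	 */
	mask = PAGE_SZ / frag_stride - 1;
	bulk = max_u(mask + 1, 8);
	printf("linear:     mask=%u bulk=%u\n", mask, bulk);

	/* Non-linear RQ with an odd number of fragments: the last fragment of
	 * WQE 2N can share a page with the first fragment of WQE 2N+1, so
	 * mask = num_frags % 2 = 1 and bulk is again 8.
	 */
	mask = num_frags % 2;
	bulk = max_u(mask + 1, 8);
	printf("non-linear: mask=%u bulk=%u\n", mask, bulk);
	return 0;
}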
@@ -520,7 +817,7 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
- int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
/* +1 is for the case that the pkt_per_rsrv dont consume the reservation
@@ -544,7 +841,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
else
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -587,12 +884,16 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
- log_wqe_num_of_strides)) {
+ log_wqe_num_of_strides,
+ page_shift, umr_mode)) {
mlx5_core_err(mdev,
- "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
- log_wqe_stride_size, log_wqe_num_of_strides);
+ "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
+ log_wqe_stride_size, log_wqe_num_of_strides,
+ umr_mode);
return -EINVAL;
}
@@ -600,7 +901,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
MLX5_SET(wq, wq, shampo_enable, true);
MLX5_SET(wq, wq, log_reservation_size,
@@ -712,13 +1013,6 @@ static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
-{
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- return MLX5_GET(wq, wq, log_wq_sz);
-}
-
/* This function calculates the maximum number of headers entries that are needed
* per WQE, the formula is based on the size of the reservations and the
* restriction we have about max packets for reservation that is equal to max
@@ -779,31 +1073,92 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
+static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 umr_wqebbs;
+
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+
+ return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+}
+
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp)
{
- u32 wqebbs;
+ u32 wqebbs, total_pages, useful_space;
/* MLX5_WQ_TYPE_CYCLIC */
if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ /* UMR WQEs for the regular RQ. */
+ wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
/* If XDP program is attached, XSK may be turned on at any time without
* restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
* both regular RQ and XSK RQ.
- * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
- * doesn't affect its return value, as long as params->xdp_prog != NULL,
- * so we can just multiply by 2.
+ *
+ * XSK uses different values of page_shift, and the total number of UMR
+ * WQEBBs depends on it. This dependency is complex and not monotonic,
+ * especially taking into consideration that some of the parameters come
+ * from capabilities. Hence, we have to try all valid values of XSK
+ * frame size (and page_shift) to find the maximum.
*/
- if (params->xdp_prog)
- wqebbs *= 2;
+ if (params->xdp_prog) {
+ u32 max_xsk_wqebbs = 0;
+ u8 frame_shift;
+
+ for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
+ frame_shift <= PAGE_SHIFT; frame_shift++) {
+ /* The headroom doesn't affect the calculation. */
+ struct mlx5e_xsk_param xsk = {
+ .chunk_size = 1 << frame_shift,
+ .unaligned = false,
+ };
+
+ /* XSK aligned mode. */
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is a power of two. */
+ xsk.unaligned = true;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is not equal to stride size. */
+ xsk.chunk_size -= 1;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is a triple power of two. */
+ xsk.chunk_size = (1 << frame_shift) / 4 * 3;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+ }
+
+ wqebbs += max_xsk_wqebbs;
+ }
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+ /* UMR WQEs don't cross the page boundary; they are padded with NOPs.
+ * This padding is always smaller than the max WQE size. That gives us
+ * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
+ * per page. The number of pages is estimated as the total size of WQEs
+ * divided by the useful space in a page, rounding up. If some WQEs don't
+ * fully fit into the useful space, they can occupy part of the padding,
+ * which keeps this estimate on the safe side (enough space is reserved).
+ */
+ useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
+ total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
+ wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
+
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
@@ -857,7 +1212,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
+ param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index f5c46e78eebc..034debd140bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -9,6 +9,7 @@
struct mlx5e_xsk_param {
u16 headroom;
u16 chunk_size;
+ bool unaligned;
};
struct mlx5e_cq_param {
@@ -52,37 +53,26 @@ struct mlx5e_create_sq_param {
u8 min_inline_mode;
};
-static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
- u16 qid,
- enum mlx5e_rq_group group,
- u16 *ix)
-{
- int nch = params->num_channels;
- int ch = qid - nch * group;
-
- if (ch < 0 || ch >= nch)
- return false;
-
- *ix = ch;
- return true;
-}
-
-static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
- u16 qid,
- u16 *ix,
- enum mlx5e_rq_group *group)
-{
- u16 nch = params->num_channels;
-
- *ix = qid % nch;
- *group = qid / nch;
-}
-
-static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
- struct mlx5e_params *params, u64 qid)
-{
- return qid < params->num_channels * profile->rq_groups;
-}
+/* Striding RQ dynamic parameters */
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode);
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
/* Parameter calculations */
@@ -92,25 +82,23 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
- u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 903de88bab53..8469e9c38670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -622,37 +622,39 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}
-static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
+static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
if (!ptp_fs->valid)
return;
mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
- mlx5e_fs_tt_redirect_any_destroy(priv);
+ mlx5e_fs_tt_redirect_any_destroy(fs);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
- mlx5e_fs_tt_redirect_udp_destroy(priv);
+ mlx5e_fs_tt_redirect_udp_destroy(fs);
ptp_fs->valid = false;
}
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
+ struct mlx5e_flow_steering *fs = priv->fs;
struct mlx5_flow_handle *rule;
+ struct mlx5e_ptp_fs *ptp_fs;
int err;
+ ptp_fs = mlx5e_fs_get_ptp(fs);
if (ptp_fs->valid)
return 0;
- err = mlx5e_fs_tt_redirect_udp_create(priv);
+ err = mlx5e_fs_tt_redirect_udp_create(fs);
if (err)
goto out_free;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -660,7 +662,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v4_rule = rule;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -668,11 +670,11 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v6_rule = rule;
- err = mlx5e_fs_tt_redirect_any_create(priv);
+ err = mlx5e_fs_tt_redirect_any_create(fs);
if (err)
goto out_destroy_udp_v6_rule;
- rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
+ rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto out_destroy_fs_any;
@@ -683,13 +685,13 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
return 0;
out_destroy_fs_any:
- mlx5e_fs_tt_redirect_any_destroy(priv);
+ mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
- mlx5e_fs_tt_redirect_udp_destroy(priv);
+ mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
return err;
}
@@ -723,7 +725,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
+ netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -797,29 +799,31 @@ int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
return 0;
}
-int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile)
{
struct mlx5e_ptp_fs *ptp_fs;
- if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
+ if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
if (!ptp_fs)
return -ENOMEM;
+ mlx5e_fs_set_ptp(fs, ptp_fs);
- priv->fs->ptp_fs = ptp_fs;
return 0;
}
-void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
+void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
- if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
+ if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return;
- mlx5e_ptp_rx_unset_fs(priv);
+ mlx5e_ptp_rx_unset_fs(fs);
kfree(ptp_fs);
}
@@ -845,6 +849,6 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
return -EINVAL;
}
- mlx5e_ptp_rx_unset_fs(priv);
+ mlx5e_ptp_rx_unset_fs(priv->fs);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 92dbbec472ec..5bce554e131a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -74,8 +74,10 @@ void mlx5e_ptp_close(struct mlx5e_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
-int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
-void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
+void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index fc366e66d0b0..5f6f95ad6888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -134,38 +134,17 @@ out:
return err;
}
-static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
-{
- struct net_device *dev = rq->netdev;
- int err;
-
- err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
- if (err) {
- netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
- return err;
- }
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
- if (err) {
- netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
{
struct mlx5e_rq *rq = ctx;
int err;
mlx5e_deactivate_rq(rq);
- mlx5e_free_rx_descs(rq);
-
- err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+ err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
if (err)
- goto out;
+ return err;
- clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
mlx5e_activate_rq(rq);
rq->stats->recover++;
if (rq->channel)
@@ -173,9 +152,6 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
else
mlx5e_trigger_napi_sched(rq->cq.napi);
return 0;
-out:
- clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
- return err;
}
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 24c32f73040a..e1095bc36543 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -24,8 +24,6 @@ struct mlx5e_rx_res {
struct {
struct mlx5e_rqt direct_rqt;
struct mlx5e_tir direct_tir;
- struct mlx5e_rqt xsk_rqt;
- struct mlx5e_tir xsk_tir;
} *channels;
struct {
@@ -320,48 +318,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
mlx5e_tir_builder_clear(builder);
}
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- goto out;
-
- for (ix = 0; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
- res->mdev, false, res->drop_rqn);
- if (err) {
- mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
- err, ix);
- goto err_destroy_xsk_rqts;
- }
- }
-
- for (ix = 0; ix < res->max_nch; ix++) {
- mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
- mlx5e_tir_builder_build_direct(builder);
-
- err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
- if (err) {
- mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
- err, ix);
- goto err_destroy_xsk_tirs;
- }
-
- mlx5e_tir_builder_clear(builder);
- }
-
goto out;
-err_destroy_xsk_tirs:
- while (--ix >= 0)
- mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
-
- ix = res->max_nch;
-err_destroy_xsk_rqts:
- while (--ix >= 0)
- mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
-
- ix = res->max_nch;
err_destroy_direct_tirs:
while (--ix >= 0)
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
@@ -420,12 +378,6 @@ static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
for (ix = 0; ix < res->max_nch; ix++) {
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
-
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
-
- mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
- mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
}
kvfree(res->channels);
@@ -491,13 +443,6 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}
-u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
-{
- WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));
-
- return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
-}
-
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
struct mlx5e_rss *rss = res->rss[0];
@@ -523,56 +468,53 @@ static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int i
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
-void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
+ struct mlx5e_channels *chs,
+ unsigned int ix)
{
- unsigned int nch, ix;
+ u32 rqn = res->rss_rqns[ix];
int err;
- nch = mlx5e_channels_get_num(chs);
-
- for (ix = 0; ix < chs->num; ix++)
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
- res->rss_nch = chs->num;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ rqn, ix, err);
+}
- mlx5e_rx_res_rss_enable(res);
+static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
+ unsigned int ix)
+{
+ int err;
- for (ix = 0; ix < nch; ix++) {
- u32 rqn;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+}
- mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- rqn, ix, err);
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+{
+ unsigned int nch, ix;
+ int err;
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
+ nch = mlx5e_channels_get_num(chs);
- if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
- rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- rqn, ix, err);
+ for (ix = 0; ix < chs->num; ix++) {
+ if (mlx5e_channels_is_xsk(chs, ix))
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ else
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
}
- for (ix = nch; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- res->drop_rqn, ix, err);
+ res->rss_nch = chs->num;
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
+ mlx5e_rx_res_rss_enable(res);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- }
+ for (ix = 0; ix < nch; ix++)
+ mlx5e_rx_res_channel_activate_direct(res, chs, ix);
+ for (ix = nch; ix < res->max_nch; ix++)
+ mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
@@ -595,22 +537,8 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
- for (ix = 0; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- res->drop_rqn, ix, err);
-
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
-
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- }
+ for (ix = 0; ix < res->max_nch; ix++)
+ mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
@@ -621,33 +549,17 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
}
}
-int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
- unsigned int ix)
+void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix, bool xsk)
{
- u32 rqn;
- int err;
-
- if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
- return -EINVAL;
-
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- rqn, ix, err);
- return err;
-}
+ if (xsk)
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ else
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
-int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
-{
- int err;
+ mlx5e_rx_res_rss_enable(res);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- return err;
+ mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
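
A minimal user-space sketch of the per-channel RQN selection idea behind the rx_res.c rework above: active channels pick the XSK RQ when a pool is attached and the regular RQ otherwise, while channels past the active count fall back to the drop RQ. The structures and the redirect call are toy stand-ins, not the driver's API.

#include <stdio.h>
#include <stdbool.h>

#define MAX_CH   8
#define DROP_RQN 0xdead

/* Toy per-channel state: a "regular" RQ number and an optional XSK RQ number. */
struct toy_channel {
	unsigned int regular_rqn;
	unsigned int xsk_rqn;
	bool xsk_active;
};

/* Stand-in for redirecting a direct RQT to a given RQN. */
static void redirect_direct(unsigned int ix, unsigned int rqn)
{
	printf("channel %u -> RQN %#x\n", ix, rqn);
}

static void activate_channels(struct toy_channel *chs, unsigned int nch)
{
	unsigned int ix;

	/* Active channels: XSK RQ if a pool is attached, else the regular RQ.
	 * Channels beyond the active count are pointed at the drop RQ.
	 */
	for (ix = 0; ix < nch; ix++)
		redirect_direct(ix, chs[ix].xsk_active ? chs[ix].xsk_rqn
						       : chs[ix].regular_rqn);
	for (ix = nch; ix < MAX_CH; ix++)
		redirect_direct(ix, DROP_RQN);
}

int main(void)
{
	struct toy_channel chs[MAX_CH] = {
		{ .regular_rqn = 0x10 },
		{ .regular_rqn = 0x11, .xsk_rqn = 0x21, .xsk_active = true },
		{ .regular_rqn = 0x12 },
	};

	activate_channels(chs, 3);
	return 0;
}
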
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index b39b20a720e0..5d5f64fab60f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -17,8 +17,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
- MLX5E_RX_RES_FEATURE_XSK = BIT(1),
- MLX5E_RX_RES_FEATURE_PTP = BIT(2),
+ MLX5E_RX_RES_FEATURE_PTP = BIT(1),
};
/* Setup */
@@ -32,7 +31,6 @@ void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
/* TIRN getters for flow steering */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
@@ -40,9 +38,8 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
-int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
- unsigned int ix);
-int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix);
+void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix, bool xsk);
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 69949ab830b6..25174f68613e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -12,6 +12,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
@@ -21,7 +22,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 37522352e4b2..c8e5ca65bb6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -79,6 +79,10 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
+ err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ if (err)
+ return err;
+
err = fill_meter_params_from_act(act, &params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 11f2a7fb72a9..201ac7dd338f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -147,7 +147,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
+ netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -230,12 +230,12 @@ static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
- err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ err = mlx5e_add_vlan_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
- err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ err = mlx5e_add_mac_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
@@ -256,10 +256,10 @@ static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
{
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
- mlx5e_remove_vlan_trap(priv);
+ mlx5e_remove_vlan_trap(priv->fs);
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
- mlx5e_remove_mac_trap(priv);
+ mlx5e_remove_mac_trap(priv->fs);
break;
default:
netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index c208ea307bff..4456ad5cedf1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -439,16 +439,24 @@ static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}
-static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
- u16 room = sq->reserved_room;
+ u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
- WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
- "wqe_size %u is greater than max SQ WQEBBs %u",
- wqe_size, sq->max_sq_wqebbs);
+ return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
+}
- room += MLX5E_STOP_ROOM(wqe_size);
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+ u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);
return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
+
+static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
+{
+ size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe);
+
+ return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
+}
#endif
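
The new mlx5e_get_mpw_info() helper above indexes an array whose elements end in a flexible array member, so the element stride has to be computed with struct_size()/array_size() rather than plain sizeof(). A standalone sketch of the same technique, using offsetof() in place of the kernel helpers (struct and names are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct elem {
	int consumed;
	long units[];	/* flexible array member */
};

/* Equivalent of struct_size(e, units, n): header plus n trailing entries. */
static size_t elem_size(size_t n_units)
{
	return offsetof(struct elem, units) + n_units * sizeof(long);
}

static struct elem *elem_at(void *base, size_t n_units, size_t i)
{
	return (struct elem *)((char *)base + i * elem_size(n_units));
}

int main(void)
{
	size_t n_units = 4, n_elems = 3, i;
	void *arr = calloc(n_elems, elem_size(n_units));

	if (!arr)
		return 1;
	for (i = 0; i < n_elems; i++)
		elem_at(arr, n_units, i)->consumed = (int)i;
	printf("elem 2 consumed = %d\n", elem_at(arr, n_units, 2)->consumed);
	free(arr);
	return 0;
}
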
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 8f321a6c0809..4685c652c97e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -333,7 +333,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 287e17911251..bc2d9034af5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -122,7 +122,7 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 2c520394aa1d..ebada0c5af3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -72,6 +72,7 @@ void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *x
{
xsk->headroom = xsk_pool_get_headroom(pool);
xsk->chunk_size = xsk_pool_get_chunk_size(pool);
+ xsk->unaligned = pool->unaligned;
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
@@ -98,6 +99,15 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
mlx5e_build_xsk_param(pool, &xsk);
+ if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
+ const char *recommendation = is_power_of_2(xsk.chunk_size) ?
+ "Upgrade firmware" : "Disable striding RQ";
+
+ mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n",
+ xsk.chunk_size, recommendation);
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
/* XSK objects will be created on open. */
goto validate_closed;
@@ -123,15 +133,12 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
* any Fill Ring entries at the setup stage.
*/
- err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix);
- if (unlikely(err))
- goto err_deactivate;
+ mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);
- return 0;
+ mlx5e_deactivate_rq(&c->rq);
+ mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);
-err_deactivate:
- mlx5e_deactivate_xsk(c);
- mlx5e_close_xsk(c);
+ return 0;
err_remove_pool:
mlx5e_xsk_remove_pool(&priv->xsk, ix);
@@ -170,7 +177,13 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
goto remove_pool;
c = priv->channels.c[ix];
- mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix);
+
+ mlx5e_activate_rq(&c->rq);
+ mlx5e_trigger_napi_icosq(c);
+ mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);
+
+ mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);
+
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
@@ -208,11 +221,10 @@ int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
- u16 ix;
- if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ if (unlikely(qid >= params->num_channels))
return -EINVAL;
- return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
- mlx5e_xsk_disable_pool(priv, ix);
+ return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
+ mlx5e_xsk_disable_pool(priv, qid);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 9a1553598a7c..c91b54d9ff27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,18 +8,221 @@
/* RX data path */
-static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
- u32 cqe_bcnt)
+int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5_wq_cyc *wq = &icosq->wq;
+ struct mlx5e_umr_wqe *umr_wqe;
+ int batch, i;
+ u32 offset; /* 17-bit value with MTT. */
+ u16 pi;
+
+ if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
+ goto err;
+
+ BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+ batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
+ rq->mpwqe.pages_per_wqe);
+
+ /* If batch < pages_per_wqe, either:
+ * 1. Some (or all) descriptors were invalid.
+ * 2. dma_need_sync is true, and it fell back to allocating one frame.
+ * In either case, try to continue allocating frames one by one, until
+ * the first error, which will mean there are no more valid descriptors.
+ */
+ for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
+ wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!wi->alloc_units[batch].xsk))
+ goto err_reuse_batch;
+ }
+
+ pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);
+ umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
+
+ if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(addr | MLX5_EN_WR),
+ };
+ }
+ } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ };
+ }
+ } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
+ u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 1] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr + mapping_size),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 2] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr + mapping_size * 2),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 3] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(rq->wqe_overflow.addr),
+ };
+ }
+ } else {
+ __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
+ rq->xsk_pool->chunk_size);
+ __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ .bcount = frame_size,
+ };
+ umr_wqe->inline_klms[(i << 1) + 1] = (struct mlx5_klm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(rq->wqe_overflow.addr),
+ .bcount = pad_size,
+ };
+ }
+ }
+
+ bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
+ wi->consumed_strides = 0;
+
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
+
+ /* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
+ offset = ix * rq->mpwqe.mtts_per_wqe;
+ if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ offset = offset * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
+ else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_OVERSIZED))
+ offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
+ else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
+ offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+
+ icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = rq->mpwqe.umr_wqebbs,
+ .umr.rq = rq,
+ };
+
+ icosq->pc += rq->mpwqe.umr_wqebbs;
+
+ icosq->doorbell_cseg = &umr_wqe->ctrl;
+
+ return 0;
+
+err_reuse_batch:
+ while (--batch >= 0)
+ xsk_buff_free(wi->alloc_units[batch].xsk);
+
+err:
+ rq->stats->buff_alloc_err++;
+ return -ENOMEM;
+}
+
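
The function above allocates a batch first and then tops up the shortfall one buffer at a time, unwinding everything on the first failure. A user-space sketch of that control flow with a toy allocator (the pool counter and buffer sizes are invented; only the batch-then-singles-then-unwind pattern mirrors the code):

#include <stdio.h>
#include <stdlib.h>

#define WANT 8

static int pool_left = 5;	/* pretend only 5 buffers are available */

/* Toy batch allocator: may return fewer than requested. */
static int alloc_batch(void **bufs, int n)
{
	int i;

	for (i = 0; i < n && pool_left > 0; i++, pool_left--)
		bufs[i] = malloc(16);
	return i;
}

static void *alloc_one(void)
{
	if (pool_left <= 0)
		return NULL;
	pool_left--;
	return malloc(16);
}

int main(void)
{
	void *bufs[WANT];
	int batch, i;

	batch = alloc_batch(bufs, WANT);

	/* Top up the shortfall one buffer at a time; the first failure means
	 * nothing more can be allocated, so everything taken so far is undone.
	 */
	for (; batch < WANT; batch++) {
		bufs[batch] = alloc_one();
		if (!bufs[batch])
			goto err_unwind;
	}
	printf("got all %d buffers\n", WANT);
	goto out;

err_unwind:
	printf("only %d buffers available, unwinding\n", batch);
out:
	for (i = 0; i < batch; i++)
		free(bufs[i]);
	return 0;
}
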
+int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ struct xdp_buff **buffs;
+ u32 contig, alloc;
+ int i;
+
+ /* mlx5e_init_frags_partition creates a 1:1 mapping between
+ * rq->wqe.frags and rq->wqe.alloc_units, which allows us to
+ * allocate XDP buffers straight into alloc_units.
+ */
+ BUILD_BUG_ON(sizeof(rq->wqe.alloc_units[0]) !=
+ sizeof(rq->wqe.alloc_units[0].xsk));
+ buffs = (struct xdp_buff **)rq->wqe.alloc_units;
+ contig = mlx5_wq_cyc_get_size(wq) - ix;
+ if (wqe_bulk <= contig) {
+ alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);
+ } else {
+ alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);
+ if (likely(alloc == contig))
+ alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);
+ }
+
+ for (i = 0; i < alloc; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+ struct mlx5e_rx_wqe_cyc *wqe;
+ dma_addr_t addr;
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
+ /* Assumes log_num_frags == 0. */
+ frag = &rq->wqe.frags[j];
+
+ addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+ }
+
+ return alloc;
+}
+
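
The batched WQE allocation above splits a bulk fill of a cyclic queue into at most two contiguous chunks so that a batch API taking a flat array can be used across the wrap point. A compilable sketch of just that split, with a toy ring and fill function standing in for the work queue and xsk_buff_alloc_batch():

#include <stdio.h>

#define RING_SIZE 8

static unsigned int fill_contig(int *ring, unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		ring[start + i] = 1;
	return n;	/* a real batch API may fill fewer */
}

static unsigned int fill_bulk(int *ring, unsigned int ix, unsigned int bulk)
{
	unsigned int contig = RING_SIZE - ix;
	unsigned int done;

	if (bulk <= contig)
		return fill_contig(ring, ix, bulk);

	/* Crosses the end of the ring: fill up to the end, then wrap to 0. */
	done = fill_contig(ring, ix, contig);
	if (done == contig)
		done += fill_contig(ring, 0, bulk - contig);
	return done;
}

int main(void)
{
	int ring[RING_SIZE] = { 0 };
	unsigned int i, done = fill_bulk(ring, 6, 5);	/* wraps: 6,7,0,1,2 */

	printf("filled %u slots:", done);
	for (i = 0; i < RING_SIZE; i++)
		printf(" %d", ring[i]);
	printf("\n");
	return 0;
}
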
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ int i;
+
+ for (i = 0; i < wqe_bulk; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+ struct mlx5e_rx_wqe_cyc *wqe;
+ dma_addr_t addr;
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
+ /* Assumes log_num_frags == 0. */
+ frag = &rq->wqe.frags[j];
+
+ frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!frag->au->xsk))
+ return i;
+
+ addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+ }
+
+ return wqe_bulk;
+}
+
+static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
+{
+ u32 totallen = xdp->data_end - xdp->data_meta;
+ u32 metalen = xdp->data - xdp->data_meta;
struct sk_buff *skb;
- skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
+ skb = napi_alloc_skb(rq->cq.napi, totallen);
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
return NULL;
}
- skb_put_data(skb, data, cqe_bcnt);
+ skb_put_data(skb, xdp->data_meta, totallen);
+
+ if (metalen) {
+ skb_metadata_set(skb, metalen);
+ __skb_pull(skb, metalen);
+ }
return skb;
}
@@ -30,7 +233,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
+ struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -46,8 +249,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xdp->data_end = xdp->data + cqe_bcnt;
- xdp_set_data_meta_invalid(xdp);
+ xsk_buff_set_size(xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -76,14 +278,14 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+ return mlx5e_xsk_construct_skb(rq, xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->di->xsk;
+ struct xdp_buff *xdp = wi->au->xsk;
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
@@ -93,8 +295,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xdp->data_end = xdp->data + cqe_bcnt;
- xdp_set_data_meta_invalid(xdp);
+ xsk_buff_set_size(xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -103,8 +304,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
- * will be handled by mlx5e_put_rx_frag.
+ * will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+ return mlx5e_xsk_construct_skb(rq, xdp);
}
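
The reworked mlx5e_xsk_construct_skb() above copies from data_meta rather than data, so metadata an XDP program placed in front of the packet survives into the skb. A small illustration of the length arithmetic with toy structs (the skb and xdp_buff stand-ins are invented; only the data_meta/data/data_end relationship reflects the change):

#include <stdio.h>
#include <string.h>

struct toy_xdp {
	unsigned char *data_meta;	/* start of XDP metadata */
	unsigned char *data;		/* start of packet payload */
	unsigned char *data_end;	/* end of packet payload */
};

int main(void)
{
	unsigned char frame[64] = "METApayload";
	struct toy_xdp xdp = {
		.data_meta = frame,		/* 4 bytes of metadata */
		.data      = frame + 4,
		.data_end  = frame + 11,
	};
	size_t totallen = (size_t)(xdp.data_end - xdp.data_meta);	/* 11 */
	size_t metalen  = (size_t)(xdp.data - xdp.data_meta);		/* 4  */
	unsigned char copy[64];

	/* Copy metadata + payload in one go, then remember where the payload
	 * proper begins -- the skb version marks this via skb_metadata_set().
	 */
	memcpy(copy, xdp.data_meta, totallen);
	printf("metalen=%zu payload=%.*s\n", metalen,
	       (int)(totallen - metalen), copy + metalen);
	return 0;
}
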
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index cc18d97d8ee0..087c943bd8e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -5,12 +5,12 @@
#define __MLX5_EN_XSK_RX_H__
#include "en.h"
-#include <net/xdp_sock_drv.h>
-
-#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
/* RX data path */
+int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
u16 cqe_bcnt,
@@ -20,46 +20,4 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
-static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
-retry:
- dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
- if (!dma_info->xsk)
- return -ENOMEM;
-
- /* Store the DMA address without headroom. In striding RQ case, we just
- * provide pages for UMR, and headroom is counted at the setup stage
- * when creating a WQE. In non-striding RQ case, headroom is accounted
- * in mlx5e_alloc_rx_wqe.
- */
- dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
-
- /* MTT page mapping has alignment requirements. If they are not
- * satisfied, leak the descriptor so that it won't come again, and try
- * to allocate a new one.
- */
- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
- xsk_buff_discard(dma_info->xsk);
- goto retry;
- }
- }
-
- return 0;
-}
-
-static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
-{
- if (!xsk_uses_need_wakeup(rq->xsk_pool))
- return alloc_err;
-
- if (unlikely(alloc_err))
- xsk_set_rx_need_wakeup(rq->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rq->xsk_pool);
-
- return false;
-}
-
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 98ed9ef3a6bd..ff03c43833bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -5,24 +5,19 @@
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"
+#include <net/xdp_sock_drv.h>
-/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
- * change unexpectedly, and mlx5e has a minimum valid stride size for striding
- * RQ, keep this check in the driver.
+/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
+ * stride size of striding RQ.
*/
-#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
+#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE ||
- xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
- return false;
-
- /* Current MTU and XSK headroom don't allow packets to fit the frames. */
- if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
+ if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
return false;
/* frag_sz is different for regular and XSK RQs, so ensure that linear
@@ -30,9 +25,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
*/
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
default: /* MLX5_WQ_TYPE_CYCLIC */
- return mlx5e_rx_is_linear_skb(params, xsk);
+ return mlx5e_rx_is_linear_skb(mdev, params, xsk);
}
}
@@ -71,7 +66,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->xsk_pool = pool;
rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
- rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
+ rq_xdp_ix = c->ix;
err = mlx5e_rq_set_handlers(rq, params, xsk);
if (err)
return err;
@@ -159,7 +154,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 4902ef74fedf..367a9505ca4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -12,18 +12,14 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_channel *c;
- u16 ix;
if (unlikely(!mlx5e_xdp_is_active(priv)))
return -ENETDOWN;
- if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ if (unlikely(qid >= params->num_channels))
return -EINVAL;
- c = priv->channels.c[ix];
-
- if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
- return -EINVAL;
+ c = priv->channels.c[qid];
if (!napi_if_scheduled_mark_missed(&c->napi)) {
/* To avoid WQE overrun, don't post a NOP if async_icosq is not
@@ -36,9 +32,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_napi_icosq(c);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index a05085035f23..9c505158b975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -5,7 +5,6 @@
#define __MLX5_EN_XSK_TX_H__
#include "en.h"
-#include <net/xdp_sock_drv.h>
/* TX data path */
@@ -13,15 +12,4 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
-static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
-{
- if (!xsk_uses_need_wakeup(sq->xsk_pool))
- return;
-
- if (sq->pc != sq->cc)
- xsk_clear_tx_need_wakeup(sq->xsk_pool);
- else
- xsk_set_tx_need_wakeup(sq->xsk_pool);
-}
-
#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1839f1ab1ddd..07187028f0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -39,6 +39,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
+#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
@@ -137,6 +138,15 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
+ return false;
+ }
+#endif
+
return true;
}
@@ -163,6 +173,11 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
+ mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
+#endif
+
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 20a4f1e585af..285d32d2fd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
+#include <mlx5_core.h>
#include "en_accel/fs_tcp.h"
#include "fs_core.h"
@@ -71,13 +71,13 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
struct mlx5e_flow_table *ft = NULL;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
@@ -86,19 +86,17 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs->accel_tcp;
-
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (sk->sk_family) {
case AF_INET:
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
- mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
- &inet_sk(sk)->inet_rcv_saddr,
- inet_sk(sk)->inet_sport,
- &inet_sk(sk)->inet_daddr,
- inet_sk(sk)->inet_dport);
+ fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
+ &inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ &inet_sk(sk)->inet_daddr,
+ inet_sk(sk)->inet_dport);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
@@ -140,34 +138,32 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
- PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
out:
kvfree(spec);
return flow;
}
-static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
+static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
enum accel_fs_tcp_type type)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_flow_table *accel_fs_t;
struct mlx5_flow_destination dest;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, accel_fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -265,9 +261,11 @@ out:
return err;
}
-static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
+static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,21 +275,21 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
- ft->t->id, ft->t->level);
+ fs_dbg(fs, "Created fs accel table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = accel_fs_tcp_create_groups(ft, type);
if (err)
goto err;
- err = accel_fs_tcp_add_default_rule(priv, type);
+ err = accel_fs_tcp_add_default_rule(fs, type);
if (err)
goto err;
@@ -301,17 +299,18 @@ err:
return err;
}
-static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
@@ -319,32 +318,32 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs->accel_tcp->tables[i].t;
+ dest.ft = accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
return 0;
}
-static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
+static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
{
- struct mlx5e_accel_fs_tcp *fs_tcp;
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
- fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -353,40 +352,43 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
fs_tcp->tables[i].t = NULL;
}
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
int i;
- if (!priv->fs->accel_tcp)
+ if (!accel_tcp)
return;
- accel_fs_tcp_disable(priv);
+ accel_fs_tcp_disable(fs);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
- accel_fs_tcp_destroy_table(priv, i);
+ accel_fs_tcp_destroy_table(fs, i);
- kfree(priv->fs->accel_tcp);
- priv->fs->accel_tcp = NULL;
+ kfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
}
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp;
int i, err;
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
- if (!priv->fs->accel_tcp)
+ accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
+ if (!accel_tcp)
return -ENOMEM;
+ mlx5e_fs_set_accel_tcp(fs, accel_tcp);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- err = accel_fs_tcp_create_table(priv, i);
+ err = accel_fs_tcp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = accel_fs_tcp_enable(priv);
+ err = accel_fs_tcp_enable(fs);
if (err)
goto err_destroy_tables;
@@ -394,9 +396,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_tables:
while (--i >= 0)
- accel_fs_tcp_destroy_table(priv, i);
-
- kfree(priv->fs->accel_tcp);
- priv->fs->accel_tcp = NULL;
+ accel_fs_tcp_destroy_table(fs, i);
+ kfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index 589235824543..a032bff482a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -4,19 +4,19 @@
#ifndef __MLX5E_ACCEL_FS_TCP_H__
#define __MLX5E_ACCEL_FS_TCP_H__
-#include "en.h"
+#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
-static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
-static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
+static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
+static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f8113fd23265..b859e4a4c744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -174,6 +174,8 @@ static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -182,15 +184,14 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
-
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +206,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -230,6 +231,7 @@ err_add:
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
@@ -249,7 +251,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -260,6 +262,7 @@ out:
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -271,7 +274,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
@@ -385,7 +388,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
0xff, 16);
}
- flow_act->ipsec_obj_id = ipsec_obj_id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act->crypto.obj_id = ipsec_obj_id;
flow_act->flags |= FLOW_ACT_NO_APPEND;
}
@@ -441,7 +445,7 @@ static int rx_add_rule(struct mlx5e_priv *priv,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
flow_act.modify_hdr = modify_hdr;
@@ -497,7 +501,7 @@ static int tx_add_rule(struct mlx5e_priv *priv,
MLX5_ETH_WQE_FT_META_IPSEC);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -573,7 +577,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
int err = -ENOMEM;
ns = mlx5_get_flow_namespace(ipsec->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
if (!ns)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 0ae4e12ce528..1878a70b9031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,9 @@
#include "en.h"
#include "en/txrx.h"
-/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
-#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
struct mlx5e_accel_tx_ipsec_state {
@@ -77,11 +77,6 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
-static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
-{
- return ipsec_st->x;
-}
-
static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
{
return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
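
The ipsec_rxtx.h hunk above narrows the syndrome field to bits 29..24 (bit 30 reserved). A quick user-space check of the resulting bit layout, with GENMASK redefined locally so the extraction macros can be exercised outside the kernel (macro names here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define METADATA_MARKER(md)   (((md) >> 31) & 0x1)
#define METADATA_SYNDROME(md) (((md) >> 24) & GENMASK(5, 0))
#define METADATA_HANDLE(md)   ((md) & GENMASK(23, 0))

int main(void)
{
	uint32_t md = (1u << 31) | (0x2au << 24) | 0x00abcd;

	printf("marker=%u syndrome=%#x handle=%#x\n",
	       METADATA_MARKER(md), METADATA_SYNDROME(md), METADATA_HANDLE(md));
	return 0;
}
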
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 30a70d139046..da2184c94203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -92,6 +92,24 @@ static const struct tlsdev_ops mlx5e_ktls_ops = {
.tls_dev_resync = mlx5e_ktls_resync,
};
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ /* Check the possibility to post the required ICOSQ WQEs. */
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
+ return false;
+
+ return true;
+}
+
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
@@ -118,9 +136,9 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
if (enable)
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
else
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
mutex_unlock(&priv->state_lock);
return err;
@@ -138,7 +156,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
if (err) {
destroy_workqueue(priv->tls->rx_wq);
return err;
@@ -154,7 +172,7 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
destroy_workqueue(priv->tls->rx_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 948400dee525..1c35045e41fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -25,7 +25,8 @@ static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ return (MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128) ||
+ MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256));
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
@@ -36,6 +37,10 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
+ case TLS_CIPHER_AES_GCM_256:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256);
+ break;
}
return false;
@@ -56,10 +61,7 @@ static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
-}
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 27483aa7be8a..3e54834747ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx {
};
struct mlx5e_ktls_offload_context_rx {
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct accel_rule rule;
struct sock *sk;
struct mlx5e_rq_stats *rq_stats;
@@ -111,7 +111,7 @@ static void accel_rule_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
- rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
+ rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
mlx5e_tir_get_tirn(&priv_rx->tir),
MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
@@ -362,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
bool trigger_poll;
@@ -373,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
spin_lock_bh(&ktls_resync->lock);
spin_lock_bh(&priv_rx->lock);
- memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+ switch (priv_rx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &priv_rx->crypto_info.crypto_info_128;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &priv_rx->crypto_info.crypto_info_256;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_rx->crypto_info.crypto_info.cipher_type);
+ spin_unlock_bh(&priv_rx->lock);
+ spin_unlock_bh(&ktls_resync->lock);
+ return;
+ }
+
if (list_empty(&priv_rx->list)) {
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
@@ -461,6 +484,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct net_device *netdev = rq->netdev;
+ struct net *net = dev_net(netdev);
struct sock *sk = NULL;
unsigned int datalen;
struct iphdr *iph;
@@ -475,7 +499,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
@@ -485,7 +509,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
@@ -603,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
- priv_rx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_rx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_rx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
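
The AES-GCM-256 additions store the crypto parameters in a union keyed by the common cipher_type header and switch on it at use time. A compilable sketch of that pattern; the structs below are simplified stand-ins for the tls12_crypto_info_* types, and the numeric cipher IDs are illustrative:

#include <stdio.h>
#include <string.h>

enum { CIPHER_AES_GCM_128 = 51, CIPHER_AES_GCM_256 = 52 };

struct crypto_hdr { unsigned short version, cipher_type; };
struct crypto_gcm_128 { struct crypto_hdr hdr; unsigned char rec_seq[8]; unsigned char key[16]; };
struct crypto_gcm_256 { struct crypto_hdr hdr; unsigned char rec_seq[8]; unsigned char key[32]; };

union crypto_info {
	struct crypto_hdr hdr;		/* common initial sequence */
	struct crypto_gcm_128 gcm_128;
	struct crypto_gcm_256 gcm_256;
};

static size_t key_len(const union crypto_info *ci)
{
	switch (ci->hdr.cipher_type) {
	case CIPHER_AES_GCM_128:
		return sizeof(ci->gcm_128.key);
	case CIPHER_AES_GCM_256:
		return sizeof(ci->gcm_256.key);
	default:
		fprintf(stderr, "unsupported cipher %u\n", ci->hdr.cipher_type);
		return 0;
	}
}

int main(void)
{
	union crypto_info ci;

	memset(&ci, 0, sizeof(ci));
	ci.gcm_256.hdr.cipher_type = CIPHER_AES_GCM_256;
	printf("key length: %zu bytes\n", key_len(&ci));
	return 0;
}
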
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 0aef69527226..2e0335246967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -93,7 +93,7 @@ struct mlx5e_ktls_offload_context_tx {
bool ctx_post_pending;
/* control / resync */
struct list_head list_node; /* member of the pool */
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats;
@@ -246,7 +246,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
- struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
struct mlx5e_async_ctx *bulk_async;
int i;
@@ -255,7 +255,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
return;
i = 0;
- list_for_each_entry(obj, list, list_node) {
+ list_for_each_entry_safe(obj, n, list, list_node) {
mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
i++;
}
@@ -485,8 +485,20 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
goto err_create_key;
priv_tx->expected_seq = start_offload_tcp_sn;
- priv_tx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_tx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_tx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -671,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
- rec_seq = info->rec_seq;
- rec_seq_sz = sizeof(info->rec_seq);
+ switch (priv_tx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_tx->crypto_info.crypto_info.cipher_type);
+ return;
+ }
skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
if (!skip_static_post)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
index ac29aeb8af49..570a912dd6fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
@@ -21,7 +21,7 @@ enum {
static void
fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 key_id, u32 resync_tcp_sn)
{
char *initial_rn, *gcm_iv;
@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
ctx = params->ctx;
- EXTRACT_INFO_FIELDS;
+ switch (crypto_info->crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &crypto_info->crypto_info_128;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &crypto_info->crypto_info_256;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->crypto_info.cipher_type);
+ return;
+ }
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction)
{
@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
- fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);
+ fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index 0dc715c4c10d..3d79cd379890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -27,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);
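+/* Union of the crypto_info layouts supported for offload; 'crypto_info' aliases the
+ * common header that both cipher-specific variants begin with.
+ */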
+union mlx5e_crypto_info {
+ struct tls_crypto_info crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 crypto_info_128;
+ struct tls12_crypto_info_aes_gcm_256 crypto_info_256;
+};
+
struct mlx5e_set_tls_static_params_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
@@ -72,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe {
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction);
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
new file mode 100644
index 000000000000..5da746da898d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -0,0 +1,1870 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/xarray.h>
+
+#include "en.h"
+#include "lib/aso.h"
+#include "lib/mlx5.h"
+#include "en_accel/macsec.h"
+#include "en_accel/macsec_fs.h"
+
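+/* Midpoint (2^31) of the 32-bit packet-number space; used by macsec_epn_update() to
+ * detect when the lower PN half has wrapped around.
+ */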
+#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
+#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
+
+enum mlx5_macsec_aso_event_arm {
+ MLX5E_ASO_EPN_ARM = BIT(0),
+};
+
+enum {
+ MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
+struct mlx5e_macsec_handle {
+ struct mlx5e_macsec *macsec;
+ u32 obj_id;
+ u8 idx;
+};
+
+enum {
+ MLX5_MACSEC_EPN,
+};
+
+struct mlx5e_macsec_aso_out {
+ u8 event_arm;
+ u32 mode_param;
+};
+
+struct mlx5e_macsec_aso_in {
+ u8 mode;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_epn_state {
+ u32 epn_msb;
+ u8 epn_enabled;
+ u8 overlap;
+};
+
+struct mlx5e_macsec_async_work {
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ struct work_struct work;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_sa {
+ bool active;
+ u8 assoc_num;
+ u32 macsec_obj_id;
+ u32 enc_key_id;
+ u32 next_pn;
+ sci_t sci;
+ salt_t salt;
+
+ struct rhash_head hash;
+ u32 fs_id;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct rcu_head rcu_head;
+ struct mlx5e_macsec_epn_state epn_state;
+};
+
+struct mlx5e_macsec_rx_sc;
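+/* Ties an Rx SC to the fs_id allocated from macsec->sc_xarray; the Rx datapath uses that
+ * fs_id (carried in the CQE flow-table metadata) to look the SC back up, see
+ * mlx5e_macsec_offload_handle_rx_skb().
+ */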
+struct mlx5e_macsec_rx_sc_xarray_element {
+ u32 fs_id;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+};
+
+struct mlx5e_macsec_rx_sc {
+ bool active;
+ sci_t sci;
+ struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
+ struct list_head rx_sc_list_element;
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct metadata_dst *md_dst;
+ struct rcu_head rcu_head;
+};
+
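+/* DMA-able scratch buffer (plus its mkey) through which the device returns the MACsec
+ * ASO context when it is queried, see macsec_aso_query().
+ */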
+struct mlx5e_macsec_umr {
+ dma_addr_t dma_addr;
+ u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ u32 mkey;
+};
+
+struct mlx5e_macsec_aso {
+ /* ASO */
+ struct mlx5_aso *maso;
+ /* Protects macsec ASO */
+ struct mutex aso_lock;
+ /* UMR */
+ struct mlx5e_macsec_umr *umr;
+
+ u32 pdn;
+};
+
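+/* Hash table parameters for the Tx SCI -> mlx5e_macsec_sa lookup done on the Tx datapath */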
+static const struct rhashtable_params rhash_sci = {
+ .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
+ .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
+ .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+struct mlx5e_macsec_device {
+ const struct net_device *netdev;
+ struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
+ struct list_head macsec_rx_sc_list_head;
+ unsigned char *dev_addr;
+ struct list_head macsec_device_list_element;
+};
+
+struct mlx5e_macsec {
+ struct list_head macsec_device_list_head;
+ int num_of_devices;
+ struct mlx5e_macsec_fs *macsec_fs;
+ struct mutex lock; /* Protects mlx5e_macsec internal contexts */
+
+ /* Tx sci -> fs id mapping handling */
+ struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
+
+ /* Rx fs_id -> rx_sc mapping */
+ struct xarray sc_xarray;
+
+ struct mlx5_core_dev *mdev;
+
+ /* Stats manage */
+ struct mlx5e_macsec_stats stats;
+
+ /* ASO */
+ struct mlx5e_macsec_aso aso;
+
+ struct notifier_block nb;
+ struct workqueue_struct *wq;
+};
+
+struct mlx5_macsec_obj_attrs {
+ u32 aso_pdn;
+ u32 next_pn;
+ __be64 sci;
+ u32 enc_key_id;
+ bool encrypt;
+ struct mlx5e_macsec_epn_state epn_state;
+ salt_t salt;
+ __be32 ssci;
+ bool replay_protect;
+ u32 replay_window;
+};
+
+struct mlx5_aso_ctrl_param {
+ u8 data_mask_mode;
+ u8 condition_0_operand;
+ u8 condition_1_operand;
+ u8 condition_0_offset;
+ u8 condition_1_offset;
+ u8 data_offset;
+ u8 condition_operand;
+ u32 condition_0_data;
+ u32 condition_0_mask;
+ u32 condition_1_data;
+ u32 condition_1_mask;
+ u64 bitwise_data;
+ u64 data_mask;
+};
+
+static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr;
+ struct device *dma_device;
+ dma_addr_t dma_addr;
+ int err;
+
+ umr = kzalloc(sizeof(*umr), GFP_KERNEL);
+ if (!umr) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ dma_device = &mdev->pdev->dev;
+ dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(dma_device, dma_addr);
+ if (err) {
+ mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
+ goto out_dma;
+ }
+
+ err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
+ goto out_mkey;
+ }
+
+ umr->dma_addr = dma_addr;
+
+ aso->umr = umr;
+
+ return 0;
+
+out_mkey:
+ dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+out_dma:
+ kfree(umr);
+ return err;
+}
+
+static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr = aso->umr;
+
+ mlx5_core_destroy_mkey(mdev, umr->mkey);
+ dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ kfree(umr);
+}
+
+static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
+{
+ u8 window_sz;
+
+ if (!attrs->replay_protect)
+ return 0;
+
+ switch (attrs->replay_window) {
+ case 256:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ case 128:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 64:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 32:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
+
+ return 0;
+}
+
+static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_obj_attrs *attrs,
+ bool is_tx,
+ u32 *macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *aso_ctx;
+ void *obj;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
+ aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
+
+ MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
+ MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
+ MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
+ MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
+ MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
+
+ /* Epn */
+ if (attrs->epn_state.epn_enabled) {
+ void *salt_p;
+ int i;
+
+ MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
+ salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
+		for (i = 0; i < 3; i++)
+ memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
+ } else {
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
+ }
+
+ MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
+ if (is_tx) {
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
+ } else {
+ err = macsec_set_replay_protection(attrs, aso_ctx);
+ if (err)
+ return err;
+ }
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to create MACsec object (err = %d)\n",
+ err);
+ return err;
+ }
+
+ *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa,
+ bool is_tx)
+{
+ int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ if ((is_tx) && sa->fs_id) {
+		/* Make sure ongoing datapath readers see a valid SA */
+ rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ sa->fs_id = 0;
+ }
+
+ if (!sa->macsec_rule)
+ return;
+
+ mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+ sa->macsec_rule = NULL;
+}
+
+static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa,
+ bool encrypt,
+ bool is_tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_macsec_obj_attrs obj_attrs;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct macsec_key *key;
+ int err;
+
+ obj_attrs.next_pn = sa->next_pn;
+ obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
+ obj_attrs.enc_key_id = sa->enc_key_id;
+ obj_attrs.encrypt = encrypt;
+ obj_attrs.aso_pdn = macsec->aso.pdn;
+ obj_attrs.epn_state = sa->epn_state;
+
+ if (is_tx) {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
+ key = &ctx->sa.tx_sa->key;
+ } else {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+ key = &ctx->sa.rx_sa->key;
+ }
+
+ memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+ obj_attrs.replay_window = ctx->secy->replay_window;
+ obj_attrs.replay_protect = ctx->secy->replay_protect;
+
+ err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
+ if (!macsec_rule) {
+ err = -ENOMEM;
+ goto destroy_macsec_object;
+ }
+
+ sa->macsec_rule = macsec_rule;
+
+ if (is_tx) {
+ err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ if (err)
+ goto destroy_macsec_object_and_rule;
+ }
+
+ return 0;
+
+destroy_macsec_object_and_rule:
+ mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
+destroy_macsec_object:
+ mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
+
+ return err;
+}
+
+static struct mlx5e_macsec_rx_sc *
+mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
+{
+ struct mlx5e_macsec_rx_sc *iter;
+
+ list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
+ if (iter->sci == sci)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *rx_sa,
+ bool active)
+{
+ struct mlx5_core_dev *mdev = macsec->mdev;
+ struct mlx5_macsec_obj_attrs attrs;
+ int err = 0;
+
+	if (rx_sa->active == active)
+ return 0;
+
+ rx_sa->active = active;
+ if (!active) {
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ return 0;
+ }
+
+ attrs.sci = rx_sa->sci;
+ attrs.enc_key_id = rx_sa->enc_key_id;
+ err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
+{
+ const struct net_device *netdev = ctx->netdev;
+ const struct macsec_secy *secy = ctx->secy;
+
+ if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
+ netdev_err(netdev,
+			   "MACsec offload is supported only when validate_frames is in strict mode\n");
+ return false;
+ }
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
+ netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
+ MACSEC_DEFAULT_ICV_LEN);
+ return false;
+ }
+
+ if (!secy->protect_frames) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when protect_frames is set\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5e_macsec_device *
+mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
+ const struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_device *iter;
+ const struct list_head *list;
+
+ list = &macsec->macsec_device_list_head;
+ list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
+ if (iter->netdev == ctx->secy->netdev)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
+ const pn_t *next_pn_halves)
+{
+ struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
+
+ sa->salt = key->salt;
+ epn_state->epn_enabled = 1;
+ epn_state->epn_msb = next_pn_halves->upper;
+ epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
+}
+
+static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+		err = -EINVAL;
+ goto out;
+ }
+
+ if (macsec_device->tx_sa[assoc_num]) {
+		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exists\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
+ if (!tx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+ tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
+ tx_sa->sci = secy->sci;
+ tx_sa->assoc_num = assoc_num;
+
+ if (secy->xpn)
+ update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &tx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ macsec_device->tx_sa[assoc_num] = tx_sa;
+ if (!secy->operational ||
+ assoc_num != tx_sc->encoding_sa ||
+ !tx_sa->active)
+ goto out;
+
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto destroy_encryption_key;
+
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+destroy_encryption_key:
+ macsec_device->tx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
+destroy_sa:
+ kfree(tx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct net_device *netdev;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ netdev = ctx->netdev;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+		err = -EINVAL;
+ goto out;
+ }
+
+ if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+ netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (tx_sa->active == ctx_tx_sa->active)
+ goto out;
+
+ if (tx_sa->assoc_num != tx_sc->encoding_sa)
+ goto out;
+
+ if (ctx_tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ } else {
+ if (!tx_sa->macsec_rule) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+		err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree_rcu(tx_sa);
+ macsec_device->tx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
+{
+ struct mlx5e_macsec_sa *macsec_sa;
+ u32 fs_id = 0;
+
+ rcu_read_lock();
+ macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
+ if (macsec_sa)
+ fs_id = macsec_sa->fs_id;
+ rcu_read_unlock();
+
+ return fs_id;
+}
+
+static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct list_head *rx_sc_list;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
+ if (rx_sc) {
+ netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
+ ctx_rx_sc->sci);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
+ if (!rx_sc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
+ if (!sc_xarray_element) {
+ err = -ENOMEM;
+ goto destroy_rx_sc;
+ }
+
+ sc_xarray_element->rx_sc = rx_sc;
+ err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
+ XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
+ if (err)
+ goto destroy_sc_xarray_elemenet;
+
+ rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!rx_sc->md_dst) {
+ err = -ENOMEM;
+ goto erase_xa_alloc;
+ }
+
+ rx_sc->sci = ctx_rx_sc->sci;
+ rx_sc->active = ctx_rx_sc->active;
+ list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
+
+ rx_sc->sc_xarray_element = sc_xarray_element;
+ rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+erase_xa_alloc:
+ xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
+destroy_sc_xarray_elemenet:
+ kfree(sc_xarray_element);
+destroy_rx_sc:
+ kfree(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int i;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
+ if (!rx_sc) {
+ err = -EINVAL;
+ goto out;
+ }
+
+	if (rx_sc->active == ctx_rx_sc->active)
+		goto out;
+	rx_sc->active = ctx_rx_sc->active;
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+			   ctx->rx_sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+	/* At this point the relevant MACsec offload Rx rules have already been
+	 * removed by mlx5e_macsec_cleanup_sa(). Wait for the datapath to finish
+	 * with any in-flight Rx data: xa_erase() synchronizes via RCU, and once
+	 * the fs_id is erased this rx_sc is hidden from the datapath.
+	 */
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+ xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
+ metadata_dst_free(rx_sc->md_dst);
+ kfree(rx_sc->sc_xarray_element);
+
+ kfree_rcu(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sc->rx_sa[assoc_num]) {
+ netdev_err(ctx->netdev,
+			   "MACsec offload rx_sc sci %lld rx_sa %d already exists\n",
+ sci, assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
+ if (!rx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rx_sa->active = ctx_rx_sa->active;
+ rx_sa->next_pn = ctx_rx_sa->next_pn;
+ rx_sa->sci = sci;
+ rx_sa->assoc_num = assoc_num;
+ rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
+
+ if (ctx->secy->xpn)
+ update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &rx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ rx_sc->rx_sa[assoc_num] = rx_sa;
+ if (!rx_sa->active)
+ goto out;
+
+ //TODO - add support for both authentication and encryption flows
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ if (err)
+ goto destroy_encryption_key;
+
+ goto out;
+
+destroy_encryption_key:
+ rx_sc->rx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
+destroy_sa:
+ kfree(rx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sa = rx_sc->rx_sa[assoc_num];
+	if (!rx_sa) {
+		netdev_err(ctx->netdev,
+			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+			   sci, assoc_num);
+		err = -EINVAL;
+		goto out;
+ }
+
+ if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+ netdev_err(ctx->netdev,
+ "MACsec offload update RX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ sci_t sci = ctx->sa.rx_sa->sc->sci;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sa = rx_sc->rx_sa[assoc_num];
+	if (!rx_sa) {
+		netdev_err(ctx->netdev,
+			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+			   sci, assoc_num);
+		err = -EINVAL;
+		goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ const struct net_device *netdev = ctx->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
+		netdev_err(netdev, "MACsec offload: MACsec net_device already exists\n");
+		err = -EEXIST;
+		goto out;
+ }
+
+ if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
+ netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
+ err = -EBUSY;
+ goto out;
+ }
+
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
+ if (!macsec_device->dev_addr) {
+ kfree(macsec_device);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->netdev = dev;
+
+ INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
+ list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
+
+ ++macsec->num_of_devices;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
+ struct mlx5e_macsec_device *macsec_device)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct list_head *list;
+ int i, err = 0;
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa || !rx_sa->macsec_rule)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ }
+ }
+
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ if (rx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+ memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
+out:
+ return err;
+}
+
+/* This function is called from two MACsec ops:
+ * macsec_set_mac_address - the MAC address changed, so the Tx contexts
+ * (MACsec object + steering) must be destroyed and re-created.
+ * macsec_changelink - the Tx SC or SecY may have changed, so both the Tx and
+ * Rx contexts (MACsec object + steering) must be destroyed and re-created.
+ */
+static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int i, err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+	/* if the dev_addr hasn't changed, the callback came from macsec_changelink */
+ if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
+ err = macsec_upd_secy_hw_address(ctx, macsec_device);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree(tx_sa);
+ macsec_device->tx_sa[i] = NULL;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+
+ kfree_rcu(rx_sc);
+ }
+
+ kfree(macsec_device->dev_addr);
+ macsec_device->dev_addr = NULL;
+
+ list_del_rcu(&macsec_device->macsec_device_list_element);
+ --macsec->num_of_devices;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
+ struct mlx5_macsec_obj_attrs *attrs)
+{
+ attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
+ attrs->epn_state.overlap = sa->epn_state.overlap;
+}
+
+static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5_aso_ctrl_param *param)
+{
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ if (macsec_aso->umr->dma_addr) {
+ aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
+ aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
+ aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
+ }
+
+ if (!param)
+ return;
+
+ aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
+ aso_ctrl->condition_1_0_operand = param->condition_1_operand |
+ param->condition_0_operand << 4;
+ aso_ctrl->condition_1_0_offset = param->condition_1_offset |
+ param->condition_0_offset << 4;
+ aso_ctrl->data_offset_condition_operand = param->data_offset |
+ param->condition_operand << 6;
+ aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
+ aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
+ aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
+ aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
+ aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
+ aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
+}
+
+static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
+ u32 macsec_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
+ u64 modify_field_select = 0;
+ void *obj;
+ int err;
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
+ macsec_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
+ modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
+
+ /* EPN */
+ if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
+ mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
+ macsec_id);
+ return -EOPNOTSUPP;
+ }
+
+ obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
+ MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5_aso_ctrl_param param = {};
+
+ param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
+ param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
+ param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
+ if (in->mode == MLX5_MACSEC_EPN) {
+ param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ param.bitwise_data = BIT_ULL(54);
+ param.data_mask = param.bitwise_data;
+ }
+ macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
+}
+
+static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false, 10);
+ mutex_unlock(&aso->aso_lock);
+
+ return err;
+}
+
+static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
+
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false, 10);
+ if (err)
+ goto err_out;
+
+ if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
+ out->event_arm |= MLX5E_ASO_EPN_ARM;
+
+ out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
+
+err_out:
+ mutex_unlock(&aso->aso_lock);
+ return err;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = iter->tx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list, *sc_list;
+ struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ sc_list = &iter->macsec_rx_sc_list_head;
+ list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = mlx5e_rx_sc->rx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
+ struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
+{
+ struct mlx5_macsec_obj_attrs attrs = {};
+ struct mlx5e_macsec_aso_in in = {};
+
+	/* When the bottom of the replay protection window (mode_param) crosses 2^31
+	 * (half sequence-number wraparound), i.e. mode_param > MLX5_MACSEC_EPN_SCOPE_MID,
+	 * SW should set esn_overlap to OLD (1).
+	 * When the bottom crosses 2^32 (full sequence-number wraparound), mode_param wraps
+	 * back below MLX5_MACSEC_EPN_SCOPE_MID, so SW should set esn_overlap to NEW (0)
+	 * and increment esn_msb.
+	 */
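+	/* Illustrative (not from HW spec) values, with MLX5_MACSEC_EPN_SCOPE_MID == 2^31:
+	 *   mode_param = 0xc0000000 (> 2^31): window bottom is in the upper half -> overlap = 1.
+	 *   mode_param = 0x00001000 (< 2^31): lower half wrapped -> epn_msb++, overlap = 0.
+	 */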
+
+ if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
+ sa->epn_state.epn_msb++;
+ sa->epn_state.overlap = 0;
+ } else {
+ sa->epn_state.overlap = 1;
+ }
+
+ macsec_build_accel_attrs(sa, &attrs);
+ mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
+
+ /* Re-set EPN arm event */
+ in.obj_id = obj_id;
+ in.mode = MLX5_MACSEC_EPN;
+ macsec_aso_set_arm_event(mdev, macsec, &in);
+}
+
+static void macsec_async_event(struct work_struct *work)
+{
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5e_macsec_aso_out out = {};
+ struct mlx5e_macsec_aso_in in = {};
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+
+ async_work = container_of(work, struct mlx5e_macsec_async_work, work);
+ macsec = async_work->macsec;
+ mdev = async_work->mdev;
+ obj_id = async_work->obj_id;
+ macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
+ goto out_async_work;
+ }
+ }
+
+ /* Query MACsec ASO context */
+ in.obj_id = obj_id;
+ macsec_aso_query(mdev, macsec, &in, &out);
+
+ /* EPN case */
+ if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
+ macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
+
+out_async_work:
+ kfree(async_work);
+}
+
+static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5_eqe_obj_change *obj_change;
+ struct mlx5_eqe *eqe = data;
+ u16 obj_type;
+ u32 obj_id;
+
+ if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+ return NOTIFY_DONE;
+
+ obj_change = &eqe->data.obj_change;
+ obj_type = be16_to_cpu(obj_change->obj_type);
+ obj_id = be32_to_cpu(obj_change->obj_id);
+
+ if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
+ return NOTIFY_DONE;
+
+ async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
+ if (!async_work)
+ return NOTIFY_DONE;
+
+ async_work->macsec = macsec;
+ async_work->mdev = macsec->mdev;
+ async_work->obj_id = obj_id;
+
+ INIT_WORK(&async_work->work, macsec_async_event);
+
+ WARN_ON(!queue_work(macsec->wq, &async_work->work));
+
+ return NOTIFY_OK;
+}
+
+static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ struct mlx5_aso *maso;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &aso->pdn);
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
+ err);
+ return err;
+ }
+
+ maso = mlx5_aso_create(mdev, aso->pdn);
+ if (IS_ERR(maso)) {
+ err = PTR_ERR(maso);
+ goto err_aso;
+ }
+
+ err = mlx5e_macsec_aso_reg_mr(mdev, aso);
+ if (err)
+ goto err_aso_reg;
+
+ mutex_init(&aso->aso_lock);
+
+ aso->maso = maso;
+
+ return 0;
+
+err_aso_reg:
+ mlx5_aso_destroy(maso);
+err_aso:
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+ return err;
+}
+
+static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ if (!aso)
+ return;
+
+ mlx5e_macsec_aso_dereg_mr(mdev, aso);
+
+ mlx5_aso_destroy(aso->maso);
+
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+}
+
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
+{
+ mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
+}
+
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
+{
+ if (!macsec)
+ return NULL;
+
+ return &macsec->stats;
+}
+
+static const struct macsec_ops macsec_offload_ops = {
+ .mdo_add_txsa = mlx5e_macsec_add_txsa,
+ .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
+ .mdo_del_txsa = mlx5e_macsec_del_txsa,
+ .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
+ .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
+ .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
+ .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
+ .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
+ .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
+ .mdo_add_secy = mlx5e_macsec_add_secy,
+ .mdo_upd_secy = mlx5e_macsec_upd_secy,
+ .mdo_del_secy = mlx5e_macsec_del_secy,
+};
+
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ goto err_out;
+
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ return;
+
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec *macsec;
+ u32 fs_id;
+
+ macsec = priv->macsec;
+ if (!macsec)
+ return;
+
+ fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data);
+
+ rcu_read_lock();
+ sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
+ rx_sc = sc_xarray_element->rx_sc;
+ if (rx_sc) {
+ dst_hold(&rx_sc->md_dst->dst);
+ skb_dst_set(skb, &rx_sc->md_dst->dst);
+ }
+
+ rcu_read_unlock();
+}
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return;
+
+ /* Enable MACsec */
+ mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
+ netdev->macsec_ops = &macsec_offload_ops;
+ netdev->features |= NETIF_F_HW_MACSEC;
+ netif_keep_dst(netdev);
+}
+
+int mlx5e_macsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_macsec *macsec = NULL;
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ if (!mlx5e_is_macsec_device(priv->mdev)) {
+ mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
+ return 0;
+ }
+
+ macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
+ if (!macsec)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&macsec->macsec_device_list_head);
+ mutex_init(&macsec->lock);
+
+ err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
+ goto err_aso;
+ }
+
+ macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
+ if (!macsec->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
+
+ priv->macsec = macsec;
+
+ macsec->mdev = mdev;
+
+ macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
+ if (!macsec_fs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ macsec->macsec_fs = macsec_fs;
+
+ macsec->nb.notifier_call = macsec_obj_change_event;
+ mlx5_notifier_register(mdev, &macsec->nb);
+
+ mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
+
+ return 0;
+
+err_out:
+ destroy_workqueue(macsec->wq);
+err_wq:
+ mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
+err_aso:
+ rhashtable_destroy(&macsec->sci_hash);
+err_hash:
+ kfree(macsec);
+ priv->macsec = NULL;
+ return err;
+}
+
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
+{
+	struct mlx5e_macsec *macsec = priv->macsec;
+	struct mlx5_core_dev *mdev;
+
+	if (!macsec)
+		return;
+
+	mdev = macsec->mdev;
+
+ mlx5_notifier_unregister(mdev, &macsec->nb);
+
+ mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
+
+ /* Cleanup workqueue */
+ destroy_workqueue(macsec->wq);
+
+ mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
+
+ priv->macsec = NULL;
+
+ rhashtable_destroy(&macsec->sci_hash);
+
+ mutex_destroy(&macsec->lock);
+
+ kfree(macsec);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
new file mode 100644
index 000000000000..d580b4a91253
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_ACCEL_MACSEC_H__
+#define __MLX5_EN_ACCEL_MACSEC_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include <linux/mlx5/driver.h>
+#include <net/macsec.h>
+#include <net/dst_metadata.h>
+
+/* Bits 31-30: MACsec marker, bits 3-0: MACsec id */
+#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
+#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0))
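+/* Example (hypothetical value): metadata 0x4000000a has bits 31:30 == 0x1, so
+ * MLX5_MACSEC_METADATA_MARKER() is true and MLX5_MACSEC_METADATA_HANDLE() extracts id 0xa.
+ */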
+
+struct mlx5e_priv;
+struct mlx5e_macsec;
+
+struct mlx5e_macsec_stats {
+ u64 macsec_rx_pkts;
+ u64 macsec_rx_bytes;
+ u64 macsec_rx_pkts_drop;
+ u64 macsec_rx_bytes_drop;
+ u64 macsec_tx_pkts;
+ u64 macsec_tx_bytes;
+ u64 macsec_tx_pkts_drop;
+ u64 macsec_tx_bytes_drop;
+};
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_macsec_init(struct mlx5e_priv *priv);
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb);
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ return md_dst && (md_dst->type == METADATA_MACSEC);
+}
+
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
+
+#else
+
+static inline void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) {}
+static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {}
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; }
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{}
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_EN_ACCEL_MACSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
new file mode 100644
index 000000000000..13dc628b988a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/macsec.h>
+#include <linux/netdevice.h>
+#include <linux/mlx5/qp.h>
+#include "fs_core.h"
+#include "en/fs.h"
+#include "en_accel/macsec_fs.h"
+#include "mlx5_core.h"
+
+/* MACsec TX flow steering */
+#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
+#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
+
+#define TX_CRYPTO_TABLE_LEVEL 0
+#define TX_CRYPTO_TABLE_NUM_GROUPS 3
+#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
+#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
+ CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
+#define TX_CHECK_TABLE_LEVEL 1
+#define TX_CHECK_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_LEVEL 0
+#define RX_CHECK_TABLE_LEVEL 1
+#define RX_CHECK_TABLE_NUM_FTE 3
+#define RX_CRYPTO_TABLE_NUM_GROUPS 3
+#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
+ ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
+#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
+#define RX_NUM_OF_RULES_PER_SA 2
+
+#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
+#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
+#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
+
+/* MACsec RX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
+
+struct mlx5_sectag_header {
+ __be16 ethertype;
+ u8 tci_an;
+ u8 sl;
+ u32 pn;
+ u8 sci[MACSEC_SCI_LEN]; /* optional */
+} __packed;
+
+struct mlx5e_macsec_tx_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u32 fs_id;
+};
+
+struct mlx5e_macsec_tables {
+ struct mlx5e_flow_table ft_crypto;
+ struct mlx5_flow_handle *crypto_miss_rule;
+
+ struct mlx5_flow_table *ft_check;
+ struct mlx5_flow_group *ft_check_group;
+ struct mlx5_fc *check_miss_rule_counter;
+ struct mlx5_flow_handle *check_miss_rule;
+ struct mlx5_fc *check_rule_counter;
+
+ u32 refcnt;
+};
+
+struct mlx5e_macsec_tx {
+ struct mlx5_flow_handle *crypto_mke_rule;
+ struct mlx5_flow_handle *check_rule;
+
+ struct ida tx_halloc;
+
+ struct mlx5e_macsec_tables tables;
+};
+
+struct mlx5e_macsec_rx_rule {
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5e_macsec_rx {
+ struct mlx5_flow_handle *check_rule[2];
+ struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
+
+ struct mlx5e_macsec_tables tables;
+};
+
+union mlx5e_macsec_rule {
+ struct mlx5e_macsec_tx_rule tx_rule;
+ struct mlx5e_macsec_rx_rule rx_rule;
+};
+
+struct mlx5e_macsec_fs {
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5e_macsec_rx *rx_fs;
+};
+
+static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ tx_tables = &tx_fs->tables;
+
+ /* Tx check table */
+ if (tx_fs->check_rule) {
+ mlx5_del_flow_rules(tx_fs->check_rule);
+ tx_fs->check_rule = NULL;
+ }
+
+ if (tx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->check_miss_rule);
+ tx_tables->check_miss_rule = NULL;
+ }
+
+ if (tx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(tx_tables->ft_check_group);
+ tx_tables->ft_check_group = NULL;
+ }
+
+ if (tx_tables->ft_check) {
+ mlx5_destroy_flow_table(tx_tables->ft_check);
+ tx_tables->ft_check = NULL;
+ }
+
+ /* Tx crypto table */
+ if (tx_fs->crypto_mke_rule) {
+ mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
+ tx_fs->crypto_mke_rule = NULL;
+ }
+
+ if (tx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
+ tx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&tx_tables->ft_crypto);
+}
+
+static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow Group for MKE match */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for SA rules */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
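+/* Create an auto-grouped check table with a single group, reserving the last
+ * FTE for the catch-all miss group/rule added by the caller.
+ */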
+static struct mlx5_flow_table
+ *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
+ int level, int max_fte)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb = NULL;
+
+ /* reserve entry for the match all miss group and rule */
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = 0;
+ ft_attr.flags = flags;
+ ft_attr.level = level;
+ ft_attr.max_fte = max_fte;
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+
+ return fdb;
+}
+
+static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_spec;
+ }
+
+ tx_tables = &tx_fs->tables;
+ ft_crypto = &tx_tables->ft_crypto;
+
+ /* Tx crypto table */
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Tx crypto table groups */
+ err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
+ memset(&flow_act, 0, sizeof(flow_act));
+ memset(spec, 0, sizeof(*spec));
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->crypto_mke_rule = rule;
+
+ /* Tx crypto table Default miss rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
+ goto err;
+ }
+ tx_tables->crypto_miss_rule = rule;
+
+ /* Tx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
+ TX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->ft_check = flow_table;
+
+ /* Tx check table Default miss group/rule */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+ tx_tables->ft_check_group = flow_group;
+
+ /* Tx check table default drop rule */
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->check_miss_rule = rule;
+
+ /* Tx check table rule */
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->check_rule = rule;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_tx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
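+/* Tx steering tables are created lazily when the first SA rule takes a
+ * reference and destroyed again once the last reference is dropped.
+ */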
+static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_tx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ tx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+
+ if (--tx_tables->refcnt)
+ return;
+
+ macsec_fs_tx_destroy(macsec_fs);
+}
+
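+/* Allocate a per-SA fs_id and build the crypto FTE: match the MACsec marker
+ * and fs_id in flow table metadata (reg_a) and attach the MACsec crypto
+ * object.
+ */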
+static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ u32 macsec_obj_id,
+ u32 *fs_id)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ int err = 0;
+ u32 id;
+
+ err = ida_alloc_range(&tx_fs->tx_halloc, 1,
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ id = err;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Metadata match */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC | id << 2);
+
+ *fs_id = id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ flow_act->crypto.obj_id = macsec_obj_id;
+
+ mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
+ return 0;
+}
+
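+/* Build the SecTAG template used as the ADD_MACSEC packet reformat data;
+ * TCI bits and the optional SCI follow the secy/tx_sc configuration.
+ */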
+static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
+ char *reformatbf,
+ size_t *reformat_size)
+{
+ const struct macsec_secy *secy = ctx->secy;
+ bool sci_present = macsec_send_sci(secy);
+ struct mlx5_sectag_header sectag = {};
+ const struct macsec_tx_sc *tx_sc;
+
+ tx_sc = &secy->tx_sc;
+ sectag.ethertype = htons(ETH_P_MACSEC);
+
+ if (sci_present) {
+ sectag.tci_an |= MACSEC_TCI_SC;
+ memcpy(&sectag.sci, &secy->sci,
+ sizeof(sectag.sci));
+ } else {
+ if (tx_sc->end_station)
+ sectag.tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ sectag.tci_an |= MACSEC_TCI_SCB;
+ }
+
+ /* With GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ sectag.tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ sectag.tci_an |= MACSEC_TCI_C;
+
+ sectag.tci_an |= tx_sc->encoding_sa;
+
+ *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+
+ memcpy(reformatbf, &sectag, *reformat_size);
+}
+
+static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_tx_rule *tx_rule)
+{
+ if (tx_rule->rule) {
+ mlx5_del_flow_rules(tx_rule->rule);
+ tx_rule->rule = NULL;
+ }
+
+ if (tx_rule->pkt_reformat) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
+ tx_rule->pkt_reformat = NULL;
+ }
+
+ if (tx_rule->fs_id) {
+ ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
+ tx_rule->fs_id = 0;
+ }
+
+ kfree(tx_rule);
+
+ macsec_fs_tx_ft_put(macsec_fs);
+}
+
+static union mlx5e_macsec_rule *
+macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ size_t reformat_size;
+ int err = 0;
+ u32 fs_id;
+
+ tx_tables = &tx_fs->tables;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_tx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_tx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ tx_rule = &macsec_rule->tx_rule;
+
+ /* Tx crypto table crypto rule */
+ macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
+ reformat_params.size = reformat_size;
+ reformat_params.data = reformatbf;
+ flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (IS_ERR(flow_act.pkt_reformat)) {
+ err = PTR_ERR(flow_act.pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
+ goto err;
+ }
+ tx_rule->pkt_reformat = flow_act.pkt_reformat;
+
+ err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+
+ tx_rule->fs_id = fs_id;
+ *sa_fs_id = fs_id;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx_tables->ft_check;
+ rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
+ goto err;
+ }
+ tx_rule->rule = rule;
+
+ goto out_spec;
+
+err:
+ macsec_fs_tx_del_rule(macsec_fs, tx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+
+ return macsec_rule;
+}
+
+static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ if (!tx_fs)
+ return;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
+ tx_tables->refcnt);
+ return;
+ }
+
+ ida_destroy(&tx_fs->tx_halloc);
+
+ if (tx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
+ tx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (tx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+}
+
+static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ tx_tables = &tx_fs->tables;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+ tx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ tx_tables->check_miss_rule_counter = flow_counter;
+
+ ida_init(&tx_fs->tx_halloc);
+
+ macsec_fs->tx_fs = tx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5e_macsec_tables *rx_tables;
+ int i;
+
+ /* Rx check table */
+ for (i = 1; i >= 0; --i) {
+ if (rx_fs->check_rule[i]) {
+ mlx5_del_flow_rules(rx_fs->check_rule[i]);
+ rx_fs->check_rule[i] = NULL;
+ }
+
+ if (rx_fs->check_rule_pkt_reformat[i]) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev,
+ rx_fs->check_rule_pkt_reformat[i]);
+ rx_fs->check_rule_pkt_reformat[i] = NULL;
+ }
+ }
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->check_miss_rule);
+ rx_tables->check_miss_rule = NULL;
+ }
+
+ if (rx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(rx_tables->ft_check_group);
+ rx_tables->ft_check_group = NULL;
+ }
+
+ if (rx_tables->ft_check) {
+ mlx5_destroy_flow_table(rx_tables->ft_check);
+ rx_tables->ft_check = NULL;
+ }
+
+ /* Rx crypto table */
+ if (rx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
+ rx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&rx_tables->ft_crypto);
+}
+
+static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow group for SA rule with SCI */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow group for SA rule without SCI */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
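+/* Add one decap rule to the Rx check table: reformat_param_size selects
+ * between the 16-byte SecTAG with SCI (rule index 0) and the 8-byte SecTAG
+ * without SCI (rule index 1).
+ */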
+static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec,
+ int reformat_param_size)
+{
+ int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
+ u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ rx_tables = &rx_fs->tables;
+
+ /* Rx check table decap rule */
+ memset(dest, 0, sizeof(*dest));
+ memset(flow_act, 0, sizeof(*flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
+ reformat_params.size = reformat_param_size;
+ reformat_params.data = mlx5_reformat_buf;
+ flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (IS_ERR(flow_act->pkt_reformat)) {
+ err = PTR_ERR(flow_act->pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
+ return err;
+ }
+ rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ /* MACsec syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
+ /* ASO return reg syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+ /* Sectag TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ flow_act->flags = FLOW_ACT_NO_APPEND;
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
+ return err;
+ }
+
+ rx_fs->check_rule[rule_index] = rule;
+
+ return 0;
+}
+
+static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto free_spec;
+ }
+
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Rx crypto table */
+ ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Rx crypto table groups */
+ err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add MACsec Rx crypto table default miss rule %d\n",
+ err);
+ goto err;
+ }
+ rx_tables->crypto_miss_rule = rule;
+
+ /* Rx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns,
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
+ RX_CHECK_TABLE_LEVEL,
+ RX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->ft_check = flow_table;
+
+ /* Rx check table Default miss group/rule */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Rx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ rx_tables->ft_check_group = flow_group;
+
+ /* Rx check table default drop rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->check_miss_rule = rule;
+
+ /* Rx check table decap rules */
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
+ if (err)
+ goto err;
+
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
+ if (err)
+ goto err;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_rx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+free_spec:
+ kvfree(spec);
+ return err;
+}
+
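+/* Rx tables follow the same lazy create/destroy scheme as the Tx side: they
+ * exist only while at least one Rx SA rule holds a reference.
+ */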
+static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ int err = 0;
+
+ if (rx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_rx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ rx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+
+ if (--rx_tables->refcnt)
+ return;
+
+ macsec_fs_rx_destroy(macsec_fs);
+}
+
+static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_rx_rule *rx_rule)
+{
+ int i;
+
+ for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
+ if (rx_rule->rule[i]) {
+ mlx5_del_flow_rules(rx_rule->rule[i]);
+ rx_rule->rule[i] = NULL;
+ }
+ }
+
+ if (rx_rule->meta_modhdr) {
+ mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
+ rx_rule->meta_modhdr = NULL;
+ }
+
+ kfree(rx_rule);
+
+ macsec_fs_rx_ft_put(macsec_fs);
+}
+
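+/* Build the match for an Rx SA rule: MACsec ethertype, the SecTAG TCI/AN
+ * byte and, when the SCI is present, its two 32-bit halves in
+ * macsec_tag_2/macsec_tag_3; otherwise fall back to the source MAC.
+ */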
+static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_macsec_rule_attrs *attrs,
+ bool sci_present)
+{
+ u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
+ struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
+ __be32 *sci_p = (__be32 *)(&attrs->sci);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* MACsec ethertype */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ /* Sectag AN + TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (sci_present) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
+ be32_to_cpu(sci_p[0]));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_3);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
+ be32_to_cpu(sci_p[1]));
+ } else {
+ /* When SCI isn't present in the Sectag, need to match the source
+  * MAC address only if the SCI contains the default MACsec PORT
+  */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
+ sci_p, ETH_ALEN);
+ }
+
+ crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ crypto_params->obj_id = attrs->macsec_obj_id;
+}
+
+static union mlx5e_macsec_rule *
+macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 fs_id)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_rx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_rx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ rx_rule = &macsec_rule->rx_rule;
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Set bit[31 - 30] macsec marker - 0x01 */
+ /* Set bit[3-0] fs id */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, fs_id | BIT(30));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto err;
+ }
+ rx_rule->meta_modhdr = modify_hdr;
+
+ /* Rx crypto table with SCI rule */
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[0] = rule;
+
+ /* Rx crypto table without SCI rule */
+ if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) {
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[1] = rule;
+ }
+
+ return macsec_rule;
+
+err:
+ macsec_fs_rx_del_rule(macsec_fs, rx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+ return macsec_rule;
+}
+
+static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx *rx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
+ if (!rx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+
+ rx_tables = &rx_fs->tables;
+ rx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ rx_tables->check_miss_rule_counter = flow_counter;
+
+ macsec_fs->rx_fs = rx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+
+ if (!rx_fs)
+ return;
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
+ rx_tables->refcnt);
+ return;
+ }
+
+ if (rx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
+ rx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (rx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+}
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
+{
+ struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+
+ if (tx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_rule_counter,
+ &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
+
+ if (tx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
+ &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
+
+ if (rx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_rule_counter,
+ &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
+
+ if (rx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
+ &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
+}
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
+ macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+}
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action)
+{
+ (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
+ macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
+}
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ kfree(macsec_fs);
+}
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+ macsec_fs->netdev = netdev;
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto err;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+err:
+ kfree(macsec_fs);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
new file mode 100644
index 000000000000..b429648d4ee7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_MACSEC_STEERING_H__
+#define __MLX5_MACSEC_STEERING_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include "en_accel/macsec.h"
+
+#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
+
+struct mlx5e_macsec_fs;
+union mlx5e_macsec_rule;
+
+struct mlx5_macsec_rule_attrs {
+ sci_t sci;
+ u32 macsec_obj_id;
+ u8 assoc_num;
+ int action;
+};
+
+enum mlx5_macsec_action {
+ MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
+};
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id);
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action);
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
+
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_MACSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
new file mode 100644
index 000000000000..e50a2e3f3d18
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "en_accel/macsec.h"
+
+static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+};
+
+#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(macsec_hw)
+{
+ if (!priv->macsec)
+ return 0;
+
+ if (mlx5e_is_macsec_device(priv->mdev))
+ return NUM_MACSEC_HW_COUNTERS;
+
+ return 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(macsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
+{
+ unsigned int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_macsec_hw_stats_desc[i].format);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
+{
+ int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+ mlx5e_macsec_hw_stats_desc,
+ i);
+
+ return idx;
+}
+
+MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index cd7f245dcf14..0ae1865086ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -114,47 +114,49 @@ static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
}
}
-static int arfs_disable(struct mlx5e_priv *priv)
+static int arfs_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, arfs_get_tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, arfs_get_tt(i), err);
return err;
}
}
return 0;
}
-static void arfs_del_rules(struct mlx5e_priv *priv);
+static void arfs_del_rules(struct mlx5e_flow_steering *fs);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
- arfs_del_rules(priv);
+ arfs_del_rules(fs);
- return arfs_disable(priv);
+ return arfs_disable(fs);
}
-int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
+ dest.ft = arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
- __func__, arfs_get_tt(i), err);
- arfs_disable(priv);
+ fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
+ __func__, arfs_get_tt(i), err);
+ arfs_disable(fs);
return err;
}
}
@@ -167,31 +169,37 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}
-static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
+static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
int i;
- arfs_del_rules(priv);
- destroy_workqueue(priv->fs->arfs->wq);
+ arfs_del_rules(fs);
+ destroy_workqueue(arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&arfs->arfs_tables[i]);
}
}
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
+
+ if (!ntuple)
return;
- _mlx5e_cleanup_tables(priv);
- kvfree(priv->fs->arfs);
+ _mlx5e_cleanup_tables(fs);
+ mlx5e_fs_set_arfs(fs, NULL);
+ kvfree(arfs);
}
-static int arfs_add_default_rule(struct mlx5e_priv *priv,
+static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
+ struct arfs_table *arfs_t = &arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -200,23 +208,21 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
- netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
- __func__, type);
+ fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
return -EINVAL;
}
/* FIXME: Must use mlx5_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
- dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
+ dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
- netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
- __func__, type);
+ fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
}
return err;
@@ -318,10 +324,12 @@ out:
return err;
}
-static int arfs_create_table(struct mlx5e_priv *priv,
+static int arfs_create_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +340,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -343,7 +351,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
if (err)
goto err;
- err = arfs_add_default_rule(priv, type);
+ err = arfs_add_default_rule(fs, rx_res, type);
if (err)
goto err;
@@ -353,35 +361,40 @@ err:
return err;
}
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
{
+ struct mlx5e_arfs_tables *arfs;
int err = -ENOMEM;
int i;
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ if (!ntuple)
return 0;
- priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
- if (!priv->fs->arfs)
+ arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
+ if (!arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs->arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs->arfs->rules);
- priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs->arfs->wq)
+ spin_lock_init(&arfs->arfs_lock);
+ INIT_LIST_HEAD(&arfs->rules);
+ arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!arfs->wq)
goto err;
+ mlx5e_fs_set_arfs(fs, arfs);
+
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- err = arfs_create_table(priv, i);
+ err = arfs_create_table(fs, rx_res, i);
if (err)
goto err_des;
}
return 0;
err_des:
- _mlx5e_cleanup_tables(priv);
+ _mlx5e_cleanup_tables(fs);
err:
- kvfree(priv->fs->arfs);
+ mlx5e_fs_set_arfs(fs, NULL);
+ kvfree(arfs);
return err;
}
@@ -389,6 +402,7 @@ err:
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
HLIST_HEAD(del_list);
@@ -396,8 +410,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs->arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +422,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs->arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -417,20 +431,21 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
}
}
-static void arfs_del_rules(struct mlx5e_priv *priv)
+static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct hlist_node *htmp;
struct arfs_rule *rule;
HLIST_HEAD(del_list);
int i;
int j;
- spin_lock_bh(&priv->fs->arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs->arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +489,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -588,13 +603,15 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
+ struct mlx5e_arfs_tables *arfs;
struct mlx5_flow_handle *rule;
+ arfs = mlx5e_fs_get_arfs(priv->fs);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ spin_lock_bh(&arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs->arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -620,6 +637,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *rule;
struct arfs_tuple *tuple;
@@ -647,7 +665,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,11 +709,12 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
- struct arfs_table *arfs_t;
+ struct mlx5e_arfs_tables *arfs;
struct arfs_rule *arfs_rule;
+ struct arfs_table *arfs_t;
struct flow_keys fk;
+ arfs = mlx5e_fs_get_arfs(priv->fs);
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
@@ -725,7 +744,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
+ queue_work(arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index c0f409c195bf..68f19324db93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -46,8 +46,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
-static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- u32 *mkey)
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index b811207fe5ed..24aa25da482b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -38,18 +38,19 @@
#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
+#include "en/fs_ethtool.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev->board_id);
- strlcpy(drvinfo->bus_info, dev_name(mdev->device),
+ strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
@@ -310,7 +311,15 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param)
{
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ /* Limitation for regular RQ. XSK RQ may clamp the queue length in
+ * mlx5e_mpwqe_get_log_rq_size.
+ */
+ u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev,
+ PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED);
+
+ param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+ max_log_mpwrq_pkts);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
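
[Illustrative sketch, not part of the patch] The get_ringparam hunk above caps rx_max_pending by the striding-RQ per-WQE packet limit instead of always advertising 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE. A small stand-alone sketch of that clamp; the value 13 is only an assumed stand-in for the driver's maximum log RQ size.

#include <stdio.h>

#define LOG_MAX_RQ_SIZE 13	/* stand-in for MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE */

/* Mirrors: 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE, max_log_mpwrq_pkts) */
static unsigned int rx_max_pending(unsigned char max_log_mpwrq_pkts)
{
	unsigned char log = max_log_mpwrq_pkts < LOG_MAX_RQ_SIZE ?
			    max_log_mpwrq_pkts : LOG_MAX_RQ_SIZE;

	return 1U << log;
}

int main(void)
{
	printf("%u\n", rx_max_pending(11));	/* device limit wins: 2048 */
	printf("%u\n", rx_max_pending(16));	/* driver maximum wins: 8192 */
	return 0;
}
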
@@ -494,14 +503,14 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
- mlx5e_arfs_disable(priv);
+ mlx5e_arfs_disable(priv->fs);
/* Switch to new channels, set new parameters and close old ones */
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
- int err2 = mlx5e_arfs_enable(priv);
+ int err2 = mlx5e_arfs_enable(priv->fs);
if (err2)
netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
@@ -1996,10 +2005,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_params new_params;
if (enable) {
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return -EOPNOTSUPP;
- if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
- return -EINVAL;
+ /* Checking the regular RQ here; mlx5e_validate_xsk_param called
+ * from mlx5e_open_xsk will check for each XSK queue, and
+ * mlx5e_safe_switch_params will be reverted if any check fails.
+ */
+ int err = mlx5e_mpwrq_validate_regular(mdev, &priv->channels.params);
+
+ if (err)
+ return err;
} else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e2a9b9be5c1f..1892ccb889b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -36,10 +36,38 @@
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
-#include "en.h"
#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
+#include "en/fs_ethtool.h"
+
+struct mlx5e_flow_steering {
+ struct work_struct set_rx_mode_work;
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_namespace *egress_ns;
+#ifdef CONFIG_MLX5_EN_RXNFC
+ struct mlx5e_ethtool_steering *ethtool;
+#endif
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_promisc_table promisc;
+ struct mlx5e_vlan_table *vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_ttc_table *inner_ttc;
+#ifdef CONFIG_MLX5_EN_ARFS
+ struct mlx5e_arfs_tables *arfs;
+#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_fs_tcp *accel_tcp;
+#endif
+ struct mlx5e_fs_udp *udp;
+ struct mlx5e_fs_any *any;
+ struct mlx5e_ptp_fs *ptp_fs;
+};
static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
@@ -148,9 +176,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- mlx5_core_warn(fs->mdev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
@@ -167,8 +194,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
+ err);
kvfree(vlans);
return err;
@@ -249,7 +276,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
+ fs_err(fs, "%s: add rule failed\n", __func__);
}
return err;
@@ -351,78 +378,78 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
return rule;
}
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs->vlan->trap_rule = NULL;
- mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ fs->vlan->trap_rule = NULL;
+ fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs->vlan->trap_rule = rule;
+ fs->vlan->trap_rule = rule;
return 0;
}
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
{
- if (priv->fs->vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
- priv->fs->vlan->trap_rule = NULL;
+ if (fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(fs->vlan->trap_rule);
+ fs->vlan->trap_rule = NULL;
}
}
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs->l2.trap_rule = NULL;
- mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ fs->l2.trap_rule = NULL;
+ fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs->l2.trap_rule = rule;
+ fs->l2.trap_rule = rule;
return 0;
}
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
{
- if (priv->fs->l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs->l2.trap_rule);
- priv->fs->l2.trap_rule = NULL;
+ if (fs->l2.trap_rule) {
+ mlx5_del_flow_rules(fs->l2.trap_rule);
+ fs->l2.trap_rule = NULL;
}
}
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (!priv->fs->vlan->cvlan_filter_disabled)
+ if (!fs->vlan->cvlan_filter_disabled)
return;
- priv->fs->vlan->cvlan_filter_disabled = false;
- if (priv->netdev->flags & IFF_PROMISC)
+ fs->vlan->cvlan_filter_disabled = false;
+ if (promisc)
return;
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (priv->fs->vlan->cvlan_filter_disabled)
+ if (fs->vlan->cvlan_filter_disabled)
return;
- priv->fs->vlan->cvlan_filter_disabled = true;
- if (priv->netdev->flags & IFF_PROMISC)
+ fs->vlan->cvlan_filter_disabled = true;
+ if (promisc)
return;
- mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
@@ -462,7 +489,7 @@ int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
{
if (!fs->vlan) {
- mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ fs_err(fs, "Vlan doesn't exist\n");
return -EINVAL;
}
@@ -479,7 +506,7 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
__be16 proto, u16 vid)
{
if (!fs->vlan) {
- mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ fs_err(fs, "Vlan doesn't exist\n");
return -EINVAL;
}
@@ -512,28 +539,28 @@ static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
mlx5e_fs_add_any_vid_rules(fs);
}
-static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(priv->fs->state_destroy);
+ WARN_ON_ONCE(fs->state_destroy);
- mlx5e_remove_vlan_trap(priv);
+ mlx5e_remove_vlan_trap(fs);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs->vlan->cvlan_filter_disabled)
- mlx5e_fs_del_any_vid_rules(priv->fs);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
@@ -568,8 +595,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
}
if (l2_err)
- mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del",
+ mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
@@ -640,9 +668,8 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
size++;
if (size > max_size) {
- mlx5_core_warn(fs->mdev,
- "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -658,9 +685,8 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- mlx5_core_err(fs->mdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ fs_err(fs, "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
@@ -730,7 +756,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
+ fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
@@ -750,7 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
+ fs_err(fs, "fail to create promisc table err=%d\n", err);
return err;
}
@@ -807,8 +833,8 @@ void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
if (err)
enable_promisc = false;
if (!fs->vlan_strip_disable && !err)
- mlx5_core_warn_once(fs->mdev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ fs_warn_once(fs,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
@@ -856,14 +882,15 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
ft->t = NULL;
}
-static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
+static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -872,13 +899,14 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
- mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
+ mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss_inner(rx_res,
tt);
}
}
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel)
{
@@ -886,7 +914,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -895,19 +923,19 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
- mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
+ mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss(rx_res, tt);
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
+ mlx5_get_ttc_flow_table(fs->inner_ttc);
}
}
@@ -959,8 +987,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1044,14 +1071,14 @@ err_destroy_groups:
return err;
}
-static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
{
- mlx5e_destroy_flow_table(&priv->fs->l2.ft);
+ mlx5e_destroy_flow_table(&fs->l2.ft);
}
-static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_l2_table *l2_table = &priv->fs->l2;
+ struct mlx5e_l2_table *l2_table = &fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1062,7 +1089,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1221,126 +1248,128 @@ err_destroy_vlan_table:
return err;
}
-static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
+ mlx5e_del_vlan_rules(fs);
+ mlx5e_destroy_flow_table(&fs->vlan->ft);
}
-static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs->inner_ttc);
+ mlx5_destroy_ttc_table(fs->inner_ttc);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
- mlx5_destroy_ttc_table(priv->fs->ttc);
+ mlx5_destroy_ttc_table(fs->ttc);
}
-static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return 0;
- mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs->inner_ttc))
- return PTR_ERR(priv->fs->inner_ttc);
+ mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
+ fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
+ &ttc_params);
+ if (IS_ERR(fs->inner_ttc))
+ return PTR_ERR(fs->inner_ttc);
return 0;
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
- if (IS_ERR(priv->fs->ttc))
- return PTR_ERR(priv->fs->ttc);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
+ if (IS_ERR(fs->ttc))
+ return PTR_ERR(fs->ttc);
return 0;
}
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev)
{
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
int err;
- priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
-
- if (!priv->fs->ns)
+ if (!ns)
return -EOPNOTSUPP;
- err = mlx5e_arfs_create_tables(priv);
+ mlx5e_fs_set_ns(fs, ns, false);
+ err = mlx5e_arfs_create_tables(fs, rx_res,
+ !!(netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
- err);
- priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
+ netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ err = mlx5e_create_inner_ttc_table(fs, rx_res);
if (err) {
- mlx5_core_err(priv->fs->mdev,
- "Failed to create inner ttc table, err=%d\n", err);
+ fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(fs, rx_res);
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create ttc table, err=%d\n", err);
goto err_destroy_inner_ttc_table;
}
- err = mlx5e_create_l2_table(priv);
+ err = mlx5e_create_l2_table(fs);
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create l2 table, err=%d\n", err);
goto err_destroy_ttc_table;
}
- err = mlx5e_fs_create_vlan_table(priv->fs);
+ err = mlx5e_fs_create_vlan_table(fs);
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create vlan table, err=%d\n", err);
goto err_destroy_l2_table;
}
- err = mlx5e_ptp_alloc_rx_fs(priv);
+ err = mlx5e_ptp_alloc_rx_fs(fs, profile);
if (err)
goto err_destory_vlan_table;
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(fs);
return 0;
err_destory_vlan_table:
- mlx5e_destroy_vlan_table(priv);
+ mlx5e_destroy_vlan_table(fs);
err_destroy_l2_table:
- mlx5e_destroy_l2_table(priv);
+ mlx5e_destroy_l2_table(fs);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(fs);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv);
+ mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile)
{
- mlx5e_ptp_free_rx_fs(priv);
- mlx5e_destroy_vlan_table(priv);
- mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
- mlx5e_arfs_destroy_tables(priv);
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_ptp_free_rx_fs(fs, profile);
+ mlx5e_destroy_vlan_table(fs);
+ mlx5e_destroy_l2_table(fs);
+ mlx5e_destroy_ttc_table(fs);
+ mlx5e_destroy_inner_ttc_table(fs);
+ mlx5e_arfs_destroy_tables(fs, ntuple);
+ mlx5e_ethtool_cleanup_steering(fs);
}
static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
@@ -1356,6 +1385,11 @@ static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
kvfree(fs->vlan);
}
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
+{
+ return fs->vlan;
+}
+
static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
{
fs->tc = mlx5e_tc_table_alloc();
@@ -1369,6 +1403,32 @@ static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
mlx5e_tc_table_free(fs->tc);
}
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
+{
+ return fs->tc;
+}
+
+#ifdef CONFIG_MLX5_EN_RXNFC
+static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
+{
+ return mlx5e_ethtool_alloc(&fs->ethtool);
+}
+
+static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_ethtool_free(fs->ethtool);
+}
+
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
+{
+ return fs->ethtool;
+}
+#else
+static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
+{ return 0; }
+static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
+#endif
+
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy)
@@ -1394,18 +1454,126 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
goto err_free_vlan;
}
+ err = mlx5e_fs_ethtool_alloc(fs);
+ if (err)
+ goto err_free_tc;
+
return fs;
-err_free_fs:
- kvfree(fs);
+err_free_tc:
+ mlx5e_fs_tc_free(fs);
err_free_vlan:
mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
err:
return NULL;
}
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
+ mlx5e_fs_ethtool_free(fs);
mlx5e_fs_tc_free(fs);
mlx5e_fs_vlan_free(fs);
kvfree(fs);
}
+
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
+{
+ return &fs->l2;
+}
+
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
+{
+ return egress ? fs->egress_ns : fs->ns;
+}
+
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
+{
+ if (!egress)
+ fs->ns = ns;
+ else
+ fs->egress_ns = ns;
+}
+
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
+{
+ return inner ? fs->inner_ttc : fs->ttc;
+}
+
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
+{
+ if (!inner)
+ fs->ttc = ttc;
+ else
+ fs->inner_ttc = ttc;
+}
+
+#ifdef CONFIG_MLX5_EN_ARFS
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
+{
+ return fs->arfs;
+}
+
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
+{
+ fs->arfs = arfs;
+}
+#endif
+
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
+{
+ return fs->ptp_fs;
+}
+
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
+{
+ fs->ptp_fs = ptp_fs;
+}
+
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
+{
+ return fs->any;
+}
+
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
+{
+ fs->any = any;
+}
+
+#ifdef CONFIG_MLX5_EN_TLS
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
+{
+ return fs->accel_tcp;
+}
+
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
+{
+ fs->accel_tcp = accel_tcp;
+}
+#endif
+
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
+{
+ fs->state_destroy = state_destroy;
+}
+
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
+ bool vlan_strip_disable)
+{
+ fs->vlan_strip_disable = vlan_strip_disable;
+}
+
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
+{
+ return fs->udp;
+}
+
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
+{
+ fs->udp = udp;
+}
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
+{
+ return fs->mdev;
+}
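
[Illustrative sketch, not part of the patch] en_fs.c now keeps the struct mlx5e_flow_steering definition private and exports only mlx5e_fs_get_*/mlx5e_fs_set_* accessors, so other files handle an opaque pointer. A compact single-file sketch of that opaque-handle pattern, with hypothetical names standing in for the real API:

#include <stdio.h>
#include <stdlib.h>

/* What a "header" would expose: forward declaration plus accessors only. */
struct fs_handle;
static struct fs_handle *fs_init(void);
static void fs_cleanup(struct fs_handle *fs);
static int fs_get_counter(const struct fs_handle *fs);
static void fs_set_counter(struct fs_handle *fs, int v);

/* What the owning .c file keeps private: the actual layout. */
struct fs_handle { int counter; };

static struct fs_handle *fs_init(void)
{
	return calloc(1, sizeof(struct fs_handle));
}

static void fs_cleanup(struct fs_handle *fs)
{
	free(fs);
}

static int fs_get_counter(const struct fs_handle *fs)
{
	return fs->counter;
}

static void fs_set_counter(struct fs_handle *fs, int v)
{
	fs->counter = v;
}

int main(void)
{
	struct fs_handle *fs = fs_init();

	if (!fs)
		return 1;
	fs_set_counter(fs, 42);		/* callers never touch fs->counter directly */
	printf("%d\n", fs_get_counter(fs));
	fs_cleanup(fs);
	return 0;
}
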
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3e4bc7836ef4..aac32e505c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -34,6 +34,22 @@
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
+#include "en/fs_ethtool.h"
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
static int flow_type_to_traffic_type(u32 flow_type);
@@ -66,6 +82,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
@@ -81,18 +98,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
+ eth_ft = &ethtool->l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
+ eth_ft = &ethtool->l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs->ethtool.l2_ft[prio];
+ eth_ft = &ethtool->l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -382,15 +399,16 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
+ struct list_head *head = &ethtool->rules;
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs->ethtool.rules;
- list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
+ list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs->ethtool.tot_num_rules++;
+ ethtool->tot_num_rules++;
list_add(&rule->list, head);
}
@@ -433,15 +451,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
eth_rule->rss = rss;
mlx5e_rss_refcnt_inc(eth_rule->rss);
} else {
- struct mlx5e_params *params = &priv->channels.params;
- enum mlx5e_rq_group group;
- u16 ix;
-
- mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
-
- *tirn = group == MLX5E_RQ_GROUP_XSK ?
- mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
+ *tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie);
}
return 0;
@@ -499,15 +509,16 @@ free:
return err ? ERR_PTR(err) : rule;
}
-static void del_ethtool_rule(struct mlx5e_priv *priv,
+static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_ethtool_rule *eth_rule)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
if (eth_rule->rule)
mlx5_del_flow_rules(eth_rule->rule);
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs->ethtool.tot_num_rules--;
+ ethtool->tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -515,9 +526,10 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
+ list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -531,7 +543,7 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
eth_rule = find_ethtool_rule(priv, location);
if (eth_rule)
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
if (!eth_rule)
@@ -662,8 +674,7 @@ static int validate_flow(struct mlx5e_priv *priv,
return -ENOSPC;
if (fs->ring_cookie != RX_CLS_FLOW_DISC)
- if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
- fs->ring_cookie))
+ if (fs->ring_cookie >= priv->channels.params.num_channels)
return -EINVAL;
switch (flow_type_mask(fs->flow_type)) {
@@ -754,7 +765,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
return 0;
del_ethtool_rule:
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
return err;
}
@@ -774,7 +785,7 @@ mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
goto out;
}
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
out:
return err;
}
@@ -783,12 +794,13 @@ static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, int location)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *eth_rule;
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &ethtool->rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -826,18 +838,34 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
return err;
}
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{
+ *ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
+ if (!*ethtool)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
+{
+ kvfree(ethtool);
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
+{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
- del_ethtool_rule(priv, iter);
+ list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
+ del_ethtool_rule(fs, iter);
}
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
- INIT_LIST_HEAD(&priv->fs->ethtool.rules);
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
+
+ INIT_LIST_HEAD(&ethtool->rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -959,11 +987,12 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs->ethtool.tot_num_rules;
+ info->rule_cnt = ethtool->tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
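
[Illustrative sketch, not part of the patch] The new mlx5e_ethtool_alloc()/mlx5e_ethtool_free() pair above hands the allocation back through a double pointer so mlx5e_fs_init() can stash it inside the opaque flow-steering struct. A user-space sketch of that out-parameter allocation style; the type and error value below are simplified assumptions, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct ethtool_steering { int tot_num_rules; };

/* Mirrors mlx5e_ethtool_alloc(): zero-initialised allocation via an out parameter. */
static int ethtool_alloc(struct ethtool_steering **ethtool)
{
	*ethtool = calloc(1, sizeof(**ethtool));
	if (!*ethtool)
		return -1;	/* -ENOMEM in the driver */
	return 0;
}

static void ethtool_free(struct ethtool_steering *ethtool)
{
	free(ethtool);		/* kvfree() in the driver */
}

int main(void)
{
	struct ethtool_steering *et;

	if (ethtool_alloc(&et))
		return 1;
	printf("rules at init: %d\n", et->tot_num_rules);
	ethtool_free(et);
	return 0;
}
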
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d858667736a3..364f04309149 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,6 +45,7 @@
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/vxlan.h"
@@ -67,22 +68,25 @@
#include "qos.h"
#include "en/trap.h"
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- bool striding_rq_umr, inline_umr;
- u16 max_wqe_sz_cap;
+ u16 umr_wqebbs, max_wqebbs;
+ bool striding_rq_umr;
striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
- max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
- inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
if (!striding_rq_umr)
return false;
- if (!inline_umr) {
- mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
- (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+ max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+ /* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
+ * calculated from mlx5e_get_max_sq_aligned_wqebbs.
+ */
+ if (WARN_ON(umr_wqebbs > max_wqebbs))
return false;
- }
+
return true;
}
@@ -199,21 +203,35 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
}
+static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+
+ WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+
+ return entries * umr_entry_size / MLX5_OCTWORD;
+}
+
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
+ u16 octowords;
+ u8 ds_cnt;
+
+ ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode),
+ MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
- cseg->umr_mkey = rq->mkey_be;
+ cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- ucseg->xlt_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
+ ucseg->xlt_octowords = cpu_to_be16(octowords);
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
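
[Illustrative sketch, not part of the patch] mlx5e_mpwrq_umr_octowords() above converts a count of UMR translation entries into 16-byte octowords, warning if the product is not octoword-aligned. A worked example, assuming an 8-byte MTT entry, a 16-byte KSM entry and MLX5_OCTWORD == 16 (entry sizes assumed here, not quoted from the patch):

#include <assert.h>
#include <stdio.h>

#define OCTWORD 16	/* bytes; stand-in for MLX5_OCTWORD */

static unsigned int umr_octowords(unsigned int entries, unsigned int entry_size)
{
	assert(entries * entry_size % OCTWORD == 0);	/* WARN_ON_ONCE() in the driver */
	return entries * entry_size / OCTWORD;
}

int main(void)
{
	/* e.g. 64 pages per MPWQE: 8-byte MTT entries need half as many
	 * octowords as 16-byte KSM entries for the same page count. */
	printf("MTT: %u octowords\n", umr_octowords(64, 8));	/* 32 */
	printf("KSM: %u octowords\n", umr_octowords(64, 16));	/* 64 */
	return 0;
}
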
@@ -259,10 +277,12 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ size_t alloc_size;
+
+ alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, alloc_units,
+ rq->mpwqe.pages_per_wqe));
- rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
- sizeof(*rq->mpwqe.info)),
- GFP_KERNEL, node);
+ rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -271,18 +291,52 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
-static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift, u32 *umr_mkey,
- dma_addr_t filler_addr)
+
+static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return MLX5_MKC_ACCESS_MODE_MTT;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return MLX5_MKC_ACCESS_MODE_KSM;
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ return MLX5_MKC_ACCESS_MODE_KLMS;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ return MLX5_MKC_ACCESS_MODE_KSM;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+ return 0;
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
+ u32 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr,
+ enum mlx5e_mpwrq_umr_mode umr_mode,
+ u32 xsk_chunk_size)
{
struct mlx5_mtt *mtt;
+ struct mlx5_ksm *ksm;
+ struct mlx5_klm *klm;
+ u32 octwords;
int inlen;
void *mkc;
u32 *in;
int err;
int i;
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
+ if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
+ umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
+ !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
+ mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
+ return -EINVAL;
+ }
+
+ octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
+
+ inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+ MLX5_OCTWORD, octwords);
+ if (inlen < 0)
+ return inlen;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
@@ -294,16 +348,17 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
- MLX5_SET(mkc, mkc, translations_octword_size,
- MLX5_MTT_OCTW(npages));
- MLX5_SET(mkc, mkc, log_page_size, page_shift);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, translations_octword_size, octwords);
+ if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
+ MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
+ else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
/* Initialize the mkey with all MTTs pointing to a default
* page (filler_addr). When the channels are activated, UMR
@@ -311,9 +366,47 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
* the RQ's pool, while the gaps (wqe_overflow) remain mapped
* to the default page.
*/
- mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for (i = 0 ; i < npages ; i++)
- mtt[i].ptag = cpu_to_be64(filler_addr);
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++) {
+ klm[i << 1] = (struct mlx5_klm) {
+ .va = cpu_to_be64(filler_addr),
+ .bcount = cpu_to_be32(xsk_chunk_size),
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ };
+ klm[(i << 1) + 1] = (struct mlx5_klm) {
+ .va = cpu_to_be64(filler_addr),
+ .bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ };
+ }
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ mtt[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(filler_addr),
+ };
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages * 4; i++) {
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ }
+ break;
+ }
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
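
[Illustrative sketch, not part of the patch] In the MLX5E_MPWRQ_UMR_MODE_OVERSIZED branch above, each page receives two KLM entries: one covering xsk_chunk_size bytes and one covering the remainder of the page, both initially pointing at the filler page. A small sketch of that per-page byte-count split, using purely hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 1U << 12;	/* page_shift = 12, assumed for the example */
	unsigned int xsk_chunk = 3072;		/* hypothetical chunk size */
	unsigned int i;

	/* klm[2*i] covers the chunk, klm[2*i + 1] covers the rest of the page. */
	for (i = 0; i < 2; i++)
		printf("page %u: klm[%u].bcount = %u, klm[%u].bcount = %u\n",
		       i, 2 * i, xsk_chunk, 2 * i + 1, page_size - xsk_chunk);
	return 0;
}
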
@@ -356,10 +449,27 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
+ u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
+ u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ u32 num_entries, max_num_entries;
+ u32 umr_mkey;
+ int err;
+
+ max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
+
+ /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
+ if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
+ &num_entries) ||
+ num_entries > max_num_entries))
+ mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
+ __func__, wq_size, rq->mpwqe.mtts_per_wqe,
+ max_num_entries);
- return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
- &rq->umr_mkey, rq->wqe_overflow.addr);
+ err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
+ &umr_mkey, rq->wqe_overflow.addr,
+ rq->mpwqe.umr_mode, xsk_chunk_size);
+ rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
+ return err;
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
@@ -376,18 +486,20 @@ static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
&rq->mpwqe.shampo->mkey);
}
-static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
-{
- return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
-}
-
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
struct mlx5e_wqe_frag_info *prev = NULL;
int i;
- next_frag.di = &rq->wqe.di[0];
+ if (rq->xsk_pool) {
+ /* Assumptions used by XSK batched allocator. */
+ WARN_ON(rq->wqe.info.num_frags != 1);
+ WARN_ON(rq->wqe.info.log_num_frags != 0);
+ WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
+ }
+
+ next_frag.au = &rq->wqe.alloc_units[0];
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -397,7 +509,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
- next_frag.di++;
+ next_frag.au++;
next_frag.offset = 0;
if (prev)
prev->last_in_page = true;
@@ -414,12 +526,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
- rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
- if (!rq->wqe.di)
+ rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
+ GFP_KERNEL, node);
+ if (!rq->wqe.alloc_units)
return -ENOMEM;
mlx5e_init_frags_partition(rq);
@@ -427,9 +540,9 @@ int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
return 0;
}
-void mlx5e_free_di_list(struct mlx5e_rq *rq)
+static void mlx5e_free_au_list(struct mlx5e_rq *rq)
{
- kvfree(rq->wqe.di);
+ kvfree(rq->wqe.alloc_units);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
@@ -485,7 +598,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -572,6 +685,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
@@ -587,8 +702,20 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
- mlx5e_mpwqe_get_log_rq_size(params, xsk);
+ rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ rq->mpwqe.pages_per_wqe =
+ mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+ rq->mpwqe.umr_wqebbs =
+ mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+ rq->mpwqe.mtts_per_wqe =
+ mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+
+ pool_size = rq->mpwqe.pages_per_wqe <<
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
@@ -600,7 +727,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
@@ -608,7 +734,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
if (err)
- goto err_free_by_rq_type;
+ goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -633,11 +759,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, node);
+ err = mlx5e_init_au_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
-
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@ -662,14 +786,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
- goto err_free_shampo;
+ goto err_free_by_rq_type;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
- goto err_free_shampo;
+ goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -677,13 +801,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
- u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
+ rq->mpwqe.page_shift;
u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
0 : rq->buff.headroom;
wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
- wqe->data[0].lkey = rq->mkey_be;
+ wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
} else {
struct mlx5e_rx_wqe_cyc *wqe =
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
@@ -721,19 +846,21 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
return 0;
-err_free_shampo:
- mlx5e_rq_free_shampo(rq);
+err_destroy_page_pool:
+ page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
err_rq_frags:
kvfree(rq->wqe.frags);
}
@@ -761,24 +888,22 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
- struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
/* With AF_XDP, page_cache is not used, so this loop is not
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
- mlx5e_page_release_dynamic(rq, dma_info->page, false);
+ mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -833,7 +958,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
return err;
}
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -862,6 +987,32 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
return err;
}
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+ struct net_device *dev = rq->netdev;
+ int err;
+
+ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+ return err;
+ }
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
+{
+ mlx5e_free_rx_descs(rq);
+
+ return mlx5e_rq_to_ready(rq, curr_state);
+}
+
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1154,9 +1305,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
- sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
- sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
+ sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
+ mlx5e_stop_room_for_max_wqe(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1231,7 +1382,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
- sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1317,8 +1467,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -2280,7 +2429,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
- netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2318,10 +2467,11 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_icosq(&c->async_icosq);
- mlx5e_activate_rq(&c->rq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
+ else
+ mlx5e_activate_rq(&c->rq);
mlx5e_trigger_napi_icosq(c);
}
@@ -2332,8 +2482,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
+ else
+ mlx5e_deactivate_rq(&c->rq);
- mlx5e_deactivate_rq(&c->rq);
mlx5e_deactivate_icosq(&c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
@@ -2425,8 +2576,6 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
mlx5e_ptp_activate_channel(chs->ptp);
}
-#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
-
static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
int err = 0;
@@ -2434,8 +2583,12 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++) {
int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
+ struct mlx5e_channel *c = chs->c[i];
- err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ continue;
+
+ err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
/* Don't wait on the XSK RQ, because the newer xdpsock sample
* doesn't provide any Fill Ring entries at the setup stage.
@@ -2600,7 +2753,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
int old_num_txqs, old_ntc;
- int num_rxqs, nch, ntc;
+ int nch, ntc;
int err;
int i;
@@ -2611,7 +2764,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.mqprio.num_tc;
- num_rxqs = nch * priv->profile->rq_groups;
tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
@@ -2620,7 +2772,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
- err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+ err = netif_set_real_num_rx_queues(netdev, nch);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
@@ -2738,7 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
- mlx5e_add_sqs_fwd_rules(priv);
+ mlx5e_rep_activate_channels(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2752,7 +2904,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
- mlx5e_remove_sqs_fwd_rules(priv);
+ mlx5e_rep_deactivate_channels(priv);
/* The results of ndo_select_queue are unreliable, while netdev config
* is being changed (real_num_tx_queues, num_tc). Stop all queues to
@@ -3547,7 +3699,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
- PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+ PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
+ VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
stats->rx_crc_errors =
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
@@ -3669,9 +3822,11 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
- mlx5e_enable_cvlan_filter(priv);
+ mlx5e_enable_cvlan_filter(priv->fs,
+ !!(priv->netdev->flags & IFF_PROMISC));
else
- mlx5e_disable_cvlan_filter(priv);
+ mlx5e_disable_cvlan_filter(priv->fs,
+ !!(priv->netdev->flags & IFF_PROMISC));
return 0;
}
@@ -3682,7 +3837,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+ MLX5_TC_FLAG(NIC_OFFLOAD);
+ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -3778,7 +3935,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
- priv->fs->vlan_strip_disable = !enable;
+ mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
priv->channels.params.vlan_strip_disable = !enable;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -3786,7 +3943,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
if (err) {
- priv->fs->vlan_strip_disable = enable;
+ mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
priv->channels.params.vlan_strip_disable = enable;
}
unlock:
@@ -3824,9 +3981,9 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
int err;
if (enable)
- err = mlx5e_arfs_enable(priv);
+ err = mlx5e_arfs_enable(priv->fs);
else
- err = mlx5e_arfs_disable(priv);
+ err = mlx5e_arfs_disable(priv->fs);
return err;
}
@@ -3910,12 +4067,14 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
+ vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs->vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
+ if (!vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header; this is a problem
* for S-tag traffic.
*/
@@ -4004,7 +4163,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
* 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
- max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
+ max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
max_mtu = min(max_mtu_frame, max_mtu_page);
netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
@@ -4016,14 +4175,16 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
-static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
+static bool mlx5e_params_validate_xdp(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
bool is_linear;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
- is_linear = mlx5e_rx_is_linear_skb(params, NULL);
+ is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
@@ -4060,7 +4221,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
- if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
+ if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
+ &new_params)) {
err = -EINVAL;
goto out;
}
@@ -4075,19 +4237,21 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
&new_params, NULL);
- u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
- u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
+ u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
+ u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
/* Always reset in linear mode - hw_mtu is used in data path.
* Check that the mode was non-linear and didn't change.
* If XSK is active, XSK RQs are linear.
+ * Reset if the RQ size changed, even if it's non-linear.
*/
if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
- ppw_old == ppw_new)
+ sz_old == sz_new)
reset = false;
}
@@ -4537,7 +4701,7 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- if (!mlx5e_params_validate_xdp(netdev, &new_params))
+ if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
return -EINVAL;
return 0;
@@ -4575,8 +4739,20 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- if (reset)
- mlx5e_set_rq_type(priv->mdev, &new_params);
+
+ /* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
+ * supported with the new parameters: if PAGE_SIZE exceeds the maximum
+ * stride size (2^MLX5_MPWQE_LOG_STRIDE_SZ_MAX), striding RQ can't be
+ * used, even though
+ * the MTU is small enough for the linear mode, because XDP uses strides
+ * of PAGE_SIZE on regular RQs.
+ */
+ if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ /* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
+ err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
+ if (err)
+ goto unlock;
+ }
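/* Illustration with assumed values (not taken from this patch): on a
 * 64 KiB-page system an XDP stride would be PAGE_SIZE = 64 KiB, larger
 * than the biggest stride the device can encode
 * (2^MLX5_MPWQE_LOG_STRIDE_SZ_MAX, commonly 8 KiB), so the validation
 * above is expected to fail and the XDP program is refused here instead
 * of at channel open time.
 */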
+
old_prog = priv->channels.params.xdp_prog;
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
@@ -4769,14 +4945,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
- /* HW LRO */
- if (MLX5_CAP_ETH(mdev, lro_cap) &&
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- /* No XSK params: checking the availability of striding RQ in general. */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->packet_merge.type = slow_pci_heuristic(mdev) ?
- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
- }
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
@@ -4904,7 +5072,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev))
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
@@ -4992,6 +5161,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_netdev_dev_addr(netdev);
+ mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
mlx5e_ktls_build_netdev(priv);
}
@@ -5093,7 +5263,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
+ features = MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
@@ -5103,7 +5273,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_flow_steering(priv);
+ err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
+ priv->netdev);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_rx_res;
@@ -5126,7 +5297,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
@@ -5142,7 +5314,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -5194,9 +5367,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
+ err = mlx5e_macsec_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
+
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
@@ -5254,6 +5432,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
+ mlx5e_macsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5275,7 +5454,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
@@ -5308,8 +5486,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
- tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
- max_nch = min_t(unsigned int, max_nch, tmp);
+ max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);
/* netdev tx queues */
tmp = netdev->num_tx_queues;
@@ -5453,11 +5630,7 @@ static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
- unsigned int nch;
-
- nch = mlx5e_profile_max_num_channels(mdev, profile);
-
- return nch * profile->rq_groups;
+ return mlx5e_profile_max_num_channels(mdev, profile);
}
struct net_device *
@@ -5518,7 +5691,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
- priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5579,7 +5753,8 @@ out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
- priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5590,7 +5765,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
- priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (profile->disable)
profile->disable(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4c1599de652c..794cd8dfe9c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -56,6 +56,7 @@
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
+#include "en/fs_ethtool.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -69,7 +70,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ strscpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
@@ -397,7 +398,8 @@ out_err:
return err;
}
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+static int
+mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -451,7 +453,8 @@ out:
return err;
}
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+static void
+mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -460,6 +463,49 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
mlx5e_sqs2vport_stop(esw, rep);
}
+static int
+mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_group *g;
+
+ g = esw->fdb_table.offloads.send_to_vport_meta_grp;
+ if (!g)
+ return 0;
+
+ flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ rpriv->send_to_vport_meta_rule = flow_rule;
+
+ return 0;
+}
+
+static void
+mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (rpriv->send_to_vport_meta_rule)
+ mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
+}
+
+void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_add_sqs_fwd_rules(priv);
+ mlx5e_rep_add_meta_tunnel_rule(priv);
+}
+
+void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_rep_del_meta_tunnel_rule(priv);
+ mlx5e_remove_sqs_fwd_rules(priv);
+}
+
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -662,6 +708,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -696,6 +744,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -708,12 +763,21 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
@@ -729,19 +793,20 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ mlx5e_fs_set_ns(priv->fs,
+ mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs->ttc)) {
- err = PTR_ERR(priv->fs->ttc);
+ mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
+ if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
+ err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -761,7 +826,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non-uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
return 0;
}
@@ -836,13 +901,6 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
- if (!priv->fs) {
- netdev_err(priv->netdev, "FS allocation failed\n");
- return -ENOMEM;
- }
-
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
@@ -876,14 +934,14 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_root_ft;
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs->ttc);
+ mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
@@ -897,10 +955,10 @@ err_free_fs:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_ethtool_cleanup_steering(priv->fs);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs->ttc);
+ mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
@@ -1166,7 +1224,6 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
.max_nch_limit = mlx5e_rep_max_nch_limit,
@@ -1186,8 +1243,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
- /* XSK is needed so we can replace profile with NIC netdev */
- .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index dec183ccd4ac..b4e691760da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -111,6 +111,7 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
+ struct mlx5_flow_handle *send_to_vport_meta_rule;
struct rhashtable tc_ht;
};
@@ -241,8 +242,8 @@ int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp);
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_rep_activate_channels(struct mlx5e_priv *priv);
+void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
@@ -256,8 +257,8 @@ static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_activate_channels(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 24de37b79f5a..58084650151f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -41,6 +41,7 @@
#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
@@ -49,6 +50,7 @@
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
@@ -237,69 +239,61 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
return false;
}
- cache->page_cache[cache->tail].page = page;
- cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
+ cache->page_cache[cache->tail] = page;
cache->tail = tail_next;
return true;
}
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
struct mlx5e_rq_stats *stats = rq->stats;
+ dma_addr_t addr;
if (unlikely(cache->head == cache->tail)) {
stats->cache_empty++;
return false;
}
- if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ if (page_ref_count(cache->page_cache[cache->head]) != 1) {
stats->cache_busy++;
return false;
}
- *dma_info = cache->page_cache[cache->head];
+ au->page = cache->page_cache[cache->head];
cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
stats->cache_reuse++;
- dma_sync_single_for_device(rq->pdev, dma_info->addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ addr = page_pool_get_dma_addr(au->page);
+ /* Non-XSK always uses PAGE_SIZE. */
+ dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
return true;
}
-static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
- if (mlx5e_rx_cache_get(rq, dma_info))
+ dma_addr_t addr;
+
+ if (mlx5e_rx_cache_get(rq, au))
return 0;
- dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!dma_info->page))
+ au->page = page_pool_dev_alloc_pages(rq->page_pool);
+ if (unlikely(!au->page))
return -ENOMEM;
- dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
- rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- page_pool_recycle_direct(rq->page_pool, dma_info->page);
- dma_info->page = NULL;
+ /* Non-XSK always uses PAGE_SIZE. */
+ addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
+ rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(rq->pdev, addr))) {
+ page_pool_recycle_direct(rq->page_pool, au->page);
+ au->page = NULL;
return -ENOMEM;
}
- page_pool_set_dma_addr(dma_info->page, dma_info->addr);
+ page_pool_set_dma_addr(au->page, addr);
return 0;
}
-static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- if (rq->xsk_pool)
- return mlx5e_xsk_page_alloc_pool(rq, dma_info);
- else
- return mlx5e_page_alloc_pool(rq, dma_info);
-}
-
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
dma_addr_t dma_addr = page_pool_get_dma_addr(page);
@@ -324,32 +318,18 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool rec
}
}
-static inline void mlx5e_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle)
-{
- if (rq->xsk_pool)
- /* The `recycle` parameter is ignored, and the page is always
- * put into the Reuse Ring, because there is no way to return
- * the page to the userspace when the interface goes down.
- */
- xsk_buff_free(dma_info->xsk);
- else
- mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
-}
-
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
int err = 0;
if (!frag->offset)
- /* On first frag (offset == 0), replenish page (dma_info actually).
- * Other frags that point to the same dma_info (with a different
+ /* On first frag (offset == 0), replenish page (alloc_unit actually).
+ * Other frags that point to the same alloc_unit (with a different
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc(rq, frag->di);
+ err = mlx5e_page_alloc_pool(rq, frag->au);
return err;
}
@@ -359,7 +339,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, recycle);
+ mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -375,6 +355,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+ dma_addr_t addr;
u16 headroom;
err = mlx5e_get_rx_frag(rq, frag);
@@ -382,8 +363,8 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
goto free_frags;
headroom = i == 0 ? rq->buff.headroom : 0;
- wqe->data[i].addr = cpu_to_be64(frag->di->addr +
- frag->offset + headroom);
+ addr = page_pool_get_dma_addr(frag->au->page);
+ wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
return 0;
@@ -401,6 +382,15 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
{
int i;
+ if (rq->xsk_pool) {
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ xsk_buff_free(wi->au->xsk);
+ return;
+ }
+
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
mlx5e_put_rx_frag(rq, wi, recycle);
}
@@ -412,84 +402,76 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
mlx5e_free_rx_wqe(rq, wi, false);
}
-static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
+static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- int err;
int i;
- if (rq->xsk_pool) {
- int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
-
- /* Check in advance that we have enough frames, instead of
- * allocating one-by-one, failing and moving frames to the
- * Reuse Ring.
- */
- if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
- return -ENOMEM;
- }
-
for (i = 0; i < wqe_bulk; i++) {
- struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
-
- err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
- if (unlikely(err))
- goto free_wqes;
- }
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_rx_wqe_cyc *wqe;
- return 0;
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
-free_wqes:
- while (--i >= 0)
- mlx5e_dealloc_rx_wqe(rq, ix + i);
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
+ break;
+ }
- return err;
+ return i;
}
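/* Note on the new contract (derived from the mlx5e_post_rx_wqes hunk
 * below): instead of unwinding on failure, the function now returns how
 * many WQEs it managed to fill; the caller pushes exactly that many and
 * bumps buff_alloc_err when the count falls short of wqe_bulk.
 */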
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
+ union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_sync_single_for_cpu(rq->pdev,
- di->addr + frag_offset,
- len, DMA_FROM_DEVICE);
- page_ref_inc(di->page);
+ dma_addr_t addr = page_pool_get_dma_addr(au->page);
+
+ dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+ page_ref_inc(au->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- di->page, frag_offset, len, truesize);
+ au->page, frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
- struct mlx5e_dma_info *dma_info,
+ struct page *page, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(dma_info->page) + offset_from;
+ const void *from = page_address(page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, from, len);
}
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
+ union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
bool no_xdp_xmit;
- struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
int i;
/* A common case for AF_XDP. */
- if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
+ if (bitmap_full(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe))
return;
- no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
- MLX5_MPWRQ_PAGES_PER_WQE);
+ no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
- if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], recycle);
+ if (rq->xsk_pool) {
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ xsk_buff_free(alloc_units[i].xsk);
+ } else {
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
+ }
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
@@ -574,11 +556,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
- err = mlx5e_page_alloc(rq, dma_info);
+ union mlx5e_alloc_unit au;
+
+ err = mlx5e_page_alloc_pool(rq, &au);
if (unlikely(err))
goto err_unmap;
- addr = dma_info->addr;
- page = dma_info->page;
+ page = dma_info->page = au.page;
+ addr = dma_info->addr = page_pool_get_dma_addr(au.page);
} else {
dma_info->addr = addr + header_offset;
dma_info->page = page;
@@ -611,7 +595,7 @@ err_unmap:
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release(rq, dma_info, true);
+ mlx5e_page_release_dynamic(rq, dma_info->page, true);
}
}
rq->stats->buff_alloc_err++;
@@ -659,57 +643,55 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+ union mlx5e_alloc_unit *au = &wi->alloc_units[0];
struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
+ u32 offset; /* 17-bit value with MTT. */
u16 pi;
int err;
int i;
- /* Check in advance that we have enough frames, instead of allocating
- * one-by-one, failing and moving frames to the Reuse Ring.
- */
- if (rq->xsk_pool &&
- unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
- err = -ENOMEM;
- goto err;
- }
-
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
err = mlx5e_alloc_rx_hd_mpwqe(rq);
if (unlikely(err))
goto err;
}
- pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
+ pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
+
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
+ dma_addr_t addr;
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
- err = mlx5e_page_alloc(rq, dma_info);
+ err = mlx5e_page_alloc_pool(rq, au);
if (unlikely(err))
goto err_unmap;
- umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+ addr = page_pool_get_dma_addr(au->page);
+ umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(addr | MLX5_EN_WR),
+ };
}
- bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->uctrl.xlt_offset =
- cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
+
+ offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
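/* Worked example with assumed sizes (not taken from this patch): with
 * 64 MTTs per WQE, an 8-byte struct mlx5_mtt and a 16-byte MLX5_OCTWORD,
 * the offset above is ix * 64 * 8 / 16 = ix * 32, i.e. each MPWQE's MTT
 * block starts 32 octwords after the previous one's in the UMR MKey.
 */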
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
- .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .num_wqebbs = rq->mpwqe.umr_wqebbs,
.umr.rq = rq,
};
- sq->pc += MLX5E_UMR_WQEBBS;
+ sq->pc += rq->mpwqe.umr_wqebbs;
sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -717,8 +699,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
- dma_info--;
- mlx5e_page_release(rq, dma_info, true);
+ au--;
+ mlx5e_page_release_dynamic(rq, au->page, true);
}
err:
@@ -752,7 +734,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
if (hd_info->page != deleted_page) {
deleted_page = hd_info->page;
- mlx5e_page_release(rq, hd_info, false);
+ mlx5e_page_release_dynamic(rq, hd_info->page, false);
}
}
@@ -767,7 +749,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
/* Don't recycle, this function is called on rq/netdev close */
mlx5e_free_rx_mpwqe(rq, wi, false);
}
@@ -775,38 +757,51 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- u8 wqe_bulk;
- int err;
+ int wqe_bulk, count;
+ bool busy = false;
+ u16 head;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- wqe_bulk = rq->wqe.info.wqe_bulk;
-
- if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
+ if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
return false;
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
- do {
- u16 head = mlx5_wq_cyc_get_head(wq);
+ wqe_bulk = mlx5_wq_cyc_missing(wq);
+ head = mlx5_wq_cyc_get_head(wq);
- err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
- if (unlikely(err)) {
- rq->stats->buff_alloc_err++;
- break;
- }
+ /* Don't allow any newly allocated WQEs to share the same page with old
+ * WQEs that aren't completed yet. Stop earlier.
+ */
+ wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
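/* Illustrative arithmetic with assumed values (not from this patch):
 * with 4 WQEs per page (wqe_index_mask == 3), head == 6 and
 * wqe_bulk == 7, the line above computes (6 + 7) & 3 == 1 and trims
 * wqe_bulk to 6, so the bulk fills WQEs 6..11 and stops on the page
 * boundary instead of touching the page shared with the still-in-flight
 * WQEs 13..15.
 */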
- mlx5_wq_cyc_push_n(wq, wqe_bulk);
- } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
+ if (!rq->xsk_pool)
+ count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+ else if (likely(!rq->xsk_pool->dma_need_sync))
+ count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
+ else
+ /* If dma_need_sync is true, it's more efficient to call
+ * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
+ * because the latter does the same check and returns only one
+ * frame.
+ */
+ count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
+
+ mlx5_wq_cyc_push_n(wq, count);
+ if (unlikely(count != wqe_bulk)) {
+ rq->stats->buff_alloc_err++;
+ busy = true;
+ }
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
mlx5_wq_cyc_update_db_record(wq);
- return !!err;
+ return busy;
}
void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
@@ -974,7 +969,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
- alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
+ alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
+ mlx5e_alloc_rx_mpwqe(rq, head);
if (unlikely(alloc_err))
break;
@@ -1421,6 +1417,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+ if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
+ mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -1524,19 +1523,21 @@ static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di = wi->di;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 metasize = 0;
void *va, *data;
+ dma_addr_t addr;
u32 frag_size;
- va = page_address(di->page) + wi->offset;
+ va = page_address(au->page) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
net_prefetch(data);
@@ -1546,7 +1547,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
+ if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
return NULL; /* page/packet was consumed by XDP */
rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1559,7 +1560,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
return skb;
}
@@ -1570,20 +1571,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
- struct mlx5e_dma_info *di = wi->di;
struct skb_shared_info *sinfo;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct xdp_buff xdp;
struct sk_buff *skb;
+ dma_addr_t addr;
u32 truesize;
void *va;
- va = page_address(di->page) + wi->offset;
+ va = page_address(au->page) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, DMA_FROM_DEVICE);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
@@ -1599,11 +1602,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
while (cqe_bcnt) {
skb_frag_t *frag;
- di = wi->di;
+ au = wi->au;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, DMA_FROM_DEVICE);
if (!xdp_buff_has_frags(&xdp)) {
@@ -1616,11 +1620,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
frag = &sinfo->frags[sinfo->nr_frags++];
- __skb_frag_set_page(frag, di->page);
+ __skb_frag_set_page(frag, au->page);
skb_frag_off_set(frag, wi->offset);
skb_frag_size_set(frag, frag_consumed_bytes);
- if (page_is_pfmemalloc(di->page))
+ if (page_is_pfmemalloc(au->page))
xdp_buff_set_frag_pfmemalloc(&xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
@@ -1631,10 +1635,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
wi++;
}
- di = head_wi->di;
+ au = head_wi->au;
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1651,7 +1655,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
if (unlikely(!skb))
return NULL;
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
if (unlikely(xdp_buff_has_frags(&xdp))) {
int i;
@@ -1706,9 +1710,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
+ mlx5e_xsk_skb_from_cqe_linear,
rq, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
@@ -1791,11 +1796,11 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -1846,12 +1851,13 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 data_bcnt, u32 data_offset)
+mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
while (data_bcnt) {
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
unsigned int truesize;
@@ -1860,12 +1866,12 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_i
else
truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_frag(rq, skb, di, data_offset,
+ mlx5e_add_skb_frag(rq, skb, au, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
- di++;
+ au++;
}
}
@@ -1873,12 +1879,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
- struct mlx5e_dma_info *head_di = di;
+ union mlx5e_alloc_unit *head_au = au;
struct sk_buff *skb;
+ dma_addr_t addr;
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
@@ -1889,14 +1896,17 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
net_prefetchw(skb->data);
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
if (unlikely(frag_offset >= PAGE_SIZE)) {
- di++;
+ au++;
frag_offset -= PAGE_SIZE;
}
- mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
+ mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
+ addr = page_pool_get_dma_addr(head_au->page);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+ head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1908,12 +1918,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 metasize = 0;
void *va, *data;
+ dma_addr_t addr;
u32 frag_size;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -1922,11 +1933,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(di->page) + head_offset;
+ va = page_address(au->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, DMA_FROM_DEVICE);
net_prefetch(data);
@@ -1936,7 +1948,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -1952,7 +1964,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
return skb;
}
@@ -1997,7 +2009,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
prefetchw(skb->data);
- mlx5e_copy_skb_header(rq->pdev, skb, head,
+ mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2049,7 +2061,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release(rq, &shampo->info[header_index], true);
+ mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true);
}
bitmap_clear(shampo->bitmap, header_index, 1);
}
@@ -2070,11 +2082,11 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
struct mlx5e_rx_wqe_ll *wqe;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *au;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;
- wi = &rq->mpwqe.info[wqe_id];
+ wi = mlx5e_get_mpw_info(rq, wqe_id);
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -2120,8 +2132,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- di = &wi->umr.dma_info[page_idx];
- mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ au = &wi->alloc_units[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
@@ -2143,11 +2155,11 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -2170,9 +2182,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
- skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ mlx5e_xsk_skb_from_cqe_mpwrq_linear,
rq, wi, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -2417,7 +2430,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
mlx5e_xsk_skb_from_cqe_linear :
- mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
@@ -2471,7 +2484,7 @@ free_wqe:
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
- rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7409829d1201..03c1841970f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -641,17 +641,26 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};
+static const struct counter_desc vnic_env_stats_drop_desc[] = {
+ { "rx_oversize_pkts_buffer",
+ VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
+};
+
#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
+#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
+ ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
- NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
+ NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
+ NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
@@ -665,6 +674,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vnic_env_stats_dev_oob_desc[i].format);
+
+ for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_drop_desc[i].format);
+
return idx;
}
@@ -679,6 +693,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
vnic_env_stats_dev_oob_desc, i);
+
+ for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_drop_desc, i);
+
return idx;
}
@@ -2451,6 +2470,9 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
&MLX5E_STATS_GRP(qos),
+#ifdef CONFIG_MLX5_EN_MACSEC
+ &MLX5E_STATS_GRP(macsec_hw),
+#endif
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index ed4fc940e4ef..9f781085be47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -273,6 +273,10 @@ struct mlx5e_qcounter_stats {
u32 rx_if_down_packets;
};
+#define VNIC_ENV_GET(vnic_env_stats, c) \
+ MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
+ vport_env.c)
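/* Usage example (mirroring the en_main.c rtnl-stats hunk above):
 *   VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small)
 * expands to MLX5_GET(query_vnic_env_out,
 *                     priv->stats.vnic.query_vnic_env_out,
 *                     vport_env.eth_wqe_too_small).
 */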
+
struct mlx5e_vnic_env_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
@@ -486,5 +490,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
+extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f154bda668ad..70a7a61f9708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -311,6 +311,7 @@ mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -322,7 +323,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs->tc->ct;
+ return tc->ct;
}
static struct mlx5e_tc_psample *
@@ -345,6 +346,7 @@ get_sample_priv(struct mlx5e_priv *priv)
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -356,7 +358,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
- return priv->fs->tc->post_act;
+ return tc->post_act;
}
struct mlx5_flow_handle *
@@ -607,11 +609,12 @@ int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs->tc->mod_hdr;
+ &tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -810,6 +813,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct ttc_params ttc_params;
+ struct mlx5_ttc_table *ttc;
int err;
err = mlx5e_hairpin_create_indirect_rqt(hp);
@@ -827,9 +831,10 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
goto err_create_ttc_table;
}
+ ttc = mlx5e_fs_get_ttc(priv->fs, false);
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
+ mlx5_get_ttc_flow_table(ttc)->id);
return 0;
@@ -916,10 +921,11 @@ static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
u16 peer_vhca_id, u8 prio)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
+ hash_for_each_possible(tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -933,11 +939,12 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -993,6 +1000,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
@@ -1021,10 +1029,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_lock(&tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -1036,7 +1044,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -1048,9 +1056,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -1126,8 +1134,9 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
+ struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
@@ -1163,7 +1172,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
}
dest_ix++;
}
@@ -1191,7 +1200,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs->tc->t);
+ rule = ERR_CAST(tc->t);
goto err_ft_get;
}
}
@@ -1293,8 +1302,10 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_fs_chains *nic_chains;
+ nic_chains = mlx5e_nic_chains(tc);
mlx5_del_flow_rules(rule);
if (attr->chain || attr->prio)
@@ -1309,8 +1320,8 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
@@ -1322,13 +1333,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs->tc->t_lock);
+ mutex_lock(&tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs->tc->t = NULL;
+ tc->t = NULL;
}
- mutex_unlock(&priv->fs->tc->t_lock);
+ mutex_unlock(&tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -1494,8 +1505,11 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_priv = netdev_priv(route_dev);
route_mdev = route_priv->mdev;
- if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
- route_mdev->coredev_type != MLX5_COREDEV_VF)
+ if (out_mdev->coredev_type != MLX5_COREDEV_PF)
+ return false;
+
+ if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
+ route_mdev->coredev_type != MLX5_COREDEV_SF)
return false;
return mlx5e_same_hw_devs(out_priv, route_priv);
@@ -4058,13 +4072,14 @@ static const struct rhashtable_params tc_ht_params = {
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs->tc->ht;
+ return &tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4448,7 +4463,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
int err = 0;
if (!mlx5_esw_hold(priv->mdev))
- return -EAGAIN;
+ return -EBUSY;
mlx5_esw_get(priv->mdev);
@@ -4772,6 +4787,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
struct mlx5e_hairpin_entry *hpe, *tmp;
LIST_HEAD(init_wait_list);
@@ -4783,11 +4799,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
- hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4841,7 +4857,8 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
- struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_flow_table **ft = &tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
@@ -4863,12 +4880,14 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->fs->tc->miss_t);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+
+ mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = priv->fs->tc;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4909,7 +4928,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs->tc->miss_t;
+ attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
@@ -4958,7 +4977,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = priv->fs->tc;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -5163,13 +5182,13 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
+ struct mlx5e_tc_table *tc;
int err;
reg_b = be32_to_cpu(cqe->ft_metadata);
-
+ tc = mlx5e_fs_get_tc(priv->fs);
chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
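
The en_tc.c hunks above consistently replace direct priv->fs->tc dereferences with the mlx5e_fs_get_tc(priv->fs) accessor, keeping the flow-steering context opaque to the TC code. A minimal sketch of that opaque-context-plus-accessor pattern follows; the sketch_fs/sketch_tc names are illustrative stand-ins, not the driver's real types.

/* Hedged sketch: opaque steering context exposed only through an accessor,
 * in the style of mlx5e_fs_get_tc() used in the hunks above.
 */
#include <stddef.h>

struct sketch_tc { int num_filters; };

/* In the driver this definition lives in one .c file; other files see only
 * a forward declaration plus the accessor below.
 */
struct sketch_fs { struct sketch_tc *tc; };

static struct sketch_tc *sketch_fs_get_tc(struct sketch_fs *fs)
{
        return fs ? fs->tc : NULL;
}

static int sketch_tc_add_filter(struct sketch_fs *fs)
{
        struct sketch_tc *tc = sketch_fs_get_tc(fs);

        if (!tc)
                return -1;
        tc->num_filters++;      /* callers never touch fs members directly */
        return 0;
}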
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 6ce1ab6b86b7..48241317a535 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -54,6 +54,7 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 27f791feb517..bf2232a2a836 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -485,7 +486,7 @@ err_drop:
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
- !attr->insz;
+ !attr->insz && !mlx5e_macsec_skb_is_offload(skb);
}
static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 833be29170a1..9a458a5d9853 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,6 +31,7 @@
*/
#include <linux/irq.h>
+#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
@@ -86,26 +87,36 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
+ bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);
bool busy_xsk = false, xsk_rx_alloc_err;
- /* Handle the race between the application querying need_wakeup and the
- * driver setting it:
- * 1. Update need_wakeup both before and after the TX. If it goes to
- * "yes", it can only happen with the first update.
- * 2. If the application queried need_wakeup before we set it, the
- * packets will be transmitted anyway, even w/o a wakeup.
- * 3. Give a chance to clear need_wakeup after new packets were queued
- * for TX.
+ /* If SQ is empty, there are no TX completions to trigger NAPI, so set
+ * need_wakeup. Do it before queuing packets for TX to avoid race
+ * condition with userspace.
*/
- mlx5e_xsk_update_tx_wakeup(xsksq);
+ if (need_wakeup && xsksq->pc == xsksq->cc)
+ xsk_set_tx_need_wakeup(xsksq->xsk_pool);
busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
- mlx5e_xsk_update_tx_wakeup(xsksq);
+ /* If we queued some packets for TX, no need for wakeup anymore. */
+ if (need_wakeup && xsksq->pc != xsksq->cc)
+ xsk_clear_tx_need_wakeup(xsksq->xsk_pool);
+ /* If WQ is empty, RX won't trigger NAPI, so set need_wakeup. Do it
+ * before refilling to avoid race condition with userspace.
+ */
+ if (need_wakeup && !mlx5e_rqwq_get_cur_sz(xskrq))
+ xsk_set_rx_need_wakeup(xskrq->xsk_pool);
xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
xskrq);
- busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
+ /* Ask for wakeup if WQ is not full after refill. */
+ if (!need_wakeup)
+ busy_xsk |= xsk_rx_alloc_err;
+ else if (xsk_rx_alloc_err)
+ xsk_set_rx_need_wakeup(xskrq->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xskrq->xsk_pool);
return busy_xsk;
}
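
The rewritten mlx5e_napi_xsk_post() above spells out the AF_XDP need_wakeup handshake: publish the flag before queuing TX or refilling RX (so userspace polling the flag cannot race with the driver), then clear it once work is actually outstanding. A condensed sketch of that ordering for a hypothetical queue pair is below, using the in-kernel xsk helpers from net/xdp_sock_drv.h; the sketch_* types and the two callbacks are placeholders, not mlx5 code.

/* Hedged sketch: generic NAPI-side need_wakeup handling for one XSK queue
 * pair. pc/cc are producer/consumer counters; an empty SQ means no TX
 * completion will re-arm NAPI, so userspace must be told to wake the driver.
 */
#include <linux/types.h>
#include <net/xdp_sock_drv.h>

struct sketch_xsk_sq { struct xsk_buff_pool *xsk_pool; u16 pc, cc; };
struct sketch_xsk_rq { struct xsk_buff_pool *xsk_pool; u32 cur_sz; };

static bool sketch_xsk_post(struct sketch_xsk_sq *sq, struct sketch_xsk_rq *rq,
                            bool (*tx_from_pool)(struct sketch_xsk_sq *),
                            bool (*rx_refill_failed)(struct sketch_xsk_rq *))
{
        bool need_wakeup = xsk_uses_need_wakeup(rq->xsk_pool);
        bool rx_alloc_err;
        bool busy = false;

        /* SQ empty: set need_wakeup before queuing to avoid the race. */
        if (need_wakeup && sq->pc == sq->cc)
                xsk_set_tx_need_wakeup(sq->xsk_pool);

        busy |= tx_from_pool(sq);

        /* Packets queued: completions will come, wakeup no longer needed. */
        if (need_wakeup && sq->pc != sq->cc)
                xsk_clear_tx_need_wakeup(sq->xsk_pool);

        /* RX ring empty: ask for a wakeup before refilling. */
        if (need_wakeup && !rq->cur_sz)
                xsk_set_rx_need_wakeup(rq->xsk_pool);

        rx_alloc_err = rx_refill_failed(rq);

        if (!need_wakeup)
                busy |= rx_alloc_err;
        else if (rx_alloc_err)
                xsk_set_rx_need_wakeup(rq->xsk_pool);
        else
                xsk_clear_rx_need_wakeup(rq->xsk_pool);

        return busy;
}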
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 229728c80233..a0242dc15741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -575,6 +575,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_GEN_MAX(dev, vhca_state))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
+ if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index 0abef71cb839..c9a91158e99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -78,12 +78,16 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
struct mlx5_core_dev *dest_mdev)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ bool vf_sf_vport;
+
+ vf_sf_vport = mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
/* Use indirect table for all IP traffic from UL to VF with vport
* destination when source rewrite flag is set.
*/
return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
- mlx5_eswitch_is_vf_vport(esw, vport_num) &&
+ vf_sf_vport &&
esw->dev == dest_mdev &&
attr->ip_version &&
attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 694c54066955..4f8a24d84a86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -924,12 +924,16 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
struct mlx5_esw_rate_group *group,
struct netlink_ext_ack *extack)
{
- int err;
+ int err = 0;
mutex_lock(&esw->state_lock);
+ if (!vport->qos.enabled && !group)
+ goto unlock;
+
err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
if (!err)
err = esw_qos_vport_update_group(esw, vport, group, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6aa58044b949..c59107fa9e6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1360,7 +1360,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
- esw_offloads_del_send_to_vport_meta_rules(esw);
devl_rate_nodes_destroy(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 87ce5a208cb5..f68dc2d0dbe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -244,6 +244,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
+ struct mlx5_flow_group *vport_rx_drop_group;
+ struct mlx5_flow_handle *vport_rx_drop_rule;
struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
@@ -344,7 +346,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
-void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed73132129aa..4e50df3139c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -70,6 +70,8 @@
#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+
static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -427,7 +429,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (mlx5_lag_mpesw_is_activated(esw->dev))
+ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+ mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
@@ -480,25 +483,27 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
!(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
- } else if (attr->dest_ft) {
- esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
- (*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
- } else if (attr->dest_chain) {
- err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
- 1, 0, *i);
- (*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+
+ if (attr->dest_ft) {
+ err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+ 1, 0, *i);
+ (*i)++;
+ }
}
return err;
@@ -1057,52 +1062,23 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
- int i = 0, num_vfs = esw->esw_funcs.num_vfs;
-
- if (!num_vfs || !flows)
- return;
-
- for (i = 0; i < num_vfs; i++)
- mlx5_del_flow_rules(flows[i]);
-
- kvfree(flows);
- /* If changing eswitch mode from switchdev to legacy, but num_vfs is not 0,
- * meta rules could be freed again. So set it to NULL.
- */
- esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
+ if (rule)
+ mlx5_del_flow_rules(rule);
}
-void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
-{
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
-}
-
-static int
-mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
- int num_vfs, rule_idx = 0, err = 0;
struct mlx5_flow_handle *flow_rule;
- struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
- unsigned long i;
- u16 vport_num;
-
- num_vfs = esw->esw_funcs.num_vfs;
- flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
- if (!flows)
- return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto alloc_err;
- }
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
MLX5_SET(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
@@ -1115,34 +1091,18 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- vport_num = vport->vport;
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
- dest.vport.num = vport_num;
-
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
- rule_idx, PTR_ERR(flow_rule));
- goto rule_err;
- }
- flows[rule_idx++] = flow_rule;
- }
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
+ dest.vport.num = vport_num;
- esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
- kvfree(spec);
- return 0;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
+ vport_num, PTR_ERR(flow_rule));
-rule_err:
- while (--rule_idx >= 0)
- mlx5_del_flow_rules(flows[rule_idx]);
kvfree(spec);
-alloc_err:
- kvfree(flows);
- return err;
+ return flow_rule;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
@@ -1667,18 +1627,200 @@ esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
#endif
+static int
+esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int count, err = 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
+
+ /* See comment at table_size calculation */
+ count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
+ *ix += count;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+
+out:
+ return err;
+}
+
+static int
+esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+
+ if (!esw_src_port_rewrite_supported(esw))
+ return 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, *ix + esw->total_vports - 1);
+ *ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev,
+ "Failed to create send-to-vport meta flow group err(%d)\n", err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ return 0;
+
+send_vport_meta_err:
+ return err;
+}
+
+static int
+esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ esw_set_flow_group_source_port(esw, flow_group_in);
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ *ix + esw->total_vports - 1);
+ *ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
+
+out:
+ return err;
+}
+
+static int
+esw_create_miss_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+ u8 *dmac;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ *ix + MLX5_ESW_MISS_FLOWS);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
+ goto miss_err;
+ }
+ esw->fdb_table.offloads.miss_grp = g;
+
+ err = esw_add_fdb_miss_rule(esw);
+ if (err)
+ goto miss_rule_err;
+
+ return 0;
+
+miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+ return err;
+}
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
- int num_vfs, table_size, ix, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
+ int table_size, ix = 0, err = 0;
u32 flags = 0, *flow_group_in;
- struct mlx5_flow_group *g;
- void *match_criteria;
- u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
@@ -1712,7 +1854,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
* total vports of the peer (currently is also uses esw->total_vports).
*/
table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
- MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
+ esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -1753,139 +1895,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
goto fdb_chains_err;
}
- /* create send-to-vport group */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
-
- /* See comment above table_size calculation */
- ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
+ if (err)
goto send_vport_err;
- }
- esw->fdb_table.offloads.send_to_vport_grp = g;
-
- if (esw_src_port_rewrite_supported(esw)) {
- /* meta send to vport */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS_2);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
-
- num_vfs = esw->esw_funcs.num_vfs;
- if (num_vfs) {
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in,
- end_flow_index, ix + num_vfs - 1);
- ix += num_vfs;
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
- err);
- goto send_vport_meta_err;
- }
- esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
- err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
- if (err)
- goto meta_rule_err;
- }
- }
-
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
- /* create peer esw miss group */
- memset(flow_group_in, 0, inlen);
-
- esw_set_flow_group_source_port(esw, flow_group_in);
-
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in,
- match_criteria);
-
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
-
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + esw->total_vports - 1);
- ix += esw->total_vports;
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
- goto peer_miss_err;
- }
- esw->fdb_table.offloads.peer_miss_grp = g;
- }
-
- /* create miss group */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
- match_criteria);
- dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers.dmac_47_16);
- dmac[0] = 0x01;
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + MLX5_ESW_MISS_FLOWS);
+ err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
+ if (err)
+ goto send_vport_meta_err;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
- goto miss_err;
- }
- esw->fdb_table.offloads.miss_grp = g;
+ err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
+ if (err)
+ goto peer_miss_err;
- err = esw_add_fdb_miss_rule(esw);
+ err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
if (err)
- goto miss_rule_err;
+ goto miss_err;
kvfree(flow_group_in);
return 0;
-miss_rule_err:
- mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
-meta_rule_err:
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
@@ -1912,7 +1944,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
@@ -1930,7 +1961,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0);
}
-static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
+static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
int nvports;
@@ -1955,7 +1986,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = esw_get_offloads_ft_size(esw);
+ ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
+ MLX5_ESW_FT_OFFLOADS_DROP_RULE;
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1984,7 +2016,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports;
int err = 0;
- nvports = esw_get_offloads_ft_size(esw);
+ nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -2014,6 +2046,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
+static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
+{
+ /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+ * for the drop rule, which is placed at the end of the table.
+ * So return the total of vport and int_port as rule index.
+ */
+ return esw_get_nr_ft_offloads_steering_src_ports(esw);
+}
+
+static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int flow_index;
+ int err = 0;
+
+ flow_index = esw_create_vport_rx_drop_rule_index(esw);
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
+ goto out;
+ }
+
+ esw->offloads.vport_rx_drop_group = g;
+out:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+ if (esw->offloads.vport_rx_drop_group)
+ mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
@@ -2062,6 +2140,32 @@ out:
return flow_rule;
}
+static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
+ &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx drop rule err %ld\n",
+ PTR_ERR(flow_rule));
+ return PTR_ERR(flow_rule);
+ }
+
+ esw->offloads.vport_rx_drop_rule = flow_rule;
+
+ return 0;
+}
+
+static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+ if (esw->offloads.vport_rx_drop_rule)
+ mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
+}
+
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
@@ -3062,8 +3166,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
+ err = esw_create_vport_rx_drop_group(esw);
+ if (err)
+ goto create_rx_drop_fg_err;
+
+ err = esw_create_vport_rx_drop_rule(esw);
+ if (err)
+ goto create_rx_drop_rule_err;
+
return 0;
+create_rx_drop_rule_err:
+ esw_destroy_vport_rx_drop_group(esw);
+create_rx_drop_fg_err:
+ esw_destroy_vport_rx_group(esw);
create_fg_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
@@ -3081,6 +3197,8 @@ create_indir_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ esw_destroy_vport_rx_drop_rule(esw);
+ esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
@@ -3115,8 +3233,10 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
+ if (err) {
+ devl_unlock(devlink);
return;
+ }
}
esw->esw_funcs.num_vfs = new_num_vfs;
devl_unlock(devlink);
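
The eswitch_offloads.c hunks above enlarge ft_offloads by MLX5_ESW_FT_OFFLOADS_DROP_RULE (one FTE), give that last slot its own single-entry flow group, and install an unconditional drop rule there so RX traffic that matches no per-vport rule is dropped rather than leaked. A tiny runnable sketch of the "reserve the final index for a default rule" layout follows; every type and constant in it is a hypothetical stand-in, not an mlx5 API.

/* Hedged sketch: table sized nr_src_ports + 1, with the appended slot
 * reserved for a catch-all drop entry, mirroring the index arithmetic in
 * esw_create_vport_rx_drop_rule_index() above.
 */
#include <stdio.h>

#define NR_SRC_PORTS 4
#define DROP_SLOTS   1

enum rule_kind { RULE_FWD, RULE_DROP };

static int drop_rule_index(void)
{
        /* Indices 0..NR_SRC_PORTS-1 hold per-port forward rules; the extra
         * slot appended at the end holds the default drop.
         */
        return NR_SRC_PORTS;
}

int main(void)
{
        enum rule_kind rules[NR_SRC_PORTS + DROP_SLOTS];
        int i;

        for (i = 0; i < NR_SRC_PORTS; i++)
                rules[i] = RULE_FWD;                 /* per-port RX rules */
        rules[drop_rule_index()] = RULE_DROP;        /* unmatched traffic drops */

        printf("drop rule lives at flow index %d of %d\n",
               drop_rule_index(), NR_SRC_PORTS + DROP_SLOTS);
        return 0;
}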
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index a1ac3a654962..9459e56ee90a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -36,6 +36,7 @@ static struct mlx5_nb events_nbs_ref[] = {
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_OBJECT_CHANGE },
/* QP/WQ resource events to forward */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
@@ -132,6 +133,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
case MLX5_EVENT_TYPE_DEVICE_TRACER:
return "MLX5_EVENT_TYPE_DEVICE_TRACER";
+ case MLX5_EVENT_TYPE_OBJECT_CHANGE:
+ return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index e735e19461ba..32d4c967469c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -577,7 +577,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
- MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
+ fte->action.crypto.type);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
+ fte->action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
@@ -919,13 +922,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
table_type = FS_FT_FDB;
break;
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
- case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e3960cdf5131..d53749248fa0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -104,6 +104,10 @@
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
+#define KERNEL_RX_MACSEC_NUM_PRIOS 1
+#define KERNEL_RX_MACSEC_NUM_LEVELS 2
+#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
+
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
@@ -126,11 +130,15 @@
#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
-#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
-#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
+#define KERNEL_TX_MACSEC_NUM_PRIOS 1
+#define KERNEL_TX_MACSEC_NUM_LEVELS 2
+#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
struct node_caps {
size_t arr_sz;
@@ -149,12 +157,16 @@ static struct init_tree_node {
enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 7,
+ .ar_size = 8,
.children = (struct init_tree_node[]){
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
+ KERNEL_RX_MACSEC_NUM_LEVELS))),
ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
@@ -186,18 +198,23 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+ ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
+ KERNEL_TX_MACSEC_NUM_LEVELS))),
}
};
@@ -2269,6 +2286,7 @@ static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_LAG:
case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
@@ -2315,7 +2333,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
prio = FDB_BYPASS_PATH;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
- case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
root_ns = steering->egress_root_ns;
prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 079fa44ada71..f34e758a2f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -273,6 +273,19 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, adv_virtualization)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2cf2c9948446..86ed87d704f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -601,7 +601,7 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
fw_reporter_ctx.miss_counter = health->miss_counter;
if (fw_reporter_ctx.err_synd) {
devlink_health_report(health->fw_reporter,
- "FW syndrom reported", &fw_reporter_ctx);
+ "FW syndrome reported", &fw_reporter_ctx);
return;
}
if (fw_reporter_ctx.miss_counter)
@@ -702,11 +702,25 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.dump = mlx5_fw_fatal_reporter_dump,
};
-#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct devlink *devlink = priv_to_devlink(dev);
+ u64 grace_period;
+
+ if (mlx5_core_is_ecpf(dev)) {
+ grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ } else if (mlx5_core_is_pf(dev)) {
+ grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ } else {
+ /* VF or SF */
+ grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ }
health->fw_reporter =
devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
@@ -718,7 +732,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
health->fw_fatal_reporter =
devlink_health_reporter_create(devlink,
&mlx5_fw_fatal_reporter_ops,
- MLX5_REPORTER_FW_GRACEFUL_PERIOD,
+ grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
@@ -843,9 +857,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
-
- if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
- queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -862,6 +873,14 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
del_timer_sync(&health->timer);
}
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
+}
+
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -875,13 +894,6 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
cancel_work_sync(&health->fatal_report_work);
}
-void mlx5_health_flush(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_health *health = &dev->priv.health;
-
- flush_workqueue(health->wq);
-}
-
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ac3757beaea2..c247cca154e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "ipoib.h"
+#include "en/fs_ethtool.h"
static void mlx5i_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
@@ -39,7 +40,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = mlx5i_epriv(dev);
mlx5e_ethtool_get_drvinfo(priv, drvinfo);
- strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
+ strscpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
sizeof(drvinfo->driver));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index c02b7b08fb4c..4e3a75496dd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -35,6 +35,7 @@
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
+#include "en/fs_ethtool.h"
#define IB_DEFAULT_Q_KEY 0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
@@ -320,43 +321,47 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
+ struct mlx5_flow_namespace *ns =
+ mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
int err;
- priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs->ns)
+ if (!ns)
return -EINVAL;
- err = mlx5e_arfs_create_tables(priv);
+ mlx5e_fs_set_ns(priv->fs, ns, false);
+ err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(priv->fs, priv->rx_res);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv);
+ mlx5e_arfs_destroy_tables(priv->fs,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_arfs_destroy_tables(priv);
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_destroy_ttc_table(priv->fs);
+ mlx5e_arfs_destroy_tables(priv->fs,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_ethtool_cleanup_steering(priv->fs);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
@@ -458,7 +463,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 0b86e78dbc0e..0227a521d301 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -349,7 +349,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.update_stats = NULL,
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 0f34e3c80d1f..a9f4ede4a9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -65,6 +65,21 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
}
+static u8 lag_active_port_bits(struct mlx5_lag *ldev)
+{
+ u8 enabled_ports[MLX5_MAX_PORTS] = {};
+ u8 active_port = 0;
+ int num_enabled;
+ int idx;
+
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ &num_enabled);
+ for (idx = 0; idx < num_enabled; idx++)
+ active_port |= BIT_MASK(enabled_ports[idx]);
+
+ return active_port;
+}
+
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
unsigned long flags)
{
@@ -77,9 +92,21 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
- if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {
+
+ switch (port_sel_mode) {
+ case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ break;
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
+ if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
+ break;
+
+ MLX5_SET(lagc, lag_ctx, active_port,
+ lag_active_port_bits(mlx5_lag_dev(dev)));
+ break;
+ default:
+ break;
}
MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);
@@ -386,12 +413,37 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
}
}
+static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
+ void *lag_ctx;
+
+ lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+ MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+ MLX5_SET(modify_lag_in, in, field_select, 0x2);
+
+ MLX5_SET(lagc, lag_ctx, active_port, ports);
+
+ return mlx5_cmd_exec_in(dev, modify_lag, in);
+}
+
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ u8 active_ports;
+ int ret;
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags))
- return mlx5_lag_port_sel_modify(ldev, ports);
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
+ ret = mlx5_lag_port_sel_modify(ldev, ports);
+ if (ret ||
+ !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass))
+ return ret;
+
+ active_ports = lag_active_port_bits(ldev);
+
+ return mlx5_cmd_modify_active_port(dev0, active_ports);
+ }
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
@@ -432,21 +484,22 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
mlx5_lag_drop_rule_setup(ldev, tracker);
}
-#define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
unsigned long *flags)
{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- if (ldev->ports == MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED) {
- /* Four ports are support only in hash mode */
- if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table))
- return -EINVAL;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+ if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
+ return -EINVAL;
+ return 0;
}
+ if (ldev->ports > 2)
+ ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
+
+ set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+
return 0;
}
@@ -1067,30 +1120,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
+ unsigned long flags;
if (fn >= ldev->ports)
return;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
}
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1234,7 +1289,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
+ if (!ldev->pf[i].netdev)
break;
if (i >= ldev->ports)
@@ -1246,12 +1301,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1260,27 +1316,45 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ bool res = 0;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ if (ldev)
+ res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_mode_is_hash);
+
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev) &&
dev == ldev->pf[MLX5_LAG_P1].dev;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1289,12 +1363,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1303,13 +1378,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev) &&
test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1352,9 +1428,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1450,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return ndev;
}
@@ -1383,10 +1460,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
u8 port = 0;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -1401,7 +1479,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
port = ldev->v2p_map[port * ldev->buckets];
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1500,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -1433,7 +1512,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
ldev->pf[MLX5_LAG_P1].dev;
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1525,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev;
+ unsigned long flags;
int num_ports;
int ret, i, j;
void *out;
@@ -1462,7 +1542,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
@@ -1472,7 +1552,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index b3bbf284fe71..d854e01d7fc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -11,7 +11,9 @@
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
#define MLX5_ASO_WQEBBS_DATA \
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define ASO_CTRL_READ_EN BIT(0)
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
@@ -70,6 +72,7 @@ enum {
enum {
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 91e806c1aa21..d3a9ae80fd30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -65,6 +65,8 @@ enum {
MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+ MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9),
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
@@ -72,6 +74,13 @@ static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}
+static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
+{
+ return (mlx5_real_time_mode(mdev) &&
+ MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
+ MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
+}
+
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
@@ -459,9 +468,95 @@ static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
return find_target_cycles(mdev, target_ns);
}
-static u64 perout_conf_real_time(s64 sec)
+static u64 perout_conf_real_time(s64 sec, u32 nsec)
+{
+ return (u64)nsec | (u64)sec << 32;
+}
+
+static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
+ u64 *time_stamp, bool real_time)
+{
+ struct timespec64 ts;
+ s64 ns;
+
+ ts.tv_nsec = rq->perout.period.nsec;
+ ts.tv_sec = rq->perout.period.sec;
+ ns = timespec64_to_ns(&ts);
+
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+
+ *time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
+ perout_conf_internal_timer(mdev, rq->perout.start.sec);
+
+ return 0;
+}
+
+#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
+static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
+ struct ptp_clock_request *rq,
+ u32 *out_pulse_duration_ns)
{
- return (u64)sec << 32;
+ struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ u32 out_pulse_duration;
+ struct timespec64 ts;
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ ts.tv_sec = rq->perout.on.sec;
+ ts.tv_nsec = rq->perout.on.nsec;
+ out_pulse_duration = (u32)timespec64_to_ns(&ts);
+ } else {
+ /* By default, out_pulse_duration_ns is set to 50% of
+ * the pulse period.
+ */
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
+ }
+
+ if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
+ out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
+ mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
+ out_pulse_duration, pps_info->min_out_pulse_duration_ns,
+ MLX5_MAX_PULSE_DURATION);
+ return -EINVAL;
+ }
+ *out_pulse_duration_ns = out_pulse_duration;
+
+ return 0;
+}
+
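(Illustrative sketch, not part of the patch: with PTP_PEROUT_DUTY_CYCLE unset, the pulse width above falls back to half of the requested period, so a 1 s period yields a 500 ms pulse. The standalone snippet below is a rough model of that selection; every name in it is hypothetical.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Rough model of the default-vs-explicit pulse width selection. */
static uint32_t pick_pulse_ns(bool duty_cycle_flag, uint64_t on_ns, uint64_t period_ns)
{
	return duty_cycle_flag ? (uint32_t)on_ns : (uint32_t)(period_ns >> 1);
}

int main(void)
{
	printf("%u\n", pick_pulse_ns(false, 0, 1000000000ULL));           /* 500000000 */
	printf("%u\n", pick_pulse_ns(true, 200000000ULL, 1000000000ULL)); /* 200000000 */
	return 0;
}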
+static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
+ u32 *field_select, u32 *out_pulse_duration_ns,
+ u64 *period, u64 *time_stamp)
+{
+ struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct ptp_clock_time *time = &rq->perout.start;
+ struct timespec64 ts;
+
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
+ mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
+ pps_info->min_npps_period);
+ return -EINVAL;
+ }
+ *period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);
+
+ if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
+ return -EINVAL;
+
+ *time_stamp = perout_conf_real_time(time->sec, time->nsec);
+ *field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;
+
+ return 0;
+}
+
+static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
+{
+ return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
+ (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
}
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,20 +569,20 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
container_of(clock, struct mlx5_core_dev, clock);
bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- struct timespec64 ts;
+ u32 out_pulse_duration_ns = 0;
u32 field_select = 0;
+ u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- s64 ns;
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
- if (rq->perout.flags)
+ if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
return -EOPNOTSUPP;
if (rq->perout.index >= clock->ptp_info.n_pins)
@@ -500,29 +595,25 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
- s64 sec = rq->perout.start.sec;
-
- if (rq->perout.start.nsec)
- return -EINVAL;
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
- if ((ns >> 1) != 500000000LL)
+ if (rt_mode && rq->perout.start.sec > U32_MAX)
return -EINVAL;
- if (rt_mode && sec > U32_MAX)
- return -EINVAL;
-
- time_stamp = rt_mode ? perout_conf_real_time(sec) :
- perout_conf_internal_timer(mdev, sec);
-
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_TIME_STAMP;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ err = perout_conf_npps_real_time(mdev, rq, &field_select,
+ &out_pulse_duration_ns, &npps_period,
+ &time_stamp);
+ else
+ err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
+ if (err)
+ return err;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -531,7 +622,8 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
MLX5_SET(mtpps_reg, in, field_select, field_select);
-
+ MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
+ MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
@@ -687,6 +779,13 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
+ if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
+ clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
+ cap_log_min_npps_period);
+ if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
+ clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
+ cap_log_min_out_pulse_duration_ns);
+
clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2f536c5d30b1..032adb21ad4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -83,6 +83,7 @@ int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, voi
enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
+ MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC,
};
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bec8d6d0b5f6..0b459d841c3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -494,6 +494,24 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+
+ if (!err)
+ return val.vbool;
+
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return MLX5_CAP_GEN(dev, roce);
+}
+EXPORT_SYMBOL(mlx5_is_roce_on);
+
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
@@ -597,7 +615,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported))
- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+ mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev);
if (max_uc_list > 0)
@@ -623,7 +642,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
*/
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
(!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}
@@ -652,6 +671,33 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
return err;
}
+static int handle_hca_cap_port_selection(struct mlx5_core_dev *dev,
+ void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, port_selection_cap))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass) ||
+ !MLX5_CAP_PORT_SELECTION_MAX(dev, port_select_flow_table_bypass))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur,
+ MLX5_ST_SZ_BYTES(port_selection_cap));
+ MLX5_SET(port_selection_cap, set_hca_cap, port_select_flow_table_bypass, 1);
+
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION);
+
+ return err;
+}
+
static int set_hca_cap(struct mlx5_core_dev *dev)
{
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
@@ -696,6 +742,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_port_selection(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_port_selection failed\n");
+ goto out;
+ }
+
out:
kfree(set_ctx);
return err;
@@ -1039,7 +1092,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
-static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
{
int err;
@@ -1077,10 +1130,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto err_cmd_cleanup;
+ goto stop_health_poll;
}
err = mlx5_core_set_issi(dev);
@@ -1132,8 +1187,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
mlx5_core_err(dev, "query hca failed\n");
goto reclaim_boot_pages;
}
-
- mlx5_start_health_poll(dev);
+ mlx5_start_health_fw_log_up(dev);
return 0;
@@ -1141,6 +1195,8 @@ reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
+stop_health_poll:
+ mlx5_stop_health_poll(dev, boot);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
@@ -1152,7 +1208,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
int err;
- mlx5_stop_health_poll(dev, boot);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
@@ -1160,6 +1215,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
}
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
+ mlx5_stop_health_poll(dev, boot);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
@@ -1309,7 +1365,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
- err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ err = mlx5_function_setup(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err)
goto err_function;
@@ -1397,7 +1453,7 @@ int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
else
timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
- err = mlx5_function_setup(dev, timeout);
+ err = mlx5_function_setup(dev, false, timeout);
if (err)
goto err_function;
@@ -1488,6 +1544,8 @@ static const int types[] = {
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
MLX5_CAP_DEV_SHAMPO,
+ MLX5_CAP_MACSEC,
+ MLX5_CAP_ADV_VIRTUALIZATION,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1530,7 +1588,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
+ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1597,6 +1657,7 @@ err_timeout_init:
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
return err;
}
@@ -1618,6 +1679,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index ad61b86d5769..a806e3de7b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -143,6 +143,36 @@ enum mlx5_semaphore_space_address {
#define MLX5_DEFAULT_PROF 2
+static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
+ size_t item_size, size_t num_items,
+ const char *func, int line)
+{
+ int inlen;
+
+ if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
+ mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
+ mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ if (check_add_overflow((int)fixed, inlen, &inlen)) {
+ mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ return inlen;
+}
+
+#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
+ mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
+
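(Hypothetical caller sketch, not part of the patch: example_build_cmd and the use of the create_tir_in layout are illustration only. The macro returns the total inbox length, or a negative errno when the size computation would overflow an int.)

static int example_build_cmd(struct mlx5_core_dev *dev, size_t num_entries)
{
	void *in;
	int inlen;

	inlen = MLX5_FLEXIBLE_INLEN(dev, MLX5_ST_SZ_BYTES(create_tir_in),
				    sizeof(u32), num_entries);
	if (inlen < 0)
		return inlen;	/* overflow is reported as -ENOMEM */

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* ... fill the command buffer and execute the command ... */

	kvfree(in);
	return 0;
}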
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index ec76a8b1acc1..60596357bfc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -376,8 +376,8 @@ retry:
goto out_dropped;
}
}
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_dropped;
@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.reclaim_pages_discard += npages;
}
/* if triggered by FW event and failed by FW then ignore */
- if (event && err == -EREMOTEIO)
+ if (event && err == -EREMOTEIO) {
err = 0;
+ goto out_free;
+ }
+
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e1bd54574ea5..a1548e6bfb35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -493,29 +493,6 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz)
-{
- u32 *in;
- int err;
-
- in = kvzalloc(sz, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- return err;
- }
-
- MLX5_SET(ppcnt_reg, in, local_port, port_num);
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
- err = mlx5_core_access_reg(dev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
-
- kvfree(in);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
-
static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out,
u32 out_size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index ee2e1b7c1310..c0e6c487c63c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -159,11 +159,11 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
- devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 062c7c74a1f3..1777a1e508e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1294,20 +1294,6 @@ struct mlx5dr_cmd_gid_attr {
u32 roce_ver;
};
-struct mlx5dr_cmd_qp_create_attr {
- u32 page_id;
- u32 pdn;
- u32 cqn;
- u32 pm_state;
- u32 service_type;
- u32 buff_umem_id;
- u32 db_umem_id;
- u32 sq_wqe_cnt;
- u32 rq_wqe_cnt;
- u32 rq_wqe_shift;
- u8 isolate_vl_tc:1;
-};
-
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
index 1fb185d6ac7f..d168622063d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
@@ -14,10 +14,6 @@ struct mlx5_fs_dr_action {
struct mlx5dr_action *dr_action;
};
-struct mlx5_fs_dr_ns {
- struct mlx5_dr_ns *dr_ns;
-};
-
struct mlx5_fs_dr_rule {
struct mlx5dr_rule *dr_rule;
/* Only actions created by fs_dr */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e5c4dcd1425e..4d629e5ddbc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -123,7 +123,7 @@ static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
wq->cur_sz++;
}
-static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
+static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n)
{
wq->wqe_ctr += n;
wq->cur_sz += n;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 5fdf9b7179f5..5a1027b07215 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -75,6 +75,7 @@ struct mlxbf_gige {
struct net_device *netdev;
struct platform_device *pdev;
void __iomem *mdio_io;
+ void __iomem *clk_io;
struct mii_bus *mdiobus;
spinlock_t lock; /* for packet processing indices */
u16 rx_q_entries;
@@ -137,7 +138,8 @@ enum mlxbf_gige_res {
MLXBF_GIGE_RES_MDIO9,
MLXBF_GIGE_RES_GPIO0,
MLXBF_GIGE_RES_LLU,
- MLXBF_GIGE_RES_PLU
+ MLXBF_GIGE_RES_PLU,
+ MLXBF_GIGE_RES_CLK
};
/* Version of register data returned by mlxbf_gige_get_regs() */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index b03e1c66bac0..2292d63a279c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -156,7 +156,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
phy_start(phydev);
- netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 2e6c1b7af096..aa780b1614a3 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -22,10 +22,23 @@
#include <linux/property.h>
#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
+#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
+#define MLXBF_GIGE_MDC_CLK_NS 400
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
+#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
+#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
+#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
+#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
+
/* Support clause 22 */
#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
@@ -50,27 +63,76 @@
#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
+#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+
+static struct resource corepll_params[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .start = MLXBF_GIGE_BF2_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ },
+};
+
+/* Returns core clock i1clk in Hz */
+static u64 calculate_i1clk(struct mlxbf_gige *priv)
+{
+ u8 core_od, core_r;
+ u64 freq_output;
+ u32 reg1, reg2;
+ u32 core_f;
+
+ reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
+ reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
+
+ core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_F_SHIFT;
+ core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_R_SHIFT;
+ core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
+
+ /* Compute PLL output frequency as follows:
+ *
+ * CORE_F / 16384
+ * freq_output = freq_reference * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ */
+ freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
+ MLXBF_GIGE_MDIO_COREPLL_CONST);
+ freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
+
+ return freq_output;
+}
+
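(Worked example, not part of the patch: plugging hypothetical register values into the formula above with the 156.25 MHz reference, CORE_F = 45056, CORE_R = 0 and CORE_OD = 0 gives roughly the 430 MHz i1clk the driver previously hard-coded. The standalone snippet just reproduces the arithmetic.)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq_reference = 156250000ULL;            /* MLXBF_GIGE_MDIO_FREQ_REFERENCE */
	uint32_t core_f = 45056, core_r = 0, core_od = 0;  /* hypothetical PLL settings */
	uint64_t i1clk;

	i1clk = freq_reference * core_f / 16384ULL;
	i1clk /= (uint64_t)(core_r + 1) * (core_od + 1);

	printf("i1clk = %llu Hz\n", (unsigned long long)i1clk); /* 429687500 */
	return 0;
}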
/* Formula for encoding the MDIO period. The encoded value is
* passed to the MDIO config register.
*
- * mdc_clk = 2*(val + 1)*i1clk
+ * mdc_clk = 2*(val + 1)*(core clock in sec)
*
- * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ * i1clk is in Hz:
+ * 400 ns = 2*(val + 1)*(1/i1clk)
*
- * val = (((400 * 430 / 1000) / 2) - 1)
+ * val = (((400/10^9) / (1/i1clk) / 2) - 1)
+ * val = (400/2 * i1clk)/10^9 - 1
*/
-#define MLXBF_GIGE_I1CLK_MHZ 430
-#define MLXBF_GIGE_MDC_CLK_NS 400
+static u8 mdio_period_map(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u64 i1clk;
-#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+ i1clk = calculate_i1clk(priv);
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
- MLXBF_GIGE_MDIO_PERIOD) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+ mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
+
+ return mdio_period;
+}
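(Rough check, not part of the patch: for an i1clk around 430 MHz the formula above encodes the same period value the driver used to hard-code, val = (400/2 * 430e6)/1e9 - 1 = 85.)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t i1clk = 430000000ULL;	/* hypothetical core clock in Hz */
	uint8_t val = (uint8_t)((400 / 2) * i1clk / 1000000000ULL - 1);

	printf("mdc period encoding = %u\n", val); /* 85 */
	return 0;
}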
static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
int phy_reg, u32 opcode)
@@ -117,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ /* The MDIO lock is set on read. To release it, clear gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
@@ -124,9 +189,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
int phy_reg, u16 val)
{
struct mlxbf_gige *priv = bus->priv;
+ u32 temp;
u32 cmd;
int ret;
- u32 temp;
if (phy_reg & MII_ADDR_C45)
return -EOPNOTSUPP;
@@ -141,21 +206,50 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
5, 1000000);
+ /* The MDIO lock is set on read. To release it, clear gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
+static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u32 val;
+
+ mdio_period = mdio_period_map(priv);
+
+ val = MLXBF_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+}
+
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
{
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
- /* Configure mdio parameters */
- writel(MLXBF_GIGE_MDIO_CFG_VAL,
- priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ /* The clk resource is shared with other drivers, so
+ * devm_platform_ioremap_resource() cannot be used.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
+ if (!res) {
+ /* For backward compatibility with older ACPI tables, also keep
+ * CLK resource internal to the driver.
+ */
+ res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ }
+
+ priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (!priv->clk_io)
+ return -ENOMEM;
+
+ mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
if (!priv->mdiobus) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 5fb33c9294bf..7be3a793984d 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,6 +8,8 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#define MLXBF_GIGE_VERSION 0x0000
+#define MLXBF_GIGE_VERSION_BF2 0x0
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 60232fb8ccd7..09bef04b11d1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -703,6 +703,9 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
/* cmd_mbox_config_profile_max_lag
* Maximum number of LAG IDs requested.
+ * Reserved on Spectrum-1/2/3; supported on Spectrum-4 and above.
+ * For Spectrum-4, firmware sets 128 for values between 1-128 and 256 for values
+ * between 129-256.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
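(Hypothetical illustration of the Spectrum-4 rounding described above; this helper is not part of the register definition.)

static u16 example_sp4_effective_max_lag(u16 requested)
{
	/* Firmware rounds 1..128 up to 128 and 129..256 up to 256. */
	return requested <= 128 ? 128 : 256;
}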
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 75553eb2c7f2..e2a985ec2c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -70,6 +70,8 @@ struct mlxsw_core {
struct workqueue_struct *emad_wq;
struct list_head rx_listener_list;
struct list_head event_listener_list;
+ struct list_head irq_event_handler_list;
+ struct mutex irq_event_handler_lock; /* Locks access to handlers list */
struct {
atomic64_t tid;
struct list_head trans_list;
@@ -184,6 +186,23 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_max_ports);
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (driver->profile->used_max_lag) {
+ *p_max_lag = driver->profile->max_lag;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
+ return -EIO;
+
+ *p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_max_lag);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
@@ -633,7 +652,7 @@ static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
return;
string = mlxsw_emad_string_tlv_string_data(string_tlv);
- strlcpy(trans->emad_err_string, string,
+ strscpy(trans->emad_err_string, string,
MLXSW_EMAD_STRING_TLV_STRING_LEN);
}
@@ -1305,21 +1324,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
extack);
}
-static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type port_type)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
- struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
- struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
-
- if (!mlxsw_driver->port_type_set)
- return -EOPNOTSUPP;
-
- return mlxsw_driver->port_type_set(mlxsw_core,
- mlxsw_core_port->local_port,
- port_type);
-}
-
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
@@ -1650,7 +1654,6 @@ static const struct devlink_ops mlxsw_devlink_ops = {
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
- .port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
.sb_pool_get = mlxsw_devlink_sb_pool_get,
@@ -2090,6 +2093,18 @@ static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
+static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
+{
+ INIT_LIST_HEAD(&mlxsw_core->irq_event_handler_list);
+ mutex_init(&mlxsw_core->irq_event_handler_lock);
+}
+
+static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core)
+{
+ mutex_destroy(&mlxsw_core->irq_event_handler_lock);
+ WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list));
+}
+
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
@@ -2101,6 +2116,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
size_t alloc_size;
+ u16 max_lag;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
@@ -2125,6 +2141,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus = mlxsw_bus;
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
+ mlxsw_core_irq_event_handler_init(mlxsw_core);
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->res);
@@ -2141,10 +2158,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_ports_init;
- if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
- MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
- alloc_size = sizeof(*mlxsw_core->lag.mapping) *
- MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
+ err = mlxsw_core_max_lag(mlxsw_core, &max_lag);
+ if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
@@ -2233,6 +2249,7 @@ err_ports_init:
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
devl_unlock(devlink);
devlink_free(devlink);
@@ -2302,6 +2319,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devl_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
devl_unlock(devlink);
devlink_free(devlink);
@@ -2772,6 +2790,57 @@ int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+struct mlxsw_core_irq_event_handler_item {
+ struct list_head list;
+ void (*cb)(struct mlxsw_core *mlxsw_core);
+};
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->cb = cb;
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_add_tail(&item->list, &mlxsw_core->irq_event_handler_list);
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register);
+
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item, *tmp;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry_safe(item, tmp,
+ &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb == cb) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister);
+
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb)
+ item->cb(mlxsw_core);
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call);
+
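(Minimal consumer sketch, hypothetical; the linecards code further below is the real in-tree user. A sub-module registers its callback at init time, removes it on teardown, and the callback runs in process context under the handler lock when mlxsw_core_irq_event_handlers_call() is invoked.)

static void example_irq_event_handler(struct mlxsw_core *mlxsw_core)
{
	/* Re-read whatever device state the interrupt may have changed. */
}

static int example_init(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core_irq_event_handler_register(mlxsw_core,
						     example_irq_event_handler);
}

static void example_fini(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core_irq_event_handler_unregister(mlxsw_core,
						example_irq_event_handler);
}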
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
@@ -3115,18 +3184,6 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_type_ib_set(devlink_port, NULL);
-}
-EXPORT_SYMBOL(mlxsw_core_port_ib_set);
-
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
@@ -3139,18 +3196,6 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
-enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u16 local_port)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- return devlink_port->type;
-}
-EXPORT_SYMBOL(mlxsw_core_port_type_get);
-
-
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 02d9cc2ef0c8..ca0c3d2bee6b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,6 +35,8 @@ struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
@@ -215,6 +217,14 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+typedef void mlxsw_irq_event_cb_t(struct mlxsw_core *mlxsw_core);
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core);
+
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload);
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -256,12 +266,8 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
-enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u16 local_port);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
@@ -291,6 +297,7 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
+ used_max_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -306,6 +313,7 @@ struct mlxsw_config_profile {
used_kvd_sizes:1,
used_cqe_time_stamp_type:1;
u8 max_vepa_channels;
+ u16 max_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -341,8 +349,6 @@ struct mlxsw_driver {
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
- int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
- enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 636db9a87457..9dfe7148199f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -737,8 +737,9 @@ mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
if (!cookie)
return ERR_PTR(-ENOMEM);
refcount_set(&cookie->ref_count, 1);
- memcpy(&cookie->fa_cookie, fa_cookie,
- sizeof(*fa_cookie) + fa_cookie->cookie_len);
+ cookie->fa_cookie = *fa_cookie;
+ memcpy(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
mlxsw_afa_cookie_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index ca59f0b946da..83d2dc91ba2c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -785,6 +785,21 @@ static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
}
+static void mlxsw_linecards_irq_event_handler(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ /* Handle change of line card active state. */
+ for (i = 0; i < linecards->count; i++) {
+ struct mlxsw_linecard *linecard = mlxsw_linecard_get(linecards,
+ i + 1);
+
+ mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ }
+}
+
static const char * const mlxsw_linecard_status_event_type_name[] = {
[MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
[MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
@@ -1238,7 +1253,6 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
{
struct devlink_linecard *devlink_linecard;
struct mlxsw_linecard *linecard;
- int err;
linecard = mlxsw_linecard_get(linecards, slot_index);
linecard->slot_index = slot_index;
@@ -1248,17 +1262,45 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
slot_index, &mlxsw_linecard_ops,
linecard);
- if (IS_ERR(devlink_linecard)) {
- err = PTR_ERR(devlink_linecard);
- goto err_devlink_linecard_create;
- }
+ if (IS_ERR(devlink_linecard))
+ return PTR_ERR(devlink_linecard);
+
linecard->devlink_linecard = devlink_linecard;
INIT_DELAYED_WORK(&linecard->status_event_to_dw,
&mlxsw_linecard_status_event_to_work);
+ return 0;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_event_delivery_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
if (err)
- goto err_event_delivery_set;
+ return err;
err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
linecard);
@@ -1269,29 +1311,18 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
err_status_get_and_process:
mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
-err_event_delivery_set:
- devlink_linecard_destroy(linecard->devlink_linecard);
-err_devlink_linecard_create:
- mutex_destroy(&linecard->lock);
return err;
}
-static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
- struct mlxsw_linecards *linecards,
- u8 slot_index)
+static void
+mlxsw_linecard_event_delivery_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
{
struct mlxsw_linecard *linecard;
linecard = mlxsw_linecard_get(linecards, slot_index);
mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
- cancel_delayed_work_sync(&linecard->status_event_to_dw);
- /* Make sure all scheduled events are processed */
- mlxsw_core_flush_owq();
- if (linecard->active)
- mlxsw_linecard_active_clear(linecard);
- mlxsw_linecard_bdev_del(linecard);
- devlink_linecard_destroy(linecard->devlink_linecard);
- mutex_destroy(&linecard->lock);
}
/* LINECARDS INI BUNDLE FILE
@@ -1505,6 +1536,11 @@ int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_traps_register;
+ err = mlxsw_core_irq_event_handler_register(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+ if (err)
+ goto err_irq_event_handler_register;
+
mlxsw_core_linecards_set(mlxsw_core, linecards);
for (i = 0; i < linecards->count; i++) {
@@ -1513,11 +1549,25 @@ int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
goto err_linecard_init;
}
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_event_delivery_init(mlxsw_core, linecards,
+ i + 1);
+ if (err)
+ goto err_linecard_event_delivery_init;
+ }
+
return 0;
+err_linecard_event_delivery_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ i = linecards->count;
err_linecard_init:
for (i--; i >= 0; i--)
mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+err_irq_event_handler_register:
mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
ARRAY_SIZE(mlxsw_linecard_listener),
mlxsw_core);
@@ -1536,7 +1586,11 @@ void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
if (!linecards)
return;
for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ for (i = 0; i < linecards->count; i++)
mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
ARRAY_SIZE(mlxsw_linecard_listener),
mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 3548fe1df7c8..987fe5c9d5a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -21,7 +21,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
-#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MIN_STATE 2
#define MLXSW_THERMAL_MAX_DUTY 255
@@ -101,8 +100,6 @@ struct mlxsw_thermal {
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
- unsigned int tz_highest_score;
- struct thermal_zone_device *tz_highest_dev;
struct mlxsw_thermal_area line_cards[];
};
@@ -193,34 +190,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
return 0;
}
-static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal,
- struct thermal_zone_device *tzdev,
- struct mlxsw_thermal_trip *trips,
- int temp)
-{
- struct mlxsw_thermal_trip *trip = trips;
- unsigned int score, delta, i, shift = 1;
-
- /* Calculate thermal zone score, if temperature is above the hot
- * threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX.
- */
- score = MLXSW_THERMAL_TEMP_SCORE_MAX;
- for (i = MLXSW_THERMAL_TEMP_TRIP_NORM; i < MLXSW_THERMAL_NUM_TRIPS;
- i++, trip++) {
- if (temp < trip->temp) {
- delta = DIV_ROUND_CLOSEST(temp, trip->temp - temp);
- score = delta * shift;
- break;
- }
- shift *= 256;
- }
-
- if (score > thermal->tz_highest_score) {
- thermal->tz_highest_score = score;
- thermal->tz_highest_dev = tzdev;
- }
-}
-
static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
struct thermal_cooling_device *cdev)
{
@@ -286,9 +255,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return err;
}
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
- if (temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips,
- temp);
*p_temp = temp;
return 0;
@@ -349,21 +315,6 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
return 0;
}
-static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trend *trend)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- if (tzdev == thermal->tz_highest_dev)
- return 1;
-
- *trend = THERMAL_TREND_STABLE;
- return 0;
-}
-
static struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
@@ -377,7 +328,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.set_trip_temp = mlxsw_thermal_set_trip_temp,
.get_trip_hyst = mlxsw_thermal_get_trip_hyst,
.set_trip_hyst = mlxsw_thermal_set_trip_hyst,
- .get_trend = mlxsw_thermal_trend_get,
};
static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
@@ -463,7 +413,6 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
int temp, crit_temp, emerg_temp;
struct device *dev;
u16 sensor_index;
- int err;
dev = thermal->bus_info->dev;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module;
@@ -479,10 +428,8 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
/* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
- crit_temp, emerg_temp);
- if (!err && temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
+ mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
+ crit_temp, emerg_temp);
return 0;
}
@@ -546,22 +493,6 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
-static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trend *trend)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
- struct mlxsw_thermal *thermal = tz->parent;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- if (tzdev == thermal->tz_highest_dev)
- return 1;
-
- *trend = THERMAL_TREND_STABLE;
- return 0;
-}
-
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@@ -571,7 +502,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@@ -592,8 +522,6 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return err;
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
- if (temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
*p_temp = temp;
return 0;
@@ -608,7 +536,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index ce843ea91464..f5f5f8dc3d19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/platform_data/mlxreg.h>
#include <linux/slab.h>
#include "cmd.h"
@@ -51,6 +52,15 @@
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
+/* The driver can be initialized by the kernel platform driver or from user
+ * space. In the first case the IRQ line number is passed through the platform
+ * data; otherwise the default IRQ line is used. The default IRQ is relevant
+ * only for a specific I2C slave address, which allows a 3.4 MHz I2C path to
+ * the chip (a special hardware feature for I2C acceleration).
+ */
+#define MLXSW_I2C_DEFAULT_IRQ 17
+#define MLXSW_FAST_I2C_SLAVE 0x37
+
/**
* struct mlxsw_i2c - device private data:
* @cmd: command attributes;
@@ -63,6 +73,9 @@
* @core: switch core pointer;
* @bus_info: bus info block;
* @block_size: maximum block size allowed to pass to under layer;
+ * @pdata: device platform data;
+ * @irq_work: interrupts work item;
+ * @irq: IRQ line number;
*/
struct mlxsw_i2c {
struct {
@@ -76,6 +89,9 @@ struct mlxsw_i2c {
struct mlxsw_core *core;
struct mlxsw_bus_info bus_info;
u16 block_size;
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct work_struct irq_work;
+ int irq;
};
#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
@@ -546,6 +562,67 @@ static void mlxsw_i2c_fini(void *bus_priv)
mlxsw_i2c->core = NULL;
}
+static void mlxsw_i2c_work_handler(struct work_struct *work)
+{
+ struct mlxsw_i2c *mlxsw_i2c;
+
+ mlxsw_i2c = container_of(work, struct mlxsw_i2c, irq_work);
+ mlxsw_core_irq_event_handlers_call(mlxsw_i2c->core);
+}
+
+static irqreturn_t mlxsw_i2c_irq_handler(int irq, void *dev)
+{
+ struct mlxsw_i2c *mlxsw_i2c = dev;
+
+ mlxsw_core_schedule_work(&mlxsw_i2c->irq_work);
+
+ /* This interrupt handler shares the IRQ line with the 'main' interrupt
+ * handler. Return IRQ_NONE here; the main handler returns IRQ_HANDLED.
+ */
+ return IRQ_NONE;
+}
+
+static int mlxsw_i2c_irq_init(struct mlxsw_i2c *mlxsw_i2c, u8 addr)
+{
+ int err;
+
+ /* Initialize the interrupt handler only if the system hotplug driver is
+ * reachable; otherwise the interrupt line is not enabled, interrupts are
+ * not raised to the CPU and the request_irq() call would not be valid.
+ */
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG))
+ return 0;
+
+ /* Set default interrupt line. */
+ if (mlxsw_i2c->pdata && mlxsw_i2c->pdata->irq)
+ mlxsw_i2c->irq = mlxsw_i2c->pdata->irq;
+ else if (addr == MLXSW_FAST_I2C_SLAVE)
+ mlxsw_i2c->irq = MLXSW_I2C_DEFAULT_IRQ;
+
+ if (!mlxsw_i2c->irq)
+ return 0;
+
+ INIT_WORK(&mlxsw_i2c->irq_work, mlxsw_i2c_work_handler);
+ err = request_irq(mlxsw_i2c->irq, mlxsw_i2c_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED, "mlxsw-i2c",
+ mlxsw_i2c);
+ if (err) {
+ dev_err(mlxsw_i2c->bus_info.dev, "Failed to request irq: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_i2c_irq_fini(struct mlxsw_i2c *mlxsw_i2c)
+{
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG) || !mlxsw_i2c->irq)
+ return;
+ cancel_work_sync(&mlxsw_i2c->irq_work);
+ free_irq(mlxsw_i2c->irq, mlxsw_i2c);
+}
+
static const struct mlxsw_bus mlxsw_i2c_bus = {
.kind = "i2c",
.init = mlxsw_i2c_init,
@@ -638,17 +715,24 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->bus_info.dev = &client->dev;
mlxsw_i2c->bus_info.low_frequency = true;
mlxsw_i2c->dev = &client->dev;
+ mlxsw_i2c->pdata = client->dev.platform_data;
+
+ err = mlxsw_i2c_irq_init(mlxsw_i2c, client->addr);
+ if (err)
+ goto errout;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c, false,
NULL, NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
- return err;
+ goto err_bus_device_register;
}
return 0;
+err_bus_device_register:
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
errout:
mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
@@ -656,14 +740,13 @@ errout:
return err;
}
-static int mlxsw_i2c_remove(struct i2c_client *client)
+static void mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
mutex_destroy(&mlxsw_i2c->cmd.lock);
-
- return 0;
}
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index bb1cd4bae82e..55b3c42bb007 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -26,20 +26,29 @@ static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
struct mlxsw_m_port;
+struct mlxsw_m_line_card {
+ bool active;
+ int module_to_port[];
+};
+
struct mlxsw_m {
struct mlxsw_m_port **ports;
- int *module_to_port;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
u8 base_mac[ETH_ALEN];
u8 max_ports;
+ u8 max_modules_per_slot; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mlxsw_m_line_card **line_cards;
};
struct mlxsw_m_port {
struct net_device *dev;
struct mlxsw_m *mlxsw_m;
u16 local_port;
+ u8 slot_index;
u8 module;
+ u8 module_offset;
};
static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
@@ -94,14 +103,14 @@ static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- strlcpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
+ strscpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
mlxsw_m->bus_info->fw_rev.major,
mlxsw_m->bus_info->fw_rev.minor,
mlxsw_m->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
+ strscpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
sizeof(drvinfo->bus_info));
}
@@ -111,8 +120,9 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module,
- modinfo);
+ return mlxsw_env_get_module_info(netdev, core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module, modinfo);
}
static int
@@ -122,7 +132,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom(netdev, core, 0,
+ return mlxsw_env_get_module_eeprom(netdev, core,
+ mlxsw_m_port->slot_index,
mlxsw_m_port->module, ee, data);
}
@@ -134,7 +145,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom_by_page(core, 0,
+ return mlxsw_env_get_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
mlxsw_m_port->module,
page, extack);
}
@@ -144,7 +156,8 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module,
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
flags);
}
@@ -156,7 +169,8 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module,
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
params, extack);
}
@@ -168,7 +182,8 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module,
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
params->policy, extack);
}
@@ -184,7 +199,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
static int
mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
- u8 *p_module, u8 *p_width)
+ u8 *p_module, u8 *p_width, u8 *p_slot_index)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -195,6 +210,7 @@ mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
return err;
*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
return 0;
}
@@ -212,18 +228,25 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
if (err)
return err;
mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
- eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1 +
+ mlxsw_m_port->module_offset);
return 0;
}
+static bool mlxsw_m_port_created(struct mlxsw_m *mlxsw_m, u16 local_port)
+{
+ return mlxsw_m->ports[local_port];
+}
+
static int
-mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 slot_index,
+ u8 module)
{
struct mlxsw_m_port *mlxsw_m_port;
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0,
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, slot_index,
module + 1, false, 0, false,
0, mlxsw_m->base_mac,
sizeof(mlxsw_m->base_mac));
@@ -246,6 +269,15 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
mlxsw_m_port->mlxsw_m = mlxsw_m;
mlxsw_m_port->local_port = local_port;
mlxsw_m_port->module = module;
+ mlxsw_m_port->slot_index = slot_index;
+ /* Add the module offset for the line card. The offset for the main
+ * board is zero. For a line card in slot #n the offset is (#n - 1)
+ * multiplied by the maximum number of modules that can be found on a
+ * line card.
+ */
+ mlxsw_m_port->module_offset = mlxsw_m_port->slot_index ?
+ (mlxsw_m_port->slot_index - 1) *
+ mlxsw_m->max_modules_per_slot : 0;
dev->netdev_ops = &mlxsw_m_port_netdev_ops;
dev->ethtool_ops = &mlxsw_m_port_ethtool_ops;
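
The module_offset computed above is folded into the generated MAC address so that ports on different line cards do not collide. Below is a small standalone sketch of the same arithmetic; the per-slot module maximum is illustrative only, the driver reads the real value from the MGPIR register.

#include <stdio.h>

/* Illustrative only: the driver reads this from the MGPIR register. */
#define MAX_MODULES_PER_SLOT 16

/* Flat module index: the main board (slot 0) has no offset, a line card
 * in slot n starts at (n - 1) * MAX_MODULES_PER_SLOT.
 */
static unsigned int module_offset(unsigned int slot_index)
{
	return slot_index ? (slot_index - 1) * MAX_MODULES_PER_SLOT : 0;
}

int main(void)
{
	/* Module 3 on the main board -> 3; module 3 on line card 2 -> 19. */
	printf("%u %u\n", 3 + module_offset(0), 3 + module_offset(2));
	return 0;
}
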
@@ -291,19 +323,29 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
mlxsw_core_port_fini(mlxsw_m->core, local_port);
}
+static int*
+mlxsw_m_port_mapping_get(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
+{
+ return &mlxsw_m->line_cards[slot_index]->module_to_port[module];
+}
+
static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *last_module)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- u8 module, width;
+ u8 module, width, slot_index;
+ int *module_to_port;
int err;
/* Fill out to local port mapping array */
err = mlxsw_m_port_module_info_get(mlxsw_m, local_port, &module,
- &width);
+ &width, &slot_index);
if (err)
return err;
+ /* Skip if the line card has already been configured */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ return 0;
if (!width)
return 0;
/* Skip, if port belongs to the cluster */
@@ -313,91 +355,220 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
- mlxsw_env_module_port_map(mlxsw_m->core, 0, module);
- mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
+ mlxsw_env_module_port_map(mlxsw_m->core, slot_index, module);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, module);
+ *module_to_port = local_port;
return 0;
}
-static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
+static void
+mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
{
- mlxsw_m->module_to_port[module] = -1;
- mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module);
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index,
+ module);
+ *module_to_port = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, slot_index, module);
}
-static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+static int mlxsw_m_linecards_init(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- u8 last_module = max_ports;
- int i;
- int err;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 num_of_modules;
+ int i, j, err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &num_of_modules,
+ &mlxsw_m->num_of_slots);
+ /* If the system is modular, get the maximum number of modules per slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ if (mlxsw_m->num_of_slots)
+ mlxsw_m->max_modules_per_slot =
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl);
+ else
+ mlxsw_m->max_modules_per_slot = num_of_modules;
+ /* Add slot for main board. */
+ mlxsw_m->num_of_slots += 1;
mlxsw_m->ports = kcalloc(max_ports, sizeof(*mlxsw_m->ports),
GFP_KERNEL);
if (!mlxsw_m->ports)
return -ENOMEM;
- mlxsw_m->module_to_port = kmalloc_array(max_ports, sizeof(int),
- GFP_KERNEL);
- if (!mlxsw_m->module_to_port) {
+ mlxsw_m->line_cards = kcalloc(mlxsw_m->num_of_slots,
+ sizeof(*mlxsw_m->line_cards),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards) {
err = -ENOMEM;
- goto err_module_to_port_alloc;
+ goto err_kcalloc;
}
- /* Invalidate the entries of module to local port mapping array */
- for (i = 0; i < max_ports; i++)
- mlxsw_m->module_to_port[i] = -1;
+ for (i = 0; i < mlxsw_m->num_of_slots; i++) {
+ mlxsw_m->line_cards[i] =
+ kzalloc(struct_size(mlxsw_m->line_cards[i],
+ module_to_port,
+ mlxsw_m->max_modules_per_slot),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards[i]) {
+ err = -ENOMEM;
+ goto err_kmalloc_array;
+ }
- /* Fill out module to local port mapping array */
- for (i = 1; i < max_ports; i++) {
- err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
- if (err)
- goto err_module_to_port_map;
+ /* Invalidate the entries of module to local port mapping array. */
+ for (j = 0; j < mlxsw_m->max_modules_per_slot; j++)
+ mlxsw_m->line_cards[i]->module_to_port[j] = -1;
}
- /* Create port objects for each valid entry */
- for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0) {
- err = mlxsw_m_port_create(mlxsw_m,
- mlxsw_m->module_to_port[i],
- i);
+ return 0;
+
+err_kmalloc_array:
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+err_kcalloc:
+ kfree(mlxsw_m->ports);
+ return err;
+}
+
+static void mlxsw_m_linecards_fini(struct mlxsw_m *mlxsw_m)
+{
+ int i = mlxsw_m->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+ kfree(mlxsw_m->line_cards);
+ kfree(mlxsw_m->ports);
+}
+
+static void
+mlxsw_m_linecard_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int i;
+
+ for (i = mlxsw_m->max_modules_per_slot - 1; i >= 0; i--) {
+ int *module_to_port;
+
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0)
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
+ }
+}
+
+static int
+mlxsw_m_linecard_ports_create(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int *module_to_port;
+ int i, err;
+
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0) {
+ err = mlxsw_m_port_create(mlxsw_m, *module_to_port,
+ slot_index, i);
if (err)
- goto err_module_to_port_create;
+ goto err_port_create;
+ /* Mark slot as active */
+ if (!mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = true;
}
}
-
return 0;
-err_module_to_port_create:
+err_port_create:
for (i--; i >= 0; i--) {
- if (mlxsw_m->module_to_port[i] > 0)
- mlxsw_m_port_remove(mlxsw_m,
- mlxsw_m->module_to_port[i]);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ /* Mark slot as inactive */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = false;
+ }
}
- i = max_ports;
-err_module_to_port_map:
- for (i--; i > 0; i--)
- mlxsw_m_port_module_unmap(mlxsw_m, i);
- kfree(mlxsw_m->module_to_port);
-err_module_to_port_alloc:
- kfree(mlxsw_m->ports);
return err;
}
-static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+static void
+mlxsw_m_linecard_ports_remove(struct mlxsw_m *mlxsw_m, u8 slot_index)
{
int i;
- for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0) {
- mlxsw_m_port_remove(mlxsw_m,
- mlxsw_m->module_to_port[i]);
- mlxsw_m_port_module_unmap(mlxsw_m, i);
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m,
+ slot_index, i);
+
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
}
}
+}
- kfree(mlxsw_m->module_to_port);
- kfree(mlxsw_m->ports);
+static int mlxsw_m_ports_module_map(struct mlxsw_m *mlxsw_m)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ u8 last_module = max_ports;
+ int i, err;
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+{
+ int err;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, 0);
+ if (err)
+ goto err_linecard_ports_create;
+
+ return 0;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, 0);
+
+ return err;
+}
+
+static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+{
+ mlxsw_m_linecard_ports_remove(mlxsw_m, 0);
+}
+
+static void
+mlxsw_m_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_linecard *linecard_priv = priv;
+ struct mlxsw_m_line_card *linecard;
+
+ linecard = mlxsw_m->line_cards[linecard_priv->slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, linecard_priv->slot_index);
+ linecard->active = false;
}
static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
@@ -418,6 +589,60 @@ static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
return -EINVAL;
}
+static void
+mlxsw_m_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+ int err;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+ /* Skip if the line card has already been configured during init */
+ if (linecard->active)
+ return;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, slot_index);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create port for line card at slot %d\n",
+ slot_index);
+ goto err_linecard_ports_create;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, slot_index);
+}
+
+static void
+mlxsw_m_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, slot_index);
+ linecard->active = false;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_m_event_ops = {
+ .got_active = mlxsw_m_got_active,
+ .got_inactive = mlxsw_m_got_inactive,
+};
+
static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack)
@@ -438,13 +663,33 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_m_linecards_init(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create line cards\n");
+ return err;
+ }
+
+ err = mlxsw_linecards_event_ops_register(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to register line cards operations\n");
+ goto linecards_event_ops_register;
+ }
+
err = mlxsw_m_ports_create(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
- return err;
+ goto err_ports_create;
}
return 0;
+
+err_ports_create:
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+linecards_event_ops_register:
+ mlxsw_m_linecards_fini(mlxsw_m);
+ return err;
}
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
@@ -452,6 +697,9 @@ static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_m_ports_remove(mlxsw_m);
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ mlxsw_m_linecards_fini(mlxsw_m);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
@@ -461,6 +709,7 @@ static struct mlxsw_driver mlxsw_m_driver = {
.priv_size = sizeof(struct mlxsw_m),
.init = mlxsw_m_init,
.fini = mlxsw_m_fini,
+ .ports_remove_selected = mlxsw_m_ports_remove_selected,
.profile = &mlxsw_m_config_profile,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 50527adc5b5a..c968309657dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1187,6 +1187,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
+ profile->max_lag);
+ }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index f27bdecdf952..0777bed5bb1a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2218,76 +2218,6 @@ static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
mlxsw_reg_smpe_evid_set(payload, evid);
}
-/* SFTR-V2 - Switch Flooding Table Version 2 Register
- * --------------------------------------------------
- * The switch flooding table is used for flooding packet replication. The table
- * defines a bit mask of ports for packet replication.
- */
-#define MLXSW_REG_SFTR2_ID 0x202F
-#define MLXSW_REG_SFTR2_LEN 0x120
-
-MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN);
-
-/* reg_sftr2_swid
- * Switch partition ID with which to associate the port.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8);
-
-/* reg_sftr2_flood_table
- * Flooding table index to associate with the specific type on the specific
- * switch partition.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6);
-
-/* reg_sftr2_index
- * Index. Used as an index into the Flooding Table in case the table is
- * configured to use VID / FID or FID Offset.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
-
-/* reg_sftr2_table_type
- * See mlxsw_flood_table_type
- * Access: RW
- */
-MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
-
-/* reg_sftr2_range
- * Range of entries to update
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
-
-/* reg_sftr2_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
-
-/* reg_sftr2_port_mask
- * Local port mask (1 bit per port).
- * Access: WO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
-
-static inline void mlxsw_reg_sftr2_pack(char *payload,
- unsigned int flood_table,
- unsigned int index,
- enum mlxsw_flood_table_type table_type,
- unsigned int range, u16 port, bool set)
-{
- MLXSW_REG_ZERO(sftr2, payload);
- mlxsw_reg_sftr2_swid_set(payload, 0);
- mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
- mlxsw_reg_sftr2_index_set(payload, index);
- mlxsw_reg_sftr2_table_type_set(payload, table_type);
- mlxsw_reg_sftr2_range_set(payload, range);
- mlxsw_reg_sftr2_port_set(payload, port, set);
- mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
-}
-
/* SMID-V2 - Switch Multicast ID Version 2 Register
* ------------------------------------------------
* The MID record maps from a MID (Multicast ID), which is a unique identifier
@@ -4729,25 +4659,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
-/* reg_ptys_ib_link_width_cap
- * IB port supported widths.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16);
-
-#define MLXSW_REG_PTYS_IB_SPEED_SDR BIT(0)
-#define MLXSW_REG_PTYS_IB_SPEED_DDR BIT(1)
-#define MLXSW_REG_PTYS_IB_SPEED_QDR BIT(2)
-#define MLXSW_REG_PTYS_IB_SPEED_FDR10 BIT(3)
-#define MLXSW_REG_PTYS_IB_SPEED_FDR BIT(4)
-#define MLXSW_REG_PTYS_IB_SPEED_EDR BIT(5)
-
-/* reg_ptys_ib_proto_cap
- * IB port supported speeds and protocols.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16);
-
/* reg_ptys_ext_eth_proto_admin
* Extended speed and protocol to set port to.
* Access: RW
@@ -4760,18 +4671,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_admin, 0x14, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
-/* reg_ptys_ib_link_width_admin
- * IB width to set port to.
- * Access: RW
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16);
-
-/* reg_ptys_ib_proto_admin
- * IB speeds and protocols to set port to.
- * Access: RW
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16);
-
/* reg_ptys_ext_eth_proto_oper
* The extended current speed and protocol configured for the port.
* Access: RO
@@ -4784,18 +4683,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_oper, 0x20, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
-/* reg_ptys_ib_link_width_oper
- * The current IB width to set port to.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16);
-
-/* reg_ptys_ib_proto_oper
- * The current IB speed and protocol.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
-
enum mlxsw_reg_ptys_connector_type {
MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR,
MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE,
@@ -4866,33 +4753,6 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
-static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port,
- u16 proto_admin, u16 link_width)
-{
- MLXSW_REG_ZERO(ptys, payload);
- mlxsw_reg_ptys_local_port_set(payload, local_port);
- mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_IB);
- mlxsw_reg_ptys_ib_proto_admin_set(payload, proto_admin);
- mlxsw_reg_ptys_ib_link_width_admin_set(payload, link_width);
-}
-
-static inline void mlxsw_reg_ptys_ib_unpack(char *payload, u16 *p_ib_proto_cap,
- u16 *p_ib_link_width_cap,
- u16 *p_ib_proto_oper,
- u16 *p_ib_link_width_oper)
-{
- if (p_ib_proto_cap)
- *p_ib_proto_cap = mlxsw_reg_ptys_ib_proto_cap_get(payload);
- if (p_ib_link_width_cap)
- *p_ib_link_width_cap =
- mlxsw_reg_ptys_ib_link_width_cap_get(payload);
- if (p_ib_proto_oper)
- *p_ib_proto_oper = mlxsw_reg_ptys_ib_proto_oper_get(payload);
- if (p_ib_link_width_oper)
- *p_ib_link_width_oper =
- mlxsw_reg_ptys_ib_link_width_oper_get(payload);
-}
-
/* PPAD - Port Physical Address Register
* -------------------------------------
* The PPAD register configures the per port physical MAC address.
@@ -5666,27 +5526,6 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
-/* PLIB - Port Local to InfiniBand Port
- * ------------------------------------
- * The PLIB register performs mapping from Local Port into InfiniBand Port.
- */
-#define MLXSW_REG_PLIB_ID 0x500A
-#define MLXSW_REG_PLIB_LEN 0x10
-
-MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
-
-/* reg_plib_local_port
- * Local port number.
- * Access: Index
- */
-MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12);
-
-/* reg_plib_ib_port
- * InfiniBand port remapping for local_port.
- * Access: RW
- */
-MLXSW_ITEM32(reg, plib, ib_port, 0x00, 0, 8);
-
/* PPTB - Port Prio To Buffer Register
* -----------------------------------
* Configures the switch priority to buffer table.
@@ -12924,7 +12763,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvc),
MLXSW_REG(spevet),
MLXSW_REG(smpe),
- MLXSW_REG(sftr2),
MLXSW_REG(smid2),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
@@ -12962,7 +12800,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(paos),
MLXSW_REG(pfcc),
MLXSW_REG(ppcnt),
- MLXSW_REG(plib),
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e240cdd9cbd..5bcf5bceff71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
@@ -2691,6 +2691,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u16 max_lag;
u32 seed;
int err;
@@ -2709,12 +2710,14 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
return -EIO;
- mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
- sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
GFP_KERNEL);
if (!mlxsw_sp->lags)
return -ENOMEM;
@@ -3509,6 +3512,33 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
+/* Reduce the number of LAGs from full capacity (256) to the maximum number of
+ * LAGs supported in Spectrum-2/3, to avoid a regression in the number of free
+ * entries in the PGT table.
+ */
+#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
+
+static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
+ .used_max_lag = 1,
+ .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -4039,7 +4069,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp2_config_profile,
+ .profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4263,10 +4293,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
- u64 max_lag;
- int i;
+ u16 max_lag;
+ int err, i;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
- max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
for (i = 0; i < max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 915dffb85a1c..dcd79d7e2af4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -14,16 +14,16 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ strscpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+ strscpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
mlxsw_sp->bus_info->fw_rev.major,
mlxsw_sp->bus_info->fw_rev.minor,
mlxsw_sp->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ strscpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 2e0b704b8a31..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
* enabled.
*/
struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
};
struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_ptp_traps_set;
refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
return &ptp_state->common;
err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ mutex_destroy(&ptp_state->lock);
mlxsw_sp_ptp_traps_unset(mlxsw_sp);
kfree(ptp_state);
}
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
*config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
return 0;
}
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
return -EINVAL;
}
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
*p_ing_types = ing_types;
*p_egr_types = egr_types;
return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config)
{
+ struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
&new_egr_types, &rx_filter);
if (err)
- return err;
+ goto err_get_message_types;
new_config.flags = config->flags;
new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
new_egr_types, new_config);
if (err)
- return err;
+ goto err_configure_port;
} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
if (err)
- return err;
+ goto err_deconfigure_port;
}
mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
/* Notify the ioctl caller what we are actually timestamping. */
config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 2d1628fdefc1..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2c4443c6b964..48f1fa62a4fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1819,7 +1819,7 @@ void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
/* The configuration where several tunnels have the same local address in the
* same underlay table needs special treatment in the HW. That is currently not
* implemented in the driver. This function finds and demotes the first tunnel
- * with a given source address, except the one passed in in the argument
+ * with a given source address, except the one passed in the argument
* `except'.
*/
bool
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 39904dacf4f0..b3472fb94617 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+ 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 6f34a61739b6..fecd43754cea 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -403,7 +403,7 @@ struct ks8851_net {
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
struct regulator *vdd_io;
- int gpio;
+ struct gpio_desc *gpio;
struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 691206f19ea7..cfbc900d4aeb 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -17,10 +17,9 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -703,9 +702,9 @@ static const struct net_device_ops ks8851_netdev_ops = {
static void ks8851_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *di)
{
- strlcpy(di->driver, "KS8851", sizeof(di->driver));
- strlcpy(di->version, "1.00", sizeof(di->version));
- strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+ strscpy(di->driver, "KS8851", sizeof(di->driver));
+ strscpy(di->version, "1.00", sizeof(di->version));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
}
static u32 ks8851_get_msglevel(struct net_device *dev)
@@ -1117,24 +1116,23 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
{
struct ks8851_net *ks = netdev_priv(netdev);
unsigned cider;
- int gpio;
int ret;
ks->netdev = netdev;
ks->tx_space = 6144;
- gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
- if (gpio == -EPROBE_DEFER)
- return gpio;
-
- ks->gpio = gpio;
- if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(dev, gpio,
- GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
- if (ret) {
- dev_err(dev, "reset gpio request failed\n");
- return ret;
- }
+ ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(ks->gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "reset gpio request failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(ks->gpio, "ks8851_rst_n");
+ if (ret) {
+ dev_err(dev, "failed to set reset gpio name: %d\n", ret);
+ return ret;
}
ks->vdd_io = devm_regulator_get(dev, "vdd-io");
@@ -1161,9 +1159,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
goto err_reg;
}
- if (gpio_is_valid(gpio)) {
+ if (ks->gpio) {
usleep_range(10000, 11000);
- gpio_set_value(gpio, 1);
+ gpiod_set_value_cansleep(ks->gpio, 0);
}
spin_lock_init(&ks->statelock);
@@ -1239,8 +1237,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
err_id:
ks8851_unregister_mdiobus(ks);
err_mdio:
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, 0);
+ if (ks->gpio)
+ gpiod_set_value_cansleep(ks->gpio, 1);
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1259,8 +1257,8 @@ void ks8851_remove_common(struct device *dev)
dev_info(dev, "remove\n");
unregister_netdev(priv->netdev);
- if (gpio_is_valid(priv->gpio))
- gpio_set_value(priv->gpio, 0);
+ if (priv->gpio)
+ gpiod_set_value_cansleep(priv->gpio, 1);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
}
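
The ks8851 conversion above moves from legacy GPIO numbers to the gpiod consumer API, where values are logical ('1' = reset asserted) and the wire polarity comes from the device tree flags rather than hard-coded levels. The following kernel-style sketch shows the same optional-reset pattern with dev_err_probe() on the error path; the "reset" consumer name and the delay are illustrative, not taken from this patch.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Sketch only: request an optional, polarity-agnostic reset line,
 * hold the chip in reset briefly, then release it.
 */
static int example_reset_init(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *rst;

	/* Logical 1 = reset asserted; DT flags decide the physical level. */
	rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(rst))
		return dev_err_probe(dev, PTR_ERR(rst),
				     "reset gpio request failed\n");

	if (rst) {
		usleep_range(10000, 11000);
		gpiod_set_value_cansleep(rst, 0);	/* release reset */
	}

	*out = rst;
	return 0;
}
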
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 82d55fc27edc..70bc7253454f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -413,7 +413,8 @@ static int ks8851_probe_spi(struct spi_device *spi)
spi->bits_per_word = 8;
- ks = netdev_priv(netdev);
+ kss = netdev_priv(netdev);
+ ks = &kss->ks8851;
ks->lock = ks8851_lock_spi;
ks->unlock = ks8851_unlock_spi;
@@ -433,8 +434,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
IRQ_RXPSI) /* RX process stop */
ks->rc_ier = STD_IRQ;
- kss = to_ks8851_spi(ks);
-
kss->spidev = spi;
mutex_init(&kss->lock);
INIT_WORK(&kss->tx_work, ks8851_tx_work);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2b3eb5ed8233..468520079c65 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5998,9 +5998,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(hw_priv->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 559ad94a44d0..176efbeae127 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1467,9 +1467,9 @@ static void enc28j60_restart_work_handler(struct work_struct *work)
static void
enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info,
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info,
dev_name(dev->dev.parent), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index dc1840cb5b10..d7c8aa77ec75 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -925,9 +925,9 @@ static void encx24j600_get_regs(struct net_device *dev,
static void encx24j600_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index b1c74e6cb012..c739d60ee17d 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -579,8 +579,8 @@ static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info,
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info,
pci_name(adapter->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a9a1dea6d731..50eeecba1f18 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1585,6 +1585,9 @@ static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
rfctl |= RFE_CTL_AM_;
}
+ if (netdev->features & NETIF_F_RXCSUM)
+ rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
+
memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
if (netdev_mc_count(netdev)) {
struct netdev_hw_addr *ha;
@@ -2066,11 +2069,13 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
{
int required_number_of_descriptors = 0;
unsigned int start_frame_length = 0;
+ netdev_tx_t retval = NETDEV_TX_OK;
unsigned int frame_length = 0;
unsigned int head_length = 0;
unsigned long irq_flags = 0;
bool do_timestamp = false;
bool ignore_sync = false;
+ struct netdev_queue *txq;
int nr_frags = 0;
bool gso = false;
int j;
@@ -2083,9 +2088,12 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors > (tx->ring_size - 1)) {
dev_kfree_skb_irq(skb);
} else {
- /* save to overflow buffer */
- tx->overflow_skb = skb;
- netif_stop_queue(tx->adapter->netdev);
+ /* Remember how many free descriptors are needed before restarting the queue */
+ tx->rqd_descriptors = required_number_of_descriptors;
+ retval = NETDEV_TX_BUSY;
+ txq = netdev_get_tx_queue(tx->adapter->netdev,
+ tx->channel_number);
+ netif_tx_stop_queue(txq);
}
goto unlock;
}
@@ -2144,15 +2152,15 @@ finish:
unlock:
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
- return NETDEV_TX_OK;
+ return retval;
}
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
struct lan743x_adapter *adapter = tx->adapter;
- bool start_transmitter = false;
unsigned long irq_flags = 0;
+ struct netdev_queue *txq;
u32 ioc_bit = 0;
ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
@@ -2163,24 +2171,20 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
/* clean up tx ring */
lan743x_tx_release_completed_descriptors(tx);
- if (netif_queue_stopped(adapter->netdev)) {
- if (tx->overflow_skb) {
- if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
- lan743x_tx_get_avail_desc(tx))
- start_transmitter = true;
+ txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
+ if (netif_tx_queue_stopped(txq)) {
+ if (tx->rqd_descriptors) {
+ if (tx->rqd_descriptors <=
+ lan743x_tx_get_avail_desc(tx)) {
+ tx->rqd_descriptors = 0;
+ netif_tx_wake_queue(txq);
+ }
} else {
- netif_wake_queue(adapter->netdev);
+ netif_tx_wake_queue(txq);
}
}
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
- if (start_transmitter) {
- /* space is now available, transmit overflow skb */
- lan743x_tx_xmit_frame(tx, tx->overflow_skb);
- tx->overflow_skb = NULL;
- netif_wake_queue(adapter->netdev);
- }
-
if (!napi_complete(napi))
goto done;
@@ -2304,10 +2308,7 @@ static void lan743x_tx_close(struct lan743x_tx *tx)
lan743x_tx_release_all_descriptors(tx);
- if (tx->overflow_skb) {
- dev_kfree_skb(tx->overflow_skb);
- tx->overflow_skb = NULL;
- }
+ tx->rqd_descriptors = 0;
lan743x_tx_ring_cleanup(tx);
}
@@ -2387,7 +2388,7 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
(tx->channel_number));
netif_napi_add_tx_weight(adapter->netdev,
&tx->napi, lan743x_tx_napi_poll,
- tx->ring_size - 1);
+ NAPI_POLL_WEIGHT);
napi_enable(&tx->napi);
data = 0;
@@ -2549,6 +2550,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
struct lan743x_rx_buffer_info *buffer_info;
int frame_length, buffer_length;
+ bool is_ice, is_tce, is_icsm;
int extension_index = -1;
bool is_last, is_first;
struct sk_buff *skb;
@@ -2595,6 +2597,9 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
frame_length =
RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
buffer_length = buffer_info->buffer_length;
+ is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
+ is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
+ is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
netdev_dbg(netdev, "%s%schunk: %d/%d",
is_first ? "first " : " ",
@@ -2663,6 +2668,10 @@ process_extension:
if (is_last && rx->skb_head) {
rx->skb_head->protocol = eth_type_trans(rx->skb_head,
rx->adapter->netdev);
+ if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (!is_ice && !is_tce && !is_icsm)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
netdev_dbg(netdev, "sending %d byte frame to OS",
rx->skb_head->len);
napi_gro_receive(&rx->napi, rx->skb_head);
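
The receive path above latches three status bits from descriptor word data1 and, when RX checksum offload is enabled, marks the frame CHECKSUM_UNNECESSARY only if none of them is set, so the stack can skip software verification. A standalone sketch of that bit test follows, reusing the mask values added to lan743x_main.h further below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_DESC_DATA1_STATUS_ICE_	0x00020000	/* IP checksum error */
#define RX_DESC_DATA1_STATUS_TCE_	0x00010000	/* TCP checksum error */
#define RX_DESC_DATA1_STATUS_ICSM_	0x00000001	/* checksum mismatch */

/* True when the hardware validated the checksums, i.e. no error bit set. */
static bool rx_csum_ok(uint32_t data1)
{
	return !(data1 & (RX_DESC_DATA1_STATUS_ICE_ |
			  RX_DESC_DATA1_STATUS_TCE_ |
			  RX_DESC_DATA1_STATUS_ICSM_));
}

int main(void)
{
	printf("%d %d\n", rx_csum_ok(0x00004000), rx_csum_ok(0x00010001));
	return 0;
}
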
@@ -2866,9 +2875,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
if (ret)
goto return_error;
- netif_napi_add(adapter->netdev,
- &rx->napi, lan743x_rx_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
lan743x_csr_write(adapter, DMAC_CMD,
DMAC_CMD_RX_SWR_(rx->channel_number));
@@ -3347,8 +3354,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
PCI11X1X_USED_TX_CHANNELS,
LAN743X_USED_RX_CHANNELS);
} else {
- netdev = devm_alloc_etherdev(&pdev->dev,
- sizeof(struct lan743x_adapter));
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ LAN743X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
}
if (!netdev)
@@ -3383,7 +3392,8 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
adapter->netdev->netdev_ops = &lan743x_netdev_ops;
adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
- adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+ adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
/* carrier off reporting is important to ethtool even BEFORE open */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 72adae4f2aa0..67877d3b6dd9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -266,6 +266,8 @@
#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x)))
#define RFE_CTL (0x508)
+#define RFE_CTL_TCP_UDP_COE_ BIT(12)
+#define RFE_CTL_IP_COE_ BIT(11)
#define RFE_CTL_AB_ BIT(10)
#define RFE_CTL_AM_ BIT(9)
#define RFE_CTL_AU_ BIT(8)
@@ -954,8 +956,7 @@ struct lan743x_tx {
struct napi_struct napi;
u32 frame_count;
-
- struct sk_buff *overflow_skb;
+ u32 rqd_descriptors;
};
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
@@ -1110,7 +1111,7 @@ struct lan743x_tx_buffer_info {
unsigned int buffer_length;
};
-#define LAN743X_TX_RING_SIZE (50)
+#define LAN743X_TX_RING_SIZE (128)
/* OWN bit is set. ie, Descs are owned by RX DMAC */
#define RX_DESC_DATA0_OWN_ (0x00008000)
@@ -1122,6 +1123,9 @@ struct lan743x_tx_buffer_info {
(((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16)
#define RX_DESC_DATA0_EXT_ (0x00004000)
#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF)
+#define RX_DESC_DATA1_STATUS_ICE_ (0x00020000)
+#define RX_DESC_DATA1_STATUS_TCE_ (0x00010000)
+#define RX_DESC_DATA1_STATUS_ICSM_ (0x00000001)
#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF)
#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 6a11e2ceb013..da3ea905adbb 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1049,6 +1049,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
enum ptp_pin_function func,
unsigned int chan)
{
+ struct lan743x_ptp *lan_ptp =
+ container_of(ptp, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(lan_ptp, struct lan743x_adapter, ptp);
int result = 0;
/* Confirm the requested function is supported. Parameter
@@ -1057,7 +1061,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
+ break;
case PTP_PF_EXTTS:
+ if (!adapter->is_pci11x1x)
+ result = -1;
break;
case PTP_PF_PHYSYNC:
default:
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index 4241ff0e5098..49e1464a4313 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -4,6 +4,7 @@ config LAN966X_SWITCH
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
+ depends on BRIDGE || BRIDGE=n
select PHYLINK
select PACKING
help
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index fd2e0ebb2427..962f7c5f9e7d 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -8,4 +8,7 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
- lan966x_ptp.o lan966x_fdma.o
+ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
+ lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
+ lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
new file mode 100644
index 000000000000..70cbbf8d2b67
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 cir, cbs;
+ u8 se_idx;
+
+ /* Check for invalid values */
+ if (qopt->idleslope <= 0 ||
+ qopt->sendslope >= 0 ||
+ qopt->locredit >= qopt->hicredit)
+ return -EINVAL;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+ cir = qopt->idleslope;
+ cbs = (qopt->idleslope - qopt->sendslope) *
+ (qopt->hicredit - qopt->locredit) /
+ -qopt->sendslope;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+ /* Check that actually the result can be written */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 se_idx;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
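
To make the conversion above concrete, here is a minimal user-space sketch (not driver code) of how the tc-cbs parameters end up in hardware units; the idleslope/sendslope/credit values are made up and DIV_ROUND_UP is re-defined locally:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Example tc-cbs parameters (slopes in kbps, credits in bytes),
         * hypothetical values for illustration only.
         */
        int64_t idleslope = 98000, sendslope = -902000;
        int64_t hicredit = 153, locredit = -1389;
        uint32_t cir, cbs;

        cir = idleslope;                                /* kbps */
        cbs = (idleslope - sendslope) * (hicredit - locredit) /
              -sendslope;                               /* bytes */

        cir = DIV_ROUND_UP(cir, 100);                   /* 100 kbps units */
        cir = cir ? cir : 1;                            /* avoid zero rate */
        cbs = DIV_ROUND_UP(cbs, 4096);                  /* 4 kB units */
        cbs = cbs ? cbs : 1;                            /* avoid zero burst */

        printf("CIR_RATE=%u CIR_BURST=%u\n", cir, cbs);
        return 0;
}

With these numbers the result (CIR_RATE=980, CIR_BURST=1) fits the 15-bit rate and 7-bit burst fields checked in the hunk.
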
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
new file mode 100644
index 000000000000..8310d3f35404
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define DWRR_COST_BIT_WIDTH BIT(5)
+
+static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight)
+{
+ u32 res;
+
+ /* Round half up: multiply by 16 before the division,
+ * add 8 and divide the result by 16 again
+ */
+ res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
+ return max_t(u32, 1, res) - 1;
+}
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params;
+ struct lan966x *lan966x = port->lan966x;
+ u32 w_min = 100;
+ u8 count = 0;
+ u32 se_idx;
+ u8 i;
+
+ /* Check the input */
+ if (qopt->parent != TC_H_ROOT)
+ return -EINVAL;
+
+ params = &qopt->replace_params;
+ if (params->bands != NUM_PRIO_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < params->bands; ++i) {
+ /* In the switch the DWRR is always on the lowest consecutive
+ * priorities. Due to this, the first priority must map to the
+ * first DWRR band.
+ */
+ if (params->priomap[i] != (7 - i))
+ return -EINVAL;
+
+ if (params->quanta[i] && params->weights[i] == 0)
+ return -EINVAL;
+ }
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ /* Find minimum weight */
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ ++count;
+
+ lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]),
+ lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i));
+ }
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 se_idx;
+ int i;
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ for (i = 0; i < NUM_PRIO_QUEUES; ++i)
+ lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i));
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
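
As a quick illustration of the round-half-up cost calculation above, a standalone sketch with made-up weights; the DWRR cost field is 5 bits wide, so results land in 0..31:

#include <stdio.h>
#include <stdint.h>

#define DWRR_COST_BIT_WIDTH (1u << 5)   /* BIT(5) = 32, as in the hunk */

static uint32_t ets_hw_cost(uint32_t w_min, uint32_t weight)
{
        /* Multiply by 16, divide, add 8, divide by 16 again: round half up */
        uint32_t res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;

        return (res > 1 ? res : 1) - 1;
}

int main(void)
{
        uint32_t weights[] = { 10, 20, 30, 40 };        /* w_min = 10 */

        for (unsigned int i = 0; i < 4; i++)
                printf("weight %u -> DWRR cost %u\n",
                       weights[i], ets_hw_cost(10, weights[i]));
        return 0;
}

The smallest weight gets the largest cost (31 here), so it is served least often, matching the DWRR semantics of the tc-ets bands.
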
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
index da5ca7188679..2ea263e893ee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -8,6 +8,7 @@ struct lan966x_fdb_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct net_device *orig_dev;
struct lan966x *lan966x;
unsigned long event;
};
@@ -127,75 +128,119 @@ void lan966x_fdb_deinit(struct lan966x *lan966x)
lan966x_fdb_purge_entries(lan966x);
}
-static void lan966x_fdb_event_work(struct work_struct *work)
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
+{
+ flush_workqueue(lan966x->fdb_work);
+}
+
+static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
{
- struct lan966x_fdb_event_work *fdb_work =
- container_of(work, struct lan966x_fdb_event_work, work);
struct switchdev_notifier_fdb_info *fdb_info;
- struct net_device *dev = fdb_work->dev;
struct lan966x_port *port;
struct lan966x *lan966x;
- int ret;
- fdb_info = &fdb_work->fdb_info;
lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->orig_dev);
+ fdb_info = &fdb_work->fdb_info;
- if (lan966x_netdevice_check(dev)) {
- port = netdev_priv(dev);
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x *lan966x;
+ int ret;
- switch (fdb_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
- fdb_info->vid);
+ lan966x = fdb_work->lan966x;
+ fdb_info = &fdb_work->fdb_info;
+
+ /* In case the bridge is called */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If there is no front port in this vlan, there is no
+ * point in copying the frame to the CPU because it would
+ * just be dropped later. So copy it to the CPU only if
+ * there is such a port, but always store the fdb entry so
+ * it can be used later, when a port actually joins the
+ * vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- lan966x_mac_del_entry(lan966x, fdb_info->addr,
- fdb_info->vid);
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
break;
- }
- } else {
- if (!netif_is_bridge_master(dev))
- goto out;
-
- /* In case the bridge is called */
- switch (fdb_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- /* If there is no front port in this vlan, there is no
- * point to copy the frame to CPU because it would be
- * just dropped at later point. So add it only if
- * there is a port but it is required to store the fdb
- * entry for later point when a port actually gets in
- * the vlan.
- */
- lan966x_fdb_add_entry(lan966x, fdb_info);
- if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
- fdb_info->vid))
- break;
-
- lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
- fdb_info->vid);
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
+ return;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- ret = lan966x_fdb_del_entry(lan966x, fdb_info);
- if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
- fdb_info->vid))
- break;
-
- if (ret)
- lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
- fdb_info->vid);
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
break;
- }
+ lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
+ break;
}
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+
+ if (lan966x_netdevice_check(fdb_work->orig_dev))
+ lan966x_fdb_port_event_work(fdb_work);
+ else if (netif_is_bridge_master(fdb_work->orig_dev))
+ lan966x_fdb_bridge_event_work(fdb_work);
+ else if (netif_is_lag_master(fdb_work->orig_dev))
+ lan966x_fdb_lag_event_work(fdb_work);
-out:
kfree(fdb_work->fdb_info.addr);
kfree(fdb_work);
- dev_put(dev);
}
int lan966x_handle_fdb(struct net_device *dev,
@@ -221,7 +266,8 @@ int lan966x_handle_fdb(struct net_device *dev,
if (!fdb_work)
return -ENOMEM;
- fdb_work->dev = orig_dev;
+ fdb_work->dev = dev;
+ fdb_work->orig_dev = orig_dev;
fdb_work->lan966x = lan966x;
fdb_work->event = event;
INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
@@ -231,7 +277,6 @@ int lan966x_handle_fdb(struct net_device *dev,
goto err_addr_alloc;
ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
- dev_hold(orig_dev);
queue_work(lan966x->fdb_work, &fdb_work->work);
break;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 6dea7f8c1481..7e4061c854f0 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -425,7 +425,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
lan966x_ifh_get_src_port(skb->data, &src_port);
lan966x_ifh_get_timestamp(skb->data, &timestamp);
- WARN_ON(src_port >= lan966x->num_phys_ports);
+ if (WARN_ON(src_port >= lan966x->num_phys_ports))
+ goto free_skb;
skb->dev = lan966x->ports[src_port]->dev;
skb_pull(skb, IFH_LEN * sizeof(u32));
@@ -449,6 +450,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
return skb;
+free_skb:
+ kfree_skb(skb);
unmap_page:
dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
FDMA_DCB_STATUS_BLOCKL(db->status),
@@ -784,8 +787,7 @@ void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
return;
lan966x->fdma_ndev = dev;
- netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
napi_enable(&lan966x->napi);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
new file mode 100644
index 000000000000..41fa2523d91d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+{
+ u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
+ int p, lag, i;
+
+ /* Reset destination and aggregation PGIDS */
+ for (p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(ANA_PGID_PGID_SET(BIT(p)),
+ lan966x, ANA_PGID(p));
+
+ for (p = PGID_AGGR; p < PGID_SRC; ++p)
+ lan_wr(ANA_PGID_PGID_SET(visited),
+ lan966x, ANA_PGID(p));
+
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ visited &= ~BIT(p);
+ }
+
+ /* Now, set PGIDs for each active LAG */
+ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+ struct net_device *bond = lan966x->ports[lag]->bond;
+ int num_active_ports = 0;
+ unsigned long bond_mask;
+ u8 aggr_idx[16];
+
+ if (!bond || (visited & BIT(lag)))
+ continue;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+
+ for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ lan966x, ANA_PGID(p));
+ if (port->lag_tx_active)
+ aggr_idx[num_active_ports++] = p;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; ++i) {
+ u32 ac;
+
+ ac = lan_rd(lan966x, ANA_PGID(i));
+ ac &= ~bond_mask;
+ /* Don't do division by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
+ lan_wr(ANA_PGID_PGID_SET(ac),
+ lan966x, ANA_PGID(i));
+ }
+
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (p = lag; p < lan966x->num_phys_ports; p++) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ visited |= BIT(p);
+ }
+ }
+}
+
+static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ u32 bond_mask;
+ u32 lag_id;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ lag_id = port->chip_port;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ }
+}
+
+static void lan966x_lag_update_ids(struct lan966x *lan966x)
+{
+ lan966x_lag_set_port_ids(lan966x);
+ lan966x_update_fwd_mask(lan966x);
+ lan966x_lag_set_aggr_pgids(lan966x);
+}
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ u32 lag_id = -1;
+ u32 bond_mask;
+ int err;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ port->bond = bond;
+ lan966x_lag_update_ids(lan966x);
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto out;
+
+ lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
+
+ if (lan966x_lag_first_port(port->bond, port->dev) &&
+ lag_id != -1)
+ lan966x_mac_lag_replace_port_entry(lan966x,
+ lan966x->ports[lag_id],
+ port);
+
+ return 0;
+
+out:
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+
+ return err;
+}
+
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 bond_mask;
+ u32 lag_id;
+
+ if (lan966x_lag_first_port(port->bond, port->dev)) {
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ bond_mask &= ~BIT(port->chip_port);
+ if (bond_mask) {
+ lag_id = __ffs(bond_mask);
+ lan966x_mac_lag_replace_port_entry(lan966x, port,
+ lan966x->ports[lag_id]);
+ } else {
+ lan966x_mac_lag_remove_port_entry(lan966x, port);
+ }
+ }
+
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+ lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
+}
+
+static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
+ enum netdev_lag_hash hash_type)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ if (port->hash_type != hash_type)
+ return false;
+ }
+
+ return true;
+}
+
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct netdev_lag_upper_info *lui;
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ lui = info->upper_info;
+ if (!lui) {
+ port->hash_type = NETDEV_LAG_HASH_NONE;
+ return NOTIFY_DONE;
+ }
+
+ if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported Tx type");
+ return -EINVAL;
+ }
+
+ if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG devices can have only the same hash_type");
+ return -EINVAL;
+ }
+
+ switch (lui->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L34:
+ lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L23:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported hash type");
+ return -EINVAL;
+ }
+
+ port->hash_type = lui->hash_type;
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag = info->lower_state_info;
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool is_active;
+
+ if (!port->bond)
+ return NOTIFY_DONE;
+
+ is_active = lag->link_up && lag->tx_enabled;
+ if (port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ port->lag_tx_active = is_active;
+ lan966x_lag_set_aggr_pgids(lan966x);
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_prechangeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_changeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long bond_mask;
+
+ if (port->bond != lag)
+ return false;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, lag);
+ if (bond_mask && port->chip_port == __ffs(bond_mask))
+ return true;
+
+ return false;
+}
+
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
+{
+ struct lan966x_port *port;
+ u32 mask = 0;
+ int p;
+
+ if (!bond)
+ return mask;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ mask |= BIT(p);
+ }
+
+ return mask;
+}
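
The aggregation-PGID loop above spreads the 16 aggregation codes round-robin over the active LAG members; a small standalone sketch with made-up port numbers (the PGID_AGGR/PGID_SRC values are assumed here, only the 16-entry range matters):

#include <stdio.h>
#include <stdint.h>

#define PGID_AGGR 64    /* assumed base of the aggregation PGIDs */
#define PGID_SRC  80    /* 16 aggregation entries in total */

int main(void)
{
        uint8_t aggr_idx[16];
        uint8_t active_ports[] = { 2, 5 };      /* LAG members with tx enabled */
        int num_active_ports = 2;
        int i;

        for (i = 0; i < num_active_ports; i++)
                aggr_idx[i] = active_ports[i];

        for (i = PGID_AGGR; i < PGID_SRC; i++) {
                uint32_t ac = 0;        /* bond-mask bits already cleared */

                /* Same modulo distribution as in the hunk */
                ac |= 1u << aggr_idx[i % num_active_ports];
                printf("ANA_PGID(%d) = 0x%08x\n", i, ac);
        }
        return 0;
}

Each aggregation code therefore selects exactly one active member, which is how traffic gets hashed across the bond without duplicating frames.
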
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index 5893770bfd94..baa3a30c039f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -22,6 +22,7 @@ struct lan966x_mac_entry {
u16 vid;
u16 port_index;
int row;
+ bool lag;
};
struct lan966x_mac_raw_entry {
@@ -69,15 +70,14 @@ static void lan966x_mac_select(struct lan966x *lan966x,
lan_wr(mach, lan966x, ANA_MACHDATA);
}
-static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
- bool cpu_copy,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid,
- enum macaccess_entry_type type)
+static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
{
- int ret;
+ lockdep_assert_held(&lan966x->mac_lock);
- spin_lock(&lan966x->mac_lock);
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
@@ -89,7 +89,19 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
lan966x, ANA_MACACCESS);
- ret = lan966x_mac_wait_for_completion(lan966x);
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
spin_unlock(&lan966x->mac_lock);
return ret;
@@ -119,6 +131,16 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}
+static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
+}
+
static int lan966x_mac_forget_locked(struct lan966x *lan966x,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
@@ -178,8 +200,9 @@ void lan966x_mac_init(struct lan966x *lan966x)
INIT_LIST_HEAD(&lan966x->mac_entries);
}
-static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
- u16 vid, u16 port_index)
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
+ const unsigned char *mac,
+ u16 vid)
{
struct lan966x_mac_entry *mac_entry;
@@ -189,8 +212,9 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
memcpy(mac_entry->mac, mac, ETH_ALEN);
mac_entry->vid = vid;
- mac_entry->port_index = port_index;
+ mac_entry->port_index = port->chip_port;
mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ mac_entry->lag = port->bond ? true : false;
return mac_entry;
}
@@ -269,7 +293,7 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
goto mac_learn;
}
- mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+ mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
if (!mac_entry) {
spin_unlock(&lan966x->mac_lock);
return -ENOMEM;
@@ -278,7 +302,8 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
- lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
+ port->bond ?: port->dev);
mac_learn:
lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
@@ -309,6 +334,50 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
return 0;
}
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ lan966x_mac_learn_locked(lan966x, dst->chip_port,
+ mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+ mac_entry->port_index = dst->chip_port;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
void lan966x_mac_purge_entries(struct lan966x *lan966x)
{
struct lan966x_mac_entry *mac_entry, *tmp;
@@ -354,6 +423,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
struct lan966x_mac_entry *mac_entry, *tmp;
unsigned char mac[ETH_ALEN] __aligned(2);
struct list_head mac_deleted_entries;
+ struct lan966x_port *port;
u32 dest_idx;
u32 column;
u16 vid;
@@ -406,9 +476,10 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
/* Notify the bridge that the entry doesn't exist
* anymore in the HW
*/
+ port = lan966x->ports[mac_entry->port_index];
lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
mac_entry->mac, mac_entry->vid,
- lan966x->ports[mac_entry->port_index]->dev);
+ port->bond ?: port->dev);
list_del(&mac_entry->list);
kfree(mac_entry);
}
@@ -440,7 +511,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
continue;
}
- mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ port = lan966x->ports[dest_idx];
+ mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
if (!mac_entry) {
spin_unlock(&lan966x->mac_lock);
return;
@@ -451,7 +523,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
spin_unlock(&lan966x->mac_lock);
lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- mac, vid, lan966x->ports[dest_idx]->dev);
+ mac, vid, port->bond ?: port->dev);
}
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1d6e3b641b2e..be2fd030cccb 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -344,7 +344,8 @@ static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
}
-static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
@@ -466,6 +467,7 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
.ndo_eth_ioctl = lan966x_port_ioctl,
+ .ndo_setup_tc = lan966x_tc_setup,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -710,7 +712,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
@@ -718,10 +720,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (lan966x->fdma)
devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
- if (lan966x->ptp_irq)
+ if (lan966x->ptp_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
- if (lan966x->ptp_ext_irq)
+ if (lan966x->ptp_ext_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
@@ -738,7 +740,8 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
return -EINVAL;
dev = devm_alloc_etherdev_mqs(lan966x->dev,
- sizeof(struct lan966x_port), 8, 1);
+ sizeof(struct lan966x_port),
+ NUM_PRIO_QUEUES, 1);
if (!dev)
return -ENOMEM;
@@ -754,7 +757,9 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
dev->netdev_ops = &lan966x_port_netdev_ops;
dev->ethtool_ops = &lan966x_ethtool_ops;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
dev->needed_headroom = IFH_LEN * sizeof(u32);
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -770,6 +775,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
@@ -778,6 +784,8 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QUSGMII,
+ port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
@@ -956,6 +964,8 @@ static void lan966x_init(struct lan966x *lan966x)
lan966x, ANA_ANAINTR);
spin_lock_init(&lan966x->tx_lock);
+
+ lan966x_taprio_init(lan966x);
}
static int lan966x_ram_init(struct lan966x *lan966x)
@@ -969,7 +979,8 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
int val = 0;
int ret;
- switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
+ switch_reset = devm_reset_control_get_optional_shared(lan966x->dev,
+ "switch");
if (IS_ERR(switch_reset))
return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
"Could not obtain switch reset");
@@ -1049,7 +1060,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
@@ -1164,6 +1175,7 @@ static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_taprio_deinit(lan966x);
lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 2787055c1847..9656071b8289 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -9,6 +9,8 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/switchdev.h>
#include "lan966x_regs.h"
@@ -36,6 +38,7 @@
#define NUM_PHYS_PORTS 8
#define CPU_PORT 8
+#define NUM_PRIO_QUEUES 8
/* Reserved PGIDs */
#define PGID_CPU (PGID_AGGR - 6)
@@ -79,6 +82,9 @@
#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 512
+#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
+#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -258,6 +264,11 @@ struct lan966x {
struct lan966x_rx rx;
struct lan966x_tx tx;
struct napi_struct napi;
+
+ /* Mirror */
+ struct lan966x_port *mirror_monitor;
+ u32 mirror_mask[2];
+ u32 mirror_count;
};
struct lan966x_port_config {
@@ -270,6 +281,15 @@ struct lan966x_port_config {
bool autoneg;
};
+struct lan966x_port_tc {
+ bool ingress_shared_block;
+ unsigned long police_id;
+ unsigned long ingress_mirror_id;
+ unsigned long egress_mirror_id;
+ struct flow_stats police_stat;
+ struct flow_stats mirror_stat;
+};
+
struct lan966x_port {
struct net_device *dev;
struct lan966x *lan966x;
@@ -292,11 +312,19 @@ struct lan966x_port {
u8 ptp_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
+
+ struct net_device *bond;
+ bool lag_tx_active;
+ enum netdev_lag_hash hash_type;
+
+ struct lan966x_port_tc tc;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
extern const struct ethtool_ops lan966x_ethtool_ops;
+extern struct notifier_block lan966x_switchdev_nb __read_mostly;
+extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
bool lan966x_netdevice_check(const struct net_device *dev);
@@ -345,6 +373,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x,
struct lan966x_port *port,
const unsigned char *addr,
u16 vid);
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst);
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src);
void lan966x_mac_purge_entries(struct lan966x *lan966x);
irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
@@ -369,6 +402,7 @@ void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
int lan966x_fdb_init(struct lan966x *lan966x);
void lan966x_fdb_deinit(struct lan966x *lan966x);
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x);
int lan966x_handle_fdb(struct net_device *dev,
struct net_device *orig_dev,
unsigned long event, const void *ctx,
@@ -397,6 +431,8 @@ void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
struct sk_buff *skb);
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+u32 lan966x_ptp_get_period_ps(void);
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
int lan966x_fdma_change_mtu(struct lan966x *lan966x);
@@ -406,6 +442,89 @@ int lan966x_fdma_init(struct lan966x *lan966x);
void lan966x_fdma_deinit(struct lan966x *lan966x);
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack);
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond);
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info);
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev);
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond);
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state);
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t);
+void lan966x_update_fwd_mask(struct lan966x *lan966x);
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc);
+int lan966x_mqprio_del(struct lan966x_port *port);
+
+void lan966x_taprio_init(struct lan966x *lan966x);
+void lan966x_taprio_deinit(struct lan966x *lan966x);
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt);
+int lan966x_taprio_del(struct lan966x_port *port);
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed);
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress);
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack);
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats);
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress);
+
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
int gbase, int ginst,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
new file mode 100644
index 000000000000..7e1ba3f40c35
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_port *monitor_port;
+
+ if (!lan966x_netdevice_check(action->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an lan966x port");
+ return -EOPNOTSUPP;
+ }
+
+ monitor_port = netdev_priv(action->dev);
+
+ if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror already exists");
+ return -EEXIST;
+ }
+
+ if (lan966x->mirror_monitor &&
+ lan966x->mirror_monitor != monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change mirror port while in use");
+ return -EBUSY;
+ }
+
+ if (port == monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mirror the monitor port");
+ return -EINVAL;
+ }
+
+ lan966x->mirror_mask[ingress] |= BIT(port->chip_port);
+
+ lan966x->mirror_monitor = monitor_port;
+ lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count++;
+
+ if (ingress)
+ port->tc.ingress_mirror_id = mirror_id;
+ else
+ port->tc.egress_mirror_id = mirror_id;
+
+ return 0;
+}
+
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "There is no mirroring for this port");
+ return -ENOENT;
+ }
+
+ lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count--;
+
+ if (lan966x->mirror_count == 0) {
+ lan966x->mirror_monitor = NULL;
+ lan_wr(0, lan966x, ANA_MIRRORPORTS);
+ }
+
+ if (ingress)
+ port->tc.ingress_mirror_id = 0;
+ else
+ port->tc.egress_mirror_id = 0;
+
+ return 0;
+}
+
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.mirror_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ if (ingress) {
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(stats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
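
Both the mirror and the policer statistics callbacks report deltas against a stored baseline rather than absolute counters; a trimmed-down sketch of that pattern with invented numbers:

#include <stdio.h>
#include <stdint.h>

struct snapshot {
        uint64_t bytes;
        uint64_t pkts;
        uint64_t drops;
};

static void report_delta(struct snapshot *old, const struct snapshot *now)
{
        /* Only the increase since the last call is reported */
        printf("delta: %llu bytes, %llu pkts, %llu drops\n",
               (unsigned long long)(now->bytes - old->bytes),
               (unsigned long long)(now->pkts - old->pkts),
               (unsigned long long)(now->drops - old->drops));
        *old = *now;    /* new baseline for the next call */
}

int main(void)
{
        struct snapshot old = { 0, 0, 0 };
        struct snapshot t1 = { 1500, 10, 0 };
        struct snapshot t2 = { 4500, 25, 1 };

        report_delta(&old, &t1);        /* 1500 bytes, 10 pkts, 0 drops */
        report_delta(&old, &t2);        /* 3000 bytes, 15 pkts, 1 drop */
        return 0;
}

In the driver the role of report_delta() is played by flow_stats_update() plus the per-port flow_stats baseline kept in struct lan966x_port_tc.
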
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
new file mode 100644
index 000000000000..7fa76e74f9e2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc)
+{
+ u8 i;
+
+ if (num_tc != NUM_PRIO_QUEUES) {
+ netdev_err(port->dev, "Only %d traffic classes supported\n",
+ NUM_PRIO_QUEUES);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(port->dev, num_tc);
+
+ for (i = 0; i < num_tc; ++i)
+ netdev_set_tc_queue(port->dev, i, 1, i);
+
+ return 0;
+}
+
+int lan966x_mqprio_del(struct lan966x_port *port)
+{
+ netdev_reset_tc(port->dev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 38a7e95d69b4..e4ac59480514 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -28,11 +28,12 @@ static int lan966x_phylink_mac_prepare(struct phylink_config *config,
phy_interface_t iface)
{
struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ phy_interface_t serdes_mode = iface;
int err;
if (port->serdes) {
err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
- iface);
+ serdes_mode);
if (err) {
netdev_err(to_net_dev(config->dev),
"Could not set mode of SerDes\n");
@@ -59,6 +60,9 @@ static void lan966x_phylink_mac_link_up(struct phylink_config *config,
port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+ if (phy_interface_mode_is_rgmii(interface))
+ phy_set_speed(port->serdes, speed);
+
lan966x_port_config_up(port);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
new file mode 100644
index 000000000000..a9aec900d608
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+/* 0-8 : 9 port policers */
+#define POL_IDX_PORT 0
+
+/* Policer order: Serial (QoS -> Port -> VCAP) */
+#define POL_ORDER 0x1d3
+
+struct lan966x_tc_policer {
+ /* kilobit per second */
+ u32 rate;
+ /* bytes */
+ u32 burst;
+};
+
+static int lan966x_police_add(struct lan966x_port *port,
+ struct lan966x_tc_policer *pol,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Rate unit is 33 1/3 kbps */
+ pol->rate = DIV_ROUND_UP(pol->rate * 3, 100);
+ /* Avoid zero burst size */
+ pol->burst = pol->burst ?: 1;
+ /* Unit is 4kB */
+ pol->burst = DIV_ROUND_UP(pol->burst, 4096);
+
+ if (pol->rate > GENMASK(15, 0) ||
+ pol->burst > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(1) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
+
+static int lan966x_police_del(struct lan966x_port *port,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(2) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(0),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
+
+static int lan966x_police_validate(struct lan966x_port *port,
+ const struct flow_action *action,
+ const struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.ingress_shared_block) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on shared ingress blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct rtnl_link_stats64 new_stats;
+ struct lan966x_tc_policer pol;
+ struct flow_stats *old_stats;
+ int err;
+
+ err = lan966x_police_validate(port, action, act, police_id, ingress,
+ extack);
+ if (err)
+ return err;
+
+ memset(&pol, 0, sizeof(pol));
+
+ pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = act->police.burst;
+
+ err = lan966x_police_add(port, &pol, POL_IDX_PORT + port->chip_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = police_id;
+
+ /* Setup initial stats */
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+
+ return 0;
+}
+
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ if (port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid policer id");
+ return -EINVAL;
+ }
+
+ err = lan966x_police_del(port, port->tc.police_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = 0;
+
+ return 0;
+}
+
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+}
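
A standalone sketch (not driver code) of the unit conversion in lan966x_police_add() above, with a made-up 100 Mbit/s policer; the 33 1/3 kbps rate unit and 4 kB burst unit follow from the divisors used in the hunk:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t rate_bytes_ps = 12500000;      /* 100 Mbit/s from tc */
        uint32_t burst_bytes = 10000;
        uint32_t rate, burst;

        rate = (rate_bytes_ps / 1000) * 8;      /* bytes/s -> kbps */
        rate = DIV_ROUND_UP(rate * 3, 100);     /* -> 33 1/3 kbps units */

        burst = burst_bytes ? burst_bytes : 1;  /* avoid zero burst */
        burst = DIV_ROUND_UP(burst, 4096);      /* -> 4 kB units */

        printf("PIR_RATE=%u PIR_BURST=%u\n", rate, burst);
        return 0;
}

The result (PIR_RATE=3000, PIR_BURST=3) stays within the 15-bit rate and 7-bit burst limits the hunk checks before programming ANA_POL_PIR_CFG.
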
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index f141644e4372..1a61c6cdb077 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -165,10 +165,12 @@ static void lan966x_port_link_up(struct lan966x_port *port)
break;
}
+ lan966x_taprio_speed_set(port, config->speed);
+
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
* port speed for QSGMII ports.
*/
- if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ if (phy_interface_num_ports(config->portmode) == 4)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
@@ -331,10 +333,14 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x *lan966x = port->lan966x;
bool inband_aneg = false;
bool outband;
+ bool full_preamble = false;
+
+ if (config->portmode == PHY_INTERFACE_MODE_QUSGMII)
+ full_preamble = true;
if (config->inband) {
if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
- config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ phy_interface_num_ports(config->portmode) == 4)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
config->autoneg)
@@ -345,9 +351,15 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
outband = true;
}
- /* Disable or enable inband */
- lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
- DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ /* Disable or enable inband.
+ * For QUSGMII, we rely on the preamble to transmit data such as
+ * timestamps, therefore force full preamble transmission, and prevent
+ * preamble shortening
+ */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA,
lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
/* Enable PCS */
@@ -396,7 +408,7 @@ void lan966x_port_init(struct lan966x_port *port)
if (lan966x->fdma)
lan966x_fdma_netdev_init(lan966x, port->dev);
- if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
+ if (phy_interface_num_ports(config->portmode) != 4)
return;
lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 3a621c5165bc..e5a2bbe064f8 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -464,8 +464,7 @@ static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
return 0;
}
-static int lan966x_ptp_gettime64(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
@@ -890,3 +889,9 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp = full_ts_in_ns;
}
+
+u32 lan966x_ptp_get_period_ps(void)
+{
+ /* This represents the system clock period in picoseconds */
+ return 15125;
+}
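
For reference, the 15125 ps period returned above corresponds to roughly a 66 MHz system clock; a one-line check (the frequency is derived from the value, not taken from a datasheet):

#include <stdio.h>

int main(void)
{
        double period_ps = 15125.0;
        double freq_mhz = 1e6 / period_ps;      /* 1 / (15125e-12 s), in MHz */

        printf("period = %.3f ns, clock ~ %.2f MHz\n",
               period_ps / 1000.0, freq_mhz);
        return 0;
}
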
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 8265ad89f0bc..1d90b93dd417 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -90,6 +90,24 @@ enum lan966x_target {
#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+/* ANA:ANA:MIRRORPORTS */
+#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4)
+
+#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0)
+#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x)
+#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x)
+
+/* ANA:ANA:EMIRRORPORTS */
+#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4)
+
+#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+
/* ANA:ANA:FLOODING */
#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
@@ -330,6 +348,12 @@ enum lan966x_target {
/* ANA:PORT:PORT_CFG */
#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+
#define ANA_PORT_CFG_LEARNAUTO BIT(6)
#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
@@ -354,6 +378,21 @@ enum lan966x_target {
#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+/* ANA:PORT:POL_CFG */
+#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4)
+
+#define ANA_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x)
+#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\
+ FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x)
+
+#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0)
+#define ANA_POL_CFG_POL_ORDER_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_POL_ORDER, x)
+#define ANA_POL_CFG_POL_ORDER_GET(x)\
+ FIELD_GET(ANA_POL_CFG_POL_ORDER, x)
+
/* ANA:PFC:PFC_CFG */
#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
@@ -363,6 +402,108 @@ enum lan966x_target {
#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+/* ANA:COMMON:AGGR_CFG */
+#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x)
+#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x)
+
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+
+/* ANA:POL:POL_PIR_CFG */
+#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4)
+
+#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x)
+#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x)
+
+#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0)
+#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x)
+#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x)
+
+/* ANA:POL:POL_MODE_CFG */
+#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4)
+
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5)
+#define ANA_POL_MODE_IPG_SIZE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x)
+#define ANA_POL_MODE_IPG_SIZE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_IPG_SIZE, x)
+
+#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3)
+#define ANA_POL_MODE_FRM_MODE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_FRM_MODE, x)
+#define ANA_POL_MODE_FRM_MODE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_FRM_MODE, x)
+
+#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0)
+#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x)
+#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x)
+
+/* ANA:POL:POL_PIR_STATE */
+#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4)
+
+#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0)
+#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x)
+#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\
+ FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x)
+
/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
@@ -504,6 +645,12 @@ enum lan966x_target {
#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
@@ -967,6 +1114,215 @@ enum lan966x_target {
/* QSYS:RES_CTRL:RES_CFG */
#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+/* QSYS:HSCH:CIR_CFG */
+#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4)
+
+#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x)
+#define QSYS_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x)
+
+#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define QSYS_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x)
+#define QSYS_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x)
+
+/* QSYS:HSCH:SE_CFG */
+#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4)
+
+#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x)
+#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x)
+
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x)
+#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x)
+
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x)
+#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x)
+
+#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x)
+#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x)
+
+#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4)
+
+#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\
+ FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\
+ FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+
+/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */
+#define QSYS_TAS_CFG_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+
+/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */
+#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4)
+
+#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+
+/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4)
+
+#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+
+/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */
+#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */
+#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4)
+
+#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0)
+#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x)
+#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */
+#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */
+#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4)
+
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */
+#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */
+#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4)
+
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\
+ FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\
+ FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */
+#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\
+ FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\
+ FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */
+#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4)
+
+#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0)
+#define QSYS_TAS_LST_LIST_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x)
+#define QSYS_TAS_LST_LIST_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_LST_LIST_STATE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */
+#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */
+#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */
+#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4)
+
+/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */
+#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+
/* REW:PORT:PORT_VLAN_CFG */
#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index df2bee678559..1c88120eb291 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -6,8 +6,6 @@
#include "lan966x_main.h"
static struct notifier_block lan966x_netdevice_nb __read_mostly;
-static struct notifier_block lan966x_switchdev_nb __read_mostly;
-static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
u32 pgid_ip)
@@ -132,7 +130,7 @@ static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
return 0;
}
-static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
int i;
@@ -140,9 +138,14 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
struct lan966x_port *port = lan966x->ports[i];
unsigned long mask = 0;
- if (port && lan966x->bridge_fwd_mask & BIT(i))
+ if (port && lan966x->bridge_fwd_mask & BIT(i)) {
mask = lan966x->bridge_fwd_mask & ~BIT(i);
+ if (port->bond)
+ mask &= ~lan966x_lag_get_mask(lan966x,
+ port->bond);
+ }
+
mask |= BIT(CPU_PORT);
lan_wr(ANA_PGID_PGID_SET(mask),
@@ -150,7 +153,7 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
}
}
-static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
struct lan966x *lan966x = port->lan966x;
bool learn_ena = false;
@@ -171,8 +174,8 @@ static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
lan966x_update_fwd_mask(lan966x);
}
-static void lan966x_port_ageing_set(struct lan966x_port *port,
- unsigned long ageing_clock_t)
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
@@ -241,6 +244,7 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
}
static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
@@ -258,7 +262,7 @@ static int lan966x_port_bridge_join(struct lan966x_port *port,
}
}
- err = switchdev_bridge_port_offload(dev, dev, port,
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
&lan966x_switchdev_nb,
&lan966x_switchdev_blocking_nb,
false, extack);
@@ -295,8 +299,9 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_apply(port);
}
-static int lan966x_port_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
@@ -306,44 +311,68 @@ static int lan966x_port_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = lan966x_port_bridge_join(port, info->upper_dev,
+ err = lan966x_port_bridge_join(port, brport_dev,
+ info->upper_dev,
extack);
else
lan966x_port_bridge_leave(port, info->upper_dev);
}
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_lag_port_join(port, info->upper_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_lag_port_leave(port, info->upper_dev);
+ }
+
return err;
}
-static int lan966x_port_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
+ int err = NOTIFY_DONE;
- if (netif_is_bridge_master(info->upper_dev) && !info->linking)
- switchdev_bridge_port_unoffload(port->dev, port,
- NULL, NULL);
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
+ switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
- return NOTIFY_DONE;
+ if (netif_is_lag_master(info->upper_dev)) {
+ err = lan966x_lag_port_prechangeupper(dev, info);
+ if (err || info->linking)
+ return err;
+
+ switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ return err;
}
-static int lan966x_foreign_bridging_check(struct net_device *bridge,
+static int lan966x_foreign_bridging_check(struct net_device *upper,
+ bool *has_foreign,
+ bool *seen_lan966x,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = NULL;
- bool has_foreign = false;
struct net_device *dev;
struct list_head *iter;
- if (!netif_is_bridge_master(bridge))
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper))
return 0;
- netdev_for_each_lower_dev(bridge, dev, iter) {
+ netdev_for_each_lower_dev(upper, dev, iter) {
if (lan966x_netdevice_check(dev)) {
struct lan966x_port *port = netdev_priv(dev);
if (lan966x) {
- /* Bridge already has at least one port of a
+ /* Upper already has at least one port of a
* lan966x switch inside it, check that it's
* the same instance of the driver.
*/
@@ -354,15 +383,24 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
}
} else {
/* This is the first lan966x port inside this
- * bridge
+ * upper device
*/
lan966x = port->lan966x;
+ *seen_lan966x = true;
}
+ } else if (netif_is_lag_master(dev)) {
+ /* Allow bond interfaces that contain only lan966x
+ * devices
+ */
+ if (lan966x_foreign_bridging_check(dev, has_foreign,
+ seen_lan966x,
+ extack))
+ return -EINVAL;
} else {
- has_foreign = true;
+ *has_foreign = true;
}
- if (lan966x && has_foreign) {
+ if (*seen_lan966x && *has_foreign) {
NL_SET_ERR_MSG_MOD(extack,
"Bridging lan966x ports with foreign interfaces disallowed");
return -EINVAL;
@@ -375,7 +413,12 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
static int lan966x_bridge_check(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
+ bool has_foreign = false;
+ bool seen_lan966x = false;
+
return lan966x_foreign_bridging_check(info->upper_dev,
+ &has_foreign,
+ &seen_lan966x,
info->info.extack);
}
@@ -386,21 +429,44 @@ static int lan966x_netdevice_port_event(struct net_device *dev,
int err = 0;
if (!lan966x_netdevice_check(dev)) {
- if (event == NETDEV_CHANGEUPPER)
- return lan966x_bridge_check(dev, ptr);
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ if (netif_is_lag_master(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ err = lan966x_lag_netdev_changeupper(dev,
+ ptr);
+ else
+ err = lan966x_lag_netdev_prechangeupper(dev,
+ ptr);
+
+ return err;
+ }
+ break;
+ default:
+ return 0;
+ }
+
return 0;
}
switch (event) {
case NETDEV_PRECHANGEUPPER:
- err = lan966x_port_prechangeupper(dev, ptr);
+ err = lan966x_port_prechangeupper(dev, dev, ptr);
break;
case NETDEV_CHANGEUPPER:
err = lan966x_bridge_check(dev, ptr);
if (err)
return err;
- err = lan966x_port_changeupper(dev, ptr);
+ err = lan966x_port_changeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ err = lan966x_lag_port_changelowerstate(dev, ptr);
break;
}
@@ -418,19 +484,23 @@ static int lan966x_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(ret);
}
-/* We don't offload uppers such as LAG as bridge ports, so every device except
- * the bridge itself is foreign.
- */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int i;
if (netif_is_bridge_master(foreign_dev))
if (lan966x->bridge == foreign_dev)
return false;
+ if (netif_is_lag_master(foreign_dev))
+ for (i = 0; i < lan966x->num_phys_ports; ++i)
+ if (lan966x->ports[i] &&
+ lan966x->ports[i]->bond == foreign_dev)
+ return false;
+
return true;
}
@@ -571,11 +641,11 @@ static struct notifier_block lan966x_netdevice_nb __read_mostly = {
.notifier_call = lan966x_netdevice_event,
};
-static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+struct notifier_block lan966x_switchdev_nb __read_mostly = {
.notifier_call = lan966x_switchdev_event,
};
-static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
.notifier_call = lan966x_switchdev_blocking_event,
};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
new file mode 100644
index 000000000000..3f5b212066c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define LAN966X_TAPRIO_TIMEOUT_MS 1000
+#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
+
+/* Minimum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
+
+/* Maximum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
+
+/* Total number of TAS GCL entries */
+#define LAN966X_TAPRIO_NUM_GCL 256
+
+/* TAPRIO link speeds for calculation of guard band */
+enum lan966x_taprio_link_speed {
+ LAN966X_TAPRIO_SPEED_NO_GB,
+ LAN966X_TAPRIO_SPEED_10,
+ LAN966X_TAPRIO_SPEED_100,
+ LAN966X_TAPRIO_SPEED_1000,
+ LAN966X_TAPRIO_SPEED_2500,
+};
+
+/* TAPRIO list states */
+enum lan966x_taprio_state {
+ LAN966X_TAPRIO_STATE_ADMIN,
+ LAN966X_TAPRIO_STATE_ADVANCING,
+ LAN966X_TAPRIO_STATE_PENDING,
+ LAN966X_TAPRIO_STATE_OPERATING,
+ LAN966X_TAPRIO_STATE_TERMINATING,
+ LAN966X_TAPRIO_STATE_MAX,
+};
+
+/* TAPRIO GCL command */
+enum lan966x_taprio_gcl_cmd {
+ LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
+};
+
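+/* Each port owns LAN966X_TAPRIO_ENTRIES_PER_PORT consecutive TAS lists, so,
+ * for example, port 3 uses lists 6 and 7.
+ */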
+static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
+{
+ return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
+}
+
+static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, QSYS_TAS_LST);
+ return QSYS_TAS_LST_LIST_STATE_GET(val);
+}
+
+static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ return lan966x_taprio_list_state_get(port);
+}
+
+static void lan966x_taprio_list_state_set(struct lan966x_port *port,
+ u32 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+}
+
+static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool pending, operating;
+ unsigned long end;
+ u32 state;
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ /* It may be necessary to try multiple times to set the state of the
+ * list, because the HW can overwrite it.
+ */
+ do {
+ state = lan966x_taprio_list_state_get(port);
+
+ pending = false;
+ operating = false;
+
+ if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
+ state == LAN966X_TAPRIO_STATE_PENDING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_ADMIN);
+ pending = true;
+ }
+
+ if (state == LAN966X_TAPRIO_STATE_OPERATING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_TERMINATING);
+ operating = true;
+ }
+
+ /* If the list was pending and is now in admin state, there is
+ * nothing else to do, so just bail out
+ */
+ state = lan966x_taprio_list_state_get(port);
+ if (pending &&
+ state == LAN966X_TAPRIO_STATE_ADMIN)
+ return 0;
+
+ /* If the list was operating and is now terminating or in admin
+ * state, it is OK to exit the loop, but it still needs to wait
+ * until the list reaches the admin state. It is not required to
+ * set the state again.
+ */
+ if (operating &&
+ (state == LAN966X_TAPRIO_STATE_TERMINATING ||
+ state == LAN966X_TAPRIO_STATE_ADMIN))
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ do {
+ state = lan966x_taprio_list_state_get(port);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ /* If the list was in operating mode, it could have been stopped while
+ * some queues were closed, so make sure to restore "all-queues-open"
+ */
+ if (operating) {
+ lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
+ lan966x, QSYS_TAS_GS_CTRL);
+
+ lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
+ lan966x, QSYS_TAS_GATE_STATE);
+ }
+
+ return 0;
+}
+
+static int lan966x_taprio_shutdown(struct lan966x_port *port)
+{
+ u32 i, list, state;
+ int err;
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list = lan966x_taprio_list_index(port, i);
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ err = lan966x_taprio_list_shutdown(port, list);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Find a suitable list for a new schedule. First priority is a list in state
+ * pending. Second priority is a list in state admin.
+ */
+static int lan966x_taprio_find_list(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int *new_list, int *obs_list)
+{
+ int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int err, oper = -1;
+ u32 i;
+
+ *new_list = -1;
+ *obs_list = -1;
+
+ /* If there is already a list in operating mode, return it in
+ * obs_list, so that when the new list gets activated the operating
+ * list is stopped. In this way it is possible to have smooth
+ * transitions between the lists
+ */
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list[i] = lan966x_taprio_list_index(port, i);
+ state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
+ if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
+ oper = list[i];
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
+ err = lan966x_taprio_shutdown(port);
+ if (err)
+ return err;
+
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
+{
+ u64 total_time = 0;
+ u32 i;
+
+ /* This is not supported by the HW */
+ if (qopt->cycle_time_extension)
+ return -EOPNOTSUPP;
+
+ /* There is a limited number of gcl entries that can be used; they are
+ * shared by all ports
+ */
+ if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
+ return -EINVAL;
+
+ /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
+ if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ for (i = 0; i < qopt->num_entries; ++i) {
+ struct tc_taprio_sched_entry *entry = &qopt->entries[i];
+
+ /* Don't allow intervals bigger than 1 sec or smaller than 1
+ * usec
+ */
+ if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+
+ total_time += qopt->entries[i].interval;
+ }
+
+ /* Don't allow the total time of the intervals to be bigger than 1 sec */
+ if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ /* The HW expects the cycle time to be at least as big as the sum of
+ * all the gcl intervals
+ */
+ if (qopt->cycle_time < total_time)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
+ unsigned long *free_list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 num_free, state, list;
+ u32 base, next, max_list;
+
+ /* By default everything is free */
+ bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
+ num_free = LAN966X_TAPRIO_NUM_GCL;
+
+ /* Iterate over all active lists, follow their gcl entry chains and
+ * mark the entries they use as not free.
+ */
+ max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
+ for (list = 0; list < max_list; ++list) {
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
+ base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
+ next = base;
+
+ do {
+ clear_bit(next, free_list);
+ num_free--;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
+ next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
+ } while (base != next);
+ }
+
+ return num_free;
+}
+
+static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
+ struct tc_taprio_sched_entry *entry,
+ u32 next_entry)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Setup a single gcl entry */
+ lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
+ QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
+ lan966x, QSYS_TAS_GCL_CT_CFG);
+
+ lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
+ lan966x, QSYS_TAS_GCL_CT_CFG2);
+
+ lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
+}
+
+static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int list)
+{
+ DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, base, next;
+
+ if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
+ return -ENOSPC;
+
+ /* Select list */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* Setup the address of the first gcl entry */
+ base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
+ lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
+ QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
+ lan966x, QSYS_TAS_LIST_CFG);
+
+ /* Iterate over entries and add them to the gcl list */
+ next = base;
+ for (i = 0; i < qopt->num_entries; ++i) {
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* If the entry is last, point back to the start of the list */
+ if (i == qopt->num_entries - 1)
+ next = base;
+ else
+ next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
+ next + 1);
+
+ lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
+ }
+
+ return 0;
+}
+
+/* Calculate a new base_time based on cycle_time. The HW recommends the new
+ * base time to be at least current time + 2 * cycle_time
+ */
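+/* For illustration (hypothetical values, in ns): with cycle_time = 1000,
+ * org_base_time = 0 and current_time = 10500, threshold_time is 12500 and
+ * rem = 1000 - (10500 % 1000) = 500, so the new base time becomes 13000,
+ * i.e. a whole number of cycles after org_base_time.
+ */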
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+ const u32 cycle_time,
+ const ktime_t org_base_time,
+ ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time;
+ struct timespec64 ts;
+
+ /* Get the current time and calculate the threshold_time */
+ lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+
+ /* If the org_base_time is far enough in the future, just use it */
+ if (org_base_time >= threshold_time) {
+ *new_base_time = org_base_time;
+ return;
+ }
+
+ /* If the org_base_time is smaller than current_time, calculate the new
+ * base time as follows.
+ */
+ if (org_base_time <= current_time) {
+ u64 tmp = current_time - org_base_time;
+ u32 rem = 0;
+
+ if (tmp > cycle_time)
+ div_u64_rem(tmp, cycle_time, &rem);
+ rem = cycle_time - rem;
+ *new_base_time = threshold_time + rem;
+ return;
+ }
+
+ /* The only remaining case is org_base_time between current_time and
+ * threshold_time. In this case the new_base_time is calculated as
+ * org_base_time + 2 * cycle_time
+ */
+ *new_base_time = org_base_time + 2 * cycle_time;
+}
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 taprio_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ taprio_speed = LAN966X_TAPRIO_SPEED_10;
+ break;
+ case SPEED_100:
+ taprio_speed = LAN966X_TAPRIO_SPEED_100;
+ break;
+ case SPEED_1000:
+ taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+ break;
+ case SPEED_2500:
+ taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+ QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+ lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+ return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err, new_list, obs_list;
+ struct timespec64 ts;
+ ktime_t base_time;
+
+ err = lan966x_taprio_check(qopt);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+ if (err)
+ return err;
+
+ lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+ qopt->base_time, &base_time);
+
+ ts = ktime_to_timespec64(base_time);
+ lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+ lan966x, QSYS_TAS_BT_NSEC);
+
+ lan_wr(lower_32_bits(ts.tv_sec),
+ lan966x, QSYS_TAS_BT_SEC_LSB);
+
+ lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+ lan966x, QSYS_TAS_BT_SEC_MSB);
+
+ lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+ lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+ QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+ lan966x, QSYS_TAS_STARTUP_CFG);
+
+ /* Start list processing */
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+
+ return err;
+}
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+ return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+ int num_taprio_lists;
+ int p;
+
+ lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+ lan966x_ptp_get_period_ps()),
+ lan966x, QSYS_TAS_STM_CFG);
+
+ num_taprio_lists = lan966x->num_phys_ports *
+ LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+ /* For now we always use a guard band on all queues */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+ QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ for (p = 0; p < lan966x->num_phys_ports; p++)
+ lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+ QSYS_TAS_PROFILE_CFG_PORT_NUM,
+ lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ lan966x_taprio_del(lan966x->ports[p]);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
new file mode 100644
index 000000000000..4555a35d0d28
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 cir, cbs;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
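+ /* The port shaper uses the per-port scheduler element, while a
+ * per-queue shaper uses one element per (port, priority queue) pair.
+ */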
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8;
+ cbs = qopt->replace_params.max_size;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
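+ /* For illustration (hypothetical values): a rate of 12500000 bytes/s
+ * (100 Mbit/s) gives cir = 100000 kbps, i.e. 1000 units of 100 kbps,
+ * and a max_size of 10000 bytes gives cbs = 3 units of 4kB.
+ */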
+
+ /* Check that the result can actually be written */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
new file mode 100644
index 000000000000..651d5493ae55
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+
+#include "lan966x_main.h"
+
+static LIST_HEAD(lan966x_tc_block_cb_list);
+
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u8 num_tc = mqprio->qopt.num_tc;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return num_tc ? lan966x_mqprio_add(port, num_tc) :
+ lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ return taprio->enable ? lan966x_taprio_add(port, taprio) :
+ lan966x_taprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return lan966x_tbf_add(port, qopt);
+ case TC_TBF_DESTROY:
+ return lan966x_tbf_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ return qopt->enable ? lan966x_cbs_add(port, qopt) :
+ lan966x_cbs_del(port, qopt);
+}
+
+static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ return lan966x_ets_add(port, qopt);
+ case TC_ETS_DESTROY:
+ return lan966x_ets_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct lan966x_port *port = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return lan966x_tc_matchall(port, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int lan966x_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int lan966x_tc_setup_block(struct lan966x_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = lan966x_tc_block_cb_ingress;
+ port->tc.ingress_shared_block = f->block_shared;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = lan966x_tc_block_cb_egress;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list,
+ cb, port, port, ingress);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return lan966x_tc_setup_qdisc_taprio(port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return lan966x_tc_setup_qdisc_tbf(port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return lan966x_tc_setup_qdisc_cbs(port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return lan966x_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_BLOCK:
+ return lan966x_tc_setup_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
new file mode 100644
index 000000000000..7368433b9277
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_tc_matchall_add(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ return lan966x_police_port_add(port, &f->rule->action, act,
+ f->cookie, ingress,
+ f->common.extack);
+ case FLOW_ACTION_MIRRED:
+ return lan966x_mirror_port_add(port, act, f->cookie,
+ ingress, f->common.extack);
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_matchall_del(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ return lan966x_police_port_del(port, f->cookie,
+ f->common.extack);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ return lan966x_mirror_port_del(port, ingress,
+ f->common.extack);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_matchall_stats(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ lan966x_police_port_stats(port, &f->stats);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ lan966x_mirror_port_stats(port, &f->stats, ingress);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only chain zero is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return lan966x_tc_matchall_add(port, f, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return lan966x_tc_matchall_del(port, f, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return lan966x_tc_matchall_stats(port, f, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 4402c3ed1dc5..d1c6ad966747 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
- sparx5_ptp.o sparx5_pgid.o
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index a5837dbe0c7e..4af285918ea2 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -186,8 +186,8 @@ bool sparx5_mact_getnext(struct sparx5 *sparx5,
return ret == 0;
}
-bool sparx5_mact_find(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
int ret;
u32 cfg2;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 01be7bd84181..62a325e96345 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -27,6 +27,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_qos.h"
#define QLIM_WM(fraction) \
((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
@@ -277,6 +278,7 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+ spx5_port->is_mrouter = false;
sparx5->ports[config->portno] = spx5_port;
err = sparx5_port_init(sparx5, spx5_port, &config->conf);
@@ -661,6 +663,9 @@ static int sparx5_start(struct sparx5 *sparx5)
queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
SPX5_MACT_PULL_DELAY);
+ mutex_init(&sparx5->mdb_lock);
+ INIT_LIST_HEAD(&sparx5->mdb_entries);
+
err = sparx5_register_netdevs(sparx5);
if (err)
return err;
@@ -864,6 +869,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
goto cleanup_ports;
}
+ err = sparx5_qos_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to initialize QoS\n");
+ goto cleanup_ports;
+ }
+
err = sparx5_ptp_init(sparx5);
if (err) {
dev_err(sparx5->dev, "PTP failed\n");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index b197129044b5..7a83222caa73 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -190,6 +190,7 @@ struct sparx5_port {
u8 ptp_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
+ bool is_mrouter;
};
enum sparx5_core_clockfreq {
@@ -215,6 +216,15 @@ struct sparx5_skb_cb {
unsigned long jiffies;
};
+struct sparx5_mdb_entry {
+ struct list_head list;
+ DECLARE_BITMAP(port_mask, SPX5_PORTS);
+ unsigned char addr[ETH_ALEN];
+ bool cpu_copy;
+ u16 vid;
+ u16 pgid_idx;
+};
+
#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
@@ -256,6 +266,10 @@ struct sparx5 {
struct list_head mact_entries;
/* mac table list (mact_entries) mutex */
struct mutex mact_lock;
+ /* SW MDB table */
+ struct list_head mdb_entries;
+ /* mdb list mutex */
+ struct mutex mdb_lock;
struct delayed_work mact_work;
struct workqueue_struct *mact_queue;
/* Board specifics */
@@ -291,7 +305,7 @@ struct frame_info {
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
@@ -307,8 +321,8 @@ int sparx5_mact_learn(struct sparx5 *sparx5, int port,
const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
-bool sparx5_mact_find(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
@@ -325,6 +339,7 @@ void sparx5_mact_init(struct sparx5 *sparx5);
/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index c94de436b281..fa2eb70f487a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -2993,6 +2993,147 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+
+#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
+#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
+#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
+
+#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
+#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
+
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+
+#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
+#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
+#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
+
+#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
+#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
+#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
+
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+
+#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+
+#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
+#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
+#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
+
+#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
+#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_STOP BIT(0)
+#define HSCH_SE_CFG_SE_STOP_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
+#define HSCH_SE_CFG_SE_STOP_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
+
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+
+#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
+ FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
+ FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+
+#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
+#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
+#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
+
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
@@ -3002,6 +3143,30 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
+ FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
+ FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+
/* HSCH:SYSTEM:FLUSH_CTRL */
#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index af4d3e1f1a6d..19516ccad533 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -7,6 +7,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_tc.h"
/* The IFH bit position of the first VSTAX bit. This is because the
* VSTAX bit positions in Data sheet is starting from zero.
@@ -228,6 +229,7 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
.ndo_eth_ioctl = sparx5_port_ioctl,
+ .ndo_setup_tc = sparx5_port_setup_tc,
};
bool sparx5_netdevice_check(const struct net_device *dev)
@@ -240,10 +242,14 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
struct sparx5_port *spx5_port;
struct net_device *ndev;
- ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+ ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port),
+ SPX5_PRIOS, 1);
if (!ndev)
return ERR_PTR(-ENOMEM);
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= NETIF_F_HW_TC;
+
SET_NETDEV_DEV(ndev, sparx5->dev);
spx5_port = netdev_priv(ndev);
spx5_port->ndev = ndev;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 304f84aadc36..83c16ca5b30f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -113,6 +113,8 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* This assumes STATUS_WORD_POS == 1, Status
* just after last data
*/
+ if (!byte_swap)
+ val = ntohl((__force __be32)val);
byte_cnt -= (4 - XTR_VALID_BYTES(val));
eof_flag = true;
break;
@@ -220,13 +222,13 @@ static int sparx5_inject(struct sparx5 *sparx5,
return NETDEV_TX_OK;
}
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
u32 ifh[IFH_LEN];
- int ret;
+ netdev_tx_t ret;
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(ifh, port->portno);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
new file mode 100644
index 000000000000..1e79d0ef0cb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+/* Max rates for leak groups */
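+/* The values appear to be in kbit/s, matching the Gbps figures noted in the
+ * entries below.
+ */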
+static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
+ 1048568, /* 1.049 Gbps */
+ 2621420, /* 2.621 Gbps */
+ 10485680, /* 10.486 Gbps */
+ 26214200 /* 26.214 Gbps */
+};
+
+static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
+
+static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
+ return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
+}
+
+static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
+ HSCH_HSCH_TIMER_CFG(layer, group));
+}
+
+static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
+ return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
+}
+
+static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
+ return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
+}
+
+static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_lg_get_first(sparx5, layer, group);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
+}
+
+static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_first(sparx5, layer, group);
+}
+
+static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
+}
+
+static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ if (sparx5_lg_is_empty(sparx5, layer, group))
+ return false;
+
+ return sparx5_lg_get_first(sparx5, layer, group) ==
+ sparx5_lg_get_last(sparx5, layer, group);
+}
+
+static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
+}
+
+static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, 0);
+}
+
+static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
+ u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ if (sparx5_lg_is_empty(sparx5, layer, i))
+ continue;
+
+ itr = sparx5_lg_get_first(sparx5, layer, i);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
+{
+ struct sparx5_layer *l = &layers[layer];
+ struct sparx5_lg *lg;
+ u32 i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ lg = &l->leak_groups[i];
+ if (rate <= lg->max_rate) {
+ *group = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx, u32 *prev, u32 *next, u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_lg_get_first(sparx5, layer, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_lg_get_next(sparx5, layer, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -1; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 se_first, u32 idx, u32 idx_next, bool empty)
+{
+ u32 leak_time = layers[layer].leak_groups[group].leak_time;
+
+ /* Stop leaking */
+ sparx5_lg_disable(sparx5, layer, group);
+
+ if (empty)
+ return 0;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Link elements */
+ spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
+ HSCH_SE_CONNECT(idx));
+
+ /* Set the first element. */
+ spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
+ HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
+ HSCH_HSCH_LEAK_CFG(layer, group));
+
+ /* Start leaking */
+ sparx5_lg_enable(sparx5, layer, group, leak_time);
+
+ return 0;
+}
+
+static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ /* idx *must* be present in the leak group */
+ WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
+ &first) < 0);
+
+ if (sparx5_lg_is_singular(sparx5, layer, group)) {
+ empty = true;
+ } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
+ empty);
+}
+
+static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
+ u32 idx)
+{
+ u32 first, next, old_group;
+
+ pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
+ idx);
+
+ /* Is this SE already shaping ? */
+ if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
+ if (old_group != new_group) {
+ /* Delete from old group */
+ sparx5_lg_del(sparx5, layer, old_group, idx);
+ } else {
+ /* Nothing to do here */
+ return 0;
+ }
+ }
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_lg_is_empty(sparx5, layer, new_group))
+ next = idx;
+ else
+ next = sparx5_lg_get_first(sparx5, layer, new_group);
+
+ return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
+ false);
+}
+
+static int sparx5_shaper_conf_set(struct sparx5_port *port,
+ const struct sparx5_shaper *sh, u32 layer,
+ u32 idx, u32 group)
+{
+ int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!sh->rate && !sh->burst)
+ sparx5_lg_action = &sparx5_lg_del;
+ else
+ sparx5_lg_action = &sparx5_lg_add;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Set frame mode */
+ spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
+ sparx5, HSCH_SE_CFG(idx));
+
+ /* Set committed rate and burst */
+ spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
+ HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
+ sparx5, HSCH_CIR_CFG(idx));
+
+ /* This has to be done after the shaper configuration has been set */
+ sparx5_lg_action(sparx5, layer, group, idx);
+
+ return 0;
+}
+
+static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
+{
+ return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
+ 1;
+}
+
+static int sparx5_dwrr_conf_set(struct sparx5_port *port,
+ struct sparx5_dwrr *dwrr)
+{
+ int i;
+
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ port->sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Number of *lower* indexes that are arbitrated dwrr */
+ spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
+ HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
+ HSCH_SE_CFG(port->portno));
+
+ for (i = 0; i < dwrr->count; i++) {
+ spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
+ HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
+ HSCH_DWRR_ENTRY(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_leak_groups_init(struct sparx5 *sparx5)
+{
+ struct sparx5_layer *layer;
+ u32 sys_clk_per_100ps;
+ struct sparx5_lg *lg;
+ u32 leak_time_us;
+ int i, ii;
+
+ sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
+
+ for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
+ layer = &layers[i];
+ for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
+ lg = &layer->leak_groups[ii];
+ lg->max_rate = spx5_hsch_max_group_rate[ii];
+
+ /* Calculate the leak time in us, to serve a maximum
+ * rate of 'max_rate' for this group
+ */
+ leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
+
+ /* Hardware wants leak time in ns */
+ lg->leak_time = 1000 * leak_time_us;
+
+ /* Calculate resolution */
+ lg->resolution = 1000 / leak_time_us;
+
+ /* Maximum number of shapers that can be served by
+ * this leak group
+ */
+ lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
+
+ /* Example:
+ * Wanted bandwidth is 100Mbit:
+ *
+ * 100 mbps can be served by leak group zero.
+ *
+ * leak_time is 125000 ns.
+ * resolution is: 8
+ *
+ * cir = 100000 / 8 = 12500
+ * leaks_pr_sec = 10^9 / 125000 = 8000
+ * bw = 12500 * 8000 = 10^8 (100 Mbit)
+ */
+
+ /* Disable by default - this also indicates an empty
+ * leak group
+ */
+ sparx5_lg_disable(sparx5, i, ii);
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_qos_init(struct sparx5 *sparx5)
+{
+ int ret;
+
+ ret = sparx5_leak_groups_init(sparx5);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
+{
+ int i;
+
+ if (num_tc != SPX5_PRIOS) {
+ netdev_err(ndev, "Only %d traffic classes supported\n",
+ SPX5_PRIOS);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_del(struct net_device *ndev)
+{
+ netdev_reset_tc(ndev);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {
+ .mode = SPX5_SE_MODE_DATARATE,
+ .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
+ .burst = params->max_size,
+ };
+ struct sparx5_lg *lg;
+ u32 group;
+
+ /* Find suitable group for this se */
+ if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
+ pr_debug("Could not find leak group for se with rate: %d",
+ sh.rate);
+ return -EINVAL;
+ }
+
+ lg = &layers[layer].leak_groups[group];
+
+ pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
+
+ if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
+ return -EINVAL;
+
+ /* Calculate committed rate and burst */
+ sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
+ sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
+
+ if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
+ return -EINVAL;
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {0};
+ u32 group;
+
+ sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params)
+{
+ struct sparx5_dwrr dwrr = {0};
+ /* Minimum weight for each iteration */
+ unsigned int w_min = 100;
+ int i;
+
+ /* Find minimum weight for all dwrr bands */
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ if (params->quanta[i] == 0)
+ continue;
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ /* Strict band; skip */
+ if (params->quanta[i] == 0)
+ continue;
+
+ dwrr.count++;
+
+ /* On the sparx5, bands with higher indexes are preferred and
+ * arbitrated strict. Strict bands are put in the lower indexes,
+ * by tc, so we reverse the bands here.
+ *
+ * Also convert the weight to something the hardware
+ * understands.
+ */
+ dwrr.cost[SPX5_PRIOS - i - 1] =
+ sparx5_weight_to_hw_cost(w_min, params->weights[i]);
+ }
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
+
+int sparx5_tc_ets_del(struct sparx5_port *port)
+{
+ struct sparx5_dwrr dwrr = {0};
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
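As a side note on the shaper math in sparx5_tc_tbf_add() above: the tc byte rate is first converted to kbit/s, a leak group is chosen by rate, and rate and burst are then quantized by the group's resolution and SPX5_SE_BURST_UNIT. The user-space sketch below (not part of the patch) reproduces that arithmetic; the resolution value is illustrative rather than read from the leak-group tables.

/* Standalone sketch of the TBF quantization done in sparx5_tc_tbf_add():
 * convert the tc rate to kbit/s, then express it in units of the leak
 * group's resolution and the burst in SPX5_SE_BURST_UNIT chunks.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

#define SPX5_SE_RATE_MAX   262143
#define SPX5_SE_BURST_MAX  127
#define SPX5_SE_BURST_UNIT 4096

int main(void)
{
	uint64_t rate_bytes_ps = 12500000;   /* tc: 100 Mbit/s */
	uint32_t burst_bytes = 65536;        /* tc: burst 64 KiB */
	uint32_t resolution = 4;             /* illustrative leak-group resolution */

	uint32_t rate_kbps = (uint32_t)(rate_bytes_ps / 1000 * 8);    /* 100000 */
	uint32_t cir = DIV_ROUND_UP(rate_kbps, resolution);           /* rate in resolution units */
	uint32_t cbs = DIV_ROUND_UP(burst_bytes, SPX5_SE_BURST_UNIT); /* burst in 4 KiB units */

	printf("cir=%u (max %u), cbs=%u (max %u)\n",
	       cir, SPX5_SE_RATE_MAX, cbs, SPX5_SE_BURST_MAX);
	return 0;
}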
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
new file mode 100644
index 000000000000..ced35033a6c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_QOS_H__
+#define __SPARX5_QOS_H__
+
+#include <linux/netdevice.h>
+
+/* Number of Layers */
+#define SPX5_HSCH_LAYER_CNT 3
+
+/* Scheduling elements per layer */
+#define SPX5_HSCH_L0_SE_CNT 5040
+#define SPX5_HSCH_L1_SE_CNT 64
+#define SPX5_HSCH_L2_SE_CNT 64
+
+/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
+#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
+
+/* Number of leak groups */
+#define SPX5_HSCH_LEAK_GRP_CNT 4
+
+/* Scheduler modes */
+#define SPX5_SE_MODE_LINERATE 0
+#define SPX5_SE_MODE_DATARATE 1
+
+/* Rate and burst */
+#define SPX5_SE_RATE_MAX 262143
+#define SPX5_SE_BURST_MAX 127
+#define SPX5_SE_RATE_MIN 1
+#define SPX5_SE_BURST_MIN 1
+#define SPX5_SE_BURST_UNIT 4096
+
+/* Dwrr */
+#define SPX5_DWRR_COST_MAX 63
+
+struct sparx5_shaper {
+ u32 mode;
+ u32 rate;
+ u32 burst;
+};
+
+struct sparx5_lg {
+ u32 max_rate;
+ u32 resolution;
+ u32 leak_time;
+ u32 max_ses;
+};
+
+struct sparx5_layer {
+ struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
+};
+
+struct sparx5_dwrr {
+ u32 count; /* Number of inputs running dwrr */
+ u8 cost[SPX5_PRIOS];
+};
+
+int sparx5_qos_init(struct sparx5 *sparx5);
+
+/* Multi-Queue Priority */
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
+int sparx5_tc_mqprio_del(struct net_device *ndev);
+
+/* Token Bucket Filter */
+struct tc_tbf_qopt_offload_replace_params;
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx);
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
+
+/* Enhanced Transmission Selection */
+struct tc_ets_qopt_offload_replace_params;
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params);
+
+int sparx5_tc_ets_del(struct sparx5_port *port);
+
+#endif /* __SPARX5_QOS_H__ */
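Two pieces of arithmetic from this header and sparx5_qos.c are easy to sanity-check by hand: the layer-0 scheduling-element index macro and the DWRR weight-to-cost conversion (a 4-bit fixed-point divide rounded to nearest). The small stand-alone program below, with made-up port/queue/weight numbers, mirrors both; it is an illustration, not part of the patch.

/* Worked example of the scheduler-index and DWRR-cost arithmetic used by
 * sparx5_qos.c; values are only illustrative.
 */
#include <stdio.h>

#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
#define SPX5_DWRR_COST_MAX 63

/* Mirror of sparx5_weight_to_hw_cost(): 4-bit fixed point, round to nearest */
static unsigned int weight_to_hw_cost(unsigned int w_min, unsigned int weight)
{
	return ((((SPX5_DWRR_COST_MAX << 4) * w_min / weight) + 8) >> 4) - 1;
}

int main(void)
{
	/* Port 2, queue 3 maps to layer-0 scheduling element 64*2 + 8*3 = 152 */
	printf("L0 SE index: %d\n", SPX5_HSCH_L0_GET_IDX(2, 3));

	/* The minimum-weight band gets the largest cost (62); a band with
	 * twice the minimum weight gets roughly half of that cost.
	 */
	printf("cost(10,10)=%u cost(10,20)=%u\n",
	       weight_to_hw_cost(10, 10), weight_to_hw_cost(10, 20));
	return 0;
}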
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index ec07f7d0528c..4af85d108a06 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -29,14 +29,23 @@ static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
return 0;
}
+static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
+{
+ bool should_flood = flood_flag || port->is_mrouter;
+ int pgid;
+
+ for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ sparx5_pgid_update_mask(port, pgid, should_flood);
+}
+
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
- int pgid;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
+ sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
+ }
- if (flags.mask & BR_MCAST_FLOOD)
- for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
- sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
if (flags.mask & BR_FLOOD)
sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
if (flags.mask & BR_BCAST_FLOOD)
@@ -82,6 +91,37 @@ static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
sparx5_set_ageing(port->sparx5, ageing_time);
}
+static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
+ struct net_device *orig_dev,
+ bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mdb_entry *e;
+ bool flood_flag;
+
+ if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
+ return;
+
+ /* Add/del mrouter port on all active mdb entries in HW.
+ * Don't change entry port mask, since that represents
+ * ports that actually joined that group.
+ */
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (!test_bit(port->portno, e->port_mask) &&
+ ether_addr_is_ip_mcast(e->addr))
+ sparx5_pgid_update_mask(port, e->pgid_idx, enable);
+ }
+ mutex_unlock(&sparx5->mdb_lock);
+
+ /* Enable/disable flooding depending on if port is mrouter port
+ * or if mcast flood is enabled.
+ */
+ port->is_mrouter = enable;
+ flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
+ sparx5_port_update_mcast_ip_flood(port, flood_flag);
+}
+
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -110,6 +150,11 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
port->vlan_aware = attr->u.vlan_filtering;
sparx5_vlan_port_apply(port->sparx5, port);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ sparx5_port_attr_mrouter_set(port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -386,16 +431,95 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
+static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid,
+ struct sparx5_mdb_entry **entry_out)
+{
+ struct sparx5_mdb_entry *entry;
+ u16 pgid_idx;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
+ if (err) {
+ kfree(entry);
+ return err;
+ }
+
+ memcpy(entry->addr, addr, ETH_ALEN);
+ entry->vid = vid;
+ entry->pgid_idx = pgid_idx;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_add_tail(&entry->list, &sparx5->mdb_entries);
+ mutex_unlock(&sparx5->mdb_lock);
+
+ *entry_out = entry;
+ return 0;
+}
+
+static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *entry, *tmp;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
+ if ((vid == 0 || entry->vid == vid) &&
+ ether_addr_equal(addr, entry->addr)) {
+ list_del(&entry->list);
+
+ sparx5_pgid_free(sparx5, entry->pgid_idx);
+ kfree(entry);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+}
+
+static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *e, *found = NULL;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
+ found = e;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+ return found;
+}
+
+static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
+{
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid));
+}
+
static int sparx5_handle_port_mdb_add(struct net_device *dev,
struct notifier_block *nb,
const struct switchdev_obj_port_mdb *v)
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
- u16 pgid_idx, vid;
- u32 mact_entry;
- bool is_host;
- int res, err;
+ struct sparx5_mdb_entry *entry;
+ bool is_host, is_new;
+ int err, i;
+ u16 vid;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
@@ -410,66 +534,36 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
else
vid = v->vid;
- res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
-
- if (res == 0) {
- pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
-
- /* MC_IDX starts after the port masks in the PGID table */
- pgid_idx += SPX5_PORTS;
-
- if (is_host)
- spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
- ANA_AC_PGID_MISC_CFG(pgid_idx));
- else
- sparx5_pgid_update_mask(port, pgid_idx, true);
-
- } else {
- err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
- if (err) {
- netdev_warn(dev, "multicast pgid table full\n");
+ is_new = false;
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry) {
+ err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
+ is_new = true;
+ if (err)
return err;
- }
-
- if (is_host)
- spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
- ANA_AC_PGID_MISC_CFG(pgid_idx));
- else
- sparx5_pgid_update_mask(port, pgid_idx, true);
-
- err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
-
- if (err) {
- netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
- sparx5_pgid_free(spx5, pgid_idx);
- sparx5_pgid_update_mask(port, pgid_idx, false);
- return err;
- }
}
- return 0;
-}
+ mutex_lock(&spx5->mdb_lock);
+
+ /* Add any mrouter ports to the new entry */
+ if (is_new && ether_addr_is_ip_mcast(v->addr))
+ for (i = 0; i < SPX5_PORTS; i++)
+ if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
+ sparx5_pgid_update_mask(spx5->ports[i],
+ entry->pgid_idx,
+ true);
+
+ if (is_host && !entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
+ entry->cpu_copy = true;
+ } else if (!is_host) {
+ sparx5_pgid_update_mask(port, entry->pgid_idx, true);
+ set_bit(port->portno, entry->port_mask);
+ }
+ mutex_unlock(&spx5->mdb_lock);
-static int sparx5_mdb_del_entry(struct net_device *dev,
- struct sparx5 *spx5,
- const unsigned char mac[ETH_ALEN],
- const u16 vid,
- u16 pgid_idx)
-{
- int err;
+ sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);
- err = sparx5_mact_forget(spx5, mac, vid);
- if (err) {
- netdev_warn(dev, "could not forget mac address %pM", mac);
- return err;
- }
- err = sparx5_pgid_free(spx5, pgid_idx);
- if (err) {
- netdev_err(dev, "attempted to free already freed pgid\n");
- return err;
- }
return 0;
}
@@ -479,42 +573,45 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
- u16 pgid_idx, vid;
- u32 mact_entry, res, pgid_entry[3], misc_cfg;
- bool host_ena;
+ struct sparx5_mdb_entry *entry;
+ bool is_host;
+ u16 vid;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
+
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
vid = v->vid;
- res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
-
- if (res == 0) {
- pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
-
- /* MC_IDX starts after the port masks in the PGID table */
- pgid_idx += SPX5_PORTS;
-
- if (netif_is_bridge_master(v->obj.orig_dev))
- spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
- ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
- ANA_AC_PGID_MISC_CFG(pgid_idx));
- else
- sparx5_pgid_update_mask(port, pgid_idx, false);
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry)
+ return 0;
- misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
- host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);
+ mutex_lock(&spx5->mdb_lock);
+ if (is_host && entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
+ entry->cpu_copy = false;
+ } else if (!is_host) {
+ clear_bit(port->portno, entry->port_mask);
- sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
- if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
- /* No ports or CPU are in MC group. Remove entry */
- return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
+ /* Port not mrouter port or addr is L2 mcast, remove port from mask. */
+ if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
+ sparx5_pgid_update_mask(port, entry->pgid_idx, false);
+ }
+ mutex_unlock(&spx5->mdb_lock);
+
+ if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
+ /* Clear pgid in case mrouter ports exists
+ * that are not part of the group.
+ */
+ sparx5_pgid_clear(spx5, entry->pgid_idx);
+ sparx5_mact_forget(spx5, entry->addr, entry->vid);
+ sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
}
-
return 0;
}
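The switchdev rework above boils down to one rule for the IP-multicast flood PGIDs: flood if the bridge's BR_MCAST_FLOOD flag is set on the port or the port has been marked as a multicast router. A minimal stand-alone sketch of that decision follows; the port_state struct is hypothetical and only models the two flags involved.

/* Minimal sketch of the flood decision introduced above: a port floods
 * unknown IP multicast if either the bridge's BR_MCAST_FLOOD flag is set
 * on it or it has been marked as an mrouter port.
 */
#include <stdbool.h>
#include <stdio.h>

struct port_state {
	bool mcast_flood;   /* BR_MCAST_FLOOD bridge flag */
	bool is_mrouter;    /* SWITCHDEV_ATTR_ID_PORT_MROUTER */
};

static bool should_flood_ip_mcast(const struct port_state *p)
{
	return p->mcast_flood || p->is_mrouter;
}

int main(void)
{
	struct port_state p = { .mcast_flood = false, .is_mrouter = true };

	/* Even with flooding disabled on the bridge, an mrouter port still
	 * receives unknown IP multicast (PGID_IPV4_MC_* / PGID_IPV6_MC_*).
	 */
	printf("flood: %d\n", should_flood_ip_mcast(&p));
	return 0;
}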
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
new file mode 100644
index 000000000000..e05429c751ee
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_tc.h"
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
+ u32 *idx)
+{
+ if (parent == TC_H_ROOT) {
+ *layer = 2;
+ *idx = portno;
+ } else {
+ u32 queue = TC_H_MIN(parent) - 1;
+ *layer = 0;
+ *idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
+ }
+}
+
+static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
+ struct tc_mqprio_qopt_offload *m)
+{
+ m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ if (m->qopt.num_tc == 0)
+ return sparx5_tc_mqprio_del(ndev);
+ else
+ return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
+}
+
+static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 layer, se_idx;
+
+ sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
+ &se_idx);
+
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
+ se_idx);
+ case TC_TBF_DESTROY:
+ return sparx5_tc_tbf_del(port, layer, se_idx);
+ case TC_TBF_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params =
+ &qopt->replace_params;
+ struct sparx5_port *port = netdev_priv(ndev);
+ int i;
+
+ /* Only allow ets on ports */
+ if (qopt->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+
+ /* We support eight priorities */
+ if (params->bands != SPX5_PRIOS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checks */
+ for (i = 0; i < SPX5_PRIOS; ++i) {
+ /* Priority map is *always* reverse e.g: 7 6 5 .. 0 */
+ if (params->priomap[i] != (7 - i))
+ return -EOPNOTSUPP;
+ /* Reject zero weights received from tc */
+ if (params->quanta[i] && params->weights[i] == 0) {
+ pr_err("Invalid ets configuration; band %d has weight zero",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ sparx5_tc_ets_add(port, params);
+ break;
+ case TC_ETS_DESTROY:
+
+ sparx5_tc_ets_del(port);
+
+ break;
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return sparx5_tc_setup_qdisc_ets(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
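For context on sparx5_tc_get_layer_and_idx() above: a root qdisc shapes the whole port on HSCH layer 2, while a qdisc attached under a per-queue parent (such as an mqprio class) maps to a layer-0 element. The stand-alone sketch below mirrors that mapping; the TC_H_* constants are re-derived locally and the port/parent values are only examples.

/* Standalone illustration of how a tc qdisc parent maps to an HSCH
 * scheduling element.
 */
#include <stdint.h>
#include <stdio.h>

#define TC_H_ROOT      0xFFFFFFFFu
#define TC_H_MIN(h)    ((h) & 0xFFFFu)
#define L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))

static void get_layer_and_idx(uint32_t parent, uint32_t portno,
			      uint32_t *layer, uint32_t *idx)
{
	if (parent == TC_H_ROOT) {
		*layer = 2;          /* port shaper */
		*idx = portno;
	} else {
		*layer = 0;          /* per-queue shaper */
		*idx = L0_GET_IDX(portno, TC_H_MIN(parent) - 1);
	}
}

int main(void)
{
	uint32_t layer, idx;

	get_layer_and_idx(TC_H_ROOT, 5, &layer, &idx);
	printf("root on port 5 -> layer %u, idx %u\n", layer, idx);

	get_layer_and_idx(0x00010003, 5, &layer, &idx);   /* parent 1:3 */
	printf("queue 2 on port 5 -> layer %u, idx %u\n", layer, idx);
	return 0;
}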
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
new file mode 100644
index 000000000000..5b55e11b77e1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_TC_H__
+#define __SPARX5_TC_H__
+
+#include <linux/netdevice.h>
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
+#endif /* __SPARX5_TC_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 37e4ac965849..34f954bbf815 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -138,6 +138,13 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
}
}
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
+{
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 5f9240182351..a6f99b4344d9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -397,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg)
break;
}
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+
mana_gd_process_eqe(eq);
eq->head++;
@@ -1134,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading completion info
+ */
+ rmb();
+
comp->wq_num = cqe->cqe_info.wq_num;
comp->is_sq = cqe->cqe_info.is_sq;
memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
@@ -1465,10 +1475,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
static const struct pci_device_id mana_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
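Both gdma hunks add the same ordering rule: check the descriptor's owner/valid bits first, issue a read barrier, and only then read the rest of the descriptor. The generic user-space sketch below (not part of the patch) illustrates the pattern; __atomic_thread_fence(__ATOMIC_ACQUIRE) merely stands in for the kernel's rmb(), and the descriptor layout is hypothetical.

/* The consumer first checks the descriptor's owner bits, then issues a
 * read barrier, and only then reads the payload.  Without the barrier the
 * CPU may load the payload before the ownership check and see stale data.
 */
#include <stdbool.h>
#include <stdint.h>

struct desc {
	uint32_t owner_bits;   /* written last by the device */
	uint32_t payload;      /* written first by the device */
};

static bool consume(volatile struct desc *d, uint32_t expected_owner,
		    uint32_t *out)
{
	if ((d->owner_bits & 0x3) != expected_owner)
		return false;                      /* not ours yet */

	__atomic_thread_fence(__ATOMIC_ACQUIRE);   /* rmb() stand-in */

	*out = d->payload;                         /* safe to read now */
	return true;
}

int main(void)
{
	struct desc d = { .owner_bits = 1, .payload = 42 };
	uint32_t v;

	return consume(&d, 1, &v) && v == 42 ? 0 : 1;
}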
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a762e4b..3da99b62797d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -29,12 +29,12 @@
#include "moxart_ether.h"
-static inline void moxart_desc_write(u32 data, u32 *desc)
+static inline void moxart_desc_write(u32 data, __le32 *desc)
{
*desc = cpu_to_le32(data);
}
-static inline u32 moxart_desc_read(u32 *desc)
+static inline u32 moxart_desc_read(__le32 *desc)
{
return le32_to_cpu(*desc);
}
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
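The moxart changes consistently replace &ndev->dev with the platform device as the DMA-mapping device and add a firmware MAC lookup with a random fallback. The condensed, hypothetical probe fragment below (kernel-style, a sketch rather than a buildable module) shows the same pattern with a made-up helper name.

/* Hypothetical probe-time helper following the pattern above: the
 * DMA-capable device is the platform device (&pdev->dev), not the
 * net_device's embedded struct device, and the MAC address falls back
 * to a random one unless the lookup must be deferred.
 */
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>

static int example_setup_addr_and_rx_buf(struct platform_device *pdev,
					 struct net_device *ndev,
					 void *buf, size_t len,
					 dma_addr_t *mapping)
{
	int ret;

	ret = platform_get_ethdev_address(&pdev->dev, ndev);
	if (ret == -EPROBE_DEFER)
		return ret;             /* nvmem provider not ready yet */
	if (ret)
		eth_hw_addr_random(ndev);

	*mapping = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;

	return 0;
}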
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 41b34a509308..5d435a565d4c 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -2,16 +2,17 @@
obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o
mscc_ocelot_switch_lib-y := \
ocelot.o \
+ ocelot_devlink.o \
+ ocelot_flower.o \
ocelot_io.o \
ocelot_police.o \
- ocelot_vcap.o \
- ocelot_flower.o \
ocelot_ptp.o \
- ocelot_devlink.o \
+ ocelot_stats.o \
+ ocelot_vcap.o \
vsc7514_regs.o
mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_fdma.o \
- ocelot_vsc7514.o \
- ocelot_net.o
+ ocelot_net.o \
+ ocelot_vsc7514.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d4649e4ee0e7..13b14110a060 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,7 +6,6 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
-#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
@@ -290,6 +289,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
if (!(vlan->portmask & BIT(port)))
continue;
+ /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+ * because this is never active in hardware at the same time as
+ * the bridge VLANs, which only matter in VLAN-aware mode.
+ */
+ if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+ continue;
+
if (vlan->untagged & BIT(port))
num_untagged++;
}
@@ -910,211 +916,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- unsigned long flags;
-
- spin_lock_irqsave(&ocelot->ts_id_lock, flags);
-
- if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
- ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return -EBUSY;
- }
-
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
- OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
-
- ocelot_port->ts_id++;
- if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
- ocelot_port->ts_id = 0;
-
- ocelot_port->ptp_skbs_in_flight++;
- ocelot->ptp_skbs_in_flight++;
-
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
-
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
-
- return 0;
-}
-
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
- unsigned int ptp_class)
-{
- struct ptp_header *hdr;
- u8 msgtype, twostep;
-
- hdr = ptp_parse_header(skb, ptp_class);
- if (!hdr)
- return false;
-
- msgtype = ptp_get_msgtype(hdr, ptp_class);
- twostep = hdr->flag_field[0] & 0x2;
-
- if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
- return true;
-
- return false;
-}
-
-int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
- struct sk_buff *skb,
- struct sk_buff **clone)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u8 ptp_cmd = ocelot_port->ptp_cmd;
- unsigned int ptp_class;
- int err;
-
- /* Don't do anything if PTP timestamping not enabled */
- if (!ptp_cmd)
- return 0;
-
- ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
-
- /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
- if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
- OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
- return 0;
- }
-
- /* Fall back to two-step timestamping */
- ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- }
-
- if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- *clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
-
- err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
- if (err)
- return err;
-
- OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
- OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ocelot_port_txtstamp_request);
-
-static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
- struct timespec64 *ts)
-{
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- /* Read current PTP time to get seconds */
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
-
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
- ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
-
- /* Read packet HW timestamp from FIFO */
- val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
- ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
-
- /* Sec has incremented since the ts was registered */
- if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
- ts->tv_sec--;
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
-}
-
-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
- if (WARN_ON(!hdr))
- return false;
-
- return seqid == ntohs(hdr->sequence_id);
-}
-
-void ocelot_get_txtstamp(struct ocelot *ocelot)
-{
- int budget = OCELOT_PTP_QUEUE_SZ;
-
- while (budget--) {
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
- struct skb_shared_hwtstamps shhwtstamps;
- u32 val, id, seqid, txport;
- struct ocelot_port *port;
- struct timespec64 ts;
- unsigned long flags;
-
- val = ocelot_read(ocelot, SYS_PTP_STATUS);
-
- /* Check if a timestamp can be retrieved */
- if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
- break;
-
- WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
-
- /* Retrieve the ts ID and Tx port */
- id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
- txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
- seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
-
- port = ocelot->ports[txport];
-
- spin_lock(&ocelot->ts_id_lock);
- port->ptp_skbs_in_flight--;
- ocelot->ptp_skbs_in_flight--;
- spin_unlock(&ocelot->ts_id_lock);
-
- /* Retrieve its associated skb */
-try_again:
- spin_lock_irqsave(&port->tx_skbs.lock, flags);
-
- skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (OCELOT_SKB_CB(skb)->ts_id != id)
- continue;
- __skb_unlink(skb, &port->tx_skbs);
- skb_match = skb;
- break;
- }
-
- spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
-
- if (WARN_ON(!skb_match))
- continue;
-
- if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
- dev_err_ratelimited(ocelot->dev,
- "port %d received stale TX timestamp for seqid %d, discarding\n",
- txport, seqid);
- dev_kfree_skb_any(skb);
- goto try_again;
- }
-
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
- /* Set the timestamp into the skb */
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_complete_tx_timestamp(skb_match, &shhwtstamps);
-
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
- }
-}
-EXPORT_SYMBOL(ocelot_get_txtstamp);
-
static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
u32 *rval)
{
@@ -1366,50 +1167,6 @@ int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
}
EXPORT_SYMBOL(ocelot_fdb_del);
-int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data)
-{
- struct ocelot_dump_ctx *dump = data;
- u32 portid = NETLINK_CB(dump->cb->skb).portid;
- u32 seq = dump->cb->nlh->nlmsg_seq;
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
-
- if (dump->idx < dump->cb->args[2])
- goto skip;
-
- nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
- sizeof(*ndm), NLM_F_MULTI);
- if (!nlh)
- return -EMSGSIZE;
-
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
-
- if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
- goto nla_put_failure;
-
- nlmsg_end(dump->skb, nlh);
-
-skip:
- dump->idx++;
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(dump->skb, nlh);
- return -EMSGSIZE;
-}
-EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
-
/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry)
@@ -1541,53 +1298,6 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_fdb_dump);
-static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
-}
-
-static void
-ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV4;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv4.dport.value = PTP_EV_PORT;
- trap->key.ipv4.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv6.dport.value = PTP_EV_PORT;
- trap->key.ipv6.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV4;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv4.dport.value = PTP_GEN_PORT;
- trap->key.ipv4.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv6.dport.value = PTP_GEN_PORT;
- trap->key.ipv6.dport.mask = 0xffff;
-}
-
int ocelot_trap_add(struct ocelot *ocelot, int port,
unsigned long cookie, bool take_ts,
void (*populate)(struct ocelot_vcap_filter *f))
@@ -1656,363 +1366,6 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
return ocelot_vcap_filter_replace(ocelot, trap);
}
-static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
-
- return ocelot_trap_add(ocelot, port, l2_cookie, true,
- ocelot_populate_l2_ptp_trap_key);
-}
-
-static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
-
- return ocelot_trap_del(ocelot, port, l2_cookie);
-}
-
-static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
- unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
- ocelot_populate_ipv4_ptp_event_trap_key);
- if (err)
- return err;
-
- err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
- ocelot_populate_ipv4_ptp_general_trap_key);
- if (err)
- ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
-
- return err;
-}
-
-static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
- unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
- err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
- return err;
-}
-
-static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
- unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
- ocelot_populate_ipv6_ptp_event_trap_key);
- if (err)
- return err;
-
- err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
- ocelot_populate_ipv6_ptp_general_trap_key);
- if (err)
- ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
-
- return err;
-}
-
-static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
- unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
- err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
- return err;
-}
-
-static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
- bool l2, bool l4)
-{
- int err;
-
- if (l2)
- err = ocelot_l2_ptp_trap_add(ocelot, port);
- else
- err = ocelot_l2_ptp_trap_del(ocelot, port);
- if (err)
- return err;
-
- if (l4) {
- err = ocelot_ipv4_ptp_trap_add(ocelot, port);
- if (err)
- goto err_ipv4;
-
- err = ocelot_ipv6_ptp_trap_add(ocelot, port);
- if (err)
- goto err_ipv6;
- } else {
- err = ocelot_ipv4_ptp_trap_del(ocelot, port);
-
- err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
- }
- if (err)
- return err;
-
- return 0;
-
-err_ipv6:
- ocelot_ipv4_ptp_trap_del(ocelot, port);
-err_ipv4:
- if (l2)
- ocelot_l2_ptp_trap_del(ocelot, port);
- return err;
-}
-
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
-{
- return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
- sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(ocelot_hwstamp_get);
-
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
- int err;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- /* Tx type sanity check */
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_ON:
- ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- break;
- case HWTSTAMP_TX_ONESTEP_SYNC:
- /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
- * need to update the origin time.
- */
- ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
- break;
- case HWTSTAMP_TX_OFF:
- ocelot_port->ptp_cmd = 0;
- break;
- default:
- return -ERANGE;
- }
-
- mutex_lock(&ocelot->ptp_lock);
-
- switch (cfg.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- l2 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- l2 = true;
- l4 = true;
- break;
- default:
- mutex_unlock(&ocelot->ptp_lock);
- return -ERANGE;
- }
-
- err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
- if (err) {
- mutex_unlock(&ocelot->ptp_lock);
- return err;
- }
-
- if (l2 && l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- else if (l2)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- else
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-
- /* Commit back the result & save it */
- memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
- mutex_unlock(&ocelot->ptp_lock);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(ocelot_hwstamp_set);
-
-void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
-{
- int i;
-
- if (sset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ocelot->num_stats; i++)
- memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
- ETH_GSTRING_LEN);
-}
-EXPORT_SYMBOL(ocelot_get_strings);
-
-/* Caller must hold &ocelot->stats_lock */
-static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
-{
- unsigned int idx = port * ocelot->num_stats;
- struct ocelot_stats_region *region;
- int err, j;
-
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
-
- list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- region->offset, region->buf,
- region->count);
- if (err)
- return err;
-
- for (j = 0; j < region->count; j++) {
- u64 *stat = &ocelot->stats[idx + j];
- u64 val = region->buf[j];
-
- if (val < (*stat & U32_MAX))
- *stat += (u64)1 << 32;
-
- *stat = (*stat & ~(u64)U32_MAX) + val;
- }
-
- idx += region->count;
- }
-
- return err;
-}
-
-static void ocelot_check_stats_work(struct work_struct *work)
-{
- struct delayed_work *del_work = to_delayed_work(work);
- struct ocelot *ocelot = container_of(del_work, struct ocelot,
- stats_work);
- int i, err;
-
- mutex_lock(&ocelot->stats_lock);
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- err = ocelot_port_update_stats(ocelot, i);
- if (err)
- break;
- }
- mutex_unlock(&ocelot->stats_lock);
-
- if (err)
- dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
-
- queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
- OCELOT_STATS_CHECK_DELAY);
-}
-
-void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
-{
- int i, err;
-
- mutex_lock(&ocelot->stats_lock);
-
- /* check and update now */
- err = ocelot_port_update_stats(ocelot, port);
-
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
-
- mutex_unlock(&ocelot->stats_lock);
-
- if (err)
- dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
-}
-EXPORT_SYMBOL(ocelot_get_ethtool_stats);
-
-int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
-{
- if (sset != ETH_SS_STATS)
- return -EOPNOTSUPP;
-
- return ocelot->num_stats;
-}
-EXPORT_SYMBOL(ocelot_get_sset_count);
-
-static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
-{
- struct ocelot_stats_region *region = NULL;
- unsigned int last;
- int i;
-
- INIT_LIST_HEAD(&ocelot->stats_regions);
-
- for (i = 0; i < ocelot->num_stats; i++) {
- if (region && ocelot->stats_layout[i].offset == last + 1) {
- region->count++;
- } else {
- region = devm_kzalloc(ocelot->dev, sizeof(*region),
- GFP_KERNEL);
- if (!region)
- return -ENOMEM;
-
- region->offset = ocelot->stats_layout[i].offset;
- region->count = 1;
- list_add_tail(&region->node, &ocelot->stats_regions);
- }
-
- last = ocelot->stats_layout[i].offset;
- }
-
- list_for_each_entry(region, &ocelot->stats_regions, node) {
- region->buf = devm_kcalloc(ocelot->dev, region->count,
- sizeof(*region->buf), GFP_KERNEL);
- if (!region->buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info)
-{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- return 0;
- }
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
- BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-
- return 0;
-}
-EXPORT_SYMBOL(ocelot_get_ts_info);
-
static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
{
u32 mask = 0;
@@ -2036,7 +1389,7 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
/* The logical port number of a LAG is equal to the lowest numbered physical
* port ID present in that LAG. It may change if that port ever leaves the LAG.
*/
-static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
{
int bond_mask = ocelot_get_bond_mask(ocelot, bond);
@@ -2045,7 +1398,18 @@ static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
return __ffs(bond_mask);
}
+EXPORT_SYMBOL_GPL(ocelot_bond_get_id);
+/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port.
+ * Note that when CPU ports are in a LAG, the user ports are assigned to the
+ * 'primary' CPU port, the one whose physical port number gives the logical
+ * port number of the LAG.
+ *
+ * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG
+ * (to which no user port is assigned), but it appears that forwarding from
+ * this secondary CPU port looks at the PGID_SRC associated with the logical
+ * port ID that it's assigned to, which *is* configured properly.
+ */
static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
struct ocelot_port *cpu)
{
@@ -2062,9 +1426,15 @@ static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
mask |= BIT(port);
}
+ if (cpu->bond)
+ mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
+
return mask;
}
+/* Returns the DSA tag_8021q CPU port that the given port is assigned to,
+ * or the bit mask of CPU ports if said CPU port is in a LAG.
+ */
u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2073,6 +1443,9 @@ u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
if (!cpu_port)
return 0;
+ if (cpu_port->bond)
+ return ocelot_get_bond_mask(ocelot, cpu_port->bond);
+
return BIT(cpu_port->index);
}
EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
@@ -2196,61 +1569,61 @@ static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
}
-void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
- int cpu)
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
-
- if (!cpu_port->is_dsa_8021q_cpu) {
- cpu_port->is_dsa_8021q_cpu = true;
+ cpu_port->is_dsa_8021q_cpu = true;
- for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_add(ocelot, cpu, vid, true);
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_add(ocelot, cpu, vid, true);
- ocelot_update_pgid_cpu(ocelot);
- }
-
- ocelot_apply_bridge_fwd_mask(ocelot, true);
+ ocelot_update_pgid_cpu(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
-EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu);
-void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
- struct ocelot_port *cpu_port = ocelot->ports[port]->dsa_8021q_cpu;
- bool keep = false;
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
- int p;
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->dsa_8021q_cpu = NULL;
+ cpu_port->is_dsa_8021q_cpu = false;
- for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (!ocelot->ports[p])
- continue;
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
- if (ocelot->ports[p]->dsa_8021q_cpu == cpu_port) {
- keep = true;
- break;
- }
- }
+ ocelot_update_pgid_cpu(ocelot);
- if (!keep) {
- cpu_port->is_dsa_8021q_cpu = false;
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu);
- for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
+ int cpu)
+{
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
- ocelot_update_pgid_cpu(ocelot);
- }
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot->ports[port]->dsa_8021q_cpu = NULL;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
@@ -2767,10 +2140,14 @@ static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return -EOPNOTSUPP;
+ }
mutex_lock(&ocelot->fwd_domain_lock);
@@ -3340,8 +2717,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- const struct ocelot_stat_layout *stat;
- char queue_name[32];
int i, ret;
u32 port;
@@ -3353,33 +2728,21 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->num_stats = 0;
- for_each_stat(ocelot, stat)
- ocelot->num_stats++;
-
- ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
- sizeof(u64), GFP_KERNEL);
- if (!ocelot->stats)
- return -ENOMEM;
-
- mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
mutex_init(&ocelot->tas_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
- snprintf(queue_name, sizeof(queue_name), "%s-stats",
- dev_name(ocelot->dev));
- ocelot->stats_queue = create_singlethread_workqueue(queue_name);
- if (!ocelot->stats_queue)
- return -ENOMEM;
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
- if (!ocelot->owq) {
- destroy_workqueue(ocelot->stats_queue);
+ if (!ocelot->owq)
return -ENOMEM;
+
+ ret = ocelot_stats_init(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->owq);
+ return ret;
}
INIT_LIST_HEAD(&ocelot->multicast);
@@ -3491,27 +2854,14 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
- ret = ocelot_prepare_stats_regions(ocelot);
- if (ret) {
- destroy_workqueue(ocelot->stats_queue);
- destroy_workqueue(ocelot->owq);
- return ret;
- }
-
- INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
- queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
- OCELOT_STATS_CHECK_DELAY);
-
return 0;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- cancel_delayed_work(&ocelot->stats_work);
- destroy_workqueue(ocelot->stats_queue);
+ ocelot_stats_deinit(ocelot);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 6d65cc87d757..70dbd9c4e512 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -51,13 +51,6 @@ struct ocelot_port_private {
struct ocelot_port_tc tc;
};
-struct ocelot_dump_ctx {
- struct net_device *dev;
- struct sk_buff *skb;
- struct netlink_callback *cb;
- int idx;
-};
-
/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
* possibilities of egress port masks for L2 multicast traffic.
* For a switch with 9 user ports, there are 512 possible port masks, but the
@@ -84,8 +77,6 @@ struct ocelot_multicast {
int ocelot_bridge_num_find(struct ocelot *ocelot,
const struct net_device *bridge);
-int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data);
int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type);
@@ -115,6 +106,9 @@ struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
struct netlink_ext_ack *extack);
void ocelot_mirror_put(struct ocelot *ocelot);
+int ocelot_stats_init(struct ocelot *ocelot);
+void ocelot_stats_deinit(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 5e6136e80282..50858cc10fef 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -20,6 +20,13 @@
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
+struct ocelot_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
static bool ocelot_netdevice_dev_check(const struct net_device *dev);
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
@@ -726,36 +733,7 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
-
- /* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
- stats->rx_dropped = dev->stats.rx_dropped;
-
- /* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ return ocelot_port_get_stats64(ocelot, port, stats);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -785,6 +763,49 @@ static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge);
}
+static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
+{
+ struct ocelot_dump_ctx *dump = data;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
static int ocelot_port_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1391,11 +1412,10 @@ static int ocelot_netdevice_lag_join(struct net_device *dev,
int port = priv->port.index;
int err;
- err = ocelot_port_lag_join(ocelot, port, bond, info);
- if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(extack, "Offloading not supported");
+ err = ocelot_port_lag_join(ocelot, port, bond, info, extack);
+ if (err == -EOPNOTSUPP)
+ /* Offloading not supported, fall back to software LAG */
return 0;
- }
bridge_dev = netdev_master_upper_dev_get(bond);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 09c703efe946..1a82f10c8853 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -6,9 +6,13 @@
*/
#include <linux/time64.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
+#include "ocelot.h"
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
@@ -310,6 +314,483 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
}
EXPORT_SYMBOL(ocelot_ptp_enable);
+static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
+ *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
+}
+
+static void
+ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv4.dport.value = PTP_EV_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.dport.value = PTP_EV_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv4.dport.value = PTP_GEN_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.dport.value = PTP_GEN_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
+
+ return ocelot_trap_add(ocelot, port, l2_cookie, true,
+ ocelot_populate_l2_ptp_trap_key);
+}
+
+static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, l2_cookie);
+}
+
+static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
+ ocelot_populate_ipv4_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
+ ocelot_populate_ipv4_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
+ ocelot_populate_ipv6_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
+ ocelot_populate_ipv6_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
+ return err;
+}
+
+static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ bool l2, bool l4)
+{
+ int err;
+
+ if (l2)
+ err = ocelot_l2_ptp_trap_add(ocelot, port);
+ else
+ err = ocelot_l2_ptp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ if (l4) {
+ err = ocelot_ipv4_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv4;
+
+ err = ocelot_ipv6_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv6;
+ } else {
+ err = ocelot_ipv4_ptp_trap_del(ocelot, port);
+
+ err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
+ }
+ if (err)
+ return err;
+
+ return 0;
+
+err_ipv6:
+ ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+ if (l2)
+ ocelot_l2_ptp_trap_del(ocelot, port);
+ return err;
+}
+
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
+
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool l2 = false, l4 = false;
+ struct hwtstamp_config cfg;
+ int err;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correction field; we
+ * need to update the origin time.
+ */
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ ocelot_port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ l2 = true;
+ l4 = true;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+ if (err) {
+ mutex_unlock(&ocelot->ptp_lock);
+ return err;
+ }
+
+ if (l2 && l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ else if (l2)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ else
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
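ocelot_hwstamp_set() services the standard SIOCSHWTSTAMP ioctl path. As a rough illustration only (socket and interface names are assumptions, not part of the patch), a userspace caller could exercise it like this:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Request two-step TX timestamping and PTP v2 event RX filtering on one
 * switch port; the kernel forwards this request to the driver's
 * hwtstamp_set handler shown above.
 */
static int enable_ptp_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}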
+
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+
+ if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+ ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
+
+ ocelot_port->ts_id++;
+ if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+ ocelot_port->ts_id = 0;
+
+ ocelot_port->ptp_skbs_in_flight++;
+ ocelot->ptp_skbs_in_flight++;
+
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+
+ return 0;
+}
+
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+ unsigned int ptp_class)
+{
+ struct ptp_header *hdr;
+ u8 msgtype, twostep;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return false;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ twostep = hdr->flag_field[0] & 0x2;
+
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
+ return true;
+
+ return false;
+}
+
+int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ struct sk_buff *skb,
+ struct sk_buff **clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u8 ptp_cmd = ocelot_port->ptp_cmd;
+ unsigned int ptp_class;
+ int err;
+
+ /* Don't do anything if PTP timestamping not enabled */
+ if (!ptp_cmd)
+ return 0;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
+ if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ return 0;
+ }
+
+ /* Fall back to two-step timestamping */
+ ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ }
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *clone = skb_clone_sk(skb);
+ if (!(*clone))
+ return -ENOMEM;
+
+ err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ if (err)
+ return err;
+
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_txtstamp_request);
+
+static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* Sec has incremented since the ts was registered */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+ if (WARN_ON(!hdr))
+ return false;
+
+ return seqid == ntohs(hdr->sequence_id);
+}
+
+void ocelot_get_txtstamp(struct ocelot *ocelot)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u32 val, id, seqid, txport;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
+
+ port = ocelot->ports[txport];
+
+ spin_lock(&ocelot->ts_id_lock);
+ port->ptp_skbs_in_flight--;
+ ocelot->ptp_skbs_in_flight--;
+ spin_unlock(&ocelot->ts_id_lock);
+
+ /* Retrieve its associated skb */
+try_again:
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (OCELOT_SKB_CB(skb)->ts_id != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+ dev_err_ratelimited(ocelot->dev,
+ "port %d received stale TX timestamp for seqid %d, discarding\n",
+ txport, seqid);
+ dev_kfree_skb_any(skb);
+ goto try_again;
+ }
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_txtstamp);
+
int ocelot_init_timestamp(struct ocelot *ocelot,
const struct ptp_clock_info *info)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
new file mode 100644
index 000000000000..dbd20b125cea
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Statistics for Ocelot switch family
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2022 NXP
+ */
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "ocelot.h"
+
+/* Read the counters from hardware and keep them in region->buf.
+ * Caller must hold &ocelot->stat_view_lock.
+ */
+static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
+{
+ struct ocelot_stats_region *region;
+ int err;
+
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Transfer the counters from region->buf to ocelot->stats.
+ * Caller must hold &ocelot->stat_view_lock and &ocelot->stats_lock.
+ */
+static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
+{
+ unsigned int idx = port * OCELOT_NUM_STATS;
+ struct ocelot_stats_region *region;
+ int j;
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ for (j = 0; j < region->count; j++) {
+ u64 *stat = &ocelot->stats[idx + j];
+ u64 val = region->buf[j];
+
+ if (val < (*stat & U32_MAX))
+ *stat += (u64)1 << 32;
+
+ *stat = (*stat & ~(u64)U32_MAX) + val;
+ }
+
+ idx += region->count;
+ }
+}
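The transfer above folds each 32-bit hardware snapshot into a 64-bit software counter by detecting wraparound. A minimal standalone sketch of that carry logic (hypothetical helper name, not part of the patch):

#include <stdint.h>

/* Fold a fresh 32-bit hardware counter snapshot into a 64-bit accumulator,
 * mirroring ocelot_port_transfer_stats(): if the new low 32 bits are smaller
 * than the accumulated low 32 bits, the hardware counter wrapped since the
 * last read, so carry into the upper 32 bits before substituting them.
 */
static inline uint64_t fold_u32_counter(uint64_t acc, uint32_t hw_val)
{
	if (hw_val < (uint32_t)acc)
		acc += (uint64_t)1 << 32;

	return (acc & ~(uint64_t)UINT32_MAX) + hw_val;
}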
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+ int port, err;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ err = ocelot_port_update_stats(ocelot, port);
+ if (err)
+ break;
+
+ spin_lock(&ocelot->stats_lock);
+ ocelot_port_transfer_stats(ocelot, port);
+ spin_unlock(&ocelot->stats_lock);
+ }
+
+ if (!err && ocelot->ops->update_stats)
+ ocelot->ops->update_stats(ocelot);
+
+ mutex_unlock(&ocelot->stat_view_lock);
+
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
+
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+}
+
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
+{
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+/* Update ocelot->stats for the given port and run the given callback */
+static void ocelot_port_stats_run(struct ocelot *ocelot, int port, void *priv,
+ void (*cb)(struct ocelot *ocelot, int port,
+ void *priv))
+{
+ int err;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ err = ocelot_port_update_stats(ocelot, port);
+ if (err) {
+ dev_err(ocelot->dev, "Failed to update port %d stats: %pe\n",
+ port, ERR_PTR(err));
+ goto out_unlock;
+ }
+
+ spin_lock(&ocelot->stats_lock);
+
+ ocelot_port_transfer_stats(ocelot, port);
+ cb(ocelot, port, priv);
+
+ spin_unlock(&ocelot->stats_lock);
+
+out_unlock:
+ mutex_unlock(&ocelot->stat_view_lock);
+}
+
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
+ int i, num_stats = 0;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
+}
+EXPORT_SYMBOL(ocelot_get_sset_count);
+
+static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *data = priv;
+ int i;
+
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
+}
+
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
+{
+ ocelot_port_stats_run(ocelot, port, data, ocelot_port_ethtool_stats_cb);
+}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
+
+static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_pause_stats *pause_stats = priv;
+
+ pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PAUSE];
+ pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE];
+}
+
+void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pause_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats);
+
+static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1526 },
+ { 1527, 65535 },
+ {},
+};
+
+static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_rmon_stats *rmon_stats = priv;
+
+ rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_SHORTS];
+ rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_LONGS];
+ rmon_stats->fragments = s[OCELOT_STAT_RX_FRAGMENTS];
+ rmon_stats->jabbers = s[OCELOT_STAT_RX_JABBERS];
+
+ rmon_stats->hist[0] = s[OCELOT_STAT_RX_64];
+ rmon_stats->hist[1] = s[OCELOT_STAT_RX_65_127];
+ rmon_stats->hist[2] = s[OCELOT_STAT_RX_128_255];
+ rmon_stats->hist[3] = s[OCELOT_STAT_RX_256_511];
+ rmon_stats->hist[4] = s[OCELOT_STAT_RX_512_1023];
+ rmon_stats->hist[5] = s[OCELOT_STAT_RX_1024_1526];
+ rmon_stats->hist[6] = s[OCELOT_STAT_RX_1527_MAX];
+
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
+ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
+ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
+ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
+ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
+}
+
+void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ *ranges = ocelot_rmon_ranges;
+
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_rmon_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats);
+
+static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
+
+ ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL];
+}
+
+void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_ctrl_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats);
+
+static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_mac_stats *mac_stats = priv;
+
+ mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_OCTETS];
+ mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_OCTETS];
+ mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_GREEN_PRIO_0] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_1] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_2] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_3] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_4] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_5] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_6] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_7] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_0] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_1] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_2] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_3] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_4] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_5] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_6] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_7];
+ mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_MULTICAST];
+ mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_BROADCAST];
+ mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_MULTICAST];
+ mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_BROADCAST];
+ mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_LONGS];
+ /* Sadly, C_RX_CRC is the sum of FCS and alignment errors; they are not
+ * counted individually.
+ */
+ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
+ mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
+}
+
+void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_mac_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats);
+
+static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_phy_stats *phy_stats = priv;
+
+ phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS];
+}
+
+void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_phy_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+
+void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+
+ spin_lock(&ocelot->stats_lock);
+
+ /* Get Rx stats */
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
+ stats->rx_missed_errors = s[OCELOT_STAT_DROP_TAIL];
+ stats->rx_dropped = s[OCELOT_STAT_RX_RED_PRIO_0] +
+ s[OCELOT_STAT_RX_RED_PRIO_1] +
+ s[OCELOT_STAT_RX_RED_PRIO_2] +
+ s[OCELOT_STAT_RX_RED_PRIO_3] +
+ s[OCELOT_STAT_RX_RED_PRIO_4] +
+ s[OCELOT_STAT_RX_RED_PRIO_5] +
+ s[OCELOT_STAT_RX_RED_PRIO_6] +
+ s[OCELOT_STAT_RX_RED_PRIO_7] +
+ s[OCELOT_STAT_DROP_LOCAL] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_0] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_1] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_2] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_3] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_4] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_5] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_6] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_7] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_0] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_1] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_2] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_3] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_4] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_5] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_6] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_7];
+
+ /* Get Tx stats */
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_port_get_stats64);
+
+static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+{
+ struct ocelot_stats_region *region = NULL;
+ unsigned int last;
+ int i;
+
+ INIT_LIST_HEAD(&ocelot->stats_regions);
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (!ocelot->stats_layout[i].reg)
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
+ region->count++;
+ } else {
+ region = devm_kzalloc(ocelot->dev, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->base = ocelot->stats_layout[i].reg;
+ region->count = 1;
+ list_add_tail(&region->node, &ocelot->stats_regions);
+ }
+
+ last = ocelot->stats_layout[i].reg;
+ }
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ region->buf = devm_kcalloc(ocelot->dev, region->count,
+ sizeof(*region->buf), GFP_KERNEL);
+ if (!region->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int ocelot_stats_init(struct ocelot *ocelot)
+{
+ char queue_name[32];
+ int ret;
+
+ ocelot->stats = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
+ sizeof(u64), GFP_KERNEL);
+ if (!ocelot->stats)
+ return -ENOMEM;
+
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(ocelot->dev));
+ ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!ocelot->stats_queue)
+ return -ENOMEM;
+
+ spin_lock_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->stat_view_lock);
+
+ ret = ocelot_prepare_stats_regions(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->stats_queue);
+ return ret;
+ }
+
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+
+ return 0;
+}
+
+void ocelot_stats_deinit(struct ocelot *ocelot)
+{
+ cancel_delayed_work(&ocelot->stats_work);
+ destroy_workqueue(ocelot->stats_queue);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 961f803aca19..6f22aea08a64 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -6,6 +6,7 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
@@ -25,6 +26,9 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
+#define MEM_INIT_SLEEP_US 1000
+#define MEM_INIT_TIMEOUT_US 100000
+
static const u32 *ocelot_regmap[TARGET_MAX] = {
[ANA] = vsc7514_ana_regmap,
[QS] = vsc7514_qs_regmap,
@@ -96,101 +100,8 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
};
static void ocelot_pll5_init(struct ocelot *ocelot)
@@ -284,27 +195,43 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+static int ocelot_mem_init_status(struct ocelot *ocelot)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+
+ return err ?: val;
+}
+
static int ocelot_reset(struct ocelot *ocelot)
{
- int retries = 100;
+ int err;
u32 val;
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ if (err)
+ return err;
- do {
- msleep(1);
- regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
- } while (val && --retries);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
- if (!retries)
- return -ETIMEDOUT;
+ /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (this
+ * should take about 100us) before enabling the switch core.
+ */
+ err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
+ MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
+ if (err)
+ return err;
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
- return 0;
+ return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
}
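The readx_poll_timeout() call replaces the old open-coded msleep() loop; roughly, it behaves like the simplified sketch below (the real <linux/iopoll.h> macro also re-reads the value once more after the timeout expires):

/* Simplified equivalent of
 * readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
 *                    MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
 */
static int ocelot_wait_mem_init_cleared(struct ocelot *ocelot)
{
	unsigned int elapsed_us = 0;
	int val;

	for (;;) {
		val = ocelot_mem_init_status(ocelot);
		if (val <= 0)
			return val;	/* 0: MEM_INIT cleared, <0: regmap error */
		if (elapsed_us >= MEM_INIT_TIMEOUT_US)
			return -ETIMEDOUT;
		usleep_range(MEM_INIT_SLEEP_US, MEM_INIT_SLEEP_US + 100);
		elapsed_us += MEM_INIT_SLEEP_US;
	}
}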
/* Watermark encode
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9d2d3e13cacf 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
@@ -223,7 +283,6 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_MMGT_FAST, 0x0006a0),
REG(SYS_EVENTS_DIF, 0x0006a4),
REG(SYS_EVENTS_CORE, 0x0006b4),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x0006b8),
REG(SYS_PTP_TXSTAMP, 0x0006bc),
REG(SYS_PTP_NXT, 0x0006c0),
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 971dde8c3286..9063e2e22cd5 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1647,10 +1647,10 @@ myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- strlcpy(info->driver, "myri10ge", sizeof(info->driver));
- strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
- strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "myri10ge", sizeof(info->driver));
+ strscpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+ strscpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int myri10ge_get_coalesce(struct net_device *netdev,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 9aae7f1eb5d2..650a5a166070 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -869,7 +869,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
np->ioaddr = ioaddr;
- netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+ netif_napi_add(dev, &np->napi, natsemi_poll);
np->dev = dev;
np->pci_dev = pdev;
@@ -2564,9 +2564,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 49ea130c9067..998586872599 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1351,9 +1351,9 @@ static int ns83820_set_link_ksettings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strlcpy(info->driver, "ns83820", sizeof(info->driver));
- strlcpy(info->version, VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, "ns83820", sizeof(info->driver));
+ strscpy(info->version, VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 30f955efa830..dcf8212119f9 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5348,9 +5348,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strlcpy(info->version, s2io_driver_version, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strscpy(info->version, s2io_driver_version, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}
/**
@@ -7359,10 +7359,9 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
int get_off = ring_data->rx_curr_get_info.offset;
int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
- unsigned char *buff = skb_push(skb, buf0_len);
struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- memcpy(buff, ba->ba_0, buf0_len);
+ skb_put_data(skb, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
}
@@ -7905,10 +7904,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
for (i = 0; i < config->rx_ring_num ; i++) {
struct ring_info *ring = &mac_control->rings[i];
- netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+ netif_napi_add(dev, &ring->napi, s2io_poll_msix);
}
} else {
- netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+ netif_napi_add(dev, &sp->napi, s2io_poll_inta);
}
/* Not needed for Herc */
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 78368e71ce83..f80f1a6953fa 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -474,6 +474,7 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
+ struct net *net = dev_net(netdev);
struct ipv6hdr *ipv6h;
struct tcphdr *th;
struct iphdr *iph;
@@ -494,13 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index b3b2a23b8d89..f693119541d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */
+#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_ct.h>
+
#include "conntrack.h"
#include "../nfp_port.h"
@@ -56,9 +59,17 @@ bool is_pre_ct_flow(struct flow_cls_offload *flow)
int i;
flow_action_for_each(i, act, &flow->rule->action) {
- if (act->id == FLOW_ACTION_CT && !act->ct.action)
- return true;
+ if (act->id == FLOW_ACTION_CT) {
+ /* The pre_ct rule may only have the ct or ct nat action, and
+ * cannot contain other ct actions, e.g. ct commit and so on.
+ */
+ if ((!act->ct.action || act->ct.action == TCA_CT_ACT_NAT))
+ return true;
+ else
+ return false;
+ }
}
+
return false;
}
@@ -66,13 +77,37 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action_entry *act;
+ bool exist_ct_clear = false;
struct flow_match_ct ct;
+ int i;
+
+ /* post ct entry cannot contain any ct action except ct_clear. */
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT) {
+ /* ignore ct clear action. */
+ if (act->ct.action == TCA_CT_ACT_CLEAR) {
+ exist_ct_clear = true;
+ continue;
+ }
+
+ return false;
+ }
+ }
if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
return true;
+ } else {
+ /* When doing NAT with ct, the post ct entry ignores the ct status
+ * and matches the NAT fields (sip/dip) instead. In this situation,
+ * the flow chain index is not zero and a ct clear action is present.
+ */
+ if (flow->common.chain_index && exist_ct_clear)
+ return true;
}
+
return false;
}
@@ -168,6 +203,20 @@ static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
return buf;
}
+/* Note entry1 and entry2 are not swappable. Only skip the ip and
+ * tport merge checks for pre_ct and post_ct when pre_ct does NAT.
+ */
+static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
+ struct nfp_fl_ct_flow_entry *entry2)
+{
+ /* Only pre_ct can have the NFP_FL_ACTION_DO_NAT flag. */
+ if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
+ entry2->type == CT_TYPE_POST_CT)
+ return false;
+
+ return true;
+}
+
/* Note entry1 and entry2 are not swappable, entry1 should be
* the former flow whose mangle action need be taken into account
* if existed, and entry2 should be the latter flow whose action
@@ -225,7 +274,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ /* If the pre ct entry does NAT, the NAT ip exists in the nft entry
+ * and is merge checked when the nft and post ct entries are merged,
+ * so skip this ip merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv4_addrs match1, match2;
flow_rule_match_ipv4_addrs(entry1->rule, &match1);
@@ -242,7 +296,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ /* If the pre ct entry does NAT, the NAT ip exists in the nft entry
+ * and is merge checked when the nft and post ct entries are merged,
+ * so skip this ip merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv6_addrs match1, match2;
flow_rule_match_ipv6_addrs(entry1->rule, &match1);
@@ -259,7 +318,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ /* If the pre ct entry does NAT, the NAT tport exists in the nft entry
+ * and is merge checked when the nft and post ct entries are merged,
+ * so skip this tport merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
struct flow_match_ports match1, match2;
@@ -404,12 +468,55 @@ check_failed:
return -EINVAL;
}
+static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
+{
+ struct flow_match_vlan match;
+
+ if (unlikely(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)))
+ return -EOPNOTSUPP;
+
+ /* post_ct does not match VLAN KEY, can be merged. */
+ if (likely(!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)))
+ return 0;
+
+ switch (a_in->id) {
+ /* pre_ct has pop vlan, post_ct cannot match VLAN KEY, cannot be merged. */
+ case FLOW_ACTION_VLAN_POP:
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
+ flow_rule_match_vlan(rule, &match);
+ /* different vlan id, cannot be merged. */
+ if ((match.key->vlan_id & match.mask->vlan_id) ^
+ (a_in->vlan.vid & match.mask->vlan_id))
+ return -EOPNOTSUPP;
+
+ /* different tpid, cannot be merged. */
+ if ((match.key->vlan_tpid & match.mask->vlan_tpid) ^
+ (a_in->vlan.proto & match.mask->vlan_tpid))
+ return -EOPNOTSUPP;
+
+ /* different priority, cannot be merged. */
+ if ((match.key->vlan_priority & match.mask->vlan_priority) ^
+ (a_in->vlan.prio & match.mask->vlan_priority))
+ return -EOPNOTSUPP;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
struct nfp_fl_ct_flow_entry *post_ct_entry,
struct nfp_fl_ct_flow_entry *nft_entry)
{
struct flow_action_entry *act;
- int i;
+ int i, err;
/* Check for pre_ct->action conflicts */
flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
@@ -417,6 +524,10 @@ static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
case FLOW_ACTION_VLAN_MANGLE:
+ err = nfp_ct_check_vlan_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
case FLOW_ACTION_MPLS_PUSH:
case FLOW_ACTION_MPLS_POP:
case FLOW_ACTION_MPLS_MANGLE:
@@ -468,6 +579,12 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
return -EINVAL;
return 0;
+ } else {
+ /* A post_ct entry with a ct clear action will not match the
+ * ct status when the nft entry is a NAT entry.
+ */
+ if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
+ return 0;
}
return -EINVAL;
@@ -537,11 +654,37 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
return key_size;
}
+/* Get the csum flag according to the ip proto and mangle action. */
+static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
+{
+ if (a_in->id != FLOW_ACTION_MANGLE)
+ return;
+
+ switch (a_in->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ *csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
+ if (ip_proto == IPPROTO_TCP)
+ *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+ else if (ip_proto == IPPROTO_UDP)
+ *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ default:
+ break;
+ }
+}
+
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
struct nfp_flower_priv *priv,
struct net_device *netdev,
struct nfp_fl_payload *flow_pay)
{
+ enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
struct flow_action_entry *a_in;
int i, j, num_actions, id;
struct flow_rule *a_rule;
@@ -551,15 +694,25 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
rules[CT_TYPE_NFT]->action.num_entries +
rules[CT_TYPE_POST_CT]->action.num_entries;
- a_rule = flow_rule_alloc(num_actions);
+ /* Add one action to make sure there is enough room to add a checksum action
+ * when doing NAT.
+ */
+ a_rule = flow_rule_alloc(num_actions + 1);
if (!a_rule)
return -ENOMEM;
/* Actions need a BASIC dissector. */
a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+ /* The post_ct entry has at least one action. */
+ if (rules[CT_TYPE_POST_CT]->action.num_entries != 0) {
+ tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
+ }
/* Copy actions */
for (j = 0; j < _CT_TYPE_MAX; j++) {
+ u32 csum_updated = 0;
+ u8 ip_proto = 0;
+
if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -571,8 +724,10 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
* through the subflows and assign the proper subflow to a_rule
*/
flow_rule_match_basic(rules[j], &match);
- if (match.mask->ip_proto)
+ if (match.mask->ip_proto) {
a_rule->match = rules[j]->match;
+ ip_proto = match.key->ip_proto;
+ }
}
for (i = 0; i < rules[j]->action.num_entries; i++) {
@@ -589,11 +744,32 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
case FLOW_ACTION_CT_METADATA:
continue;
default:
+ /* The nft entry is generated by tc ct, and its mangle action does not
+ * care about the stats; inherit the post ct entry stats to satisfy
+ * the flow_action_hw_stats_check.
+ */
+ if (j == CT_TYPE_NFT) {
+ if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
+ a_in->hw_stats = tmp_stats;
+ nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
+ }
memcpy(&a_rule->action.entries[offset++],
a_in, sizeof(struct flow_action_entry));
break;
}
}
+ /* The nft entry has a mangle action but no checksum action when doing NAT;
+ * hardware automatically fixes the IPv4 and TCP/UDP checksums, so add a csum
+ * action to satisfy the csum action check.
+ */
+ if (csum_updated) {
+ struct flow_action_entry *csum_action;
+
+ csum_action = &a_rule->action.entries[offset++];
+ csum_action->id = FLOW_ACTION_CSUM;
+ csum_action->csum_flags = csum_updated;
+ csum_action->hw_stats = tmp_stats;
+ }
}
/* Some actions would have been ignored, so update the num_entries field */
@@ -1191,6 +1367,49 @@ static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
return NULL;
}
+static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
+{
+ if (mangle_action->id != FLOW_ACTION_MANGLE)
+ return;
+
+ switch (mangle_action->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
+ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ return;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
+ mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+ return;
+
+ default:
+ return;
+ }
+}
+
+static int nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
+ struct nfp_fl_ct_flow_entry *entry)
+{
+ switch (act->id) {
+ case FLOW_ACTION_CT:
+ if (act->ct.action == TCA_CT_ACT_NAT)
+ entry->flags |= NFP_FL_ACTION_DO_NAT;
+ break;
+
+ case FLOW_ACTION_MANGLE:
+ entry->flags |= NFP_FL_ACTION_DO_MANGLE;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
struct net_device *netdev,
@@ -1257,6 +1476,13 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
new_act = &entry->rule->action.entries[i];
memcpy(new_act, act, sizeof(struct flow_action_entry));
+ /* The nft entry mangle fields are in host byte order and need to be
+ * translated to network byte order.
+ */
+ if (is_nft)
+ nfp_nft_ct_translate_mangle_action(new_act);
+
+ nfp_nft_ct_set_flow_flag(new_act, entry);
/* Entunnel is a special case, need to allocate and copy
* tunnel info.
*/
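A hedged usage sketch of the checksum-flag helper introduced in this file (demo_csum_flags is a made-up wrapper): for a TCP flow whose nft NAT entry mangles the IPv4 header, the helper reports that the IPv4 header and TCP checksums must be refreshed, and the merge code then appends a FLOW_ACTION_CSUM entry carrying those flags.
static void demo_csum_flags(void)
{
	struct flow_action_entry act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	};
	u32 csum = 0;

	nfp_fl_get_csum_flag(&act, IPPROTO_TCP, &csum);
	/* csum == TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP */
}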
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index beb6cceff9d8..762c0b36e269 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -103,6 +103,10 @@ enum nfp_nfp_layer_name {
_FLOW_PAY_LAYERS_MAX
};
+/* NFP flow entry flags. */
+#define NFP_FL_ACTION_DO_NAT BIT(0)
+#define NFP_FL_ACTION_DO_MANGLE BIT(1)
+
/**
* struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
* @cookie: Flow cookie, same as original TC flow, used as key
@@ -115,6 +119,7 @@ enum nfp_nfp_layer_name {
* @rule: Reference to the original TC flow rule
* @stats: Used to cache stats for updating
* @tun_offset: Used to indicate tunnel action offset in action list
+ * @flags: Used to indicate flow flags such as NAT, which are used by merge.
*/
struct nfp_fl_ct_flow_entry {
unsigned long cookie;
@@ -127,6 +132,7 @@ struct nfp_fl_ct_flow_entry {
struct flow_rule *rule;
struct flow_stats stats;
u8 tun_offset; // Set to NFP_FL_CT_NO_TUN if no tun
+ u8 flags;
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 83c97154c0c7..3ab3e4536b99 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1301,9 +1301,14 @@ static bool offload_pre_check(struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_ct ct;
- if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
- return false;
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ /* Allow special case where CT match is all 0 */
+ if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
+ return false;
+ }
if (flow->common.chain_index)
return false;
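A note on the relaxed pre-check above: memchr_inv() returns NULL when every byte in the range equals the given value, so an all-zero CT match key (effectively no CT constraint) is now allowed through, while any real CT state match is still rejected here and left to the conntrack offload path. A minimal illustration of the memchr_inv() semantics, using a hypothetical helper:
static bool demo_key_is_zero(const u8 *key, size_t len)
{
	/* memchr_inv() returns NULL when all bytes equal the given value */
	return !memchr_inv(key, 0, len);
}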
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 4e5df9f2c372..99052a925d9e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -119,7 +119,8 @@ int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
static int nfp_policer_validate(const struct flow_action *action,
const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ bool ingress)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
@@ -127,11 +128,20 @@ static int nfp_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
- act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not pipe or ok");
- return -EOPNOTSUPP;
+ if (ingress) {
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue or ok");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
}
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
@@ -217,7 +227,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
- err = nfp_policer_validate(&flow->rule->action, action, extack);
+ err = nfp_policer_validate(&flow->rule->action, action, extack, true);
if (err)
return err;
@@ -686,6 +696,7 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
bool pps_support, pps;
bool add = false;
u64 rate;
+ int err;
pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
@@ -697,6 +708,11 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
"unsupported offload: qos rate limit offload requires police action");
continue;
}
+
+ err = nfp_policer_validate(&fl_act->action, action, extack, false);
+ if (err)
+ return err;
+
if (action->police.rate_bytes_ps > 0) {
rate = action->police.rate_bytes_ps;
burst = action->police.burst;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 65e243168765..5d9db8c2a5b4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -84,7 +84,7 @@ static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
nfp_net_xsk_rx_drop(r_vec, xrxbuf);
return;
}
- memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);
+ skb_put_data(skb, xrxbuf->xdp->data, pkt_len);
skb->mark = meta->mark;
skb_set_hash(skb, meta->hash, meta->hash_type);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 873429f7a6da..e66e548919d4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -691,6 +691,71 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
return 0;
}
+int nfp_net_pf_get_app_id(struct nfp_pf *pf)
+{
+ return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
+ NFP_APP_CORE_NIC);
+}
+
+static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
+{
+ char name[32];
+ int err = 0;
+ u64 val;
+
+ snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp));
+
+ val = nfp_rtsym_read_le(pf->rtbl, name, &err);
+ if (err) {
+ if (err != -ENOENT)
+ nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
+
+ return 0;
+ }
+
+ return val;
+}
+
+static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff)
+{
+ struct nfp_nsp *nsp;
+ char hwinfo[32];
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ /* Not a fatal error, no need to return error to stop driver from loading */
+ if (err) {
+ nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err);
+ } else {
+ /* Need to reinit eth_tbl since the eth table state may change
+ * after sp_indiff is configured.
+ */
+ kfree(pf->eth_tbl);
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ }
+
+ nfp_nsp_close(nsp);
+ return 0;
+}
+
+static int nfp_pf_nsp_cfg(struct nfp_pf *pf)
+{
+ bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
+ (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
+
+ return nfp_pf_cfg_hwinfo(pf, sp_indiff);
+}
+
+static void nfp_pf_nsp_clean(struct nfp_pf *pf)
+{
+ nfp_pf_cfg_hwinfo(pf, false);
+}
+
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
@@ -791,10 +856,14 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_fw_unload;
}
- err = nfp_net_pci_probe(pf);
+ err = nfp_pf_nsp_cfg(pf);
if (err)
goto err_fw_unload;
+ err = nfp_net_pci_probe(pf);
+ if (err)
+ goto err_nsp_clean;
+
err = nfp_hwmon_register(pf);
if (err) {
dev_err(&pdev->dev, "Failed to register hwmon info\n");
@@ -805,6 +874,8 @@ static int nfp_pci_probe(struct pci_dev *pdev,
err_net_remove:
nfp_net_pci_remove(pf);
+err_nsp_clean:
+ nfp_pf_nsp_clean(pf);
err_fw_unload:
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
@@ -844,6 +915,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
nfp_net_pci_remove(pf);
+ nfp_pf_nsp_clean(pf);
vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index f56ca11de134..afd3edfa2428 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -161,6 +161,7 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
unsigned int default_val);
+int nfp_net_pf_get_app_id(struct nfp_pf *pf);
u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area);
@@ -190,4 +191,7 @@ int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
int nfp_devlink_params_register(struct nfp_pf *pf);
void nfp_devlink_params_unregister(struct nfp_pf *pf);
+
+unsigned int nfp_net_lr2speed(unsigned int linkrate);
+unsigned int nfp_net_speed2lr(unsigned int speed);
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index cf4d6f1129fa..27f4786ace4f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -474,19 +474,22 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
{
unsigned long flags;
bool link_up;
- u32 sts;
+ u16 sts;
spin_lock_irqsave(&nn->link_status_lock, flags);
- sts = nn_readl(nn, NFP_NET_CFG_STS);
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
link_up = !!(sts & NFP_NET_CFG_STS_LINK);
if (nn->link_up == link_up)
goto out;
nn->link_up = link_up;
- if (nn->port)
+ if (nn->port) {
set_bit(NFP_PORT_CHANGED, &nn->port->flags);
+ if (nn->port->link_cb)
+ nn->port->link_cb(nn->port);
+ }
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
@@ -768,9 +771,7 @@ nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
if (dp->netdev)
netif_napi_add(dp->netdev, &r_vec->napi,
- nfp_net_has_xsk_pool_slow(dp, idx) ?
- dp->ops->xsk_poll : dp->ops->poll,
- NAPI_POLL_WEIGHT);
+ nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
else
tasklet_enable(&r_vec->tasklet);
}
@@ -1630,21 +1631,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
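Background on the u64_stats conversion above, stated as a general property of the API rather than anything specific to this driver: on 32-bit kernels the _irq variants of the fetch helpers also disable interrupts, so a reader cannot spin forever against a stats writer running in IRQ context on the same CPU. The reader loop keeps the familiar shape; the struct below is hypothetical:
struct demo_stats {
	u64 packets;
	struct u64_stats_sync syncp;
};

static u64 demo_read_packets(struct demo_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		packets = s->packets;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return packets;
}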
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ac05ec34d69e..6714d5e8fdab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -14,6 +14,9 @@
#include <linux/types.h>
+/* 64-bit per app capabilities */
+#define NFP_NET_APP_CAP_SP_INDIFF BIT_ULL(0) /* indifferent to port speed */
+
/* Configuration BAR size.
*
* The configuration BAR is 8K in size, but due to
@@ -193,6 +196,10 @@
#define NFP_NET_CFG_STS_LINK_RATE_40G 5
#define NFP_NET_CFG_STS_LINK_RATE_50G 6
#define NFP_NET_CFG_STS_LINK_RATE_100G 7
+/* The NSP link rate is a 16-bit word. It is determined by the NSP and
+ * written to the CFG BAR by the NFP driver.
+ */
+#define NFP_NET_CFG_STS_NSP_LINK_RATE 0x0036
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index eeb1455a4e5d..22a5d2419084 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -205,7 +205,7 @@ nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
{
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
- strlcpy(drvinfo->driver, dev_driver_string(&pdev->dev),
+ strscpy(drvinfo->driver, dev_driver_string(&pdev->dev),
sizeof(drvinfo->driver));
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -222,18 +222,49 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor);
- strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
+ strscpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}
+static int
+nfp_net_nway_reset(struct net_device *netdev)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ err = nfp_eth_set_configured(port->app->cpp, eth_port->index, false);
+ if (err) {
+ netdev_info(netdev, "Link down failed: %d\n", err);
+ return err;
+ }
+
+ err = nfp_eth_set_configured(port->app->cpp, eth_port->index, true);
+ if (err) {
+ netdev_info(netdev, "Link up failed: %d\n", err);
+ return err;
+ }
+
+ netdev_info(netdev, "Link reset succeeded\n");
+ return 0;
+}
+
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
- strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ strscpy(drvinfo->bus_info, pci_name(app->pdev),
sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -273,20 +304,11 @@ static int
nfp_net_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- static const u32 ls_to_ethtool[] = {
- [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
- [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
- [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
- [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
- [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
- [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
- [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
- [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
- };
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
struct nfp_net *nn;
- u32 sts, ls;
+ unsigned int speed;
+ u16 sts;
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
@@ -299,8 +321,13 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
if (eth_port) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
- cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
- AUTONEG_ENABLE : AUTONEG_DISABLE;
+ if (eth_port->supp_aneg) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (eth_port->aneg == NFP_ANEG_AUTO) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ }
+ }
nfp_net_set_fec_link_mode(eth_port, cmd);
}
@@ -319,18 +346,15 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
nn = netdev_priv(netdev);
- sts = nn_readl(nn, NFP_NET_CFG_STS);
-
- ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
- if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
+ speed = nfp_net_lr2speed(FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts));
+ if (!speed)
return -EOPNOTSUPP;
- if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
- ls >= ARRAY_SIZE(ls_to_ethtool))
- return 0;
-
- cmd->base.speed = ls_to_ethtool[ls];
- cmd->base.duplex = DUPLEX_FULL;
+ if (speed != SPEED_UNKNOWN) {
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ }
return 0;
}
@@ -339,6 +363,7 @@ static int
nfp_net_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ bool req_aneg = (cmd->base.autoneg == AUTONEG_ENABLE);
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
struct nfp_nsp *nsp;
@@ -358,13 +383,25 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (IS_ERR(nsp))
return PTR_ERR(nsp);
- err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
- NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
+ if (req_aneg && !eth_port->supp_aneg) {
+ netdev_warn(netdev, "Autoneg is not supported.\n");
+ err = -EOPNOTSUPP;
+ goto err_bad_set;
+ }
+
+ err = __nfp_eth_set_aneg(nsp, req_aneg ? NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
if (err)
goto err_bad_set;
+
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ if (req_aneg) {
+ netdev_err(netdev, "Changing speed is not allowed when autoneg is enabled.\n");
+ err = -EINVAL;
+ goto err_bad_set;
+ }
+
err = __nfp_eth_set_speed(nsp, speed);
if (err)
goto err_bad_set;
@@ -649,7 +686,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -657,10 +694,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -670,7 +707,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -1008,7 +1045,7 @@ nfp_port_get_fecparam(struct net_device *netdev,
return 0;
param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
- param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
+ param->active_fec = nfp_port_fec_nsp_to_ethtool(BIT(eth_port->act_fec));
return 0;
}
@@ -1676,11 +1713,166 @@ static int nfp_net_set_phys_id(struct net_device *netdev,
return err;
}
+#define NFP_EEPROM_LEN ETH_ALEN
+
+static int
+nfp_net_get_eeprom_len(struct net_device *netdev)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return 0;
+
+ return NFP_EEPROM_LEN;
+}
+
+static int
+nfp_net_get_nsp_hwindex(struct net_device *netdev,
+ struct nfp_nsp **nspptr,
+ u32 *index)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
+ netdev_err(netdev, "NSP doesn't support PF MAC generation\n");
+ nfp_nsp_close(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ *nspptr = nsp;
+ *index = eth_port->eth_index;
+
+ return 0;
+}
+
+static int
+nfp_net_get_port_mac_by_hwinfo(struct net_device *netdev,
+ u8 *mac_addr)
+{
+ char hwinfo[32] = {};
+ struct nfp_nsp *nsp;
+ u32 index;
+ int err;
+
+ err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
+ if (err)
+ return err;
+
+ snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index);
+ err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ netdev_err(netdev, "Reading persistent MAC address failed: %d\n",
+ err);
+ return -EOPNOTSUPP;
+ }
+
+ if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &mac_addr[0], &mac_addr[1], &mac_addr[2],
+ &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
+ netdev_err(netdev, "Can't parse persistent MAC address (%s)\n",
+ hwinfo);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_set_port_mac_by_hwinfo(struct net_device *netdev,
+ u8 *mac_addr)
+{
+ char hwinfo[32] = {};
+ struct nfp_nsp *nsp;
+ u32 index;
+ int err;
+
+ err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
+ if (err)
+ return err;
+
+ snprintf(hwinfo, sizeof(hwinfo),
+ "eth%u.mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ index, mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5]);
+
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ netdev_err(netdev, "HWinfo set failed: %d, hwinfo: %s\n",
+ err, hwinfo);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ u8 buf[NFP_EEPROM_LEN] = {};
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ eeprom->magic = nn->pdev->vendor | (nn->pdev->device << 16);
+ memcpy(bytes, buf + eeprom->offset, eeprom->len);
+
+ return 0;
+}
+
+static int
+nfp_net_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ u8 buf[NFP_EEPROM_LEN] = {};
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (nn->pdev->vendor | nn->pdev->device << 16))
+ return -EINVAL;
+
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ memcpy(buf + eeprom->offset, bytes, eeprom->len);
+ if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = nfp_net_get_drvinfo,
+ .nway_reset = nfp_net_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
@@ -1699,6 +1891,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_eeprom_len = nfp_net_get_eeprom_len,
+ .get_eeprom = nfp_net_get_eeprom,
+ .set_eeprom = nfp_net_set_eeprom,
.get_module_info = nfp_port_get_module_info,
.get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
@@ -1715,6 +1910,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
const struct ethtool_ops nfp_port_ethtool_ops = {
.get_drvinfo = nfp_app_get_drvinfo,
+ .nway_reset = nfp_net_nway_reset,
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index ca4e05650fe6..3bae92dc899e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -77,12 +77,6 @@ static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
-static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
-{
- return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
- NFP_APP_CORE_NIC);
-}
-
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
if (nfp_net_is_data_vnic(nn))
@@ -202,6 +196,9 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
goto err_free_prev;
}
+ if (nn->port)
+ nn->port->link_cb = nfp_net_refresh_port_table;
+
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
/* Kill the vNIC if app init marked it as invalid */
@@ -523,6 +520,57 @@ err_unmap_ctrl:
return err;
}
+static const unsigned int lr_to_speed[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
+};
+
+unsigned int nfp_net_lr2speed(unsigned int linkrate)
+{
+ if (linkrate < ARRAY_SIZE(lr_to_speed))
+ return lr_to_speed[linkrate];
+
+ return SPEED_UNKNOWN;
+}
+
+unsigned int nfp_net_speed2lr(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
+ if (speed == lr_to_speed[i])
+ return i;
+ }
+
+ return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
+}
+
+static void nfp_net_notify_port_speed(struct nfp_port *port)
+{
+ struct net_device *netdev = port->netdev;
+ struct nfp_net *nn;
+ u16 sts;
+
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return;
+
+ nn = netdev_priv(netdev);
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
+
+ if (!(sts & NFP_NET_CFG_STS_LINK)) {
+ nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
+ return;
+ }
+
+ nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
+}
+
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -544,6 +592,7 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
}
memcpy(port->eth_port, eth_port, sizeof(*eth_port));
+ nfp_net_notify_port_speed(port);
return 0;
}
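A short usage sketch of the two conversion helpers added above, mirroring the table they index (the wrapper function is made up; the commented values follow directly from lr_to_speed[]):
static void demo_link_rate(void)
{
	unsigned int speed = nfp_net_lr2speed(NFP_NET_CFG_STS_LINK_RATE_25G);
	unsigned int lr = nfp_net_speed2lr(speed);

	/* speed == SPEED_25000, lr == NFP_NET_CFG_STS_LINK_RATE_25G */
}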
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index d1ebe6c72f7f..6793cdf9ff11 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -46,6 +46,7 @@ enum nfp_port_flags {
* @tc_offload_cnt: number of active TC offloads, how offloads are counted
* is not defined, use as a boolean
* @app: backpointer to the app structure
+ * @link_cb: callback invoked when the link status changes
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
@@ -66,6 +67,7 @@ struct nfp_port {
unsigned long tc_offload_cnt;
struct nfp_app *app;
+ void (*link_cb)(struct nfp_port *port);
struct devlink_port dl_port;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 77d66855be42..992d72ac98d3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -132,6 +132,7 @@ enum nfp_eth_fec {
* @ports.interface: interface (module) plugged in
* @ports.media: media type of the @interface
* @ports.fec: forward error correction mode
+ * @ports.act_fec: active forward error correction mode
* @ports.aneg: auto negotiation mode
* @ports.mac_addr: interface MAC address
* @ports.label_port: port id
@@ -162,6 +163,7 @@ struct nfp_eth_table {
enum nfp_eth_media media;
enum nfp_eth_fec fec;
+ enum nfp_eth_fec act_fec;
enum nfp_eth_aneg aneg;
u8 mac_addr[ETH_ALEN];
@@ -172,6 +174,7 @@ struct nfp_eth_table {
bool enabled;
bool tx_enabled;
bool rx_enabled;
+ bool supp_aneg;
bool override_changed;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index edd300033735..bb64efec4c46 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -27,6 +27,7 @@
#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
+#define NSP_ETH_PORT_SUPP_ANEG BIT_ULL(63)
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
@@ -40,6 +41,7 @@
#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
+#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -170,7 +172,14 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
if (dst->fec_modes_supported)
dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
- dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
+ dst->fec = FIELD_GET(NSP_ETH_STATE_FEC, state);
+ dst->act_fec = dst->fec;
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 33)
+ return;
+
+ dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state);
+ dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port);
}
static void
@@ -507,6 +516,7 @@ int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
nfp_err(nfp_nsp_cpp(nsp),
"set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 4b3482ce90a1..3db4a2431741 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -990,8 +990,8 @@ static const struct net_device_ops nixge_netdev_ops = {
static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, "nixge", sizeof(ed->driver));
- strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
+ strscpy(ed->driver, "nixge", sizeof(ed->driver));
+ strscpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int
@@ -1294,7 +1294,7 @@ static int nixge_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->dev = &pdev->dev;
- netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, nixge_poll);
err = nixge_of_get_resources(pdev);
if (err)
goto free_netdev;
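On the strlcpy() -> strscpy() conversions repeated through the get_drvinfo hunks in this series: strscpy() always NUL-terminates, returns the number of characters copied or -E2BIG on truncation, and unlike strlcpy() it does not traverse the entire source string just to compute a return value. A small self-contained illustration with a hypothetical buffer and string:
static void demo_strscpy(void)
{
	char dst[8];
	ssize_t n;

	n = strscpy(dst, "platform-device", sizeof(dst));
	if (n == -E2BIG)
		pr_debug("truncated to \"%s\"\n", dst); /* dst now holds "platfor" */
}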
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 5116badaf091..daa028729d44 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4291,9 +4291,9 @@ static void nv_do_stats_poll(struct timer_list *t)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -5876,7 +5876,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
else
dev->netdev_ops = &nv_netdev_ops_optimized;
- netif_napi_add(dev, &np->napi, nv_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &np->napi, nv_napi_poll);
dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index f606d75b33b4..1a4a272f4c5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1184,9 +1184,9 @@ static int lpc_eth_open(struct net_device *ndev)
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 84cc79e928c8..541b8bcd3223 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -169,9 +169,9 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 46da937ad27f..3f2c30184752 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2516,8 +2516,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
- netif_napi_add(netdev, &adapter->napi,
- pch_gbe_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &adapter->napi, pch_gbe_napi_poll);
netdev->hw_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features = netdev->hw_features;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 9c408328be0d..1cc001087193 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1819,9 +1819,9 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct hamachi_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 03650022d444..640ac01689fb 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1340,9 +1340,9 @@ static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
struct yellowfin_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index f0ace3a0e85c..aaab590ef548 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1697,7 +1697,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mac->pdev = pdev;
mac->netdev = dev;
- netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
+ netif_napi_add(dev, &mac->napi, pasemi_mac_poll);
dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_GSO;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 0a7a757494bc..ce436e97324a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -320,16 +320,16 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
- err = ionic_lif_register(ionic->lif);
+ err = ionic_devlink_register(ionic);
if (err) {
- dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
+ dev_err(dev, "Cannot register devlink: %d\n", err);
goto err_out_deinit_lifs;
}
- err = ionic_devlink_register(ionic);
+ err = ionic_lif_register(ionic->lif);
if (err) {
- dev_err(dev, "Cannot register devlink: %d\n", err);
- goto err_out_deregister_lifs;
+ dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
+ goto err_out_deregister_devlink;
}
mod_timer(&ionic->watchdog_timer,
@@ -337,8 +337,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-err_out_deregister_lifs:
- ionic_lif_unregister(ionic->lif);
+err_out_deregister_devlink:
+ ionic_devlink_unregister(ionic);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
ionic_lif_deinit(ionic->lif);
@@ -380,8 +380,8 @@ static void ionic_remove(struct pci_dev *pdev)
del_timer_sync(&ionic->watchdog_timer);
if (ionic->lif) {
- ionic_devlink_unregister(ionic);
ionic_lif_unregister(ionic->lif);
+ ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 1443f788ee37..5d58fd99be3c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -774,8 +774,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -830,11 +829,9 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
- netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -1564,8 +1561,67 @@ static int ionic_set_features(struct net_device *netdev,
return err;
}
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+
+ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+ return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+ u8 get_mac[ETH_ALEN];
+ int err;
+
+ err = ionic_set_attr_mac(lif, mac);
+ if (err)
+ return err;
+
+ err = ionic_get_attr_mac(lif, get_mac);
+ if (err)
+ return err;
+
+ /* Deal with older firmware that silently ignores the set attr mac:
+ * it doesn't actually change the mac and doesn't return an error, so
+ * do the get attr to verify whether or not the set actually happened.
+ */
+ if (!ether_addr_equal(get_mac, mac))
+ return 1;
+
+ return 0;
+}
+
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
+ struct ionic_lif *lif = netdev_priv(netdev);
struct sockaddr *addr = sa;
u8 *mac;
int err;
@@ -1574,6 +1630,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
if (ether_addr_equal(netdev->dev_addr, mac))
return 0;
+ err = ionic_program_mac(lif, mac);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+ netdev_dbg(netdev, "%s: SET and GET ATTR MAC are not equal, likely due to old FW running\n",
+ __func__);
+
err = eth_prepare_mac_addr_change(netdev, addr);
if (err)
return err;
@@ -2963,6 +3027,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
err = ionic_qcqs_alloc(lif);
if (err)
goto err_unlock;
@@ -3095,8 +3162,7 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
- netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
napi_enable(&qcq->napi);
@@ -3169,6 +3235,7 @@ static int ionic_station_set(struct ionic_lif *lif)
.attr = IONIC_LIF_ATTR_MAC,
},
};
+ u8 mac_address[ETH_ALEN];
struct sockaddr addr;
int err;
@@ -3177,8 +3244,23 @@ static int ionic_station_set(struct ionic_lif *lif)
return err;
netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
ctx.comp.lif_getattr.mac);
- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
- return 0;
+ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+ if (is_zero_ether_addr(mac_address)) {
+ eth_hw_addr_random(netdev);
+ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
+ ether_addr_copy(mac_address, netdev->dev_addr);
+
+ err = ionic_program_mac(lif, mac_address);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+ netdev_dbg(netdev, "%s: SET/GET ATTR MAC are not the same, likely due to old FW running\n",
+ __func__);
+ return 0;
+ }
+ }
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
@@ -3186,12 +3268,11 @@ static int ionic_station_set(struct ionic_lif *lif)
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
- netdev->dev_addr))
+ if (!ether_addr_equal(mac_address, netdev->dev_addr))
ionic_lif_addr_add(lif, netdev->dev_addr);
} else {
/* Update the netdev mac with the device's mac */
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ ether_addr_copy(addr.sa_data, mac_address);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 4029b4e021f8..56f93b030551 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -474,8 +474,8 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
@@ -488,6 +488,8 @@ try_again:
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3c4a84ea6321..8c4cb910e09b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -65,9 +65,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ strscpy(drvinfo->driver, netxen_nic_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ strscpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
@@ -75,7 +75,7 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 4e6f00af17d9..de8d54b23f73 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -173,8 +173,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_add(netdev, &sds_ring->napi,
- netxen_nic_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &sds_ring->napi, netxen_nic_poll);
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index d701ecd3ba00..2661c483c67e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1119,7 +1119,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
snprintf(bit_name, 30,
p_aeu->bit_name, num);
else
- strlcpy(bit_name,
+ strscpy(bit_name,
p_aeu->bit_name, 30);
/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 97a7ab0826ed..8034d812d5a0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -624,7 +624,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
struct qede_dev *edev = netdev_priv(ndev);
char mbi[ETHTOOL_FWVERS_LEN];
- strlcpy(info->driver, "qede", sizeof(info->driver));
+ strscpy(info->driver, "qede", sizeof(info->driver));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -661,7 +661,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
"mfw %s", mfw);
}
- strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f56b679adb4b..953f304b8588 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1214,7 +1214,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* Start the Slowpath-process */
memset(&sp_params, 0, sizeof(sp_params));
sp_params.int_mode = QED_INT_MODE_MSIX;
- strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
@@ -1904,8 +1904,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
/* Add NAPI objects */
for_each_queue(i) {
- netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
- qede_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
napi_enable(&edev->fp_array[i].napi);
}
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 06f4d9a9e938..76072f8c3d2f 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,10 +1736,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ql3xxx_driver_version,
+ strscpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ql3xxx_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
}
@@ -3813,7 +3813,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
- netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
+ netif_napi_add(ndev, &qdev->napi, ql_poll);
ndev->irq = pdev->irq;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 54a2d653be63..1ee491f78c6b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -277,10 +277,10 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ strscpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9da5e97f8a0a..92930a055cbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1586,17 +1586,15 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll);
} else {
if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_poll);
else
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_rx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_rx_poll);
}
}
@@ -2115,17 +2113,14 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_rx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_rx_poll);
else
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_msix_sriov_vf_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_msix_sriov_vf_poll);
} else {
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_poll);
}
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index a55c52696d49..3115b2c12898 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -684,8 +684,7 @@ static int emac_probe(struct platform_device *pdev)
/* Initialize queues */
emac_mac_rx_tx_ring_init_all(pdev, adpt);
- netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx);
ret = register_netdev(netdev);
if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 792ce9a323cd..f62c39544e08 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -164,10 +164,10 @@ qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p)
{
struct qcaspi *qca = netdev_priv(dev);
- strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
- strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
- strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
- strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev),
+ strscpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
+ strscpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
+ strscpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
+ strscpy(p->bus_info, dev_name(&qca->spi_dev->dev),
sizeof(p->bus_info));
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index e5a0b38f7dbe..2b033060fc20 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -19,7 +19,7 @@ struct rmnet_map_control_command {
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
} __aligned(1);
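
The u8 data[0] member replaced above is a deprecated zero-length array. It cannot simply become u8 data[], because a flexible array member may not sit directly in a union nor be the sole member of a struct; DECLARE_FLEX_ARRAY() (from <linux/stddef.h>) wraps it so the declaration stays legal there while bounds checking and FORTIFY still see a real flexible array. A rough sketch of the shape it produces, with hypothetical names:

	union {
		struct some_header hdr;
		/* expands to roughly: struct { /\* empty pad \*/; u8 data[]; }; */
		DECLARE_FLEX_ARRAY(u8, data);
	};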
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index a6bf7d505178..eecd52ed1ed2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -939,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct r6040_private *rp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1127,7 +1127,7 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, r6040_poll, 64);
+ netif_napi_add(dev, &lp->napi, r6040_poll);
lp->mii_bus = mdiobus_alloc();
if (!lp->mii_bus) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e0feeec13da6..f5786d78ed23 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1382,9 +1382,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 15b40fd93cd2..469e2e229c6e 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1002,7 +1002,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
dev->netdev_ops = &rtl8139_netdev_ops;
dev->ethtool_ops = &rtl8139_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
+ netif_napi_add(dev, &tp->napi, rtl8139_poll);
/* note: the hardware is not capable of sg/csum/highdma, however
* through the use of skb_copy_and_csum_dev we enable these
@@ -2380,9 +2380,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
static int rtl8139_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 8da4b66b71b5..55ef8251feb5 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,10 +23,10 @@ enum mac_version {
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
RTL_GIGA_MAC_VER_11,
- RTL_GIGA_MAC_VER_12,
- RTL_GIGA_MAC_VER_13,
+ /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
+ /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
- RTL_GIGA_MAC_VER_16,
+ /* RTL_GIGA_MAC_VER_16 was merged with VER_10 */
RTL_GIGA_MAC_VER_17,
RTL_GIGA_MAC_VER_18,
RTL_GIGA_MAC_VER_19,
@@ -51,20 +51,20 @@ enum mac_version {
RTL_GIGA_MAC_VER_38,
RTL_GIGA_MAC_VER_39,
RTL_GIGA_MAC_VER_40,
- RTL_GIGA_MAC_VER_41,
+ /* support for RTL_GIGA_MAC_VER_41 has been removed */
RTL_GIGA_MAC_VER_42,
RTL_GIGA_MAC_VER_43,
RTL_GIGA_MAC_VER_44,
- RTL_GIGA_MAC_VER_45,
+ /* support for RTL_GIGA_MAC_VER_45 has been removed */
RTL_GIGA_MAC_VER_46,
- RTL_GIGA_MAC_VER_47,
+ /* support for RTL_GIGA_MAC_VER_47 has been removed */
RTL_GIGA_MAC_VER_48,
- RTL_GIGA_MAC_VER_49,
- RTL_GIGA_MAC_VER_50,
+ /* support for RTL_GIGA_MAC_VER_49 has been removed */
+ /* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
RTL_GIGA_MAC_VER_53,
- RTL_GIGA_MAC_VER_60,
+ /* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_NONE
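
The removed enumerators are kept as comments rather than deleted outright, which records where each chip version went (merged into another entry or dropped). Because the enumerators are implicitly numbered, every entry after a removal shifts value; that appears harmless here since mac_version is only compared against the symbolic names inside the driver and is not exposed to user space.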
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 1b7fdb4f056b..a73d061d9fcb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -49,10 +49,8 @@
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
-#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
-#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
@@ -102,12 +100,9 @@ static const struct {
[RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" },
[RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
@@ -131,20 +126,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
[RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
[RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
[RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
[RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
[RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_60] = {"RTL8125A" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
@@ -658,10 +647,8 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
-MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8168FP_3);
-MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
@@ -689,7 +676,7 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
static bool rtl_is_8125(struct rtl8169_private *tp)
{
- return tp->mac_version >= RTL_GIGA_MAC_VER_60;
+ return tp->mac_version >= RTL_GIGA_MAC_VER_61;
}
static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
@@ -892,8 +879,6 @@ static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
if (value & BMCR_RESET || !(value & BMCR_PDOWN))
rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
else
@@ -1207,7 +1192,7 @@ static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
return RTL_DASH_NONE;
@@ -1423,11 +1408,11 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (rtl_fw)
- strlcpy(info->fw_version, rtl_fw->version,
+ strscpy(info->fw_version, rtl_fw->version,
sizeof(info->fw_version));
}
@@ -2011,7 +1996,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168F family. */
{ 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ */
{ 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
@@ -2041,7 +2029,6 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
- { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
{ 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
@@ -2054,19 +2041,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
- { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
- /* FIXME: where did these entries come from ? -- FR
- * Not even r8101 vendor driver knows these id's,
- * so let's disable detection for now. -- HK
- * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
- * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
- */
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
/* 8110 family. */
{ 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
@@ -2088,8 +2066,6 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
if (ver != RTL_GIGA_MAC_NONE && !gmii) {
if (ver == RTL_GIGA_MAC_VER_42)
ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_45)
- ver = RTL_GIGA_MAC_VER_47;
else if (ver == RTL_GIGA_MAC_VER_46)
ver = RTL_GIGA_MAC_VER_48;
}
@@ -2271,7 +2247,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
default:
@@ -2338,7 +2314,6 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
readrq = 512;
@@ -2455,7 +2430,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_63:
@@ -2468,6 +2443,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
}
}
+static void rtl_disable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+}
+
static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
{
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
@@ -2700,8 +2680,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2712,8 +2692,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
} else {
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2985,7 +2965,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3223,7 +3203,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3274,7 +3254,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3288,45 +3268,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
}
-static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8168ep_1[] = {
- { 0x00, 0xffff, 0x10ab },
- { 0x06, 0xffff, 0xf030 },
- { 0x08, 0xffff, 0x2006 },
- { 0x0d, 0xffff, 0x1666 },
- { 0x0c, 0x3ff0, 0x0000 }
- };
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_1);
-
- rtl_hw_start_8168ep(tp);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
-}
-
-static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8168ep_2[] = {
- { 0x00, 0xffff, 0x10a3 },
- { 0x19, 0xffff, 0xfc00 },
- { 0x1e, 0xffff, 0x20ea }
- };
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_2);
-
- rtl_hw_start_8168ep(tp);
-
- RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
- RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
-}
-
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168ep_3[] = {
@@ -3377,7 +3318,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3621,48 +3562,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
else
rtl8125a_config_eee_mac(tp);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
- udelay(10);
-}
-
-static void rtl_hw_start_8125a_1(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8125a_1[] = {
- { 0x01, 0xffff, 0xa812 },
- { 0x09, 0xffff, 0x520c },
- { 0x04, 0xffff, 0xd000 },
- { 0x0d, 0xffff, 0xf702 },
- { 0x0a, 0xffff, 0x8653 },
- { 0x06, 0xffff, 0x001e },
- { 0x08, 0xffff, 0x3595 },
- { 0x20, 0xffff, 0x9455 },
- { 0x21, 0xffff, 0x99ff },
- { 0x02, 0xffff, 0x6046 },
- { 0x29, 0xffff, 0xfe00 },
- { 0x23, 0xffff, 0xab62 },
-
- { 0x41, 0xffff, 0xa80c },
- { 0x49, 0xffff, 0x520c },
- { 0x44, 0xffff, 0xd000 },
- { 0x4d, 0xffff, 0xf702 },
- { 0x4a, 0xffff, 0x8653 },
- { 0x46, 0xffff, 0x001e },
- { 0x48, 0xffff, 0x3595 },
- { 0x60, 0xffff, 0x9455 },
- { 0x61, 0xffff, 0x99ff },
- { 0x42, 0xffff, 0x6046 },
- { 0x69, 0xffff, 0xfe00 },
- { 0x63, 0xffff, 0xab62 },
- };
-
- rtl_set_def_aspm_entry_latency(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8125a_1);
-
- rtl_hw_start_8125_common(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
+ rtl_disable_rxdvgate(tp);
}
static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
@@ -3721,10 +3621,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
- [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
- [RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
- [RTL_GIGA_MAC_VER_16] = NULL,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
@@ -3748,20 +3645,14 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
- [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
- [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
- [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
- [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
- [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
};
@@ -4156,7 +4047,6 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
padto = max_t(unsigned int, padto, ETH_ZLEN);
@@ -4677,8 +4567,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
pm_runtime_idle(&tp->pci_dev->dev);
}
- if (net_ratelimit())
- phy_print_status(tp->phydev);
+ phy_print_status(tp->phydev);
}
static int r8169_phy_connect(struct rtl8169_private *tp)
@@ -4954,23 +4843,6 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
rtl8169_runtime_idle)
};
-static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
-{
- /* WoL fails with 8168b when the receiver is disabled. */
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pci_clear_master(tp->pci_dev);
-
- RTL_W8(tp, ChipCmd, CmdRxEnb);
- rtl_pci_commit(tp);
- break;
- default:
- break;
- }
-}
-
static void rtl_shutdown(struct pci_dev *pdev)
{
struct rtl8169_private *tp = pci_get_drvdata(pdev);
@@ -4984,9 +4856,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF &&
tp->dash_type == RTL_DASH_NONE) {
- if (tp->saved_wolopts)
- rtl_wol_shutdown_quirk(tp);
-
pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -5194,13 +5063,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
rtl_hw_init_8125(tp);
break;
default:
@@ -5220,7 +5089,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
return JUMBO_7K;
/* RTL8168b */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
@@ -5231,37 +5099,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
}
}
-static void rtl_disable_clk(void *data)
-{
- clk_disable_unprepare(data);
-}
-
-static int rtl_get_ether_clk(struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
- struct clk *clk;
- int rc;
-
- clk = devm_clk_get(d, "ether_clk");
- if (IS_ERR(clk)) {
- rc = PTR_ERR(clk);
- if (rc == -ENOENT)
- /* clk-core allows NULL (for suspend / resume) */
- rc = 0;
- else
- dev_err_probe(d, rc, "failed to get clk\n");
- } else {
- tp->clk = clk;
- rc = clk_prepare_enable(clk);
- if (rc)
- dev_err(d, "failed to enable clk: %d\n", rc);
- else
- rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
- }
-
- return rc;
-}
-
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
u8 mac_addr[ETH_ALEN] __aligned(2) = {};
@@ -5291,7 +5128,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_60 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
@@ -5325,9 +5162,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
/* Get the *optional* external "ether_clk" used on some boards */
- rc = rtl_get_ether_clk(tp);
- if (rc)
- return rc;
+ tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
+ if (IS_ERR(tp->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n");
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
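
The open-coded rtl_get_ether_clk() helper removed further down collapses into a single devm_clk_get_optional_enabled() call here: it returns NULL (not an error) when no "ether_clk" is described for the device, otherwise it gets, prepares and enables the clock and registers a devres action that disables, unprepares and releases it when the device is unbound, so no explicit cleanup path is needed. A sketch of the pattern with a hypothetical struct device *dev, not from this patch:

	/* <linux/clk.h>; clk may legitimately be NULL afterwards, and the
	 * clk API treats a NULL clock as a no-op. */
	struct clk *clk;

	clk = devm_clk_get_optional_enabled(dev, "ether_clk");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");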
@@ -5346,12 +5183,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- /* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
- return -ENODEV;
- }
-
rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
if (rc < 0) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
@@ -5378,7 +5209,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (rtl_aspm_is_safe(tp))
rc = 0;
- else if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
@@ -5413,7 +5244,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &rtl8169_ethtool_ops;
- netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &tp->napi, rtl8169_poll);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 15c295f90196..930496cd34ed 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -793,71 +793,6 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_config_eee_phy(phydev);
}
-static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- u16 dout_tapbin;
- u32 data;
-
- r8169_apply_firmware(tp);
-
- /* CHN EST parameters adjust - giga master */
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
- r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
-
- /* CHN EST parameters adjust - giga slave */
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
- r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
-
- /* enable R-tune & PGA-retune function */
- dout_tapbin = 0;
- data = phy_read_paged(phydev, 0x0a46, 0x13);
- data &= 3;
- data <<= 2;
- dout_tapbin |= data;
- data = phy_read_paged(phydev, 0x0a46, 0x12);
- data &= 0xc000;
- data >>= 14;
- dout_tapbin |= data;
- dout_tapbin = ~(dout_tapbin ^ 0x08);
- dout_tapbin <<= 12;
- dout_tapbin &= 0xf000;
-
- r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- rtl8168g_enable_gphy_10m(phydev);
-
- /* SAR ADC performance */
- phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
-
- r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
-
- /* disable phy pfm mode */
- phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(phydev);
- rtl8168h_config_eee_phy(phydev);
-}
-
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -895,27 +830,6 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_config_eee_phy(phydev);
}
-static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- /* Enable PHY auto speed down */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(phydev);
-
- /* Enable EEE auto-fallback function */
- phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* set rg_sel_sdm_rate */
- phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- rtl8168g_disable_aldps(phydev);
- rtl8168g_config_eee_phy(phydev);
-}
-
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1081,44 +995,6 @@ static void rtl8125_legacy_force_mode(struct phy_device *phydev)
phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0);
}
-static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
- phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
- phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
- phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
- phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
-
- r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
- r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
- r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
- r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
- r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
- r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
- r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
- r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
- r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
- r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
- r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
-
- phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
-
- phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
- phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
- phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- rtl8168g_enable_gphy_10m(phydev);
-
- rtl8125a_config_eee_phy(phydev);
-}
-
static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1239,10 +1115,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
[RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
- [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
- [RTL_GIGA_MAC_VER_16] = NULL,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
@@ -1266,20 +1139,14 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
[RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
[RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_41] = NULL,
[RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
[RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
[RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
[RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
};
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b980bce763d3..e0f8276cffed 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -189,6 +189,7 @@ enum ravb_reg {
PSR = 0x0528,
PIPR = 0x052c,
CXR31 = 0x0530, /* RZ/G2L only */
+ CXR35 = 0x0540, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
@@ -965,6 +966,13 @@ enum CXR31_BIT {
CXR31_SEL_LINK1 = 0x00000008,
};
+enum CXR35_BIT {
+ CXR35_SEL_XMII = 0x00000003,
+ CXR35_SEL_XMII_RGMII = 0x00000000,
+ CXR35_SEL_XMII_MII = 0x00000002,
+ CXR35_HALFCYC_CLKSW = 0xffff0000,
+};
+
enum CSR0_BIT {
CSR0_TPE = 0x00000010,
CSR0_RPE = 0x00000020,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b357ac4c56c5..36324126db6d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -540,7 +540,13 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+ } else {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+ CXR31_SEL_LINK0);
+ }
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@@ -1449,6 +1455,8 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
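
Setting phydev->mac_managed_pm tells phylib that this MAC driver suspends and resumes the attached PHY itself; the MDIO bus PM callbacks then leave the PHY alone instead of touching it a second time, avoiding ordering problems between the bus and the MAC across system sleep. The same one-line change appears in sh_eth below.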
@@ -2512,6 +2520,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
@@ -2832,9 +2841,9 @@ static int ravb_probe(struct platform_device *pdev)
goto out_dma_free;
}
- netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
/* Network device register */
error = register_netdev(ndev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 67ade78fb767..71a499113308 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -3366,7 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
- netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
+ netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
/* network device register */
ret = register_netdev(ndev);
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fc83ec23bd1d..023682cd2768 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2226,8 +2226,8 @@ rocker_port_set_link_ksettings(struct net_device *dev,
static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
@@ -2574,8 +2574,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx);
- netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bc70c6abd6a5..58cf7cc54f40 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
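
GFP_KERNEL allocations may sleep, but ofdpa_port_ipv4_neigh() can be reached from a context that must not sleep (reportedly the neighbour timer path, which runs in softirq context with the neighbour lock held), so the allocation is switched to GFP_ATOMIC.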
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 98edb01024f0..8ba017ec9849 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -175,8 +175,8 @@ static int sxgbe_set_eee(struct net_device *dev,
static void sxgbe_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 sxgbe_getmsglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index a1c10b61269b..9664f029fa16 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2143,7 +2143,7 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
}
- netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+ netif_napi_add(ndev, &priv->napi, sxgbe_poll);
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index bb06fa228367..b5e45fc6337e 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -9,7 +9,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
- mae.o tc.o
+ mae.o tc.o tc_bindings.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ee734b69150f..d1e1aa19a68e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4213,7 +4213,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 702abbe59b76..135ece2f1375 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -43,6 +43,8 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_sset_count = efx_ethtool_get_sset_count,
+ .get_priv_flags = efx_ethtool_get_priv_flags,
+ .set_priv_flags = efx_ethtool_set_priv_flags,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.get_link_ksettings = efx_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 17b9d37218cb..88fa29572e23 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -23,6 +23,7 @@
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
+#include "tc_bindings.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -246,6 +247,9 @@ static const struct net_device_ops ef100_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
+#ifdef CONFIG_SFC_SRIOV
+ .ndo_setup_tc = efx_tc_setup,
+#endif
};
/* Netdev registration
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 8061efdaf82c..ad686c671ab8 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1137,6 +1137,9 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
*/
netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
rc);
+ } else {
+ net_dev->features |= NETIF_F_HW_TC;
+ efx->fixed_features |= NETIF_F_HW_TC;
}
#endif
return 0;
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 73ae4656a6e7..81ab22c74635 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -14,6 +14,7 @@
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
+#include "tc_bindings.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
@@ -42,8 +43,7 @@ static int efx_ef100_rep_open(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
- netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll);
napi_enable(&efv->napi);
return 0;
}
@@ -107,6 +107,20 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
return 0;
}
+static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+ struct efx_nic *efx = efv->parent;
+
+ if (type == TC_SETUP_CLSFLOWER)
+ return efx_tc_flower(efx, net_dev, type_data, efv);
+ if (type == TC_SETUP_BLOCK)
+ return efx_tc_setup_block(net_dev, efx, type_data, efv);
+
+ return -EOPNOTSUPP;
+}
+
static void efx_ef100_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -120,13 +134,14 @@ static void efx_ef100_rep_get_stats64(struct net_device *dev,
stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}
-static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_open = efx_ef100_rep_open,
.ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
.ndo_get_stats64 = efx_ef100_rep_get_stats64,
+ .ndo_setup_tc = efx_ef100_rep_setup_tc,
};
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
index 070f700893c1..c21bc716f847 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.h
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -66,4 +66,5 @@ void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
* Caller must hold rcu_read_lock().
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
+extern const struct net_device_ops efx_ef100_rep_netdev_ops;
#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 153d68e29b8b..054d5ce6029e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -778,7 +778,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
return;
if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
@@ -1175,6 +1175,17 @@ static int efx_pm_freeze(struct device *dev)
return 0;
}
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
static int efx_pm_thaw(struct device *dev)
{
int rc;
@@ -1279,6 +1290,7 @@ static struct pci_driver efx_pci_driver = {
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
.err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure,
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 032b8c0bd788..aaa381743bca 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -1313,7 +1313,7 @@ void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}
void efx_init_napi(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index a929a1aaba92..c2224e41a694 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -996,7 +996,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_ip_align =
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bc840ede3053..6649a2327d03 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -101,15 +101,23 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "log-tc-errors",
+};
+
+#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0)
+
+#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings)
+
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
@@ -452,6 +460,8 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ case ETH_SS_PRIV_FLAGS:
+ return EFX_ETHTOOL_PRIV_FLAGS_COUNT;
default:
return -EINVAL;
}
@@ -468,7 +478,7 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (efx_describe_per_queue_stats(efx, strings) *
@@ -478,12 +488,39 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++)
+ strscpy(strings + i * ETH_GSTRING_LEN,
+ efx_ethtool_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ break;
default:
/* No other string sets */
break;
}
}
+u32 efx_ethtool_get_priv_flags(struct net_device *net_dev)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 ret_flags = 0;
+
+ if (efx->log_tc_errs)
+ ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS;
+
+ return ret_flags;
+}
+
+int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ efx->log_tc_errs =
+ !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS);
+
+ return 0;
+}
+
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 659491932101..0afc74021a5e 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -27,6 +27,8 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
+u32 efx_ethtool_get_priv_flags(struct net_device *net_dev);
+int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags);
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __attribute__ ((unused)),
u64 *data);
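
The new "log-tc-errors" private flag simply latches into efx->log_tc_errs; from user space it is visible through ethtool --show-priv-flags and can be toggled with ethtool --set-priv-flags <dev> log-tc-errors on|off.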
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index a63f40b09856..e151b0957751 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2012,7 +2012,7 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
struct ef4_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, 64);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
}
static void ef4_init_napi(struct ef4_nic *efx)
@@ -2329,7 +2329,7 @@ static void ef4_unregister_netdev(struct ef4_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
if (ef4_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
@@ -2640,7 +2640,7 @@ static int ef4_init_struct(struct ef4_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 907254b36663..3976a333f7e3 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -162,9 +162,9 @@ static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct ef4_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
@@ -412,7 +412,7 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (ef4_describe_per_queue_stats(efx, strings) *
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 3324a6219a09..7a1c9337081b 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -2387,7 +2387,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
board->i2c_data.data = efx;
board->i2c_adap.algo_data = &board->i2c_data;
board->i2c_adap.dev.parent = &efx->pci_dev->dev;
- strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
+ strscpy(board->i2c_adap.name, "SFC4000 GPIO",
sizeof(board->i2c_adap.name));
rc = i2c_bit_add_bus(&board->i2c_adap);
if (rc)
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index 156da315ec89..78c851b5a56f 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -452,7 +452,7 @@ size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 4d928839d292..be72e71da027 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/in6.h>
#include <asm/byteorder.h>
/**
@@ -224,6 +225,27 @@ efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
}
/**
+ * efx_filter_set_ipv6_local - specify IPv6 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv6_local(struct efx_filter_spec *spec, u8 proto,
+ const struct in6_addr *host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IPV6);
+ spec->ip_proto = proto;
+ memcpy(spec->loc_host, host, sizeof(spec->loc_host));
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
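A sketch of how the new efx_filter_set_ipv6_local() helper is meant to be called; the PTP changes later in this patch use exactly this pattern for the ff0e::181 multicast filter. Here rxq_index and rc are placeholder locals:

	struct efx_filter_spec spec;
	const struct in6_addr addr = {{PTP_ADDR_IPV6}};	/* ff0e::181, as defined in ptp.c below */

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, rxq_index);
	efx_filter_set_ipv6_local(&spec, IPPROTO_UDP, &addr, htons(PTP_EVENT_PORT));
	rc = efx_filter_insert_filter(efx, &spec, true);	/* returns filter ID or -errno */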
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 97627f5e3674..874c765b2465 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -112,6 +112,167 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
return 0;
}
+static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_MAE_GET_CAPS_IN_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_GET_CAPS, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ caps->match_field_count = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
+ caps->action_prios = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ACTION_PRIOS);
+ return 0;
+}
+
+static int efx_mae_get_rule_fields(struct efx_nic *efx, u32 cmd,
+ u8 *field_support)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(MAE_NUM_FIELDS));
+ MCDI_DECLARE_STRUCT_PTR(caps);
+ unsigned int count;
+ size_t outlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_IN_LEN);
+
+ rc = efx_mcdi_rpc(efx, cmd, NULL, 0, outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ count = MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_COUNT);
+ memset(field_support, MAE_FIELD_UNSUPPORTED, MAE_NUM_FIELDS);
+ caps = _MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_FIELD_FLAGS);
+ /* We're only interested in the support status enum, not any other
+ * flags, so just extract that from each entry.
+ */
+ for (i = 0; i < count; i++)
+ if (i * sizeof(*outbuf) + MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST < outlen)
+ field_support[i] = EFX_DWORD_FIELD(caps[i], MAE_FIELD_FLAGS_SUPPORT_STATUS);
+ return 0;
+}
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps)
+{
+ int rc;
+
+ rc = efx_mae_get_basic_caps(efx, caps);
+ if (rc)
+ return rc;
+ return efx_mae_get_rule_fields(efx, MC_CMD_MAE_GET_AR_CAPS,
+ caps->action_rule_fields);
+}
+
+/* Bit twiddling:
+ * Prefix: 1...110...0
+ * ~: 0...001...1
+ * + 1: 0...010...0 is power of two
+ * so (~x) & ((~x) + 1) == 0. Converse holds also.
+ */
+#define is_prefix_byte(_x) !(((_x) ^ 0xff) & (((_x) ^ 0xff) + 1))
+
+enum mask_type { MASK_ONES, MASK_ZEROES, MASK_PREFIX, MASK_OTHER };
+
+static const char *mask_type_name(enum mask_type typ)
+{
+ switch (typ) {
+ case MASK_ONES:
+ return "all-1s";
+ case MASK_ZEROES:
+ return "all-0s";
+ case MASK_PREFIX:
+ return "prefix";
+ case MASK_OTHER:
+ return "arbitrary";
+ default: /* can't happen */
+ return "unknown";
+ }
+}
+
+/* Checks a (big-endian) bytestring is a bit prefix */
+static enum mask_type classify_mask(const u8 *mask, size_t len)
+{
+ bool zeroes = true; /* All bits seen so far are zeroes */
+ bool ones = true; /* All bits seen so far are ones */
+ bool prefix = true; /* Valid prefix so far */
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (ones) {
+ if (!is_prefix_byte(mask[i]))
+ prefix = false;
+ } else if (mask[i]) {
+ prefix = false;
+ }
+ if (mask[i] != 0xff)
+ ones = false;
+ if (mask[i])
+ zeroes = false;
+ }
+ if (ones)
+ return MASK_ONES;
+ if (zeroes)
+ return MASK_ZEROES;
+ if (prefix)
+ return MASK_PREFIX;
+ return MASK_OTHER;
+}
+
+static int efx_mae_match_check_cap_typ(u8 support, enum mask_type typ)
+{
+ switch (support) {
+ case MAE_FIELD_UNSUPPORTED:
+ case MAE_FIELD_SUPPORTED_MATCH_NEVER:
+ if (typ == MASK_ZEROES)
+ return 0;
+ return -EOPNOTSUPP;
+ case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
+ if (typ == MASK_ZEROES)
+ return 0;
+ fallthrough;
+ case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
+ if (typ == MASK_ONES)
+ return 0;
+ return -EINVAL;
+ case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
+ if (typ == MASK_OTHER)
+ return -EOPNOTSUPP;
+ return 0;
+ case MAE_FIELD_SUPPORTED_MATCH_MASK:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mae_match_check_caps(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *supported_fields = efx->tc->caps->action_rule_fields;
+ __be32 ingress_port = cpu_to_be32(mask->ingress_port);
+ enum mask_type ingress_port_mask_type;
+ int rc;
+
+ /* Check for _PREFIX assumes big-endian, so we need to convert */
+ ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
+ sizeof(ingress_port));
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
+ ingress_port_mask_type);
+ if (rc) {
+ efx_tc_err(efx, "No support for %s mask in field ingress_port\n",
+ mask_type_name(ingress_port_mask_type));
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port");
+ return rc;
+ }
+ return 0;
+}
+
static bool efx_mae_asl_id(u32 id)
{
return !!(id & BIT(31));
@@ -279,6 +440,10 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
+ match->value.recirc_id);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
+ match->mask.recirc_id);
return 0;
}
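The is_prefix_byte() identity used by classify_mask() is easiest to see on concrete bytes; a short worked example (illustrative, not driver code):

	/* 0xf8 = 1111 1000: x ^ 0xff = 0x07, 0x07 + 1 = 0x08, 0x07 & 0x08 = 0
	 *   -> a valid prefix byte.
	 * 0xa0 = 1010 0000: x ^ 0xff = 0x5f, 0x5f + 1 = 0x60, 0x5f & 0x60 = 0x40
	 *   -> not a prefix byte, so classify_mask() returns MASK_OTHER.
	 */

efx_mae_match_check_caps() then compares the classified mask against the per-field support level read from the MAE, so a match that the firmware can only do as all-or-nothing (or not at all) is rejected with -EINVAL or -EOPNOTSUPP before any rule is pushed to hardware.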
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 0369be4d8983..3e0cd238d523 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -27,6 +27,20 @@ void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
+
+struct mae_caps {
+ u32 match_field_count;
+ u32 action_prios;
+ u8 action_rule_fields[MAE_NUM_FIELDS];
+};
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
+
+int efx_mae_match_check_caps(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack);
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 26bc69f76801..1f18e9dc62e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -201,6 +201,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+/* Use MCDI_STRUCT_ functions to access members of MCDI structuredefs.
+ * _buf should point to the start of the structure, typically obtained with
+ * MCDI_DECLARE_STRUCT_PTR(structure) = _MCDI_DWORD(mcdi_buf, FIELD_WHICH_IS_STRUCT);
+ */
+#define MCDI_STRUCT_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, _field ## _OFST)
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
@@ -208,6 +214,10 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define _MCDI_STRUCT_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
+#define MCDI_STRUCT_SET_BYTE(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 1); \
+ *(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
+ } while (0)
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
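A sketch of how the new MCDI_STRUCT_* accessors combine, following the usage comment added above and the efx_mae_populate_match_criteria() hunk earlier in this patch; FIELD_WHICH_IS_STRUCT and STRUCT_MEMBER are placeholder names:

	/* Illustrative only: point at a structuredef inside an MCDI buffer,
	 * then set and re-read a one-byte member of it.
	 */
	MCDI_DECLARE_STRUCT_PTR(structure) = _MCDI_DWORD(mcdi_buf, FIELD_WHICH_IS_STRUCT);
	MCDI_STRUCT_SET_BYTE(structure, STRUCT_MEMBER, 0x1);
	u8 *member = MCDI_STRUCT_PTR(structure, STRUCT_MEMBER);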
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 5954fcfee2b1..f5128db7c7e7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -285,7 +285,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
- strlcpy(attr->name, name, sizeof(attr->name));
+ strscpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 7ef823d7a89a..2e9ba0cfe848 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -855,6 +855,7 @@ enum efx_xdp_tx_queues_mode {
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irqs_hooked: Channel interrupts are hooked
+ * @log_tc_errs: Error logging for TC filter insertion is enabled
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -1017,6 +1018,7 @@ struct efx_nic {
unsigned int timer_max_ns;
bool irq_rx_adaptive;
bool irqs_hooked;
+ bool log_tc_errs;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 22fbb0ae77fb..63e2394382bb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -465,7 +465,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 10ad0b93d283..eaef4a15008a 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -118,9 +118,14 @@
#define PTP_MIN_LENGTH 63
-#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_RXFILTERS_LEN 5
+
+#define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */
+#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0x01, 0x81} /* ff0e::181 */
#define PTP_EVENT_PORT 319
#define PTP_GENERAL_PORT 320
+#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */
/* Annoyingly the format of the version numbers are different between
* versions 1 and 2 so it isn't possible to simply look for 1 or 2.
@@ -224,9 +229,8 @@ struct efx_ptp_timeset {
* @work: Work task
* @reset_required: A serious error has occurred and the PTP task needs to be
* reset (disable, enable).
- * @rxfilter_event: Receive filter when operating
- * @rxfilter_general: Receive filter when operating
- * @rxfilter_installed: Receive filter installed
+ * @rxfilters: Receive filters when operating
+ * @rxfilters_count: Num of installed rxfilters, should be == PTP_RXFILTERS_LEN
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
@@ -295,9 +299,8 @@ struct efx_ptp_data {
struct workqueue_struct *workwq;
struct work_struct work;
bool reset_required;
- u32 rxfilter_event;
- u32 rxfilter_general;
- bool rxfilter_installed;
+ u32 rxfilters[PTP_RXFILTERS_LEN];
+ size_t rxfilters_count;
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
@@ -1290,61 +1293,108 @@ static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
+ while (ptp->rxfilters_count) {
+ ptp->rxfilters_count--;
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
+ ptp->rxfilters[ptp->rxfilters_count]);
}
}
-static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+static void efx_ptp_init_filter(struct efx_nic *efx,
+ struct efx_filter_spec *rxfilter)
+{
+ struct efx_channel *channel = efx->ptp_data->channel;
+ struct efx_rx_queue *queue = efx_channel_get_rx_queue(channel);
+
+ efx_filter_init_rx(rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(queue));
+}
+
+static int efx_ptp_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *rxfilter)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+
+ int rc = efx_filter_insert_filter(efx, rxfilter, true);
+ if (rc < 0)
+ return rc;
+ ptp->rxfilters[ptp->rxfilters_count] = rc;
+ ptp->rxfilters_count++;
+ return 0;
+}
+
+static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx, u16 port)
+{
struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, htonl(PTP_ADDR_IPV4),
+ htons(port));
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, u16 port)
+{
+ const struct in6_addr addr = {{PTP_ADDR_IPV6}};
+ struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_ipv6_local(&rxfilter, IPPROTO_UDP, &addr, htons(port));
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_eth_filter(struct efx_nic *efx)
+{
+ const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER;
+ struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_eth_local(&rxfilter, EFX_FILTER_VID_UNSPEC, addr);
+ rxfilter.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ rxfilter.ether_type = htons(ETH_P_1588);
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
int rc;
- if (!ptp->channel || ptp->rxfilter_installed)
+ if (!ptp->channel || ptp->rxfilters_count)
return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
*/
- efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
- efx_rx_queue_index(
- efx_channel_get_rx_queue(ptp->channel)));
- rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
- htonl(PTP_ADDRESS),
- htons(PTP_EVENT_PORT));
- if (rc != 0)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ rc = efx_ptp_insert_ipv4_filter(efx, PTP_EVENT_PORT);
if (rc < 0)
- return rc;
- ptp->rxfilter_event = rc;
-
- efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
- efx_rx_queue_index(
- efx_channel_get_rx_queue(ptp->channel)));
- rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
- htonl(PTP_ADDRESS),
- htons(PTP_GENERAL_PORT));
- if (rc != 0)
goto fail;
- rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ rc = efx_ptp_insert_ipv4_filter(efx, PTP_GENERAL_PORT);
if (rc < 0)
goto fail;
- ptp->rxfilter_general = rc;
- ptp->rxfilter_installed = true;
+ /* if the NIC supports hw timestamps by the MAC, we can support
+ * PTP over IPv6 and Ethernet
+ */
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ rc = efx_ptp_insert_ipv6_filter(efx, PTP_EVENT_PORT);
+ if (rc < 0)
+ goto fail;
+
+ rc = efx_ptp_insert_ipv6_filter(efx, PTP_GENERAL_PORT);
+ if (rc < 0)
+ goto fail;
+
+ rc = efx_ptp_insert_eth_filter(efx);
+ if (rc < 0)
+ goto fail;
+ }
+
return 0;
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
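The ptp.c rework replaces two fixed filter IDs with a small array, which makes partial-failure handling uniform: every helper that succeeds records its filter ID, and one loop undoes whatever subset was installed. In outline (a recap of the code above, not new logic):

	rc = efx_ptp_insert_ipv4_filter(efx, PTP_EVENT_PORT);	/* ID recorded in rxfilters[0] */
	...
	rc = efx_ptp_insert_eth_filter(efx);			/* ID recorded in rxfilters[4] */
	if (rc < 0)
		efx_ptp_remove_multicast_filters(efx);		/* walks rxfilters_count back to 0 */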
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 63d999e63960..60e5b7c8ccf9 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -775,7 +775,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_siena_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
@@ -1148,6 +1148,17 @@ static int efx_pm_freeze(struct device *dev)
return 0;
}
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
static int efx_pm_thaw(struct device *dev)
{
int rc;
@@ -1252,6 +1263,7 @@ static struct pci_driver efx_pci_driver = {
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
.err_handler = &efx_siena_err_handlers,
#ifdef CONFIG_SFC_SIENA_SRIOV
.sriov_configure = efx_pci_sriov_configure,
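The new .shutdown handler reuses the existing freeze path and then disables the PCI device, so the Siena NIC quiesces DMA and interrupts before a reboot or kexec; the NULL drvdata check covers devices whose probe never completed. A minimal sketch of the same shape, with placeholder names, for reference:

	static void example_pci_shutdown(struct pci_dev *pci_dev)
	{
		struct example_nic *nic = pci_get_drvdata(pci_dev);

		if (!nic)
			return;			/* probe failed or never ran */
		example_quiesce(nic);		/* stop queues, detach netdev */
		pci_disable_device(pci_dev);
	}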
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 017212a40df3..06ed74994e36 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -1317,7 +1317,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}
void efx_siena_init_napi(struct efx_nic *efx)
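The one-line efx_siena_probe_interrupts() change matters because the TX channel offset feeds the channel lookup used on the transmit path: with separate TX channels disabled there is only channel 0, so a hard-coded offset of 1 would index past it. Roughly how the offset is consumed (paraphrased for illustration; the real helper lives in net_driver.h, outside this excerpt):

	static inline struct efx_channel *
	efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
	{
		return efx->channel[efx->tx_channel_offset + index];
	}

The tx.c hunks later in this patch switch efx_tx_send_pending() to this same lookup.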
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index 954daf464abb..1fd396b00bfb 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1006,7 +1006,7 @@ int efx_siena_init_struct(struct efx_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 0207d07f54e3..f590e87e5a23 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -105,10 +105,10 @@ void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_siena_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev)
@@ -467,7 +467,7 @@ void efx_siena_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (efx_describe_per_queue_stats(efx, strings) *
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
index c7ea703c5d7a..56a9c56ed9e3 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
@@ -285,7 +285,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
- strlcpy(attr->name, name, sizeof(attr->name));
+ strscpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
index abf9a4adf139..0ea0433a6230 100644
--- a/drivers/net/ethernet/sfc/siena/nic.c
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -458,7 +458,7 @@ size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t coun
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
index e166dcb9b99c..91e87594ed1e 100644
--- a/drivers/net/ethernet/sfc/siena/tx.c
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0c0aeb91f500..3478860d4023 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -9,11 +9,60 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <net/pkt_cls.h>
#include "tc.h"
+#include "tc_bindings.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
+#define EFX_EFV_PF NULL
+/* Look up the representor information (efv) for a device.
+ * May return NULL for the PF (us), or an error pointer for a device that
+ * isn't supported as a TC offload endpoint
+ */
+static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
+ struct net_device *dev)
+{
+ struct efx_rep *efv;
+
+ if (!dev)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it us (the PF)? */
+ if (dev == efx->net_dev)
+ return EFX_EFV_PF;
+ /* Is it an efx vfrep at all? */
+ if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it ours? We don't support TC rules that include another
+ * EF100's netdevices (not even on another port of the same NIC).
+ */
+ efv = netdev_priv(dev);
+ if (efv->parent != efx)
+ return ERR_PTR(-EOPNOTSUPP);
+ return efv;
+}
+
+/* Convert a driver-internal vport ID into an external device (wire or VF) */
+static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
+{
+ u32 mport;
+
+ if (IS_ERR(efv))
+ return PTR_ERR(efv);
+ if (!efv) /* device is PF (us) */
+ efx_mae_mport_wire(efx, &mport);
+ else /* device is repr */
+ efx_mae_mport_mport(efx, efv->mport, &mport);
+ return mport;
+}
+
+static const struct rhashtable_params efx_tc_match_action_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct efx_tc_flow_rule, cookie),
+ .head_offset = offsetof(struct efx_tc_flow_rule, linkage),
+};
+
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -58,6 +107,333 @@ static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rul
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
+static void efx_tc_flow_free(void *ptr, void *arg)
+{
+ struct efx_tc_flow_rule *rule = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc rule %lx still present at teardown, removing\n",
+ rule->cookie);
+
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+
+ kfree(rule);
+}
+
+static int efx_tc_flower_parse_match(struct efx_nic *efx,
+ struct flow_rule *rule,
+ struct efx_tc_match *match,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control fm;
+
+ flow_rule_match_control(rule, &fm);
+
+ if (fm.mask->flags) {
+ efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
+ fm.mask->flags);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC))) {
+ efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic fm;
+
+ flow_rule_match_basic(rule, &fm);
+ if (fm.mask->n_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ if (fm.mask->ip_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_tc_flower_replace(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc,
+ struct efx_rep *efv)
+{
+ struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_action_set *act = NULL;
+ const struct flow_action_entry *fa;
+ struct efx_rep *from_efv, *to_efv;
+ struct efx_tc_match match;
+ s64 rc;
+ int i;
+
+ if (!tc_can_offload_extack(efx->net_dev, extack))
+ return -EOPNOTSUPP;
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+ if (WARN_ON(!efx->tc->up))
+ return -ENETDOWN;
+
+ from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
+ if (IS_ERR(from_efv)) {
+ /* Might be a tunnel decap rule from an indirect block.
+ * Support for those not implemented yet.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ if (efv != from_efv) {
+ /* can't happen */
+ efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
+ netdev_name(net_dev), efv ? "non-" : "",
+ from_efv ? "non-" : "");
+ if (efv)
+ NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
+ else
+ NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
+ return -EINVAL;
+ }
+
+ /* Parse match */
+ memset(&match, 0, sizeof(match));
+ rc = efx_tc_flower_external_mport(efx, from_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
+ return rc;
+ }
+ match.value.ingress_port = rc;
+ match.mask.ingress_port = ~0;
+ rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
+ if (rc)
+ return rc;
+
+ if (tc->common.chain_index) {
+ EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
+ return -EOPNOTSUPP;
+ }
+ match.mask.recirc_id = 0xff;
+
+ rc = efx_mae_match_check_caps(efx, &match.mask, extack);
+ if (rc)
+ return rc;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&rule->acts.list);
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+
+ flow_action_for_each(i, fa, &fr->action) {
+ struct efx_tc_action_set save;
+
+ if (!act) {
+ /* more actions after a non-pipe action */
+ EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
+ rc = -EINVAL;
+ goto release;
+ }
+
+ switch (fa->id) {
+ case FLOW_ACTION_DROP:
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* end of the line */
+ break;
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED:
+ save = *act;
+ to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
+ if (IS_ERR(to_efv)) {
+ EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
+ rc = PTR_ERR(to_efv);
+ goto release;
+ }
+ rc = efx_tc_flower_external_mport(efx, to_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
+ goto release;
+ }
+ act->dest_mport = rc;
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL;
+ if (fa->id == FLOW_ACTION_REDIRECT)
+ break; /* end of the line */
+ /* Mirror, so continue on with saved act */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+ *act = save;
+ break;
+ default:
+ efx_tc_err(efx, "Unhandled action %u\n", fa->id);
+ rc = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ goto release;
+ }
+ }
+
+ if (act) {
+ /* Not shot/redirected, so deliver to default dest */
+ if (from_efv == EFX_EFV_PF)
+ /* Rule applies to traffic from the wire,
+ * and default dest is thus the PF
+ */
+ efx_mae_mport_uplink(efx, &act->dest_mport);
+ else
+ /* Representor, so rule applies to traffic from
+ * representee, and default dest is thus the rep.
+ * All reps use the same mport for delivery
+ */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &act->dest_mport);
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* Prevent double-free in error path */
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed filter (cookie %lx)\n",
+ tc->cookie);
+
+ rule->match = match;
+
+ rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
+ goto release;
+ }
+ rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
+ rule->acts.fw_id, &rule->fw_id);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
+ goto release_acts;
+ }
+ return 0;
+
+release_acts:
+ efx_mae_free_action_set_list(efx, &rule->acts);
+release:
+ /* We failed to insert the rule, so free up any entries we created in
+ * subsidiary tables.
+ */
+ if (act)
+ efx_tc_free_action_set(efx, act, false);
+ if (rule) {
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ efx_tc_free_action_set_list(efx, &rule->acts, false);
+ }
+ kfree(rule);
+ return rc;
+}
+
+static int efx_tc_flower_destroy(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule;
+
+ rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
+ efx_tc_match_action_ht_params);
+ if (!rule) {
+ /* Only log a message if we're the ingress device. Otherwise
+ * it's a foreign filter and we might just not have been
+ * interested (e.g. we might not have been the egress device
+ * either).
+ */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found to remove\n", tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+
+ /* Remove it from HW */
+ efx_tc_delete_rule(efx, rule);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
+ efx_tc_match_action_ht_params);
+ netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
+ kfree(rule);
+ return 0;
+}
+
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv)
+{
+ int rc;
+
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->tc->mutex);
+ switch (tc->command) {
+ case FLOW_CLS_REPLACE:
+ rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
+ break;
+ case FLOW_CLS_DESTROY:
+ rc = efx_tc_flower_destroy(efx, net_dev, tc);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&efx->tc->mutex);
+ return rc;
+}
+
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
u32 eg_port, struct efx_tc_flow_rule *rule)
{
@@ -201,13 +577,37 @@ int efx_init_tc(struct efx_nic *efx)
{
int rc;
+ rc = efx_mae_get_caps(efx, efx->tc->caps);
+ if (rc)
+ return rc;
+ if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
+ /* Firmware supports some match fields the driver doesn't know
+ * about. Not fatal, unless any of those fields are required
+ * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
+ */
+ netif_warn(efx, probe, efx->net_dev,
+ "FW reports additional match fields %u\n",
+ efx->tc->caps->match_field_count);
+ if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
+ netif_err(efx, probe, efx->net_dev,
+ "Too few action prios supported (have %u, need %u)\n",
+ efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
+ return -EIO;
+ }
rc = efx_tc_configure_default_rule_pf(efx);
if (rc)
return rc;
rc = efx_tc_configure_default_rule_wire(efx);
if (rc)
return rc;
- return efx_tc_configure_rep_mport(efx);
+ rc = efx_tc_configure_rep_mport(efx);
+ if (rc)
+ return rc;
+ efx->tc->up = true;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ return rc;
+ return 0;
}
void efx_fini_tc(struct efx_nic *efx)
@@ -215,20 +615,35 @@ void efx_fini_tc(struct efx_nic *efx)
/* We can get called even if efx_init_struct_tc() failed */
if (!efx->tc)
return;
+ if (efx->tc->up)
+ flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
efx_tc_deconfigure_rep_mport(efx);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+ efx->tc->up = false;
}
int efx_init_struct_tc(struct efx_nic *efx)
{
+ int rc;
+
if (efx->type->is_vf)
return 0;
efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
if (!efx->tc)
return -ENOMEM;
+ efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
+ if (!efx->tc->caps) {
+ rc = -ENOMEM;
+ goto fail_alloc_caps;
+ }
+ INIT_LIST_HEAD(&efx->tc->block_list);
+ mutex_init(&efx->tc->mutex);
+ rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
+ if (rc < 0)
+ goto fail_match_action_ht;
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
@@ -236,6 +651,13 @@ int efx_init_struct_tc(struct efx_nic *efx)
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
return 0;
+fail_match_action_ht:
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
+fail_alloc_caps:
+ kfree(efx->tc);
+ efx->tc = NULL;
+ return rc;
}
void efx_fini_struct_tc(struct efx_nic *efx)
@@ -243,10 +665,16 @@ void efx_fini_struct_tc(struct efx_nic *efx)
if (!efx->tc)
return;
+ mutex_lock(&efx->tc->mutex);
EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
+ efx);
+ mutex_unlock(&efx->tc->mutex);
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
kfree(efx->tc);
efx->tc = NULL;
}
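Taken together, the tc.c additions give the driver a first, minimal match-action offload: matches on ingress port only (nonzero chain indices are rejected and the recirc ID is pinned to zero), with drop, mirred/redirect, or default-delivery actions. The call flow, in outline (a recap of the functions added above, error handling omitted):

	efx_init_struct_tc(efx);  /* allocate efx->tc: caps buffer, mutex, block list, rule hashtable */
	efx_init_tc(efx);         /* query MAE caps, install default PF/wire rules, register indirect blocks */
	/* ... ndo_setup_tc and block callbacks invoke efx_tc_flower() for
	 *     FLOW_CLS_REPLACE and FLOW_CLS_DESTROY ... */
	efx_fini_tc(efx);         /* unregister indirect blocks, tear down default rules */
	efx_fini_struct_tc(efx);  /* flush any rules left in the hashtable, free state */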
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 309123c6b386..196fd74ed973 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -11,8 +11,28 @@
#ifndef EFX_TC_H
#define EFX_TC_H
+#include <net/flow_offload.h>
+#include <linux/rhashtable.h>
#include "net_driver.h"
+/* Error reporting: convenience macros. For indicating why a given filter
+ * insertion is not supported; errors in internal operation or in the
+ * hardware should be netif_err()s instead.
+ */
+/* Used when error message is constant. */
+#define EFX_TC_ERR_MSG(efx, extack, message) do { \
+ NL_SET_ERR_MSG_MOD(extack, message); \
+ if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, "%s\n", message); \
+} while (0)
+/* Used when error message is not constant; caller should also supply a
+ * constant extack message with NL_SET_ERR_MSG_MOD().
+ */
+#define efx_tc_err(efx, fmt, args...) do { \
+if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, fmt, ##args);\
+} while (0)
+
struct efx_tc_action_set {
u16 deliver:1;
u32 dest_mport;
@@ -23,6 +43,7 @@ struct efx_tc_action_set {
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
+ u8 recirc_id;
};
struct efx_tc_match {
@@ -36,12 +57,15 @@ struct efx_tc_action_set_list {
};
struct efx_tc_flow_rule {
+ unsigned long cookie;
+ struct rhash_head linkage;
struct efx_tc_match match;
struct efx_tc_action_set_list acts;
u32 fw_id;
};
enum efx_tc_rule_prios {
+ EFX_TC_PRIO_TC, /* Rule inserted by TC */
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
EFX_TC_PRIO__NUM
};
@@ -49,6 +73,10 @@ enum efx_tc_rule_prios {
/**
* struct efx_tc_state - control plane data for TC offload
*
+ * @caps: MAE capabilities reported by MCDI
+ * @block_list: List of &struct efx_tc_block_binding
+ * @mutex: Used to serialise operations on TC hashtables
+ * @match_action_ht: Hashtable of TC match-action rules
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
@@ -57,14 +85,20 @@ enum efx_tc_rule_prios {
* %EFX_TC_PRIO_DFLT. Named by *ingress* port
* @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
* @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ * @up: have TC datastructures been set up?
*/
struct efx_tc_state {
+ struct mae_caps *caps;
+ struct list_head block_list;
+ struct mutex mutex;
+ struct rhashtable match_action_ht;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
struct {
struct efx_tc_flow_rule pf;
struct efx_tc_flow_rule wire;
} dflt;
+ bool up;
};
struct efx_rep;
@@ -72,6 +106,8 @@ struct efx_rep;
int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule);
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv);
int efx_tc_insert_rep_filters(struct efx_nic *efx);
void efx_tc_remove_rep_filters(struct efx_nic *efx);
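The two macros above implement the policy behind the new log-tc-errors private flag: extack always receives a constant message, while the extra netif_info() chatter is gated on efx->log_tc_errs. The pairing rule for non-constant messages can be seen in the tc.c hunk, e.g.:

	/* Copied from efx_tc_flower_replace() above. */
	efx_tc_err(efx, "Unhandled action %u\n", fa->id);
	NL_SET_ERR_MSG_MOD(extack, "Unsupported action");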
diff --git a/drivers/net/ethernet/sfc/tc_bindings.c b/drivers/net/ethernet/sfc/tc_bindings.c
new file mode 100644
index 000000000000..c18d64519c2d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_bindings.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc_bindings.h"
+#include "tc.h"
+
+struct efx_tc_block_binding {
+ struct list_head list;
+ struct efx_nic *efx;
+ struct efx_rep *efv;
+ struct net_device *otherdev; /* may actually be us */
+ struct flow_block *block;
+};
+
+static struct efx_tc_block_binding *efx_tc_find_binding(struct efx_nic *efx,
+ struct net_device *otherdev)
+{
+ struct efx_tc_block_binding *binding;
+
+ ASSERT_RTNL();
+ list_for_each_entry(binding, &efx->tc->block_list, list)
+ if (binding->otherdev == otherdev)
+ return binding;
+ return NULL;
+}
+
+static int efx_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct efx_tc_block_binding *binding = cb_priv;
+ struct flow_cls_offload *tcf = type_data;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return efx_tc_flower(binding->efx, binding->otherdev,
+ tcf, binding->efv);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void efx_tc_block_unbind(void *cb_priv)
+{
+ struct efx_tc_block_binding *binding = cb_priv;
+
+ list_del(&binding->list);
+ kfree(binding);
+}
+
+static struct efx_tc_block_binding *efx_tc_create_binding(
+ struct efx_nic *efx, struct efx_rep *efv,
+ struct net_device *otherdev, struct flow_block *block)
+{
+ struct efx_tc_block_binding *binding = kmalloc(sizeof(*binding), GFP_KERNEL);
+
+ if (!binding)
+ return ERR_PTR(-ENOMEM);
+ binding->efx = efx;
+ binding->efv = efv;
+ binding->otherdev = otherdev;
+ binding->block = block;
+ list_add(&binding->list, &efx->tc->block_list);
+ return binding;
+}
+
+int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
+ struct flow_block_offload *tcb, struct efx_rep *efv)
+{
+ struct efx_tc_block_binding *binding;
+ struct flow_block_cb *block_cb;
+ int rc;
+
+ if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+
+ switch (tcb->command) {
+ case FLOW_BLOCK_BIND:
+ binding = efx_tc_create_binding(efx, efv, net_dev, tcb->block);
+ if (IS_ERR(binding))
+ return PTR_ERR(binding);
+ block_cb = flow_block_cb_alloc(efx_tc_block_cb, binding,
+ binding, efx_tc_block_unbind);
+ rc = PTR_ERR_OR_ZERO(block_cb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "bind %sdirect block for device %s, rc %d\n",
+ net_dev == efx->net_dev ? "" :
+ efv ? "semi" : "in",
+ net_dev ? net_dev->name : NULL, rc);
+ if (rc) {
+ list_del(&binding->list);
+ kfree(binding);
+ } else {
+ flow_block_cb_add(block_cb, tcb);
+ }
+ return rc;
+ case FLOW_BLOCK_UNBIND:
+ binding = efx_tc_find_binding(efx, net_dev);
+ if (binding) {
+ block_cb = flow_block_cb_lookup(tcb->block,
+ efx_tc_block_cb,
+ binding);
+ if (block_cb) {
+ flow_block_cb_remove(block_cb, tcb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "unbound %sdirect block for device %s\n",
+ net_dev == efx->net_dev ? "" :
+ binding->efv ? "semi" : "in",
+ net_dev ? net_dev->name : NULL);
+ return 0;
+ }
+ }
+ /* If we're in driver teardown, then we expect to have
+ * already unbound all our blocks (we did it early while
+ * we still had MCDI to remove the filters), so getting
+ * unbind callbacks now isn't a problem.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ !efx->tc->up, warn,
+ "%sdirect block unbind for device %s, was never bound\n",
+ net_dev == efx->net_dev ? "" : "in",
+ net_dev ? net_dev->name : NULL);
+ return -ENOENT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type,
+ void *type_data, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct flow_block_offload *tcb = type_data;
+ struct efx_tc_block_binding *binding;
+ struct flow_block_cb *block_cb;
+ struct efx_nic *efx = cb_priv;
+ bool is_ovs_int_port;
+ int rc;
+
+ if (!net_dev)
+ return -EOPNOTSUPP;
+
+ if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ is_ovs_int_port = netif_is_ovs_master(net_dev);
+ if (tcb->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ !is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ if (is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ switch (tcb->command) {
+ case FLOW_BLOCK_BIND:
+ binding = efx_tc_create_binding(efx, NULL, net_dev, tcb->block);
+ if (IS_ERR(binding))
+ return PTR_ERR(binding);
+ block_cb = flow_indr_block_cb_alloc(efx_tc_block_cb, binding,
+ binding, efx_tc_block_unbind,
+ tcb, net_dev, sch, data, binding,
+ cleanup);
+ rc = PTR_ERR_OR_ZERO(block_cb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "bind indr block for device %s, rc %d\n",
+ net_dev ? net_dev->name : NULL, rc);
+ if (rc) {
+ list_del(&binding->list);
+ kfree(binding);
+ } else {
+ flow_block_cb_add(block_cb, tcb);
+ }
+ return rc;
+ case FLOW_BLOCK_UNBIND:
+ binding = efx_tc_find_binding(efx, net_dev);
+ if (!binding)
+ return -ENOENT;
+ block_cb = flow_block_cb_lookup(tcb->block,
+ efx_tc_block_cb,
+ binding);
+ if (!block_cb)
+ return -ENOENT;
+ flow_indr_block_cb_remove(block_cb, tcb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "unbind indr block for device %s\n",
+ net_dev ? net_dev->name : NULL);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* .ndo_setup_tc implementation
+ * Entry point for flower block and filter management.
+ */
+int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ if (efx->type->is_vf)
+ return -EOPNOTSUPP;
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ if (type == TC_SETUP_CLSFLOWER)
+ return efx_tc_flower(efx, net_dev, type_data, NULL);
+ if (type == TC_SETUP_BLOCK)
+ return efx_tc_setup_block(net_dev, efx, type_data, NULL);
+
+ return -EOPNOTSUPP;
+}
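efx_tc_setup() is the .ndo_setup_tc entry point and efx_tc_indr_setup_cb() is the indirect-block callback registered from efx_init_tc(); the actual hookup into the EF100 netdev ops is not part of this excerpt. A sketch of how that wiring would typically look (the ops-struct name here is a placeholder):

	static const struct net_device_ops efx_ef100_example_netdev_ops = {
		/* ... existing ops ... */
		.ndo_setup_tc		= efx_tc_setup,
	};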
diff --git a/drivers/net/ethernet/sfc/tc_bindings.h b/drivers/net/ethernet/sfc/tc_bindings.h
new file mode 100644
index 000000000000..c210bb09150e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_bindings.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_BINDINGS_H
+#define EFX_TC_BINDINGS_H
+#include "net_driver.h"
+
+#include <net/sch_generic.h>
+
+struct efx_rep;
+
+void efx_tc_block_unbind(void *cb_priv);
+int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
+ struct flow_block_offload *tcb, struct efx_rep *efv);
+int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data);
+
+int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type,
+ void *type_data, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+#endif /* EFX_TC_BINDINGS_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index d12474042c84..c5f88f7a7a04 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index e2d009866a7b..8fc3f5272fa7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1158,9 +1158,9 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
static void ioc3_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
- strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(info->driver, IOC3_NAME, sizeof(info->driver));
+ strscpy(info->version, IOC3_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 216bb2d34d7c..dda4e488c77a 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1769,9 +1769,9 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(tp->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 23a336c5096e..cb7fec226cab 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2027,9 +2027,9 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ strscpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sis_priv->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0329caf63279..013e90d69182 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -482,7 +482,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &epic_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &ep->napi, epic_poll, 64);
+ netif_napi_add(dev, &ep->napi, epic_poll);
ret = register_netdev(dev);
if (ret < 0)
@@ -1392,9 +1392,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 24d66af797d4..52ecfb461c41 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1509,9 +1509,9 @@ smc911x_ethtool_set_link_ksettings(struct net_device *dev,
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, version, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->version, version, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 37c822e27207..29bb19f42de9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index a31c159e96ea..35e99bf0c401 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1588,9 +1588,9 @@ smc_ethtool_set_link_ksettings(struct net_device *dev,
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, version, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->version, version, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 3bf20211cceb..a2e511912e6a 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1037,6 +1037,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@@ -1953,9 +1955,9 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
- strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
+ strscpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -2587,6 +2589,8 @@ static int smsc911x_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ if (!device_may_wakeup(dev))
+ phy_stop(ndev->phydev);
}
/* enable wake on LAN, energy detection and the external PME
@@ -2628,6 +2632,8 @@ static int smsc911x_resume(struct device *dev)
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
+ if (!device_may_wakeup(dev))
+ phy_start(ndev->phydev);
}
return 0;
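The three smsc911x changes belong together: setting phydev->mac_managed_pm tells phylib that the MAC driver, not the MDIO bus PM callbacks, is responsible for suspending and resuming the PHY, and the suspend/resume hooks then do exactly that whenever wake-on-LAN is not armed. In outline (a recap of the hunks above):

	phydev->mac_managed_pm = true;		/* at PHY attach time */

	/* MAC suspend path */
	if (!device_may_wakeup(dev))
		phy_stop(ndev->phydev);
	/* MAC resume path */
	if (!device_may_wakeup(dev))
		phy_start(ndev->phydev);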
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 0c68c7f8056d..71fbb358bb7d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -215,10 +215,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(pd->pdev),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
@@ -1585,7 +1585,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->netdev_ops = &smsc9420_netdev_ops;
dev->ethtool_ops = &smsc9420_ethtool_ops;
- netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pd->napi, smsc9420_rx_poll);
result = register_netdev(dev);
if (result) {
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index b0c5a44785fa..2240f6d0b89b 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -526,8 +526,8 @@ static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
static void netsec_et_get_drvinfo(struct net_device *net_device,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "netsec", sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ strscpy(info->driver, "netsec", sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(net_device->dev.parent),
sizeof(info->bus_info));
}
@@ -2093,7 +2093,7 @@ static int netsec_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision %d.%d\n",
hw_ver >> 16, hw_ver & 0xffff);
- netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll);
ndev->netdev_ops = &netsec_netdev_ops;
ndev->ethtool_ops = &netsec_ethtool_ops;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f0c8de2c6075..1fa09b49ba7f 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -395,8 +395,8 @@ static void ave_ethtool_get_drvinfo(struct net_device *ndev,
{
struct device *dev = ndev->dev.parent;
- strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+ strscpy(info->driver, dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
@@ -1687,8 +1687,7 @@ static int ave_probe(struct platform_device *pdev)
pdev->name, pdev->id);
/* Register as a NAPI supported driver */
- netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 358fc26f8d1f..80efdeeb0b59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -445,9 +445,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to probe subdriver: %d\n",
- ret);
+ dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
goto remove_config;
}
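
dev_err_probe() folds the old "stay quiet on -EPROBE_DEFER" special case into a single call: it logs at error level for real failures, records the deferral reason otherwise, and returns the error code so the result can feed a return or goto directly. A hedged sketch with a hypothetical resource lookup:

	ret = example_get_resource(&pdev->dev);	/* may return -EPROBE_DEFER */
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "failed to get resource\n");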
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 52f9ed8db9c9..0a2afc1a3124 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -610,7 +610,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
plat->ext_snapshot_num = AUX_SNAPSHOT0;
- plat->has_crossts = true;
plat->crosststamp = intel_crosststamp;
plat->int_snapshot_en = 0;
@@ -1134,9 +1133,8 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
- pcim_iounmap_regions(pdev, BIT(0));
}
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index c469abc91fa1..f7269d79a385 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -32,6 +32,8 @@ struct rk_gmac_ops {
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
+ bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
bool regs_valid;
u32 regs[];
@@ -66,6 +68,7 @@ struct rk_priv_data {
int rx_delay;
struct regmap *grf;
+ struct regmap *php_grf;
};
#define HIWORD_UPDATE(val, mask, shift) \
@@ -1101,6 +1104,147 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* sys_grf */
+#define RK3588_GRF_GMAC_CON7 0X031c
+#define RK3588_GRF_GMAC_CON8 0X0320
+#define RK3588_GRF_GMAC_CON9 0X0324
+
+#define RK3588_GMAC_RXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 3)
+#define RK3588_GMAC_RXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 3)
+#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
+#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+/* php_grf */
+#define RK3588_GRF_GMAC_CON0 0X0008
+#define RK3588_GRF_CLK_CON1 0X0070
+
+#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
+ (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
+ (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+
+#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
+#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
+
+#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+
+#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
+#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
+ (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+
+#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
+#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
+
+static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 offset_con, id = bsp_priv->id;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
+ RK3588_GRF_GMAC_CON8;
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RGMII_MODE(id));
+
+ regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
+ RK3588_GMAC_RXCLK_DLY_ENABLE(id) |
+ RK3588_GMAC_TXCLK_DLY_ENABLE(id));
+
+ regmap_write(bsp_priv->grf, offset_con,
+ RK3588_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3588_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
+}
+
+static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, id = bsp_priv->id;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMA_CLK_RMII_DIV20(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV50(id);
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMA_CLK_RMII_DIV2(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV5(id);
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMAC_CLK_RGMII_DIV1(id);
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+
+ return;
+err:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+}
+
+static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id);
+
+ val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
+ RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+}
+
+static const struct rk_gmac_ops rk3588_ops = {
+ .set_to_rgmii = rk3588_set_to_rgmii,
+ .set_to_rmii = rk3588_set_to_rmii,
+ .set_rgmii_speed = rk3588_set_gmac_speed,
+ .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_clock_selection = rk3588_set_clock_selection,
+};
+
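
The new RK3588 glue talks to two syscon regmaps: delay enables and delay values live in the general GRF, while interface selection and clock muxing/gating sit in a separate "PHP" GRF looked up through the new rockchip,php-grf phandle (see rk_gmac_setup() further down). All of the GRF_BIT()/GRF_CLR_BIT()/HIWORD_UPDATE() values rely on the Rockchip convention that the top 16 bits of a GRF register are a write-enable mask for the bottom 16, so one regmap_write() updates individual fields without a read-modify-write. A sketch of that idiom, using this file's HIWORD_UPDATE() macro and a placeholder register offset EXAMPLE_GRF_CON:

	/* HIWORD_UPDATE(val, mask, shift): low half = value, high half = write mask. */
	regmap_write(grf, EXAMPLE_GRF_CON, HIWORD_UPDATE(1, 0x1, 3));	/* set bit 3 */
	regmap_write(grf, EXAMPLE_GRF_CON, HIWORD_UPDATE(0, 0x1, 3));	/* clear bit 3 */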
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
@@ -1153,6 +1297,130 @@ static const struct rk_gmac_ops rv1108_ops = {
.set_rmii_speed = rv1108_set_rmii_speed,
};
+#define RV1126_GRF_GMAC_CON0 0X0070
+#define RV1126_GRF_GMAC_CON1 0X0074
+#define RV1126_GRF_GMAC_CON2 0X0078
+
+/* RV1126_GRF_GMAC_CON0 */
+#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
+#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
+#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RV1126_GMAC_M0_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+#define RV1126_GMAC_M0_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RV1126_GMAC_M0_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RV1126_GMAC_M1_RXCLK_DLY_ENABLE GRF_BIT(3)
+#define RV1126_GMAC_M1_RXCLK_DLY_DISABLE GRF_CLR_BIT(3)
+#define RV1126_GMAC_M1_TXCLK_DLY_ENABLE GRF_BIT(2)
+#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
+
+/* RV1126_GRF_GMAC_CON1 */
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+/* RV1126_GRF_GMAC_CON2 */
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON1,
+ RV1126_GMAC_M0_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M0_CLK_TX_DL_CFG(tx_delay));
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON2,
+ RV1126_GMAC_M1_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M1_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ case 1000:
+ rate = 125000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static const struct rk_gmac_ops rv1126_ops = {
+ .set_to_rgmii = rv1126_set_to_rgmii,
+ .set_to_rmii = rv1126_set_to_rmii,
+ .set_rgmii_speed = rv1126_set_rgmii_speed,
+ .set_rmii_speed = rv1126_set_rmii_speed,
+};
+
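
Unlike the rk3588 callbacks, which pick a divider in the PHP GRF, the rv1126 callbacks retune clk_mac_speed directly; the rates follow the standard RMII/RGMII relationship of 125 MHz for 1000 Mb/s, 25 MHz for 100 Mb/s and 2.5 MHz for 10 Mb/s. A small sketch of that mapping (helper name hypothetical):

	static unsigned long example_gmac_clk_rate(int speed)
	{
		switch (speed) {
		case 1000:
			return 125000000;	/* RGMII only */
		case 100:
			return 25000000;
		case 10:
			return 2500000;
		default:
			return 0;		/* caller rejects unknown speeds */
		}
	}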
#define RK_GRF_MACPHY_CON0 0xb00
#define RK_GRF_MACPHY_CON1 0xb04
#define RK_GRF_MACPHY_CON2 0xb08
@@ -1304,6 +1572,10 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->clk_mac_speed))
clk_prepare_enable(bsp_priv->clk_mac_speed);
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, true);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1330,6 +1602,10 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->mac_clk_tx);
clk_disable_unprepare(bsp_priv->clk_mac_speed);
+
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1444,6 +1720,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
+ bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1680,7 +1958,9 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
+ { .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 35ab8d0bdce7..7ab791c8d355 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -56,7 +56,7 @@
#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
-#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+#define MAC_CORE_INIT (MAC_CONTROL_HBD)
/* MAC FLOW CTRL defines */
#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 3c73453725f9..4296ddda8aaa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,7 +126,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
-#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \
GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 76edb9b72675..0e00dd83d027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -15,7 +15,6 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
@@ -24,7 +23,6 @@
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
- struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
@@ -32,13 +30,6 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
/* Configure GMAC core */
value |= GMAC_CORE_INIT;
- /* Clear ACS bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
- value &= ~GMAC_CONTROL_ACS;
-
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 75071a7d551a..a6e8d7bd9588 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -15,7 +15,6 @@
*******************************************************************************/
#include <linux/crc32.h>
-#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac.h"
#include "dwmac100.h"
@@ -28,13 +27,6 @@ static void dwmac100_core_init(struct mac_device_info *hw,
value |= MAC_CORE_INIT;
- /* Clear ASTP bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev))
- value &= ~MAC_CONTROL_ASTP;
-
writel(value, ioaddr + MAC_CONTROL);
#ifdef STMMAC_VLAN_TAG_USED
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index d8f1fbc25bdd..c25bfecb4a2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
-#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index caa4bfc4c1d6..9b6138b11776 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
if (enable)
value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
}
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
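
stmmac_set_mac() here, and stmmac_mac_link_up() further down, now compare the new control value against what was read back and skip the writel() when nothing changed, avoiding a redundant register write on every link event. The pattern is a plain read/modify/conditional-write:

	u32 old_val, val;

	old_val = readl(ioaddr + MAC_CTRL_REG);
	val = old_val;
	/* ... set or clear the enable bits in val ... */
	if (val != old_val)
		writel(val, ioaddr + MAC_CTRL_REG);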
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d6a44d53fe08..f453b0d09366 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -287,15 +287,15 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac || priv->plat->has_gmac4)
- strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else if (priv->plat->has_xgmac)
- strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ strscpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
if (priv->plat->pdev) {
- strlcpy(info->bus_info, pci_name(priv->plat->pdev),
+ strscpy(info->bus_info, pci_name(priv->plat->pdev),
sizeof(info->bus_info));
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 070b5ef165eb..65c96773c6d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -986,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1064,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
@@ -3800,6 +3801,15 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
+ }
+
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3903,6 +3913,10 @@ static int stmmac_release(struct net_device *dev)
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+ /* Power down the SerDes, if present */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
@@ -5075,16 +5089,8 @@ read_again:
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
buf1_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
}
@@ -5261,16 +5267,8 @@ read_again:
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
len += buf2_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
if (buf2_len) {
buf2_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
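
With ACS/ASTP no longer set at core init (see the dwmac100.h/dwmac1000.h hunks above), the RX path strips the 4-byte FCS itself on the last descriptor of every frame, so the old llc_snap/synopsys_id special case disappears. Roughly, following the second of the two hunks above (buf1_len/buf2_len/len as used in this function):

	/* Last descriptor of the frame: drop the trailing FCS from the totals. */
	if (!(status & rx_not_ls)) {
		if (buf2_len)
			buf2_len -= ETH_FCS_LEN;
		else
			buf1_len -= ETH_FCS_LEN;
		len -= ETH_FCS_LEN;
	}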
@@ -6889,8 +6887,7 @@ static void stmmac_napi_add(struct net_device *dev)
spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
}
if (queue < priv->plat->tx_queues_to_use) {
netif_napi_add_tx(dev, &ch->tx_napi,
@@ -6899,8 +6896,7 @@ static void stmmac_napi_add(struct net_device *dev)
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_add(dev, &ch->rxtx_napi,
- stmmac_napi_poll_rxtx,
- NAPI_POLL_WEIGHT);
+ stmmac_napi_poll_rxtx);
}
}
}
@@ -7292,14 +7288,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@@ -7314,8 +7302,6 @@ int stmmac_dvr_probe(struct device *device,
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_xpcs_setup:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 9f5cac4000da..50f6b4a14be4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -440,11 +440,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
/* Default to phy auto-detection */
plat->phy_addr = -1;
- /* Default to get clk_csr from stmmac_clk_crs_set(),
+ /* Default to get clk_csr from stmmac_clk_csr_set(),
* or get clk_csr from device tree.
*/
plat->clk_csr = -1;
- of_property_read_u32(np, "clk_csr", &plat->clk_csr);
+ if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
+ of_property_read_u32(np, "clk_csr", &plat->clk_csr);
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0b08b0e085e8..0aca193d9550 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4484,9 +4484,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static int cas_get_link_ksettings(struct net_device *dev,
@@ -5050,7 +5050,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->watchdog_timeo = CAS_TX_TIMEOUT;
#ifdef USE_NAPI
- netif_napi_add(dev, &cp->napi, cas_poll, 64);
+ netif_napi_add(dev, &cp->napi, cas_poll);
#endif
dev->irq = pdev->irq;
dev->dma = 0;
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 0cd8493b810f..8addee6d04bd 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -63,8 +63,8 @@ static struct vio_version vsw_versions[] = {
static void vsw_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vsw_get_msglevel(struct net_device *dev)
@@ -354,8 +354,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
dev_set_drvdata(&vdev->dev, port);
- netif_napi_add(dev, &port->napi, sunvnet_poll_common,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, sunvnet_poll_common);
spin_lock_irqsave(&vp->lock, flags);
list_add_rcu(&port->list, &vp->port_list);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df70df29deea..e6144d963eaa 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6798,12 +6798,12 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strlcpy(info->bus_info, pci_name(np->pdev),
+ strscpy(info->bus_info, pci_name(np->pdev),
sizeof(info->bus_info));
}
@@ -9115,7 +9115,7 @@ static int niu_ldg_init(struct niu *np)
for (i = 0; i < np->num_ldg; i++) {
struct niu_ldg *lp = &np->ldg[i];
- netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
+ netif_napi_add(np->dev, &lp->napi, niu_poll);
lp->np = np;
lp->ldg_num = ldg_num_map[i];
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 531a6f449afa..34b94153bf0c 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1038,8 +1038,8 @@ static void bigmac_set_multicast(struct net_device *dev)
/* Ethtool support... */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunbmac", sizeof(info->driver));
- strlcpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->driver, "sunbmac", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
}
static u32 bigmac_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index a14591b41acb..4154e68639ac 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2521,9 +2521,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_link_ksettings(struct net_device *dev,
@@ -2980,7 +2980,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_consistent;
dev->netdev_ops = &gem_netdev_ops;
- netif_napi_add(dev, &gp->napi, gem_poll, 64);
+ netif_napi_add(dev, &gp->napi, gem_poll);
dev->ethtool_ops = &gem_ethtool_ops;
dev->watchdog_timeo = 5 * HZ;
dev->dma = 0;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 8594ee839628..62deed210a95 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -61,15 +61,8 @@
#include "sunhme.h"
#define DRV_NAME "sunhme"
-#define DRV_VERSION "3.10"
-#define DRV_RELDATE "August 26, 2008"
-#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
-static char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
MODULE_LICENSE("GPL");
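
Alongside the logging rework below, the driver-private version string goes away: DRV_VERSION, MODULE_VERSION() and the printed banner only duplicated information already available from the kernel itself, so the remaining module metadata is just author, description and licence. Sketch of the reduced boilerplate (author/description hypothetical):

	MODULE_AUTHOR("A. Developer <dev@example.org>");
	MODULE_DESCRIPTION("Example 10/100baseT Ethernet driver");
	MODULE_LICENSE("GPL");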
@@ -87,13 +80,17 @@ static struct quattro *qfe_sbus_list;
static struct quattro *qfe_pci_list;
#endif
-#undef HMEDEBUG
-#undef SXDEBUG
-#undef RXDEBUG
-#undef TXDEBUG
-#undef TXLOGGING
+#define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
+#define HMD hme_debug
+
+/* "Auto Switch Debug" aka phy debug */
+#if 1
+#define ASD hme_debug
+#else
+#define ASD(...)
+#endif
-#ifdef TXLOGGING
+#if 0
struct hme_tx_logent {
unsigned int tstamp;
int tx_new, tx_old;
@@ -128,46 +125,16 @@ static __inline__ void tx_dump_log(void)
this = txlog_cur_entry;
for (i = 0; i < TX_LOG_LEN; i++) {
- printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
+ pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
tx_log[this].tstamp,
tx_log[this].tx_new, tx_log[this].tx_old,
tx_log[this].action, tx_log[this].status);
this = (this + 1) & (TX_LOG_LEN - 1);
}
}
-static __inline__ void tx_dump_ring(struct happy_meal *hp)
-{
- struct hmeal_init_block *hb = hp->happy_block;
- struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
- int i;
-
- for (i = 0; i < TX_RING_SIZE; i+=4) {
- printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
- i, i + 4,
- le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
- le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
- le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
- le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
- }
-}
-#else
-#define tx_add_log(hp, a, s) do { } while(0)
-#define tx_dump_log() do { } while(0)
-#define tx_dump_ring(hp) do { } while(0)
-#endif
-
-#ifdef HMEDEBUG
-#define HMD(x) printk x
-#else
-#define HMD(x)
-#endif
-
-/* #define AUTO_SWITCH_DEBUG */
-
-#ifdef AUTO_SWITCH_DEBUG
-#define ASD(x) printk x
#else
-#define ASD(x)
+#define tx_add_log(hp, a, s)
+#define tx_dump_log()
#endif
#define DEFAULT_IPG0 16 /* For lance-mode only */
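
The old debug macros took a doubly parenthesised printk argument list (HMD(("fmt", args))) and were compiled out unless HMEDEBUG/AUTO_SWITCH_DEBUG/TXLOGGING were defined. They are rebuilt here as ordinary variadic macros on top of pr_debug(), so the messages become printf-style calls controllable through dynamic debug, and the remaining printk(KERN_ERR/KERN_NOTICE ...) sites throughout the rest of this file move to netdev_err()/netdev_notice()/netdev_info(), which prefix the interface name automatically. The resulting call styles, using values taken from this driver:

	/* Debug: a single format string routed through pr_debug()/dynamic debug. */
	HMD("happy_flags[%08x]\n", hp->happy_flags);
	ASD("tcfg=%08x\n", tconfig);

	/* Errors and notices: netdev_* adds the netdev name for free. */
	netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");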
@@ -343,8 +310,6 @@ static int happy_meal_bb_read(struct happy_meal *hp,
int retval = 0;
int i;
- ASD(("happy_meal_bb_read: reg=%d ", reg));
-
/* Enable the MIF BitBang outputs. */
hme_write32(hp, tregs + TCVR_BBOENAB, 1);
@@ -378,7 +343,7 @@ static int happy_meal_bb_read(struct happy_meal *hp,
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
- ASD(("value=%x\n", retval));
+ ASD("reg=%d value=%x\n", reg, retval);
return retval;
}
@@ -389,7 +354,7 @@ static void happy_meal_bb_write(struct happy_meal *hp,
u32 tmp;
int i;
- ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
+ ASD("reg=%d value=%x\n", reg, value);
/* Enable the MIF BitBang outputs. */
hme_write32(hp, tregs + TCVR_BBOENAB, 1);
@@ -433,14 +398,13 @@ static int happy_meal_tcvr_read(struct happy_meal *hp,
int tries = TCVR_READ_TRIES;
int retval;
- ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
if (hp->tcvr_type == none) {
- ASD(("no transceiver, value=TCVR_FAILURE\n"));
+ ASD("no transceiver, value=TCVR_FAILURE\n");
return TCVR_FAILURE;
}
if (!(hp->happy_flags & HFLAG_FENABLE)) {
- ASD(("doing bit bang\n"));
+ ASD("doing bit bang\n");
return happy_meal_bb_read(hp, tregs, reg);
}
@@ -449,11 +413,11 @@ static int happy_meal_tcvr_read(struct happy_meal *hp,
while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
udelay(20);
if (!tries) {
- printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
+ netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
return TCVR_FAILURE;
}
retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
- ASD(("value=%04x\n", retval));
+ ASD("reg=0x%02x value=%04x\n", reg, retval);
return retval;
}
@@ -465,7 +429,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
{
int tries = TCVR_WRITE_TRIES;
- ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
+ ASD("reg=0x%02x value=%04x\n", reg, value);
/* Welcome to Sun Microsystems, can I take your order please? */
if (!(hp->happy_flags & HFLAG_FENABLE)) {
@@ -482,7 +446,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
/* Anything else? */
if (!tries)
- printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
+ netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
/* Fifty-two cents is your change, have a nice day. */
}
@@ -660,8 +624,8 @@ static void happy_meal_timer(struct timer_list *t)
/* Enter force mode. */
do_force_mode:
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
- printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
- hp->dev->name);
+ netdev_notice(hp->dev,
+ "Auto-Negotiation unsuccessful, trying force link mode\n");
hp->sw_bmcr = BMCR_SPEED100;
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -720,8 +684,8 @@ static void happy_meal_timer(struct timer_list *t)
restart_timer = 0;
} else {
if (hp->timer_ticks >= 10) {
- printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
- "not completely up.\n", hp->dev->name);
+ netdev_notice(hp->dev,
+ "Auto negotiation successful, link still not completely up.\n");
hp->timer_ticks = 0;
restart_timer = 1;
} else {
@@ -776,14 +740,14 @@ static void happy_meal_timer(struct timer_list *t)
*/
/* Let the user know... */
- printk(KERN_NOTICE "%s: Link down, cable problem?\n",
- hp->dev->name);
+ netdev_notice(hp->dev,
+ "Link down, cable problem?\n");
ret = happy_meal_init(hp);
if (ret) {
/* ho hum... */
- printk(KERN_ERR "%s: Error, cannot re-init the "
- "Happy Meal.\n", hp->dev->name);
+ netdev_err(hp->dev,
+ "Error, cannot re-init the Happy Meal.\n");
}
goto out;
}
@@ -805,8 +769,8 @@ static void happy_meal_timer(struct timer_list *t)
case asleep:
default:
/* Can't happens.... */
- printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Aieee, link timer is asleep but we got one anyways!\n");
restart_timer = 0;
hp->timer_ticks = 0;
hp->timer_state = asleep; /* foo on you */
@@ -830,7 +794,7 @@ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
{
int tries = TX_RESET_TRIES;
- HMD(("happy_meal_tx_reset: reset, "));
+ HMD("reset...\n");
/* Would you like to try our SMCC Delux? */
hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
@@ -839,10 +803,10 @@ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
/* Lettuce, tomato, buggy hardware (no extra charge)? */
if (!tries)
- printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
+ netdev_err(hp->dev, "Transceiver BigMac ATTACK!");
/* Take care. */
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -850,7 +814,7 @@ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
{
int tries = RX_RESET_TRIES;
- HMD(("happy_meal_rx_reset: reset, "));
+ HMD("reset...\n");
/* We have a special on GNU/Viking hardware bugs today. */
hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
@@ -859,10 +823,10 @@ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
/* Will that be all? */
if (!tries)
- printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
+ netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
/* Don't forget your vik_1137125_wa. Have a nice day. */
- HMD(("done\n"));
+ HMD("done\n");
}
#define STOP_TRIES 16
@@ -872,7 +836,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
{
int tries = STOP_TRIES;
- HMD(("happy_meal_stop: reset, "));
+ HMD("reset...\n");
/* We're consolidating our STB products, it's your lucky day. */
hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
@@ -881,10 +845,10 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
/* Come back next week when we are "Sun Microelectronics". */
if (!tries)
- printk(KERN_ERR "happy meal: Fry guys.");
+ netdev_err(hp->dev, "Fry guys.\n");
/* Remember: "Different name, same old buggy as shit hardware." */
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -913,21 +877,18 @@ static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
/* hp->happy_lock must be held */
static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
{
- ASD(("happy_meal_poll_stop: "));
-
/* If polling disabled or not polling already, nothing to do. */
if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
(HFLAG_POLLENABLE | HFLAG_POLL)) {
- HMD(("not polling, return\n"));
+ ASD("not polling, return\n");
return;
}
/* Shut up the MIF. */
- ASD(("were polling, mif ints off, "));
+ ASD("were polling, mif ints off, polling off\n");
hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
/* Turn off polling. */
- ASD(("polling off, "));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
@@ -936,7 +897,7 @@ static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
/* Let the bits set. */
udelay(200);
- ASD(("done\n"));
+ ASD("done\n");
}
/* Only Sun can take such nice parts and fuck up the programming interface
@@ -952,44 +913,40 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
int result, tries = TCVR_RESET_TRIES;
tconfig = hme_read32(hp, tregs + TCVR_CFG);
- ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
+ ASD("tcfg=%08x\n", tconfig);
if (hp->tcvr_type == external) {
- ASD(("external<"));
hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
hp->tcvr_type = internal;
hp->paddr = TCV_PADDR_ITX;
- ASD(("ISOLATE,"));
happy_meal_tcvr_write(hp, tregs, MII_BMCR,
(BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
if (result == TCVR_FAILURE) {
- ASD(("phyread_fail>\n"));
+ ASD("phyread_fail\n");
return -1;
}
- ASD(("phyread_ok,PSELECT>"));
+ ASD("external: ISOLATE, phyread_ok, PSELECT\n");
hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
hp->tcvr_type = external;
hp->paddr = TCV_PADDR_ETX;
} else {
if (tconfig & TCV_CFG_MDIO1) {
- ASD(("internal<PSELECT,"));
hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
- ASD(("ISOLATE,"));
happy_meal_tcvr_write(hp, tregs, MII_BMCR,
(BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
if (result == TCVR_FAILURE) {
- ASD(("phyread_fail>\n"));
+ ASD("phyread_fail>\n");
return -1;
}
- ASD(("phyread_ok,~PSELECT>"));
+ ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
hp->tcvr_type = internal;
hp->paddr = TCV_PADDR_ITX;
}
}
- ASD(("BMCR_RESET "));
+ ASD("BMCR_RESET...\n");
happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
while (--tries) {
@@ -1002,10 +959,10 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
udelay(20);
}
if (!tries) {
- ASD(("BMCR RESET FAILED!\n"));
+ ASD("BMCR RESET FAILED!\n");
return -1;
}
- ASD(("RESET_OK\n"));
+ ASD("RESET_OK\n");
/* Get fresh copies of the PHY registers. */
hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
@@ -1013,7 +970,7 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
- ASD(("UNISOLATE"));
+ ASD("UNISOLATE...\n");
hp->sw_bmcr &= ~(BMCR_ISOLATE);
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1027,10 +984,10 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
udelay(20);
}
if (!tries) {
- ASD((" FAILED!\n"));
+ ASD("UNISOLATE FAILED!\n");
return -1;
}
- ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
+ ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
if (!is_lucent_phy(hp)) {
result = happy_meal_tcvr_read(hp, tregs,
DP83840_CSCONFIG);
@@ -1048,60 +1005,55 @@ static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tr
{
unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
- ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
+ ASD("tcfg=%08lx\n", tconfig);
if (hp->happy_flags & HFLAG_POLL) {
/* If we are polling, we must stop to get the transceiver type. */
- ASD(("<polling> "));
if (hp->tcvr_type == internal) {
if (tconfig & TCV_CFG_MDIO1) {
- ASD(("<internal> <poll stop> "));
happy_meal_poll_stop(hp, tregs);
hp->paddr = TCV_PADDR_ETX;
hp->tcvr_type = external;
- ASD(("<external>\n"));
tconfig &= ~(TCV_CFG_PENABLE);
tconfig |= TCV_CFG_PSELECT;
hme_write32(hp, tregs + TCVR_CFG, tconfig);
+ ASD("poll stop, internal->external\n");
}
} else {
if (hp->tcvr_type == external) {
- ASD(("<external> "));
if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
- ASD(("<poll stop> "));
happy_meal_poll_stop(hp, tregs);
hp->paddr = TCV_PADDR_ITX;
hp->tcvr_type = internal;
- ASD(("<internal>\n"));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) &
~(TCV_CFG_PSELECT));
+ ASD("poll stop, external->internal\n");
}
- ASD(("\n"));
} else {
- ASD(("<none>\n"));
+ ASD("polling, none\n");
}
}
} else {
u32 reread = hme_read32(hp, tregs + TCVR_CFG);
/* Else we can just work off of the MDIO bits. */
- ASD(("<not polling> "));
if (reread & TCV_CFG_MDIO1) {
hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
hp->paddr = TCV_PADDR_ETX;
hp->tcvr_type = external;
- ASD(("<external>\n"));
+ ASD("not polling, external\n");
} else {
if (reread & TCV_CFG_MDIO0) {
hme_write32(hp, tregs + TCVR_CFG,
tconfig & ~(TCV_CFG_PSELECT));
hp->paddr = TCV_PADDR_ITX;
hp->tcvr_type = internal;
- ASD(("<internal>\n"));
+ ASD("not polling, internal\n");
} else {
- printk(KERN_ERR "happy meal: Transceiver and a coke please.");
+ netdev_err(hp->dev,
+ "Transceiver and a coke please.");
hp->tcvr_type = none; /* Grrr... */
- ASD(("<none>\n"));
+ ASD("not polling, none\n");
}
}
}
@@ -1208,15 +1160,14 @@ static void happy_meal_init_rings(struct happy_meal *hp)
struct hmeal_init_block *hb = hp->happy_block;
int i;
- HMD(("happy_meal_init_rings: counters to zero, "));
+ HMD("counters to zero\n");
hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
/* Free any skippy bufs left around in the rings. */
- HMD(("clean, "));
happy_meal_clean_rings(hp);
/* Now get new skippy bufs for the receive ring. */
- HMD(("init rxring, "));
+ HMD("init rxring\n");
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
u32 mapping;
@@ -1243,11 +1194,11 @@ static void happy_meal_init_rings(struct happy_meal *hp)
skb_reserve(skb, RX_OFFSET);
}
- HMD(("init txring, "));
+ HMD("init txring\n");
for (i = 0; i < TX_RING_SIZE; i++)
hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -1294,17 +1245,11 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp,
* XXX so I completely skip checking for it in the BMSR for now.
*/
-#ifdef AUTO_SWITCH_DEBUG
- ASD(("%s: Advertising [ ", hp->dev->name));
- if (hp->sw_advertise & ADVERTISE_10HALF)
- ASD(("10H "));
- if (hp->sw_advertise & ADVERTISE_10FULL)
- ASD(("10F "));
- if (hp->sw_advertise & ADVERTISE_100HALF)
- ASD(("100H "));
- if (hp->sw_advertise & ADVERTISE_100FULL)
- ASD(("100F "));
-#endif
+ ASD("Advertising [ %s%s%s%s]\n",
+ hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
+ hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
+ hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
+ hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
/* Enable Auto-Negotiation, this is usually on already... */
hp->sw_bmcr |= BMCR_ANENABLE;
@@ -1324,10 +1269,11 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp,
udelay(10);
}
if (!timeout) {
- printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
- "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
- printk(KERN_NOTICE "%s: Performing force link detection.\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
+ hp->sw_bmcr);
+ netdev_notice(hp->dev,
+ "Performing force link detection.\n");
goto force_link;
} else {
hp->timer_state = arbwait;
@@ -1382,70 +1328,69 @@ static int happy_meal_init(struct happy_meal *hp)
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
+ const char *bursts;
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
del_timer(&hp->happy_timer);
- HMD(("happy_meal_init: happy_flags[%08x] ",
- hp->happy_flags));
+ HMD("happy_flags[%08x]\n", hp->happy_flags);
if (!(hp->happy_flags & HFLAG_INIT)) {
- HMD(("set HFLAG_INIT, "));
+ HMD("set HFLAG_INIT\n");
hp->happy_flags |= HFLAG_INIT;
happy_meal_get_counters(hp, bregs);
}
/* Stop polling. */
- HMD(("to happy_meal_poll_stop\n"));
+ HMD("to happy_meal_poll_stop\n");
happy_meal_poll_stop(hp, tregs);
/* Stop transmitter and receiver. */
- HMD(("happy_meal_init: to happy_meal_stop\n"));
+ HMD("to happy_meal_stop\n");
happy_meal_stop(hp, gregs);
/* Alloc and reset the tx/rx descriptor chains. */
- HMD(("happy_meal_init: to happy_meal_init_rings\n"));
+ HMD("to happy_meal_init_rings\n");
happy_meal_init_rings(hp);
/* Shut up the MIF. */
- HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
- hme_read32(hp, tregs + TCVR_IMASK)));
+ HMD("Disable all MIF irqs (old[%08x])\n",
+ hme_read32(hp, tregs + TCVR_IMASK));
hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
/* See if we can enable the MIF frame on this card to speak to the DP83840. */
if (hp->happy_flags & HFLAG_FENABLE) {
- HMD(("use frame old[%08x], ",
- hme_read32(hp, tregs + TCVR_CFG)));
+ HMD("use frame old[%08x]\n",
+ hme_read32(hp, tregs + TCVR_CFG));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
} else {
- HMD(("use bitbang old[%08x], ",
- hme_read32(hp, tregs + TCVR_CFG)));
+ HMD("use bitbang old[%08x]\n",
+ hme_read32(hp, tregs + TCVR_CFG));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
}
/* Check the state of the transceiver. */
- HMD(("to happy_meal_transceiver_check\n"));
+ HMD("to happy_meal_transceiver_check\n");
happy_meal_transceiver_check(hp, tregs);
/* Put the Big Mac into a sane state. */
- HMD(("happy_meal_init: "));
switch(hp->tcvr_type) {
case none:
/* Cannot operate if we don't know the transceiver type! */
- HMD(("AAIEEE no transceiver type, EAGAIN"));
+ HMD("AAIEEE no transceiver type, EAGAIN\n");
return -EAGAIN;
case internal:
/* Using the MII buffers. */
- HMD(("internal, using MII, "));
+ HMD("internal, using MII\n");
hme_write32(hp, bregs + BMAC_XIFCFG, 0);
break;
case external:
/* Not using the MII, disable it. */
- HMD(("external, disable MII, "));
+ HMD("external, disable MII\n");
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
}
@@ -1454,18 +1399,16 @@ static int happy_meal_init(struct happy_meal *hp)
return -EAGAIN;
/* Reset the Happy Meal Big Mac transceiver and the receiver. */
- HMD(("tx/rx reset, "));
+ HMD("tx/rx reset\n");
happy_meal_tx_reset(hp, bregs);
happy_meal_rx_reset(hp, bregs);
/* Set jam size and inter-packet gaps to reasonable defaults. */
- HMD(("jsize/ipg1/ipg2, "));
hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
/* Load up the MAC address and random seed. */
- HMD(("rseed/macaddr, "));
/* The docs recommend to use the 10LSB of our MAC here. */
hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
@@ -1474,7 +1417,6 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
- HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
@@ -1504,9 +1446,9 @@ static int happy_meal_init(struct happy_meal *hp)
}
/* Set the RX and TX ring ptrs. */
- HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
- ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
- ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
+ HMD("ring ptrs rxr[%08x] txr[%08x]\n",
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
hme_write32(hp, erxregs + ERX_RING,
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
hme_write32(hp, etxregs + ETX_RING,
@@ -1524,9 +1466,6 @@ static int happy_meal_init(struct happy_meal *hp)
| 0x4);
/* Set the supported burst sizes. */
- HMD(("happy_meal_init: old[%08x] bursts<",
- hme_read32(hp, gregs + GREG_CFG)));
-
#ifndef CONFIG_SPARC
/* It is always PCI and can handle 64byte bursts. */
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
@@ -1554,34 +1493,35 @@ static int happy_meal_init(struct happy_meal *hp)
}
#endif
- HMD(("64>"));
+ bursts = "64";
hme_write32(hp, gregs + GREG_CFG, gcfg);
} else if (hp->happy_bursts & DMA_BURST32) {
- HMD(("32>"));
+ bursts = "32";
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
} else if (hp->happy_bursts & DMA_BURST16) {
- HMD(("16>"));
+ bursts = "16";
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
} else {
- HMD(("XXX>"));
+ bursts = "XXX";
hme_write32(hp, gregs + GREG_CFG, 0);
}
#endif /* CONFIG_SPARC */
+ HMD("old[%08x] bursts<%s>\n",
+ hme_read32(hp, gregs + GREG_CFG), bursts);
+
/* Turn off interrupts we do not want to hear. */
- HMD((", enable global interrupts, "));
hme_write32(hp, gregs + GREG_IMASK,
(GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
/* Set the transmit ring buffer size. */
- HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
- hme_read32(hp, etxregs + ETX_RSIZE)));
+ HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
+ hme_read32(hp, etxregs + ETX_RSIZE));
hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
/* Enable transmitter DVMA. */
- HMD(("tx dma enable old[%08x], ",
- hme_read32(hp, etxregs + ETX_CFG)));
+ HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
hme_write32(hp, etxregs + ETX_CFG,
hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
@@ -1590,21 +1530,23 @@ static int happy_meal_init(struct happy_meal *hp)
* properly. I cannot think of a sane way to provide complete
* coverage for this hardware bug yet.
*/
- HMD(("erx regs bug old[%08x]\n",
- hme_read32(hp, erxregs + ERX_CFG)));
+ HMD("erx regs bug old[%08x]\n",
+ hme_read32(hp, erxregs + ERX_CFG));
hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
regtmp = hme_read32(hp, erxregs + ERX_CFG);
hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
- printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
- printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
- ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
+ netdev_err(hp->dev,
+ "Eieee, rx config register gets greasy fries.\n");
+ netdev_err(hp->dev,
+ "Trying to set %08x, reread gives %08x\n",
+ ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
/* XXX Should return failure here... */
}
/* Enable Big Mac hash table filter. */
- HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
- hme_read32(hp, bregs + BMAC_RXCFG)));
+ HMD("enable hash rx_cfg_old[%08x]\n",
+ hme_read32(hp, bregs + BMAC_RXCFG));
rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
if (hp->dev->flags & IFF_PROMISC)
rxcfg |= BIGMAC_RXCFG_PMISC;
@@ -1614,7 +1556,7 @@ static int happy_meal_init(struct happy_meal *hp)
udelay(10);
/* Ok, configure the Big Mac transmitter. */
- HMD(("BIGMAC init, "));
+ HMD("BIGMAC init\n");
regtmp = 0;
if (hp->happy_flags & HFLAG_FULL)
regtmp |= BIGMAC_TXCFG_FULLDPLX;
@@ -1638,14 +1580,13 @@ static int happy_meal_init(struct happy_meal *hp)
if (hp->tcvr_type == external)
regtmp |= BIGMAC_XCFG_MIIDISAB;
- HMD(("XIF config old[%08x], ",
- hme_read32(hp, bregs + BMAC_XIFCFG)));
+ HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
/* Start things up. */
- HMD(("tx old[%08x] and rx [%08x] ON!\n",
- hme_read32(hp, bregs + BMAC_TXCFG),
- hme_read32(hp, bregs + BMAC_RXCFG)));
+ HMD("tx old[%08x] and rx [%08x] ON!\n",
+ hme_read32(hp, bregs + BMAC_TXCFG),
+ hme_read32(hp, bregs + BMAC_RXCFG));
/* Set larger TX/RX size to allow for 802.1q */
hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
@@ -1735,25 +1676,26 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
GREG_STAT_SLVPERR))
- printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
- hp->dev->name, status);
+ netdev_err(hp->dev,
+ "Error interrupt for happy meal, status = %08x\n",
+ status);
if (status & GREG_STAT_RFIFOVF) {
/* Receive FIFO overflow is harmless and the hardware will take
care of it, just some packets are lost. Who cares. */
- printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
+ netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
}
if (status & GREG_STAT_STSTERR) {
/* BigMAC SQE link test failed. */
- printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
reset = 1;
}
if (status & GREG_STAT_TFIFO_UND) {
/* Transmit FIFO underrun, again DMA error likely. */
- printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Happy Meal transmitter FIFO underrun, DMA error.\n");
reset = 1;
}
@@ -1761,7 +1703,7 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Driver error, tried to transmit something larger
* than ethernet max mtu.
*/
- printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
reset = 1;
}
@@ -1771,21 +1713,16 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
* faster than the interrupt handler could keep up
* with.
*/
- printk(KERN_INFO "%s: Happy Meal out of receive "
- "descriptors, packet dropped.\n",
- hp->dev->name);
+ netdev_info(hp->dev,
+ "Happy Meal out of receive descriptors, packet dropped.\n");
}
if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
/* All sorts of DMA receive errors. */
- printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
- if (status & GREG_STAT_RXERR)
- printk("GenericError ");
- if (status & GREG_STAT_RXPERR)
- printk("ParityError ");
- if (status & GREG_STAT_RXTERR)
- printk("RxTagBotch ");
- printk("]\n");
+ netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
+ status & GREG_STAT_RXERR ? "GenericError " : "",
+ status & GREG_STAT_RXPERR ? "ParityError " : "",
+ status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
reset = 1;
}
@@ -1793,29 +1730,24 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Driver bug, didn't set EOP bit in tx descriptor given
* to the happy meal.
*/
- printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "EOP not set in happy meal transmit descriptor!\n");
reset = 1;
}
if (status & GREG_STAT_MIFIRQ) {
/* MIF signalled an interrupt, were we polling it? */
- printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
}
if (status &
(GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
/* All sorts of transmit DMA errors. */
- printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
- if (status & GREG_STAT_TXEACK)
- printk("GenericError ");
- if (status & GREG_STAT_TXLERR)
- printk("LateError ");
- if (status & GREG_STAT_TXPERR)
- printk("ParityError ");
- if (status & GREG_STAT_TXTERR)
- printk("TagBotch ");
- printk("]\n");
+ netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
+ status & GREG_STAT_TXEACK ? "GenericError " : "",
+ status & GREG_STAT_TXLERR ? "LateError " : "",
+ status & GREG_STAT_TXPERR ? "ParityError " : "",
+ status & GREG_STAT_TXTERR ? "TagBotch " : "");
reset = 1;
}
@@ -1823,14 +1755,14 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Bus or parity error when cpu accessed happy meal registers
* or it's internal FIFO's. Should never see this.
*/
- printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
- hp->dev->name,
- (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
+ netdev_err(hp->dev,
+ "Happy Meal register access SBUS slave (%s) error.\n",
+ (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
reset = 1;
}
if (reset) {
- printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
+ netdev_notice(hp->dev, "Resetting...\n");
happy_meal_init(hp);
return 1;
}
@@ -1842,22 +1774,22 @@ static void happy_meal_mif_interrupt(struct happy_meal *hp)
{
void __iomem *tregs = hp->tcvregs;
- printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
+ netdev_info(hp->dev, "Link status change.\n");
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
/* Use the fastest transmission protocol possible. */
if (hp->sw_lpa & LPA_100FULL) {
- printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 100Mbps at full duplex.\n");
hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
} else if (hp->sw_lpa & LPA_100HALF) {
- printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 100MBps at half duplex.\n");
hp->sw_bmcr |= BMCR_SPEED100;
} else if (hp->sw_lpa & LPA_10FULL) {
- printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 10MBps at full duplex.\n");
hp->sw_bmcr |= BMCR_FULLDPLX;
} else {
- printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Using 10Mbps at half duplex.\n");
}
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1865,12 +1797,6 @@ static void happy_meal_mif_interrupt(struct happy_meal *hp)
happy_meal_poll_stop(hp, tregs);
}
-#ifdef TXDEBUG
-#define TXD(x) printk x
-#else
-#define TXD(x)
-#endif
-
/* hp->happy_lock must be held */
static void happy_meal_tx(struct happy_meal *hp)
{
@@ -1880,13 +1806,12 @@ static void happy_meal_tx(struct happy_meal *hp)
int elem;
elem = hp->tx_old;
- TXD(("TX<"));
while (elem != hp->tx_new) {
struct sk_buff *skb;
u32 flags, dma_addr, dma_len;
int frag;
- TXD(("[%d]", elem));
+ netdev_vdbg(hp->dev, "TX[%d]\n", elem);
this = &txbase[elem];
flags = hme_read_desc32(hp, &this->tx_flags);
if (flags & TXFLAG_OWN)
@@ -1922,19 +1847,12 @@ static void happy_meal_tx(struct happy_meal *hp)
dev->stats.tx_packets++;
}
hp->tx_old = elem;
- TXD((">"));
if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(dev);
}
-#ifdef RXDEBUG
-#define RXD(x) printk x
-#else
-#define RXD(x)
-#endif
-
/* Originally I used to handle the allocation failure by just giving back just
* that one ring buffer to the happy meal. Problem is that usually when that
* condition is triggered, the happy meal expects you to do something reasonable
@@ -1951,7 +1869,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
int elem = hp->rx_new, drops = 0;
u32 flags;
- RXD(("RX<"));
this = &rxbase[elem];
while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
struct sk_buff *skb;
@@ -1959,11 +1876,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
u16 csum = flags & RXFLAG_CSUM;
u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
- RXD(("[%d ", elem));
-
/* Check for errors. */
if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
- RXD(("ERR(%08x)]", flags));
+ netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
dev->stats.rx_errors++;
if (len < ETH_ZLEN)
dev->stats.rx_length_errors++;
@@ -2020,9 +1935,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
@@ -2035,7 +1950,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb->csum = csum_unfold(~(__force __sum16)htons(csum));
skb->ip_summed = CHECKSUM_COMPLETE;
- RXD(("len=%d csum=%4x]", len, csum));
+ netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -2047,8 +1962,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
hp->rx_new = elem;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
- RXD((">"));
+ netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
}
static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
@@ -2057,32 +1971,25 @@ static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
struct happy_meal *hp = netdev_priv(dev);
u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
- HMD(("happy_meal_interrupt: status=%08x ", happy_status));
+ HMD("status=%08x\n", happy_status);
spin_lock(&hp->happy_lock);
if (happy_status & GREG_STAT_ERRORS) {
- HMD(("ERRORS "));
if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
goto out;
}
- if (happy_status & GREG_STAT_MIFIRQ) {
- HMD(("MIFIRQ "));
+ if (happy_status & GREG_STAT_MIFIRQ)
happy_meal_mif_interrupt(hp);
- }
- if (happy_status & GREG_STAT_TXALL) {
- HMD(("TXALL "));
+ if (happy_status & GREG_STAT_TXALL)
happy_meal_tx(hp);
- }
- if (happy_status & GREG_STAT_RXTOHOST) {
- HMD(("RXTOHOST "));
+ if (happy_status & GREG_STAT_RXTOHOST)
happy_meal_rx(hp, dev);
- }
- HMD(("done\n"));
+ HMD("done\n");
out:
spin_unlock(&hp->happy_lock);
@@ -2100,7 +2007,7 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
struct happy_meal *hp = netdev_priv(dev);
u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
- HMD(("quattro_interrupt: status=%08x ", happy_status));
+ HMD("status=%08x\n", happy_status);
if (!(happy_status & (GREG_STAT_ERRORS |
GREG_STAT_MIFIRQ |
@@ -2110,31 +2017,23 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
spin_lock(&hp->happy_lock);
- if (happy_status & GREG_STAT_ERRORS) {
- HMD(("ERRORS "));
+ if (happy_status & GREG_STAT_ERRORS)
if (happy_meal_is_not_so_happy(hp, happy_status))
goto next;
- }
- if (happy_status & GREG_STAT_MIFIRQ) {
- HMD(("MIFIRQ "));
+ if (happy_status & GREG_STAT_MIFIRQ)
happy_meal_mif_interrupt(hp);
- }
- if (happy_status & GREG_STAT_TXALL) {
- HMD(("TXALL "));
+ if (happy_status & GREG_STAT_TXALL)
happy_meal_tx(hp);
- }
- if (happy_status & GREG_STAT_RXTOHOST) {
- HMD(("RXTOHOST "));
+ if (happy_status & GREG_STAT_RXTOHOST)
happy_meal_rx(hp, dev);
- }
next:
spin_unlock(&hp->happy_lock);
}
- HMD(("done\n"));
+ HMD("done\n");
return IRQ_HANDLED;
}
@@ -2145,8 +2044,6 @@ static int happy_meal_open(struct net_device *dev)
struct happy_meal *hp = netdev_priv(dev);
int res;
- HMD(("happy_meal_open: "));
-
/* On SBUS Quattro QFE cards, all hme interrupts are concentrated
* into a single source which we register handling at probe time.
*/
@@ -2154,15 +2051,14 @@ static int happy_meal_open(struct net_device *dev)
res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
dev->name, dev);
if (res) {
- HMD(("EAGAIN\n"));
- printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
- hp->irq);
+ HMD("EAGAIN\n");
+ netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
return -EAGAIN;
}
}
- HMD(("to happy_meal_init\n"));
+ HMD("to happy_meal_init\n");
spin_lock_irq(&hp->happy_lock);
res = happy_meal_init(hp);
@@ -2196,22 +2092,16 @@ static int happy_meal_close(struct net_device *dev)
return 0;
}
-#ifdef SXDEBUG
-#define SXD(x) printk x
-#else
-#define SXD(x)
-#endif
-
static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct happy_meal *hp = netdev_priv(dev);
- printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
tx_dump_log();
- printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
- hme_read32(hp, hp->gregs + GREG_STAT),
- hme_read32(hp, hp->etxregs + ETX_CFG),
- hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
+ netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
+ hme_read32(hp, hp->gregs + GREG_STAT),
+ hme_read32(hp, hp->etxregs + ETX_CFG),
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
spin_lock_irq(&hp->happy_lock);
happy_meal_init(hp);
@@ -2261,13 +2151,12 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irq(&hp->happy_lock);
- printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
entry = hp->tx_new;
- SXD(("SX<l[%d]e[%d]>", len, entry));
+ netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
hp->tx_skbs[entry] = skb;
if (skb_shinfo(skb)->nr_frags == 0) {
@@ -2467,11 +2356,10 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strlcpy(info->driver, "sunhme", sizeof(info->driver));
- strlcpy(info->version, "2.02", sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2504,8 +2392,6 @@ static const struct ethtool_ops hme_ethtool_ops = {
.set_link_ksettings = hme_set_link_ksettings,
};
-static int hme_version_printed;
-
#ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find it's quattro parent.
* If none exist, allocate and return a new one.
@@ -2523,19 +2409,15 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
if (qp)
return qp;
- qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
- if (qp != NULL) {
- int i;
-
- for (i = 0; i < 4; i++)
- qp->happy_meals[i] = NULL;
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
- qp->quattro_dev = child;
- qp->next = qfe_sbus_list;
- qfe_sbus_list = qp;
+ qp->quattro_dev = child;
+ qp->next = qfe_sbus_list;
+ qfe_sbus_list = qp;
- platform_set_drvdata(op, qp);
- }
+ platform_set_drvdata(op, qp);
return qp;
}
@@ -2563,8 +2445,9 @@ static int __init quattro_sbus_register_irqs(void)
IRQF_SHARED, "Quattro",
qp);
if (err != 0) {
- printk(KERN_ERR "Quattro HME: IRQ registration "
- "error %d.\n", err);
+ dev_err(&op->dev,
+ "Quattro HME: IRQ registration error %d.\n",
+ err);
return err;
}
}
@@ -2595,30 +2478,33 @@ static void quattro_sbus_free_irqs(void)
#ifdef CONFIG_PCI
static struct quattro *quattro_pci_find(struct pci_dev *pdev)
{
+ int i;
struct pci_dev *bdev = pdev->bus->self;
struct quattro *qp;
- if (!bdev) return NULL;
+ if (!bdev)
+ return ERR_PTR(-ENODEV);
+
for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
struct pci_dev *qpdev = qp->quattro_dev;
if (qpdev == bdev)
return qp;
}
+
qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
- if (qp != NULL) {
- int i;
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
- for (i = 0; i < 4; i++)
- qp->happy_meals[i] = NULL;
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
- qp->quattro_dev = bdev;
- qp->next = qfe_pci_list;
- qfe_pci_list = qp;
+ qp->quattro_dev = bdev;
+ qp->next = qfe_pci_list;
+ qfe_pci_list = qp;
- /* No range tricks necessary on PCI. */
- qp->nranges = 0;
- }
+ /* No range tricks necessary on PCI. */
+ qp->nranges = 0;
return qp;
}
#endif /* CONFIG_PCI */
@@ -2668,9 +2554,6 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out;
SET_NETDEV_DEV(dev, &op->dev);
- if (hme_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
/* If user did not specify a MAC address specifically, use
* the Quattro local-mac-address property...
*/
@@ -2712,35 +2595,35 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
hp->gregs = of_ioremap(&op->resource[0], 0,
GREG_REG_SIZE, "HME Global Regs");
if (!hp->gregs) {
- printk(KERN_ERR "happymeal: Cannot map global registers.\n");
+ dev_err(&op->dev, "Cannot map global registers.\n");
goto err_out_free_netdev;
}
hp->etxregs = of_ioremap(&op->resource[1], 0,
ETX_REG_SIZE, "HME TX Regs");
if (!hp->etxregs) {
- printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
+ dev_err(&op->dev, "Cannot map MAC TX registers.\n");
goto err_out_iounmap;
}
hp->erxregs = of_ioremap(&op->resource[2], 0,
ERX_REG_SIZE, "HME RX Regs");
if (!hp->erxregs) {
- printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
+ dev_err(&op->dev, "Cannot map MAC RX registers.\n");
goto err_out_iounmap;
}
hp->bigmacregs = of_ioremap(&op->resource[3], 0,
BMAC_REG_SIZE, "HME BIGMAC Regs");
if (!hp->bigmacregs) {
- printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
+ dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
goto err_out_iounmap;
}
hp->tcvregs = of_ioremap(&op->resource[4], 0,
TCVR_REG_SIZE, "HME Tranceiver Regs");
if (!hp->tcvregs) {
- printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
+ dev_err(&op->dev, "Cannot map TCVR registers.\n");
goto err_out_iounmap;
}
@@ -2807,21 +2690,19 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
err = register_netdev(hp->dev);
if (err) {
- printk(KERN_ERR "happymeal: Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting.\n");
goto err_out_free_coherent;
}
platform_set_drvdata(op, hp);
if (qfe_slot != -1)
- printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
- dev->name, qfe_slot);
+ netdev_info(dev,
+ "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
+ qfe_slot, dev->dev_addr);
else
- printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
- dev->name);
-
- printk("%pM\n", dev->dev_addr);
+ netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
@@ -2949,7 +2830,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
struct happy_meal *hp;
struct net_device *dev;
void __iomem *hpreg_base;
- unsigned long hpreg_res;
+ struct resource *hpreg_res;
int i, qfe_slot = -1;
char prom_name[64];
u8 addr[ETH_ALEN];
@@ -2966,32 +2847,33 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
strcpy(prom_name, "SUNW,hme");
#endif
- err = -ENODEV;
-
- if (pci_enable_device(pdev))
+ err = pcim_enable_device(pdev);
+ if (err)
goto err_out;
pci_set_master(pdev);
if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
qp = quattro_pci_find(pdev);
- if (qp == NULL)
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
goto err_out;
+ }
+
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
- if (qp->happy_meals[qfe_slot] == NULL)
+ if (!qp->happy_meals[qfe_slot])
break;
+
if (qfe_slot == 4)
goto err_out;
}
- dev = alloc_etherdev(sizeof(struct happy_meal));
- err = -ENOMEM;
- if (!dev)
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
+ if (!dev) {
+ err = -ENOMEM;
goto err_out;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
- if (hme_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
hp = netdev_priv(dev);
hp->happy_dev = pdev;
@@ -3005,21 +2887,26 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
qp->happy_meals[qfe_slot] = dev;
}
- hpreg_res = pci_resource_start(pdev, 0);
- err = -ENODEV;
+ err = -EINVAL;
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
- printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
+ dev_err(&pdev->dev,
+ "Cannot find proper PCI device base address.\n");
goto err_out_clear_quattro;
}
- if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
- "aborting.\n");
+
+ hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), DRV_NAME);
+ if (!hpreg_res) {
+ err = -EBUSY;
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_clear_quattro;
}
- if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
- printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
- goto err_out_free_res;
+ hpreg_base = pcim_iomap(pdev, 0, 0x8000);
+ if (!hpreg_base) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Unable to remap card memory.\n");
+ goto err_out_clear_quattro;
}
for (i = 0; i < 6; i++) {
@@ -3085,11 +2972,12 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS;
#endif
- hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
- &hp->hblock_dvma, GFP_KERNEL);
- err = -ENODEV;
- if (!hp->happy_block)
- goto err_out_iounmap;
+ hp->happy_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &hp->hblock_dvma, GFP_KERNEL);
+ if (!hp->happy_block) {
+ err = -ENOMEM;
+ goto err_out_clear_quattro;
+ }
hp->linkcheck = 0;
hp->timer_state = asleep;
@@ -3123,11 +3011,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- err = register_netdev(hp->dev);
+ err = devm_register_netdev(&pdev->dev, dev);
if (err) {
- printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
- "aborting.\n");
- goto err_out_free_coherent;
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clear_quattro;
}
pci_set_drvdata(pdev, hp);
@@ -3140,61 +3027,30 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
int i = simple_strtoul(dev->name + 3, NULL, 10);
sprintf(prom_name, "-%d", i + 3);
}
- printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
- if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
- qpdev->device == PCI_DEVICE_ID_DEC_21153)
- printk("DEC 21153 PCI Bridge\n");
- else
- printk("unknown bridge %04x.%04x\n",
- qpdev->vendor, qpdev->device);
+ netdev_info(dev,
+ "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
+ prom_name, qpdev->vendor, qpdev->device);
}
if (qfe_slot != -1)
- printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
- dev->name, qfe_slot);
+ netdev_info(dev,
+ "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
+ qfe_slot, dev->dev_addr);
else
- printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
- dev->name);
-
- printk("%pM\n", dev->dev_addr);
+ netdev_info(dev,
+ "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
-err_out_free_coherent:
- dma_free_coherent(hp->dma_dev, PAGE_SIZE,
- hp->happy_block, hp->hblock_dvma);
-
-err_out_iounmap:
- iounmap(hp->gregs);
-
-err_out_free_res:
- pci_release_regions(pdev);
-
err_out_clear_quattro:
if (qp != NULL)
qp->happy_meals[qfe_slot] = NULL;
- free_netdev(dev);
-
err_out:
return err;
}
-static void happy_meal_pci_remove(struct pci_dev *pdev)
-{
- struct happy_meal *hp = pci_get_drvdata(pdev);
- struct net_device *net_dev = hp->dev;
-
- unregister_netdev(net_dev);
-
- dma_free_coherent(hp->dma_dev, PAGE_SIZE,
- hp->happy_block, hp->hblock_dvma);
- iounmap(hp->gregs);
- pci_release_regions(hp->happy_dev);
-
- free_netdev(net_dev);
-}
-
static const struct pci_device_id happymeal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
@@ -3206,7 +3062,6 @@ static struct pci_driver hme_pci_driver = {
.name = "hme",
.id_table = happymeal_pci_ids,
.probe = happy_meal_pci_probe,
- .remove = happy_meal_pci_remove,
};
static int __init happy_meal_pci_init(void)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index efe0d33f6024..6418fcc3139f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -684,8 +684,8 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
- strlcpy(info->driver, "sunqe", sizeof(info->driver));
- strlcpy(info->version, "3.0", sizeof(info->version));
+ strscpy(info->driver, "sunqe", sizeof(info->driver));
+ strscpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index da8119625cf3..acda6cbd0238 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -60,8 +60,8 @@ static struct vio_version vnet_versions[] = {
static void vnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vnet_get_msglevel(struct net_device *dev)
@@ -467,8 +467,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 546206640492..9be585237277 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -62,7 +62,8 @@ static int spl2sw_ethernet_stop(struct net_device *ndev)
return 0;
}
-static int spl2sw_ethernet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t spl2sw_ethernet_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
@@ -248,8 +249,8 @@ static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *
/* Check if mac address is valid */
if (!is_valid_ether_addr(mac)) {
- kfree(mac);
dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
+ kfree(mac);
return -EINVAL;
}
@@ -492,7 +493,7 @@ static int spl2sw_probe(struct platform_device *pdev)
}
/* Add and enable napi. */
- netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll);
napi_enable(&comm->rx_napi);
netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index 5c9b6c90942b..f8e133604146 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -54,8 +54,8 @@ static void xlgmac_default_config(struct xlgmac_pdata *pdata)
pdata->phy_speed = SPEED_25000;
pdata->sysclk_rate = XLGMAC_SYSCLOCK;
- strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
- strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+ strscpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+ strscpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
}
static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
index 49f8c6be9459..e794da727fe0 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -102,9 +102,9 @@ static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev,
u32 ver = pdata->hw_feat.version;
u32 snpsver, devid, userver;
- strlcpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
/* S|SNPSVER: Synopsys-defined Version
* D|DEVID: Indicates the Device family
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index e54ce73396ee..36b948820c1e 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -419,15 +419,14 @@ static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xlgmac_one_poll,
- NAPI_POLL_WEIGHT);
+ xlgmac_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xlgmac_all_poll, NAPI_POLL_WEIGHT);
+ xlgmac_all_poll);
napi_enable(&pdata->napi);
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 985073eba3bd..ca409515ead5 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1994,7 +1994,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->nic = nic;
priv->msg_enable = BDX_DEF_MSG_ENABLE;
- netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
+ netif_napi_add(ndev, &priv->napi, bdx_poll);
if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
DBG("HW statistics not supported\n");
@@ -2133,10 +2133,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct bdx_priv *priv = netdev_priv(netdev);
- strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
+ strscpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index fb30bc5d56cb..fce06663e1e1 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -33,6 +33,7 @@ config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select PHYLIB
+ select MDIO_BITBANG
help
This driver supports TI's DaVinci MDIO module.
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index abc1e4276cf0..c51e2af91f69 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -402,9 +402,9 @@ static void am65_cpsw_get_drvinfo(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- strlcpy(info->driver, dev_driver_string(common->dev),
+ strscpy(info->driver, dev_driver_string(common->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
}
static u32 am65_cpsw_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index f4a6b590a1e3..3cbe4ec46234 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -74,6 +74,9 @@
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+#define AM65_CPSW_SGMII_CONTROL_REG 0x010
+#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
+
#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
@@ -360,8 +363,7 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
-static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
- netdev_features_t features)
+static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
int port_idx, i, ret;
@@ -574,7 +576,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
for (i = 0; i < common->tx_ch_num; i++)
netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
- ret = am65_cpsw_nuss_common_open(common, ndev->features);
+ ret = am65_cpsw_nuss_common_open(common);
if (ret)
return ret;
@@ -590,11 +592,6 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
- port->slave.phy_if);
- if (ret)
- goto error_cleanup;
-
ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
if (ret)
goto error_cleanup;
@@ -1409,7 +1406,14 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- /* Currently not used */
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+
+ if (common->pdata.extra_modes & BIT(state->interface))
+ writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
+ port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
}
static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
@@ -1847,6 +1851,8 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->common = common;
port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ if (common->pdata.extra_modes)
+ port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
@@ -1886,6 +1892,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
}
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ if (ret)
+ goto of_node_put;
+
ret = of_get_mac_address(port_np, port->slave.mac_addr);
if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
@@ -1981,7 +1991,18 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->slave.phylink_config.type = PHYLINK_NETDEV;
port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
- phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ } else if (port->slave.phy_if == PHY_INTERFACE_MODE_RMII) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else {
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
phylink = phylink_create(&port->slave.phylink_config,
of_node_to_fwnode(port->slave.phy_node),
@@ -2023,7 +2044,7 @@ static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
}
netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+ am65_cpsw_nuss_rx_poll);
return ret;
}
@@ -2611,10 +2632,18 @@ static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};
+static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
+ { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index ac945631bf2f..2c9850fdfcb6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -46,6 +46,7 @@ struct am65_cpsw_port {
const char *name;
u32 port_id;
void __iomem *port_base;
+ void __iomem *sgmii_base;
void __iomem *stat_base;
void __iomem *fetch_ram_base;
bool disabled;
@@ -88,6 +89,7 @@ struct am65_cpsw_rx_chn {
struct am65_cpsw_pdata {
u32 quirks;
+ u64 extra_modes;
enum k3_ring_mode fdqring_mode;
const char *ale_dev_id;
};
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c30a6e510aa3..e2f0fb286143 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -943,9 +943,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->irq = of_irq_get_byname(node, "cpts");
if (cpts->irq <= 0) {
ret = cpts->irq ?: -ENXIO;
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get IRQ number (err = %d)\n",
- ret);
+ dev_err_probe(dev, ret, "Failed to get IRQ number\n");
return ERR_PTR(ret);
}
@@ -965,8 +963,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
if (IS_ERR(cpts->refclk)) {
ret = PTR_ERR(cpts->refclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get refclk %d\n", ret);
+ dev_err_probe(dev, ret, "Failed to get refclk\n");
return ERR_PTR(ret);
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index bef5e68dac31..80eeeb463c4f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -851,8 +851,8 @@ static int cpmac_set_ringparam(struct net_device *dev,
static void cpmac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "cpmac", sizeof(info->driver));
- strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ strscpy(info->driver, "cpmac", sizeof(info->driver));
+ strscpy(info->version, CPMAC_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}
@@ -1109,7 +1109,7 @@ static int cpmac_probe(struct platform_device *pdev)
dev->netdev_ops = &cpmac_netdev_ops;
dev->ethtool_ops = &cpmac_ethtool_ops;
- netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+ netif_napi_add(dev, &priv->napi, cpmac_poll);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->rx_lock);
@@ -1169,7 +1169,7 @@ static struct platform_driver cpmac_driver = {
.remove = cpmac_remove,
};
-int cpmac_init(void)
+int __init cpmac_init(void)
{
u32 mask;
int i, res;
@@ -1239,7 +1239,7 @@ fail_alloc:
return res;
}
-void cpmac_exit(void)
+void __exit cpmac_exit(void)
{
platform_driver_unregister(&cpmac_driver);
mdiobus_unregister(cpmac_mii);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ed66c4d4d830..709ca6dd6ecb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1172,9 +1172,9 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
struct platform_device *pdev = to_platform_device(cpsw->dev);
- strlcpy(info->driver, "cpsw", sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, "cpsw", sizeof(info->driver));
+ strscpy(info->version, "1.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int cpsw_set_pauseparam(struct net_device *ndev,
@@ -1319,8 +1319,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
*/
ret = of_phy_register_fixed_link(slave_node);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n");
goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
@@ -1638,8 +1637,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &cpsw->napi_rx,
- cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
- NAPI_POLL_WEIGHT);
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
netif_napi_add_tx(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 353e58b22c51..83596ec0c7cb 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1146,9 +1146,9 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev;
pdev = to_platform_device(cpsw->dev);
- strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
- strlcpy(info->version, "2.0", sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int cpsw_set_pauseparam(struct net_device *ndev,
@@ -1288,9 +1288,8 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
- port_np, ret);
+ dev_err_probe(dev, ret, "%pOF failed to register fixed-link phy\n",
+ port_np);
goto err_node_put;
}
slave_data->phy_node = of_node_get(port_np);
@@ -1417,9 +1416,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
* accordingly.
*/
netif_napi_add(ndev, &cpsw->napi_rx,
- cpsw->quirk_irq ?
- cpsw_rx_poll : cpsw_rx_mq_poll,
- NAPI_POLL_WEIGHT);
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
netif_napi_add_tx(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ?
cpsw_tx_poll : cpsw_tx_mq_poll);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2a3e4e842fa5..2eb9d5a32588 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -374,8 +374,8 @@ static char *emac_rxhost_errcodes[16] = {
static void emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, emac_version_string, sizeof(info->driver));
- strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, emac_version_string, sizeof(info->driver));
+ strscpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
}
/**
@@ -949,7 +949,7 @@ static void emac_tx_handler(void *token, int len, int status)
*
* Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
*/
-static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
@@ -1948,7 +1948,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &ethtool_ops;
- netif_napi_add(ndev, &priv->napi, emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, emac_poll);
pm_runtime_enable(&pdev->dev);
rc = pm_runtime_resume_and_get(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index ea3772618043..946b9753ccfb 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -26,6 +26,8 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/sys_soc.h>
/*
* This timeout definition is a worst-case ultra defensive measure against
@@ -41,6 +43,7 @@
struct davinci_mdio_of_param {
int autosuspend_delay_ms;
+ bool manual_mode;
};
struct davinci_mdio_regs {
@@ -49,6 +52,15 @@ struct davinci_mdio_regs {
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
#define CONTROL_MAX_DIV (0xffff)
+#define CONTROL_CLKDIV GENMASK(15, 0)
+
+#define MDIO_MAN_MDCLK_O BIT(2)
+#define MDIO_MAN_OE BIT(1)
+#define MDIO_MAN_PIN BIT(0)
+#define MDIO_MANUALMODE BIT(31)
+
+#define MDIO_PIN 0
+
u32 alive;
u32 link;
@@ -59,7 +71,9 @@ struct davinci_mdio_regs {
u32 userintmasked;
u32 userintmaskset;
u32 userintmaskclr;
- u32 __reserved_1[20];
+ u32 manualif;
+ u32 poll;
+ u32 __reserved_1[18];
struct {
u32 access;
@@ -79,6 +93,7 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
+ struct mdiobb_ctrl bb_ctrl;
struct davinci_mdio_regs __iomem *regs;
struct clk *clk;
struct device *dev;
@@ -90,6 +105,7 @@ struct davinci_mdio_data {
*/
bool skip_scan;
u32 clk_div;
+ bool manual_mode;
};
static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
@@ -128,9 +144,122 @@ static void davinci_mdio_enable(struct davinci_mdio_data *data)
writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
-static int davinci_mdio_reset(struct mii_bus *bus)
+static void davinci_mdio_disable(struct davinci_mdio_data *data)
+{
+ u32 reg;
+
+ /* Disable MDIO state machine */
+ reg = readl(&data->regs->control);
+
+ reg &= ~CONTROL_CLKDIV;
+ reg |= data->clk_div;
+
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &data->regs->control);
+}
+
+static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
+{
+ u32 reg;
+ /* set manual mode */
+ reg = readl(&data->regs->poll);
+ reg |= MDIO_MANUALMODE;
+ writel(reg, &data->regs->poll);
+}
+
+static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (level)
+ reg |= MDIO_MAN_MDCLK_O;
+ else
+ reg &= ~MDIO_MAN_MDCLK_O;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (output)
+ reg |= MDIO_MAN_OE;
+ else
+ reg &= ~MDIO_MAN_OE;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (value)
+ reg |= MDIO_MAN_PIN;
+ else
+ reg &= ~MDIO_MAN_PIN;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct davinci_mdio_data *data;
+ unsigned long reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+ return test_bit(MDIO_PIN, &reg);
+}
+
+static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_read(bus, phy, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_write(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
{
- struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
int ret;
@@ -138,6 +267,11 @@ static int davinci_mdio_reset(struct mii_bus *bus)
if (ret < 0)
return ret;
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ }
+
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -171,6 +305,23 @@ done:
return 0;
}
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+
+ return davinci_mdio_common_reset(data);
+}
+
+static int davinci_mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ struct davinci_mdio_data *data;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+
+ return davinci_mdio_common_reset(data);
+}
+
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
@@ -318,6 +469,28 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return 0;
}
+struct k3_mdio_soc_data {
+ bool manual_mode;
+};
+
+static const struct k3_mdio_soc_data am65_mdio_soc_data = {
+ .manual_mode = true,
+};
+
+static const struct soc_device_attribute k3_mdio_socinfo[] = {
+ { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { /* sentinel */ },
+};
+
#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
.autosuspend_delay_ms = 100,
@@ -331,6 +504,14 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif
+static const struct mdiobb_ops davinci_mdiobb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = davinci_set_mdc,
+ .set_mdio_dir = davinci_set_mdio_dir,
+ .set_mdio_data = davinci_set_mdio_data,
+ .get_mdio_data = davinci_get_mdio_data,
+};
+
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -345,7 +526,26 @@ static int davinci_mdio_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->bus = devm_mdiobus_alloc(dev);
+ data->manual_mode = false;
+ data->bb_ctrl.ops = &davinci_mdiobb_ops;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ const struct soc_device_attribute *soc_match_data;
+
+ soc_match_data = soc_device_match(k3_mdio_socinfo);
+ if (soc_match_data && soc_match_data->data) {
+ const struct k3_mdio_soc_data *socdata =
+ soc_match_data->data;
+
+ data->manual_mode = socdata->manual_mode;
+ }
+ }
+
+ if (data->manual_mode)
+ data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
+ else
+ data->bus = devm_mdiobus_alloc(dev);
+
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
return -ENOMEM;
@@ -371,11 +571,20 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
data->bus->name = dev_name(dev);
- data->bus->read = davinci_mdio_read;
- data->bus->write = davinci_mdio_write;
- data->bus->reset = davinci_mdio_reset;
+
+ if (data->manual_mode) {
+ data->bus->read = davinci_mdiobb_read;
+ data->bus->write = davinci_mdiobb_write;
+ data->bus->reset = davinci_mdiobb_reset;
+
+ dev_info(dev, "Configuring MDIO in manual mode\n");
+ } else {
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->priv = data;
+ }
data->bus->parent = dev;
- data->bus->priv = data;
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
@@ -433,9 +642,13 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus)
+ if (data->bus) {
mdiobus_unregister(data->bus);
+ if (data->manual_mode)
+ free_mdio_bitbang(data->bus);
+ }
+
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -452,7 +665,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
writel(ctrl, &data->regs->control);
- wait_for_idle(data);
+
+ if (!data->manual_mode)
+ wait_for_idle(data);
return 0;
}
@@ -461,7 +676,12 @@ static int davinci_mdio_runtime_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- davinci_mdio_enable(data);
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ } else {
+ davinci_mdio_enable(data);
+ }
return 0;
}
#endif
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index b15d44261e76..aba70bef4894 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2095,7 +2095,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
}
/* NAPI register */
- netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);
/* Register the network device */
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 741c42c6a417..b3da76efa8f5 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -762,12 +762,12 @@ static void tlan_get_drvinfo(struct net_device *dev,
{
struct tlan_priv *priv = netdev_priv(dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
if (priv->pci_dev)
- strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
else
- strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
+ strscpy(info->bus_info, "EISA", sizeof(info->bus_info));
}
static int tlan_get_eeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3dbfb1b20649..cf8de8a7a8a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1187,8 +1187,8 @@ int gelic_net_open(struct net_device *netdev)
void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int gelic_ether_get_link_ksettings(struct net_device *netdev,
@@ -1441,7 +1441,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
{
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */
- netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, napi, gelic_net_poll);
netdev->ethtool_ops = &gelic_ether_ethtool_ops;
netdev->netdev_ops = &gelic_netdevice_ops;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index bc4914c758ad..50d7eacfec58 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2270,8 +2270,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->aneg_count = 0;
timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
- netif_napi_add(netdev, &card->napi,
- spider_net_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &card->napi, spider_net_poll);
spider_net_setup_netdev_ops(netdev);
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 93110dba0bfa..fef9fd127b5e 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -63,12 +63,12 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
card = netdev_priv(netdev);
/* clear and fill out info */
- strlcpy(drvinfo->driver, spider_net_driver_name,
+ strscpy(drvinfo->driver, spider_net_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "no information",
+ strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "no information",
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(card->pdev),
+ strscpy(drvinfo->bus_info, pci_name(card->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 47aab9c132c8..b50be67b398b 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1956,9 +1956,9 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct tc35815_local *lp = netdev_priv(dev);
- strlcpy(info->driver, MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
static u32 tc35815_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 5251fc324221..2cd2afc3fff0 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -59,9 +59,6 @@
/* Check the phy status every half a second. */
#define CHECK_PHY_INTERVAL (HZ/2)
-static int tsi108_init_one(struct platform_device *pdev);
-static int tsi108_ether_remove(struct platform_device *pdev);
-
struct tsi108_prv_data {
void __iomem *regs; /* Base of normal regs */
void __iomem *phyregs; /* Base of register bank used for PHY access */
@@ -144,16 +141,6 @@ struct tsi108_prv_data {
struct platform_device *pdev;
};
-/* Structure for a device driver */
-
-static struct platform_driver tsi_eth_driver = {
- .probe = tsi108_init_one,
- .remove = tsi108_ether_remove,
- .driver = {
- .name = "tsi-ethernet",
- },
-};
-
static void tsi108_timed_checker(struct timer_list *t);
#ifdef DEBUG
@@ -1598,7 +1585,7 @@ tsi108_init_one(struct platform_device *pdev)
data->phy_type = einfo->phy_type;
data->irq_num = einfo->irq_num;
data->id = pdev->id;
- netif_napi_add(dev, &data->napi, tsi108_poll, 64);
+ netif_napi_add(dev, &data->napi, tsi108_poll);
dev->netdev_ops = &tsi108_netdev_ops;
dev->ethtool_ops = &tsi108_ethtool_ops;
@@ -1683,6 +1670,16 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+ .probe = tsi108_init_one,
+ .remove = tsi108_ether_remove,
+ .driver = {
+ .name = "tsi-ethernet",
+ },
+};
module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index eb39a45de012..aeed2a093e34 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -750,6 +750,13 @@ static const struct of_device_id mse102x_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mse102x_match_table);
+static const struct spi_device_id mse102x_ids[] = {
+ { "mse1021" },
+ { "mse1022" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mse102x_ids);
+
static struct spi_driver mse102x_driver = {
.driver = {
.name = DRV_NAME,
@@ -758,10 +765,11 @@ static struct spi_driver mse102x_driver = {
},
.probe = mse102x_probe_spi,
.remove = mse102x_remove_spi,
+ .id_table = mse102x_ids,
};
module_spi_driver(mse102x_driver);
MODULE_DESCRIPTION("MSE102x Network driver");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@chargebyte.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 509c5e9b29df..0fb15a17b547 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -965,7 +965,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
+ netif_napi_add(dev, &rp->napi, rhine_napipoll);
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
@@ -2281,8 +2281,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct device *hwdev = dev->dev.parent;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ff0c102cb578..a502812ac418 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2846,7 +2846,7 @@ static int velocity_probe(struct device *dev, int irq,
netdev->netdev_ops = &velocity_netdev_ops;
netdev->ethtool_ops = &velocity_ethtool_ops;
- netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &vptr->napi, velocity_poll);
netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX;
@@ -3419,13 +3419,13 @@ static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
struct velocity_info *vptr = netdev_priv(dev);
- strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
- strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strscpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strscpy(info->version, VELOCITY_VERSION, sizeof(info->version));
if (vptr->pdev)
- strlcpy(info->bus_info, pci_name(vptr->pdev),
+ strscpy(info->bus_info, pci_name(vptr->pdev),
sizeof(info->bus_info));
else
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index b4a4fa0a58f8..f5d43d8c9629 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -16,6 +16,19 @@ config NET_VENDOR_WANGXUN
if NET_VENDOR_WANGXUN
+config NGBE
+ tristate "Wangxun(R) GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ngbe.
+
config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index c34db1bead25..ac3fb06b233c 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_TXGBE) += txgbe/
+obj-$(CONFIG_NGBE) += ngbe/
diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile
new file mode 100644
index 000000000000..0baf75907496
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_NGBE) += ngbe.o
+
+ngbe-objs := ngbe_main.o
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
new file mode 100644
index 000000000000..f5fa6e5238cc
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_H_
+#define _NGBE_H_
+
+#include "ngbe_type.h"
+
+#define NGBE_MAX_FDIR_INDICES 7
+
+#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct ngbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char ngbe_driver_name[];
+
+#endif /* _NGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
new file mode 100644
index 000000000000..7674cb6e5700
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "ngbe.h"
+char ngbe_driver_name[] = "ngbe";
+
+/* ngbe_pci_tbl - PCI Device ID Table
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ngbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ /* no wake-up source is configured by this minimal driver yet,
+  * so make sure the caller's flag is not left uninitialized
+  */
+ *enable_wake = false;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void ngbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ ngbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * ngbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ngbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ngbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int ngbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct ngbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ ngbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct ngbe_adapter),
+ NGBE_MAX_TX_QUEUES,
+ NGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ngbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ngbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void ngbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ngbe_driver = {
+ .name = ngbe_driver_name,
+ .id_table = ngbe_pci_tbl,
+ .probe = ngbe_probe,
+ .remove = ngbe_remove,
+ .shutdown = ngbe_shutdown,
+};
+
+module_pci_driver(ngbe_driver);
+
+MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@net-swift.com>");
+MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
new file mode 100644
index 000000000000..26e776c3539a
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_TYPE_H_
+#define _NGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ NGBE_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100
+#define NGBE_DEV_ID_EM_WX1860A2 0x0101
+#define NGBE_DEV_ID_EM_WX1860A2S 0x0102
+#define NGBE_DEV_ID_EM_WX1860A4 0x0103
+#define NGBE_DEV_ID_EM_WX1860A4S 0x0104
+#define NGBE_DEV_ID_EM_WX1860AL2 0x0105
+#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106
+#define NGBE_DEV_ID_EM_WX1860AL4 0x0107
+#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108
+#define NGBE_DEV_ID_EM_WX1860LC 0x0109
+#define NGBE_DEV_ID_EM_WX1860A1 0x010a
+#define NGBE_DEV_ID_EM_WX1860A1L 0x010b
+
+/* Subsystem ID */
+#define NGBE_SUBID_M88E1512_SFP 0x0003
+#define NGBE_SUBID_OCP_CARD 0x0040
+#define NGBE_SUBID_LY_M88E1512_SFP 0x0050
+#define NGBE_SUBID_M88E1512_RJ45 0x0051
+#define NGBE_SUBID_M88E1512_MIX 0x0052
+#define NGBE_SUBID_YT8521S_SFP 0x0060
+#define NGBE_SUBID_INTERNAL_YT8521S_SFP 0x0061
+#define NGBE_SUBID_YT8521S_SFP_GPIO 0x0062
+#define NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO 0x0064
+#define NGBE_SUBID_LY_YT8521S_SFP 0x0070
+#define NGBE_SUBID_RGMII_FPGA 0x0080
+
+#define NGBE_OEM_MASK 0x00FF
+
+#define NGBE_NCSI_SUP 0x8000
+#define NGBE_NCSI_MASK 0x8000
+#define NGBE_WOL_SUP 0x4000
+#define NGBE_WOL_MASK 0x4000
+
+#endif /* _NGBE_TYPE_H_ */
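
For reference, each PCI_VDEVICE() entry in the new ngbe_pci_tbl is shorthand for a full struct pci_device_id initializer using the vendor ID defined in ngbe_type.h. A sketch of the explicit equivalent for one entry (hypothetical table name, matching any subsystem ID):

	static const struct pci_device_id example_tbl[] = {
		{ PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2,
		  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
		{ /* sentinel */ }
	};
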
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index acd78120e53c..634946e87e5f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -719,9 +719,9 @@ static void w5100_hw_close(struct w5100_priv *priv)
static void w5100_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 773f8c77909a..b0958fe8111e 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -282,9 +282,9 @@ static void w5300_hw_close(struct w5300_priv *priv)
static void w5300_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index c6395c406418..6668d1b760d8 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -21,36 +21,45 @@
/* Configuration options */
/* Accept all incoming packets.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_PROMISC (1 << 0)
/* Jumbo frame support for Tx & Rx.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_JUMBO (1 << 1)
/* VLAN Rx & Tx frame support.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_VLAN (1 << 2)
/* Enable recognition of flow control frames on Rx
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames.
* Note: PAD from VLAN frames is not stripped.
- * This option defaults to disabled (set) */
+ * This option defaults to disabled (set)
+ */
#define XTE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
-set, the MAC will filter frames that have a mismatched type/length field
-and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
-types of frames are encountered. When this option is cleared, the MAC will
-allow these types of frames to be received.
-This option defaults to enabled (set) */
+ * set, the MAC will filter frames that have a mismatched type/length field
+ * and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
+ * types of frames are encountered. When this option is cleared, the MAC will
+ * allow these types of frames to be received.
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_TXEN (1 << 11)
/* Enable the receiver
-* This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_RXEN (1 << 12)
/* Default options set when device is initialized or reset */
@@ -68,18 +77,18 @@ This option defaults to enabled (set) */
#define TX_TAILDESC_PTR 0x04 /* rw */
#define TX_CHNL_CTRL 0x05 /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
-*/
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
+ */
#define CHNL_CTRL_IRQ_IOE (1 << 9)
#define CHNL_CTRL_IRQ_EN (1 << 7)
#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
@@ -87,35 +96,35 @@ This option defaults to enabled (set) */
#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
#define TX_IRQ_REG 0x06 /* rw */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
- 29 2 ErrIrq
- 30 1 DlyIrq
- 31 0 CoalIrq
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3::7 Reserved
+ * 29 2 ErrIrq
+ * 30 1 DlyIrq
+ * 31 0 CoalIrq
*/
#define TX_CHNL_STS 0x07 /* r */
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define RX_NXTDESC_PTR 0x08 /* r */
#define RX_CURBUF_ADDR 0x09 /* r */
@@ -124,17 +133,17 @@ This option defaults to enabled (set) */
#define RX_TAILDESC_PTR 0x0c /* rw */
#define RX_CHNL_CTRL 0x0d /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
*/
#define RX_IRQ_REG 0x0e /* rw */
#define IRQ_COAL (1 << 0)
@@ -142,13 +151,13 @@ This option defaults to enabled (set) */
#define IRQ_ERR (1 << 2)
#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
-*/
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3::7 Reserved
+ */
#define RX_CHNL_STS 0x0f /* r */
#define CHNL_STS_ENGBUSY (1 << 1)
#define CHNL_STS_EOP (1 << 2)
@@ -165,23 +174,23 @@ This option defaults to enabled (set) */
#define CHNL_STS_CMPERR (1 << 20)
#define CHNL_STS_TAILERR (1 << 21)
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define DMA_CONTROL_REG 0x10 /* rw */
#define DMA_CONTROL_RST (1 << 0)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3f6b9dfca095..1066420d6a83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -117,8 +117,8 @@ int temac_indirect_busywait(struct temac_local *lp)
spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
if (WARN_ON(!hard_acs_rdy(lp)))
return -ETIMEDOUT;
- else
- return 0;
+
+ return 0;
}
/*
@@ -261,7 +261,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
* I/O functions
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
unsigned int dcrs;
@@ -286,7 +286,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
* such as with MicroBlaze and x86
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
return -1;
}
@@ -307,11 +307,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
- else {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(lp->rx_skb[i]);
- }
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skb[i]);
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
@@ -430,7 +428,8 @@ static void temac_do_set_mac_address(struct net_device *ndev)
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1
- * so don't affect them Set MAC bits [47:32] in EUAW1 */
+ * so don't affect them Set MAC bits [47:32] in EUAW1
+ */
temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
@@ -530,66 +529,66 @@ static struct temac_option {
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXJMBO_MASK,
+ .m_or = XTE_RXC1_RXJMBO_MASK,
},
/* Turn on VLAN packet support for both Rx and Tx */
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXVLAN_MASK,
+ .m_or = XTE_TXC_TXVLAN_MASK,
},
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXVLAN_MASK,
+ .m_or = XTE_RXC1_RXVLAN_MASK,
},
/* Turn on FCS stripping on receive packets */
{
.opt = XTE_OPTION_FCS_STRIP,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXFCS_MASK,
+ .m_or = XTE_RXC1_RXFCS_MASK,
},
/* Turn on FCS insertion on transmit packets */
{
.opt = XTE_OPTION_FCS_INSERT,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXFCS_MASK,
+ .m_or = XTE_TXC_TXFCS_MASK,
},
/* Turn on length/type field checking on receive packets */
{
.opt = XTE_OPTION_LENTYPE_ERR,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXLT_MASK,
+ .m_or = XTE_RXC1_RXLT_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_RXFLO_MASK,
+ .m_or = XTE_FCC_RXFLO_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_TXFLO_MASK,
+ .m_or = XTE_FCC_TXFLO_MASK,
},
/* Turn on promiscuous frame filtering (all frames are received ) */
{
.opt = XTE_OPTION_PROMISC,
.reg = XTE_AFM_OFFSET,
- .m_or =XTE_AFM_EPPRM_MASK,
+ .m_or = XTE_AFM_EPPRM_MASK,
},
/* Enable transmitter if not already enabled */
{
.opt = XTE_OPTION_TXEN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXEN_MASK,
+ .m_or = XTE_TXC_TXEN_MASK,
},
/* Enable receiver? */
{
.opt = XTE_OPTION_RXEN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXEN_MASK,
+ .m_or = XTE_RXC1_RXEN_MASK,
},
{}
};
@@ -641,7 +640,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset RX reset timeout!!\n");
+ "%s RX reset timeout!!\n", __func__);
break;
}
}
@@ -653,7 +652,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset TX reset timeout!!\n");
+ "%s TX reset timeout!!\n", __func__);
break;
}
}
@@ -672,7 +671,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset DMA reset timeout!!\n");
+ "%s DMA reset timeout!!\n", __func__);
break;
}
}
@@ -680,7 +679,7 @@ static void temac_device_reset(struct net_device *ndev)
if (temac_dma_bd_init(ndev)) {
dev_err(&ndev->dev,
- "temac_device_reset descriptor allocation failed\n");
+ "%s descriptor allocation failed\n", __func__);
}
spin_lock_irqsave(lp->indirect_lock, flags);
@@ -691,7 +690,8 @@ static void temac_device_reset(struct net_device *ndev)
spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
- * but leave receiver and transmitter disabled. */
+ * but leave receiver and transmitter disabled.
+ */
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
@@ -723,9 +723,15 @@ static void temac_adjust_link(struct net_device *ndev)
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
- case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
- case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
- case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+ case SPEED_1000:
+ mii_speed |= XTE_EMCFG_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ mii_speed |= XTE_EMCFG_LINKSPD_100;
+ break;
+ case SPEED_10:
+ mii_speed |= XTE_EMCFG_LINKSPD_10;
+ break;
}
/* Write new speed setting out to TEMAC */
@@ -1007,7 +1013,6 @@ static void ll_temac_recv(struct net_device *ndev)
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) {
-
/* Convert from device endianness (be32) to cpu
* endianness, and if necessary swap the bytes
* (back) for proper IP checksum byte order
@@ -1563,16 +1568,12 @@ static int temac_probe(struct platform_device *pdev)
}
/* Error handle returned DMA RX and TX interrupts */
- if (lp->rx_irq < 0) {
- if (lp->rx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA RX irq\n");
- return lp->rx_irq;
- }
- if (lp->tx_irq < 0) {
- if (lp->tx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA TX irq\n");
- return lp->tx_irq;
- }
+ if (lp->rx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->rx_irq,
+ "could not get DMA RX irq\n");
+ if (lp->tx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->tx_irq,
+ "could not get DMA TX irq\n");
if (temac_np) {
/* Retrieve the MAC address */
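
The ll_temac probe path now reports missing DMA interrupts through dev_err_probe(), which prints with dev_err() for real errors, records a deferral reason for -EPROBE_DEFER instead of logging, and returns the error code so it can be used directly in a return statement. A minimal sketch of the same idiom in a hypothetical probe function:

	static int my_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			/* quiet on -EPROBE_DEFER, dev_err() otherwise */
			return dev_err_probe(&pdev->dev, irq, "could not get irq\n");

		return 0;
	}
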
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 6fd2dea4e60f..2371c072b53f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -29,7 +29,8 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
- * in the LSW0 register */
+ * in the LSW0 register
+ */
spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
@@ -88,7 +89,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
}
/* Enable the MDIO bus by asserting the enable bit and writing
- * in the clock config */
+ * in the clock config
+ */
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
bus = devm_mdiobus_alloc(&pdev->dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index f2e2261b4b7d..6370c447ac5c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -402,6 +402,9 @@ struct axidma_bd {
* @rx_bd_num: Size of RX buffer descriptor ring
* @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
* accessed currently.
+ * @rx_packets: RX packet count for statistics
+ * @rx_bytes: RX byte count for statistics
+ * @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
* @tx_dma_cr: Nominal content of TX DMA control register
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
@@ -411,6 +414,9 @@ struct axidma_bd {
* complete. Only updated at runtime by TX NAPI poll.
* @tx_bd_tail: Stores the index of the next Tx buffer descriptor in the ring
* to be populated.
+ * @tx_packets: TX packet count for statistics
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -458,6 +464,9 @@ struct axienet_local {
dma_addr_t rx_bd_p;
u32 rx_bd_num;
u32 rx_bd_ci;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
u32 tx_dma_cr;
@@ -466,6 +475,9 @@ struct axienet_local {
u32 tx_bd_num;
u32 tx_bd_ci;
u32 tx_bd_tail;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync tx_stat_sync;
struct work_struct dma_err_task;
@@ -591,7 +603,7 @@ static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
#else /* CONFIG_64BIT */
static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
- dma_addr_t addr)
+ dma_addr_t addr)
{
axienet_dma_out32(lp, reg, lower_32_bits(addr));
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1760930ec0c4..d1d772580da9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -597,7 +597,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -645,7 +645,7 @@ static int axienet_device_reset(struct net_device *ndev)
* @nr_bds: Max number of descriptors to clean up
* @force: Whether to clean descriptors even if not complete
* @sizep: Pointer to a u32 filled with the total sum of all bytes
- * in all cleaned-up descriptors. Ignored if NULL.
+ * in all cleaned-up descriptors. Ignored if NULL.
* @budget: NAPI budget (use 0 when not called from NAPI poll)
*
* Would either be called after a successful transmit operation, or after
@@ -752,8 +752,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci %= lp->tx_bd_num;
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+ u64_stats_update_end(&lp->tx_stat_sync);
/* Matches barrier in axienet_start_xmit */
smp_mb();
@@ -984,8 +986,10 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- lp->ndev->stats.rx_packets += packets;
- lp->ndev->stats.rx_bytes += size;
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, packets);
+ u64_stats_add(&lp->rx_bytes, size);
+ u64_stats_update_end(&lp->rx_stat_sync);
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
@@ -1292,10 +1296,32 @@ static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(lp->phylink, rq, cmd);
}
+static void
+axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
+ stats->rx_packets = u64_stats_read(&lp->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
+ stats->tx_packets = u64_stats_read(&lp->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
.ndo_start_xmit = axienet_start_xmit,
+ .ndo_get_stats64 = axienet_get_stats64,
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
@@ -1317,8 +1343,8 @@ static const struct net_device_ops axienet_netdev_ops = {
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
- strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
/**
@@ -1349,7 +1375,7 @@ static int axienet_ethtools_get_regs_len(struct net_device *ndev)
static void axienet_ethtools_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *ret)
{
- u32 *data = (u32 *) ret;
+ u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N;
struct axienet_local *lp = netdev_priv(ndev);
@@ -1850,8 +1876,11 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
- netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT);
- netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT);
+ u64_stats_init(&lp->rx_stat_sync);
+ u64_stats_init(&lp->tx_stat_sync);
+
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
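
The axienet statistics rework switches the RX/TX counters to u64_stats_t guarded by a u64_stats_sync, so .ndo_get_stats64 can read consistent 64-bit values even on 32-bit machines. A minimal sketch of the writer/reader pairing used above, with a hypothetical my_priv structure:

	struct my_priv {
		u64_stats_t rx_packets;
		struct u64_stats_sync rx_syncp;	/* u64_stats_init() at probe time */
	};

	/* writer side, e.g. from the NAPI poll loop */
	u64_stats_update_begin(&priv->rx_syncp);
	u64_stats_add(&priv->rx_packets, packets);
	u64_stats_update_end(&priv->rx_syncp);

	/* reader side, e.g. from .ndo_get_stats64 */
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&priv->rx_syncp);
		stats->rx_packets = u64_stats_read(&priv->rx_packets);
	} while (u64_stats_fetch_retry_irq(&priv->rx_syncp, start));
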
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2772a79cd3ed..0b3b6935c558 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -126,7 +126,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
return ret;
}
- axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
XAE_MDIO_MCR_PHYAD_MASK) |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 016a9c4f2c6c..05848ff15fb5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1060,7 +1060,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
}
static const struct ethtool_ops xemaclite_ethtool_ops = {
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index f9587e55b842..894e92ef415b 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1402,7 +1402,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
+ strscpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3591b9edc9a1..3b0c5f177447 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -841,7 +841,7 @@ static void eth_txdone_irq(void *unused)
}
}
-static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = netdev_priv(dev);
unsigned int txreadyq = port->plat->txreadyq;
@@ -999,11 +999,11 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
{
struct port *port = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
- strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strscpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_ts_info(struct net_device *dev,
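
The ixp4xx change above corrects the transmit handler's return type: .ndo_start_xmit implementations are expected to return netdev_tx_t, not int. A minimal sketch of the expected shape, with hypothetical names:

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* queue the skb to hardware here */
		return NETDEV_TX_OK;	/* NETDEV_TX_BUSY asks the core to requeue */
	}
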
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific before it left the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 746736c83873..19c99529566b 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -151,11 +151,11 @@ static void fjes_get_drvinfo(struct net_device *netdev,
plat_dev = adapter->plat_dev;
- strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, fjes_driver_version,
+ strscpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, fjes_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
"platform:%s", plat_dev->name);
}
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 5805e4a56385..1eff202f6a1f 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -32,68 +32,12 @@ MODULE_VERSION(DRV_VERSION);
#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
-static int fjes_request_irq(struct fjes_adapter *);
-static void fjes_free_irq(struct fjes_adapter *);
-
-static int fjes_open(struct net_device *);
-static int fjes_close(struct net_device *);
-static int fjes_setup_resources(struct fjes_adapter *);
-static void fjes_free_resources(struct fjes_adapter *);
-static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
-static void fjes_raise_intr_rxdata_task(struct work_struct *);
-static void fjes_tx_stall_task(struct work_struct *);
-static void fjes_force_close_task(struct work_struct *);
-static irqreturn_t fjes_intr(int, void*);
-static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
-static int fjes_change_mtu(struct net_device *, int);
-static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
-static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
-static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
-
-static int fjes_acpi_add(struct acpi_device *);
-static int fjes_acpi_remove(struct acpi_device *);
-static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
-
-static int fjes_probe(struct platform_device *);
-static int fjes_remove(struct platform_device *);
-
-static int fjes_sw_init(struct fjes_adapter *);
-static void fjes_netdev_setup(struct net_device *);
-static void fjes_irq_watch_task(struct work_struct *);
-static void fjes_watch_unshare_task(struct work_struct *);
-static void fjes_rx_irq(struct fjes_adapter *, int);
-static int fjes_poll(struct napi_struct *, int);
-
static const struct acpi_device_id fjes_acpi_ids[] = {
{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
-static struct acpi_driver fjes_acpi_driver = {
- .name = DRV_NAME,
- .class = DRV_NAME,
- .owner = THIS_MODULE,
- .ids = fjes_acpi_ids,
- .ops = {
- .add = fjes_acpi_add,
- .remove = fjes_acpi_remove,
- },
-};
-
-static struct platform_driver fjes_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = fjes_probe,
- .remove = fjes_remove,
-};
-
-static struct resource fjes_resource[] = {
- DEFINE_RES_MEM(0, 1),
- DEFINE_RES_IRQ(0)
-};
-
static bool is_extended_socket_device(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
@@ -139,43 +83,6 @@ static int acpi_check_extended_socket_status(struct acpi_device *device)
return 0;
}
-static int fjes_acpi_add(struct acpi_device *device)
-{
- struct platform_device *plat_dev;
- acpi_status status;
-
- if (!is_extended_socket_device(device))
- return -ENODEV;
-
- if (acpi_check_extended_socket_status(device))
- return -ENODEV;
-
- status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- fjes_get_acpi_resource, fjes_resource);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* create platform_device */
- plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
- ARRAY_SIZE(fjes_resource));
- if (IS_ERR(plat_dev))
- return PTR_ERR(plat_dev);
-
- device->driver_data = plat_dev;
-
- return 0;
-}
-
-static int fjes_acpi_remove(struct acpi_device *device)
-{
- struct platform_device *plat_dev;
-
- plat_dev = (struct platform_device *)acpi_driver_data(device);
- platform_device_unregister(plat_dev);
-
- return 0;
-}
-
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -206,143 +113,59 @@ fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
-static int fjes_request_irq(struct fjes_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int result = -1;
-
- adapter->interrupt_watch_enable = true;
- if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
- queue_delayed_work(adapter->control_wq,
- &adapter->interrupt_watch_task,
- FJES_IRQ_WATCH_DELAY);
- }
-
- if (!adapter->irq_registered) {
- result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
- IRQF_SHARED, netdev->name, adapter);
- if (result)
- adapter->irq_registered = false;
- else
- adapter->irq_registered = true;
- }
-
- return result;
-}
-
-static void fjes_free_irq(struct fjes_adapter *adapter)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- adapter->interrupt_watch_enable = false;
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
-
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
-
- if (adapter->irq_registered) {
- free_irq(adapter->hw.hw_res.irq, adapter);
- adapter->irq_registered = false;
- }
-}
-
-static const struct net_device_ops fjes_netdev_ops = {
- .ndo_open = fjes_open,
- .ndo_stop = fjes_close,
- .ndo_start_xmit = fjes_xmit_frame,
- .ndo_get_stats64 = fjes_get_stats64,
- .ndo_change_mtu = fjes_change_mtu,
- .ndo_tx_timeout = fjes_tx_retry,
- .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+static struct resource fjes_resource[] = {
+ DEFINE_RES_MEM(0, 1),
+ DEFINE_RES_IRQ(0)
};
-/* fjes_open - Called when a network interface is made active */
-static int fjes_open(struct net_device *netdev)
+static int fjes_acpi_add(struct acpi_device *device)
{
- struct fjes_adapter *adapter = netdev_priv(netdev);
- struct fjes_hw *hw = &adapter->hw;
- int result;
-
- if (adapter->open_guard)
- return -ENXIO;
-
- result = fjes_setup_resources(adapter);
- if (result)
- goto err_setup_res;
-
- hw->txrx_stop_req_bit = 0;
- hw->epstop_req_bit = 0;
+ struct platform_device *plat_dev;
+ acpi_status status;
- napi_enable(&adapter->napi);
+ if (!is_extended_socket_device(device))
+ return -ENODEV;
- fjes_hw_capture_interrupt_status(hw);
+ if (acpi_check_extended_socket_status(device))
+ return -ENODEV;
- result = fjes_request_irq(adapter);
- if (result)
- goto err_req_irq;
+ status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ fjes_get_acpi_resource, fjes_resource);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
+ /* create platform_device */
+ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+ ARRAY_SIZE(fjes_resource));
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
- netif_tx_start_all_queues(netdev);
- netif_carrier_on(netdev);
+ device->driver_data = plat_dev;
return 0;
-
-err_req_irq:
- fjes_free_irq(adapter);
- napi_disable(&adapter->napi);
-
-err_setup_res:
- fjes_free_resources(adapter);
- return result;
}
-/* fjes_close - Disables a network interface */
-static int fjes_close(struct net_device *netdev)
+static int fjes_acpi_remove(struct acpi_device *device)
{
- struct fjes_adapter *adapter = netdev_priv(netdev);
- struct fjes_hw *hw = &adapter->hw;
- unsigned long flags;
- int epidx;
-
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
-
- fjes_hw_raise_epstop(hw);
-
- napi_disable(&adapter->napi);
-
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
- continue;
-
- if (fjes_hw_get_partner_ep_status(hw, epidx) ==
- EP_PARTNER_SHARED)
- adapter->hw.ep_shm_info[epidx]
- .tx.info->v1i.rx_status &=
- ~FJES_RX_POLL_WORK;
- }
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
-
- fjes_free_irq(adapter);
-
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
- cancel_work_sync(&adapter->unshare_watch_task);
- adapter->unshare_watch_bitmask = 0;
- cancel_work_sync(&adapter->raise_intr_rxdata_task);
- cancel_work_sync(&adapter->tx_stall_task);
-
- cancel_work_sync(&hw->update_zone_task);
- cancel_work_sync(&hw->epstop_task);
-
- fjes_hw_wait_epstop(hw);
+ struct platform_device *plat_dev;
- fjes_free_resources(adapter);
+ plat_dev = (struct platform_device *)acpi_driver_data(device);
+ platform_device_unregister(plat_dev);
return 0;
}
+static struct acpi_driver fjes_acpi_driver = {
+ .name = DRV_NAME,
+ .class = DRV_NAME,
+ .owner = THIS_MODULE,
+ .ids = fjes_acpi_ids,
+ .ops = {
+ .add = fjes_acpi_add,
+ .remove = fjes_acpi_remove,
+ },
+};
+
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -421,6 +244,188 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
return 0;
}
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+ adapter->unset_rx_last = true;
+ napi_schedule(&adapter->napi);
+}
+
+static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+ unsigned long flags;
+
+ set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_stop_req_irq_pre(hw, src_epid, status);
+ switch (status) {
+ case EP_PARTNER_WAITING:
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ fallthrough;
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ break;
+ case EP_PARTNER_SHARED:
+ set_bit(src_epid, &hw->epstop_req_bit);
+
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq, &hw->epstop_task);
+ break;
+ }
+ trace_fjes_stop_req_irq_post(hw, src_epid);
+}
+
+static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+ unsigned long flags;
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
+ switch (status) {
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ break;
+ case EP_PARTNER_WAITING:
+ if (src_epid < hw->my_epid) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ }
+ break;
+ case EP_PARTNER_SHARED:
+ if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
+ FJES_RX_STOP_REQ_REQUEST) {
+ set_bit(src_epid, &hw->epstop_req_bit);
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq,
+ &hw->epstop_task);
+ }
+ break;
+ }
+ trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
+}
+
+static void fjes_update_zone_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ if (!work_pending(&hw->update_zone_task))
+ queue_work(adapter->control_wq, &hw->update_zone_task);
+}
+
+static irqreturn_t fjes_intr(int irq, void *data)
+{
+ struct fjes_adapter *adapter = data;
+ struct fjes_hw *hw = &adapter->hw;
+ irqreturn_t ret;
+ u32 icr;
+
+ icr = fjes_hw_capture_interrupt_status(hw);
+
+ if (icr & REG_IS_MASK_IS_ASSERT) {
+ if (icr & REG_ICTL_MASK_RX_DATA) {
+ fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_rx += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
+ fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_stop += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
+ fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_unshare += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
+ fjes_hw_set_irqmask(hw,
+ REG_ICTL_MASK_TXRX_STOP_DONE, true);
+
+ if (icr & REG_ICTL_MASK_INFO_UPDATE) {
+ fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_zoneupdate += 1;
+ }
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ return ret;
+}
+
+static int fjes_request_irq(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int result = -1;
+
+ adapter->interrupt_watch_enable = true;
+ if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+
+ if (!adapter->irq_registered) {
+ result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
+ IRQF_SHARED, netdev->name, adapter);
+ if (result)
+ adapter->irq_registered = false;
+ else
+ adapter->irq_registered = true;
+ }
+
+ return result;
+}
+
+static void fjes_free_irq(struct fjes_adapter *adapter)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ adapter->interrupt_watch_enable = false;
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+ if (adapter->irq_registered) {
+ free_irq(adapter->hw.hw_res.irq, adapter);
+ adapter->irq_registered = false;
+ }
+}
+
static void fjes_free_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -477,121 +482,91 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
}
}
-static void fjes_tx_stall_task(struct work_struct *work)
+/* fjes_open - Called when a network interface is made active */
+static int fjes_open(struct net_device *netdev)
{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, tx_stall_task);
- struct net_device *netdev = adapter->netdev;
+ struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
- int all_queue_available, sendable;
- enum ep_partner_status pstatus;
- int max_epid, my_epid, epid;
- union ep_buffer_info *info;
- int i;
-
- if (((long)jiffies -
- dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
- netif_wake_queue(netdev);
- return;
- }
-
- my_epid = hw->my_epid;
- max_epid = hw->max_epid;
+ int result;
- for (i = 0; i < 5; i++) {
- all_queue_available = 1;
+ if (adapter->open_guard)
+ return -ENXIO;
- for (epid = 0; epid < max_epid; epid++) {
- if (my_epid == epid)
- continue;
+ result = fjes_setup_resources(adapter);
+ if (result)
+ goto err_setup_res;
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- sendable = (pstatus == EP_PARTNER_SHARED);
- if (!sendable)
- continue;
+ hw->txrx_stop_req_bit = 0;
+ hw->epstop_req_bit = 0;
- info = adapter->hw.ep_shm_info[epid].tx.info;
+ napi_enable(&adapter->napi);
- if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
- return;
+ fjes_hw_capture_interrupt_status(hw);
- if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
- info->v1i.count_max)) {
- all_queue_available = 0;
- break;
- }
- }
+ result = fjes_request_irq(adapter);
+ if (result)
+ goto err_req_irq;
- if (all_queue_available) {
- netif_wake_queue(netdev);
- return;
- }
- }
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
- usleep_range(50, 100);
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
- queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
-}
+ return 0;
-static void fjes_force_close_task(struct work_struct *work)
-{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, force_close_task);
- struct net_device *netdev = adapter->netdev;
+err_req_irq:
+ fjes_free_irq(adapter);
+ napi_disable(&adapter->napi);
- rtnl_lock();
- dev_close(netdev);
- rtnl_unlock();
+err_setup_res:
+ fjes_free_resources(adapter);
+ return result;
}
-static void fjes_raise_intr_rxdata_task(struct work_struct *work)
+/* fjes_close - Disables a network interface */
+static int fjes_close(struct net_device *netdev)
{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, raise_intr_rxdata_task);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status pstatus;
- int max_epid, my_epid, epid;
+ unsigned long flags;
+ int epidx;
- my_epid = hw->my_epid;
- max_epid = hw->max_epid;
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
- for (epid = 0; epid < max_epid; epid++)
- hw->ep_shm_info[epid].tx_status_work = 0;
+ fjes_hw_raise_epstop(hw);
- for (epid = 0; epid < max_epid; epid++) {
- if (epid == my_epid)
- continue;
+ napi_disable(&adapter->napi);
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- if (pstatus == EP_PARTNER_SHARED) {
- hw->ep_shm_info[epid].tx_status_work =
- hw->ep_shm_info[epid].tx.info->v1i.tx_status;
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
- if (hw->ep_shm_info[epid].tx_status_work ==
- FJES_TX_DELAY_SEND_PENDING) {
- hw->ep_shm_info[epid].tx.info->v1i.tx_status =
- FJES_TX_DELAY_SEND_NONE;
- }
- }
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
- for (epid = 0; epid < max_epid; epid++) {
- if (epid == my_epid)
- continue;
+ fjes_free_irq(adapter);
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- if ((hw->ep_shm_info[epid].tx_status_work ==
- FJES_TX_DELAY_SEND_PENDING) &&
- (pstatus == EP_PARTNER_SHARED) &&
- !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
- FJES_RX_POLL_WORK)) {
- fjes_hw_raise_interrupt(hw, epid,
- REG_ICTL_MASK_RX_DATA);
- hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
- }
- }
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ adapter->unshare_watch_bitmask = 0;
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
- usleep_range(500, 1000);
+ cancel_work_sync(&hw->update_zone_task);
+ cancel_work_sync(&hw->epstop_task);
+
+ fjes_hw_wait_epstop(hw);
+
+ fjes_free_resources(adapter);
+
+ return 0;
}
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
@@ -787,13 +762,6 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
-static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
-{
- struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
-
- netif_tx_wake_queue(queue);
-}
-
static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
@@ -871,6 +839,13 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
+{
+ struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
+
+ netif_tx_wake_queue(queue);
+}
+
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
@@ -907,137 +882,29 @@ static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
- int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status status;
- unsigned long flags;
-
- status = fjes_hw_get_partner_ep_status(hw, src_epid);
- trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
- switch (status) {
- case EP_PARTNER_UNSHARE:
- case EP_PARTNER_COMPLETE:
- default:
- break;
- case EP_PARTNER_WAITING:
- if (src_epid < hw->my_epid) {
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
- FJES_RX_STOP_REQ_DONE;
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
-
- clear_bit(src_epid, &hw->txrx_stop_req_bit);
- set_bit(src_epid, &adapter->unshare_watch_bitmask);
-
- if (!work_pending(&adapter->unshare_watch_task))
- queue_work(adapter->control_wq,
- &adapter->unshare_watch_task);
- }
- break;
- case EP_PARTNER_SHARED:
- if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
- FJES_RX_STOP_REQ_REQUEST) {
- set_bit(src_epid, &hw->epstop_req_bit);
- if (!work_pending(&hw->epstop_task))
- queue_work(adapter->control_wq,
- &hw->epstop_task);
- }
- break;
- }
- trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
-}
-
-static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status status;
- unsigned long flags;
-
- set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
-
- status = fjes_hw_get_partner_ep_status(hw, src_epid);
- trace_fjes_stop_req_irq_pre(hw, src_epid, status);
- switch (status) {
- case EP_PARTNER_WAITING:
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
- FJES_RX_STOP_REQ_DONE;
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
- clear_bit(src_epid, &hw->txrx_stop_req_bit);
- fallthrough;
- case EP_PARTNER_UNSHARE:
- case EP_PARTNER_COMPLETE:
- default:
- set_bit(src_epid, &adapter->unshare_watch_bitmask);
- if (!work_pending(&adapter->unshare_watch_task))
- queue_work(adapter->control_wq,
- &adapter->unshare_watch_task);
- break;
- case EP_PARTNER_SHARED:
- set_bit(src_epid, &hw->epstop_req_bit);
-
- if (!work_pending(&hw->epstop_task))
- queue_work(adapter->control_wq, &hw->epstop_task);
- break;
- }
- trace_fjes_stop_req_irq_post(hw, src_epid);
-}
-
-static void fjes_update_zone_irq(struct fjes_adapter *adapter,
- int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- if (!work_pending(&hw->update_zone_task))
- queue_work(adapter->control_wq, &hw->update_zone_task);
-}
+static const struct net_device_ops fjes_netdev_ops = {
+ .ndo_open = fjes_open,
+ .ndo_stop = fjes_close,
+ .ndo_start_xmit = fjes_xmit_frame,
+ .ndo_get_stats64 = fjes_get_stats64,
+ .ndo_change_mtu = fjes_change_mtu,
+ .ndo_tx_timeout = fjes_tx_retry,
+ .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+};
-static irqreturn_t fjes_intr(int irq, void *data)
+/* fjes_netdev_setup - netdevice initialization routine */
+static void fjes_netdev_setup(struct net_device *netdev)
{
- struct fjes_adapter *adapter = data;
- struct fjes_hw *hw = &adapter->hw;
- irqreturn_t ret;
- u32 icr;
-
- icr = fjes_hw_capture_interrupt_status(hw);
-
- if (icr & REG_IS_MASK_IS_ASSERT) {
- if (icr & REG_ICTL_MASK_RX_DATA) {
- fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_rx += 1;
- }
-
- if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
- fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_stop += 1;
- }
-
- if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
- fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_unshare += 1;
- }
-
- if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
- fjes_hw_set_irqmask(hw,
- REG_ICTL_MASK_TXRX_STOP_DONE, true);
-
- if (icr & REG_ICTL_MASK_INFO_UPDATE) {
- fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_zoneupdate += 1;
- }
-
- ret = IRQ_HANDLED;
- } else {
- ret = IRQ_NONE;
- }
+ ether_setup(netdev);
- return ret;
+ netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
+ netdev->netdev_ops = &fjes_netdev_ops;
+ fjes_set_ethtool_ops(netdev);
+ netdev->mtu = fjes_support_mtu[3];
+ netdev->min_mtu = fjes_support_mtu[0];
+ netdev->max_mtu = fjes_support_mtu[3];
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
@@ -1087,16 +954,6 @@ static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
-static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
-
- adapter->unset_rx_last = true;
- napi_schedule(&adapter->napi);
-}
-
static int fjes_poll(struct napi_struct *napi, int budget)
{
struct fjes_adapter *adapter =
@@ -1196,182 +1053,130 @@ static int fjes_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* fjes_probe - Device Initialization Routine */
-static int fjes_probe(struct platform_device *plat_dev)
+static int fjes_sw_init(struct fjes_adapter *adapter)
{
- struct fjes_adapter *adapter;
- struct net_device *netdev;
- struct resource *res;
- struct fjes_hw *hw;
- u8 addr[ETH_ALEN];
- int err;
-
- err = -ENOMEM;
- netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
- NET_NAME_UNKNOWN, fjes_netdev_setup,
- FJES_MAX_QUEUES);
-
- if (!netdev)
- goto err_out;
+ struct net_device *netdev = adapter->netdev;
- SET_NETDEV_DEV(netdev, &plat_dev->dev);
+ netif_napi_add(netdev, &adapter->napi, fjes_poll);
- dev_set_drvdata(&plat_dev->dev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->plat_dev = plat_dev;
- hw = &adapter->hw;
- hw->back = adapter;
+ return 0;
+}
- /* setup the private structure */
- err = fjes_sw_init(adapter);
- if (err)
- goto err_free_netdev;
+static void fjes_force_close_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, force_close_task);
+ struct net_device *netdev = adapter->netdev;
- INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
- adapter->force_reset = false;
- adapter->open_guard = false;
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+}
- adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
- if (unlikely(!adapter->txrx_wq)) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
+static void fjes_tx_stall_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, tx_stall_task);
+ struct net_device *netdev = adapter->netdev;
+ struct fjes_hw *hw = &adapter->hw;
+ int all_queue_available, sendable;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
+ union ep_buffer_info *info;
+ int i;
- adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
- WQ_MEM_RECLAIM, 0);
- if (unlikely(!adapter->control_wq)) {
- err = -ENOMEM;
- goto err_free_txrx_wq;
+ if (((long)jiffies -
+ dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
+ netif_wake_queue(netdev);
+ return;
}
- INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
- INIT_WORK(&adapter->raise_intr_rxdata_task,
- fjes_raise_intr_rxdata_task);
- INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
- adapter->unshare_watch_bitmask = 0;
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
- INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
- adapter->interrupt_watch_enable = false;
+ for (i = 0; i < 5; i++) {
+ all_queue_available = 1;
- res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- goto err_free_control_wq;
- }
- hw->hw_res.start = res->start;
- hw->hw_res.size = resource_size(res);
- hw->hw_res.irq = platform_get_irq(plat_dev, 0);
- if (hw->hw_res.irq < 0) {
- err = hw->hw_res.irq;
- goto err_free_control_wq;
- }
+ for (epid = 0; epid < max_epid; epid++) {
+ if (my_epid == epid)
+ continue;
- err = fjes_hw_init(&adapter->hw);
- if (err)
- goto err_free_control_wq;
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ sendable = (pstatus == EP_PARTNER_SHARED);
+ if (!sendable)
+ continue;
- /* setup MAC address (02:00:00:00:00:[epid])*/
- addr[0] = 2;
- addr[1] = 0;
- addr[2] = 0;
- addr[3] = 0;
- addr[4] = 0;
- addr[5] = hw->my_epid; /* EPID */
- eth_hw_addr_set(netdev, addr);
+ info = adapter->hw.ep_shm_info[epid].tx.info;
- err = register_netdev(netdev);
- if (err)
- goto err_hw_exit;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return;
- netif_carrier_off(netdev);
+ if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
+ info->v1i.count_max)) {
+ all_queue_available = 0;
+ break;
+ }
+ }
- fjes_dbg_adapter_init(adapter);
+ if (all_queue_available) {
+ netif_wake_queue(netdev);
+ return;
+ }
+ }
- return 0;
+ usleep_range(50, 100);
-err_hw_exit:
- fjes_hw_exit(&adapter->hw);
-err_free_control_wq:
- destroy_workqueue(adapter->control_wq);
-err_free_txrx_wq:
- destroy_workqueue(adapter->txrx_wq);
-err_free_netdev:
- free_netdev(netdev);
-err_out:
- return err;
+ queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
-/* fjes_remove - Device Removal Routine */
-static int fjes_remove(struct platform_device *plat_dev)
+static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
- struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
- struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, raise_intr_rxdata_task);
struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
- fjes_dbg_adapter_exit(adapter);
-
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
- cancel_work_sync(&adapter->unshare_watch_task);
- cancel_work_sync(&adapter->raise_intr_rxdata_task);
- cancel_work_sync(&adapter->tx_stall_task);
- if (adapter->control_wq)
- destroy_workqueue(adapter->control_wq);
- if (adapter->txrx_wq)
- destroy_workqueue(adapter->txrx_wq);
-
- unregister_netdev(netdev);
-
- fjes_hw_exit(hw);
-
- netif_napi_del(&adapter->napi);
-
- free_netdev(netdev);
-
- return 0;
-}
-
-static int fjes_sw_init(struct fjes_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
-
- return 0;
-}
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
-/* fjes_netdev_setup - netdevice initialization routine */
-static void fjes_netdev_setup(struct net_device *netdev)
-{
- ether_setup(netdev);
+ for (epid = 0; epid < max_epid; epid++)
+ hw->ep_shm_info[epid].tx_status_work = 0;
- netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
- netdev->netdev_ops = &fjes_netdev_ops;
- fjes_set_ethtool_ops(netdev);
- netdev->mtu = fjes_support_mtu[3];
- netdev->min_mtu = fjes_support_mtu[0];
- netdev->max_mtu = fjes_support_mtu[3];
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-}
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
-static void fjes_irq_watch_task(struct work_struct *work)
-{
- struct fjes_adapter *adapter = container_of(to_delayed_work(work),
- struct fjes_adapter, interrupt_watch_task);
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if (pstatus == EP_PARTNER_SHARED) {
+ hw->ep_shm_info[epid].tx_status_work =
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status;
- local_irq_disable();
- fjes_intr(adapter->hw.hw_res.irq, adapter);
- local_irq_enable();
+ if (hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) {
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status =
+ FJES_TX_DELAY_SEND_NONE;
+ }
+ }
+ }
- if (fjes_rxframe_search_exist(adapter, 0) >= 0)
- napi_schedule(&adapter->napi);
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
- if (adapter->interrupt_watch_enable) {
- if (!delayed_work_pending(&adapter->interrupt_watch_task))
- queue_delayed_work(adapter->control_wq,
- &adapter->interrupt_watch_task,
- FJES_IRQ_WATCH_DELAY);
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if ((hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) &&
+ (pstatus == EP_PARTNER_SHARED) &&
+ !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+ FJES_RX_POLL_WORK)) {
+ fjes_hw_raise_interrupt(hw, epid,
+ REG_ICTL_MASK_RX_DATA);
+ hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
+ }
}
+
+ usleep_range(500, 1000);
}
static void fjes_watch_unshare_task(struct work_struct *work)
@@ -1508,6 +1313,169 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
}
+static void fjes_irq_watch_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(to_delayed_work(work),
+ struct fjes_adapter, interrupt_watch_task);
+
+ local_irq_disable();
+ fjes_intr(adapter->hw.hw_res.irq, adapter);
+ local_irq_enable();
+
+ if (fjes_rxframe_search_exist(adapter, 0) >= 0)
+ napi_schedule(&adapter->napi);
+
+ if (adapter->interrupt_watch_enable) {
+ if (!delayed_work_pending(&adapter->interrupt_watch_task))
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+}
+
+/* fjes_probe - Device Initialization Routine */
+static int fjes_probe(struct platform_device *plat_dev)
+{
+ struct fjes_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *res;
+ struct fjes_hw *hw;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = -ENOMEM;
+ netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
+ NET_NAME_UNKNOWN, fjes_netdev_setup,
+ FJES_MAX_QUEUES);
+
+ if (!netdev)
+ goto err_out;
+
+ SET_NETDEV_DEV(netdev, &plat_dev->dev);
+
+ dev_set_drvdata(&plat_dev->dev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->plat_dev = plat_dev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ /* setup the private structure */
+ err = fjes_sw_init(adapter);
+ if (err)
+ goto err_free_netdev;
+
+ INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
+ adapter->force_reset = false;
+ adapter->open_guard = false;
+
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->txrx_wq)) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
+ WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->control_wq)) {
+ err = -ENOMEM;
+ goto err_free_txrx_wq;
+ }
+
+ INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
+ INIT_WORK(&adapter->raise_intr_rxdata_task,
+ fjes_raise_intr_rxdata_task);
+ INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
+ adapter->unshare_watch_bitmask = 0;
+
+ INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
+ adapter->interrupt_watch_enable = false;
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_control_wq;
+ }
+ hw->hw_res.start = res->start;
+ hw->hw_res.size = resource_size(res);
+ hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+ if (hw->hw_res.irq < 0) {
+ err = hw->hw_res.irq;
+ goto err_free_control_wq;
+ }
+
+ err = fjes_hw_init(&adapter->hw);
+ if (err)
+ goto err_free_control_wq;
+
+ /* setup MAC address (02:00:00:00:00:[epid])*/
+ addr[0] = 2;
+ addr[1] = 0;
+ addr[2] = 0;
+ addr[3] = 0;
+ addr[4] = 0;
+ addr[5] = hw->my_epid; /* EPID */
+ eth_hw_addr_set(netdev, addr);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_hw_exit;
+
+ netif_carrier_off(netdev);
+
+ fjes_dbg_adapter_init(adapter);
+
+ return 0;
+
+err_hw_exit:
+ fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+ destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+ destroy_workqueue(adapter->txrx_wq);
+err_free_netdev:
+ free_netdev(netdev);
+err_out:
+ return err;
+}
+
+/* fjes_remove - Device Removal Routine */
+static int fjes_remove(struct platform_device *plat_dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_dbg_adapter_exit(adapter);
+
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
+ if (adapter->control_wq)
+ destroy_workqueue(adapter->control_wq);
+ if (adapter->txrx_wq)
+ destroy_workqueue(adapter->txrx_wq);
+
+ unregister_netdev(netdev);
+
+ fjes_hw_exit(hw);
+
+ netif_napi_del(&adapter->napi);
+
+ free_netdev(netdev);
+
+ return 0;
+}
+
+static struct platform_driver fjes_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = fjes_probe,
+ .remove = fjes_remove,
+};
+
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
void *context, void **return_value)
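
The relocated fjes_probe()/fjes_remove() above also show the unwind convention the driver keeps after the reshuffle: every failure path jumps to a label that releases only what was already set up, and the labels run in reverse order of acquisition. A condensed, kernel-style sketch of that shape (the helper names are invented for illustration, not fjes symbols):

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = example_alloc_netdev(pdev);	/* step 1: allocate netdev/private data */
	if (err)
		goto err_out;
	err = example_alloc_workqueues(pdev);	/* step 2: workqueues */
	if (err)
		goto err_free_netdev;
	err = example_hw_init(pdev);		/* step 3: hardware init, register_netdev() */
	if (err)
		goto err_free_workqueues;
	return 0;

err_free_workqueues:
	example_free_workqueues(pdev);		/* undo step 2 */
err_free_netdev:
	example_free_netdev(pdev);		/* undo step 1 */
err_out:
	return err;
}
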
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7962c37b3f14..f393e454f45c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -503,12 +503,9 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
off_gnv = skb_gro_offset(skb);
hlen = off_gnv + sizeof(*gh);
- gh = skb_gro_header_fast(skb, off_gnv);
- if (skb_gro_header_hard(skb, hlen)) {
- gh = skb_gro_header_slow(skb, hlen, off_gnv);
- if (unlikely(!gh))
- goto out;
- }
+ gh = skb_gro_header(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
if (gh->ver != GENEVE_VER || gh->oam)
goto out;
@@ -1200,8 +1197,8 @@ static const struct net_device_ops geneve_netdev_ops = {
static void geneve_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
- strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}
static const struct ethtool_ops geneve_ethtool_ops = {
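
The strlcpy()-to-strscpy() conversions in this series are mechanical, but the two helpers behave differently: strscpy() always NUL-terminates a non-empty destination, stops once the destination is full rather than scanning the whole source to compute a length, and returns the number of characters copied or -E2BIG on truncation. A hedged sketch of acting on that return value (the buffer and source names are illustrative):

	char version[32];
	ssize_t len;

	len = strscpy(version, long_version_string, sizeof(version));
	if (len == -E2BIG)
		pr_warn("version string truncated\n");
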
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index a208e2b1a9af..15c7dc82107f 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1859,6 +1859,7 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = gtp_genl_ops,
.n_small_ops = ARRAY_SIZE(gtp_genl_ops),
+ .resv_start_op = GTP_CMD_ECHOREQ + 1,
.mcgrps = gtp_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
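
The new .resv_start_op member (also added to mac802154_hwsim further down) tells the generic netlink core where this family's reserved command space begins; newer kernels use it to validate that the reserved byte in the genetlink header is zero for commands at or above that value, and the convention is the highest existing command plus one, as done here. A hedged sketch of the field in a family definition (names are illustrative):

static struct genl_family example_genl_family __ro_after_init = {
	.name		= "example",
	.version	= 1,
	.module		= THIS_MODULE,
	.small_ops	= example_genl_ops,
	.n_small_ops	= ARRAY_SIZE(example_genl_ops),
	.resv_start_op	= EXAMPLE_CMD_MAX + 1,	/* first reserved command number */
};
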
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8297411e87ea..a6184d6c7b15 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -600,7 +600,7 @@ static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_DRIVERNAME:
if (s->ops && s->ops->drvname) {
- strlcpy(bi.data.drivername, s->ops->drvname,
+ strscpy(bi.data.drivername, s->ops->drvname,
sizeof(bi.data.drivername));
break;
}
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 74e845fa2e07..aa8f828a0ae7 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
pci_release_regions(pdev);
+ pci_disable_device(pdev);
out2:
free_netdev(dev);
out3:
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6e42cb03e226..f066de0da492 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1779,8 +1779,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
}
/* Enable NAPI handler before init callbacks */
- netif_napi_add(ndev, &net_device->chan_table[0].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
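
The netif_napi_add() calls here (and in rndis_filter.c and the IPA GSI code below) drop the explicit NAPI_POLL_WEIGHT argument because the three-argument form now applies the default weight itself; a driver that genuinely needs a non-default weight calls netif_napi_add_weighted() instead. A short sketch of the two forms (the poll callback and private-struct names are illustrative):

	/* default weight, previously passed explicitly as NAPI_POLL_WEIGHT */
	netif_napi_add(ndev, &priv->napi, example_poll);

	/* only when a non-default poll weight is really required */
	netif_napi_add_weighted(ndev, &priv->napi, example_poll, 16);
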
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 15ebd5426604..5f08482065ca 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -935,8 +935,8 @@ int netvsc_recv_callback(struct net_device *net,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void netvsc_get_channels(struct net_device *net,
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 6da36cb8af80..11f767a20444 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1575,7 +1575,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
for (i = 1; i < net_device->num_chn; i++)
netif_napi_add(net, &net_device->chan_table[i].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netvsc_poll);
return net_device;
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 6afdf1622944..5cf218c674a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1310,10 +1310,11 @@ static void adf7242_remove(struct spi_device *spi)
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 42c0b451088d..450b16ad40a4 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2293,7 +2293,7 @@ static int ca8210_set_csma_params(
* @retries: Number of retries
*
* Sets the number of times to retry a transmission if no acknowledgment was
- * was received from the other end when one was requested.
+ * received from the other end when one was requested.
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 1e1f40f628a0..c69b87d3837d 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 38c217bd7c82..2f0544dd7c2a 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -630,6 +630,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = hwsim_nl_ops,
.n_small_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .resv_start_op = MAC802154_HWSIM_CMD_NEW_EDGE + 1,
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 8b2220eb6b92..48255fc4b25c 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -13,4 +13,6 @@ ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
+ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
+
ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 1c1895aea811..e0d71f609272 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -526,7 +526,7 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
.version = IPA_VERSION_3_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 58b708d2fc75..383ef1890065 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -407,11 +407,11 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
- BCR_TX_NOT_USING_BRESP_FMASK |
- BCR_SUSPEND_L2_IRQ_FMASK |
- BCR_HOLB_DROP_L2_IRQ_FMASK |
- BCR_DUAL_TX_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
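
The backward_compat data above switches from register-specific *_FMASK values to bit positions combined with BIT(): each BCR_* symbol is now a bit number within the backward-compatibility register, and the per-SoC data simply ORs together BIT(position) for each quirk. A small stand-alone demonstration of the arithmetic (the enum values are illustrative, not the real IPA definitions):

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))			/* same shape as the kernel's BIT() */

enum { QUIRK_A = 0, QUIRK_B = 1, QUIRK_C = 5 };	/* illustrative bit positions */

int main(void)
{
	unsigned long backward_compat = BIT(QUIRK_A) | BIT(QUIRK_B) | BIT(QUIRK_C);

	printf("backward_compat = %#lx\n", backward_compat);	/* prints 0x23 */
	return 0;
}
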
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 9e307eebd33f..bea2da1c4c51 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -56,9 +56,9 @@
* element can also contain an immediate command, requesting the IPA perform
* actions other than data transfer.
*
- * Each TRE refers to a block of data--also located DRAM. After writing one
- * or more TREs to a channel, the writer (either the IPA or an EE) writes a
- * doorbell register to inform the receiving side how many elements have
+ * Each TRE refers to a block of data--also located in DRAM. After writing
+ * one or more TREs to a channel, the writer (either the IPA or an EE) writes
+ * a doorbell register to inform the receiving side how many elements have
* been written.
*
* Each channel has a GSI "event ring" associated with it. An event ring
@@ -710,43 +710,32 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- const struct list_head *list;
+ u32 pending_id = trans_info->pending_id;
struct gsi_trans *trans;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* There is a small chance a TX transaction got allocated just
- * before we disabled transmits, so check for that.
- */
- if (channel->toward_ipa) {
- list = &trans_info->alloc;
- if (!list_empty(list))
- goto done;
- list = &trans_info->committed;
- if (!list_empty(list))
- goto done;
- list = &trans_info->pending;
- if (!list_empty(list))
- goto done;
+ u16 trans_id;
+
+ if (channel->toward_ipa && pending_id != trans_info->free_id) {
+ /* There is a small chance a TX transaction got allocated
+ * just before we disabled transmits, so check for that.
+ * The last allocated, committed, or pending transaction
+ * precedes the first free transaction.
+ */
+ trans_id = trans_info->free_id - 1;
+ } else if (trans_info->polled_id != pending_id) {
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ *
+ * The last completed or polled transaction precedes the
+ * first pending transaction.
+ */
+ trans_id = pending_id - 1;
+ } else {
+ return NULL;
}
- /* Otherwise (TX or RX) we want to wait for anything that
- * has completed, or has been polled but not released yet.
- */
- list = &trans_info->complete;
- if (!list_empty(list))
- goto done;
- list = &trans_info->polled;
- if (list_empty(list))
- list = NULL;
-done:
- trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
-
/* Caller will wait for this, so take a reference */
- if (trans)
- refcount_inc(&trans->refcount);
-
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ refcount_inc(&trans->refcount);
return trans;
}
@@ -1358,8 +1347,8 @@ gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
* we update transactions to record their actual received lengths.
*
* When an event for a TX channel arrives we use information in the
- * transaction to report the number of requests and bytes have been
- * transferred.
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
*
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
@@ -1485,8 +1474,8 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
-/* Consult hardware, move any newly completed transactions to completed list */
-static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
+/* Consult hardware, move newly completed transactions to completed state */
+void gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1505,12 +1494,12 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return NULL;
+ return;
/* Get the transaction for the latest completed event. */
trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
if (!trans)
- return NULL;
+ return;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
@@ -1518,8 +1507,6 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
* up the network stack.
*/
gsi_evt_ring_update(gsi, evt_ring_id, index);
-
- return gsi_channel_trans_complete(channel);
}
/**
@@ -1528,21 +1515,18 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
*
* Return: Transaction pointer, or null if none are available
*
- * This function returns the first entry on a channel's completed transaction
- * list. If that list is empty, the hardware is consulted to determine
- * whether any new transactions have completed. If so, they're moved to the
- * completed list and the new first entry is returned. If there are no more
- * completed transactions, a null pointer is returned.
+ * This function returns the first of a channel's completed transactions.
+ * If no transactions are in completed state, the hardware is consulted to
+ * determine whether any new transactions have completed. If so, they're
+ * moved to completed state and the first such transaction is returned.
+ * If there are no more completed transactions, a null pointer is returned.
*/
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
struct gsi_trans *trans;
- /* Get the first transaction from the completed list */
+ /* Get the first completed transaction */
trans = gsi_channel_trans_complete(channel);
- if (!trans) /* List is empty; see if there's more to do */
- trans = gsi_channel_update(channel);
-
if (trans)
gsi_trans_move_polled(trans);
@@ -1623,7 +1607,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_poll);
else
netif_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ gsi_channel_poll);
return 0;
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 23de5f67374c..49dcadba4e0b 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -31,14 +31,6 @@ struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;
-/* Execution environment IDs */
-enum gsi_ee_id {
- GSI_EE_AP = 0x0,
- GSI_EE_MODEM = 0x1,
- GSI_EE_UC = 0x2,
- GSI_EE_TZ = 0x3,
-};
-
struct gsi_ring {
void *virt; /* ring array base address */
dma_addr_t addr; /* primarily low 32 bits used */
@@ -82,18 +74,18 @@ struct gsi_trans_pool {
struct gsi_trans_info {
atomic_t tre_avail; /* TREs available for allocation */
- struct gsi_trans_pool pool; /* transaction pool */
+
+ u16 free_id; /* first free trans in array */
+ u16 allocated_id; /* first allocated transaction */
+ u16 committed_id; /* first committed transaction */
+ u16 pending_id; /* first pending transaction */
+ u16 completed_id; /* first completed transaction */
+ u16 polled_id; /* first polled transaction */
+ struct gsi_trans *trans; /* transaction array */
struct gsi_trans **map; /* TRE -> transaction map */
struct gsi_trans_pool sg_pool; /* scatterlist pool */
struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
-
- spinlock_t spinlock; /* protects updates to the lists */
- struct list_head alloc; /* allocated, not committed */
- struct list_head committed; /* committed, awaiting doorbell */
- struct list_head pending; /* pending, awaiting completion */
- struct list_head complete; /* completed, awaiting poll */
- struct list_head polled; /* returned by gsi_channel_poll_one() */
};
/* Hardware values signifying the state of a channel */
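
The struct gsi_trans_info rework above replaces the five spinlock-protected lists with a single transaction array plus six free-running u16 IDs, one per lifecycle state (free, allocated, committed, pending, completed, polled). A transaction's state is implied by which ID interval its index falls in, an entry is located with id % tre_count, and because the IDs are unsigned 16-bit counters the differences between them stay correct across wrap-around. A small stand-alone demonstration of that wrap-around arithmetic (the counter values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* free-running state IDs, in lifecycle order:
	 * polled <= completed <= pending <= committed <= allocated <= free (mod 2^16)
	 */
	uint16_t polled_id = 0xfff0, completed_id = 0xfff8;
	uint16_t pending_id = 0x0002, free_id = 0x0010;

	/* unsigned 16-bit subtraction gives the right counts despite the wrap */
	printf("completed, awaiting poll: %u\n",
	       (uint16_t)(pending_id - completed_id));	/* 10 */
	printf("in use (not yet freed):   %u\n",
	       (uint16_t)(free_id - polled_id));	/* 32 */
	return 0;
}
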
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index 0b2516fa21b5..c65f7c5cdc8d 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_PRIVATE_H_
#define _GSI_PRIVATE_H_
@@ -18,13 +18,13 @@ struct gsi_channel;
/**
* gsi_trans_move_complete() - Mark a GSI transaction completed
- * @trans: Transaction to commit
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_complete(struct gsi_trans *trans);
/**
* gsi_trans_move_polled() - Mark a transaction polled
- * @trans: Transaction to update
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_polled(struct gsi_trans *trans);
@@ -94,6 +94,14 @@ void gsi_channel_trans_exit(struct gsi_channel *channel);
*/
void gsi_channel_doorbell(struct gsi_channel *channel);
+/* gsi_channel_update() - Update knowledge of channel hardware state
+ * @channel: Channel to be updated
+ *
+ * Consult hardware, change the state of any newly-completed transactions
+ * on a channel.
+ */
+void gsi_channel_update(struct gsi_channel *channel);
+
/**
* gsi_ring_virt() - Return virtual address for a ring entry
* @ring: Ring whose address is to be translated
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 5bd8b31656d3..3763359f208f 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
@@ -55,14 +55,10 @@
/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c020 + 0x1000 * (ee))
+ (0x0000c020 + 0x1000 * GSI_EE_AP)
#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c024 + 0x1000 * (ee))
+ (0x0000c024 + 0x1000 * GSI_EE_AP)
/* All other register offsets are relative to gsi->virt */
@@ -81,9 +77,7 @@ enum gsi_channel_type {
};
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
- (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
@@ -112,9 +106,7 @@ chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
}
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
- (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
static inline u32 r_length_encoded(enum ipa_version version, u32 length)
@@ -125,19 +117,13 @@ static inline u32 r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
- (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
- (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_QOS_OFFSET(ch) \
- GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
- (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
@@ -158,29 +144,19 @@ enum gsi_prefetch_mode {
};
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
- (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
- (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
- (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
- (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
- (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define EV_CHTYPE_FMASK GENMASK(3, 0)
#define EV_EE_FMASK GENMASK(7, 4)
@@ -190,9 +166,7 @@ enum gsi_prefetch_mode {
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
- (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */
static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
{
@@ -202,83 +176,53 @@ static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
- (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
- (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
- (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
- (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define MODT_FMASK GENMASK(15, 0)
#define MODC_FMASK GENMASK(23, 16)
#define MOD_CNT_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
- (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
- (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
- (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
- (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
- (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
- (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
- (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
- GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
- (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+ (0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
- (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+ (0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
#define GSI_GSI_STATUS_OFFSET \
- GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
- (0x0001f000 + 0x4000 * (ee))
+ (0x0001f000 + 0x4000 * GSI_EE_AP)
#define ENABLED_FMASK GENMASK(0, 0)
#define GSI_CH_CMD_OFFSET \
- GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CH_CMD_OFFSET(ee) \
- (0x0001f008 + 0x4000 * (ee))
+ (0x0001f008 + 0x4000 * GSI_EE_AP)
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
@@ -293,9 +237,7 @@ enum gsi_ch_cmd_opcode {
};
#define GSI_EV_CH_CMD_OFFSET \
- GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
- (0x0001f010 + 0x4000 * (ee))
+ (0x0001f010 + 0x4000 * GSI_EE_AP)
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
@@ -307,9 +249,7 @@ enum gsi_evt_cmd_opcode {
};
#define GSI_GENERIC_CMD_OFFSET \
- GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
- (0x0001f018 + 0x4000 * (ee))
+ (0x0001f018 + 0x4000 * GSI_EE_AP)
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
@@ -326,9 +266,7 @@ enum gsi_generic_cmd_opcode {
/* The next register is present for IPA v3.5.1 and above */
#define GSI_GSI_HW_PARAM_2_OFFSET \
- GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
- (0x0001f040 + 0x4000 * (ee))
+ (0x0001f040 + 0x4000 * GSI_EE_AP)
#define IRAM_SIZE_FMASK GENMASK(2, 0)
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
@@ -357,13 +295,9 @@ enum gsi_iram_size {
/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
- (0x0001f080 + 0x4000 * (ee))
+ (0x0001f080 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
- (0x0001f088 + 0x4000 * (ee))
+ (0x0001f088 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
enum gsi_irq_type_id {
@@ -377,62 +311,38 @@ enum gsi_irq_type_id {
};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
- (0x0001f090 + 0x4000 * (ee))
+ (0x0001f090 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
- (0x0001f094 + 0x4000 * (ee))
+ (0x0001f094 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f098 + 0x4000 * (ee))
+ (0x0001f098 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f09c + 0x4000 * (ee))
+ (0x0001f09c + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a0 + 0x4000 * (ee))
+ (0x0001f0a0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a4 + 0x4000 * (ee))
+ (0x0001f0a4 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
- (0x0001f0b0 + 0x4000 * (ee))
+ (0x0001f0b0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
- (0x0001f0b8 + 0x4000 * (ee))
+ (0x0001f0b8 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f0c0 + 0x4000 * (ee))
+ (0x0001f0c0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
- (0x0001f100 + 0x4000 * (ee))
+ (0x0001f100 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
- (0x0001f108 + 0x4000 * (ee))
+ (0x0001f108 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f110 + 0x4000 * (ee))
+ (0x0001f110 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the GLOB_IRQ_* registers */
enum gsi_global_irq_id {
ERROR_INT = 0x0,
@@ -442,17 +352,11 @@ enum gsi_global_irq_id {
};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
- (0x0001f118 + 0x4000 * (ee))
+ (0x0001f118 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
- (0x0001f120 + 0x4000 * (ee))
+ (0x0001f120 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
- (0x0001f128 + 0x4000 * (ee))
+ (0x0001f128 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the (general) GSI_IRQ_* registers */
enum gsi_general_id {
BREAK_POINT = 0x0,
@@ -462,15 +366,11 @@ enum gsi_general_id {
};
#define GSI_CNTXT_INTSET_OFFSET \
- GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
- (0x0001f180 + 0x4000 * (ee))
+ (0x0001f180 + 0x4000 * GSI_EE_AP)
#define INTYPE_FMASK GENMASK(0, 0)
#define GSI_ERROR_LOG_OFFSET \
- GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
- (0x0001f200 + 0x4000 * (ee))
+ (0x0001f200 + 0x4000 * GSI_EE_AP)
/* Fields below are present for IPA v3.5.1 and above */
#define ERR_ARG3_FMASK GENMASK(3, 0)
@@ -501,14 +401,10 @@ enum gsi_err_type {
};
#define GSI_ERROR_LOG_CLR_OFFSET \
- GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
- (0x0001f210 + 0x4000 * (ee))
+ (0x0001f210 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SCRATCH_0_OFFSET \
- GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
- (0x0001f400 + 0x4000 * (ee))
+ (0x0001f400 + 0x4000 * GSI_EE_AP)
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
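
All of the gsi_reg.h changes above are the same transformation: the parameterized per-EE offset macros collapse into AP-only versions, with the execution-environment stride (0x4000) multiplied by GSI_EE_AP directly instead of a macro argument. A small stand-alone check of one resulting offset (GSI_EE_AP is 0x0 for the AP, per the enum shown being removed from gsi.h earlier in this diff):

#include <stdio.h>

#define GSI_EE_AP	0x0

#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
	(0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))

int main(void)
{
	/* doorbell register offset for channel 3 */
	printf("%#x\n", (unsigned int)GSI_CH_C_DOORBELL_0_OFFSET(3));	/* 0x1e018 */
	return 0;
}
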
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 18e7e8c405be..26b7f683a3e1 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -22,37 +22,36 @@
* DOC: GSI Transactions
*
* A GSI transaction abstracts the behavior of a GSI channel by representing
- * everything about a related group of IPA commands in a single structure.
- * (A "command" in this sense is either a data transfer or an IPA immediate
+ * everything about a related group of IPA operations in a single structure.
+ * (An "operation" in this sense is either a data transfer or an IPA immediate
* command.) Most details of interaction with the GSI hardware are managed
- * by the GSI transaction core, allowing users to simply describe commands
+ * by the GSI transaction core, allowing users to simply describe operations
* to be performed. When a transaction has completed a callback function
* (dependent on the type of endpoint associated with the channel) allows
* cleanup of resources associated with the transaction.
*
- * To perform a command (or set of them), a user of the GSI transaction
+ * To perform an operation (or set of them), a user of the GSI transaction
* interface allocates a transaction, indicating the number of TREs required
- * (one per command). If sufficient TREs are available, they are reserved
+ * (one per operation). If sufficient TREs are available, they are reserved
* for use in the transaction and the allocation succeeds. This way
- * exhaustion of the available TREs in a channel ring is detected
- * as early as possible. All resources required to complete a transaction
- * are allocated at transaction allocation time.
+ * exhaustion of the available TREs in a channel ring is detected as early
+ * as possible. Any other resources that might be needed to complete a
+ * transaction are also allocated when the transaction is allocated.
*
- * Commands performed as part of a transaction are represented in an array
- * of Linux scatterlist structures. This array is allocated with the
- * transaction, and its entries are initialized using standard scatterlist
- * functions (such as sg_set_buf() or skb_to_sgvec()).
+ * Operations performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures, allocated with the transaction. These
+ * scatterlist structures are initialized by "adding" operations to the
+ * transaction. If a buffer in an operation must be mapped for DMA, this is
+ * done at the time it is added to the transaction. It is possible for a
+ * mapping error to occur when an operation is added. In this case the
+ * transaction should simply be freed; this correctly releases resources
+ * associated with the transaction.
*
- * Once a transaction's scatterlist structures have been initialized, the
- * transaction is committed. The caller is responsible for mapping buffers
- * for DMA if necessary, and this should be done *before* allocating
- * the transaction. Between a successful allocation and commit of a
- * transaction no errors should occur.
- *
- * Committing transfers ownership of the entire transaction to the GSI
- * transaction core. The GSI transaction code formats the content of
- * the scatterlist array into the channel ring buffer and informs the
- * hardware that new TREs are available to process.
+ * Once all operations have been successfully added to a transaction, the
+ * transaction is committed. Committing transfers ownership of the entire
+ * transaction to the GSI transaction core. The GSI transaction code
+ * formats the content of the scatterlist array into the channel ring
+ * buffer and informs the hardware that new TREs are available to process.
*
* The last TRE in each transaction is marked to interrupt the AP when the
* GSI hardware has completed it. Because transfers described by TREs are
@@ -125,11 +124,10 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
memset(pool, 0, sizeof(*pool));
}
-/* Allocate the requested number of (zeroed) entries from the pool */
-/* Home-grown DMA pool. This way we can preallocate and use the tre_count
- * to guarantee allocations will succeed. Even though we specify max_alloc
- * (and it can be more than one), we only allow allocation of a single
- * element from a DMA pool.
+/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
+ * allocations will succeed. The immediate commands in a transaction can
+ * require up to max_alloc elements from the pool. But we only allow
+ * allocation of a single element from a DMA pool at a time.
*/
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
size_t size, u32 count, u32 max_alloc)
@@ -237,68 +235,63 @@ gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
- return list_first_entry_or_null(&channel->trans_info.complete,
- struct gsi_trans, links);
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_id = trans_info->completed_id;
+
+ if (trans_id == trans_info->pending_id) {
+ gsi_channel_update(channel);
+ if (trans_id == trans_info->pending_id)
+ return NULL;
+ }
+
+ return &trans_info->trans[trans_id %= channel->tre_count];
}
-/* Move a transaction from the allocated list to the committed list */
+/* Move a transaction from allocated to committed state */
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
-
- list_move_tail(&trans->links, &trans_info->committed);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* This allocated transaction is now committed */
+ trans_info->allocated_id++;
}
-/* Move transactions from the committed list to the pending list */
+/* Move committed transactions to pending state */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct list_head list;
-
- spin_lock_bh(&trans_info->spinlock);
+ u16 trans_index = trans - &trans_info->trans[0];
+ u16 delta;
- /* Move this transaction and all predecessors to the pending list */
- list_cut_position(&list, &trans_info->committed, &trans->links);
- list_splice_tail(&list, &trans_info->pending);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* These committed transactions are now pending */
+ delta = trans_index - trans_info->committed_id + 1;
+ trans_info->committed_id += delta % channel->tre_count;
}
-/* Move a transaction and all of its predecessors from the pending list
- * to the completed list.
- */
+/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct list_head list;
+ u16 trans_index = trans - trans_info->trans;
+ u16 delta;
- spin_lock_bh(&trans_info->spinlock);
-
- /* Move this transaction and all predecessors to completed list */
- list_cut_position(&list, &trans_info->pending, &trans->links);
- list_splice_tail(&list, &trans_info->complete);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* These pending transactions are now completed */
+ delta = trans_index - trans_info->pending_id + 1;
+ delta %= channel->tre_count;
+ trans_info->pending_id += delta;
}
-/* Move a transaction from the completed list to the polled list */
+/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
-
- list_move_tail(&trans->links, &trans_info->polled);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* This completed transaction is now polled */
+ trans_info->completed_id++;
}
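
Taken together, the four helpers above replace the per-state lists with six monotonically increasing 16-bit IDs: a transaction's slot in the trans[] array is its ID modulo the channel's TRE count, and the number of transactions in a given state is the difference between two adjacent IDs. A minimal standalone sketch of that accounting follows; the structure and helpers here are illustrative stand-ins, not the driver's, and u16 is typedef'd locally.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

/* Illustrative mirror of the ID fields used by struct gsi_trans_info */
struct trans_ids {
	u16 free_id;       /* ID to assign to the next allocated transaction */
	u16 allocated_id;  /* oldest allocated, not yet committed */
	u16 committed_id;  /* oldest committed, not yet pending */
	u16 pending_id;    /* oldest pending, not yet completed */
	u16 completed_id;  /* oldest completed, not yet polled */
	u16 polled_id;     /* oldest polled, not yet freed */
};

/* Counts per state fall out of u16 subtraction, even across wraparound */
static u16 count_allocated(const struct trans_ids *ids)
{
	return ids->free_id - ids->allocated_id;
}

static u16 count_pending(const struct trans_ids *ids)
{
	return ids->committed_id - ids->pending_id;
}

static u16 count_completed(const struct trans_ids *ids)
{
	return ids->pending_id - ids->completed_id;
}

int main(void)
{
	/* IDs chosen to straddle the 16-bit wrap point */
	struct trans_ids ids = {
		.free_id = 4,		/* already wrapped past 65535 */
		.allocated_id = 65534,
		.committed_id = 65532,
		.pending_id = 65530,
		.completed_id = 65530,
		.polled_id = 65529,
	};

	printf("allocated: %d\n", count_allocated(&ids));	/* 6 */
	printf("pending:   %d\n", count_pending(&ids));		/* 2 */
	printf("completed: %d\n", count_completed(&ids));	/* 0 */

	return 0;
}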
/* Reserve some number of TREs on a channel. Returns true if successful */
@@ -343,20 +336,22 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_channel *channel = &gsi->channel[channel_id];
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
+ u16 trans_index;
if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
- /* We reserve the TREs now, but consume them at commit time.
- * If there aren't enough available, we're done.
- */
+ /* If we can't reserve the TREs for the transaction, we're done */
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the transaction */
- trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans_index = trans_info->free_id % channel->tre_count;
+ trans = &trans_info->trans[trans_index];
+ memset(trans, 0, sizeof(*trans));
+
+ /* Initialize non-zero fields in the transaction */
trans->gsi = gsi;
trans->channel_id = channel_id;
trans->rsvd_count = tre_count;
@@ -367,45 +362,37 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
sg_init_marker(trans->sgl, tre_count);
trans->direction = direction;
-
- spin_lock_bh(&trans_info->spinlock);
-
- list_add_tail(&trans->links, &trans_info->alloc);
-
- spin_unlock_bh(&trans_info->spinlock);
-
refcount_set(&trans->refcount, 1);
+ /* This free transaction is now allocated */
+ trans_info->free_id++;
+
return trans;
}
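
gsi_channel_trans_alloc() picks the slot for a new transaction as free_id modulo the TRE count and then increments free_id, letting the 16-bit counter wrap freely. A small sketch of that slot mapping across the wrap point; it assumes the TRE count is a power of two (so 65536 is a multiple of it), which this hunk does not itself show.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

/* Map a transaction ID to its trans[] slot (illustrative only) */
static u16 trans_slot(u16 trans_id, u16 tre_count)
{
	return trans_id % tre_count;
}

int main(void)
{
	u16 tre_count = 256;	/* assumed power-of-two ring size */
	u16 id;

	/* Slots stay consistent across the 16-bit wrap because 65536
	 * is a multiple of the (power-of-two) TRE count.
	 */
	for (id = 65533; id != 3; id++)
		printf("id %5d -> slot %d\n", id, trans_slot(id, tre_count));

	return 0;
}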
/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
- refcount_t *refcount = &trans->refcount;
struct gsi_trans_info *trans_info;
- bool last;
- /* We must hold the lock to release the last reference */
- if (refcount_dec_not_one(refcount))
+ if (!refcount_dec_and_test(&trans->refcount))
return;
+ /* Unused transactions are allocated but never committed, pending,
+ * completed, or polled.
+ */
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* Reference might have been added before we got the lock */
- last = refcount_dec_and_test(refcount);
- if (last)
- list_del(&trans->links);
-
- spin_unlock_bh(&trans_info->spinlock);
-
- if (!last)
- return;
-
- if (trans->used_count)
+ if (!trans->used_count) {
+ trans_info->allocated_id++;
+ trans_info->committed_id++;
+ trans_info->pending_id++;
+ trans_info->completed_id++;
+ } else {
ipa_gsi_trans_release(trans);
+ }
+
+ /* This transaction is now free */
+ trans_info->polled_id++;
/* Releasing the reserved TREs implicitly frees the sgl[] and
* (if present) info[] arrays, plus the transaction itself.
@@ -548,8 +535,8 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
*
* Formats channel ring TRE entries based on the content of the scatterlist.
* Maps a transaction pointer to the last ring entry used for the transaction,
- * so it can be recovered when it completes. Moves the transaction to the
- * pending list. Finally, updates the channel ring pointer and optionally
+ * so it can be recovered when it completes. Moves the transaction to
+ * pending state. Finally, updates the channel ring pointer and optionally
* rings the doorbell.
*/
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
@@ -654,23 +641,27 @@ void gsi_trans_complete(struct gsi_trans *trans)
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct gsi_trans *trans;
- bool cancelled;
+ u16 trans_id = trans_info->pending_id;
/* channel->gsi->mutex is held by caller */
- spin_lock_bh(&trans_info->spinlock);
- cancelled = !list_empty(&trans_info->pending);
- list_for_each_entry(trans, &trans_info->pending, links)
- trans->cancelled = true;
+ /* If there are no pending transactions, we're done */
+ if (trans_id == trans_info->committed_id)
+ return;
- list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+ /* Mark all pending transactions cancelled */
+ do {
+ struct gsi_trans *trans;
+
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ trans->cancelled = true;
+ } while (++trans_id != trans_info->committed_id);
- spin_unlock_bh(&trans_info->spinlock);
+ /* All pending transactions are now completed */
+ trans_info->pending_id = trans_info->committed_id;
/* Schedule NAPI polling to complete the cancelled transactions */
- if (cancelled)
- napi_schedule(&channel->napi);
+ napi_schedule(&channel->napi);
}
/* Issue a command to read a single byte from a channel */
@@ -736,10 +727,16 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
* modulo that number to determine the next one that's free.
* Transactions are allocated one at a time.
*/
- ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
- tre_max, 1);
- if (ret)
+ trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
+ GFP_KERNEL);
+ if (!trans_info->trans)
return -ENOMEM;
+ trans_info->free_id = 0; /* all modulo channel->tre_count */
+ trans_info->allocated_id = 0;
+ trans_info->committed_id = 0;
+ trans_info->pending_id = 0;
+ trans_info->completed_id = 0;
+ trans_info->polled_id = 0;
/* A completion event contains a pointer to the TRE that caused
* the event (which will be the last one used by the transaction).
@@ -765,19 +762,13 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
if (ret)
goto err_map_free;
- spin_lock_init(&trans_info->spinlock);
- INIT_LIST_HEAD(&trans_info->alloc);
- INIT_LIST_HEAD(&trans_info->committed);
- INIT_LIST_HEAD(&trans_info->pending);
- INIT_LIST_HEAD(&trans_info->complete);
- INIT_LIST_HEAD(&trans_info->polled);
return 0;
err_map_free:
kfree(trans_info->map);
err_trans_free:
- gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->trans);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
@@ -791,6 +782,6 @@ void gsi_channel_trans_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
gsi_trans_pool_exit(&trans_info->sg_pool);
- gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->trans);
kfree(trans_info->map);
}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 7084507830c2..30c1c2dc77c6 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _GSI_TRANS_H_
#define _GSI_TRANS_H_
@@ -29,7 +29,6 @@ struct gsi_trans_pool;
* struct gsi_trans - a GSI transaction
*
 * Most fields in this structure are for internal use by the transaction core code:
- * @links: Links for channel transaction lists by state
* @gsi: GSI pointer
* @channel_id: Channel number transaction is associated with
* @cancelled: If set by the core code, transaction was cancelled
@@ -50,8 +49,6 @@ struct gsi_trans_pool;
* received.
*/
struct gsi_trans {
- struct list_head links; /* gsi_channel lists */
-
struct gsi *gsi;
u8 channel_id;
@@ -77,7 +74,7 @@ struct gsi_trans {
/**
* gsi_trans_pool_init() - Initialize a pool of structures for transactions
- * @pool: GSI transaction poll pointer
+ * @pool: GSI transaction pool pointer
* @size: Size of elements in the pool
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 4fc3c72359f5..09ead433ec38 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_H_
#define _IPA_H_
@@ -44,6 +44,7 @@ struct ipa_interrupt;
* @uc_loaded: true after microcontroller has reported it's ready
* @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
+ * @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
@@ -90,6 +91,7 @@ struct ipa {
dma_addr_t reg_addr;
void __iomem *reg_virt;
+ const struct ipa_regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 6dea40259b60..26c3db9f52b1 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -32,7 +32,7 @@
* immediate command's opcode. The payload for a command resides in AP
* memory and is described by a single scatterlist entry in its transaction.
* Commands do not require a transaction completion callback, and are
- * (currently) always issued using gsi_trans_commit_wait().
+ * always issued using gsi_trans_commit_wait().
*/
/* Some commands can wait until indicated pipeline stages are clear */
@@ -305,6 +305,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
const char *name;
u32 offset;
@@ -312,7 +313,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -325,7 +327,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 9215ddad1010..8e4243c1f0bb 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_CMD_H_
#define _IPA_CMD_H_
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index e15eb3cd3e33..e5a6ce75c7dd 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
@@ -31,7 +31,7 @@
* communication path between the IPA and a particular execution environment
* (EE), such as the AP or Modem. Each EE has a set of channels associated
* with it, and each channel has an ID unique for that EE. For the most part
- * the only GSI channels of concern to this driver belong to the AP
+ * the only GSI channels of concern to this driver belong to the AP.
*
* An endpoint is an IPA construct representing a single channel anywhere
* in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 66d2bfdf9e42..093e11ec7c2d 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -23,8 +23,6 @@
#include "ipa_gsi.h"
#include "ipa_power.h"
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-
/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
@@ -72,14 +70,6 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_5)
- return field_max(aggr_byte_limit_fmask(true));
-
- return field_max(aggr_byte_limit_fmask(false));
-}
-
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
@@ -111,6 +101,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
+ const struct ipa_reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
@@ -171,7 +162,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
*/
aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- limit = aggr_byte_limit_max(ipa->version);
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+
+ limit = ipa_reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
@@ -182,6 +175,15 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true; /* Nothing more to check for RX */
}
+ /* Starting with IPA v4.5 sequencer replication is obsolete */
+ if (ipa->version >= IPA_VERSION_4_5) {
+ if (data->endpoint.config.tx.seq_rep_type) {
+ dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+ }
+
if (data->endpoint.config.status_enable) {
other_name = data->endpoint.config.tx.status_endpoint;
if (other_name >= count) {
@@ -299,8 +301,10 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
- u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 field_id;
+ u32 offset;
bool state;
u32 mask;
u32 val;
@@ -310,9 +314,13 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
- mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
-
+ reg = ipa_reg(ipa, ENDP_INIT_CTRL);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
+
+ field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
+ mask = ipa_reg_bit(reg, field_id);
+
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
@@ -339,13 +347,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
WARN_ON(!(mask & ipa->available));
- offset = ipa_reg_state_aggr_active_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
return !!(val & mask);
}
@@ -354,10 +362,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
WARN_ON(!(mask & ipa->available));
- iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+ reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
+ iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -456,6 +466,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
while (initialized) {
u32 endpoint_id = __ffs(initialized);
struct ipa_endpoint *endpoint;
+ const struct ipa_reg *reg;
u32 offset;
initialized ^= BIT(endpoint_id);
@@ -465,7 +476,8 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -485,22 +497,23 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_CFG);
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->config.checksum) {
- enum ipa_version version = endpoint->ipa->version;
+ enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
- u32 checksum_offset;
+ u32 off;
/* Checksum header offset is in 4-byte units */
- checksum_offset = sizeof(struct rmnet_map_header);
- checksum_offset /= sizeof(u32);
- val |= u32_encode_bits(checksum_offset,
- CS_METADATA_HDR_OFFSET_FMASK);
+ off = sizeof(struct rmnet_map_header) / sizeof(u32);
+ val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -513,24 +526,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
+ val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
- u32 offset;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
- offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
- val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
+ reg = ipa_reg(ipa, ENDP_INIT_NAT);
+ val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static u32
@@ -554,6 +569,50 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
return header_size;
}
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static u32 ipa_header_size_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 header_size)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(header_size > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ header_size >>= hweight32(field_max);
+ WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
+ val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+
+ return val;
+}
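+
ipa_header_size_encode() above splits a header length across two fields once it no longer fits in HDR_LEN alone: the low bits land in HDR_LEN and, for IPA v4.5+, the remainder in HDR_LEN_MSB. A standalone sketch of that split follows; the field positions and widths used here are made up for illustration, the real ones come from the per-version ipa_reg definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical layout: HDR_LEN in bits 0-5, HDR_LEN_MSB in bits 8-9 */
#define HDR_LEN_MAX		0x3fu
#define HDR_LEN_MSB_MAX		0x3u
#define HDR_LEN_MSB_SHIFT	8

static u32 header_size_encode(u32 header_size)
{
	u32 val;

	/* Low-order bits of the size go in the original HDR_LEN field... */
	val = header_size & HDR_LEN_MAX;
	/* ...and the remaining bits in HDR_LEN_MSB (IPA v4.5+ only) */
	val |= ((header_size >> 6) & HDR_LEN_MSB_MAX) << HDR_LEN_MSB_SHIFT;

	return val;
}

int main(void)
{
	u32 size = 100;		/* too big for the 6-bit field alone */

	/* 100 = 0x64: HDR_LEN holds 0x24, HDR_LEN_MSB holds 0x1 -> 0x124 */
	printf("encoded: 0x%x\n", header_size_encode(size));

	return 0;
}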
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static u32 ipa_metadata_offset_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 offset)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(offset > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(field_max);
+ WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+
+ return val;
+}
+
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -577,36 +636,38 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_HDR);
if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
header_size = ipa_qmap_header_size(version, endpoint);
- val = ipa_header_size_encoded(version, header_size);
+ val = ipa_header_size_encode(version, reg, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 offset; /* Field offset within header */
+ u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
- offset = offsetof(struct rmnet_map_header, mux_id);
- val |= ipa_metadata_offset_encoded(version, offset);
+ off = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encode(version, reg, off);
/* Where IPA will write the length */
- offset = offsetof(struct rmnet_map_header, pkt_len);
+ off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
+ off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
- val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
+ val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -614,19 +675,21 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->config.rx.pad_align;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
- val |= HDR_ENDIANNESS_FMASK; /* big endian */
+ val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
@@ -636,16 +699,16 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
- val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
+ val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+ val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
@@ -653,191 +716,170 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 offset;
+ u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 off; /* Field offset within header */
- offset = offsetof(struct rmnet_map_header, pkt_len);
- offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
- val |= u32_encode_bits(offset,
- HDR_OFST_PKT_SIZE_MSB_FMASK);
+ off = offsetof(struct rmnet_map_header, pkt_len);
+ /* Low bits are in the ENDP_INIT_HDR register */
+ off >>= hweight32(mask);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + offset);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
- offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_MODE);
if (endpoint->config.dma_mode) {
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
- u32 dma_endpoint_id;
+ u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
-
- val = u32_encode_bits(IPA_DMA, MODE_FMASK);
- val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
}
-/* Encoded values for AGGR endpoint register fields */
-static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
+/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
+ * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
+ * they're configured to have granularity 100 usec and 1 msec, respectively.
+ *
+ * The return value is the positive or negative Qtime value to use to
+ * express the (microsecond) time provided. A positive return value
+ * means pulse generator 0 can be used; otherwise use pulse generator 1.
+ */
+static int ipa_qtime_val(u32 microseconds, u32 max)
{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
+ u32 val;
- return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
+ /* Use 100 microsecond granularity if possible */
+ val = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (val <= max)
+ return (int)val;
+
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ val = DIV_ROUND_CLOSEST(microseconds, 1000);
+ WARN_ON(val > max);
+
+ return (int)-val;
}
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
- u32 gran_sel;
- u32 fmask;
+ u32 max;
u32 val;
- if (version < IPA_VERSION_4_5) {
- /* We set aggregation granularity in ipa_hardware_config() */
- fmask = aggr_time_limit_fmask(true);
- val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
- WARN(val > field_max(fmask),
- "aggr_time_limit too large (%u > %u usec)\n",
- val, field_max(fmask) * IPA_AGGR_GRANULARITY);
-
- return u32_encode_bits(val, fmask);
- }
-
- /* IPA v4.5 expresses the time limit using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- fmask = aggr_time_limit_fmask(false);
- val = DIV_ROUND_CLOSEST(limit, 100);
- if (val > field_max(fmask)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = AGGR_GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(limit, 1000);
- WARN(val > field_max(fmask),
- "aggr_time_limit too large (%u > %u usec)\n",
- limit, field_max(fmask) * 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
- }
+ if (!microseconds)
+ return 0; /* Nothing to compute if time limit is 0 */
- return gran_sel | u32_encode_bits(val, fmask);
-}
+ max = ipa_reg_field_max(reg, TIME_LIMIT);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
-static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
-{
- u32 val = enabled ? 1 : 0;
+ return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
+ }
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
+ /* We program aggregation granularity in ipa_hardware_config() */
+ val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ microseconds, max * IPA_AGGR_GRANULARITY);
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
+ return ipa_reg_encode(reg, TIME_LIMIT, val);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
- enum ipa_version version = endpoint->ipa->version;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
- bool close_eof;
u32 limit;
rx_config = &endpoint->config.rx;
- val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- val |= aggr_byte_limit_encoded(version, limit);
+ val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
- val |= aggr_time_limit_encoded(version, limit);
+ val |= aggr_time_limit_encode(ipa, reg, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = rx_config->aggr_close_eof;
- val |= aggr_sw_eof_active_encoded(version, close_eof);
+ if (rx_config->aggr_close_eof)
+ val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
- AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
-}
-
-/* Return the Qtime-based head-of-line blocking timer value that
- * represents the given number of microseconds. The result
- * includes both the timer value and the selected timer granularity.
- */
-static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
-{
- u32 gran_sel;
- u32 val;
-
- /* IPA v4.5 expresses time limits using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val > field_max(TIME_LIMIT_FMASK)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
- }
-
- return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -845,12 +887,11 @@ static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
* derived from the 19.2 MHz SoC XO clock. For older IPA versions
* each tick represents 128 cycles of the IPA core clock.
*
- * Return the encoded value that should be written to that register
- * that represents the timeout period provided. For IPA v4.2 this
- * encodes a base and scale value, while for earlier versions the
- * value is a simple tick count.
+ * Return the encoded value representing the timeout period provided
+ * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
u32 width;
u32 scale;
@@ -862,18 +903,34 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
- if (ipa->version >= IPA_VERSION_4_5)
- return hol_block_timer_qtime_val(ipa, microseconds);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
- /* Use 64 bit arithmetic to avoid overflow... */
+ return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ }
+
+ /* Use 64 bit arithmetic to avoid overflow */
rate = ipa_core_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
- /* ...but we still need to fit into a 32-bit register */
- WARN_ON(ticks > U32_MAX);
+
+ /* We still need the result to fit into the field */
+ WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return (u32)ticks;
+ return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -883,8 +940,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
* count, and extract the number of bits in the base field
* such that high bit is included.
*/
- high = fls(ticks); /* 1..32 */
- width = HWEIGHT32(BASE_VALUE_FMASK);
+ high = fls(ticks); /* 1..32 (or warning above) */
+ width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -894,8 +951,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
scale++;
}
- val = u32_encode_bits(scale, SCALE_FMASK);
- val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
+ val = ipa_reg_encode(reg, TIMER_SCALE, scale);
+ val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
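
For IPA v4.2 the head-of-line blocking tick count is stored as a base value plus a power-of-two scale, as computed at the end of hol_block_timer_encode(). The standalone sketch below walks that arithmetic for a 1 millisecond timeout with a 19.2 MHz core clock; the 5-bit base-field width is an assumption for illustration, not the real TIMER_BASE_VALUE width.

#include <stdint.h>
#include <stdio.h>

/* Find last (most significant) set bit, 1-based; 0 if no bits are set */
static unsigned int fls32(uint32_t x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}

	return r;
}

int main(void)
{
	uint64_t rate = 19200000;	/* 19.2 MHz core clock */
	uint32_t microseconds = 1000;	/* 1 msec HOL block timeout */
	uint32_t width = 5;		/* assumed TIMER_BASE_VALUE width */
	uint32_t ticks, high, scale, base;

	/* Tick count, rounded to nearest; each tick is 128 clock cycles */
	ticks = (microseconds * rate + (128 * 1000000) / 2) / (128 * 1000000);

	high = fls32(ticks);				/* 150 -> 8 */
	scale = high > width ? high - width : 0;	/* 8 - 5 = 3 */
	if (scale) {
		/* Round up to get a closer result */
		ticks += 1 << (scale - 1);		/* 150 + 4 = 154 */
		/* Rounding up may have pushed the high bit up one place */
		if (fls32(ticks) != high)
			scale++;
	}
	base = ticks >> scale;				/* 154 >> 3 = 19 */

	/* Hardware counts roughly base << scale = 152 ticks (~1013 usec) */
	printf("ticks=%u base=%u scale=%u -> %u ticks\n",
	       ticks, base, scale, base << scale);

	return 0;
}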
@@ -906,28 +963,34 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
- val = hol_block_timer_val(ipa, microseconds);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
+ val = hol_block_timer_encode(ipa, reg, microseconds);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- val = enable ? HOL_BLOCK_EN_FMASK : 0;
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
+ val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+
+ iowrite32(val, ipa->reg_virt + offset);
+
/* When enabling, the register must be written twice for IPA v4.5+ */
- if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ if (enable && ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
@@ -960,46 +1023,58 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ u32 resource_group = endpoint->config.resource_group;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
- val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
+ val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
- u32 val = 0;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_SEQ);
+
/* Low-order byte configures primary packet processing */
- val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
+ val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
- /* Second byte configures replicated packet processing */
- val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
- SEQ_REP_TYPE_FMASK);
+ /* Second byte (if supported) configures replicated packet processing */
+ if (ipa->version < IPA_VERSION_4_5)
+ val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/**
@@ -1049,13 +1124,12 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
- val |= STATUS_EN_FMASK;
+ val |= ipa_reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
@@ -1063,16 +1137,16 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= u32_encode_bits(status_endpoint_id,
- STATUS_ENDP_FMASK);
+ val |= ipa_reg_encode(reg, STATUS_ENDP,
+ status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning status element precedes
- * packet (not present for IPA v4.5)
+ * packet (not present for IPA v4.5+)
*/
- /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
@@ -1412,16 +1486,18 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
+ const struct ipa_reg *reg;
u32 val;
+ reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_HDR_TABLE_FMASK;
- val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
- val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+ val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ /* ROUTE_DEF_HDR_OFST is 0 */
+ val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1765,6 +1841,7 @@ void ipa_endpoint_teardown(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
u32 initialized;
u32 rx_base;
u32 rx_mask;
@@ -1791,11 +1868,12 @@ int ipa_endpoint_config(struct ipa *ipa)
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
- val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+ reg = ipa_reg(ipa, FLAVOR_0);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
+ rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1804,7 +1882,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
+ max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 28e0a7386fd7..d8dfa24f5214 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 307bed2ee707..c269432f9c2e 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
/* DOC: IPA Interrupts
@@ -53,13 +53,15 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
+ const struct ipa_reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
if (uc_irq)
iowrite32(mask, ipa->reg_virt + offset);
@@ -80,6 +82,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ const struct ipa_reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -95,7 +98,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- offset = ipa_reg_irq_stts_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_STTS);
+ offset = ipa_reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -112,7 +116,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
iowrite32(pending, ipa->reg_virt + offset);
}
out_power_put:
@@ -128,6 +133,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id);
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
@@ -137,7 +143,8 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_en_offset(ipa->version);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
+ offset = ipa_reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
@@ -164,18 +171,18 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- offset = ipa_reg_irq_suspend_info_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
@@ -189,7 +196,7 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
return;
@@ -198,8 +205,9 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Remove the handler for an IPA interrupt type */
@@ -207,15 +215,16 @@ void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
return;
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
interrupt->handler[ipa_irq] = NULL;
}
@@ -225,8 +234,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
+ const struct ipa_reg *reg;
unsigned int irq;
- u32 offset;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
@@ -244,8 +253,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(0, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 231390cea52a..f31fd9965fdc 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_INTERRUPT_H_
#define _IPA_INTERRUPT_H_
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 32962d885acd..3461ad3029ab 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -183,31 +183,97 @@ static void ipa_teardown(struct ipa *ipa)
gsi_teardown(&ipa->gsi);
}
+static void
+ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
+{
+ const struct ipa_reg *reg;
+ u32 val;
+
+ /* IPA v4.5+ has no backward compatibility register */
+ if (ipa->version >= IPA_VERSION_4_5)
+ return;
+
+ reg = ipa_reg(ipa, IPA_BCR);
+ val = data->backward_compat;
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_tx(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
+ return;
+
+ /* Disable PA mask to allow HOLB drop */
+ reg = ipa_reg(ipa, IPA_TX_CFG);
+ offset = ipa_reg_offset(reg);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_hardware_config_clkon(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 val;
+
+ if (version >= IPA_VERSION_4_5)
+ return;
+
+ if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
+ return;
+
+ /* Implement some hardware workarounds */
+ reg = ipa_reg(ipa, CLKON_CFG);
+ if (version == IPA_VERSION_3_1) {
+ /* Disable MISC clock gating */
+ val = ipa_reg_bit(reg, CLKON_MISC);
+ } else { /* IPA v4.0+ */
+ /* Enable open global clocks in the CLKON configuration */
+ val = ipa_reg_bit(reg, CLKON_GLOBAL);
+ val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ }
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Nothing to configure prior to IPA v4.0 */
if (ipa->version < IPA_VERSION_4_0)
return;
- val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ reg = ipa_reg(ipa, COMP_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
- val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
- val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
- /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
+ /* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
- val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
- iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Configure DDR and (possibly) PCIe max read/write QSB values */
@@ -216,6 +282,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
+ const struct ipa_reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -224,25 +291,31 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
/* Max outstanding write accesses for QSB masters */
- val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_WRITES);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= u32_encode_bits(data1->max_writes,
- GEN_QMB_1_MAX_WRITES_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
+ data1->max_writes);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
- val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_READS);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data0->max_reads_beats,
- GEN_QMB_0_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val |= u32_encode_bits(data1->max_reads,
- GEN_QMB_1_MAX_READS_FMASK);
+ val = ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
+ data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data1->max_reads_beats,
- GEN_QMB_1_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -278,48 +351,96 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
- iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
+ val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+ val |= ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
- val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
+ val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
- val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+
+ iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ val |= ipa_reg_bit(reg, DIV_ENABLE);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Before IPA v4.5, timing is controlled by a counter register */
+static void ipa_hardware_config_counter(struct ipa *ipa)
+{
+ u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ const struct ipa_reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, COUNTER_CFG);
+ /* If defined, EOT_COAL_GRANULARITY is 0 */
+ val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_timing(struct ipa *ipa)
+{
+ if (ipa->version < IPA_VERSION_4_5)
+ ipa_hardware_config_counter(ipa);
+ else
+ ipa_qtime_config(ipa);
+}
+
+static void ipa_hardware_config_hashing(struct ipa *ipa)
+{
+ const struct ipa_reg *reg;
+
+ if (ipa->version != IPA_VERSION_4_2)
+ return;
+
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
+
+ /* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
+ * IPV4_FILTER_HASH are all zero.
+ */
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- val = u32_encode_bits(enter_idle_debounce_thresh,
- ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
+ val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= CONST_NON_IDLE_ENABLE_FMASK;
+ val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
- offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -349,55 +470,13 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
- enum ipa_version version = ipa->version;
- u32 granularity;
- u32 val;
-
- /* IPA v4.5+ has no backward compatibility register */
- if (version < IPA_VERSION_4_5) {
- val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
- }
-
- /* Implement some hardware workarounds */
- if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
- /* Disable PA mask to allow HOLB drop */
- val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
-
- /* Enable open global clocks in the CLKON configuration */
- val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
- } else if (version == IPA_VERSION_3_1) {
- val = MISC_FMASK; /* Disable MISC clock gating */
- } else {
- val = 0; /* No CLKON configuration needed */
- }
- if (val)
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
+ ipa_hardware_config_bcr(ipa, data);
+ ipa_hardware_config_tx(ipa);
+ ipa_hardware_config_clkon(ipa);
ipa_hardware_config_comp(ipa);
-
- /* Configure system bus limits */
ipa_hardware_config_qsb(ipa, data);
-
- if (version < IPA_VERSION_4_5) {
- /* Configure aggregation timer granularity */
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- } else {
- ipa_qtime_config(ipa);
- }
-
- /* IPA v4.2 does not support hashed tables, so disable them */
- if (version == IPA_VERSION_4_2) {
- u32 offset = ipa_reg_filt_rout_hash_en_offset(version);
-
- iowrite32(0, ipa->reg_virt + offset);
- }
-
- /* Enable dynamic clock division */
+ ipa_hardware_config_timing(ipa);
+ ipa_hardware_config_hashing(ipa);
ipa_hardware_dcd_config(ipa);
}
@@ -612,29 +691,6 @@ static void ipa_validate_build(void)
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
- BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY_FMASK));
-}
-
-static bool ipa_version_valid(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_0:
- case IPA_VERSION_3_1:
- case IPA_VERSION_3_5:
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- case IPA_VERSION_4_2:
- case IPA_VERSION_4_5:
- case IPA_VERSION_4_7:
- case IPA_VERSION_4_9:
- case IPA_VERSION_4_11:
- return true;
-
- default:
- return false;
- }
}
/**
@@ -678,8 +734,8 @@ static int ipa_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!ipa_version_valid(data->version)) {
- dev_err(dev, "invalid IPA version\n");
+ if (!ipa_version_supported(data->version)) {
+ dev_err(dev, "unsupported IPA version %u\n", data->version);
return -EINVAL;
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1e9eae208e44..f84c6830495a 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -112,8 +113,10 @@ int ipa_mem_setup(struct ipa *ipa)
/* Tell the hardware where the processing context area is located */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
- val = proc_cntxt_base_addr_encoded(ipa->version, offset);
- iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
+
+ reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
+ val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
return 0;
}
@@ -306,6 +309,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
@@ -314,12 +318,14 @@ int ipa_mem_config(struct ipa *ipa)
u32 i;
/* Check the advertised location and size of the shared memory area */
- val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+ reg = ipa_reg(ipa, SHARED_MEM_SIZE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+ mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
@@ -568,7 +574,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index c8b1c4d9c507..423422a2a445 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/errno.h>
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index e64ccc2402e9..d85718db9a57 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_MODEM_H_
#define _IPA_MODEM_H_
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index db5ac7552286..8420f93128a2 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/clk.h>
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 6f84f057a209..896f052e51a1 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_POWER_H_
#define _IPA_POWER_H_
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index ec010cf2e816..8295fd4b70d1 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 856ef629ccc8..1c236826c17a 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_H_
#define _IPA_QMI_H_
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 6838e8065072..97c0befe8d86 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/stddef.h>
#include <linux/soc/qcom/qmi.h>
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 495e85abe50b..e29663965f43 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_MSG_H_
#define _IPA_QMI_MSG_H_
@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes; however, it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
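To make the two interpretations of the end field concrete, here is a minimal sketch following the init_modem_driver_req() changes in ipa_qmi.c above; the helper name and the mem/mem_offset parameters are illustrative, not part of the patch:

/* Sketch only: for the modem routing tables "end" is the last table
 * *index* the modem may use, while "start" is still a byte offset.
 */
static void ipa_fill_v4_route_bounds(struct ipa_init_modem_driver_req *req,
				     const struct ipa_mem *mem, u32 mem_offset)
{
	req->v4_route_tbl_info_valid = 1;
	req->v4_route_tbl_info.start = mem_offset + mem->offset;	/* bytes */
	req->v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;	/* index */
}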
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index e6147a1cd787..22f067741d9b 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/io.h>
@@ -9,11 +9,105 @@
#include "ipa.h"
#include "ipa_reg.h"
+/* Is this register valid and defined for the current IPA version? */
+static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ enum ipa_version version = ipa->version;
+ bool valid;
+
+ /* Check for bogus (out of range) register IDs */
+ if ((u32)reg_id >= ipa->regs->reg_count)
+ return false;
+
+ switch (reg_id) {
+ case IPA_BCR:
+ case COUNTER_CFG:
+ valid = version < IPA_VERSION_4_5;
+ break;
+
+ case IPA_TX_CFG:
+ case FLAVOR_0:
+ case IDLE_INDICATION_CFG:
+ valid = version >= IPA_VERSION_3_5;
+ break;
+
+ case QTIME_TIMESTAMP_CFG:
+ case TIMERS_XO_CLK_DIV_CFG:
+ case TIMERS_PULSE_GRAN_CFG:
+ valid = version >= IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_45_RSRC_TYPE:
+ case DST_RSRC_GRP_45_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_67_RSRC_TYPE:
+ case DST_RSRC_GRP_67_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1;
+ break;
+
+ case ENDP_FILTER_ROUTER_HSH_CFG:
+ valid = version != IPA_VERSION_4_2;
+ break;
+
+ case IRQ_SUSPEND_EN:
+ case IRQ_SUSPEND_CLR:
+ valid = version >= IPA_VERSION_3_1;
+ break;
+
+ default:
+ valid = true; /* Others should be defined for all versions */
+ break;
+ }
+
+ /* To be valid, it must be defined */
+
+ return valid && ipa->regs->reg[reg_id];
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ return NULL;
+
+ return ipa->regs->reg[reg_id];
+}
+
+static const struct ipa_regs *ipa_regs(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ return &ipa_regs_v3_1;
+ case IPA_VERSION_3_5_1:
+ return &ipa_regs_v3_5_1;
+ case IPA_VERSION_4_2:
+ return &ipa_regs_v4_2;
+ case IPA_VERSION_4_5:
+ return &ipa_regs_v4_5;
+ case IPA_VERSION_4_9:
+ return &ipa_regs_v4_9;
+ case IPA_VERSION_4_11:
+ return &ipa_regs_v4_11;
+ default:
+ return NULL;
+ }
+}
+
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_regs *regs;
struct resource *res;
+ regs = ipa_regs(ipa->version);
+ if (!regs)
+ return -EINVAL;
+
+ if (WARN_ON(regs->reg_count > IPA_REG_ID_COUNT))
+ return -EINVAL;
+
/* Setup IPA register memory */
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-reg");
@@ -28,6 +122,7 @@ int ipa_reg_init(struct ipa *ipa)
return -ENOMEM;
}
ipa->reg_addr = res->start;
+ ipa->regs = regs;
return 0;
}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..7bf70f70f63f 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
#include <linux/bitfield.h>
+#include <linux/bug.h>
#include "ipa_version.h"
@@ -16,304 +17,325 @@ struct ipa;
* DOC: IPA Registers
*
* IPA registers are located within the "ipa-reg" address space defined by
- * Device Tree. The offset of each register within that space is specified
- * by symbols defined below. The address space is mapped to virtual memory
- * space in ipa_mem_init(). All IPA registers are 32 bits wide.
+ * Device Tree. Each register has a specified offset within that space,
+ * which is mapped into virtual memory space in ipa_mem_init(). Each
+ * has a unique identifier, taken from the ipa_reg_id enumerated type.
+ * All IPA registers are 32 bits wide.
*
- * Certain register types are duplicated for a number of instances of
- * something. For example, each IPA endpoint has an set of registers
- * defining its configuration. The offset to an endpoint's set of registers
- * is computed based on an "base" offset, plus an endpoint's ID multiplied
- * and a "stride" value for the register. For such registers, the offset is
- * computed by a function-like macro that takes a parameter used in the
- * computation.
+ * Certain "parameterized" register types are duplicated for a number of
+ * instances of something. For example, each IPA endpoint has an set of
+ * registers defining its configuration. The offset to an endpoint's set
+ * of registers is computed based on an "base" offset, plus an endpoint's
+ * ID multiplied and a "stride" value for the register. Similarly, some
+ * registers have an offset that depends on execution environment. In
+ * this case, the stride is multiplied by a member of the gsi_ee_id
+ * enumerated type.
*
- * Some register offsets depend on execution environment. For these an "ee"
- * parameter is supplied to the offset macro. The "ee" value is a member of
- * the gsi_ee enumerated type.
+ * Each version of IPA implements an array of ipa_reg structures indexed
+ * by register ID. Each entry in the array specifies the base offset and
+ * (for parameterized registers) a non-zero stride value. Not all versions
+ * of IPA define all registers. The offset for a register is returned by
+ * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * zero is returned for an undefined register (this should never happen).
*
- * The offset of a register dependent on endpoint ID is computed by a macro
- * that is supplied a parameter "ep", "txep", or "rxep". A register with an
- * "ep" parameter is valid for any endpoint; a register with a "txep" or
- * "rxep" parameter is valid only for TX or RX endpoints, respectively. The
- * "*ep" value is assumed to be less than the maximum valid endpoint ID
- * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
- *
- * The offset of registers related to filter and route tables is computed
- * by a macro that is supplied a parameter "er". The "er" represents an
- * endpoint ID for filters, or a route ID for routes. For filters, the
- * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
- * because not all endpoints support filtering. For routes, the route ID
- * must be less than IPA_ROUTE_MAX.
- *
- * The offset of registers related to resource types is computed by a macro
- * that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
- * source endpoint resources or the ipa_resource_type_dst enumerated type
- * for destination endpoint resources.
- *
- * Some registers encode multiple fields within them. For these, each field
- * has a symbol below defining a field mask that encodes both the position
- * and width of the field within its register.
- *
- * In some cases, different versions of IPA hardware use different offset or
- * field mask values. In such cases an inline_function(ipa) is used rather
- * than a MACRO to define the offset or field mask to use.
- *
- * Finally, some registers hold bitmasks representing endpoints. In such
- * cases the @available field in the @ipa structure defines the "full" set
- * of valid bits for the register.
+ * Some registers encode multiple fields within them. Each field in
+ * such a register has a unique identifier (from an enumerated type).
+ * The position and width of the fields in a register are defined by
+ * an array of field masks, indexed by field ID. Two functions are
+ * used to access register fields; both take an ipa_reg structure as
+ * argument. To encode a value to be represented in a register field,
+ * the value and field ID are passed to ipa_reg_encode(). To extract
+ * a value encoded in a register field, the field ID is passed to
+ * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * can be used to either encode the bit value, or to generate a mask
+ * used to extract the bit value.
*/
-#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
-/* The next field is not supported for IPA v4.0+, not present for IPA v4.5+ */
-#define ENABLE_FMASK GENMASK(0, 0)
-/* The next field is present for IPA v4.7+ */
-#define RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS_FMASK GENMASK(0, 0)
-#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
-#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
-#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
-/* The next field is not present for IPA v4.5+ */
-#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
-/* The next twelve fields are present for IPA v4.0+ */
-#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
-#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
-#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
-#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
-#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
-#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
-#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
-#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
-#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
-#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
-#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
-#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
-/* The next five fields are present for IPA v4.9+ */
-#define QMB_RAM_RD_CACHE_DISABLE_FMASK GENMASK(19, 19)
-#define GENQMB_AOOOWR_FMASK GENMASK(20, 20)
-#define IF_OUT_OF_BUF_STOP_RESET_MASK_EN_FMASK GENMASK(21, 21)
-#define GEN_QMB_1_DYNAMIC_ASIZE_FMASK GENMASK(30, 30)
-#define GEN_QMB_0_DYNAMIC_ASIZE_FMASK GENMASK(31, 31)
-
-/* Encoded value for COMP_CFG register ATOMIC_FETCHER_ARB_LOCK_DIS field */
-static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
- u32 mask)
-{
- WARN_ON(version < IPA_VERSION_4_0);
+/* enum ipa_reg_id - IPA register IDs */
+enum ipa_reg_id {
+ COMP_CFG,
+ CLKON_CFG,
+ ROUTE,
+ SHARED_MEM_SIZE,
+ QSB_MAX_WRITES,
+ QSB_MAX_READS,
+ FILT_ROUT_HASH_EN,
+ FILT_ROUT_HASH_FLUSH,
+ STATE_AGGR_ACTIVE,
+ IPA_BCR, /* Not IPA v4.5+ */
+ LOCAL_PKT_PROC_CNTXT,
+ AGGR_FORCE_CLOSE,
+ COUNTER_CFG, /* Not IPA v4.5+ */
+ IPA_TX_CFG, /* IPA v3.5+ */
+ FLAVOR_0, /* IPA v3.5+ */
+ IDLE_INDICATION_CFG, /* IPA v3.5+ */
+ QTIME_TIMESTAMP_CFG, /* IPA v4.5+ */
+ TIMERS_XO_CLK_DIV_CFG, /* IPA v4.5+ */
+ TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
+ SRC_RSRC_GRP_01_RSRC_TYPE,
+ SRC_RSRC_GRP_23_RSRC_TYPE,
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, except IPA v4.5 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ DST_RSRC_GRP_01_RSRC_TYPE,
+ DST_RSRC_GRP_23_RSRC_TYPE,
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, except IPA v4.5 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
+ ENDP_INIT_CFG,
+ ENDP_INIT_NAT, /* TX only */
+ ENDP_INIT_HDR,
+ ENDP_INIT_HDR_EXT,
+ ENDP_INIT_HDR_METADATA_MASK, /* RX only */
+ ENDP_INIT_MODE, /* TX only */
+ ENDP_INIT_AGGR,
+ ENDP_INIT_HOL_BLOCK_EN, /* RX only */
+ ENDP_INIT_HOL_BLOCK_TIMER, /* RX only */
+ ENDP_INIT_DEAGGR, /* TX only */
+ ENDP_INIT_RSRC_GRP,
+ ENDP_INIT_SEQ, /* TX only */
+ ENDP_STATUS,
+ ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
+ /* The IRQ registers are only used for GSI_EE_AP */
+ IPA_IRQ_STTS,
+ IPA_IRQ_EN,
+ IPA_IRQ_CLR,
+ IPA_IRQ_UC,
+ IRQ_SUSPEND_INFO,
+ IRQ_SUSPEND_EN, /* IPA v3.1+ */
+ IRQ_SUSPEND_CLR, /* IPA v3.1+ */
+ IPA_REG_ID_COUNT, /* Last; not an ID */
+};
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(20, 17));
+/**
+ * struct ipa_reg - An IPA register descriptor
+ * @offset: Register offset relative to base of the "ipa-reg" memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the IPA register
+ */
+struct ipa_reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
- if (version == IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(24, 22));
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define IPA_REG(__NAME, __reg_id, __offset) \
+ IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
- return u32_encode_bits(mask, GENMASK(23, 22));
-}
+/* Helper macro for defining parameterized registers, specifying stride */
+#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
-/* Encoded value for COMP_CFG register FULL_FLUSH_WAIT_RS_CLOSURE_EN field */
-static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
- bool enable)
-{
- u32 val = enable ? 1 : 0;
+#define IPA_REG_FIELDS(__NAME, __name, __offset) \
+ IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
- WARN_ON(version < IPA_VERSION_4_5);
+#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
+ .fmask = ipa_reg_ ## __name ## _fmask, \
+ }
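A hedged sketch of how a version-specific source file (not part of this diff) might use these macros; the field mask values are taken from the old QSB_MAX_WRITES definitions removed later in this patch, and the array/identifier names simply follow the pattern the macros expect:

static const u32 ipa_reg_qsb_max_writes_fmask[] = {
	/* Positions match the former GEN_QMB_*_MAX_WRITES_FMASK values */
	[GEN_QMB_0_MAX_WRITES]	= GENMASK(3, 0),
	[GEN_QMB_1_MAX_WRITES]	= GENMASK(7, 4),
};

IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);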
- if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
- return u32_encode_bits(val, GENMASK(21, 21));
+/**
+ * struct ipa_regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct ipa_regs {
+ u32 reg_count;
+ const struct ipa_reg **reg;
+};
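Each version-specific file would then collect its descriptors into an ipa_regs instance, roughly as follows (a sketch; the array is abbreviated, and the descriptor names shown assume they were defined with the macros above):

static const struct ipa_reg *ipa_reg_array[] = {
	[COMP_CFG]		= &ipa_reg_comp_cfg,
	[QSB_MAX_WRITES]	= &ipa_reg_qsb_max_writes,
	/* one entry per register defined for this IPA version */
};

const struct ipa_regs ipa_regs_v4_5 = {
	.reg_count	= ARRAY_SIZE(ipa_reg_array),
	.reg		= ipa_reg_array,
};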
- return u32_encode_bits(val, GENMASK(17, 17));
-}
+/* COMP_CFG register */
+enum ipa_reg_comp_cfg_field_id {
+ COMP_CFG_ENABLE, /* Not IPA v4.0+ */
+ RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS, /* IPA v4.7+ */
+ GSI_SNOC_BYPASS_DIS,
+ GEN_QMB_0_SNOC_BYPASS_DIS,
+ GEN_QMB_1_SNOC_BYPASS_DIS,
+ IPA_DCMP_FAST_CLK_EN, /* Not IPA v4.5+ */
+ IPA_QMB_SELECT_CONS_EN, /* IPA v4.0+ */
+ IPA_QMB_SELECT_PROD_EN, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS, /* IPA v4.0+ */
+ GSI_SNOC_CNOC_LOOP_PROT_DISABLE, /* IPA v4.0+ */
+ GSI_MULTI_AXI_MASTERS_DIS, /* IPA v4.0+ */
+ IPA_QMB_SELECT_GLOBAL_EN, /* IPA v4.0+ */
+ QMB_RAM_RD_CACHE_DISABLE, /* IPA v4.9+ */
+ GENQMB_AOOOWR, /* IPA v4.9+ */
+ IF_OUT_OF_BUF_STOP_RESET_MASK_EN, /* IPA v4.9+ */
+ GEN_QMB_1_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ GEN_QMB_0_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ ATOMIC_FETCHER_ARB_LOCK_DIS, /* IPA v4.0+ */
+ FULL_FLUSH_WAIT_RS_CLOSURE_EN, /* IPA v4.5+ */
+};
-#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
-#define RX_FMASK GENMASK(0, 0)
-#define PROC_FMASK GENMASK(1, 1)
-#define TX_WRAPPER_FMASK GENMASK(2, 2)
-#define MISC_FMASK GENMASK(3, 3)
-#define RAM_ARB_FMASK GENMASK(4, 4)
-#define FTCH_HPS_FMASK GENMASK(5, 5)
-#define FTCH_DPS_FMASK GENMASK(6, 6)
-#define HPS_FMASK GENMASK(7, 7)
-#define DPS_FMASK GENMASK(8, 8)
-#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
-#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
-#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
-#define RSRC_MNGR_FMASK GENMASK(12, 12)
-#define CTX_HANDLER_FMASK GENMASK(13, 13)
-#define ACK_MNGR_FMASK GENMASK(14, 14)
-#define D_DCPH_FMASK GENMASK(15, 15)
-#define H_DCPH_FMASK GENMASK(16, 16)
-/* The next field is not present for IPA v4.5+ */
-#define DCMP_FMASK GENMASK(17, 17)
-/* The next three fields are present for IPA v3.5+ */
-#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
-#define TX_0_FMASK GENMASK(19, 19)
-#define TX_1_FMASK GENMASK(20, 20)
-/* The next field is present for IPA v3.5.1+ */
-#define FNR_FMASK GENMASK(21, 21)
-/* The next eight fields are present for IPA v4.0+ */
-#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
-#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
-#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
-#define QMB_FMASK GENMASK(25, 25)
-#define WEIGHT_ARB_FMASK GENMASK(26, 26)
-#define GSI_IF_FMASK GENMASK(27, 27)
-#define GLOBAL_FMASK GENMASK(28, 28)
-#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
-/* The next field is present for IPA v4.5+ */
-#define DPL_FIFO_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.7+ */
-#define DRBIP_FMASK GENMASK(31, 31)
-
-#define IPA_REG_ROUTE_OFFSET 0x00000048
-#define ROUTE_DIS_FMASK GENMASK(0, 0)
-#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
-#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
-#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
-#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
-#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
-
-#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
-#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
-#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
-
-#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
-#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
-
-#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
-#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0+ */
-#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
-#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-
-static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x000008c;
+/* CLKON_CFG register */
+enum ipa_reg_clkon_cfg_field_id {
+ CLKON_RX,
+ CLKON_PROC,
+ TX_WRAPPER,
+ CLKON_MISC,
+ RAM_ARB,
+ FTCH_HPS,
+ FTCH_DPS,
+ CLKON_HPS,
+ CLKON_DPS,
+ RX_HPS_CMDQS,
+ HPS_DPS_CMDQS,
+ DPS_TX_CMDQS,
+ RSRC_MNGR,
+ CTX_HANDLER,
+ ACK_MNGR,
+ D_DCPH,
+ H_DCPH,
+ CLKON_DCMP, /* Not IPA v4.5+ */
+ NTF_TX_CMDQS, /* IPA v3.5+ */
+ CLKON_TX_0, /* IPA v3.5+ */
+ CLKON_TX_1, /* IPA v3.5+ */
+ CLKON_FNR, /* IPA v3.5.1+ */
+ QSB2AXI_CMDQ_L, /* IPA v4.0+ */
+ AGGR_WRAPPER, /* IPA v4.0+ */
+ RAM_SLAVEWAY, /* IPA v4.0+ */
+ CLKON_QMB, /* IPA v4.0+ */
+ WEIGHT_ARB, /* IPA v4.0+ */
+ GSI_IF, /* IPA v4.0+ */
+ CLKON_GLOBAL, /* IPA v4.0+ */
+ GLOBAL_2X_CLK, /* IPA v4.0+ */
+ DPL_FIFO, /* IPA v4.5+ */
+ DRBIP, /* IPA v4.7+ */
+};
- return 0x0000148;
-}
+/* ROUTE register */
+enum ipa_reg_route_field_id {
+ ROUTE_DIS,
+ ROUTE_DEF_PIPE,
+ ROUTE_DEF_HDR_TABLE,
+ ROUTE_DEF_HDR_OFST,
+ ROUTE_FRAG_DEF_PIPE,
+ ROUTE_DEF_RETAIN_HDR,
+};
-static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000090;
+/* SHARED_MEM_SIZE register */
+enum ipa_reg_shared_mem_size_field_id {
+ MEM_SIZE,
+ MEM_BADDR,
+};
- return 0x000014c;
-}
+/* QSB_MAX_WRITES register */
+enum ipa_reg_qsb_max_writes_field_id {
+ GEN_QMB_0_MAX_WRITES,
+ GEN_QMB_1_MAX_WRITES,
+};
-/* The next four fields are used for the hash enable and flush registers */
-#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+/* QSB_MAX_READS register */
+enum ipa_reg_qsb_max_reads_field_id {
+ GEN_QMB_0_MAX_READS,
+ GEN_QMB_1_MAX_READS,
+ GEN_QMB_0_MAX_READS_BEATS, /* IPA v4.0+ */
+ GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
+};
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000010c;
+/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
+enum ipa_reg_rout_hash_field_id {
+ IPV6_ROUTER_HASH,
+ IPV6_FILTER_HASH,
+ IPV4_ROUTER_HASH,
+ IPV4_FILTER_HASH,
+};
- return 0x000000b4;
-}
+/* BCR register */
+enum ipa_bcr_compat {
+ BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
+ BCR_TX_NOT_USING_BRESP = 0x1, /* Not IPA v4.2+ */
+ BCR_TX_SUSPEND_IRQ_ASSERT_ONCE = 0x2, /* Not IPA v4.0+ */
+ BCR_SUSPEND_L2_IRQ = 0x3, /* Not IPA v4.2+ */
+ BCR_HOLB_DROP_L2_IRQ = 0x4, /* Not IPA v4.2+ */
+ BCR_DUAL_TX = 0x5, /* IPA v3.5+ */
+ BCR_ENABLE_FILTER_DATA_CACHE = 0x6, /* IPA v3.5+ */
+ BCR_NOTIF_PRIORITY_OVER_ZLT = 0x7, /* IPA v3.5+ */
+ BCR_FILTER_PREFETCH_EN = 0x8, /* IPA v3.5+ */
+ BCR_ROUTER_PREFETCH_EN = 0x9, /* IPA v3.5+ */
+};
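These are bit positions, so a pre-IPA v4.5 platform's configuration data composes its BCR value with BIT(); as a sketch (the particular combination shown is illustrative, the real value is SoC-specific):

static const u32 example_backward_compat =
	BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
	BIT(BCR_TX_NOT_USING_BRESP) |
	BIT(BCR_SUSPEND_L2_IRQ) |
	BIT(BCR_HOLB_DROP_L2_IRQ) |
	BIT(BCR_DUAL_TX);	/* written to the BCR by ipa_hardware_config_bcr() */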
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_BCR_OFFSET 0x000001d0
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
-#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
-/* The next field is invalid for IPA v4.0+ */
-#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
-#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
-/* The next five fields are present for IPA v3.5+ */
-#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
-#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
-#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
-#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
-#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
-
-/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */
-#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8
-
-/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */
-static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version,
- u32 addr)
-{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(addr, GENMASK(16, 0));
+/* LOCAL_PKT_PROC_CNTXT register */
+enum ipa_reg_local_pkt_proc_cntxt_field_id {
+ IPA_BASE_ADDR,
+};
- return u32_encode_bits(addr, GENMASK(17, 0));
-}
+/* COUNTER_CFG register */
+enum ipa_reg_counter_cfg_field_id {
+ EOT_COAL_GRANULARITY, /* Not v3.5+ */
+ AGGR_GRANULARITY,
+};
-/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
-
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-/* The next field is not present for IPA v3.5+ */
-#define EOT_COAL_GRANULARITY GENMASK(3, 0)
-#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_TX_CFG_OFFSET 0x000001fc
-/* The next three fields are not present for IPA v4.0+ */
-#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
-/* The next six fields are present for IPA v4.0+ */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
-#define PA_MASK_EN_FMASK GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
-/* The next field is present for IPA v4.5+ */
-#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
-/* The next field is present for IPA v4.2+, but not IPA v4.5 */
-#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
-/* The next field is present for IPA v4.2 only */
-#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
-#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
-
-/* The next register is present for IPA v3.5+ */
-static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
-{
- if (version >= IPA_VERSION_4_2)
- return 0x00000240;
+/* IPA_TX_CFG register */
+enum ipa_reg_ipa_tx_cfg_field_id {
+ TX0_PREFETCH_DISABLE, /* Not v4.0+ */
+ TX1_PREFETCH_DISABLE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_THRESHOLD, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_EN, /* v4.0+ */
+ DMAW_MAX_BEATS_256_DIS, /* v4.0+ */
+ PA_MASK_EN, /* v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* v4.0+ */
+ DUAL_TX_ENABLE, /* v4.5+ */
+ SSPND_PA_NO_START_STATE, /* v4.2+, not v4.5 */
+ SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+};
- return 0x00000220;
-}
+/* FLAVOR_0 register */
+enum ipa_reg_flavor_0_field_id {
+ MAX_PIPES,
+ MAX_CONS_PIPES,
+ MAX_PROD_PIPES,
+ PROD_LOWEST,
+};
+
+/* IDLE_INDICATION_CFG register */
+enum ipa_reg_idle_indication_cfg_field_id {
+ ENTER_IDLE_DEBOUNCE_THRESH,
+ CONST_NON_IDLE_ENABLE,
+};
+
+/* QTIME_TIMESTAMP_CFG register */
+enum ipa_reg_qtime_timestamp_cfg_field_id {
+ DPL_TIMESTAMP_LSB,
+ DPL_TIMESTAMP_SEL,
+ TAG_TIMESTAMP_LSB,
+ NAT_TIMESTAMP_LSB,
+};
-#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
-#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
-#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
-#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
-#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
-#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
-#define DIV_VALUE_FMASK GENMASK(8, 0)
-#define DIV_ENABLE_FMASK GENMASK(31, 31)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
-#define GRAN_0_FMASK GENMASK(2, 0)
-#define GRAN_1_FMASK GENMASK(5, 3)
-#define GRAN_2_FMASK GENMASK(8, 6)
-/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+/* TIMERS_XO_CLK_DIV_CFG register */
+enum ipa_reg_timers_xo_clk_div_cfg_field_id {
+ DIV_VALUE,
+ DIV_ENABLE,
+};
+
+/* TIMERS_PULSE_GRAN_CFG register */
+enum ipa_reg_timers_pulse_gran_cfg_field_id {
+ PULSE_GRAN_0,
+ PULSE_GRAN_1,
+ PULSE_GRAN_2,
+};
+
+/* Values for PULSE_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
enum ipa_pulse_gran {
IPA_GRAN_10_US = 0x0,
IPA_GRAN_20_US = 0x1,
@@ -325,267 +347,160 @@ enum ipa_pulse_gran {
IPA_GRAN_655350_US = 0x7,
};
-/* Not all of the following are present (depends on IPA version) */
-#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000400 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000404 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000408 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000040c + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000500 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000504 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000508 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000050c + 0x0020 * (rt))
-/* The next four fields are used for all resource group registers */
-#define X_MIN_LIM_FMASK GENMASK(5, 0)
-#define X_MAX_LIM_FMASK GENMASK(13, 8)
-/* The next two fields are not always present (if resource count is odd) */
-#define Y_MIN_LIM_FMASK GENMASK(21, 16)
-#define Y_MAX_LIM_FMASK GENMASK(29, 24)
-
-#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
- (0x00000800 + 0x0070 * (ep))
-/* Valid only for RX (IPA producer) endpoints (do not use for IPA v4.0+) */
-#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
-/* Valid only for TX (IPA consumer) endpoints */
-#define ENDP_DELAY_FMASK GENMASK(1, 1)
-
-#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
- (0x00000808 + 0x0070 * (ep))
-#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
-#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
-#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
-#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/* {SRC,DST}_RSRC_GRP_{01,23,45,67}_RSRC_TYPE registers */
+enum ipa_reg_rsrc_grp_rsrc_type_field_id {
+ X_MIN_LIM,
+ X_MAX_LIM,
+ Y_MIN_LIM,
+ Y_MAX_LIM,
+};
+
+/* ENDP_INIT_CTRL register */
+enum ipa_reg_endp_init_ctrl_field_id {
+ ENDP_SUSPEND, /* Not v4.0+ */
+ ENDP_DELAY, /* Not v4.2+ */
+};
+
+/* ENDP_INIT_CFG register */
+enum ipa_reg_endp_init_cfg_field_id {
+ FRAG_OFFLOAD_EN,
+ CS_OFFLOAD_EN,
+ CS_METADATA_HDR_OFFSET,
+ CS_GEN_QMB_MASTER_SEL,
+};
/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0x0,
- IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
- IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
- IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL /* TX */ = 0x1, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_DL /* RX */ = 0x2, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_INLINE /* TX and RX */ = 0x1, /* IPA v4.5+ */
};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \
- (0x0000080c + 0x0070 * (ep))
-#define NAT_EN_FMASK GENMASK(1, 0)
+/* ENDP_INIT_NAT register */
+enum ipa_reg_endp_init_nat_field_id {
+ NAT_EN,
+};
/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
enum ipa_nat_en {
- IPA_NAT_BYPASS = 0x0,
- IPA_NAT_SRC = 0x1,
- IPA_NAT_DST = 0x2,
-};
-
-#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
- (0x00000810 + 0x0070 * (ep))
-#define HDR_LEN_FMASK GENMASK(5, 0)
-#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
-#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
-#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
-#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
-#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
-/* The next field is not present for IPA v4.9+ */
-#define HDR_A5_MUX_FMASK GENMASK(26, 26)
-#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
-/* The next two fields are present for IPA v4.5+ */
-#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
-#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
-
-/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
-static inline u32 ipa_header_size_encoded(enum ipa_version version,
- u32 header_size)
-{
- u32 size = header_size & field_mask(HDR_LEN_FMASK);
- u32 val;
-
- val = u32_encode_bits(size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(header_size != size);
- return val;
- }
-
- /* IPA v4.5 adds a few more most-significant bits */
- size = header_size >> hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
-
- return val;
-}
-
-/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
-static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
- u32 offset)
-{
- u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
- u32 val;
-
- val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(offset != off);
- return val;
- }
+ IPA_NAT_BYPASS = 0x0,
+ IPA_NAT_SRC = 0x1,
+ IPA_NAT_DST = 0x2,
+};
- /* IPA v4.5 adds a few more most-significant bits */
- off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
+/* ENDP_INIT_HDR register */
+enum ipa_reg_endp_init_hdr_field_id {
+ HDR_LEN,
+ HDR_OFST_METADATA_VALID,
+ HDR_OFST_METADATA,
+ HDR_ADDITIONAL_CONST_LEN,
+ HDR_OFST_PKT_SIZE_VALID,
+ HDR_OFST_PKT_SIZE,
+ HDR_A5_MUX, /* Not v4.9+ */
+ HDR_LEN_INC_DEAGG_HDR,
+ HDR_METADATA_REG_VALID, /* Not v4.5+ */
+ HDR_LEN_MSB, /* v4.5+ */
+ HDR_OFST_METADATA_MSB, /* v4.5+ */
+};
- return val;
-}
+/* ENDP_INIT_HDR_EXT register */
+enum ipa_reg_endp_init_hdr_ext_field_id {
+ HDR_ENDIANNESS,
+ HDR_TOTAL_LEN_OR_PAD_VALID,
+ HDR_TOTAL_LEN_OR_PAD,
+ HDR_PAYLOAD_LEN_INC_PADDING,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET,
+ HDR_PAD_TO_ALIGNMENT,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
+ HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
+ HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+};
-#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
- (0x00000814 + 0x0070 * (ep))
-#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
-#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
-#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
-#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
-#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-/* The next three fields are present for IPA v4.5+ */
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
-#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
-#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
- (0x00000818 + 0x0070 * (rxep))
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
- (0x00000820 + 0x0070 * (txep))
-#define MODE_FMASK GENMASK(2, 0)
-/* The next field is present for IPA v4.5+ */
-#define DCPH_ENABLE_FMASK GENMASK(3, 3)
-#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
-#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
-#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
-#define PAD_EN_FMASK GENMASK(29, 29)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.9+ */
-#define DRBIP_ACL_ENABLE GENMASK(30, 30)
+/* ENDP_INIT_MODE register */
+enum ipa_reg_endp_init_mode_field_id {
+ ENDP_MODE,
+ DCPH_ENABLE, /* v4.5+ */
+ DEST_PIPE_INDEX,
+ BYTE_THRESHOLD,
+ PIPE_REPLICATION_EN,
+ PAD_EN,
+ HDR_FTCH_DISABLE, /* Not v4.5+ */
+ DRBIP_ACL_ENABLE, /* v4.9+ */
+};
/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
enum ipa_mode {
- IPA_BASIC = 0x0,
- IPA_ENABLE_FRAMING_HDLC = 0x1,
- IPA_ENABLE_DEFRAMING_HDLC = 0x2,
- IPA_DMA = 0x3,
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
};
-#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
- (0x00000824 + 0x0070 * (ep))
-#define AGGR_EN_FMASK GENMASK(1, 0)
-#define AGGR_TYPE_FMASK GENMASK(4, 2)
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_byte_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_time_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_pkt_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_sw_eof_active_fmask(bool legacy)
-{
- return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_force_close_fmask(bool legacy)
-{
- return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
-{
- return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
-}
-
-/* The next field is present for IPA v4.5+ */
-#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
+/* ENDP_INIT_AGGR register */
+enum ipa_reg_endp_init_aggr_field_id {
+ AGGR_EN,
+ AGGR_TYPE,
+ BYTE_LIMIT,
+ TIME_LIMIT,
+ PKT_LIMIT,
+ SW_EOF_ACTIVE,
+ FORCE_CLOSE,
+ HARD_BYTE_LIMIT_EN,
+ AGGR_GRAN_SEL,
+};
/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0x0, /* (TX, RX) */
- IPA_ENABLE_AGGR = 0x1, /* (RX) */
- IPA_ENABLE_DEAGGR = 0x2, /* (TX) */
+ IPA_BYPASS_AGGR /* TX and RX */ = 0x0,
+ IPA_ENABLE_AGGR /* RX */ = 0x1,
+ IPA_ENABLE_DEAGGR /* TX */ = 0x2,
};
/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */
enum ipa_aggr_type {
- IPA_MBIM_16 = 0x0,
- IPA_HDLC = 0x1,
- IPA_TLP = 0x2,
- IPA_RNDIS = 0x3,
- IPA_GENERIC = 0x4,
- IPA_COALESCE = 0x5,
- IPA_QCMAP = 0x6,
-};
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
- (0x0000082c + 0x0070 * (rxep))
-#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
- (0x00000830 + 0x0070 * (rxep))
-/* The next two fields are present for IPA v4.2 only */
-#define BASE_VALUE_FMASK GENMASK(4, 0)
-#define SCALE_FMASK GENMASK(12, 8)
-/* The next two fields are present for IPA v4.5 */
-#define TIME_LIMIT_FMASK GENMASK(4, 0)
-#define GRAN_SEL_FMASK GENMASK(8, 8)
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
- (0x00000834 + 0x0070 * (txep))
-#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
-#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
-#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
-#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
-#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
-#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
-
-#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
- (0x00000838 + 0x0070 * (ep))
-/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
-static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
-{
- if (version < IPA_VERSION_3_5 || version == IPA_VERSION_4_5)
- return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
- if (version == IPA_VERSION_4_2 || version == IPA_VERSION_4_7)
- return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+/* ENDP_INIT_HOL_BLOCK_EN register */
+enum ipa_reg_endp_init_hol_block_en_field_id {
+ HOL_BLOCK_EN,
+};
- return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
-}
+/* ENDP_INIT_HOL_BLOCK_TIMER register */
+enum ipa_reg_endp_init_hol_block_timer_field_id {
+ TIMER_BASE_VALUE, /* Not v4.5+ */
+ TIMER_SCALE, /* v4.2 only */
+ TIMER_LIMIT, /* v4.5+ */
+ TIMER_GRAN_SEL, /* v4.5+ */
+};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
- (0x0000083c + 0x0070 * (txep))
-#define SEQ_TYPE_FMASK GENMASK(7, 0)
-#define SEQ_REP_TYPE_FMASK GENMASK(15, 8)
+/* ENDP_INIT_DEAGGR register */
+enum ipa_reg_endp_deaggr_field_id {
+ DEAGGR_HDR_LEN,
+ SYSPIPE_ERR_DETECTION,
+ PACKET_OFFSET_VALID,
+ PACKET_OFFSET_LOCATION,
+ IGNORE_MIN_PKT_ERR,
+ MAX_PACKET_LEN,
+};
+
+/* ENDP_INIT_RSRC_GRP register */
+enum ipa_reg_endp_init_rsrc_grp_field_id {
+ ENDP_RSRC_GRP,
+};
+
+/* ENDP_INIT_SEQ register */
+enum ipa_reg_endp_init_seq_field_id {
+ SEQ_TYPE,
+ SEQ_REP_TYPE, /* Not v4.5+ */
+};
/**
* enum ipa_seq_type - HPS and DPS sequencer type
@@ -629,76 +544,36 @@ enum ipa_seq_rep_type {
IPA_SEQ_REP_DMA_PARSER = 0x08,
};
-#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
- (0x00000840 + 0x0070 * (ep))
-#define STATUS_EN_FMASK GENMASK(0, 0)
-#define STATUS_ENDP_FMASK GENMASK(5, 1)
-/* The next field is not present for IPA v4.5+ */
-#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0+ */
-#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-
-/* The next register is not present for IPA v4.2 (which no hashing support) */
-#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
- (0x0000085c + 0x0070 * (er))
-#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
-#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
-#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
-#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
-#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
-#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
-#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
-#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
-
-#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
-#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
-#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
-#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
-#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
-#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
-#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
-#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
-
-static inline u32 ipa_reg_irq_stts_ee_n_offset(enum ipa_version version,
- u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003008 + 0x1000 * ee;
-
- return 0x00004008 + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_stts_offset(enum ipa_version version)
-{
- return ipa_reg_irq_stts_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000300c + 0x1000 * ee;
-
- return 0x0000400c + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_en_offset(enum ipa_version version)
-{
- return ipa_reg_irq_en_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_clr_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003010 + 0x1000 * ee;
-
- return 0x00004010 + 0x1000 * ee;
-}
+/* ENDP_STATUS register */
+enum ipa_reg_endp_status_field_id {
+ STATUS_EN,
+ STATUS_ENDP,
+ STATUS_LOCATION, /* Not v4.5+ */
+ STATUS_PKT_SUPPRESS, /* v4.0+ */
+};
-static inline u32 ipa_reg_irq_clr_offset(enum ipa_version version)
-{
- return ipa_reg_irq_clr_ee_n_offset(version, GSI_EE_AP);
-}
+/* ENDP_FILTER_ROUTER_HSH_CFG register */
+enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
+ FILTER_HASH_MSK_SRC_ID,
+ FILTER_HASH_MSK_SRC_IP,
+ FILTER_HASH_MSK_DST_IP,
+ FILTER_HASH_MSK_SRC_PORT,
+ FILTER_HASH_MSK_DST_PORT,
+ FILTER_HASH_MSK_PROTOCOL,
+ FILTER_HASH_MSK_METADATA,
+ FILTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+
+ ROUTER_HASH_MSK_SRC_ID,
+ ROUTER_HASH_MSK_SRC_IP,
+ ROUTER_HASH_MSK_DST_IP,
+ ROUTER_HASH_MSK_SRC_PORT,
+ ROUTER_HASH_MSK_DST_PORT,
+ ROUTER_HASH_MSK_PROTOCOL,
+ ROUTER_HASH_MSK_METADATA,
+ ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+};
+/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
* @IPA_IRQ_UC_0: Microcontroller event interrupt
@@ -774,74 +649,82 @@ enum ipa_irq_id {
IPA_IRQ_COUNT, /* Last; not an id */
};
-static inline u32 ipa_reg_irq_uc_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000301c + 0x1000 * ee;
+/* IPA_IRQ_UC register */
+enum ipa_reg_ipa_irq_uc_field_id {
+ UC_INTR,
+};
- return 0x0000401c + 0x1000 * ee;
-}
+extern const struct ipa_regs ipa_regs_v3_1;
+extern const struct ipa_regs ipa_regs_v3_5_1;
+extern const struct ipa_regs ipa_regs_v4_2;
+extern const struct ipa_regs ipa_regs_v4_5;
+extern const struct ipa_regs ipa_regs_v4_9;
+extern const struct ipa_regs ipa_regs_v4_11;
-static inline u32 ipa_reg_irq_uc_offset(enum ipa_version version)
+/* Return the field mask for a field in a register */
+static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
{
- return ipa_reg_irq_uc_ee_n_offset(version, GSI_EE_AP);
-}
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
-#define UC_INTR_FMASK GENMASK(0, 0)
+ return reg->fmask[field_id];
+}
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-static inline u32
-ipa_reg_irq_suspend_info_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the mask for a single-bit field in a register */
+static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
{
- if (version == IPA_VERSION_3_0)
- return 0x00003098 + 0x1000 * ee;
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003030 + 0x1000 * ee;
+ WARN_ON(!is_power_of_2(fmask));
- return 0x00004030 + 0x1000 * ee;
+ return fmask;
}
+/* Encode a value into the given field of a register */
static inline u32
-ipa_reg_irq_suspend_info_offset(enum ipa_version version)
+ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_info_ee_n_offset(version, GSI_EE_AP);
-}
+ u32 fmask = ipa_reg_fmask(reg, field_id);
-/* ipa->available defines the valid bits in the SUSPEND_EN register */
-static inline u32
-ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- WARN_ON(version == IPA_VERSION_3_0);
+ if (!fmask)
+ return 0;
- if (version < IPA_VERSION_4_9)
- return 0x00003034 + 0x1000 * ee;
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
- return 0x00004034 + 0x1000 * ee;
+ return val;
}
+/* Given a register value, decode (extract) the value in the given field */
static inline u32
-ipa_reg_irq_suspend_en_offset(enum ipa_version version)
+ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_en_ee_n_offset(version, GSI_EE_AP);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
}
-/* ipa->available defines the valid bits in the SUSPEND_CLR register */
-static inline u32
-ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
{
- WARN_ON(version == IPA_VERSION_3_0);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003038 + 0x1000 * ee;
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
- return 0x00004038 + 0x1000 * ee;
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
+{
+ return reg ? reg->offset : 0;
}
-static inline u32
-ipa_reg_irq_suspend_clr_offset(enum ipa_version version)
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
- return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP);
+ return reg ? reg->offset + n * reg->stride : 0;
}
int ipa_reg_init(struct ipa *ipa);
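For orientation, the accessors declared above replace the old per-version offset macros: a caller first looks up the struct ipa_reg for a register ID, then builds field values with ipa_reg_encode()/ipa_reg_bit() and computes the (possibly strided) offset with ipa_reg_n_offset(). A minimal sketch follows, using the register and field IDs from the enums above; the function name and the value written are illustrative only, not taken from the patch.

/* Sketch, not part of the patch: enable/disable head-of-line blocking
 * for one endpoint using the new accessors. Function name is hypothetical.
 */
static void ipa_hol_block_enable(struct ipa *ipa, u32 endpoint_id, bool enable)
{
	const struct ipa_reg *reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
	u32 offset = ipa_reg_n_offset(reg, endpoint_id);
	u32 val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;

	iowrite32(val, ipa->reg_virt + offset);
}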
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 06cec7199382..a257f0e5e361 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -69,20 +69,21 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
}
static void
-ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
+ const struct ipa_reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
- val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -91,34 +92,35 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_src[resource_type];
- offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
@@ -127,34 +129,35 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_dst[resource_type];
- offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
/* Configure resources; there is no ipa_resource_deconfig() */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 211233612039..5620dc271fac 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 59cee31a7383..9b969b03d1a4 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SMP2P_H_
#define _IPA_SMP2P_H_
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index c0c8641cdd14..5cbc15a971f9 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2022 Linaro Ltd. */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index 4a3ffd1e4e3f..58ba22810bab 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SYSFS_H_
#define _IPA_SYSFS_H_
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2f5a58bfc529..510ff2dc8999 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
@@ -386,8 +384,9 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ const struct ipa_reg *reg;
struct gsi_trans *trans;
+ u32 offset;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -399,8 +398,13 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
- val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
+
+ val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
ipa_cmd_register_write_add(trans, offset, val, val, false);
@@ -518,15 +522,18 @@ int ipa_table_setup(struct ipa *ipa)
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -567,13 +574,17 @@ static bool ipa_route_id_modem(u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, route_id);
+
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
iowrite32(val, ipa->reg_virt + offset);
}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index b6a9a0d79d68..395189f75d78 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index fe11910518d9..f0ee47281015 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -222,7 +222,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* Fill in the command data */
@@ -233,9 +233,10 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
shared->response_param = 0;
/* Use an interrupt to tell the microcontroller the command is ready */
- val = u32_encode_bits(1, UC_INTR_FMASK);
- offset = ipa_reg_irq_uc_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_UC);
+ val = ipa_reg_bit(reg, UC_INTR);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 23847f934d64..8514096e6f36 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_UC_H_
#define _IPA_UC_H_
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 6c16c895d842..7870e0cc3d7c 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_VERSION_H_
#define _IPA_VERSION_H_
@@ -19,10 +19,10 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
- * Please update ipa_version_valid() and ipa_version_string() whenever a
- * new version is added.
+ * Please update ipa_version_string() whenever a new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,
@@ -36,6 +36,30 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
+ IPA_VERSION_COUNT, /* Last; not a version */
+};
+
+static inline bool ipa_version_supported(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
#endif /* _IPA_VERSION_H_ */
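The new IPA_VERSION_COUNT terminator and ipa_version_supported() helper let callers validate the version value before any per-version register table is selected. A minimal probe-time sketch, assuming the caller already has the version from somewhere such as match data; the function name and error handling are illustrative only.

/* Sketch only: reject IPA versions this driver has no register data for. */
static int ipa_version_check(struct device *dev, enum ipa_version version)
{
	if (!ipa_version_supported(version)) {
		dev_err(dev, "unsupported IPA version %u\n", version);
		return -EINVAL;
	}

	return 0;
}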
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
new file mode 100644
index 000000000000..116b27717e3d
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ [EOT_COAL_GRANULARITY] = GENMASK(3, 0),
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
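The ipa_regs_v3_1 table above (like the other per-version tables declared extern in ipa_reg.h) backs the ipa_reg() lookup. A minimal sketch of such a lookup follows, assuming struct ipa caches a pointer to the selected table in a member named regs; that member name is an assumption, not something shown in this patch.

/* Sketch only: resolve a register ID through the per-version table.
 * Returning NULL on an unknown ID matches the "Returns 0 for NULL reg"
 * convention of ipa_reg_offset()/ipa_reg_n_offset() above.
 */
const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
	const struct ipa_regs *regs = ipa->regs;	/* assumed member */

	if (WARN_ON(reg_id >= regs->reg_count))
		return NULL;

	return regs->reg[reg_id];
}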
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
new file mode 100644
index 000000000000..6e2f939b18f1
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ [TX0_PREFETCH_DISABLE] = BIT(0),
+ [TX1_PREFETCH_DISABLE] = BIT(1),
+ [PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
new file mode 100644
index 000000000000..8fd36569bb9f
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(23, 22),
+ /* Bits 24-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
new file mode 100644
index 000000000000..f8e78e1907c8
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ /* Bit 17 reserved */
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ [SSPND_PA_NO_BQ_STATE] = BIT(19),
+ /* Bits 20-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_SCALE] = GENMASK(12, 8),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
new file mode 100644
index 000000000000..d32b805abb11
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ /* Bits 18-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
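+ /* Bits 9-31 reserved */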
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
new file mode 100644
index 000000000000..eabbc5451937
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
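+ /* Bit 18 reserved */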
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(24, 22),
+ /* Bits 25-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
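+ /* Bits 9-31 reserved */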
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
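+ /* Bit 26 reserved */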
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index dfeb5b392e64..bb1c298c1e78 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we don't care about
 * multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -589,7 +590,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 49ba8a50dfb1..54c94a69c2bb 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -408,8 +408,8 @@ static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index ef02f2cf5ce1..cbabca167a07 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ee6087e7b2bf..c891b60937a7 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -18,14 +18,13 @@
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
+#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
#include <uapi/linux/if_macsec.h>
-#define MACSEC_SCI_LEN 8
-
/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
@@ -46,20 +45,10 @@ struct macsec_eth_header {
u8 secure_channel_id[8]; /* optional */
} __packed;
-#define MACSEC_TCI_VERSION 0x80
-#define MACSEC_TCI_ES 0x40 /* end station */
-#define MACSEC_TCI_SC 0x20 /* SCI present */
-#define MACSEC_TCI_SCB 0x10 /* epon */
-#define MACSEC_TCI_E 0x08 /* encryption */
-#define MACSEC_TCI_C 0x04 /* changed text */
-#define MACSEC_AN_MASK 0x03 /* association number */
-#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
-
/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48
#define GCM_AES_IV_LEN 12
-#define DEFAULT_ICV_LEN 16
#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
@@ -243,7 +232,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
return (struct macsec_cb *)skb->cb;
}
-#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
@@ -258,14 +246,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
-static bool send_sci(const struct macsec_secy *secy)
-{
- const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
-
- return tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
-}
-
static sci_t make_sci(const u8 *addr, __be16 port)
{
sci_t sci;
@@ -330,7 +310,7 @@ static void macsec_fill_sectag(struct macsec_eth_header *h,
/* with GCM, C/E clear for !encrypt, both set for encrypt */
if (tx_sc->encrypt)
h->tci_an |= MACSEC_TCI_CONFID;
- else if (secy->icv_len != DEFAULT_ICV_LEN)
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
h->tci_an |= MACSEC_TCI_C;
h->tci_an |= tx_sc->encoding_sa;
@@ -462,11 +442,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -659,7 +634,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- sci_present = send_sci(secy);
+ sci_present = macsec_send_sci(secy);
hh = skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
@@ -1029,11 +1004,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
struct ethhdr *hdr = eth_hdr(skb);
+ struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ md_dst = skb_metadata_dst(skb);
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1044,6 +1021,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+ if (md_dst && md_dst->type == METADATA_MACSEC &&
+ (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
+ continue;
+
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1301,7 +1282,7 @@ nosci:
/* 10.6.1 if the SC is not found */
cbit = !!(hdr->tci_an & MACSEC_TCI_C);
if (!cbit)
- macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
+ macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
@@ -1682,22 +1663,8 @@ static int macsec_offload(int (* const func)(struct macsec_context *),
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_lock(&ctx->phydev->lock);
- /* Phase I: prepare. The drive should fail here if there are going to be
- * issues in the commit phase.
- */
- ctx->prepare = true;
ret = (*func)(ctx);
- if (ret)
- goto phy_unlock;
- /* Phase II: commit. This step cannot fail. */
- ctx->prepare = false;
- ret = (*func)(ctx);
- /* This should never happen: commit is not allowed to fail */
- if (unlikely(ret))
- WARN(1, "MACsec offloading commit failed (%d)\n", ret);
-
-phy_unlock:
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_unlock(&ctx->phydev->lock);
@@ -1847,6 +1814,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rx_sa->sc = rx_sc;
+ if (secy->xpn) {
+ rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@@ -1869,12 +1842,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
- if (secy->xpn) {
- rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
- nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
- MACSEC_SALT_LEN);
- }
-
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@@ -2089,6 +2056,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
+ if (secy->xpn) {
+ tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@@ -2111,12 +2084,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
- if (secy->xpn) {
- tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
- nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
- MACSEC_SALT_LEN);
- }
-
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
@@ -3409,6 +3376,7 @@ static struct genl_family macsec_fam __ro_after_init = {
.module = THIS_MODULE,
.small_ops = macsec_genl_ops,
.n_small_ops = ARRAY_SIZE(macsec_genl_ops),
+ .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
@@ -3420,6 +3388,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
int ret, len;
if (macsec_is_offloaded(netdev_priv(dev))) {
+ struct metadata_dst *md_dst = secy->tx_sc.md_dst;
+
+ skb_dst_drop(skb);
+ dst_hold(&md_dst->dst);
+ skb_dst_set(skb, &md_dst->dst);
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
@@ -3661,7 +3634,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
eth_hw_addr_set(dev, addr->sa_data);
- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -3748,6 +3720,8 @@ static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ if (macsec->secy.tx_sc.md_dst)
+ metadata_dst_free(macsec->secy.tx_sc.md_dst);
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
@@ -4000,6 +3974,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -4015,6 +3994,13 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
return -ENOMEM;
}
+ secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!secy->tx_sc.md_dst) {
+ free_percpu(secy->tx_sc.stats);
+ free_percpu(macsec->stats);
+ return -ENOMEM;
+ }
+
if (sci == MACSEC_UNDEF_SCI)
sci = dev_to_sci(dev, MACSEC_PORT_ES);
@@ -4028,6 +4014,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
secy->xpn = DEFAULT_XPN;
secy->sci = sci;
+ secy->tx_sc.md_dst->u.macsec_info.sci = sci;
secy->tx_sc.active = true;
secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
@@ -4046,7 +4033,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
{
struct macsec_dev *macsec = macsec_priv(dev);
rx_handler_func_t *rx_handler;
- u8 icv_len = DEFAULT_ICV_LEN;
+ u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
int err, mtu;
sci_t sci;
@@ -4170,7 +4157,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
u64 csid = MACSEC_DEFAULT_CIPHER_ID;
- u8 icv_len = DEFAULT_ICV_LEN;
+ u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
int flag;
bool es, scb, sci;
@@ -4182,7 +4169,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
- if (icv_len != DEFAULT_ICV_LEN) {
+ if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
char dummy_key[DEFAULT_SAK_LEN] = { 0 };
struct crypto_aead *dummy_tfm;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1080d6ebff63..713e3354cb2e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1043,8 +1043,8 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index cecf8c63096c..d1f435788e90 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -207,7 +207,7 @@ static struct notifier_block macvtap_notifier_block __read_mostly = {
.notifier_call = macvtap_device_event,
};
-static int macvtap_init(void)
+static int __init macvtap_init(void)
{
int err;
@@ -241,7 +241,7 @@ out1:
}
module_init(macvtap_init);
-static void macvtap_exit(void)
+static void __exit macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
unregister_netdevice_notifier(&macvtap_notifier_block);
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index 53846c6b56ca..0762c735dd8a 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -986,7 +986,7 @@ out:
return rc;
}
-static int mctp_i2c_remove(struct i2c_client *client)
+static void mctp_i2c_remove(struct i2c_client *client)
{
struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
@@ -999,8 +999,6 @@ static int mctp_i2c_remove(struct i2c_client *client)
mctp_i2c_free_client(mcli);
mutex_unlock(&driver_clients_lock);
- /* Callers ignore return code */
- return 0;
}
/* We look for a 'mctp-controller' property on I2C busses as they are
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 3e79c2c51929..689e728345ce 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -10,10 +10,31 @@
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/pse-pd/pse.h>
MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
MODULE_LICENSE("GPL");
+static struct pse_control *
+fwnode_find_pse_control(struct fwnode_handle *fwnode)
+{
+ struct pse_control *psec;
+ struct device_node *np;
+
+ if (!IS_ENABLED(CONFIG_PSE_CONTROLLER))
+ return NULL;
+
+ np = to_of_node(fwnode);
+ if (!np)
+ return NULL;
+
+ psec = of_pse_control_get(np);
+ if (PTR_ERR(psec) == -ENOENT)
+ return NULL;
+
+ return psec;
+}
+
static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
@@ -47,7 +68,9 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
* just fall back to poll mode
*/
if (rc == -EPROBE_DEFER)
- rc = -ENODEV;
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
+ if (rc == -EPROBE_DEFER)
+ return rc;
if (rc > 0) {
phy->irq = rc;
@@ -89,14 +112,21 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
struct fwnode_handle *child, u32 addr)
{
struct mii_timestamper *mii_ts = NULL;
+ struct pse_control *psec = NULL;
struct phy_device *phy;
bool is_c45 = false;
u32 phy_id;
int rc;
+ psec = fwnode_find_pse_control(child);
+ if (IS_ERR(psec))
+ return PTR_ERR(psec);
+
mii_ts = fwnode_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
+ if (IS_ERR(mii_ts)) {
+ rc = PTR_ERR(mii_ts);
+ goto clean_pse;
+ }
rc = fwnode_property_match_string(child, "compatible",
"ethernet-phy-ieee802.3-c45");
@@ -108,8 +138,8 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
else
phy = phy_device_create(bus, addr, phy_id, 0, NULL);
if (IS_ERR(phy)) {
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
+ rc = PTR_ERR(phy);
+ goto clean_mii_ts;
}
if (is_acpi_node(child)) {
@@ -123,25 +153,33 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
/* All data is now stored in the phy struct, so register it */
rc = phy_device_register(phy);
if (rc) {
- phy_device_free(phy);
fwnode_handle_put(phy->mdio.dev.fwnode);
- return rc;
+ goto clean_phy;
}
} else if (is_of_node(child)) {
rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr);
- if (rc) {
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
- return rc;
- }
+ if (rc)
+ goto clean_phy;
}
+ phy->psec = psec;
+
/* phy->mii_ts may already be defined by the PHY driver. A
* mii_timestamper probed via the device tree will still have
* precedence.
*/
if (mii_ts)
phy->mii_ts = mii_ts;
+
return 0;
+
+clean_phy:
+ phy_device_free(phy);
+clean_mii_ts:
+ unregister_mii_timestamper(mii_ts);
+clean_pse:
+ pse_control_put(psec);
+
+ return rc;
}
EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 09200a70b315..bf8bf5e20faf 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -3,6 +3,7 @@
* MDIO I2C bridge
*
* Copyright (C) 2015-2016 Russell King
+ * Copyright (C) 2021 Marek Behun
*
* Network PHYs can appear on I2C buses when they are part of SFP module.
* This driver exposes these PHYs to the networking PHY code, allowing
@@ -12,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
@@ -28,7 +30,7 @@ static unsigned int i2c_mii_phy_addr(int phy_id)
return phy_id + 0x40;
}
-static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
@@ -62,7 +64,8 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
return data[0] << 8 | data[1];
}
-static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
@@ -91,9 +94,288 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
return ret < 0 ? ret : 0;
}
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c)
+/* RollBall SFPs do not provide access to their internal PHY via I2C address
+ * 0x56; instead, the PHY is reached via address 0x51, with the SFP page set
+ * to 0x03 and the password set to 0xffffffff.
+ *
+ * address size contents description
+ * ------- ---- -------- -----------
+ * 0x80 1 CMD 0x01/0x02/0x04 for write/read/done
+ * 0x81 1 DEV Clause 45 device
+ * 0x82 2 REG Clause 45 register
+ * 0x84 2 VAL Register value
+ */
+#define ROLLBALL_PHY_I2C_ADDR 0x51
+
+#define ROLLBALL_PASSWORD (SFP_VSL + 3)
+
+#define ROLLBALL_CMD_ADDR 0x80
+#define ROLLBALL_DATA_ADDR 0x81
+
+#define ROLLBALL_CMD_WRITE 0x01
+#define ROLLBALL_CMD_READ 0x02
+#define ROLLBALL_CMD_DONE 0x04
+
+#define SFP_PAGE_ROLLBALL_MDIO 3
+
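Taken together with the read path below, the register map above implies a command sequence like the following for a Clause 45 read of MMD 1, register 0x0000 (a hedged reconstruction, with SFP_PAGE already set to 3 by the transfer wrapper):

	write to 0x51: 0x81 0x01 0x00 0x00   (DEV = 1, REG = 0x0000)
	write to 0x51: 0x80 0x02             (CMD = read)
	read from 0x51 starting at 0x80, 6 bytes; repeat until byte 0 == 0x04 (done)
	value:         bytes 4 and 5 of that read hold the 16-bit register contents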
+static int __i2c_transfer_err(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+
+ ret = __i2c_transfer(i2c, msgs, num);
+ if (ret < 0)
+ return ret;
+ else if (ret != num)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int __i2c_rollball_get_page(struct i2c_adapter *i2c, int bus_addr,
+ u8 *page)
+{
+ struct i2c_msg msgs[2];
+ u8 addr = SFP_PAGE;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &addr;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = page;
+
+ return __i2c_transfer_err(i2c, msgs, 2);
+}
+
+static int __i2c_rollball_set_page(struct i2c_adapter *i2c, int bus_addr,
+ u8 page)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = SFP_PAGE;
+ buf[1] = page;
+
+ msg.addr = bus_addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buf;
+
+ return __i2c_transfer_err(i2c, &msg, 1);
+}
+
+/* To avoid interfering with other SFP code (which may also manipulate
+ * SFP_PAGE), every transfer is done as follows:
+ * 1. lock the bus
+ * 2. save content of SFP_PAGE
+ * 3. set SFP_PAGE to 3
+ * 4. do the transfer
+ * 5. restore original SFP_PAGE
+ * 6. unlock the bus
+ * Note that steps 2 to 5 could in theory be done in a single i2c_transfer()
+ * call (by constructing the msgs array accordingly), but testing shows that
+ * this does not work: a changed SFP_PAGE does not take effect until
+ * i2c_transfer() has completed.
+ */
+static int i2c_transfer_rollball(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ int ret, main_err = 0;
+ u8 saved_page;
+
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ /* save original page */
+ ret = __i2c_rollball_get_page(i2c, msgs->addr, &saved_page);
+ if (ret)
+ goto unlock;
+
+ /* change to RollBall MDIO page */
+ ret = __i2c_rollball_set_page(i2c, msgs->addr, SFP_PAGE_ROLLBALL_MDIO);
+ if (ret)
+ goto unlock;
+
+ /* do the transfer; we try to restore original page if this fails */
+ ret = __i2c_transfer_err(i2c, msgs, num);
+ if (ret)
+ main_err = ret;
+
+ /* restore original page */
+ ret = __i2c_rollball_set_page(i2c, msgs->addr, saved_page);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ return main_err ? : ret;
+}
+
+static int i2c_rollball_mii_poll(struct mii_bus *bus, int bus_addr, u8 *buf,
+ size_t len)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+ u8 cmd_addr, tmp, *res;
+ int i, ret;
+
+ cmd_addr = ROLLBALL_CMD_ADDR;
+
+ res = buf ? buf : &tmp;
+ len = buf ? len : 1;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &cmd_addr;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = res;
+
+ /* By experiment it takes up to 70 ms to access a register for these
+ * SFPs. Sleep 20ms between iterations and try 10 times.
+ */
+ i = 10;
+ do {
+ msleep(20);
+
+ ret = i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
+ if (ret)
+ return ret;
+
+ if (*res == ROLLBALL_CMD_DONE)
+ return 0;
+ } while (i-- > 0);
+
+ dev_dbg(&bus->dev, "poll timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
+ u8 *data, size_t len)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+ u8 cmdbuf[2];
+
+ cmdbuf[0] = ROLLBALL_CMD_ADDR;
+ cmdbuf[1] = cmd;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = 0;
+ msgs[1].len = sizeof(cmdbuf);
+ msgs[1].buf = cmdbuf;
+
+ return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
+}
+
+static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
+{
+ u8 buf[4], res[6];
+ int bus_addr, ret;
+ u16 val;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
+ return 0xffff;
+
+ buf[0] = ROLLBALL_DATA_ADDR;
+ buf[1] = (reg >> 16) & 0x1f;
+ buf[2] = (reg >> 8) & 0xff;
+ buf[3] = reg & 0xff;
+
+ ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_READ, buf,
+ sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_rollball_mii_poll(bus, bus_addr, res, sizeof(res));
+ if (ret == -ETIMEDOUT)
+ return 0xffff;
+ else if (ret < 0)
+ return ret;
+
+ val = res[4] << 8 | res[5];
+
+ return val;
+}
+
+static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ int bus_addr, ret;
+ u8 buf[6];
+
+ if (!(reg & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
+ return 0;
+
+ buf[0] = ROLLBALL_DATA_ADDR;
+ buf[1] = (reg >> 16) & 0x1f;
+ buf[2] = (reg >> 8) & 0xff;
+ buf[3] = reg & 0xff;
+ buf[4] = val >> 8;
+ buf[5] = val & 0xff;
+
+ ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_WRITE, buf,
+ sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_rollball_mii_poll(bus, bus_addr, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int i2c_mii_init_rollball(struct i2c_adapter *i2c)
+{
+ struct i2c_msg msg;
+ u8 pw[5];
+ int ret;
+
+ pw[0] = ROLLBALL_PASSWORD;
+ pw[1] = 0xff;
+ pw[2] = 0xff;
+ pw[3] = 0xff;
+ pw[4] = 0xff;
+
+ msg.addr = ROLLBALL_PHY_I2C_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(pw);
+ msg.buf = pw;
+
+ ret = i2c_transfer(i2c, &msg, 1);
+ if (ret < 0)
+ return ret;
+ else if (ret != 1)
+ return -EIO;
+ else
+ return 0;
+}
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol)
{
struct mii_bus *mii;
+ int ret;
if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
return ERR_PTR(-EINVAL);
@@ -104,10 +386,28 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c)
snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent));
mii->parent = parent;
- mii->read = i2c_mii_read;
- mii->write = i2c_mii_write;
mii->priv = i2c;
+ switch (protocol) {
+ case MDIO_I2C_ROLLBALL:
+ ret = i2c_mii_init_rollball(i2c);
+ if (ret < 0) {
+ dev_err(parent,
+ "Cannot initialize RollBall MDIO I2C protocol: %d\n",
+ ret);
+ mdiobus_free(mii);
+ return ERR_PTR(ret);
+ }
+
+ mii->read = i2c_mii_read_rollball;
+ mii->write = i2c_mii_write_rollball;
+ break;
+ default:
+ mii->read = i2c_mii_read_default;
+ mii->write = i2c_mii_write_default;
+ break;
+ }
+
return mii;
}
EXPORT_SYMBOL_GPL(mdio_i2c_alloc);
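A minimal caller-side sketch of the extended allocator (illustrative only; the SFP consumer that selects MDIO_I2C_ROLLBALL is not part of this hunk):

	struct mii_bus *mii;

	mii = mdio_i2c_alloc(dev, i2c, MDIO_I2C_ROLLBALL);
	if (IS_ERR(mii))
		return PTR_ERR(mii);
	/* mii->read/mii->write now use the RollBall accessors; any other
	 * protocol value falls back to the default ones.
	 */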
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 08541007b18a..51f68daac152 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/mfd/ocelot.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
@@ -270,44 +271,25 @@ static int mscc_miim_clk_set(struct mii_bus *bus)
static int mscc_miim_probe(struct platform_device *pdev)
{
- struct regmap *mii_regmap, *phy_regmap = NULL;
struct device_node *np = pdev->dev.of_node;
+ struct regmap *mii_regmap, *phy_regmap;
struct device *dev = &pdev->dev;
- void __iomem *regs, *phy_regs;
struct mscc_miim_dev *miim;
- struct resource *res;
struct mii_bus *bus;
int ret;
- regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(regs)) {
- dev_err(dev, "Unable to map MIIM registers\n");
- return PTR_ERR(regs);
- }
-
- mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config);
-
- if (IS_ERR(mii_regmap)) {
- dev_err(dev, "Unable to create MIIM regmap\n");
- return PTR_ERR(mii_regmap);
- }
+ mii_regmap = ocelot_regmap_from_resource(pdev, 0,
+ &mscc_miim_regmap_config);
+ if (IS_ERR(mii_regmap))
+ return dev_err_probe(dev, PTR_ERR(mii_regmap),
+ "Unable to create MIIM regmap\n");
/* This resource is optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- phy_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(phy_regs)) {
- dev_err(dev, "Unable to map internal phy registers\n");
- return PTR_ERR(phy_regs);
- }
-
- phy_regmap = devm_regmap_init_mmio(dev, phy_regs,
- &mscc_miim_phy_regmap_config);
- if (IS_ERR(phy_regmap)) {
- dev_err(dev, "Unable to create phy register regmap\n");
- return PTR_ERR(phy_regmap);
- }
- }
+ phy_regmap = ocelot_regmap_from_resource_optional(pdev, 1,
+ &mscc_miim_phy_regmap_config);
+ if (IS_ERR(phy_regmap))
+ return dev_err_probe(dev, PTR_ERR(phy_regmap),
+ "Unable to create phy register regmap\n");
ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
if (ret < 0) {
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index b8866bc3f2e8..4a2e94faf57e 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -233,11 +233,9 @@ static int g12a_ephy_glue_clk_register(struct device *dev)
snprintf(in_name, sizeof(in_name), "clkin%d", i);
clk = devm_clk_get(dev, in_name);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "Missing clock %s\n", in_name);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Missing clock %s\n", in_name);
parent_names[i] = __clk_get_name(clk);
}
@@ -317,12 +315,9 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get peripheral clock\n");
- return ret;
- }
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(dev, PTR_ERR(priv->pclk),
+ "failed to get peripheral clock\n");
/* Make sure the device registers are clocked */
ret = clk_prepare_enable(priv->pclk);
@@ -339,8 +334,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
&priv->mux_handle, dev, NULL);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "mdio multiplexer init failed: %d", ret);
+ dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
goto err;
}
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index c02fb2a067ee..c02c9c660016 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -159,12 +159,9 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
mdio_mux_mmioreg_switch_fn,
&s->mux_handle, s, NULL);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to register mdio-mux bus %pOF\n", np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register mdio-mux bus %pOF\n", np);
pdev->dev.platform_data = s;
diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index 527acfc3c045..bfa5af577b0a 100644
--- a/drivers/net/mdio/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
@@ -72,12 +72,9 @@ static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
return -ENOMEM;
s->muxc = devm_mux_control_get(dev, NULL);
- if (IS_ERR(s->muxc)) {
- ret = PTR_ERR(s->muxc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(s->muxc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(s->muxc),
+ "Failed to get mux\n");
platform_set_drvdata(pdev, s);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 9e3c815a070f..796e9c7857d0 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 21a0435c02de..7a28e082436e 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -324,8 +324,8 @@ static const struct net_device_ops failover_dev_ops = {
static void nfo_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
}
static int nfo_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ddac61d79145..bdff9ac5056d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(oops_only, "Only log oops messages");
#ifndef MODULE
static int __init option_setup(char *opt)
{
- strlcpy(config, opt, MAX_PARAM_LENGTH);
+ strscpy(config, opt, MAX_PARAM_LENGTH);
return 1;
}
__setup("netconsole=", option_setup);
@@ -178,7 +178,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
goto fail;
nt->np.name = "netconsole";
- strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
+ strscpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
@@ -414,7 +414,7 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
return -EINVAL;
}
- strlcpy(nt->np.dev_name, buf, IFNAMSIZ);
+ strscpy(nt->np.dev_name, buf, IFNAMSIZ);
/* Get rid of possible trailing newline from echo(1) */
len = strnlen(nt->np.dev_name, IFNAMSIZ);
@@ -630,7 +630,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
return ERR_PTR(-ENOMEM);
nt->np.name = "netconsole";
- strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
+ strscpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
@@ -708,7 +708,7 @@ restart:
if (nt->np.dev == dev) {
switch (event) {
case NETDEV_CHANGENAME:
- strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
+ strscpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_RELEASE:
case NETDEV_JOIN:
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index e88f783c297e..794fc0cc73b8 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -965,7 +965,6 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- int ret;
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
@@ -976,15 +975,25 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- ret = nsim_dev_reload_create(nsim_dev, extack);
- return ret;
+
+ return nsim_dev_reload_create(nsim_dev, extack);
}
static int nsim_dev_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
- return devlink_info_driver_name_put(req, DRV_NAME);
+ int err;
+
+ err = devlink_info_driver_name_put(req, DRV_NAME);
+ if (err)
+ return err;
+ err = devlink_info_version_stored_put_ext(req, "fw.mgmt", "10.20.30",
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
+ if (err)
+ return err;
+ return devlink_info_version_running_put_ext(req, "fw.mgmt", "10.20.30",
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
#define NSIM_DEV_FLASH_SIZE 500000
@@ -1312,8 +1321,7 @@ nsim_dev_devlink_trap_drop_counter_get(struct devlink *devlink,
static const struct devlink_ops nsim_dev_devlink_ops = {
.eswitch_mode_set = nsim_devlink_eswitch_mode_set,
.eswitch_mode_get = nsim_devlink_eswitch_mode_get,
- .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
- DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 605a38e16db0..0e58aa7f0374 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_fail_fops.fops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e470e3398abc..9a1a5b203624 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&ns->syncp);
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
}
static int
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 80bdc07f2cd3..464d88ca8ab0 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -364,9 +364,9 @@ static void ntb_get_drvinfo(struct net_device *ndev,
{
struct ntb_netdev *dev = netdev_priv(ndev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
static int ntb_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 6289b7c765f1..6e7e6c346a3e 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -26,4 +26,10 @@ config PCS_RZN1_MIIC
on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
pass-through mode for MII.
+config PCS_ALTERA_TSE
+ tristate
+ help
+ This module provides helper functions for the Altera Triple Speed
+ Ethernet SGMII PCS, which can be found on the Intel Socfpga family.
+
endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index 0ff5388fcdea..4c780d8f2e98 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -6,3 +6,4 @@ pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
+obj-$(CONFIG_PCS_ALTERA_TSE) += pcs-altera-tse.o
diff --git a/drivers/net/pcs/pcs-altera-tse.c b/drivers/net/pcs/pcs-altera-tse.c
new file mode 100644
index 000000000000..97a7cabff962
--- /dev/null
+++ b/drivers/net/pcs/pcs-altera-tse.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Bootlin
+ *
+ * Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/pcs-altera-tse.h>
+
+/* SGMII PCS register addresses
+ */
+#define SGMII_PCS_SCRATCH 0x10
+#define SGMII_PCS_REV 0x11
+#define SGMII_PCS_LINK_TIMER_0 0x12
+#define SGMII_PCS_LINK_TIMER_REG(x) (0x12 + (x))
+#define SGMII_PCS_LINK_TIMER_1 0x13
+#define SGMII_PCS_IF_MODE 0x14
+#define PCS_IF_MODE_SGMII_ENA BIT(0)
+#define PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define PCS_IF_MODE_SGMI_SPEED_MASK GENMASK(3, 2)
+#define PCS_IF_MODE_SGMI_SPEED_10 (0 << 2)
+#define PCS_IF_MODE_SGMI_SPEED_100 (1 << 2)
+#define PCS_IF_MODE_SGMI_SPEED_1000 (2 << 2)
+#define PCS_IF_MODE_SGMI_HALF_DUPLEX BIT(4)
+#define PCS_IF_MODE_SGMI_PHY_AN BIT(5)
+#define SGMII_PCS_DIS_READ_TO 0x15
+#define SGMII_PCS_READ_TO 0x16
+#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
+
+struct altera_tse_pcs {
+ struct phylink_pcs pcs;
+ void __iomem *base;
+ int reg_width;
+};
+
+static struct altera_tse_pcs *phylink_pcs_to_tse_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct altera_tse_pcs, pcs);
+}
+
+static u16 tse_pcs_read(struct altera_tse_pcs *tse_pcs, int regnum)
+{
+ if (tse_pcs->reg_width == 4)
+ return readl(tse_pcs->base + regnum * 4);
+ else
+ return readw(tse_pcs->base + regnum * 2);
+}
+
+static void tse_pcs_write(struct altera_tse_pcs *tse_pcs, int regnum,
+ u16 value)
+{
+ if (tse_pcs->reg_width == 4)
+ writel(value, tse_pcs->base + regnum * 4);
+ else
+ writew(value, tse_pcs->base + regnum * 2);
+}
+
+static int tse_pcs_reset(struct altera_tse_pcs *tse_pcs)
+{
+ int i = 0;
+ u16 bmcr;
+
+ /* Reset PCS block */
+ bmcr = tse_pcs_read(tse_pcs, MII_BMCR);
+ bmcr |= BMCR_RESET;
+ tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
+
+ for (i = 0; i < SGMII_PCS_SW_RESET_TIMEOUT; i++) {
+ if (!(tse_pcs_read(tse_pcs, MII_BMCR) & BMCR_RESET))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int alt_tse_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ return 1;
+
+ return -EINVAL;
+}
+
+static int alt_tse_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u32 ctrl, if_mode;
+
+ ctrl = tse_pcs_read(tse_pcs, MII_BMCR);
+ if_mode = tse_pcs_read(tse_pcs, SGMII_PCS_IF_MODE);
+
+ /* Set link timer to 1.6ms, as per the MegaCore Function User Guide */
+ tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_0, 0x0D40);
+ tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_1, 0x03);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ if_mode |= PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ if_mode &= ~(PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA);
+ if_mode |= PCS_IF_MODE_SGMI_SPEED_1000;
+ }
+
+ ctrl |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
+
+ tse_pcs_write(tse_pcs, MII_BMCR, ctrl);
+ tse_pcs_write(tse_pcs, SGMII_PCS_IF_MODE, if_mode);
+
+ return tse_pcs_reset(tse_pcs);
+}
+
+static void alt_tse_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u16 bmsr, lpa;
+
+ bmsr = tse_pcs_read(tse_pcs, MII_BMSR);
+ lpa = tse_pcs_read(tse_pcs, MII_LPA);
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+}
+
+static void alt_tse_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u16 bmcr;
+
+ bmcr = tse_pcs_read(tse_pcs, MII_BMCR);
+ bmcr |= BMCR_ANRESTART;
+ tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
+
+ /* This PCS seems to require a soft reset to re-sync the AN logic */
+ tse_pcs_reset(tse_pcs);
+}
+
+static const struct phylink_pcs_ops alt_tse_pcs_ops = {
+ .pcs_validate = alt_tse_pcs_validate,
+ .pcs_get_state = alt_tse_pcs_get_state,
+ .pcs_config = alt_tse_pcs_config,
+ .pcs_an_restart = alt_tse_pcs_an_restart,
+};
+
+struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
+ void __iomem *pcs_base, int reg_width)
+{
+ struct altera_tse_pcs *tse_pcs;
+
+ if (reg_width != 4 && reg_width != 2)
+ return ERR_PTR(-EINVAL);
+
+ tse_pcs = devm_kzalloc(&ndev->dev, sizeof(*tse_pcs), GFP_KERNEL);
+ if (!tse_pcs)
+ return ERR_PTR(-ENOMEM);
+
+ tse_pcs->pcs.ops = &alt_tse_pcs_ops;
+ tse_pcs->base = pcs_base;
+ tse_pcs->reg_width = reg_width;
+
+ return &tse_pcs->pcs;
+}
+EXPORT_SYMBOL_GPL(alt_tse_pcs_create);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Altera TSE PCS driver");
+MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
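A hedged usage sketch for the new helper (the consuming MAC driver and the variable names are illustrative, not part of this hunk):

	struct phylink_pcs *pcs;

	/* reg_width must be 2 or 4, matching the PCS register spacing */
	pcs = alt_tse_pcs_create(ndev, pcs_base, 4);
	if (IS_ERR(pcs))
		return PTR_ERR(pcs);
	/* the returned PCS is then handed to phylink, e.g. from the MAC's
	 * mac_select_pcs() callback
	 */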
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index ee374a85544a..134637584a83 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -749,7 +749,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) {
- strlcpy(&data[i * ETH_GSTRING_LEN],
+ strscpy(&data[i * ETH_GSTRING_LEN],
adin_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index b6d139501199..7619d6185801 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -15,6 +15,8 @@
#include <linux/property.h>
#define PHY_ID_ADIN1100 0x0283bc81
+#define PHY_ID_ADIN1110 0x0283bc91
+#define PHY_ID_ADIN2111 0x0283bca1
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
@@ -265,7 +267,8 @@ static int adin_probe(struct phy_device *phydev)
static struct phy_driver adin_driver[] = {
{
- PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100),
+ .phy_id = PHY_ID_ADIN1100,
+ .phy_id_mask = 0xffffffcf,
.name = "ADIN1100",
.get_features = adin_get_features,
.soft_reset = adin_soft_reset,
@@ -284,6 +287,8 @@ module_phy_driver(adin_driver);
static struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
{ }
};
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 8b7a46db30e0..47a76df36b74 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -27,9 +27,12 @@
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX 1
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII 3
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI 4
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI 7
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
#define MDIO_AN_VEND_PROV 0xc400
@@ -91,6 +94,22 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M 0x0310
+#define VEND1_GLOBAL_CFG_100M 0x031b
+#define VEND1_GLOBAL_CFG_1G 0x031c
+#define VEND1_GLOBAL_CFG_2_5G 0x031d
+#define VEND1_GLOBAL_CFG_5G 0x031e
+#define VEND1_GLOBAL_CFG_10G 0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -125,6 +144,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP 1000
+#define AQR107_OP_IN_PROG_TIMEOUT 100000
+
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -335,40 +360,57 @@ static int aqr_read_status(struct phy_device *phydev)
static int aqr107_read_rate(struct phy_device *phydev)
{
+ u32 config_reg;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
if (val < 0)
return val;
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
case MDIO_AN_TX_VEND_STATUS1_10BASET:
phydev->speed = SPEED_10;
+ config_reg = VEND1_GLOBAL_CFG_10M;
break;
case MDIO_AN_TX_VEND_STATUS1_100BASETX:
phydev->speed = SPEED_100;
+ config_reg = VEND1_GLOBAL_CFG_100M;
break;
case MDIO_AN_TX_VEND_STATUS1_1000BASET:
phydev->speed = SPEED_1000;
+ config_reg = VEND1_GLOBAL_CFG_1G;
break;
case MDIO_AN_TX_VEND_STATUS1_2500BASET:
phydev->speed = SPEED_2500;
+ config_reg = VEND1_GLOBAL_CFG_2_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_5000BASET:
phydev->speed = SPEED_5000;
+ config_reg = VEND1_GLOBAL_CFG_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_10GBASET:
phydev->speed = SPEED_10000;
+ config_reg = VEND1_GLOBAL_CFG_10G;
break;
default:
phydev->speed = SPEED_UNKNOWN;
- break;
+ return 0;
}
- if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
+ VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
else
- phydev->duplex = DUPLEX_HALF;
+ phydev->rate_matching = RATE_MATCH_NONE;
return 0;
}
@@ -392,15 +434,24 @@ static int aqr107_read_status(struct phy_device *phydev)
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+ phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+ phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+ phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
break;
@@ -513,11 +564,14 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
- phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
+ phydev->interface != PHY_INTERFACE_MODE_XAUI &&
+ phydev->interface != PHY_INTERFACE_MODE_RXAUI)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
@@ -597,16 +651,62 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+ int val, err;
+
+ /* The datasheet notes to wait at least 1ms after issuing a
+ * processor intensive operation before checking.
+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+ * because that just determines the maximum time slept, not the minimum.
+ */
+ usleep_range(1000, 5000);
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_GEN_STAT2, val,
+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (err) {
+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int aqr107_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ if (iface == PHY_INTERFACE_MODE_10GBASER ||
+ iface == PHY_INTERFACE_MODE_2500BASEX ||
+ iface == PHY_INTERFACE_MODE_NA)
+ return RATE_MATCH_PAUSE;
+ return RATE_MATCH_NONE;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
@@ -658,6 +758,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
@@ -676,6 +777,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
@@ -702,6 +804,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 59fe356942b5..9e9adde335c8 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -115,6 +115,7 @@
#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
#define AT803X_DEBUG_REG_3C 0x3C
@@ -192,6 +193,9 @@
#define AT803X_KEEP_PLL_ENABLED BIT(0)
#define AT803X_DISABLE_SMARTEEE BIT(1)
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
/* ADC threshold */
#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
@@ -672,6 +676,7 @@ static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
phy_interface_t iface;
linkmode_zero(phy_support);
@@ -682,7 +687,7 @@ static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phylink_set(phy_support, Asym_Pause);
linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
/* Some modules support 10G modes as well as others we support.
* Mask out non-supported modes so the correct interface is picked.
*/
@@ -730,6 +735,9 @@ static int at803x_parse_dt(struct phy_device *phydev)
if (of_property_read_bool(node, "qca,disable-smarteee"))
priv->flags |= AT803X_DISABLE_SMARTEEE;
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
if (!tw || tw > 255) {
phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
@@ -999,6 +1007,20 @@ static int at8031_pll_config(struct phy_device *phydev)
AT803X_DEBUG_PLL_ON, 0);
}
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is hibernation mode enabled. After
+ * software reset, the value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
@@ -1051,6 +1073,10 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
+
/* Ar803x extended next page bit is enabled by default. Cisco
* multigig switches read this bit and attempt to negotiate 10Gbps
* rates even if the next page bit is disabled. This is incorrect
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 287cccf8f7f4..b2c0baa51f39 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -519,7 +519,7 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 31fbcdddc9ad..ad71c88c87e7 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -766,6 +766,41 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static int brcm_fet_suspend(struct phy_device *phydev)
+{
+ int reg, err, err2, brcmtest;
+
+ /* We cannot use a read/modify/write here, otherwise the PHY continues
+ * to drive the LEDs, which defeats the purpose of low-power mode.
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_PDOWN);
+ if (err < 0)
+ return err;
+
+ /* Enable shadow register access */
+ brcmtest = phy_read(phydev, MII_BRCM_FET_BRCMTEST);
+ if (brcmtest < 0)
+ return brcmtest;
+
+ reg = brcmtest | MII_BRCM_FET_BT_SRE;
+
+ err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg);
+ if (err < 0)
+ return err;
+
+ /* Set standby mode */
+ err = phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4,
+ MII_BRCM_FET_SHDW_AM4_STANDBY,
+ MII_BRCM_FET_SHDW_AM4_STANDBY);
+
+ /* Disable shadow register access */
+ err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest);
+ if (!err)
+ err = err2;
+
+ return err;
+}
+
static int bcm54xx_phy_probe(struct phy_device *phydev)
{
struct bcm54xx_phy_priv *priv;
@@ -1033,6 +1068,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = brcm_fet_config_init,
.config_intr = brcm_fet_config_intr,
.handle_interrupt = brcm_fet_handle_interrupt,
+ .suspend = brcm_fet_suspend,
+ .resume = brcm_fet_config_init,
}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
@@ -1041,6 +1078,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = brcm_fet_config_init,
.config_intr = brcm_fet_config_intr,
.handle_interrupt = brcm_fet_handle_interrupt,
+ .suspend = brcm_fet_suspend,
+ .resume = brcm_fet_config_init,
}, {
.phy_id = PHY_ID_BCM5395,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index f070776ca904..fd9ad4820192 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -478,6 +478,7 @@ static int mv2222_config_init(struct phy_device *phydev)
static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
phy_interface_t sfp_interface;
struct mv2222_data *priv;
@@ -489,7 +490,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
priv = (struct mv2222_data *)phydev->priv;
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a714150f5e8c..2810f4f9da0c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1952,7 +1952,7 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < count; i++) {
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -2845,6 +2845,7 @@ static int marvell_probe(struct phy_device *phydev)
static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
phy_interface_t interface;
struct device *dev;
@@ -2856,7 +2857,7 @@ static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, supported);
+ sfp_parse_support(phydev->sfp_bus, id, supported, interfaces);
interface = sfp_select_interface(phydev->sfp_bus, supported);
dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 2b7d0720720b..383a9c9f36e5 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -96,6 +96,11 @@ enum {
MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
+ /* SerDes reinitialization 88E21X0 */
+ MV_AN_21X0_SERDES_CTRL2 = 0x800f,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS = BIT(13),
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT = BIT(15),
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@@ -117,16 +122,16 @@ enum {
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5,
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7,
- MV_V2_PORT_INTR_STS = 0xf040,
- MV_V2_PORT_INTR_MASK = 0xf043,
- MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
- MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
- MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
- MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
+ MV_V2_PORT_INTR_STS = 0xf040,
+ MV_V2_PORT_INTR_MASK = 0xf043,
+ MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
+ MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
+ MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
+ MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
/* Wake on LAN registers */
- MV_V2_WOL_CTRL = 0xf06e,
- MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
- MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
+ MV_V2_WOL_CTRL = 0xf06e,
+ MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
/* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
@@ -140,6 +145,8 @@ struct mv3310_chip {
bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
+ int (*set_mactype)(struct phy_device *phydev, int mactype);
+ int (*select_mactype)(unsigned long *interfaces);
int (*init_interface)(struct phy_device *phydev, int mactype);
#ifdef CONFIG_HWMON
@@ -466,9 +473,10 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
phy_interface_t iface;
- sfp_parse_support(phydev->sfp_bus, id, support);
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
iface = sfp_select_interface(phydev->sfp_bus, support);
if (iface != PHY_INTERFACE_MODE_10GBASER) {
@@ -593,6 +601,49 @@ static int mv2110_get_mactype(struct phy_device *phydev)
return mactype & MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
}
+static int mv2110_set_mactype(struct phy_device *phydev, int mactype)
+{
+ int err, val;
+
+ mactype &= MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
+ err = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_21X0_PORT_CTRL,
+ MV_PMA_21X0_PORT_CTRL_SWRST |
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK,
+ MV_PMA_21X0_PORT_CTRL_SWRST | mactype);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS |
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT);
+ if (err)
+ return err;
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_AN,
+ MV_AN_21X0_SERDES_CTRL2, val,
+ !(val &
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT),
+ 5000, 100000, true);
+ if (err)
+ return err;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS);
+}
+
+static int mv2110_select_mactype(unsigned long *interfaces)
+{
+ if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ !test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH;
+ else
+ return -1;
+}
+
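As a hypothetical illustration of the 88E21x0 precedence (not part of this patch): a host that offers only SGMII is given the 5GBASE-R MACTYPE, while a host that also offers 10GBASE-R falls through to the rate-matching 10GBASE-R MACTYPE.

static int example_mv2110_pick(void)
{
	DECLARE_PHY_INTERFACE_MASK(interfaces);

	phy_interface_zero(interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);

	/* only SGMII offered -> MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER */
	return mv2110_select_mactype(interfaces);
}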
static int mv3310_get_mactype(struct phy_device *phydev)
{
int mactype;
@@ -604,6 +655,46 @@ static int mv3310_get_mactype(struct phy_device *phydev)
return mactype & MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
}
+static int mv3310_set_mactype(struct phy_device *phydev, int mactype)
+{
+ int ret;
+
+ mactype &= MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_MASK,
+ mactype);
+ if (ret <= 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_33X0_PORT_CTRL_SWRST);
+}
+
+static int mv3310_select_mactype(unsigned long *interfaces)
+{
+ if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_XAUI, interfaces))
+ return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_XAUI, interfaces))
+ return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER;
+ else
+ return -1;
+}
+
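A hypothetical example for the 88X33x0 precedence (not part of this patch): when the host offers both SGMII and 10GBASE-R, the plain 10GBASE-R MACTYPE is chosen, presumably so the PHY can drop the serdes to SGMII at lower speeds; a host that offers 10GBASE-R alone gets the rate-matching variant instead.

static int example_mv3310_pick(void)
{
	DECLARE_PHY_INTERFACE_MASK(interfaces);

	phy_interface_zero(interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
	__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);

	/* SGMII + 10GBASE-R offered -> MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER */
	return mv3310_select_mactype(interfaces);
}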
static int mv2110_init_interface(struct phy_device *phydev, int mactype)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
@@ -687,6 +778,20 @@ static int mv3310_config_init(struct phy_device *phydev)
if (err)
return err;
+ /* If the host provided its supported interface modes, try to select
+ * the best one.
+ */
+ if (!phy_interface_empty(phydev->host_interfaces)) {
+ mactype = chip->select_mactype(phydev->host_interfaces);
+ if (mactype >= 0) {
+ phydev_info(phydev, "Changing MACTYPE to %i\n",
+ mactype);
+ err = chip->set_mactype(phydev, mactype);
+ if (err)
+ return err;
+ }
+ }
+
mactype = chip->get_mactype(phydev);
if (mactype < 0)
return mactype;
@@ -1049,6 +1154,8 @@ static const struct mv3310_chip mv3310_type = {
.has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3310_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
+ .set_mactype = mv3310_set_mactype,
+ .select_mactype = mv3310_select_mactype,
.init_interface = mv3310_init_interface,
#ifdef CONFIG_HWMON
@@ -1060,6 +1167,8 @@ static const struct mv3310_chip mv3340_type = {
.has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3340_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
+ .set_mactype = mv3310_set_mactype,
+ .select_mactype = mv3310_select_mactype,
.init_interface = mv3340_init_interface,
#ifdef CONFIG_HWMON
@@ -1070,6 +1179,8 @@ static const struct mv3310_chip mv3340_type = {
static const struct mv3310_chip mv2110_type = {
.init_supported_interfaces = mv2110_init_supported_interfaces,
.get_mactype = mv2110_get_mactype,
+ .set_mactype = mv2110_set_mactype,
+ .select_mactype = mv2110_select_mactype,
.init_interface = mv2110_init_interface,
#ifdef CONFIG_HWMON
@@ -1080,6 +1191,8 @@ static const struct mv3310_chip mv2110_type = {
static const struct mv3310_chip mv2111_type = {
.init_supported_interfaces = mv2111_init_supported_interfaces,
.get_mactype = mv2110_get_mactype,
+ .set_mactype = mv2110_set_mactype,
+ .select_mactype = mv2110_select_mactype,
.init_interface = mv2110_init_interface,
#ifdef CONFIG_HWMON
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8a2dbe849866..f82090bdf7ab 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -232,7 +232,7 @@ static ssize_t mdio_bus_stat_field_show(struct device *dev,
val = mdio_bus_get_stat(&bus->stats[sattr->addr],
sattr->field_offset);
- return sprintf(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
@@ -251,7 +251,7 @@ static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset);
- return sprintf(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
#define MDIO_BUS_STATS_ATTR_DECL(field, file) \
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 73f7962a37d3..c49062ad72c6 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
irq_status == INTSRC_ENERGY_DETECT)
return IRQ_HANDLED;
- /* Give PHY some time before MAC starts sending data. This works
- * around an issue where network doesn't come up properly.
- */
- if (!(irq_status & INTSRC_LINK_DOWN))
- phy_queue_state_machine(phydev, msecs_to_jiffies(100));
- else
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e78d0bf69bc3..3757e069c486 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -92,6 +92,15 @@
#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0)
#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+#define KSZPHY_WIRE_PAIR_MASK 0x3
+
+#define LAN8814_CABLE_DIAG 0x12
+#define LAN8814_CABLE_DIAG_STAT_MASK GENMASK(9, 8)
+#define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0)
+#define LAN8814_PAIR_BIT_SHIFT 12
+
+#define LAN8814_WIRE_PAIR_MASK 0xF
+
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
@@ -257,6 +266,8 @@ static struct kszphy_hw_stat kszphy_hw_stats[] = {
struct kszphy_type {
u32 led_mode_reg;
u16 interrupt_level_mask;
+ u16 cable_diag_reg;
+ unsigned long pair_mask;
bool has_broadcast_disable;
bool has_nand_tree_disable;
bool has_rmii_ref_clk_sel;
@@ -313,6 +324,13 @@ struct kszphy_priv {
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
+ .cable_diag_reg = LAN8814_CABLE_DIAG,
+ .pair_mask = LAN8814_WIRE_PAIR_MASK,
+};
+
+static const struct kszphy_type ksz886x_type = {
+ .cable_diag_reg = KSZ8081_LMD,
+ .pair_mask = KSZPHY_WIRE_PAIR_MASK,
};
static const struct kszphy_type ksz8021_type = {
@@ -1650,7 +1668,7 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -1796,6 +1814,17 @@ static int kszphy_probe(struct phy_device *phydev)
return 0;
}
+static int lan8814_cable_test_start(struct phy_device *phydev)
+{
+ /* If autoneg is enabled, we won't be able to test cross-pair
+ * shorts. In this case, the PHY will "detect" a link and
+ * confuse the internal state machine - disable autoneg here.
+ * Set the speed to 1000 Mbit/s and full duplex.
+ */
+ return phy_modify(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+}
+
static int ksz886x_cable_test_start(struct phy_device *phydev)
{
if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA)
@@ -1809,9 +1838,9 @@ static int ksz886x_cable_test_start(struct phy_device *phydev)
return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
}
-static int ksz886x_cable_test_result_trans(u16 status)
+static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
{
- switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_NORMAL:
return ETHTOOL_A_CABLE_RESULT_CODE_OK;
case KSZ8081_LMD_STAT_SHORT:
@@ -1825,15 +1854,15 @@ static int ksz886x_cable_test_result_trans(u16 status)
}
}
-static bool ksz886x_cable_test_failed(u16 status)
+static bool ksz886x_cable_test_failed(u16 status, u16 mask)
{
- return FIELD_GET(KSZ8081_LMD_STAT_MASK, status) ==
+ return FIELD_GET(mask, status) ==
KSZ8081_LMD_STAT_FAIL;
}
-static bool ksz886x_cable_test_fault_length_valid(u16 status)
+static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
{
- switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_OPEN:
fallthrough;
case KSZ8081_LMD_STAT_SHORT:
@@ -1842,29 +1871,79 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status)
return false;
}
-static int ksz886x_cable_test_fault_length(u16 status)
+static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask)
{
int dt;
/* According to the data sheet the distance to the fault is
- * DELTA_TIME * 0.4 meters.
+ * DELTA_TIME * 0.4 meters for KSZ PHYs and
+ * (DELTA_TIME - 22) * 0.8 meters for the LAN8814 PHY.
*/
- dt = FIELD_GET(KSZ8081_LMD_DELTA_TIME_MASK, status);
+ dt = FIELD_GET(data_mask, status);
- return (dt * 400) / 10;
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_LAN8814)
+ return ((dt - 22) * 800) / 10;
+ else
+ return (dt * 400) / 10;
}
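A worked example of the two formulas with hypothetical DELTA_TIME readings; the result is in centimetres, as reported to ethnl_cable_test_fault_length():

static int example_fault_length_cm(void)
{
	int ksz_dt = 25;	/* hypothetical KSZ886x/KSZ8081 reading */
	int lan_dt = 47;	/* hypothetical LAN8814 reading */

	int ksz_cm = (ksz_dt * 400) / 10;		/* 25 * 0.4 m  = 10 m = 1000 cm */
	int lan_cm = ((lan_dt - 22) * 800) / 10;	/* 25 * 0.8 m  = 20 m = 2000 cm */

	return ksz_cm + lan_cm;
}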
static int ksz886x_cable_test_wait_for_completion(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
int val, ret;
- ret = phy_read_poll_timeout(phydev, KSZ8081_LMD, val,
+ ret = phy_read_poll_timeout(phydev, type->cable_diag_reg, val,
!(val & KSZ8081_LMD_ENABLE_TEST),
30000, 100000, true);
return ret < 0 ? ret : 0;
}
+static int lan8814_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = { ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ u32 fault_length;
+ int ret;
+ int val;
+
+ val = KSZ8081_LMD_ENABLE_TEST;
+ val = val | (pair << LAN8814_PAIR_BIT_SHIFT);
+
+ ret = phy_write(phydev, LAN8814_CABLE_DIAG, val);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz886x_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, LAN8814_CABLE_DIAG);
+ if (val < 0)
+ return val;
+
+ if (ksz886x_cable_test_failed(val, LAN8814_CABLE_DIAG_STAT_MASK))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_result_trans(val,
+ LAN8814_CABLE_DIAG_STAT_MASK
+ ));
+ if (ret)
+ return ret;
+
+ if (!ksz886x_cable_test_fault_length_valid(val, LAN8814_CABLE_DIAG_STAT_MASK))
+ return 0;
+
+ fault_length = ksz886x_cable_test_fault_length(phydev, val,
+ LAN8814_CABLE_DIAG_VCT_DATA_MASK);
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length);
+}
+
static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
{
static const int ethtool_pair[] = {
@@ -1872,6 +1951,7 @@ static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
ETHTOOL_A_CABLE_PAIR_B,
};
int ret, val, mdix;
+ u32 fault_length;
/* There is no way to choose the pair, like we do on the ksz9031.
* We can work around this limitation by using the MDI-X functionality.
@@ -1910,25 +1990,27 @@ static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
if (val < 0)
return val;
- if (ksz886x_cable_test_failed(val))
+ if (ksz886x_cable_test_failed(val, KSZ8081_LMD_STAT_MASK))
return -EAGAIN;
ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
- ksz886x_cable_test_result_trans(val));
+ ksz886x_cable_test_result_trans(val, KSZ8081_LMD_STAT_MASK));
if (ret)
return ret;
- if (!ksz886x_cable_test_fault_length_valid(val))
+ if (!ksz886x_cable_test_fault_length_valid(val, KSZ8081_LMD_STAT_MASK))
return 0;
- return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- ksz886x_cable_test_fault_length(val));
+ fault_length = ksz886x_cable_test_fault_length(phydev, val, KSZ8081_LMD_DELTA_TIME_MASK);
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length);
}
static int ksz886x_cable_test_get_status(struct phy_device *phydev,
bool *finished)
{
- unsigned long pair_mask = 0x3;
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ unsigned long pair_mask = type->pair_mask;
int retries = 20;
int pair, ret;
@@ -1937,7 +2019,10 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
/* Try harder if link partner is active */
while (pair_mask && retries--) {
for_each_set_bit(pair, &pair_mask, 4) {
- ret = ksz886x_cable_test_one_pair(phydev, pair);
+ if (type->cable_diag_reg == LAN8814_CABLE_DIAG)
+ ret = lan8814_cable_test_one_pair(phydev, pair);
+ else
+ ret = ksz886x_cable_test_one_pair(phydev, pair);
if (ret == -EAGAIN)
continue;
if (ret < 0)
@@ -2676,19 +2761,82 @@ static int lan8804_config_init(struct phy_device *phydev)
return 0;
}
+static irqreturn_t lan8804_handle_interrupt(struct phy_device *phydev)
+{
+ int status;
+
+ status = phy_read(phydev, LAN8814_INTS);
+ if (status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (status > 0)
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+#define LAN8804_OUTPUT_CONTROL 25
+#define LAN8804_OUTPUT_CONTROL_INTR_BUFFER BIT(14)
+#define LAN8804_CONTROL 31
+#define LAN8804_CONTROL_INTR_POLARITY BIT(14)
+
+static int lan8804_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ /* This is an internal PHY of lan966x, and it is not possible to change
+ * the interrupt polarity on the GIC found in lan966x; therefore change
+ * the polarity of the interrupt in the PHY itself from active low to
+ * active high.
+ */
+ phy_write(phydev, LAN8804_CONTROL, LAN8804_CONTROL_INTR_POLARITY);
+
+ /* By default the interrupt buffer is open-drain, in which case the
+ * interrupt can only be active low. Therefore change the interrupt
+ * buffer to push-pull so that the interrupt polarity can be changed.
+ */
+ phy_write(phydev, LAN8804_OUTPUT_CONTROL,
+ LAN8804_OUTPUT_CONTROL_INTR_BUFFER);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ if (err)
+ return err;
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
int irq_status, tsu_irq_status;
+ int ret = IRQ_NONE;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
- phy_trigger_machine(phydev);
-
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
while (1) {
tsu_irq_status = lanphy_read_page_reg(phydev, 4,
LAN8814_INTR_STS_REG);
@@ -2697,12 +2845,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
(tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
LAN8814_INTR_STS_REG_1588_TSU1_ |
LAN8814_INTR_STS_REG_1588_TSU2_ |
- LAN8814_INTR_STS_REG_1588_TSU3_)))
+ LAN8814_INTR_STS_REG_1588_TSU3_))) {
lan8814_handle_ptp_interrupt(phydev);
- else
+ ret = IRQ_HANDLED;
+ } else {
break;
+ }
}
- return IRQ_HANDLED;
+
+ return ret;
}
static int lan8814_ack_interrupt(struct phy_device *phydev)
@@ -2729,9 +2880,9 @@ static int lan8814_config_intr(struct phy_device *phydev)
if (err)
return err;
- err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
} else {
- err = phy_write(phydev, LAN8814_INTC, 0);
+ err = phy_write(phydev, LAN8814_INTC, 0);
if (err)
return err;
@@ -2873,12 +3024,18 @@ static int lan8814_config_init(struct phy_device *phydev)
return 0;
}
+/* It is expected that there will not be any 'lan8814_take_coma_mode'
+ * function called in suspend, because the GPIO line can be shared: if one of
+ * the PHYs went back into coma mode, all the other PHYs would follow, which
+ * is wrong.
+ */
static int lan8814_release_coma_mode(struct phy_device *phydev)
{
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
+ GPIOD_OUT_HIGH_OPEN_DRAIN |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
@@ -3105,6 +3262,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = lan8814_config_init,
.driver_data = &lan8814_type,
.probe = lan8814_probe,
@@ -3117,6 +3275,8 @@ static struct phy_driver ksphy_driver[] = {
.resume = kszphy_resume,
.config_intr = lan8814_config_intr,
.handle_interrupt = lan8814_handle_interrupt,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8804,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -3131,6 +3291,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = kszphy_resume,
+ .config_intr = lan8804_config_intr,
+ .handle_interrupt = lan8804_handle_interrupt,
}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -3163,6 +3325,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
+ .driver_data = &ksz886x_type,
/* PHY_BASIC_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.config_init = kszphy_config_init,
@@ -3185,6 +3348,8 @@ static struct phy_driver ksphy_driver[] = {
.name = "Microchip KSZ9477",
/* PHY_GBIT_FEATURES */
.config_init = kszphy_config_init,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index d4c93d59bc53..8569a545e0a3 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -28,12 +28,16 @@
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08)
/* Interrupt Mask Register */
#define LAN87XX_INTERRUPT_MASK (0x19)
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+#define LAN87XX_INTERRUPT_MASK_2 (0x09)
+#define LAN87XX_MASK_COMM_RDY BIT(10)
+
/* MISC Control 1 Register */
#define LAN87XX_CTRL_1 (0x11)
#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
int rc, val = 0;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
+ /* clear all interrupts */
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (rc < 0)
+ return rc;
+
+ /* enable link down and comm ready interrupts */
+ val = LAN87XX_MASK_LINK_DOWN;
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ val = LAN87XX_MASK_COMM_RDY;
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
} else {
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
- if (rc)
+ if (rc < 0)
return rc;
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
}
return rc < 0 ? rc : 0;
@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
if (irq_status < 0) {
phy_error(phydev);
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b7b2521c73fb..ee5b17edca39 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -706,14 +706,6 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
struct phy_device *phydev = ctx->phydev;
struct vsc8531_private *priv = phydev->priv;
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->rx_sa = ctx->sa.rx_sa;
@@ -730,24 +722,13 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
struct macsec_flow *flow, bool update)
{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->tx_sa = ctx->sa.tx_sa;
/* Always match untagged packets on egress */
flow->match.untagged = 1;
- return vsc8584_macsec_add_flow(phydev, flow, update);
+ return vsc8584_macsec_add_flow(ctx->phydev, flow, update);
}
static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
@@ -755,10 +736,6 @@ static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_enable(ctx->phydev, flow);
@@ -770,10 +747,6 @@ static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_disable(ctx->phydev, flow);
@@ -785,12 +758,8 @@ static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_secy *secy = ctx->secy;
- if (ctx->prepare) {
- if (priv->secy)
- return -EEXIST;
-
- return 0;
- }
+ if (priv->secy)
+ return -EEXIST;
priv->secy = secy;
@@ -807,10 +776,6 @@ static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_del_flow(ctx->phydev, flow);
@@ -823,10 +788,6 @@ static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
{
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
vsc8584_macsec_del_secy(ctx);
return vsc8584_macsec_add_secy(ctx);
}
@@ -847,10 +808,6 @@ static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
if (flow->bank == MACSEC_INGR && flow->rx_sa &&
flow->rx_sa->sc->sci == ctx->rx_sc->sci)
@@ -862,33 +819,40 @@ static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_rxsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
@@ -899,11 +863,8 @@ static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
@@ -911,33 +872,40 @@ static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_txsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_txsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
@@ -948,11 +916,8 @@ static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 7e3017e7a1c0..8a13b1ad9a33 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -136,7 +136,7 @@ static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
return;
for (i = 0; i < priv->nstats; i++)
- strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
+ strscpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
ETH_GSTRING_LEN);
}
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 2a8195c50d14..ec91e671f8aa 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -10,6 +10,7 @@
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/hwmon.h>
#include <linux/bitfield.h>
@@ -34,6 +35,11 @@
#define MII_CFG1 18
#define MII_CFG1_MASTER_SLAVE BIT(15)
#define MII_CFG1_AUTO_OP BIT(14)
+#define MII_CFG1_INTERFACE_MODE_MASK GENMASK(9, 8)
+#define MII_CFG1_MII_MODE (0x0 << 8)
+#define MII_CFG1_RMII_MODE_REFCLK_IN BIT(8)
+#define MII_CFG1_RMII_MODE_REFCLK_OUT BIT(9)
+#define MII_CFG1_REVMII_MODE GENMASK(9, 8)
#define MII_CFG1_SLEEP_CONFIRM BIT(6)
#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
#define MII_CFG1_LED_MODE_LINKUP 0
@@ -72,11 +78,15 @@
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
+/* Configure REF_CLK as input in RMII mode */
+#define TJA110X_RMII_MODE_REFCLK_IN BIT(0)
+
struct tja11xx_priv {
char *hwmon_name;
struct device *hwmon_dev;
struct phy_device *phydev;
struct work_struct phy_register_work;
+ u32 flags;
};
struct tja11xx_phy_stats {
@@ -251,8 +261,34 @@ do_test:
return __genphy_config_aneg(phydev, changed);
}
+static int tja11xx_get_interface_mode(struct phy_device *phydev)
+{
+ struct tja11xx_priv *priv = phydev->priv;
+ int mii_mode;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ mii_mode = MII_CFG1_MII_MODE;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ mii_mode = MII_CFG1_REVMII_MODE;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (priv->flags & TJA110X_RMII_MODE_REFCLK_IN)
+ mii_mode = MII_CFG1_RMII_MODE_REFCLK_IN;
+ else
+ mii_mode = MII_CFG1_RMII_MODE_REFCLK_OUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mii_mode;
+}
+
static int tja11xx_config_init(struct phy_device *phydev)
{
+ u16 reg_mask, reg_val;
int ret;
ret = tja11xx_enable_reg_write(phydev);
@@ -265,15 +301,32 @@ static int tja11xx_config_init(struct phy_device *phydev)
switch (phydev->phy_id & PHY_ID_MASK) {
case PHY_ID_TJA1100:
- ret = phy_modify(phydev, MII_CFG1,
- MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
- MII_CFG1_LED_ENABLE,
- MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
- MII_CFG1_LED_ENABLE);
+ reg_mask = MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
+ MII_CFG1_LED_ENABLE;
+ reg_val = MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
+ MII_CFG1_LED_ENABLE;
+
+ reg_mask |= MII_CFG1_INTERFACE_MODE_MASK;
+ ret = tja11xx_get_interface_mode(phydev);
+ if (ret < 0)
+ return ret;
+
+ reg_val |= (ret & 0xffff);
+ ret = phy_modify(phydev, MII_CFG1, reg_mask, reg_val);
if (ret)
return ret;
break;
case PHY_ID_TJA1101:
+ reg_mask = MII_CFG1_INTERFACE_MODE_MASK;
+ ret = tja11xx_get_interface_mode(phydev);
+ if (ret < 0)
+ return ret;
+
+ reg_val = ret & 0xffff;
+ ret = phy_modify(phydev, MII_CFG1, reg_mask, reg_val);
+ if (ret)
+ return ret;
+ fallthrough;
case PHY_ID_TJA1102:
ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
if (ret)
@@ -458,16 +511,36 @@ static int tja11xx_hwmon_register(struct phy_device *phydev,
return PTR_ERR_OR_ZERO(priv->hwmon_dev);
}
+static int tja11xx_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct tja11xx_priv *priv = phydev->priv;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "nxp,rmii-refclk-in"))
+ priv->flags |= TJA110X_RMII_MODE_REFCLK_IN;
+
+ return 0;
+}
+
static int tja11xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct tja11xx_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->phydev = phydev;
+ phydev->priv = priv;
+
+ ret = tja11xx_parse_dt(phydev);
+ if (ret)
+ return ret;
return tja11xx_hwmon_register(phydev, priv);
}
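A hedged sketch of the resulting CFG1 update for an RMII board whose devicetree sets "nxp,rmii-refclk-in" (the helper name is hypothetical; the macros are the ones defined above): tja11xx_parse_dt() sets TJA110X_RMII_MODE_REFCLK_IN in priv->flags, so tja11xx_get_interface_mode() returns MII_CFG1_RMII_MODE_REFCLK_IN and config_init programs bits 9:8 of CFG1 accordingly.

static int example_tja11xx_rmii_refclk_in(struct phy_device *phydev)
{
	/* 0b01 in the interface-mode field: RMII with REF_CLK as input */
	return phy_modify(phydev, MII_CFG1, MII_CFG1_INTERFACE_MODE_MASK,
			  MII_CFG1_RMII_MODE_REFCLK_IN);
}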
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 1f2531a1a876..2c8bf438ea61 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -74,6 +74,80 @@ const char *phy_duplex_to_str(unsigned int duplex)
}
EXPORT_SYMBOL_GPL(phy_duplex_to_str);
+/**
+ * phy_rate_matching_to_str - Return a string describing the rate matching
+ *
+ * @rate_matching: Type of rate matching to describe
+ */
+const char *phy_rate_matching_to_str(int rate_matching)
+{
+ switch (rate_matching) {
+ case RATE_MATCH_NONE:
+ return "none";
+ case RATE_MATCH_PAUSE:
+ return "pause";
+ case RATE_MATCH_CRS:
+ return "crs";
+ case RATE_MATCH_OPEN_LOOP:
+ return "open-loop";
+ }
+ return "Unsupported (update phy-core.c)";
+}
+EXPORT_SYMBOL_GPL(phy_rate_matching_to_str);
+
+/**
+ * phy_interface_num_ports - Return the number of links that can be carried by
+ * a given MAC-PHY physical link. Returns 0 if this is
+ * unknown, otherwise the number of links.
+ *
+ * @interface: The interface mode for which we want the number of ports
+ */
+int phy_interface_num_ports(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XLGMII:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return 1;
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return 4;
+ case PHY_INTERFACE_MODE_MAX:
+ WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode");
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_interface_num_ports);
+
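A hypothetical consumer of the new helper (not part of this patch), rejecting a configuration that muxes more ports onto the MAC-PHY link than the interface mode can carry:

static int example_check_num_ports(phy_interface_t interface, int wanted)
{
	int max = phy_interface_num_ports(interface);

	/* 0 means "unknown", so only enforce the limit when it is known */
	if (max && wanted > max)
		return -EINVAL;	/* e.g. 5 ports over QSGMII, which carries 4 */

	return 0;
}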
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed.
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8d3ee3a6495b..e741d8aebffe 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -115,6 +115,33 @@ void phy_print_status(struct phy_device *phydev)
EXPORT_SYMBOL(phy_print_status);
/**
+ * phy_get_rate_matching - determine if rate matching is supported
+ * @phydev: The phy device to return rate matching for
+ * @iface: The interface mode to use
+ *
+ * This determines the type of rate matching (if any) that @phydev supports
+ * using @iface. @iface may be %PHY_INTERFACE_MODE_NA to determine if any
+ * interface supports rate matching.
+ *
+ * Return: The type of rate matching @phydev supports for @iface, or
+ * %RATE_MATCH_NONE.
+ */
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int ret = RATE_MATCH_NONE;
+
+ if (phydev->drv->get_rate_matching) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_rate_matching(phydev, iface);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_get_rate_matching);
+
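phy_get_rate_matching() simply wraps an optional driver callback. A hypothetical driver implementation (not part of this patch) for a PHY that paces the MAC with pause frames when running over 10GBASE-R might look like this:

static int example_get_rate_matching(struct phy_device *phydev,
				     phy_interface_t iface)
{
	/* Report pause-based rate matching for 10GBASE-R, and also when the
	 * caller asks about "any" interface with PHY_INTERFACE_MODE_NA.
	 */
	if (iface == PHY_INTERFACE_MODE_10GBASER ||
	    iface == PHY_INTERFACE_MODE_NA)
		return RATE_MATCH_PAUSE;

	return RATE_MATCH_NONE;
}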
+/**
* phy_config_interrupt - configure the PHY device for the requested interrupts
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
@@ -256,6 +283,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.duplex = phydev->duplex;
cmd->base.master_slave_cfg = phydev->master_slave_get;
cmd->base.master_slave_state = phydev->master_slave_state;
+ cmd->base.rate_matching = phydev->rate_matching;
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0c6efd792690..57849ac0384e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -316,11 +317,13 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
- /* If we managed to get here with the PHY state machine in a state other
- * than PHY_HALTED this is an indication that something went wrong and
- * we should most likely be using MAC managed PM and we are not.
+ /* If we managed to get here with the PHY state machine in a state
+ * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication
+ * that something went wrong and we should most likely be using
+ * MAC managed PM, but we are not.
*/
- WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+ phydev->state != PHY_UP);
ret = phy_init_hw(phydev);
if (ret < 0)
@@ -370,7 +373,7 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
if (!fixup)
return -ENOMEM;
- strlcpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
+ strscpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
fixup->phy_uid = phy_uid;
fixup->phy_uid_mask = phy_uid_mask;
fixup->run = run;
@@ -520,7 +523,7 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
+ return sysfs_emit(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
}
static DEVICE_ATTR_RO(phy_id);
@@ -535,7 +538,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
else
mode = phy_modes(phydev->interface);
- return sprintf(buf, "%s\n", mode);
+ return sysfs_emit(buf, "%s\n", mode);
}
static DEVICE_ATTR_RO(phy_interface);
@@ -545,7 +548,7 @@ phy_has_fixups_show(struct device *dev, struct device_attribute *attr,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "%d\n", phydev->has_fixups);
+ return sysfs_emit(buf, "%d\n", phydev->has_fixups);
}
static DEVICE_ATTR_RO(phy_has_fixups);
@@ -555,7 +558,7 @@ static ssize_t phy_dev_flags_show(struct device *dev,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "0x%08x\n", phydev->dev_flags);
+ return sysfs_emit(buf, "0x%08x\n", phydev->dev_flags);
}
static DEVICE_ATTR_RO(phy_dev_flags);
@@ -989,6 +992,7 @@ EXPORT_SYMBOL(phy_device_register);
void phy_device_remove(struct phy_device *phydev)
{
unregister_mii_timestamper(phydev->mii_ts);
+ pse_control_put(phydev->psec);
device_del(&phydev->mdio.dev);
@@ -1310,7 +1314,7 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "%d\n", !phydev->attached_dev);
+ return sysfs_emit(buf, "%d\n", !phydev->attached_dev);
}
static DEVICE_ATTR_RO(phy_standalone);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9bd69328dc4d..75464df191ef 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -77,6 +77,7 @@ struct phylink {
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
+ DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
u8 sfp_port;
};
@@ -155,8 +156,84 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
-static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
- unsigned long caps)
+/**
+ * phylink_interface_max_speed() - get the maximum speed of a phy interface
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ *
+ * Determine the maximum speed of a phy interface. This is intended to help
+ * determine the correct speed to pass to the MAC when the phy is performing
+ * rate matching.
+ *
+ * Return: The maximum speed of @interface
+ */
+static int phylink_interface_max_speed(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ return SPEED_100;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ return SPEED_1000;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return SPEED_2500;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ return SPEED_5000;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return SPEED_10000;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ return SPEED_25000;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ return SPEED_40000;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ /* No idea! Garbage in, unknown out */
+ return SPEED_UNKNOWN;
+ }
+
+ /* If we get here, someone forgot to add an interface mode above */
+ WARN_ON_ONCE(1);
+ return SPEED_UNKNOWN;
+}
+
+/**
+ * phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @caps: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that are
+ * supported by the @caps. @linkmodes must have been initialised previously.
+ */
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps)
{
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
@@ -295,21 +372,72 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
__set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
}
}
+EXPORT_SYMBOL_GPL(phylink_caps_to_linkmodes);
+
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF },
+};
/**
- * phylink_get_linkmodes() - get acceptable link modes
- * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * phylink_cap_from_speed_duplex - Get mac capability from speed/duplex
+ * @speed: the speed to search for
+ * @duplex: the duplex to search for
+ *
+ * Find the mac capability for a given speed and duplex.
+ *
+ * Return: A mask with the mac capability matching @speed and @duplex, or 0 if
+ * there were no matches.
+ */
+static unsigned long phylink_cap_from_speed_duplex(int speed,
+ unsigned int duplex)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++) {
+ if (speed == phylink_caps_params[i].speed &&
+ duplex == phylink_caps_params[i].duplex)
+ return phylink_caps_params[i].mask;
+ }
+
+ return 0;
+}
+
+/**
+ * phylink_get_capabilities() - get capabilities for a given MAC
* @interface: phy interface mode defined by &typedef phy_interface_t
* @mac_capabilities: bitmask of MAC capabilities
+ * @rate_matching: type of rate matching being performed
*
- * Set all possible pause, speed and duplex linkmodes in @linkmodes that
- * are supported by the @interface mode and @mac_capabilities. @linkmodes
- * must have been initialised previously.
+ * Get the MAC capabilities that are supported by the @interface mode and
+ * @mac_capabilities.
*/
-void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
- unsigned long mac_capabilities)
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching)
{
+ int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ unsigned long matched_caps = 0;
switch (interface) {
case PHY_INTERFACE_MODE_USXGMII:
@@ -321,6 +449,7 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_GMII:
caps |= MAC_1000HD | MAC_1000FD;
@@ -344,6 +473,7 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
case PHY_INTERFACE_MODE_1000BASEX:
caps |= MAC_1000HD;
fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEKX:
case PHY_INTERFACE_MODE_TRGMII:
caps |= MAC_1000FD;
break;
@@ -381,9 +511,55 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
break;
}
- phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+ switch (rate_matching) {
+ case RATE_MATCH_OPEN_LOOP:
+ /* TODO */
+ fallthrough;
+ case RATE_MATCH_NONE:
+ matched_caps = 0;
+ break;
+ case RATE_MATCH_PAUSE: {
+ /* The MAC must support asymmetric pause towards the local
+ * device for this. We could allow just symmetric pause, but
+ * then we might have to renegotiate if the link partner
+ * doesn't support pause. This is because there's no way to
+ * accept pause frames without transmitting them if we only
+ * support symmetric pause.
+ */
+ if (!(mac_capabilities & MAC_SYM_PAUSE) ||
+ !(mac_capabilities & MAC_ASYM_PAUSE))
+ break;
+
+ /* We can't adapt if the MAC doesn't support the interface's
+ * max speed at full duplex.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_FULL)) {
+ /* Although a duplex-matching phy might exist, we
+ * conservatively remove these modes because the MAC
+ * will not be aware of the half-duplex nature of the
+ * link.
+ */
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
+ }
+ break;
+ }
+ case RATE_MATCH_CRS:
+ /* The MAC must support half duplex at the interface's max
+ * speed.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_HALF)) {
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= mac_capabilities;
+ }
+ break;
+ }
+
+ return (caps & mac_capabilities) | matched_caps;
}
-EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
+EXPORT_SYMBOL_GPL(phylink_get_capabilities);
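A hedged usage sketch, mirroring what phylink_generic_validate() does below: resolve the MAC capabilities for an interface, taking the PHY's rate matching into account, and expand them into an ethtool linkmode mask. With RATE_MATCH_PAUSE, lower full-duplex speeds stay advertised even though the MAC only runs at the interface rate, because the PHY throttles it with pause frames.

static void example_expand_caps(unsigned long *linkmodes)
{
	unsigned long caps;

	caps = phylink_get_capabilities(PHY_INTERFACE_MODE_10GBASER,
					MAC_10000FD | MAC_SYM_PAUSE |
					MAC_ASYM_PAUSE,
					RATE_MATCH_PAUSE);
	phylink_caps_to_linkmodes(linkmodes, caps);
}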
/**
* phylink_generic_validate() - generic validate() callback implementation
@@ -400,10 +576,14 @@ void phylink_generic_validate(struct phylink_config *config,
struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ unsigned long caps;
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
- phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+ caps = phylink_get_capabilities(state->interface,
+ config->mac_capabilities,
+ state->rate_matching);
+ phylink_caps_to_linkmodes(mask, caps);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
@@ -458,8 +638,9 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
-static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
- struct phylink_link_state *state)
+static int phylink_validate_mask(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state,
+ const unsigned long *interfaces)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, };
@@ -468,7 +649,7 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
int intf;
for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
- if (test_bit(intf, pl->config->supported_interfaces)) {
+ if (test_bit(intf, interfaces)) {
linkmode_copy(s, supported);
t = *state;
@@ -489,12 +670,14 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- if (!phy_interface_empty(pl->config->supported_interfaces)) {
+ const unsigned long *interfaces = pl->config->supported_interfaces;
+
+ if (!phy_interface_empty(interfaces)) {
if (state->interface == PHY_INTERFACE_MODE_NA)
- return phylink_validate_any(pl, supported, state);
+ return phylink_validate_mask(pl, supported, state,
+ interfaces);
- if (!test_bit(state->interface,
- pl->config->supported_interfaces))
+ if (!test_bit(state->interface, interfaces))
return -EINVAL;
}
@@ -632,6 +815,12 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RTBI:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -774,11 +963,12 @@ static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
phylink_dbg(pl,
- "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+ "%s: mode=%s/%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
__func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
+ phy_rate_matching_to_str(state->rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
@@ -915,7 +1105,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- if (state->an_enabled) {
+ state->rate_matching = pl->link_config.rate_matching;
+ if (state->an_enabled) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -998,19 +1189,43 @@ static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
+ int speed, duplex;
+ bool rx_pause;
+
+ speed = link_state.speed;
+ duplex = link_state.duplex;
+ rx_pause = !!(link_state.pause & MLO_PAUSE_RX);
+
+ switch (link_state.rate_matching) {
+ case RATE_MATCH_PAUSE:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will send
+ * pause frames to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_FULL;
+ rx_pause = true;
+ break;
+
+ case RATE_MATCH_CRS:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will cause
+ * collisions to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_HALF;
+ break;
+ }
pl->cur_interface = link_state.interface;
if (pl->pcs && pl->pcs->ops->pcs_link_up)
pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
- pl->cur_interface,
- link_state.speed, link_state.duplex);
+ pl->cur_interface, speed, duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev,
- pl->cur_link_an_mode, pl->cur_interface,
- link_state.speed, link_state.duplex,
- !!(link_state.pause & MLO_PAUSE_TX),
- !!(link_state.pause & MLO_PAUSE_RX));
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->cur_interface, speed, duplex,
+ !!(link_state.pause & MLO_PAUSE_TX), rx_pause);
if (ndev)
netif_carrier_on(ndev);
@@ -1102,6 +1317,17 @@ static void phylink_resolve(struct work_struct *w)
}
link_state.interface = pl->phy_state.interface;
+ /* If we are doing rate matching, then the
+ * link speed/duplex comes from the PHY
+ */
+ if (pl->phy_state.rate_matching) {
+ link_state.rate_matching =
+ pl->phy_state.rate_matching;
+ link_state.speed = pl->phy_state.speed;
+ link_state.duplex =
+ pl->phy_state.duplex;
+ }
+
/* If we have a PHY, we need to update with
* the PHY flow control bits.
*/
@@ -1336,6 +1562,7 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
mutex_lock(&pl->state_mutex);
pl->phy_state.speed = phydev->speed;
pl->phy_state.duplex = phydev->duplex;
+ pl->phy_state.rate_matching = phydev->rate_matching;
pl->phy_state.pause = MLO_PAUSE_NONE;
if (tx_pause)
pl->phy_state.pause |= MLO_PAUSE_TX;
@@ -1347,10 +1574,11 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
+ phy_rate_matching_to_str(phydev->rate_matching),
phylink_pause_to_str(pl->phy_state.pause));
}
@@ -1387,6 +1615,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
config.interface = PHY_INTERFACE_MODE_NA;
else
config.interface = interface;
+ config.rate_matching = phy_get_rate_matching(phy, config.interface);
ret = phylink_validate(pl, supported, &config);
if (ret) {
@@ -1414,6 +1643,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
pl->phy_state.pause = MLO_PAUSE_NONE;
pl->phy_state.speed = SPEED_UNKNOWN;
pl->phy_state.duplex = DUPLEX_UNKNOWN;
+ pl->phy_state.rate_matching = RATE_MATCH_NONE;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -1439,7 +1669,7 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
{
if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(interface))))
+ phy_interface_mode_is_8023z(interface) && !pl->sfp_bus)))
return -EINVAL;
if (pl->phydev)
@@ -1856,8 +2086,10 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
{
phylink_merge_link_mode(kset->link_modes.advertising, state->advertising);
linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising);
- kset->base.speed = state->speed;
- kset->base.duplex = state->duplex;
+ if (kset->base.rate_matching == RATE_MATCH_NONE) {
+ kset->base.speed = state->speed;
+ kset->base.duplex = state->duplex;
+ }
kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE :
AUTONEG_DISABLE;
}
@@ -2571,21 +2803,85 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static int phylink_sfp_config(struct phylink *pl, u8 mode,
- const unsigned long *supported,
- const unsigned long *advertising)
+static const phy_interface_t phylink_sfp_interface_preference[] = {
+ PHY_INTERFACE_MODE_25GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_5GBASER,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_100BASEX,
+};
+
+static DECLARE_PHY_INTERFACE_MASK(phylink_sfp_interfaces);
+
+static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
+ const unsigned long *intf)
+{
+ phy_interface_t interface;
+ size_t i;
+
+ interface = PHY_INTERFACE_MODE_NA;
+ for (i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); i++)
+ if (test_bit(phylink_sfp_interface_preference[i], intf)) {
+ interface = phylink_sfp_interface_preference[i];
+ break;
+ }
+
+ return interface;
+}
+
+static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ bool changed = false;
+
+ phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
+ phylink_an_mode_str(mode), phy_modes(state->interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+
+ if (!linkmode_equal(pl->supported, supported)) {
+ linkmode_copy(pl->supported, supported);
+ changed = true;
+ }
+
+ if (!linkmode_equal(pl->link_config.advertising, state->advertising)) {
+ linkmode_copy(pl->link_config.advertising, state->advertising);
+ changed = true;
+ }
+
+ if (pl->cur_link_an_mode != mode ||
+ pl->link_config.interface != state->interface) {
+ pl->cur_link_an_mode = mode;
+ pl->link_config.interface = state->interface;
+
+ changed = true;
+
+ phylink_info(pl, "switched to %s/%s link mode\n",
+ phylink_an_mode_str(mode),
+ phy_modes(state->interface));
+ }
+
+ if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state))
+ phylink_mac_initial_config(pl, false);
+}
+
+static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
+ struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
phy_interface_t iface;
- bool changed;
int ret;
- linkmode_copy(support, supported);
+ linkmode_copy(support, phy->supported);
memset(&config, 0, sizeof(config));
- linkmode_copy(config.advertising, advertising);
+ linkmode_copy(config.advertising, phy->advertising);
config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
@@ -2622,60 +2918,100 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
return ret;
}
- phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ pl->link_port = pl->sfp_port;
+
+ phylink_sfp_set_config(pl, mode, support, &config);
+
+ return 0;
+}
+
+static int phylink_sfp_config_optical(struct phylink *pl)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phylink_link_state config;
+ phy_interface_t interface;
+ int ret;
+
+ phylink_dbg(pl, "optical SFP: interfaces=[mac=%*pbl, sfp=%*pbl]\n",
+ (int)PHY_INTERFACE_MODE_MAX,
+ pl->config->supported_interfaces,
+ (int)PHY_INTERFACE_MODE_MAX,
+ pl->sfp_interfaces);
- if (phy_interface_mode_is_8023z(iface) && pl->phydev)
+ /* Find the union of the supported interfaces by the PCS/MAC and
+ * the SFP module.
+ */
+ phy_interface_and(interfaces, pl->config->supported_interfaces,
+ pl->sfp_interfaces);
+ if (phy_interface_empty(interfaces)) {
+ phylink_err(pl, "unsupported SFP module: no common interface modes\n");
return -EINVAL;
+ }
- changed = !linkmode_equal(pl->supported, support) ||
- !linkmode_equal(pl->link_config.advertising,
- config.advertising);
- if (changed) {
- linkmode_copy(pl->supported, support);
- linkmode_copy(pl->link_config.advertising, config.advertising);
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(support, pl->sfp_support);
+ linkmode_copy(config.advertising, pl->sfp_support);
+ config.speed = SPEED_UNKNOWN;
+ config.duplex = DUPLEX_UNKNOWN;
+ config.pause = MLO_PAUSE_AN;
+ config.an_enabled = true;
+
+ /* For all the interfaces that are supported, reduce the sfp_support
+ * mask to only those link modes that can be supported.
+ */
+ ret = phylink_validate_mask(pl, pl->sfp_support, &config, interfaces);
+ if (ret) {
+ phylink_err(pl, "unsupported SFP module: validation with support %*pb failed\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ return ret;
}
- if (pl->cur_link_an_mode != mode ||
- pl->link_config.interface != config.interface) {
- pl->link_config.interface = config.interface;
- pl->cur_link_an_mode = mode;
+ interface = phylink_choose_sfp_interface(pl, interfaces);
+ if (interface == PHY_INTERFACE_MODE_NA) {
+ phylink_err(pl, "failed to select SFP interface\n");
+ return -EINVAL;
+ }
- changed = true;
+ phylink_dbg(pl, "optical SFP: chosen %s interface\n",
+ phy_modes(interface));
- phylink_info(pl, "switched to %s/%s link mode\n",
- phylink_an_mode_str(mode),
- phy_modes(config.interface));
+ config.interface = interface;
+
+ /* Ignore errors if we're expecting a PHY to attach later */
+ ret = phylink_validate(pl, support, &config);
+ if (ret) {
+ phylink_err(pl, "validation with support %*pb failed: %pe\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
+ return ret;
}
pl->link_port = pl->sfp_port;
- if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
- &pl->phylink_disable_state))
- phylink_mac_initial_config(pl, false);
+ phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
- return ret;
+ return 0;
}
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
struct phylink *pl = upstream;
- unsigned long *support = pl->sfp_support;
ASSERT_RTNL();
- linkmode_zero(support);
- sfp_parse_support(pl->sfp_bus, id, support);
- pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+ linkmode_zero(pl->sfp_support);
+ phy_interface_zero(pl->sfp_interfaces);
+ sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces);
+ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support);
/* If this module may have a PHY connecting later, defer until later */
pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
if (pl->sfp_may_have_phy)
return 0;
- return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+ return phylink_sfp_config_optical(pl);
}
static int phylink_sfp_module_start(void *upstream)
@@ -2694,8 +3030,7 @@ static int phylink_sfp_module_start(void *upstream)
if (!pl->sfp_may_have_phy)
return 0;
- return phylink_sfp_config(pl, MLO_AN_INBAND,
- pl->sfp_support, pl->sfp_support);
+ return phylink_sfp_config_optical(pl);
}
static void phylink_sfp_module_stop(void *upstream)
@@ -2755,8 +3090,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
else
mode = MLO_AN_INBAND;
+ /* Set the PHY's host supported interfaces */
+ phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
+ pl->config->supported_interfaces);
+
/* Do the initial configuration */
- ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising);
+ ret = phylink_sfp_config_phy(pl, mode, phy);
if (ret < 0)
return ret;
@@ -2929,6 +3268,7 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
@@ -3107,4 +3447,15 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
}
EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
+static int __init phylink_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i)
+ __set_bit(phylink_sfp_interface_preference[i],
+ phylink_sfp_interfaces);
+
+ return 0;
+}
+
+module_init(phylink_init);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a5671ab896b3..3d99fd6664d7 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -70,6 +70,7 @@
#define RTLGEN_SPEED_MASK 0x0630
#define RTL_GENERIC_PHYID 0x001cc800
+#define RTL_8211FVD_PHYID 0x001cc878
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -78,6 +79,7 @@ MODULE_LICENSE("GPL");
struct rtl821x_priv {
u16 phycr1;
u16 phycr2;
+ bool has_phycr2;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -94,6 +96,7 @@ static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct rtl821x_priv *priv;
+ u32 phy_id = phydev->drv->phy_id;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -108,13 +111,16 @@ static int rtl821x_probe(struct phy_device *phydev)
if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
- if (ret < 0)
- return ret;
+ priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
+ if (priv->has_phycr2) {
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ if (ret < 0)
+ return ret;
- priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
- if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
- priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
+ if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
+ priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ }
phydev->priv = priv;
@@ -400,12 +406,14 @@ static int rtl8211f_config_init(struct phy_device *phydev)
val_rxdly ? "enabled" : "disabled");
}
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
- RTL8211F_CLKOUT_EN, priv->phycr2);
- if (ret < 0) {
- dev_err(dev, "clkout configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
+ if (priv->has_phycr2) {
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_CLKOUT_EN, priv->phycr2);
+ if (ret < 0) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return genphy_soft_reset(phydev);
@@ -924,6 +932,18 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID),
+ .name = "RTL8211F-VD Gigabit Ethernet",
+ .probe = rtl821x_probe,
+ .config_init = &rtl8211f_config_init,
+ .read_status = rtlgen_read_status,
+ .config_intr = &rtl8211f_config_intr,
+ .handle_interrupt = rtl8211f_handle_interrupt,
+ .suspend = genphy_suspend,
+ .resume = rtl821x_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
.name = "Generic FE-GE Realtek PHY",
.match_phy_device = rtlgen_match_phy_device,
.read_status = rtlgen_read_status,
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 15aa5ac1ff49..29e3fa86bac3 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -10,12 +10,6 @@
#include "sfp.h"
-struct sfp_quirk {
- const char *vendor;
- const char *part;
- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes);
-};
-
/**
* struct sfp_bus - internal representation of a sfp bus
*/
@@ -38,93 +32,6 @@ struct sfp_bus {
bool started;
};
-static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
- unsigned long *modes)
-{
- phylink_set(modes, 2500baseX_Full);
-}
-
-static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
- unsigned long *modes)
-{
- /* Ubiquiti U-Fiber Instant module claims that support all transceiver
- * types including 10G Ethernet which is not truth. So clear all claimed
- * modes and set only one mode which module supports: 1000baseX_Full.
- */
- phylink_zero(modes);
- phylink_set(modes, 1000baseX_Full);
-}
-
-static const struct sfp_quirk sfp_quirks[] = {
- {
- // Alcatel Lucent G-010S-P can operate at 2500base-X, but
- // incorrectly report 2500MBd NRZ in their EEPROM
- .vendor = "ALCATELLUCENT",
- .part = "G010SP",
- .modes = sfp_quirk_2500basex,
- }, {
- // Alcatel Lucent G-010S-A can operate at 2500base-X, but
- // report 3.2GBd NRZ in their EEPROM
- .vendor = "ALCATELLUCENT",
- .part = "3FE46541AA",
- .modes = sfp_quirk_2500basex,
- }, {
- // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd
- // NRZ in their EEPROM
- .vendor = "HUAWEI",
- .part = "MA5671A",
- .modes = sfp_quirk_2500basex,
- }, {
- // Lantech 8330-262D-E can operate at 2500base-X, but
- // incorrectly report 2500MBd NRZ in their EEPROM
- .vendor = "Lantech",
- .part = "8330-262D-E",
- .modes = sfp_quirk_2500basex,
- }, {
- .vendor = "UBNT",
- .part = "UF-INSTANT",
- .modes = sfp_quirk_ubnt_uf_instant,
- },
-};
-
-static size_t sfp_strlen(const char *str, size_t maxlen)
-{
- size_t size, i;
-
- /* Trailing characters should be filled with space chars */
- for (i = 0, size = 0; i < maxlen; i++)
- if (str[i] != ' ')
- size = i + 1;
-
- return size;
-}
-
-static bool sfp_match(const char *qs, const char *str, size_t len)
-{
- if (!qs)
- return true;
- if (strlen(qs) != len)
- return false;
- return !strncmp(qs, str, len);
-}
-
-static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
-{
- const struct sfp_quirk *q;
- unsigned int i;
- size_t vs, ps;
-
- vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
- ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
-
- for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
- if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
- sfp_match(q->part, id->base.vendor_pn, ps))
- return q;
-
- return NULL;
-}
-
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -232,12 +139,14 @@ EXPORT_SYMBOL_GPL(sfp_may_have_phy);
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
* @support: pointer to an array of unsigned long for the ethtool support mask
+ * @interfaces: pointer to an array of unsigned long for phy interface modes
+ * mask
*
* Parse the EEPROM identification information and derive the supported
* ethtool link modes for the module.
*/
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support)
+ unsigned long *support, unsigned long *interfaces)
{
unsigned int br_min, br_nom, br_max;
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
@@ -264,54 +173,81 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
/* Set ethtool support from the compliance fields. */
- if (id->base.e10g_base_sr)
+ if (id->base.e10g_base_sr) {
phylink_set(modes, 10000baseSR_Full);
- if (id->base.e10g_base_lr)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_lr) {
phylink_set(modes, 10000baseLR_Full);
- if (id->base.e10g_base_lrm)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_lrm) {
phylink_set(modes, 10000baseLRM_Full);
- if (id->base.e10g_base_er)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_er) {
phylink_set(modes, 10000baseER_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
if (id->base.e1000_base_sx ||
id->base.e1000_base_lx ||
- id->base.e1000_base_cx)
+ id->base.e1000_base_cx) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
if (id->base.e1000_base_t) {
phylink_set(modes, 1000baseT_Half);
phylink_set(modes, 1000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
}
/* 1000Base-PX or 1000Base-BX10 */
if ((id->base.e_base_px || id->base.e_base_bx10) &&
- br_min <= 1300 && br_max >= 1200)
+ br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
/* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */
- if (id->base.e100_base_fx || id->base.e100_base_lx)
+ if (id->base.e100_base_fx || id->base.e100_base_lx) {
phylink_set(modes, 100baseFX_Full);
- if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100)
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces);
+ }
+ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100) {
phylink_set(modes, 100baseFX_Full);
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces);
+ }
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
*/
if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
/* This may look odd, but some manufacturers use 12000MBd */
- if (br_min <= 12000 && br_max >= 10300)
+ if (br_min <= 12000 && br_max >= 10300) {
phylink_set(modes, 10000baseCR_Full);
- if (br_min <= 3200 && br_max >= 3100)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (br_min <= 3200 && br_max >= 3100) {
phylink_set(modes, 2500baseX_Full);
- if (br_min <= 1300 && br_max >= 1200)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
+ if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
}
if (id->base.sfp_ct_passive) {
- if (id->base.passive.sff8431_app_e)
+ if (id->base.passive.sff8431_app_e) {
phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
}
if (id->base.sfp_ct_active) {
if (id->base.active.sff8431_app_e ||
id->base.active.sff8431_lim) {
phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
}
}
@@ -336,12 +272,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_10GBASE_T_SFI:
case SFF8024_ECC_10GBASE_T_SR:
phylink_set(modes, 10000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
break;
case SFF8024_ECC_5GBASE_T:
phylink_set(modes, 5000baseT_Full);
break;
case SFF8024_ECC_2_5GBASE_T:
phylink_set(modes, 2500baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
break;
default:
dev_warn(bus->sfp_dev,
@@ -354,10 +292,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (id->base.fc_speed_100 ||
id->base.fc_speed_200 ||
id->base.fc_speed_400) {
- if (id->base.br_nominal >= 31)
+ if (id->base.br_nominal >= 31) {
phylink_set(modes, 2500baseX_Full);
- if (id->base.br_nominal >= 12)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
+ if (id->base.br_nominal >= 12) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
}
/* If we haven't discovered any modes that this module supports, try
@@ -370,14 +312,18 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* 2500BASE-X, so we allow some slack here.
*/
if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
- if (br_min <= 1300 && br_max >= 1200)
+ if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
- if (br_min <= 3200 && br_max >= 2500)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
+ if (br_min <= 3200 && br_max >= 2500) {
phylink_set(modes, 2500baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
}
- if (bus->sfp_quirk)
- bus->sfp_quirk->modes(id, modes);
+ if (bus->sfp_quirk && bus->sfp_quirk->modes)
+ bus->sfp_quirk->modes(id, modes, interfaces);
linkmode_or(support, support, modes);
@@ -786,12 +732,13 @@ void sfp_link_down(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_link_down);
-int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk)
{
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
- bus->sfp_quirk = sfp_lookup_quirk(id);
+ bus->sfp_quirk = quirk;
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 63f90fe9a4d2..40c9a64c5e30 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -166,6 +166,7 @@ static const enum gpiod_flags gpio_flags[] = {
* on board (for a copper SFP) time to initialise.
*/
#define T_WAIT msecs_to_jiffies(50)
+#define T_WAIT_ROLLBALL msecs_to_jiffies(25000)
#define T_START_UP msecs_to_jiffies(300)
#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
@@ -205,8 +206,11 @@ static const enum gpiod_flags gpio_flags[] = {
/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
+ * RollBall SFPs access the PHY via the SFP Enhanced Digital Diagnostic
+ * Interface at address 0x51 (mdio-i2c uses the RollBall protocol there).
*/
-#define SFP_PHY_ADDR 22
+#define SFP_PHY_ADDR 22
+#define SFP_PHY_ADDR_ROLLBALL 17
struct sff_data {
unsigned int gpios;
@@ -218,6 +222,7 @@ struct sfp {
struct i2c_adapter *i2c;
struct mii_bus *i2c_mii;
struct sfp_bus *sfp_bus;
+ enum mdio_i2c_proto mdio_protocol;
struct phy_device *mod_phy;
const struct sff_data *type;
size_t i2c_block_size;
@@ -234,6 +239,7 @@ struct sfp {
bool need_poll;
struct mutex st_mutex; /* Protects state */
+ unsigned int state_hw_mask;
unsigned int state_soft_mask;
unsigned int state;
struct delayed_work poll;
@@ -250,8 +256,11 @@ struct sfp {
struct sfp_eeprom_id id;
unsigned int module_power_mW;
unsigned int module_t_start_up;
+ unsigned int module_t_wait;
bool tx_fault_ignore;
+ const struct sfp_quirk *quirk;
+
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
struct delayed_work hwmon_probe;
@@ -308,6 +317,136 @@ static const struct of_device_id sfp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sfp_of_match);
+static void sfp_fixup_long_startup(struct sfp *sfp)
+{
+ sfp->module_t_start_up = T_START_UP_BAD_GPON;
+}
+
+static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
+{
+ sfp->tx_fault_ignore = true;
+}
+
+static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+{
+ /* Ignore the TX_FAULT and LOS signals on this module.
+ * These are possibly used for other purposes on this
+ * module, e.g. a serial port.
+ */
+ sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
+}
+
+static void sfp_fixup_rollball(struct sfp *sfp)
+{
+ sfp->mdio_protocol = MDIO_I2C_ROLLBALL;
+ sfp->module_t_wait = T_WAIT_ROLLBALL;
+}
+
+static void sfp_fixup_rollball_cc(struct sfp *sfp)
+{
+ sfp_fixup_rollball(sfp);
+
+ /* Some RollBall SFPs may have a wrong (zero) extended compliance code
+ * burned into their EEPROM. For PHY probing we need the correct one.
+ */
+ sfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SFI;
+}
+
+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ unsigned long *modes,
+ unsigned long *interfaces)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+}
+
+static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
+ unsigned long *modes,
+ unsigned long *interfaces)
+{
+ /* The Ubiquiti U-Fiber Instant module claims to support all transceiver
+ * types, including 10G Ethernet, which is not true. So clear all claimed
+ * modes and set only the one mode the module supports: 1000baseX_Full.
+ */
+ linkmode_zero(modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
+}
+
+#define SFP_QUIRK(_v, _p, _m, _f) \
+ { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, }
+#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL)
+#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f)
+
+static const struct sfp_quirk sfp_quirks[] = {
+ // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly
+ // report 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex),
+
+ // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd
+ // NRZ in their EEPROM
+ SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
+ sfp_fixup_long_startup),
+
+ SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+
+ // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
+ // their EEPROM
+ SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
+ // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ // 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+
+ SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
+
+ SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball),
+ SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball),
+};
+
+static size_t sfp_strlen(const char *str, size_t maxlen)
+{
+ size_t size, i;
+
+ /* Trailing characters should be filled with space chars, but
+ * some manufacturers can't read SFF-8472 and use NUL.
+ */
+ for (i = 0, size = 0; i < maxlen; i++)
+ if (str[i] != ' ' && str[i] != '\0')
+ size = i + 1;
+
+ return size;
+}
+
+static bool sfp_match(const char *qs, const char *str, size_t len)
+{
+ if (!qs)
+ return true;
+ if (strlen(qs) != len)
+ return false;
+ return !strncmp(qs, str, len);
+}
+
+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
+{
+ const struct sfp_quirk *q;
+ unsigned int i;
+ size_t vs, ps;
+
+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
+
+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
+ if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
+ sfp_match(q->part, id->base.vendor_pn, ps))
+ return q;
+
+ return NULL;
+}
+
static unsigned long poll_jiffies;
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -419,9 +558,6 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
{
- struct mii_bus *i2c_mii;
- int ret;
-
if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
return -EINVAL;
@@ -429,7 +565,15 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
sfp->read = sfp_i2c_read;
sfp->write = sfp_i2c_write;
- i2c_mii = mdio_i2c_alloc(sfp->dev, i2c);
+ return 0;
+}
+
+static int sfp_i2c_mdiobus_create(struct sfp *sfp)
+{
+ struct mii_bus *i2c_mii;
+ int ret;
+
+ i2c_mii = mdio_i2c_alloc(sfp->dev, sfp->i2c, sfp->mdio_protocol);
if (IS_ERR(i2c_mii))
return PTR_ERR(i2c_mii);
@@ -447,6 +591,12 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
return 0;
}
+static void sfp_i2c_mdiobus_destroy(struct sfp *sfp)
+{
+ mdiobus_unregister(sfp->i2c_mii);
+ sfp->i2c_mii = NULL;
+}
+
/* Interface */
static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
@@ -499,17 +649,18 @@ static void sfp_soft_set_state(struct sfp *sfp, unsigned int state)
static void sfp_soft_start_poll(struct sfp *sfp)
{
const struct sfp_eeprom_id *id = &sfp->id;
+ unsigned int mask = 0;
sfp->state_soft_mask = 0;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE &&
- !sfp->gpio[GPIO_TX_DISABLE])
- sfp->state_soft_mask |= SFP_F_TX_DISABLE;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT &&
- !sfp->gpio[GPIO_TX_FAULT])
- sfp->state_soft_mask |= SFP_F_TX_FAULT;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS &&
- !sfp->gpio[GPIO_LOS])
- sfp->state_soft_mask |= SFP_F_LOS;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE)
+ mask |= SFP_F_TX_DISABLE;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT)
+ mask |= SFP_F_TX_FAULT;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS)
+ mask |= SFP_F_LOS;
+
+ // Poll the soft state for hardware pins we want to ignore
+ sfp->state_soft_mask = ~sfp->state_hw_mask & mask;
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
!sfp->need_poll)
@@ -523,10 +674,11 @@ static void sfp_soft_stop_poll(struct sfp *sfp)
static unsigned int sfp_get_state(struct sfp *sfp)
{
- unsigned int state = sfp->get_state(sfp);
+ unsigned int soft = sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT);
+ unsigned int state;
- if (state & SFP_F_PRESENT &&
- sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT))
+ state = sfp->get_state(sfp) & sfp->state_hw_mask;
+ if (state & SFP_F_PRESENT && soft)
state |= sfp_soft_get_state(sfp);
return state;
@@ -1195,90 +1347,45 @@ static const struct hwmon_ops sfp_hwmon_ops = {
.read_string = sfp_hwmon_read_string,
};
-static u32 sfp_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_chip = {
- .type = hwmon_chip,
- .config = sfp_hwmon_chip_config,
-};
-
-static u32 sfp_hwmon_temp_config[] = {
- HWMON_T_INPUT |
- HWMON_T_MAX | HWMON_T_MIN |
- HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
- HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
- HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = {
- .type = hwmon_temp,
- .config = sfp_hwmon_temp_config,
-};
-
-static u32 sfp_hwmon_vcc_config[] = {
- HWMON_I_INPUT |
- HWMON_I_MAX | HWMON_I_MIN |
- HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
- HWMON_I_CRIT | HWMON_I_LCRIT |
- HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
- HWMON_I_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = {
- .type = hwmon_in,
- .config = sfp_hwmon_vcc_config,
-};
-
-static u32 sfp_hwmon_bias_config[] = {
- HWMON_C_INPUT |
- HWMON_C_MAX | HWMON_C_MIN |
- HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
- HWMON_C_CRIT | HWMON_C_LCRIT |
- HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
- HWMON_C_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = {
- .type = hwmon_curr,
- .config = sfp_hwmon_bias_config,
-};
-
-static u32 sfp_hwmon_power_config[] = {
- /* Transmit power */
- HWMON_P_INPUT |
- HWMON_P_MAX | HWMON_P_MIN |
- HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
- HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
- HWMON_P_LABEL,
- /* Receive power */
- HWMON_P_INPUT |
- HWMON_P_MAX | HWMON_P_MIN |
- HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
- HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
- HWMON_P_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_power_channel_info = {
- .type = hwmon_power,
- .config = sfp_hwmon_power_config,
-};
-
static const struct hwmon_channel_info *sfp_hwmon_info[] = {
- &sfp_hwmon_chip,
- &sfp_hwmon_vcc_channel_info,
- &sfp_hwmon_temp_channel_info,
- &sfp_hwmon_bias_channel_info,
- &sfp_hwmon_power_channel_info,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT |
+ HWMON_I_MAX | HWMON_I_MIN |
+ HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
+ HWMON_I_CRIT | HWMON_I_LCRIT |
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
+ HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT |
+ HWMON_C_MAX | HWMON_C_MIN |
+ HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
+ HWMON_C_CRIT | HWMON_C_LCRIT |
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ /* Transmit power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
+ /* Receive power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL),
NULL,
};
@@ -1505,12 +1612,12 @@ static void sfp_sm_phy_detach(struct sfp *sfp)
sfp->mod_phy = NULL;
}
-static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
+static int sfp_sm_probe_phy(struct sfp *sfp, int addr, bool is_c45)
{
struct phy_device *phy;
int err;
- phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ phy = get_phy_device(sfp->i2c_mii, addr, is_c45);
if (phy == ERR_PTR(-ENODEV))
return PTR_ERR(phy);
if (IS_ERR(phy)) {
@@ -1606,6 +1713,14 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
}
}
+static int sfp_sm_add_mdio_bus(struct sfp *sfp)
+{
+ if (sfp->mdio_protocol != MDIO_I2C_NONE)
+ return sfp_i2c_mdiobus_create(sfp);
+
+ return 0;
+}
+
/* Probe a SFP for a PHY device if the module supports copper - the PHY
* normally sits at I2C bus address 0x56, and may either be a clause 22
* or clause 45 PHY.
@@ -1621,19 +1736,23 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp)
{
int err = 0;
- switch (sfp->id.base.extended_cc) {
- case SFF8024_ECC_10GBASE_T_SFI:
- case SFF8024_ECC_10GBASE_T_SR:
- case SFF8024_ECC_5GBASE_T:
- case SFF8024_ECC_2_5GBASE_T:
- err = sfp_sm_probe_phy(sfp, true);
+ switch (sfp->mdio_protocol) {
+ case MDIO_I2C_NONE:
break;
- default:
- if (sfp->id.base.e1000_base_t)
- err = sfp_sm_probe_phy(sfp, false);
+ case MDIO_I2C_MARVELL_C22:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, false);
+ break;
+
+ case MDIO_I2C_C45:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, true);
+ break;
+
+ case MDIO_I2C_ROLLBALL:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR_ROLLBALL, true);
break;
}
+
return err;
}
@@ -1947,17 +2066,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
if (ret < 0)
return ret;
- if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
- !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
- sfp->module_t_start_up = T_START_UP_BAD_GPON;
+ /* Initialise state bits to use from hardware */
+ sfp->state_hw_mask = SFP_F_PRESENT;
+ if (sfp->gpio[GPIO_TX_DISABLE])
+ sfp->state_hw_mask |= SFP_F_TX_DISABLE;
+ if (sfp->gpio[GPIO_TX_FAULT])
+ sfp->state_hw_mask |= SFP_F_TX_FAULT;
+ if (sfp->gpio[GPIO_LOS])
+ sfp->state_hw_mask |= SFP_F_LOS;
+
+ sfp->module_t_start_up = T_START_UP;
+ sfp->module_t_wait = T_WAIT;
+
+ sfp->tx_fault_ignore = false;
+
+ if (sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SFI ||
+ sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SR ||
+ sfp->id.base.extended_cc == SFF8024_ECC_5GBASE_T ||
+ sfp->id.base.extended_cc == SFF8024_ECC_2_5GBASE_T)
+ sfp->mdio_protocol = MDIO_I2C_C45;
+ else if (sfp->id.base.e1000_base_t)
+ sfp->mdio_protocol = MDIO_I2C_MARVELL_C22;
else
- sfp->module_t_start_up = T_START_UP;
+ sfp->mdio_protocol = MDIO_I2C_NONE;
- if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) &&
- !memcmp(id.base.vendor_pn, "MA5671A ", 16))
- sfp->tx_fault_ignore = true;
- else
- sfp->tx_fault_ignore = false;
+ sfp->quirk = sfp_lookup_quirk(&id);
+ if (sfp->quirk && sfp->quirk->fixup)
+ sfp->quirk->fixup(sfp);
return 0;
}
@@ -2071,7 +2206,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
break;
/* Report the module insertion to the upstream device */
- err = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id,
+ sfp->quirk);
if (err < 0) {
sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
break;
@@ -2130,6 +2266,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp_module_stop(sfp->sfp_bus);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
+ if (sfp->i2c_mii)
+ sfp_i2c_mdiobus_destroy(sfp);
sfp_module_tx_disable(sfp);
sfp_soft_stop_poll(sfp);
sfp_sm_next(sfp, SFP_S_DOWN, 0);
@@ -2153,9 +2291,10 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
/* We need to check the TX_FAULT state, which is not defined
* while TX_DISABLE is asserted. The earliest we want to do
- * anything (such as probe for a PHY) is 50ms.
+ * anything (such as probe for a PHY) is 50ms (or more on
+ * specific modules).
*/
- sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT);
+ sfp_sm_next(sfp, SFP_S_WAIT, sfp->module_t_wait);
break;
case SFP_S_WAIT:
@@ -2169,8 +2308,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
* deasserting.
*/
timeout = sfp->module_t_start_up;
- if (timeout > T_WAIT)
- timeout -= T_WAIT;
+ if (timeout > sfp->module_t_wait)
+ timeout -= sfp->module_t_wait;
else
timeout = 1;
@@ -2192,6 +2331,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp->sm_fault_retries == N_FAULT_INIT);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
init_done:
+ /* Create mdiobus and start trying for PHY */
+ ret = sfp_sm_add_mdio_bus(sfp);
+ if (ret < 0) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
+ }
sfp->sm_phy_retries = R_PHY_RETRY;
goto phy_probe;
}
@@ -2573,6 +2718,8 @@ static int sfp_probe(struct platform_device *pdev)
return PTR_ERR(sfp->gpio[i]);
}
+ sfp->state_hw_mask = SFP_F_PRESENT;
+
sfp->get_state = sfp_gpio_get_state;
sfp->set_state = sfp_gpio_set_state;
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 27226535c72b..6cf1643214d3 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -6,6 +6,14 @@
struct sfp;
+struct sfp_quirk {
+ const char *vendor;
+ const char *part;
+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes,
+ unsigned long *interfaces);
+ void (*fixup)(struct sfp *sfp);
+};
+
struct sfp_socket_ops {
void (*attach)(struct sfp *sfp);
void (*detach)(struct sfp *sfp);
@@ -23,7 +31,8 @@ int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev);
void sfp_remove_phy(struct sfp_bus *bus);
void sfp_link_up(struct sfp_bus *bus);
void sfp_link_down(struct sfp_bus *bus);
-int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk);
void sfp_module_remove(struct sfp_bus *bus);
int sfp_module_start(struct sfp_bus *bus);
void sfp_module_stop(struct sfp_bus *bus);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 69423b8965b3..ac7481ce2fc1 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -46,7 +46,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
struct smsc_phy_priv {
u16 intmask;
bool energy_enable;
- struct clk *refclk;
};
static int smsc_phy_ack_interrupt(struct phy_device *phydev)
@@ -285,20 +284,12 @@ static void smsc_get_stats(struct phy_device *phydev,
data[i] = smsc_get_stat(phydev, i);
}
-static void smsc_phy_remove(struct phy_device *phydev)
-{
- struct smsc_phy_priv *priv = phydev->priv;
-
- clk_disable_unprepare(priv->refclk);
- clk_put(priv->refclk);
-}
-
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
struct smsc_phy_priv *priv;
- int ret;
+ struct clk *refclk;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -312,22 +303,12 @@ static int smsc_phy_probe(struct phy_device *phydev)
phydev->priv = priv;
/* Make clk optional to keep DTB backward compatibility. */
- priv->refclk = clk_get_optional(dev, NULL);
- if (IS_ERR(priv->refclk))
- return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ refclk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(refclk))
+ return dev_err_probe(dev, PTR_ERR(refclk),
"Failed to request clock\n");
- ret = clk_prepare_enable(priv->refclk);
- if (ret)
- return ret;
-
- ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
- if (ret) {
- clk_disable_unprepare(priv->refclk);
- return ret;
- }
-
- return 0;
+ return clk_set_rate(refclk, 50 * 1000 * 1000);
}
static struct phy_driver smsc_phy_driver[] = {
@@ -429,7 +410,6 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
- .remove = smsc_phy_remove,
/* basic functions */
.read_status = lan87xx_read_status,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index ff37f8ba6758..d4202d40d47a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
@@ -137,15 +136,10 @@ static const struct ks8995_chip_params ks8995_chip[] = {
},
};
-struct ks8995_pdata {
- int reset_gpio;
- enum of_gpio_flags reset_gpio_flags;
-};
-
struct ks8995_switch {
struct spi_device *spi;
struct mutex lock;
- struct ks8995_pdata *pdata;
+ struct gpio_desc *reset_gpio;
struct bin_attribute regs_attr;
const struct ks8995_chip_params *chip;
int revision_id;
@@ -401,24 +395,6 @@ err_out:
return err;
}
-/* ks8995_parse_dt - setup platform data from devicetree
- * @ks: pointer to switch instance
- *
- * Parses supported DT properties and sets up platform data
- * accordingly.
- */
-static void ks8995_parse_dt(struct ks8995_switch *ks)
-{
- struct device_node *np = ks->spi->dev.of_node;
- struct ks8995_pdata *pdata = ks->pdata;
-
- if (!np)
- return;
-
- pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0,
- &pdata->reset_gpio_flags);
-}
-
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
@@ -449,38 +425,22 @@ static int ks8995_probe(struct spi_device *spi)
ks->spi = spi;
ks->chip = &ks8995_chip[variant];
- if (ks->spi->dev.of_node) {
- ks->pdata = devm_kzalloc(&spi->dev, sizeof(*ks->pdata),
- GFP_KERNEL);
- if (!ks->pdata)
- return -ENOMEM;
-
- ks->pdata->reset_gpio = -1;
-
- ks8995_parse_dt(ks);
+ ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(ks->reset_gpio);
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset gpio: %d\n", err);
+ return err;
}
- if (!ks->pdata)
- ks->pdata = spi->dev.platform_data;
+ err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
+ if (err)
+ return err;
/* de-assert switch reset */
- if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) {
- unsigned long flags;
-
- flags = (ks->pdata->reset_gpio_flags == OF_GPIO_ACTIVE_LOW ?
- GPIOF_ACTIVE_LOW : 0);
-
- err = devm_gpio_request_one(&spi->dev,
- ks->pdata->reset_gpio,
- flags, "switch-reset");
- if (err) {
- dev_err(&spi->dev,
- "failed to get reset-gpios: %d\n", err);
- return -EIO;
- }
-
- gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 0);
- }
+ /* FIXME: this likely requires a delay */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
spi_set_drvdata(spi, ks);
@@ -524,8 +484,7 @@ static void ks8995_remove(struct spi_device *spi)
sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
/* assert reset */
- if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio))
- gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1);
+ gpiod_set_value_cansleep(ks->reset_gpio, 1);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
new file mode 100644
index 000000000000..73d163704068
--- /dev/null
+++ b/drivers/net/pse-pd/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Ethernet Power Sourcing Equipment drivers
+#
+
+menuconfig PSE_CONTROLLER
+ bool "Ethernet Power Sourcing Equipment Support"
+ help
+ Generic Power Sourcing Equipment Controller support.
+
+ If unsure, say no.
+
+if PSE_CONTROLLER
+
+config PSE_REGULATOR
+ tristate "Regulator based PSE controller"
+ help
+ This module provides support for simple regulator-based Ethernet Power
+ Sourcing Equipment without automatic classification support; for
+ example, a basic implementation of the PoDL (802.3bu) specification.
+
+endif
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
new file mode 100644
index 000000000000..1b8aa4c70f0b
--- /dev/null
+++ b/drivers/net/pse-pd/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for Linux PSE drivers
+
+obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
+
+obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
new file mode 100644
index 000000000000..146b81f08a89
--- /dev/null
+++ b/drivers/net/pse-pd/pse_core.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Framework for Ethernet Power Sourcing Equipment
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/pse-pd/pse.h>
+
+static DEFINE_MUTEX(pse_list_mutex);
+static LIST_HEAD(pse_controller_list);
+
+/**
+ * struct pse_control - a PSE control
+ * @pcdev: a pointer to the PSE controller device
+ * this PSE control belongs to
+ * @list: list entry for the pcdev's PSE controller list
+ * @id: ID of the PSE line in the PSE controller device
+ * @refcnt: Number of gets of this pse_control
+ */
+struct pse_control {
+ struct pse_controller_dev *pcdev;
+ struct list_head list;
+ unsigned int id;
+ struct kref refcnt;
+};
+
+/**
+ * of_pse_zero_xlate - dummy function for controllers with only one control
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
+ * controllers with #pse-cells = <0>.
+ */
+static int of_pse_zero_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ return 0;
+}
+
+/**
+ * of_pse_simple_xlate - translate pse_spec to the PSE line number
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
+ * controllers with 1:1 mapping, where PSE lines can be indexed by number
+ * without gaps.
+ */
+static int of_pse_simple_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ if (pse_spec->args[0] >= pcdev->nr_lines)
+ return -EINVAL;
+
+ return pse_spec->args[0];
+}
+
+/**
+ * pse_controller_register - register a PSE controller device
+ * @pcdev: a pointer to the initialized PSE controller device
+ */
+int pse_controller_register(struct pse_controller_dev *pcdev)
+{
+ if (!pcdev->of_xlate) {
+ if (pcdev->of_pse_n_cells == 0)
+ pcdev->of_xlate = of_pse_zero_xlate;
+ else if (pcdev->of_pse_n_cells == 1)
+ pcdev->of_xlate = of_pse_simple_xlate;
+ }
+
+ mutex_init(&pcdev->lock);
+ INIT_LIST_HEAD(&pcdev->pse_control_head);
+
+ mutex_lock(&pse_list_mutex);
+ list_add(&pcdev->list, &pse_controller_list);
+ mutex_unlock(&pse_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pse_controller_register);
+
+/**
+ * pse_controller_unregister - unregister a PSE controller device
+ * @pcdev: a pointer to the PSE controller device
+ */
+void pse_controller_unregister(struct pse_controller_dev *pcdev)
+{
+ mutex_lock(&pse_list_mutex);
+ list_del(&pcdev->list);
+ mutex_unlock(&pse_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pse_controller_unregister);
+
+static void devm_pse_controller_release(struct device *dev, void *res)
+{
+ pse_controller_unregister(*(struct pse_controller_dev **)res);
+}
+
+/**
+ * devm_pse_controller_register - resource managed pse_controller_register()
+ * @dev: device that is registering this PSE controller
+ * @pcdev: a pointer to the initialized PSE controller device
+ *
+ * Managed pse_controller_register(). For PSE controllers registered by
+ * this function, pse_controller_unregister() is automatically called on
+ * driver detach. See pse_controller_register() for more information.
+ */
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev)
+{
+ struct pse_controller_dev **pcdevp;
+ int ret;
+
+ pcdevp = devres_alloc(devm_pse_controller_release, sizeof(*pcdevp),
+ GFP_KERNEL);
+ if (!pcdevp)
+ return -ENOMEM;
+
+ ret = pse_controller_register(pcdev);
+ if (ret) {
+ devres_free(pcdevp);
+ return ret;
+ }
+
+ *pcdevp = pcdev;
+ devres_add(dev, pcdevp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_pse_controller_register);
+
+/* PSE control section */
+
+static void __pse_control_release(struct kref *kref)
+{
+ struct pse_control *psec = container_of(kref, struct pse_control,
+ refcnt);
+
+ lockdep_assert_held(&pse_list_mutex);
+
+ module_put(psec->pcdev->owner);
+
+ list_del(&psec->list);
+ kfree(psec);
+}
+
+static void __pse_control_put_internal(struct pse_control *psec)
+{
+ lockdep_assert_held(&pse_list_mutex);
+
+ kref_put(&psec->refcnt, __pse_control_release);
+}
+
+/**
+ * pse_control_put - free the PSE control
+ * @psec: PSE control pointer
+ */
+void pse_control_put(struct pse_control *psec)
+{
+ if (IS_ERR_OR_NULL(psec))
+ return;
+
+ mutex_lock(&pse_list_mutex);
+ __pse_control_put_internal(psec);
+ mutex_unlock(&pse_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pse_control_put);
+
+static struct pse_control *
+pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
+{
+ struct pse_control *psec;
+
+ lockdep_assert_held(&pse_list_mutex);
+
+ list_for_each_entry(psec, &pcdev->pse_control_head, list) {
+ if (psec->id == index) {
+ kref_get(&psec->refcnt);
+ return psec;
+ }
+ }
+
+ psec = kzalloc(sizeof(*psec), GFP_KERNEL);
+ if (!psec)
+ return ERR_PTR(-ENOMEM);
+
+ if (!try_module_get(pcdev->owner)) {
+ kfree(psec);
+ return ERR_PTR(-ENODEV);
+ }
+
+ psec->pcdev = pcdev;
+ list_add(&psec->list, &pcdev->pse_control_head);
+ psec->id = index;
+ kref_init(&psec->refcnt);
+
+ return psec;
+}
+
+struct pse_control *
+of_pse_control_get(struct device_node *node)
+{
+ struct pse_controller_dev *r, *pcdev;
+ struct of_phandle_args args;
+ struct pse_control *psec;
+ int psec_id;
+ int ret;
+
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_parse_phandle_with_args(node, "pses", "#pse-cells", 0, &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_lock(&pse_list_mutex);
+ pcdev = NULL;
+ list_for_each_entry(r, &pse_controller_list, list) {
+ if (args.np == r->dev->of_node) {
+ pcdev = r;
+ break;
+ }
+ }
+
+ if (!pcdev) {
+ psec = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ if (WARN_ON(args.args_count != pcdev->of_pse_n_cells)) {
+ psec = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ psec_id = pcdev->of_xlate(pcdev, &args);
+ if (psec_id < 0) {
+ psec = ERR_PTR(psec_id);
+ goto out;
+ }
+
+ /* pse_list_mutex also protects the pcdev's pse_control list */
+ psec = pse_control_get_internal(pcdev, psec_id);
+
+out:
+ mutex_unlock(&pse_list_mutex);
+ of_node_put(args.np);
+
+ return psec;
+}
+EXPORT_SYMBOL_GPL(of_pse_control_get);
+
+/**
+ * pse_ethtool_get_status - get status of PSE control
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @status: struct to store PSE status
+ */
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ const struct pse_controller_ops *ops;
+ int err;
+
+ ops = psec->pcdev->ops;
+
+ if (!ops->ethtool_get_status) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not support status report");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&psec->pcdev->lock);
+ err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status);
+ mutex_unlock(&psec->pcdev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
+
+/**
+ * pse_ethtool_set_config - set PSE control configuration
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @config: PSE configuration to apply
+ */
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ const struct pse_controller_ops *ops;
+ int err;
+
+ ops = psec->pcdev->ops;
+
+ if (!ops->ethtool_set_config) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not configuration");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&psec->pcdev->lock);
+ err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config);
+ mutex_unlock(&psec->pcdev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
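As context for the new PSE core above, a minimal consumer-side sketch (hypothetical
"foo" driver code, not part of this patch) using only the functions exported by
pse_core.c: look up the PSE control from the device tree, query its status, and
release it:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>

static int foo_report_pse_status(struct device_node *np,
				 struct netlink_ext_ack *extack)
{
	struct pse_control_status status = {};
	struct pse_control *psec;
	int err;

	/* Resolves the "pses" phandle (with "#pse-cells") on this node */
	psec = of_pse_control_get(np);
	if (IS_ERR(psec))
		return PTR_ERR(psec);

	err = pse_ethtool_get_status(psec, extack, &status);

	pse_control_put(psec);
	return err;
}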
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
new file mode 100644
index 000000000000..e2bf8306ca90
--- /dev/null
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Driver for the regulator-based Ethernet Power Sourcing Equipment, without
+// auto classification support.
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+#include <linux/regulator/consumer.h>
+
+struct pse_reg_priv {
+ struct pse_controller_dev pcdev;
+ struct regulator *ps; /* power source */
+ enum ethtool_podl_pse_admin_state admin_state;
+};
+
+static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct pse_reg_priv, pcdev);
+}
+
+static int
+pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+
+ if (priv->admin_state == config->admin_cotrol)
+ return 0;
+
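+ /* Switch the power-source regulator to match the requested admin state */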
+ switch (config->admin_cotrol) {
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
+ ret = regulator_enable(priv->ps);
+ break;
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
+ ret = regulator_disable(priv->ps);
+ break;
+ default:
+ dev_err(pcdev->dev, "Unknown admin state %i\n",
+ config->admin_cotrol);
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ priv->admin_state = config->admin_cotrol;
+
+ return 0;
+}
+
+static int
+pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+
+ ret = regulator_is_enabled(priv->ps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ else
+ status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
+
+ status->podl_admin_state = priv->admin_state;
+
+ return 0;
+}
+
+static const struct pse_controller_ops pse_reg_ops = {
+ .ethtool_get_status = pse_reg_ethtool_get_status,
+ .ethtool_set_config = pse_reg_ethtool_set_config,
+};
+
+static int
+pse_reg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pse_reg_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!pdev->dev.of_node)
+ return -ENOENT;
+
+ priv->ps = devm_regulator_get_exclusive(dev, "pse");
+ if (IS_ERR(priv->ps))
+ return dev_err_probe(dev, PTR_ERR(priv->ps),
+ "failed to get PSE regulator.\n");
+
+ platform_set_drvdata(pdev, priv);
+
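+ /* Report the regulator's current state as the initial admin state */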
+ ret = regulator_is_enabled(priv->ps);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ else
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
+
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &pse_reg_ops;
+ priv->pcdev.dev = dev;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ dev_err(dev, "failed to register PSE controller (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id pse_reg_of_match[] = {
+ { .compatible = "podl-pse-regulator", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pse_reg_of_match);
+
+static struct platform_driver pse_reg_driver = {
+ .probe = pse_reg_probe,
+ .driver = {
+ .name = "PSE regulator",
+ .of_match_table = of_match_ptr(pse_reg_of_match),
+ },
+};
+module_platform_driver(pse_reg_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("regulator based Ethernet Power Sourcing Equipment");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pse-regulator");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 39e61e07e489..fbcb9d05da64 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -443,10 +443,10 @@ static void rionet_get_drvinfo(struct net_device *ndev,
{
struct rionet_private *rnet = netdev_priv(ndev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
- strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "n/a", sizeof(info->fw_version));
+ strscpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}
static u32 rionet_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index aac133a1e27a..62ade69295a9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
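+ /* Drop addresses that were synced to the ports while the master was up */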
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
@@ -2070,8 +2082,8 @@ static const struct net_device_ops team_netdev_ops = {
static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
@@ -2840,6 +2852,7 @@ static struct genl_family team_nl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = team_nl_ops,
.n_small_ops = ARRAY_SIZE(team_nl_ops),
+ .resv_start_op = TEAM_CMD_PORT_LIST_GET + 1,
.mcgrps = team_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ff5d0e98a088..83fcaeb2ac5e 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
+ * Networking over Thunderbolt/USB4 cables using USB4NET protocol
+ * (formerly Apple ThunderboltIP).
*
* Copyright (C) 2017, Intel Corporation
* Authors: Amir Levy <amir.jer.levy@intel.com>
@@ -30,6 +31,7 @@
#define TBNET_RING_SIZE 256
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 10
+#define TBNET_E2E BIT(0)
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
@@ -209,6 +211,10 @@ static const uuid_t tbnet_svc_uuid =
static struct tb_property_dir *tbnet_dir;
+static bool tbnet_e2e = true;
+module_param_named(e2e, tbnet_e2e, bool, 0444);
+MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
+
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
enum thunderbolt_ip_type type, size_t size, u32 command_id)
@@ -612,18 +618,13 @@ static void tbnet_connected_work(struct work_struct *work)
return;
}
- /* Both logins successful so enable the high-speed DMA paths and
- * start the network device queue.
+ /* Both logins successful so enable the rings, high-speed DMA
+ * paths and start the network device queue.
+ *
+ * Note we enable the DMA paths last to make sure we have primed
+ * the Rx ring before any incoming packets are allowed to
+ * arrive.
*/
- ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
- net->rx_ring.ring->hop,
- net->remote_transmit_path,
- net->tx_ring.ring->hop);
- if (ret) {
- netdev_err(net->dev, "failed to enable DMA paths\n");
- return;
- }
-
tb_ring_start(net->tx_ring.ring);
tb_ring_start(net->rx_ring.ring);
@@ -635,10 +636,21 @@ static void tbnet_connected_work(struct work_struct *work)
if (ret)
goto err_free_rx_buffers;
+ ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
+ net->rx_ring.ring->hop,
+ net->remote_transmit_path,
+ net->tx_ring.ring->hop);
+ if (ret) {
+ netdev_err(net->dev, "failed to enable DMA paths\n");
+ goto err_free_tx_buffers;
+ }
+
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
return;
+err_free_tx_buffers:
+ tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
@@ -867,6 +879,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
+ unsigned int flags;
int hopid;
netif_carrier_off(dev);
@@ -891,9 +904,14 @@ static int tbnet_open(struct net_device *dev)
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
- ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
- RING_FLAG_FRAME, 0, sof_mask, eof_mask,
- tbnet_start_poll, net);
+ flags = RING_FLAG_FRAME;
+ /* Only enable full E2E if the other end supports it too */
+ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+ flags |= RING_FLAG_E2E;
+
+ ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+ net->tx_ring.ring->hop, sof_mask,
+ eof_mask, tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");
tb_ring_free(net->tx_ring.ring);
@@ -1264,7 +1282,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
dev->features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
- netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &net->napi, tbnet_poll);
/* MTU range: 68 - 65522 */
dev->min_mtu = ETH_MIN_MTU;
@@ -1356,6 +1374,7 @@ static struct tb_service_driver tbnet_driver = {
static int __init tbnet_init(void)
{
+ unsigned int flags;
int ret;
tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1365,12 +1384,11 @@ static int __init tbnet_init(void)
tb_property_add_immediate(tbnet_dir, "prtcid", 1);
tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
- /* Currently only announce support for match frags ID (bit 1). Bit 0
- * is reserved for full E2E flow control which we do not support at
- * the moment.
- */
- tb_property_add_immediate(tbnet_dir, "prtcstns",
- TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+ flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+ if (tbnet_e2e)
+ flags |= TBNET_E2E;
+ tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret) {
@@ -1393,5 +1411,5 @@ module_exit(tbnet_exit);
MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_DESCRIPTION("Thunderbolt network driver");
+MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 259b2b84b2b3..27c6d235cbda 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2664,7 +2664,7 @@ static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
- return sprintf(buf, "0x%x\n", tun_flags(tun));
+ return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
}
static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
@@ -2672,9 +2672,9 @@ static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return uid_valid(tun->owner)?
- sprintf(buf, "%u\n",
- from_kuid_munged(current_user_ns(), tun->owner)):
- sprintf(buf, "-1\n");
+ sysfs_emit(buf, "%u\n",
+ from_kuid_munged(current_user_ns(), tun->owner)) :
+ sysfs_emit(buf, "-1\n");
}
static ssize_t group_show(struct device *dev, struct device_attribute *attr,
@@ -2682,9 +2682,9 @@ static ssize_t group_show(struct device *dev, struct device_attribute *attr,
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return gid_valid(tun->group) ?
- sprintf(buf, "%u\n",
- from_kgid_munged(current_user_ns(), tun->group)):
- sprintf(buf, "-1\n");
+ sysfs_emit(buf, "%u\n",
+ from_kgid_munged(current_user_ns(), tun->group)) :
+ sysfs_emit(buf, "-1\n");
}
static DEVICE_ATTR_RO(tun_flags);
@@ -2828,7 +2828,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
rcu_assign_pointer(tfile->tun, tun);
}
- netif_carrier_on(tun->dev);
+ if (ifr->ifr_flags & IFF_NO_CARRIER)
+ netif_carrier_off(tun->dev);
+ else
+ netif_carrier_on(tun->dev);
/* Make sure persistent devices do not get stuck in
* xoff state.
@@ -3056,8 +3059,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
- return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
- (unsigned int __user*)argp);
+ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+ TUN_FEATURES, (unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE) {
return tun_set_queue(file, &ifr);
} else if (cmd == SIOCGSKNS) {
@@ -3540,15 +3543,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
- strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
+ strscpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case IFF_TAP:
- strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
+ strscpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
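
Below is a small user-space sketch (not from the patch) showing how the IFF_NO_CARRIER flag added to TUNSETIFF above could be used; it assumes uapi headers from a kernel carrying this change, and the interface name is made up.

/* Hedged example: create a tun device whose carrier starts off. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_NO_CARRIER is new in this series; the name "tnc0" is arbitrary */
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI | IFF_NO_CARRIER;
	strncpy(ifr.ifr_name, "tnc0", IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		close(fd);
		return 1;
	}

	printf("%s created with carrier off\n", ifr.ifr_name);
	pause();	/* keep the device alive until interrupted */
	return 0;
}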
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 76659c1c525a..4402eedb3d1a 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -168,7 +168,7 @@ config USB_NET_AX8817X
tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
depends on USB_USBNET
select CRC32
- select PHYLIB
+ select PHYLINK
select AX88796B_PHY
imply NET_SELFTESTS
default y
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 3020e81159d0..a017e9de2119 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -201,7 +201,7 @@ static void aqc111_get_drvinfo(struct net_device *net,
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
aqc111_data->fw_ver.major,
aqc111_data->fw_ver.minor,
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 21c1ca275cc4..74162190bccc 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -27,6 +27,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/selftests.h>
+#include <linux/phylink.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -185,6 +186,8 @@ struct asix_common_private {
struct mii_bus *mdio;
struct phy_device *phydev;
struct phy_device *phydev_int;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u16 phy_addr;
bool embd_phy;
u8 chipcode;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 9ea91c3ff045..72ffc89b477a 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -752,8 +752,8 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
int asix_set_mac_address(struct net_device *net, void *p)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 5b5eb630c4b7..11f60d32be82 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -303,6 +303,24 @@ static int ax88772_ethtool_get_sset_count(struct net_device *ndev, int sset)
}
}
+static void ax88772_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct usbnet *dev = netdev_priv(ndev);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+static int ax88772_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct usbnet *dev = netdev_priv(ndev);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
static const struct ethtool_ops ax88772_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = usbnet_get_link,
@@ -319,6 +337,8 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.self_test = net_selftest,
.get_strings = ax88772_ethtool_get_strings,
.get_sset_count = ax88772_ethtool_get_sset_count,
+ .get_pauseparam = ax88772_ethtool_get_pauseparam,
+ .set_pauseparam = ax88772_ethtool_set_pauseparam,
};
static int ax88772_reset(struct usbnet *dev)
@@ -343,7 +363,7 @@ static int ax88772_reset(struct usbnet *dev)
if (ret < 0)
goto out;
- phy_start(priv->phydev);
+ phylink_start(priv->phylink);
return 0;
@@ -590,8 +610,11 @@ static void ax88772_suspend(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
u16 medium;
- if (netif_running(dev->net))
- phy_stop(priv->phydev);
+ if (netif_running(dev->net)) {
+ rtnl_lock();
+ phylink_suspend(priv->phylink, false);
+ rtnl_unlock();
+ }
/* Stop MAC operation */
medium = asix_read_medium_status(dev, 1);
@@ -622,8 +645,11 @@ static void ax88772_resume(struct usbnet *dev)
if (!priv->reset(dev, 1))
break;
- if (netif_running(dev->net))
- phy_start(priv->phydev);
+ if (netif_running(dev->net)) {
+ rtnl_lock();
+ phylink_resume(priv->phylink);
+ rtnl_unlock();
+ }
}
static int asix_resume(struct usb_interface *intf)
@@ -667,8 +693,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return -ENODEV;
}
- ret = phy_connect_direct(dev->net, priv->phydev, &asix_adjust_link,
- PHY_INTERFACE_MODE_INTERNAL);
+ ret = phylink_connect_phy(priv->phylink, priv->phydev);
if (ret) {
netdev_err(dev->net, "Could not connect PHY\n");
return ret;
@@ -688,6 +713,9 @@ static int ax88772_init_phy(struct usbnet *dev)
*/
priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
if (!priv->phydev_int) {
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
netdev_err(dev->net, "Could not find internal PHY\n");
return -ENODEV;
}
@@ -698,6 +726,89 @@ static int ax88772_init_phy(struct usbnet *dev)
return 0;
}
+static void ax88772_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Nothing to do */
+}
+
+static void ax88772_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+
+ asix_write_medium_mode(dev, 0, 0);
+ usbnet_link_change(dev, false, false);
+}
+
+static void ax88772_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+ u16 m = AX_MEDIUM_AC | AX_MEDIUM_RE;
+
+ m |= duplex ? AX_MEDIUM_FD : 0;
+
+ switch (speed) {
+ case SPEED_100:
+ m |= AX_MEDIUM_PS;
+ break;
+ case SPEED_10:
+ break;
+ default:
+ return;
+ }
+
+ if (tx_pause)
+ m |= AX_MEDIUM_TFC;
+
+ if (rx_pause)
+ m |= AX_MEDIUM_RFC;
+
+ asix_write_medium_mode(dev, m, 0);
+ usbnet_link_change(dev, true, false);
+}
+
+static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = ax88772_mac_config,
+ .mac_link_down = ax88772_mac_link_down,
+ .mac_link_up = ax88772_mac_link_up,
+};
+
+static int ax88772_phylink_setup(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+ phy_interface_t phy_if_mode;
+ struct phylink *phylink;
+
+ priv->phylink_config.dev = &dev->net->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ priv->phylink_config.supported_interfaces);
+
+ if (priv->embd_phy)
+ phy_if_mode = PHY_INTERFACE_MODE_INTERNAL;
+ else
+ phy_if_mode = PHY_INTERFACE_MODE_RMII;
+
+ phylink = phylink_create(&priv->phylink_config, dev->net->dev.fwnode,
+ phy_if_mode, &ax88772_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phylink = phylink;
+ return 0;
+}
+
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct asix_common_private *priv;
@@ -788,14 +899,22 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
return ret;
- return ax88772_init_phy(dev);
+ ret = ax88772_phylink_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = ax88772_init_phy(dev);
+ if (ret)
+ phylink_destroy(priv->phylink);
+
+ return ret;
}
static int ax88772_stop(struct usbnet *dev)
{
struct asix_common_private *priv = dev->driver_priv;
- phy_stop(priv->phydev);
+ phylink_stop(priv->phylink);
return 0;
}
@@ -804,7 +923,10 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct asix_common_private *priv = dev->driver_priv;
- phy_disconnect(priv->phydev);
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
+ phylink_destroy(priv->phylink);
asix_rx_fixup_common_free(dev->driver_priv);
}
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 843893482abd..ff439ef535ac 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -672,8 +672,8 @@ static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2de09ad5bac0..e11f70911acc 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3226ab33afae..f18ab8e220db 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -4374,7 +4374,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
- netif_napi_add(netdev, &dev->napi, lan78xx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &dev->napi, lan78xx_poll);
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index feb247e355f7..81ca64debc5b 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -894,7 +894,7 @@ static void pegasus_get_drvinfo(struct net_device *dev,
{
pegasus_t *pegasus = netdev_priv(dev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 709e3c59e340..26c34a7c21bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1087,6 +1087,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1401,6 +1402,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0f6efaabaa32..a481a1d831e2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -770,6 +770,8 @@ enum rtl8152_flags {
RX_EPROTO,
};
+#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
@@ -1873,7 +1875,9 @@ static void intr_callback(struct urb *urb)
"Stop submitting intr, status %d\n", status);
return;
case -EOVERFLOW:
- netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ if (net_ratelimit())
+ netif_info(tp, intr, tp->netdev,
+ "intr status -EOVERFLOW\n");
goto resubmit;
/* -EPIPE: should clear the halt */
default:
@@ -2726,22 +2730,26 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
ocp_data |= RCR_AM | RCR_AAP;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(netdev) > multicast_filter_limit) ||
- (netdev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev->flags & IFF_MULTICAST &&
+ netdev_mc_count(netdev) > multicast_filter_limit) ||
+ (netdev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
ocp_data |= RCR_AM;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
} else {
- struct netdev_hw_addr *ha;
-
mc_filter[1] = 0;
mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- ocp_data |= RCR_AM;
+ if (netdev->flags & IFF_MULTICAST) {
+ struct netdev_hw_addr *ha;
+
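+ /* Top six bits of the Ethernet CRC of the address select one of the 64 filter bits */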
+ netdev_for_each_mc_addr(ha, netdev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ ocp_data |= RCR_AM;
+ }
}
}
@@ -5906,6 +5914,11 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -6431,21 +6444,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
- switch (tp->version) {
- case RTL_VER_10:
- case RTL_VER_11:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
- break;
- case RTL_VER_12:
- case RTL_VER_13:
- case RTL_VER_15:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
- break;
- default:
- break;
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
}
static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6557,6 +6557,11 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -8604,11 +8609,11 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
{
struct r8152 *tp = netdev_priv(netdev);
- strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, MODULENAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
- strlcpy(info->fw_version, tp->rtl_fw.version,
+ strscpy(info->fw_version, tp->rtl_fw.version,
sizeof(info->fw_version));
}
@@ -9584,6 +9589,8 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
if (vendor_id == VENDOR_ID_LENOVO) {
switch (product_id) {
+ case DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB:
+ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
@@ -9831,6 +9838,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 3d2bf2acca94..97afd7335d86 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -769,8 +769,8 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
{
rtl8150_t *dev = netdev_priv(netdev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index bb4cbe8fc846..b3ae949e6f1c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -612,8 +612,8 @@ static void sierra_net_get_drvinfo(struct net_device *net,
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static u32 sierra_net_get_link(struct net_device *net)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aaa89b4cfd50..64a9a80b2309 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1050,9 +1050,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strlcpy (info->driver, dev->driver_name, sizeof info->driver);
- strlcpy (info->fw_version, dev->driver_info->description,
- sizeof info->fw_version);
+ strscpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strscpy(info->fw_version, dev->driver_info->description,
+ sizeof(info->fw_version));
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
@@ -1598,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1614,7 +1615,11 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- usb_scuttle_anchored_urbs(&dev->deferred);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
if (dev->driver_info->unbind)
dev->driver_info->unbind(dev, intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 466da01ba2e3..09682ea3354e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -128,8 +128,8 @@ static int veth_get_link_ksettings(struct net_device *dev,
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -1070,7 +1070,7 @@ static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
struct veth_rq *rq = &priv->rq[i];
if (!napi_already_on)
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll);
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
if (err < 0)
goto err_rxq_reg;
@@ -1184,7 +1184,7 @@ static int veth_napi_enable_range(struct net_device *dev, int start, int end)
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll);
}
err = __veth_napi_enable_range(dev, start, end);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d934774e9733..e0e57083d442 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1211,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
if (!hdr_hash || !skb)
return;
- switch ((int)hdr_hash->hash_report) {
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1229,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
@@ -2594,9 +2594,9 @@ static void virtnet_get_drvinfo(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
@@ -3432,29 +3432,6 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
(unsigned int)GOOD_PACKET_LEN);
}
-static void virtnet_config_sizes(struct virtnet_info *vi, u32 *sizes)
-{
- u32 i, rx_size, tx_size;
-
- if (vi->speed == SPEED_UNKNOWN || vi->speed < SPEED_10000) {
- rx_size = 1024;
- tx_size = 1024;
-
- } else if (vi->speed < SPEED_40000) {
- rx_size = 1024 * 4;
- tx_size = 1024 * 4;
-
- } else {
- rx_size = 1024 * 8;
- tx_size = 1024 * 8;
- }
-
- for (i = 0; i < vi->max_queue_pairs; i++) {
- sizes[rxq2vq(i)] = rx_size;
- sizes[txq2vq(i)] = tx_size;
- }
-}
-
static int virtnet_find_vqs(struct virtnet_info *vi)
{
vq_callback_t **callbacks;
@@ -3462,7 +3439,6 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
- u32 *sizes;
bool *ctx;
/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
@@ -3490,15 +3466,10 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx = NULL;
}
- sizes = kmalloc_array(total_vqs, sizeof(*sizes), GFP_KERNEL);
- if (!sizes)
- goto err_sizes;
-
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
callbacks[total_vqs - 1] = NULL;
names[total_vqs - 1] = "control";
- sizes[total_vqs - 1] = 64;
}
/* Allocate/initialize parameters for send/receive virtqueues */
@@ -3513,10 +3484,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx[rxq2vq(i)] = true;
}
- virtnet_config_sizes(vi, sizes);
-
- ret = virtio_find_vqs_ctx_size(vi->vdev, total_vqs, vqs, callbacks,
- names, sizes, ctx, NULL);
+ ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
+ names, ctx, NULL);
if (ret)
goto err_find;
@@ -3536,8 +3505,6 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
err_find:
- kfree(sizes);
-err_sizes:
kfree(ctx);
err_ctx:
kfree(names);
@@ -3897,9 +3864,6 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->curr_queue_pairs = num_online_cpus();
vi->max_queue_pairs = max_queue_pairs;
- virtnet_init_settings(dev);
- virtnet_update_settings(vi);
-
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
@@ -3912,6 +3876,8 @@ static int virtnet_probe(struct virtio_device *vdev)
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
+ virtnet_init_settings(dev);
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
vi->failover = net_failover_create(vi->dev);
if (IS_ERR(vi->failover)) {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 53b3b241e027..d3e7b27eb933 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3882,11 +3882,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
netif_napi_add(adapter->netdev,
&adapter->rx_queue[i].napi,
- vmxnet3_poll_rx_only, 64);
+ vmxnet3_poll_rx_only);
}
} else {
netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
- vmxnet3_poll, 64);
+ vmxnet3_poll);
}
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e2034adc3a1a..18cf7c723201 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -209,12 +209,12 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
+ strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 5df7a0abc39d..badf6f09ae51 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1541,8 +1541,8 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
static void vrf_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops vrf_ethtool_ops = {
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index c3285242f74f..6ab669dcd1c6 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -713,12 +713,9 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
- vh = skb_gro_header_fast(skb, off_vx);
- if (skb_gro_header_hard(skb, hlen)) {
- vh = skb_gro_header_slow(skb, hlen, off_vx);
- if (unlikely(!vh))
- goto out;
- }
+ vh = skb_gro_header(skb, hlen, off_vx);
+ if (unlikely(!vh))
+ goto out;
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
@@ -3313,8 +3310,8 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
static void vxlan_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static int vxlan_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index d0f3b6d7f408..43c8c84e7ea8 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+ struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) &&
- addr->sa_family == AF_INET) ||
- (len == sizeof(struct sockaddr_in6) &&
- addr->sa_family == AF_INET6)) {
- struct endpoint endpoint = { { { 0 } } };
-
- memcpy(&endpoint.addr, addr, len);
+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+ endpoint.addr4 = *(struct sockaddr_in *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
@@ -621,6 +620,7 @@ static const struct genl_ops genl_ops[] = {
static struct genl_family genl_family __ro_after_init = {
.ops = genl_ops,
.n_ops = ARRAY_SIZE(genl_ops),
+ .resv_start_op = WG_CMD_SET_DEVICE + 1,
.name = WG_GENL_NAME,
.version = WG_GENL_VERSION,
.maxattr = WGDEVICE_A_MAX,
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index 1acd00ab2fbc..1cb502a932e0 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -54,8 +54,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
skb_queue_head_init(&peer->staged_packet_queue);
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
- netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll);
napi_enable(&peer->napi);
list_add_tail(&peer->peer_list, &wg->peer_list);
INIT_LIST_HEAD(&peer->allowedips_list);
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index ba87d294604f..d4bb40a695ab 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -6,29 +6,28 @@
#ifdef DEBUG
#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
static const struct {
bool result;
- u64 nsec_to_sleep_before;
+ unsigned int msec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
static __init unsigned int maximum_jiffies_at_index(int index)
{
- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
- total_nsecs += expected_results[i].nsec_to_sleep_before;
- return nsecs_to_jiffies(total_nsecs);
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
- if (expected_results[i].nsec_to_sleep_before) {
- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
- }
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
++test;
#endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 4481ed375f55..af6546572df2 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -101,7 +101,7 @@ int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
/* Step 1: Read 4 bytes of the target info and check if it is
- * the special sentinal version word or the first word in the
+ * the special sentinel version word or the first word in the
* version response.
*/
resplen = sizeof(u32);
@@ -111,7 +111,7 @@ int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
return ret;
}
- /* Some SDIO boards have a special sentinal byte before the real
+ /* Some SDIO boards have a special sentinel byte before the real
* version response.
*/
if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index c45c814fd122..59926227bd49 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1323,7 +1323,7 @@ EXPORT_SYMBOL(ath10k_ce_per_engine_service);
/*
* Handler for per-engine interrupts on ALL active CEs.
* This is used in cases where the system is sharing a
- * single interrput for all CEs
+ * single interrupt for all CEs
*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 276954b70d63..400f332a7ff0 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -98,6 +98,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -136,6 +137,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -175,6 +177,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -209,6 +212,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -247,6 +251,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -285,6 +290,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -323,6 +329,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -365,6 +372,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -409,6 +417,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -460,6 +469,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -508,6 +518,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -546,6 +557,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -586,6 +598,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -617,6 +630,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = true,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -662,6 +676,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -693,6 +708,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
.hw_restart_disconnect = true,
+ .use_fw_tx_credits = false,
},
};
@@ -3080,7 +3096,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* enabled always.
*
* We can still enable BTCOEX if firmware has the support
- * eventhough btceox_support value is
+ * even though btcoex_support value is
* ATH10K_DT_BTCOEX_NOT_FOUND
*/
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index d70d7d088a2b..f5de8ce8fb45 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -76,7 +76,7 @@
/* The magic used by QCA spec */
#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
-/* Default Airtime weight multipler (Tuned for multiclient performance) */
+/* Default Airtime weight multiplier (Tuned for multiclient performance) */
#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
#define ATH10K_MAX_RETRY_COUNT 30
@@ -857,7 +857,7 @@ enum ath10k_dev_flags {
/* Disable HW crypto engine */
ATH10K_FLAG_HW_CRYPTO_DISABLED,
- /* Bluetooth coexistance enabled */
+ /* Bluetooth coexistence enabled */
ATH10K_FLAG_BTCOEX,
/* Per Station statistics service */
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index fe6b6f97a916..2d1634a890dd 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -531,7 +531,7 @@ static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = {
{0x40000, 0x400A4},
- /* SI register is skiped here.
+ /* SI register is skipped here.
* Because it will cause bus hang
*
* {0x50000, 0x50018},
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 240d70515088..437b9759f05d 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -125,7 +125,7 @@ enum ath10k_mem_region_type {
* To minimize the size of the array, the list must obey the format:
* '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
* also obey to 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
- * we may encouter error in the dump processing.
+ * we may encounter error in the dump processing.
*/
struct ath10k_mem_section {
u32 start;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 39378e3f9b2b..c861e66ef6bc 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1081,7 +1081,7 @@ exit:
* struct available..
*/
-/* This generally cooresponds to the debugfs fw_stats file */
+/* This generally corresponds to the debugfs fw_stats file */
static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 367539f2c370..87a3365330ff 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -498,7 +498,7 @@ static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i)
{
switch (i) {
case ATH10K_AMPDU_SUBFRM_NUM_10:
- return "upto 10";
+ return "up to 10";
case ATH10K_AMPDU_SUBFRM_NUM_20:
return "11-20";
case ATH10K_AMPDU_SUBFRM_NUM_30:
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index fab398046a3f..6d1784f74bea 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -947,13 +947,18 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
return -ECOMM;
}
- htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ if (ar->hw_params.use_fw_tx_credits)
+ htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ else
+ htc->total_transmit_credits = 1;
+
htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
ath10k_dbg(ar, ATH10K_DBG_HTC,
- "Target ready! transmit resources: %d size:%d\n",
+ "Target ready! transmit resources: %d size:%d actual credits:%d\n",
htc->total_transmit_credits,
- htc->target_credit_size);
+ htc->target_credit_size,
+ msg->ready.credit_count);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 8a075a711b71..e76aab973320 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -301,12 +301,16 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
ath10k_htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
+ ath10k_htt_config_paddrs_ring(htt, NULL);
+
dma_free_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
htt->rx_ring.alloc_idx.vaddr,
htt->rx_ring.alloc_idx.paddr);
+ htt->rx_ring.alloc_idx.vaddr = NULL;
kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
@@ -846,8 +850,10 @@ err_dma_idx:
ath10k_htt_get_rx_ring_size(htt),
vaddr_ring,
htt->rx_ring.base_paddr);
+ ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
return -ENOMEM;
}
@@ -2496,7 +2502,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem handle that case either, so we introduce the
- * same limitiation here as well.
+ * same limitation here as well.
*/
if (num_mpdu_ranges > 1)
ath10k_warn(ar,
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a19b0795c86d..bd603feb7953 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1112,7 +1112,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
int len = 0;
int ret;
- /* Response IDs are echo-ed back only for host driver convienence
+ /* Response IDs are echo-ed back only for host driver convenience
* purposes. They aren't used for anything in the driver yet so use 0.
*/
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index e52e41a70321..6d32b43a4da6 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -84,7 +84,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
- /* Note: qca99x0 supports upto 12 Copy Engines. Other than address of
+ /* Note: qca99x0 supports up to 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
* It is not really necessary to assign address for newly supported
* CEs in this address table.
@@ -120,7 +120,7 @@ const struct ath10k_hw_regs qca4019_regs = {
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
- /* qca4019 supports upto 12 copy engines. Since base address
+ /* qca4019 supports up to 12 copy engines. Since base address
* of ce8 to ce11 are not directly referred in the code,
* no need have them in separate members in this table.
* Copy Engine Address
@@ -924,7 +924,7 @@ static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
ath10k_hif_write32(ar, address, msb);
}
-/* 1. Write to memory region of target, such as IRAM adn DRAM.
+/* 1. Write to memory region of target, such as IRAM and DRAM.
* 2. Target address( 0 ~ 00100000 & 0x00400000~0x00500000)
* can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
* 3. In order to access the region other than the above,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 93acf0dd580a..1b99f3a39a11 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -635,6 +635,8 @@ struct ath10k_hw_params {
bool dynamic_sar_support;
bool hw_restart_disconnect;
+
+ bool use_fw_tx_credits;
};
struct htt_resp;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 9dd3b8fba4b0..ec8d5b29bc72 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -864,11 +864,36 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
+{
+ int peer_id, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+}
+
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
- int peer_id;
- int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -880,25 +905,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
- for_each_set_bit(peer_id, peer->peer_ids,
- ATH10K_MAX_NUM_PEER_IDS) {
- ar->peer_map[peer_id] = NULL;
- }
-
- /* Double check that peer is properly un-referenced from
- * the peer_map
- */
- for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
- if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
- peer->addr, peer, i);
- ar->peer_map[i] = NULL;
- }
- }
-
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
spin_unlock_bh(&ar->data_lock);
}
@@ -4044,7 +4051,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
ath10k_tx_h_seq_no(vif, skb);
break;
case ATH10K_HW_TXRX_ETHERNET:
- /* Convert 802.11->802.3 header only if the frame was erlier
+ /* Convert 802.11->802.3 header only if the frame was earlier
* encapsulated to 802.11 by mac80211. Otherwise pass it as is.
*/
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
@@ -7621,10 +7628,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/* Clean up the peer object as well since we
* must have failed to do this above.
*/
- list_del(&peer->list);
- ar->peer_map[i] = NULL;
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
}
spin_unlock_bh(&ar->data_lock);
@@ -8093,7 +8097,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* TODO: Implement this function properly
* For now it is needed to reply to Probe Requests in IBSS mode.
- * Propably we need this information from FW.
+ * Probably we need this information from FW.
*/
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
{
@@ -8516,7 +8520,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->deflink.bandwidth,
sta->deflink.rx_nss,
- sta->smps_mode);
+ sta->deflink.smps_mode);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = WMI_PEER_CHWIDTH_20MHZ;
@@ -8550,7 +8554,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -8563,7 +8567,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
break;
case IEEE80211_SMPS_NUM_MODES:
ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
- sta->smps_mode, sta->addr);
+ sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -9682,7 +9686,7 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
},
};
-/* FIXME: This is not thouroughly tested. These combinations may over- or
+/* FIXME: This is not thoroughly tested. These combinations may over- or
* underestimate hw/fw capabilities.
*/
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
@@ -9922,7 +9926,7 @@ int ath10k_mac_register(struct ath10k *ar)
WLAN_CIPHER_SUITE_BIP_GMAC_128,
WLAN_CIPHER_SUITE_BIP_GMAC_256,
- /* Only QCA99x0 and QCA4019 varients support GCMP-128, GCMP-256
+ /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
* and CCMP-256 in hardware.
*/
WLAN_CIPHER_SUITE_GCMP,
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bf1c938be7d0..e56c6a6b1379 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1244,7 +1244,7 @@ static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
unsigned int nbytes, max_nbytes, nentries;
int orig_len;
- /* No need to aquire ce_lock for CE5, since this is the only place CE5
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
* is processed other than init and deinit. Before releasing CE5
* buffers, interrupts are disabled. Thus CE5 access is serialized.
*/
@@ -3215,8 +3215,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index cf64898b9447..480cd97ab739 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -81,7 +81,7 @@ struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ath10k_ce_pipe *ce_hdl;
- /* Our pipe number; facilitiates use of pipe_info ptrs. */
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
/* Convenience back pointer to hif_ce_state. */
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index d7e406916bc8..66cb7a1e628a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -792,7 +792,7 @@ static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
return;
/*
- * HACK: sleep for a while inbetween receiving the msa info response
+ * HACK: sleep for a while between receiving the msa info response
* and the XPU update to prevent SDM845 from crashing due to a security
* violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
*/
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 6ce2a8b1060d..777e53aa69dc 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -448,7 +448,7 @@ struct rx_mpdu_end {
* - 4 bytes for WEP
* - 8 bytes for TKIP, AES
* [padding to 4 bytes]
- * c) A-MSDU subframe header (14 bytes) if appliable
+ * c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU only first frame in sequence contains (a) and (b).
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 24283c02a5ef..79e09c7a82b3 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1057,7 +1057,7 @@ static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
out:
/* An optimization to bypass reading the IRQ status registers
- * unecessarily which can re-wake the target, if upper layers
+ * unnecessarily which can re-wake the target, if upper layers
* determine that we are in a low-throughput mode, we can rely on
* taking another interrupt rather than re-checking the status
* registers which can re-wake the target.
@@ -2531,8 +2531,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 5576ad9fd116..cfcb759a87de 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1242,8 +1242,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
static int ath10k_snoc_request_irq(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 36c9a1364253..cefd97323dfe 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -98,7 +98,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
- /* display in millidegree celcius */
+ /* display in millidegree celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 5fdb020f4da3..1f4de9fbf2b3 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -19,7 +19,7 @@ struct ath10k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
u32 quiet_period;
- /* temperature value in Celcius degree
+ /* temperature value in Celsius degree
* protected by data_lock
*/
int temperature;
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index ad6471b21796..b0067af685b1 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1014,8 +1014,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
index 34d683e8fc18..48e066ba8162 100644
--- a/drivers/net/wireless/ath/ath10k/usb.h
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -26,7 +26,7 @@
#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
-/* diagnostic command defnitions */
+/* diagnostic command definitions */
#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2
#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index b39c9b78b32b..dbb48d70f2e9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1813,7 +1813,7 @@ struct wmi_tlv_pdev_get_temp_cmd {
struct wmi_tlv_pdev_temperature_event {
__le32 tlv_hdr;
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
__le32 temperature;
__le32 pdev_id;
} __packed;
@@ -2548,7 +2548,7 @@ struct nlo_channel_prediction_cfg {
/* Preconfigured stationary threshold.
* Lesser value means more conservative. Bigger value means more aggressive.
- * Maximum is 100 and mininum is 0.
+ * Maximum is 100 and minimum is 0.
*/
__le32 stationary_threshold;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 074d8ba5072a..980d4124fa28 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3555,7 +3555,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
__le32 t;
u32 v, tim_len;
- /* When FW reports 0 in tim_len, ensure atleast first byte
+ /* When FW reports 0 in tim_len, ensure at least first byte
* in tim_bitmap is considered for pvm calculation.
*/
tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4abd12e78028..6de3cc4640a0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3170,7 +3170,7 @@ struct wmi_start_scan_common {
/* dwell time in msec on passive channels */
__le32 dwell_time_passive;
/*
- * min time in msec on the BSS channel,only valid if atleast one
+ * min time in msec on the BSS channel,only valid if at least one
* VDEV is active
*/
__le32 min_rest_time;
@@ -3196,7 +3196,7 @@ struct wmi_start_scan_common {
* and bssid_list
*/
__le32 repeat_probe_time;
- /* time in msec between 2 consequetive probe requests with in a set. */
+ /* time in msec between 2 consecutive probe requests with in a set. */
__le32 probe_spacing_time;
/*
* data inactivity time in msec on bss channel that will be used by
@@ -4397,7 +4397,7 @@ struct wmi_pdev_stats_tx {
/* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_tx_timeout;
/* wal pdev resets */
@@ -5240,7 +5240,7 @@ enum wmi_vdev_param {
* scheduler.
*/
WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
- /* enable/dsiable WDS for this VDEV */
+ /* enable/disable WDS for this VDEV */
WMI_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_VDEV_PARAM_ATIM_WINDOW,
@@ -5372,7 +5372,7 @@ enum wmi_10x_vdev_param {
* scheduler.
*/
WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
- /* enable/dsiable WDS for this VDEV */
+ /* enable/disable WDS for this VDEV */
WMI_10X_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_10X_VDEV_PARAM_ATIM_WINDOW,
@@ -5904,7 +5904,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
enum wmi_sta_ps_param_pspoll_count {
WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
/*
- * Values greater than 0 indicate the maximum numer of PS-Poll frames
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
@@ -6947,7 +6947,7 @@ struct wmi_echo_ev_arg {
};
struct wmi_pdev_temperature_event {
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
__le32 temperature;
} __packed;
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index c47414710138..d34a4d6325b2 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -16,6 +16,8 @@
#include "hif.h"
#include <linux/remoteproc.h>
#include "pcic.h"
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
static const struct of_device_id ath11k_ahb_of_match[] = {
/* TODO: Should we change the compatible string to something similar
@@ -359,6 +361,7 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
+ dev_set_threaded(&irq_grp->napi_ndev, true);
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
@@ -406,7 +409,8 @@ static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
int timeout;
if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
- ab->hw_params.cold_boot_calib == 0)
+ ab->hw_params.cold_boot_calib == 0 ||
+ ab->hw_params.cbcal_restart_fw == 0)
return 0;
ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
@@ -541,7 +545,7 @@ static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ ath11k_ahb_ext_grp_napi_poll);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
@@ -685,11 +689,90 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
return 0;
}
+static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = enable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
+ return ret;
+ }
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "ahb device suspended\n");
+
+ return ret;
+}
+
+static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = disable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "ahb device resumed\n");
+
+ return 0;
+}
+
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
.start = ath11k_ahb_start,
.stop = ath11k_ahb_stop,
.read32 = ath11k_ahb_read32,
.write32 = ath11k_ahb_write32,
+ .read = NULL,
.irq_enable = ath11k_ahb_ext_irq_enable,
.irq_disable = ath11k_ahb_ext_irq_disable,
.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
@@ -702,6 +785,7 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
+ .read = NULL,
.irq_enable = ath11k_pcic_ext_irq_enable,
.irq_disable = ath11k_pcic_ext_irq_disable,
.get_msi_address = ath11k_pcic_get_msi_address,
@@ -709,6 +793,10 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.power_down = ath11k_ahb_power_down,
.power_up = ath11k_ahb_power_up,
+ .suspend = ath11k_ahb_hif_suspend,
+ .resume = ath11k_ahb_hif_resume,
+ .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
+ .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};
static int ath11k_core_get_rproc(struct ath11k_base *ab)
@@ -783,6 +871,34 @@ static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
+ &ab_ahb->smp2p_info.smem_bit);
+ if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
+ ath11k_err(ab, "failed to fetch smem state: %ld\n",
+ PTR_ERR(ab_ahb->smp2p_info.smem_state));
+ return PTR_ERR(ab_ahb->smp2p_info.smem_state);
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return;
+
+ qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
+}
+
static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
@@ -1038,10 +1154,14 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
- ret = ath11k_hal_srng_init(ab);
+ ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
goto err_fw_deinit;
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_release_smp2p_handle;
+
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
@@ -1078,6 +1198,9 @@ err_ce_free:
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
+err_release_smp2p_handle:
+ ath11k_ahb_release_smp2p_handle(ab);
+
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
@@ -1088,20 +1211,10 @@ err_core_free:
return ret;
}
-static int ath11k_ahb_remove(struct platform_device *pdev)
+static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
- struct ath11k_base *ab = platform_get_drvdata(pdev);
unsigned long left;
- if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
- ath11k_debugfs_soc_destroy(ab);
- ath11k_qmi_deinit_service(ab);
- goto qmi_fail;
- }
-
- reinit_completion(&ab->driver_recovery);
-
if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
left = wait_for_completion_timeout(&ab->driver_recovery,
ATH11K_AHB_RECOVERY_TIMEOUT);
@@ -1111,19 +1224,61 @@ static int ath11k_ahb_remove(struct platform_device *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath11k_ahb_free_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
- ath11k_core_deinit(ab);
-qmi_fail:
ath11k_ahb_free_irq(ab);
ath11k_hal_srng_deinit(ab);
+ ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_ahb_power_down(ab);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ ath11k_ahb_remove_prepare(ab);
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_ahb_free_resources(ab);
return 0;
}
+static void ath11k_ahb_shutdown(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ /* platform shutdown() & remove() are mutually exclusive.
+ * remove() is invoked during rmmod & shutdown() during
+ * system reboot/shutdown.
+ */
+ ath11k_ahb_remove_prepare(ab);
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
+ goto free_resources;
+
+ ath11k_core_deinit(ab);
+
+free_resources:
+ ath11k_ahb_free_resources(ab);
+}
+
static struct platform_driver ath11k_ahb_driver = {
.driver = {
.name = "ath11k",
@@ -1131,6 +1286,7 @@ static struct platform_driver ath11k_ahb_driver = {
},
.probe = ath11k_ahb_probe,
.remove = ath11k_ahb_remove,
+ .shutdown = ath11k_ahb_shutdown,
};
static int ath11k_ahb_init(void)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 58a945411c5b..415ddfd26654 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_AHB_H
#define ATH11K_AHB_H
@@ -8,6 +9,16 @@
#include "core.h"
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH11K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH11K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH11K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+
+enum ath11k_ahb_smp2p_msg_id {
+ ATH11K_AHB_POWER_SAVE_ENTER = 1,
+ ATH11K_AHB_POWER_SAVE_EXIT,
+};
+
struct ath11k_base;
struct ath11k_ahb {
@@ -21,6 +32,11 @@ struct ath11k_ahb {
u32 ce_size;
bool use_tz;
} fw;
+ struct {
+ unsigned short seq_no;
+ unsigned int smem_bit;
+ struct qcom_smem_state *smem_state;
+ } smp2p_info;
};
static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index c14c51f38709..f2da95fd4253 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -250,7 +250,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
- /* only ce4 needs shadow workaroud*/
+ /* only ce4 needs shadow workaround */
if (ce_id == 4)
return true;
return false;
@@ -1042,7 +1042,7 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
ret = ath11k_ce_alloc_pipe(ab, i);
if (ret) {
- /* Free any parial successful allocation */
+ /* Free any partial successful allocation */
ath11k_ce_free_pipes(ab);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c3e9e4f7bc24..b99180bc8172 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -70,6 +70,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
+ .fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -81,6 +82,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
+ .cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -106,6 +108,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -141,6 +150,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
+ .fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -152,6 +162,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
+ .cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -177,6 +188,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "qca6390 hw2.0",
@@ -212,6 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -222,6 +241,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -247,6 +267,16 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0171ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "qcn9074 hw1.0",
@@ -281,6 +311,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 16,
.fft_hdr_len = 24,
.max_fft_bins = 1024,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -292,6 +323,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 2,
.num_vdevs = 8,
.num_peers = 128,
@@ -317,6 +349,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6855 hw2.0",
@@ -352,6 +391,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -362,6 +402,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -387,6 +428,16 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6855 hw2.1",
@@ -422,6 +473,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -431,6 +483,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -456,6 +509,16 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6750 hw1.0",
@@ -468,7 +531,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.max_radios = 1,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6750_ops,
- .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .ring_mask = &ath11k_hw_ring_mask_wcn6750,
.internal_sleep_clock = false,
.regs = &wcn6750_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
@@ -491,6 +554,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -499,7 +563,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
- .cold_boot_calib = false,
+ .cold_boot_calib = true,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -508,8 +573,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
- .hal_params = &ath11k_hw_hal_params_qca6390,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_wcn6750,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
@@ -524,7 +589,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.static_window_map = true,
.hybrid_bus_type = true,
.fixed_fw_mem = true,
- .support_off_channel_tx = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = false,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
+ .smp2p_wow_exit = true,
},
};
@@ -535,6 +607,52 @@ static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base
return &ab->pdevs[0];
}
+void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_init(struct ath11k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+
+ init_completion(&ar->fw_stats_complete);
+}
+
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
+{
+ ath11k_fw_stats_pdevs_free(&stats->pdevs);
+ ath11k_fw_stats_vdevs_free(&stats->vdevs);
+ ath11k_fw_stats_bcn_free(&stats->bcn);
+}
+
int ath11k_core_suspend(struct ath11k_base *ab)
{
int ret;
@@ -1544,7 +1662,7 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
@@ -1563,6 +1681,8 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
+
+ reinit_completion(&ab->driver_recovery);
}
static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index afad8f55e433..cf2f52cc4e30 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -498,6 +498,13 @@ struct ath11k_sta {
bool use_4addr_set;
u16 tcl_metadata;
+
+ /* Protected with ar->data_lock */
+ enum ath11k_wmi_peer_ps_state peer_ps_state;
+ u64 ps_start_time;
+ u64 ps_start_jiffies;
+ u64 ps_total_duration;
+ bool peer_current_ps_valid;
};
#define ATH11K_MIN_5G_FREQ 4150
@@ -545,9 +552,6 @@ struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
u32 extd_tx_stats;
- struct ath11k_fw_stats fw_stats;
- struct completion fw_stats_complete;
- bool fw_stats_done;
u32 extd_rx_stats;
u32 pktlog_filter;
u32 pktlog_mode;
@@ -710,6 +714,13 @@ struct ath11k {
u8 twt_enabled;
bool nlo_enabled;
u8 alpha2[REG_ALPHA2_LEN + 1];
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ /* protected by conf_mutex */
+ bool ps_state_enable;
+ bool ps_timekeeper_enable;
};
struct ath11k_band_cap {
@@ -887,7 +898,7 @@ struct ath11k_base {
/* Below regd's are protected by ab->data_lock */
/* This is the regd set for every radio
- * by the firmware during initializatin
+ * by the firmware during initialization
*/
struct ieee80211_regdomain *default_regd[MAX_RADIOS];
/* This regd is set during dynamic country setting
@@ -1112,6 +1123,12 @@ struct ath11k_fw_stats_bcn {
u32 tx_bcn_outage_cnt;
};
+void ath11k_fw_stats_init(struct ath11k *ar);
+void ath11k_fw_stats_pdevs_free(struct list_head *head);
+void ath11k_fw_stats_vdevs_free(struct list_head *head);
+void ath11k_fw_stats_bcn_free(struct list_head *head);
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats);
+
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 9648e0017393..ccdf3d5ba1ab 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -14,6 +14,7 @@
#include "dp_tx.h"
#include "debugfs_htt_stats.h"
#include "peer.h"
+#include "hif.h"
static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
"REO2SW1_RING",
@@ -91,91 +92,35 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
spin_unlock_bh(&dbr_data->lock);
}
-static void ath11k_fw_stats_pdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_pdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_vdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_vdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_bcn_free(struct list_head *head)
-{
- struct ath11k_fw_stats_bcn *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
{
spin_lock_bh(&ar->data_lock);
- ar->debug.fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
spin_unlock_bh(&ar->data_lock);
}
-void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
- struct ath11k_fw_stats stats = {};
- struct ath11k *ar;
+ struct ath11k_base *ab = ar->ab;
struct ath11k_pdev *pdev;
bool is_end;
static unsigned int num_vdev, num_bcn;
size_t total_vdevs_started = 0;
- int i, ret;
-
- INIT_LIST_HEAD(&stats.pdevs);
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
-
- ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
- if (ret) {
- ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
- goto free;
- }
-
- rcu_read_lock();
- ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
- if (!ar) {
- rcu_read_unlock();
- ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
- stats.pdev_id, ret);
- goto free;
- }
+ int i;
- spin_lock_bh(&ar->data_lock);
+ /* WMI_REQUEST_PDEV_STAT request has been already processed */
- if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
- ar->debug.fw_stats_done = true;
- goto complete;
- }
-
- if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->debug.fw_stats_done = true;
- goto complete;
+ if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ ar->fw_stats_done = true;
+ return;
}
- if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats.vdevs)) {
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
ath11k_warn(ab, "empty vdev stats");
- goto complete;
+ return;
}
/* FW sends all the active VDEV stats irrespective of PDEV,
* hence limit until the count of all VDEVs started
@@ -188,43 +133,34 @@ void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb
is_end = ((++num_vdev) == total_vdevs_started);
- list_splice_tail_init(&stats.vdevs,
- &ar->debug.fw_stats.vdevs);
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
if (is_end) {
- ar->debug.fw_stats_done = true;
+ ar->fw_stats_done = true;
num_vdev = 0;
}
- goto complete;
+ return;
}
- if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats.bcn)) {
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
- goto complete;
+ return;
}
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
is_end = ((++num_bcn) == ar->num_started_vdevs);
- list_splice_tail_init(&stats.bcn,
- &ar->debug.fw_stats.bcn);
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
if (is_end) {
- ar->debug.fw_stats_done = true;
+ ar->fw_stats_done = true;
num_bcn = 0;
}
}
-complete:
- complete(&ar->debug.fw_stats_complete);
- rcu_read_unlock();
- spin_unlock_bh(&ar->data_lock);
-
-free:
- ath11k_fw_stats_pdevs_free(&stats.pdevs);
- ath11k_fw_stats_vdevs_free(&stats.vdevs);
- ath11k_fw_stats_bcn_free(&stats.bcn);
}
static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
@@ -245,7 +181,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
ath11k_debugfs_fw_stats_reset(ar);
- reinit_completion(&ar->debug.fw_stats_complete);
+ reinit_completion(&ar->fw_stats_complete);
ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
@@ -255,9 +191,8 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
return ret;
}
- time_left =
- wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1 * HZ);
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
if (!time_left)
return -ETIMEDOUT;
@@ -266,7 +201,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
break;
spin_lock_bh(&ar->data_lock);
- if (ar->debug.fw_stats_done) {
+ if (ar->fw_stats_done) {
spin_unlock_bh(&ar->data_lock);
break;
}
@@ -338,8 +273,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
goto err_free;
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
@@ -410,8 +344,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
goto err_free;
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
@@ -488,14 +421,13 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
}
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
/* since beacon stats request is looped for all active VDEVs, saved fw
* stats is not freed for each request until done for all active VDEVs
*/
spin_lock_bh(&ar->data_lock);
- ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn);
spin_unlock_bh(&ar->data_lock);
file->private_data = buf;
@@ -982,6 +914,63 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
+static int ath11k_open_sram_dump(struct inode *inode, struct file *file)
+{
+ struct ath11k_base *ab = inode->i_private;
+ u8 *buf;
+ u32 start, end;
+ int ret;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+
+ buf = vmalloc(end - start + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath11k_hif_read(ab, buf, start, end);
+ if (ret) {
+ ath11k_warn(ab, "failed to dump sram: %d\n", ret);
+ vfree(buf);
+ return ret;
+ }
+
+ file->private_data = buf;
+ return 0;
+}
+
+static ssize_t ath11k_read_sram_dump(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->f_inode->i_private;
+ const char *buf = file->private_data;
+ int len;
+ u32 start, end;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+ len = end - start + 1;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath11k_release_sram_dump(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops_sram_dump = {
+ .open = ath11k_open_sram_dump,
+ .read = ath11k_read_sram_dump,
+ .release = ath11k_release_sram_dump,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@@ -997,6 +986,10 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
+ if (ab->hw_params.sram_dump.start != 0)
+ debugfs_create_file("sram", 0400, ab->debugfs_soc, ab,
+ &fops_sram_dump);
+
return 0;
}
@@ -1025,7 +1018,7 @@ void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
ar->debug.debugfs_pdev);
- ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+ ar->fw_stats.debugfs_fwstats = fwstats_dir;
/* all stats debugfs files created are under "fw_stats" directory
* created per PDEV
@@ -1036,12 +1029,6 @@ void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
&fops_vdev_stats);
debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
&fops_bcn_stats);
-
- INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
-
- init_completion(&ar->debug.fw_stats_complete);
}
static ssize_t ath11k_write_pktlog_filter(struct file *file,
@@ -1382,6 +1369,193 @@ static const struct file_operations fops_dbr_debug = {
.llseek = default_llseek,
};
+static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ ssize_t ret;
+ u8 ps_timekeeper_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ar->ps_timekeeper_enable = !!ps_timekeeper_enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_timekeeper_enable = {
+ .read = ath11k_read_ps_timekeeper_enable,
+ .write = ath11k_write_ps_timekeeper_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_reset_peer_ps_duration(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_reset_ps_duration(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+ u8 reset_ps_duration;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &reset_ps_duration))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_reset_peer_ps_duration,
+ ar);
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_ps_duration = {
+ .write = ath11k_write_reset_ps_duration,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ arsta->ps_start_time = 0;
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ps_state_enable = !!ps_state_enable;
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
+ ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable) {
+ ar->ps_timekeeper_enable = false;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_peer_ps_state_disable,
+ ar);
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath11k_read_ps_state_enable,
+ .write = ath11k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -1428,6 +1602,20 @@ int ath11k_debugfs_register(struct ath11k *ar)
debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
ar, &fops_dbr_debug);
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar,
+ &fops_ps_state_enable);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("ps_timekeeper_enable", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_ps_timekeeper_enable);
+
+ debugfs_create_file("reset_ps_duration", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_reset_ps_duration);
+ }
+
return 0;
}
@@ -1456,11 +1644,13 @@ static ssize_t ath11k_write_twt_add_dialog(struct file *file,
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_add_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
u8 buf[128] = {0};
int ret;
- if (arvif->ar->twt_enabled == 0) {
- ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
@@ -1490,13 +1680,38 @@ static ssize_t ath11k_write_twt_add_dialog(struct file *file,
if (ret != 16)
return -EINVAL;
+ /* In the case of station vif, TWT is entirely handled by
+ * the firmware based on the input parameters in the TWT enable
+ * WMI command that is sent to the target during assoc.
+ * For manually testing the TWT feature, we need to first disable
+ * TWT and send enable command again with TWT input parameter
+ * sta_cong_timer_ms set to 0.
+ */
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ twt_params.sta_cong_timer_ms = 0;
+
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
if (ret)
- return ret;
+ goto err_twt_add_dialog;
return count;
+
+err_twt_add_dialog:
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return ret;
}
static ssize_t ath11k_write_twt_del_dialog(struct file *file,
@@ -1505,11 +1720,13 @@ static ssize_t ath11k_write_twt_del_dialog(struct file *file,
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_del_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
u8 buf[64] = {0};
int ret;
- if (arvif->ar->twt_enabled == 0) {
- ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
@@ -1535,6 +1752,12 @@ static ssize_t ath11k_write_twt_del_dialog(struct file *file,
if (ret)
return ret;
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
return count;
}
@@ -1638,36 +1861,35 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
.open = simple_open
};
-int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
- if (arvif->vif->type == NL80211_IFTYPE_AP && !arvif->debugfs_twt) {
- arvif->debugfs_twt = debugfs_create_dir("twt",
- arvif->vif->debugfs_dir);
- if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
- ath11k_warn(arvif->ar->ab,
- "failed to create directory %p\n",
- arvif->debugfs_twt);
- arvif->debugfs_twt = NULL;
- return -1;
- }
+ struct ath11k_base *ab = arvif->ar->ab;
- debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_add_dialog);
+ if (arvif->vif->type != NL80211_IFTYPE_AP &&
+ !(arvif->vif->type == NL80211_IFTYPE_STATION &&
+ test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
+ return;
- debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_del_dialog);
+ arvif->debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
- debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_pause_dialog);
+ debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
- debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_resume_dialog);
- }
- return 0;
+ debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
}
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
+ if (!arvif->debugfs_twt)
+ return;
+
debugfs_remove_recursive(arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 30c00cb28311..3af0169f6cf2 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -269,7 +269,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
int ath11k_debugfs_register(struct ath11k *ar);
void ath11k_debugfs_unregister(struct ath11k *ar);
-void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
@@ -306,7 +306,7 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
-int ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
@@ -341,8 +341,8 @@ static inline void ath11k_debugfs_unregister(struct ath11k *ar)
{
}
-static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k *ar,
+ struct ath11k_fw_stats *stats)
{
}
@@ -386,9 +386,8 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-static inline int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
- return 0;
}
static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 5d722b51b125..2b97cbbd28cb 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -630,7 +630,7 @@ struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
* completing the burst, we identify the txop used in the burst and
* incr the corresponding bin.
* Each bin represents 1ms & we have 10 bins in this histogram.
- * they are deined in FW using the following macros
+ * they are defined in FW using the following macros
* #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
* #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms )
*/
@@ -1897,7 +1897,7 @@ struct htt_phy_counters_tlv {
u32 phytx_abort_cnt;
/* number of times rx abort initiated by phy */
u32 phyrx_abort_cnt;
- /* number of rx defered count initiated by phy */
+ /* number of rx deferred count initiated by phy */
u32 phyrx_defer_abort_cnt;
/* number of sizing events generated at LSTF */
u32 rx_gain_adj_lstf_event_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 1b1acbdf837a..9cc4ef28e751 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -751,6 +751,102 @@ static const struct file_operations fops_htt_peer_stats_reset = {
.llseek = default_llseek,
};
+static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u64 time_since_station_in_power_save;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ time_since_station_in_power_save = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies);
+ else
+ time_since_station_in_power_save = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n",
+ time_since_station_in_power_save);
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_current_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_current_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ u64 power_save_duration;
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ power_save_duration = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies)
+ + arsta->ps_total_duration;
+ else
+ power_save_duration = arsta->ps_total_duration;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_total_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_total_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -778,4 +874,15 @@ void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vi
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
+
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("current_ps_duration", 0440, dir, sta,
+ &fops_current_ps_duration);
+ debugfs_create_file("total_ps_duration", 0440, dir, sta,
+ &fops_total_ps_duration);
+ }
}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8b790ce72e5d..f5156a7fbdd7 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -131,13 +132,11 @@ static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
switch (type) {
case HAL_WBM2SW_RELEASE:
- if (ring_num < 3) {
- grp_mask = &ab->hw_params.ring_mask->tx[0];
- } else if (ring_num == 3) {
+ if (ring_num == DP_RX_RELEASE_RING_NUM) {
grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
- return -ENOENT;
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
}
break;
case HAL_REO_EXCEPTION:
@@ -371,6 +370,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
+ u8 tcl_num, wbm_num;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -396,9 +396,12 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
+ wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
+
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
- HAL_TCL_DATA, i, 0,
- DP_TCL_DATA_RING_SIZE);
+ HAL_TCL_DATA, tcl_num, 0,
+ ab->hw_params.tx_ring_size);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
@@ -406,7 +409,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
- HAL_WBM2SW_RELEASE, i, 0,
+ HAL_WBM2SW_RELEASE, wbm_num, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
@@ -431,7 +434,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
- 3, 0, DP_RX_RELEASE_RING_SIZE);
+ DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
@@ -774,9 +777,10 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int i, j;
int tot_work_done = 0;
- if (ab->hw_params.ring_mask->tx[grp_id]) {
- i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
- ath11k_dp_tx_completion_handler(ab, i);
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
+ ab->hw_params.ring_mask->tx[grp_id])
+ ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {
@@ -963,7 +967,7 @@ static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
/* When v2_map_support is true:for STA mode, enable address
* search index, tcl uses ast_hash value in the descriptor.
- * When v2_map_support is false: for STA mode, dont' enable
+ * When v2_map_support is false: for STA mode, don't enable
* address search index.
*/
switch (arvif->vdev_type) {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index e9dfa209098b..be9eafc872b3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -203,6 +204,7 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TCL_DATA_RING_SIZE_WCN6750 2048
#define DP_TX_COMP_RING_SIZE 32768
#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
@@ -222,6 +224,8 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+#define DP_RX_RELEASE_RING_NUM 3
+
#define DP_RX_BUFFER_SIZE 2048
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
@@ -299,7 +303,7 @@ struct ath11k_dp {
#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-/* HTT tx completion is overlayed in wbm_release_ring */
+/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
@@ -466,7 +470,7 @@ enum htt_srng_ring_id {
* 3'b010: 4 usec
* 3'b011: 8 usec (default)
* 3'b100: 16 usec
- * Others: Reserverd
+ * Others: Reserved
* b'19 - response_required:
* Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
* b'20:31 - reserved: reserved for future use
@@ -993,8 +997,7 @@ struct htt_rx_ring_tlv_filter {
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
-/**
- * Enumeration for full monitor mode destination ring select
+/* Enumeration for full monitor mode destination ring select
* 0 - REO destination ring select
* 1 - FW destination ring select
* 2 - SW destination ring select
@@ -1391,8 +1394,7 @@ struct htt_ppdu_stats_info {
struct list_head list;
};
-/**
- * @brief target -> host packet log message
+/* @brief target -> host packet log message
*
* @details
* The following field definitions describe the format of the packet log
@@ -1430,8 +1432,7 @@ struct htt_pktlog_msg {
u8 payload[];
};
-/**
- * @brief host -> target FW extended statistics retrieve
+/* @brief host -> target FW extended statistics retrieve
*
* @details
* The following field definitions describe the format of the HTT host
@@ -1566,8 +1567,7 @@ struct htt_ext_stats_cfg_params {
u32 cfg3;
};
-/**
- * @brief target -> host extended statistics upload
+/* @brief target -> host extended statistics upload
*
* @details
* The following field definitions describe the format of the HTT target
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 2148acf37071..c5a4c34d7749 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2499,7 +2499,7 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
/* PN for multicast packets are not validate in HW,
* so skip 802.3 rx path
- * Also, fast_rx expectes the STA to be authorized, hence
+ * Also, fast_rx expects the STA to be authorized, hence
* eapol packets are sent in slow path.
*/
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
@@ -5197,7 +5197,8 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
- memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index c17a2620aad7..8afbba236935 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -93,7 +94,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
- u8 ring_selector = 0, ring_map = 0;
+ u32 ring_selector = 0;
+ u8 ring_map = 0;
bool tcl_ring_retry;
if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
@@ -105,19 +107,13 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
- /* Let the default ring selection be based on current processor
- * number, where one of the 3 tcl rings are selected based on
- * the smp_processor_id(). In case that ring
- * is full/busy, we resort to other available rings.
- * If all rings are full, we drop the packet.
- * //TODO Add throttling logic when all rings are full
- */
- ring_selector = smp_processor_id();
+ ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+ ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= BIT(ti.ring_id);
@@ -129,7 +125,8 @@ tcl_ring_sel:
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
- if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
+ if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+ !ab->hw_params.tcl_ring_retry) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
@@ -247,7 +244,7 @@ tcl_ring_sel:
* Restart ring selection if some rings are not checked yet.
*/
if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
- ab->hw_params.max_tx_ring > 1) {
+ ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
}
@@ -755,7 +752,7 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
return 0;
/* Can this be optimized so that we keep the pending command list only
- * for tid delete command to free up the resoruce on the command status
+ * for tid delete command to free up the resource on the command status
* indication?
*/
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index bda71ab5a1f2..2fd224480d45 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -126,7 +126,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
},
{ /* WBM2SW_RELEASE */
.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
- .max_rings = 4,
+ .max_rings = 5,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
@@ -1164,7 +1164,7 @@ void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
{
lockdep_assert_held(&srng->lock);
- /* check whether the ring is emptry. Update the shadow
+ /* check whether the ring is empty. Update the shadow
* HP only when then ring isn't empty.
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 110c337ddf33..6a1f78ee6eb6 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -243,7 +243,7 @@ struct ath11k_base;
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
-/* TCL ring feild mask and offset */
+/* TCL ring field mask and offset */
#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
@@ -268,7 +268,7 @@ struct ath11k_base;
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
-/* REO ring feild mask and offset */
+/* REO ring field mask and offset */
#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
@@ -389,6 +389,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
HAL_SRNG_RING_ID_UMAC_ID_END = 127,
HAL_SRNG_RING_ID_LMAC1_ID_START,
@@ -450,13 +451,13 @@ enum hal_ring_type {
/**
* enum hal_reo_cmd_type: Enum for REO command type
- * @CMD_GET_QUEUE_STATS: Get REO queue status/stats
- * @CMD_FLUSH_QUEUE: Flush all frames in REO queue
- * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache
- * @CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
* earlier with a 'REO_FLUSH_CACHE' command
- * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
- * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
*/
enum hal_reo_cmd_type {
HAL_REO_CMD_GET_QUEUE_STATS = 0,
@@ -635,7 +636,7 @@ struct hal_srng {
} u;
};
-/* Interrupt mitigation - Batch threshold in terms of numer of frames */
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
@@ -678,6 +679,7 @@ enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_SW1_BM,
HAL_RX_BUF_RBM_SW2_BM,
HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
};
#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
@@ -873,8 +875,7 @@ struct hal_reo_status {
} u;
};
-/**
- * HAL context to be used to access SRNG APIs (currently used by data path
+/* HAL context to be used to access SRNG APIs (currently used by data path
* and transport (CE) modules)
*/
struct ath11k_hal {
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 24e72e75a8c7..d895ea878d9f 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -607,7 +607,7 @@ struct rx_msdu_desc {
*
* msdu_continuation
* When set, this MSDU buffer was not able to hold the entire MSDU.
- * The next buffer will therefor contain additional information
+ * The next buffer will therefore contain additional information
* related to this MSDU.
*
* msdu_length
@@ -643,7 +643,7 @@ struct rx_msdu_desc {
*
* da_idx_timeout
* Indicates, an unsuccessful MAC destination address search due
- * to the expiration of search timer fot this MSDU.
+ * to the expiration of search timer for this MSDU.
*/
enum hal_reo_dest_ring_buffer_type {
@@ -1678,7 +1678,7 @@ struct hal_wbm_release_ring {
* Producer: SW/TQM/RXDMA/REO/SWITCH
* Consumer: WBM/SW/FW
*
- * HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
* for software based completions.
*
* buf_addr_info
@@ -2159,7 +2159,7 @@ struct hal_reo_status_hdr {
* commands.
*
* execution_time (in us)
- * The amount of time REO took to excecute the command. Note that
+ * The amount of time REO took to execute the command. Note that
* this time does not include the duration of the command waiting
* in the command ring, before the execution started.
*
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index c8929de8ce6c..d1b0e36e04a9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hal_desc.h"
@@ -44,8 +45,7 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |=
- FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
- (ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index 36f4f6f6cbc2..c5e88364afe5 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@@ -35,6 +36,7 @@ struct hal_tx_info {
u8 lmac_id;
u8 dscp_tid_tbl_idx;
bool enable_mesh;
+ u8 rbm_id;
};
/* TODO: Check if the actual desc macros can be used instead */
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index e9366f786fbb..659b80d2abd4 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -11,6 +11,7 @@
struct ath11k_hif_ops {
u32 (*read32)(struct ath11k_base *sc, u32 address);
void (*write32)(struct ath11k_base *sc, u32 address, u32 data);
+ int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end);
void (*irq_enable)(struct ath11k_base *sc);
void (*irq_disable)(struct ath11k_base *sc);
int (*start)(struct ath11k_base *sc);
@@ -99,6 +100,15 @@ static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 d
sc->hif.ops->write32(sc, address, data);
}
+static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf,
+ u32 start, u32 end)
+{
+ if (!ab->hif.ops->read)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->read(ab, buf, start, end);
+}
+
static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
@@ -134,4 +144,5 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
else
*msi_data_idx = ce_id;
}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 96db85c55585..dbcc0c4035b6 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -820,6 +820,30 @@ static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
+static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Let the default ring selection be based on current processor
+ * number, where one of the 3 tcl rings are selected based on
+ * the smp_processor_id(). In case that ring
+ * is full/busy, we resort to other available rings.
+ * If all rings are full, we drop the packet.
+ *
+ * TODO: Add throttling logic when all rings are full
+ */
+ return smp_processor_id();
+}
+
+static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Select the TCL ring based on the flow hash of the SKB instead
+ * of CPU ID. Since applications pumping the traffic can be scheduled
+ * on multiple CPUs, there is a chance that packets of the same flow
+ * could end up on different TCL rings; this could sometimes result in
+ * out-of-order arrival of the packets at the receiver.
+ */
+ return skb_get_hash(skb);
+}
+
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -857,6 +881,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -896,6 +921,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -935,6 +961,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -974,6 +1001,7 @@ const struct ath11k_hw_ops qcn9074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6855_ops = {
@@ -1013,6 +1041,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6750_ops = {
@@ -1052,11 +1081,14 @@ const struct ath11k_hw_ops wcn6750_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
};
-#define ATH11K_TX_RING_MASK_0 0x1
-#define ATH11K_TX_RING_MASK_1 0x2
-#define ATH11K_TX_RING_MASK_2 0x4
+#define ATH11K_TX_RING_MASK_0 BIT(0)
+#define ATH11K_TX_RING_MASK_1 BIT(1)
+#define ATH11K_TX_RING_MASK_2 BIT(2)
+#define ATH11K_TX_RING_MASK_3 BIT(3)
+#define ATH11K_TX_RING_MASK_4 BIT(4)
#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
@@ -1903,6 +1935,43 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
},
};
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ 0,
+ ATH11K_TX_RING_MASK_2,
+ 0,
+ ATH11K_TX_RING_MASK_4,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
@@ -2332,12 +2401,55 @@ const struct ath11k_hw_regs wcn6750_regs = {
.hal_reo1_misc_ctl = 0x000005d8,
};
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index bb5ac940e470..8a3f24862edc 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -122,8 +122,15 @@ struct ath11k_hw_ring_mask {
u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
+struct ath11k_hw_tcl2wbm_rbm_map {
+ u8 tcl_ring_num;
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
struct ath11k_hw_hal_params {
enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
};
struct ath11k_hw_params {
@@ -166,6 +173,7 @@ struct ath11k_hw_params {
u8 summary_pad_sz;
u8 fft_hdr_len;
u16 max_fft_bins;
+ bool fragment_160mhz;
} spectral;
u16 interface_modes;
@@ -175,6 +183,7 @@ struct ath11k_hw_params {
bool idle_ps;
bool supports_sta_ps;
bool cold_boot_calib;
+ bool cbcal_restart_fw;
int fw_mem_mode;
u32 num_vdevs;
u32 num_peers;
@@ -200,6 +209,16 @@ struct ath11k_hw_params {
bool hybrid_bus_type;
bool fixed_fw_mem;
bool support_off_channel_tx;
+ bool supports_multi_bssid;
+
+ struct {
+ u32 start;
+ u32 end;
+ } sram_dump;
+
+ bool tcl_ring_retry;
+ u32 tx_ring_size;
+ bool smp2p_wow_exit;
};
struct ath11k_hw_ops {
@@ -242,6 +261,7 @@ struct ath11k_hw_ops {
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ u32 (*get_ring_selector)(struct sk_buff *skb);
};
extern const struct ath11k_hw_ops ipq8074_ops;
@@ -254,9 +274,11 @@ extern const struct ath11k_hw_ops wcn6750_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
@@ -397,4 +419,5 @@ static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
}
extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 7e91e347c9ff..84d956ad4093 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3059,7 +3059,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return ret;
}
- /* Enable all patial BSSID mask for SRG */
+ /* Enable all partial BSSID mask for SRG */
ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
@@ -3077,7 +3077,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return ret;
}
- /* Enable all patial BSSID mask for non-SRG */
+ /* Enable all partial BSSID mask for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
@@ -3350,10 +3350,15 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
- if (info->twt_requester || info->twt_responder)
- ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
- else
+ struct wmi_twt_enable_params twt_params = {0};
+
+ if (info->twt_requester || info->twt_responder) {
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id,
+ &twt_params);
+ } else {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
@@ -3451,7 +3456,7 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
break;
}
}
@@ -4524,6 +4529,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
@@ -4701,7 +4707,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->deflink.bandwidth,
sta->deflink.rx_nss,
- sta->smps_mode);
+ sta->deflink.smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -4737,7 +4743,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -4750,7 +4756,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->smps_mode, sta->addr);
+ sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4954,6 +4960,8 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -4994,7 +5002,7 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
{
bool subfer, subfee;
- int sound_dim = 0;
+ int sound_dim = 0, nsts = 0;
subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
@@ -5004,6 +5012,11 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
subfer = false;
}
+ if (ar->num_rx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ subfee = false;
+ }
+
/* If SU Beaformer is not set, then disable MU Beamformer Capability */
if (!subfer)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
@@ -5016,7 +5029,9 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
*vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
- /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+ nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
/* Enable Sounding Dimension Field only if SU BF is enabled */
if (subfer) {
@@ -5028,9 +5043,15 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
*vht_cap |= sound_dim;
}
- /* Use the STS advertised by FW unless SU Beamformee is not supported*/
- if (!subfee)
- *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ /* Enable Beamformee STS Field only if SU BF is enabled */
+ if (subfee) {
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
+
+ nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ *vht_cap |= nsts;
+ }
}
static struct ieee80211_sta_vht_cap
@@ -6173,6 +6194,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
+ /* In the case of hardware recovery, debugfs files are
+ * not deleted since ieee80211_ops.remove_interface() is
+ * not invoked. In such cases, try to delete the files.
+ * These will be re-created later.
+ */
+ ath11k_debugfs_remove_interface(arvif);
+
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@@ -6354,9 +6382,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
}
- ret = ath11k_debugfs_add_interface(arvif);
- if (ret)
- goto err_peer_del;
+ ath11k_debugfs_add_interface(arvif);
mutex_unlock(&ar->conf_mutex);
@@ -8421,6 +8447,95 @@ exit:
return ret;
}
+static int ath11k_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ spin_unlock_bh(&ar->data_lock);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
+ 1 * HZ);
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param = {0};
+ struct ath11k_fw_stats_pdev *pdev;
+ int ret;
+
+ /* Final Tx power is minimum of Target Power, CTL power, Regulatory
+ * Power, PSD EIRP Power. We just know the Regulatory power from the
+ * regulatory rules obtained. FW knows all these powers and sets the min
+ * of these. Hence, we request the FW pdev stats in which FW reports
+ * the minimum of all vdevs' channel Tx power.
+ */
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power is set as 2 units per dBm in FW. */
+ *dbm = pdev->chan_tx_power / 2;
+
+ spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
+ pdev->chan_tx_power, *dbm);
+ return 0;
+
+err_fallback:
+ mutex_unlock(&ar->conf_mutex);
+ /* We didn't get txpower from FW. Hence, rely on vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.start = ath11k_mac_op_start,
@@ -8471,6 +8586,7 @@ static const struct ieee80211_ops ath11k_ops = {
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = ath11k_mac_op_ipv6_changed,
#endif
+ .get_txpower = ath11k_mac_op_get_txpower,
.set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
.remain_on_channel = ath11k_mac_op_remain_on_channel,
@@ -8777,6 +8893,11 @@ static int __ath11k_mac_register(struct ath11k *ar)
if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+ if (ab->hw_params.supports_multi_bssid) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
@@ -8967,6 +9088,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i;
int ret;
+ u8 mac_addr[ETH_ALEN] = {0};
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
@@ -8979,13 +9101,18 @@ int ath11k_mac_register(struct ath11k_base *ab)
if (ret)
return ret;
+ device_get_mac_address(ab->dev, mac_addr);
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ab->pdevs_macaddr_valid) {
ether_addr_copy(ar->mac_addr, pdev->mac_addr);
} else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ if (is_zero_ether_addr(mac_addr))
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, mac_addr);
ar->mac_addr[4] += i;
}
@@ -9079,6 +9206,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
init_completion(&ar->completed_11d_scan);
+
+ ath11k_fw_stats_init(ar);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index c44df17719f6..86995e8dc913 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -402,8 +402,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
ret = ath11k_mhi_get_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to get msi for mhi\n");
- mhi_free_controller(mhi_ctrl);
- return ret;
+ goto free_controller;
}
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
@@ -412,7 +411,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
if (ret < 0)
- return ret;
+ goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xFFFFFFFF;
@@ -440,18 +439,22 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
default:
ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
ab->hw_rev);
- mhi_free_controller(mhi_ctrl);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_controller;
}
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
- mhi_free_controller(mhi_ctrl);
- return ret;
+ goto free_controller;
}
return 0;
+
+free_controller:
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+ return ret;
}
void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 5bd34a6273d9..99cf3357c66e 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -685,6 +685,7 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
+ .read = ath11k_pcic_read,
.power_down = ath11k_pci_power_down,
.power_up = ath11k_pci_power_up,
.suspend = ath11k_pci_hif_suspend,
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 1adf20ebef27..380f9d37b644 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -140,55 +140,100 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
+static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ if (offset < ATH11K_PCI_WINDOW_START)
+ iowrite32(value, ab->mem + offset);
+ else
+ ab->pci.ops->window_write32(ab, offset, value);
+}
+
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
int ret = 0;
+ bool wakeup_required;
/* for offset beyond BAR + 4K - 32, may
* need to wakeup the device to access.
*/
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START)
- iowrite32(value, ab->mem + offset);
- else
- ab->pci.ops->window_write32(ab, offset, value);
+ __ath11k_pcic_write32(ab, offset, value);
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
- !ret)
+ if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
+static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ u32 val;
+
+ if (offset < ATH11K_PCI_WINDOW_START)
+ val = ioread32(ab->mem + offset);
+ else
+ val = ab->pci.ops->window_read32(ab, offset);
+
+ return val;
+}
+
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
int ret = 0;
u32 val;
+ bool wakeup_required;
/* for offset beyond BAR + 4K - 32, may
* need to wakeup the device to access.
*/
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START)
- val = ioread32(ab->mem + offset);
- else
- val = ab->pci.ops->window_read32(ab, offset);
+ val = __ath11k_pcic_read32(ab, offset);
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
- !ret)
+ if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
+{
+ int ret = 0;
+ bool wakeup_required;
+ u32 *data = buf;
+ u32 i;
+
+ /* for offset beyond BAR + 4K - 32, may
+ * need to wakeup the device to access.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup) {
+ ret = ab->pci.ops->wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to wakeup for read from 0x%x: %d\n",
+ start, ret);
+ return ret;
+ }
+ }
+
+ for (i = start; i < end + 1; i += 4)
+ *data++ = __ath11k_pcic_read32(ab, i);
+
+ if (wakeup_required && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_read);
+
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
@@ -414,6 +459,7 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
+ dev_set_threaded(&irq_grp->napi_ndev, true);
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
@@ -517,7 +563,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ ath11k_pcic_ext_grp_napi_poll);
if (ab->hw_params.ring_mask->tx[i] ||
ab->hw_params.ring_mask->rx[i] ||
@@ -731,3 +777,37 @@ int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
+
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
+
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+ struct ath11k_ce_pipe *ce_pipe;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ synchronize_irq(ab->irq_num[irq_idx]);
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
index 0afbb34510db..ac012e88bf6d 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.h
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -12,6 +12,8 @@
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_DP_OFFSET 14
+#define ATH11K_PCI_CE_WAKE_IRQ 2
+
#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
@@ -45,4 +47,8 @@ void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 9e22aaf34b88..1ae7af02c364 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -302,6 +302,21 @@ static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, addr);
+ /* Check if the found peer is what we want to remove.
+ * While the sta is transitioning to another band we may
+ * have 2 peers with the same addr assigned to different
+ * vdev_ids. Make sure we are deleting the correct peer.
+ */
+ if (peer && peer->vdev_id == vdev_id)
+ ath11k_peer_rhash_delete(ab, peer);
+
+ /* Fallback to peer list search if the correct peer can't be found.
+ * Skip the deletion of the peer from the rhash since it has already
+ * been deleted in peer add.
+ */
+ if (!peer)
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+
if (!peer) {
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
@@ -312,8 +327,6 @@ static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
return -EINVAL;
}
- ath11k_peer_rhash_delete(ab, peer);
-
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
@@ -372,8 +385,17 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
- spin_unlock_bh(&ar->ab->base_lock);
- return -EINVAL;
+ if (peer->vdev_id == param->vdev_id) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ return -EINVAL;
+ }
+
+ /* Assume sta is transitioning to another band.
+ * Remove here the peer from rhash.
+ */
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
}
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 00136601cb7d..51de2208b789 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1696,6 +1696,13 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
+static struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
@@ -1872,7 +1879,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
/* For QCA6390 by default FW requests a block of ~4M contiguous
* DMA memory, it's hard to allocate from OS. So host returns
- * failure to FW and FW will then request mulitple blocks of small
+ * failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
if (!(ab->hw_params.fixed_mem_region ||
@@ -3006,6 +3013,12 @@ static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
struct ath11k_base *ab = qmi->ab;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware ready\n");
+
+ if (!ab->qmi.cal_done) {
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ }
+
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
}
@@ -3023,6 +3036,19 @@ static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cold boot calibration done\n");
}
+static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware init done\n");
+}
+
static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
{
.type = QMI_INDICATION,
@@ -3053,6 +3079,14 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
.fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
},
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
+ .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_init_done_cb,
+ },
};
static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3145,7 +3179,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
}
break;
- case ATH11K_QMI_EVENT_FW_READY:
+ case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
ath11k_hal_dump_srng_stats(ab);
@@ -3169,6 +3203,22 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
}
break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ /* For targets requiring a FW restart upon cold
+ * boot completion, there is no need to process
+ * FW ready; such targets will receive FW init
+ * done message after FW restart.
+ */
+ if (ab->hw_params.cbcal_restart_fw)
+ break;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ath11k_core_qmi_firmware_ready(ab);
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index c83cf822be81..2ec56a34fa81 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -31,8 +31,9 @@
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
-#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
-#define QMI_WLFW_FW_READY_IND_V01 0x0038
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
@@ -69,6 +70,7 @@ enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
ATH11K_QMI_EVENT_POWER_UP,
ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_FW_INIT_DONE,
ATH11K_QMI_EVENT_MAX,
};
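
The hunks above renumber the indications (cold boot cal done moves to 0x003E, FW ready to 0x0021) and reuse the old FW ready ID 0x0038 for a new "FW init done" indication with a matching ATH11K_QMI_EVENT_FW_INIT_DONE event. In the event worker, FW init done now drives the full bring-up/restart path, while FW ready only matters for targets that do not restart firmware after cold-boot calibration. A small stand-alone sketch of that split, using simplified stand-in types rather than the driver's structures:

/* Illustrative sketch only: simplified stand-ins, not ath11k structures. */
#include <stdbool.h>
#include <stdio.h>

enum qmi_event { EV_FW_READY, EV_FW_INIT_DONE, EV_COLD_BOOT_CAL_DONE };

struct fake_target {
	bool cbcal_restart_fw;	/* mirrors hw_params.cbcal_restart_fw */
	bool registered;
};

static void handle_event(struct fake_target *t, enum qmi_event ev)
{
	switch (ev) {
	case EV_FW_INIT_DONE:
		/* full bring-up (or restart when already registered) */
		printf("init done: %s\n",
		       t->registered ? "restart core" : "first bring-up");
		break;
	case EV_FW_READY:
		/* targets that restart FW after cold-boot calibration wait
		 * for FW init done instead of acting on FW ready
		 */
		if (t->cbcal_restart_fw)
			break;
		printf("fw ready: bring up core directly\n");
		break;
	case EV_COLD_BOOT_CAL_DONE:
		break;
	}
}

int main(void)
{
	struct fake_target t = { .cbcal_restart_fw = true };

	handle_event(&t, EV_FW_READY);		/* ignored on this target */
	handle_event(&t, EV_FW_INIT_DONE);	/* performs the bring-up */
	return 0;
}
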
@@ -291,6 +293,10 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
char placeholder;
};
+struct qmi_wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 26ecc1bcd9d5..786d5f36f5e5 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -877,7 +877,7 @@ struct rx_msdu_start_wcn6855 {
*
* l4_offset
* Depending upon mode bit, this field either indicates the
- * L4 offset nin bytes from the start of RX_HEADER (only valid
+ * L4 offset in bytes from the start of RX_HEADER (only valid
* if either ipv4_proto or ipv6_proto is set to 1) or indicates
* the offset in bytes to the start of TCP or UDP header from
* the start of the IP header after decapsulation (Only valid if
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 516a7b4cd180..705868198df4 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -30,6 +30,7 @@
#define ATH11K_SPECTRAL_20MHZ 20
#define ATH11K_SPECTRAL_40MHZ 40
#define ATH11K_SPECTRAL_80MHZ 80
+#define ATH11K_SPECTRAL_160MHZ 160
#define ATH11K_SPECTRAL_SIGNATURE 0xFA
@@ -183,6 +184,8 @@ static int ath11k_spectral_scan_trigger(struct ath11k *ar)
if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
return 0;
+ ar->spectral.is_primary = true;
+
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
@@ -585,6 +588,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
u8 chan_width_mhz, bin_sz;
int ret;
u32 check_length;
+ bool fragment_sample = false;
lockdep_assert_held(&ar->spectral.lock);
@@ -639,6 +643,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
case ATH11K_SPECTRAL_80MHZ:
fft_sample->chan_width_mhz = chan_width_mhz;
break;
+ case ATH11K_SPECTRAL_160MHZ:
+ if (ab->hw_params.spectral.fragment_160mhz) {
+ chan_width_mhz /= 2;
+ fragment_sample = true;
+ }
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
default:
ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
return -EINVAL;
@@ -663,6 +674,17 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
+ /* If freq2 is available then the spectral scan results are fragmented
+ * as primary and secondary
+ */
+ if (fragment_sample && freq) {
+ if (!ar->spectral.is_primary)
+ fft_sample->freq1 = cpu_to_be16(freq);
+
+ /* We have to toggle the is_primary to handle the next report */
+ ar->spectral.is_primary = !ar->spectral.is_primary;
+ }
+
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
ab->hw_params.spectral.fft_sz);
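
When the hardware reports a 160 MHz spectral scan as two fragments (hw_params.spectral.fragment_160mhz in the hunk above), each FFT report covers only half the span: the driver halves the advertised width and uses the is_primary flag to alternate between the primary and secondary halves, rewriting freq1 from freq2 for the secondary one. A stand-alone sketch of that toggling with a simplified sample structure, not the driver's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sample {
	uint16_t freq1;
	uint16_t freq2;
	uint8_t width_mhz;
};

static bool is_primary = true;	/* ar->spectral.is_primary in the driver */

static void process_fragment(struct sample *s)
{
	/* a 160 MHz scan arrives as two 80 MHz fragments */
	s->width_mhz = 160 / 2;

	if (s->freq2) {
		/* the secondary fragment is centered on freq2 */
		if (!is_primary)
			s->freq1 = s->freq2;
		is_primary = !is_primary;
	}
}

int main(void)
{
	struct sample a = { .freq1 = 5250, .freq2 = 5570 };
	struct sample b = a;

	process_fragment(&a);	/* primary half, freq1 kept */
	process_fragment(&b);	/* secondary half, freq1 <- freq2 */
	printf("%u MHz @ %u, %u MHz @ %u\n",
	       a.width_mhz, a.freq1, b.width_mhz, b.freq1);
	return 0;
}
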
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
index 081744265f2a..96bfa16e18e9 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.h
+++ b/drivers/net/wireless/ath/ath11k/spectral.h
@@ -35,6 +35,7 @@ struct ath11k_spectral {
u16 count;
u8 fft_size;
bool enabled;
+ bool is_primary;
};
#ifdef CONFIG_ATH11K_SPECTRAL
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c96b26f39a25..23ed01bd44f9 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -99,7 +99,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
- /* display in millidegree celcius */
+ /* display in millidegree Celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
index f9af55f3682d..3e39675ef7f5 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.h
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -19,7 +19,7 @@ struct ath11k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
- /* temperature value in Celcius degree
+ /* temperature value in Celsius degree
* protected by data_lock
*/
int temperature;
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 76560587bea0..9535745fe026 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -305,6 +305,34 @@ TRACE_EVENT(ath11k_wmi_diag,
)
);
+TRACE_EVENT(ath11k_ps_timekeeper,
+ TP_PROTO(struct ath11k *ar, const void *peer_addr,
+ u32 peer_ps_timestamp, u8 peer_ps_state),
+ TP_ARGS(ar, peer_addr, peer_ps_timestamp, peer_ps_state),
+
+ TP_STRUCT__entry(__string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __dynamic_array(u8, peer_addr, ETH_ALEN)
+ __field(u8, peer_ps_state)
+ __field(u32, peer_ps_timestamp)
+ ),
+
+ TP_fast_assign(__assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ memcpy(__get_dynamic_array(peer_addr), peer_addr,
+ ETH_ALEN);
+ __entry->peer_ps_state = peer_ps_state;
+ __entry->peer_ps_timestamp = peer_ps_timestamp;
+ ),
+
+ TP_printk("%s %s %u %u",
+ __get_str(driver),
+ __get_str(device),
+ __entry->peer_ps_state,
+ __entry->peer_ps_timestamp
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 88ee4f9d19da..fad9f8d308a2 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -416,7 +416,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
* For example, for 4x4 capable macphys, first 4 chains can be used for first
- * mac and the remaing 4 chains can be used for the second mac or vice-versa.
+ * mac and the remaining 4 chains can be used for the second mac or vice-versa.
* In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
* will be advertised for second mac or vice-versa. Compute the shift value
* for tx/rx chainmask which will be used to advertise supported ht/vht rates to
@@ -991,9 +991,13 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
+ arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1007,6 +1011,17 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+ bss_conf = &arvif->vif->bss_conf;
+
+ if (bss_conf->nontransmitted) {
+ ether_addr_copy(cmd->trans_bssid.addr,
+ bss_conf->transmitter_bssid);
+ cmd->profile_idx = bss_conf->bssid_index;
+ cmd->profile_num = bss_conf->bssid_indicator;
+ }
+ }
+
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
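
The vdev-up hunk above fills trans_bssid, profile_idx and profile_num from bss_conf when the station is associated with a nontransmitted BSSID, so the firmware knows which profile of the multiple-BSSID set the vdev belongs to. For background, the sketch below shows how a nontransmitted BSSID relates to the transmitter BSSID under the 802.11 multiple-BSSID addressing scheme: only the low n bits differ, where n is the max-BSSID indicator (bssid_indicator). The helper is purely illustrative (it assumes n <= 8) and is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Derive nontransmitted BSSID 'idx' from the transmitter BSSID, assuming
 * the max-BSSID indicator n is at most 8 so only the last octet changes.
 */
static void derive_nontx_bssid(const uint8_t tx[6], unsigned int idx,
			       unsigned int n, uint8_t out[6])
{
	uint8_t mask = (uint8_t)((1u << n) - 1);
	uint8_t low = (uint8_t)((tx[5] + idx) & mask);
	int i;

	for (i = 0; i < 6; i++)
		out[i] = tx[i];
	out[5] = (uint8_t)((tx[5] & ~mask) | low);
}

int main(void)
{
	uint8_t tx[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x50 };
	uint8_t bssid[6];

	derive_nontx_bssid(tx, 2, 3, bssid);	/* profile 2 of an 8-BSSID set */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
	return 0;
}
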
@@ -3064,8 +3079,34 @@ int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
return ret;
}
-int
-ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
+{
+ twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ twt_params->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ twt_params->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ twt_params->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ twt_params->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ twt_params->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ twt_params->mbss_support = 0;
+}
+
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
@@ -3083,28 +3124,22 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
- cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
- cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
- cmd->congestion_thresh_teardown =
- ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
- cmd->congestion_thresh_critical =
- ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
- cmd->interference_thresh_teardown =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
- cmd->interference_thresh_setup =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
- cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
- cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
- cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
- cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
- cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
- cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
- cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
- cmd->remove_sta_slot_interval =
- ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
- /* TODO add MBSSID support */
- cmd->mbss_support = 0;
+ cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
+ cmd->default_slot_size = params->default_slot_size;
+ cmd->congestion_thresh_setup = params->congestion_thresh_setup;
+ cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
+ cmd->congestion_thresh_critical = params->congestion_thresh_critical;
+ cmd->interference_thresh_teardown = params->interference_thresh_teardown;
+ cmd->interference_thresh_setup = params->interference_thresh_setup;
+ cmd->min_no_sta_setup = params->min_no_sta_setup;
+ cmd->min_no_sta_teardown = params->min_no_sta_teardown;
+ cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
+ cmd->min_no_twt_slots = params->min_no_twt_slots;
+ cmd->max_no_sta_twt = params->max_no_sta_twt;
+ cmd->mode_check_interval = params->mode_check_interval;
+ cmd->add_sta_slot_interval = params->add_sta_slot_interval;
+ cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
+ cmd->mbss_support = params->mbss_support;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
@@ -6767,6 +6802,107 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
rcu_read_unlock();
}
+static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ struct ath11k_sta *arsta;
+ const void **tb;
+ enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch sta ps change ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "peer sta ps chnange ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ ev->peer_macaddr.addr, ev->peer_ps_state,
+ ev->ps_supported_bitmap, ev->peer_ps_valid,
+ ev->peer_ps_timestamp);
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+
+ if (!ar) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
+ peer->vdev_id);
+
+ goto exit;
+ }
+
+ sta = peer->sta;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!sta) {
+ ath11k_warn(ab, "failed to find station entry %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer_previous_ps_state = arsta->peer_ps_state;
+ arsta->peer_ps_state = ev->peer_ps_state;
+ arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
+ !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
+ !ev->peer_ps_valid)
+ goto out;
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_start_time = ev->peer_ps_timestamp;
+ arsta->ps_start_jiffies = jiffies;
+ } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
+ peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_total_duration = arsta->ps_total_duration +
+ (ev->peer_ps_timestamp - arsta->ps_start_time);
+ }
+
+ if (ar->ps_timekeeper_enable)
+ trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
+ ev->peer_ps_timestamp,
+ arsta->peer_ps_state);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
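
The handler above accumulates, per peer, the total time spent in power save: a timestamp is latched on the OFF-to-ON transition and the difference is added to the running total when the peer returns to OFF. A stand-alone sketch of that bookkeeping with simplified types:

#include <stdint.h>
#include <stdio.h>

enum ps_state { PS_OFF, PS_ON };

struct peer_ps {
	enum ps_state state;
	uint32_t start_ts;		/* timestamp of the last OFF->ON change */
	uint64_t total_duration;	/* accumulated time spent in PS ON */
};

static void ps_state_change(struct peer_ps *p, enum ps_state new_state,
			    uint32_t ts)
{
	enum ps_state prev = p->state;

	p->state = new_state;
	if (new_state == PS_ON)
		p->start_ts = ts;
	else if (new_state == PS_OFF && prev == PS_ON)
		p->total_duration += ts - p->start_ts;
}

int main(void)
{
	struct peer_ps p = { .state = PS_OFF };

	ps_state_change(&p, PS_ON, 1000);
	ps_state_change(&p, PS_OFF, 1750);
	printf("slept for %llu units\n",
	       (unsigned long long)p.total_duration);
	return 0;
}
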
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
@@ -7409,7 +7545,53 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- ath11k_debugfs_fw_stats_process(ab, skb);
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ * debugfs fw stats. Therefore, processing it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats_done = true;
+ goto complete;
+ }
+
+ /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
+ * are currently requested only via debugfs fw stats. Hence, processing these
+ * in debugfs context
+ */
+ ath11k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_free(&stats);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -7960,6 +8142,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
+ case WMI_PEER_STA_PS_STATECHG_EVENTID:
+ ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+ break;
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath11k_wmi_gtk_offload_status_event(ab, skb);
break;
@@ -8962,12 +9147,13 @@ int ath11k_wmi_sta_keepalive(struct ath11k *ar,
cmd->interval = arg->interval;
cmd->method = arg->method;
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
- arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
- arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
- WMI_TAG_STA_KEEPALVE_ARP_RESPONSE) |
- FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
arp->src_ip4_addr = arg->src_ip4_addr;
arp->dest_ip4_addr = arg->dest_ip4_addr;
ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
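
The keepalive hunk above moves the ARP-response TLV header setup out of the method check so the header is always initialized, and also corrects the tag name; only the ARP address fields remain conditional. Below is a stand-alone sketch of the tag/length packing idea used by such TLV headers, where the length field excludes the header itself. The bit positions are simplified assumptions for illustration, not the driver's actual WMI_TLV_TAG/WMI_TLV_LEN layout:

#include <stdint.h>
#include <stdio.h>

#define TLV_HDR_SIZE	4u
/* simplified layout: tag in the low 16 bits, length in the high 16 bits */
#define TLV_TAG_MASK	0x0000ffffu
#define TLV_LEN_SHIFT	16

static uint32_t tlv_hdr(uint32_t tag, uint32_t total_len)
{
	return (tag & TLV_TAG_MASK) |
	       ((total_len - TLV_HDR_SIZE) << TLV_LEN_SHIFT);
}

int main(void)
{
	/* a 16-byte element: tag 0x1aa, payload length 12 */
	printf("hdr=0x%08x\n", tlv_hdr(0x1aa, 16));
	return 0;
}
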
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 4da248ffa318..8f2c07d70a4a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -17,7 +17,7 @@ struct ath11k_vif;
#define PSOC_HOST_MAX_NUM_SS (8)
-/* defines to set Packet extension values whic can be 0 us, 8 usec or 16 usec */
+/* defines to set Packet extension values which can be 0 us, 8 usec or 16 usec */
#define MAX_HE_NSS 8
#define MAX_HE_MODULATION 8
#define MAX_HE_RU 4
@@ -1214,7 +1214,7 @@ enum wmi_tlv_tag {
WMI_TAG_NS_OFFLOAD_TUPLE,
WMI_TAG_FTM_INTG_CMD,
WMI_TAG_STA_KEEPALIVE_CMD,
- WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE,
WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
WMI_TAG_AP_PS_PEER_CMD,
WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
@@ -2090,6 +2090,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
/* The second 128 bits */
@@ -4482,7 +4483,7 @@ struct wmi_pdev_radar_ev {
} __packed;
struct wmi_pdev_temperature_event {
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
s32 temp;
u32 pdev_id;
} __packed;
@@ -4708,7 +4709,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
*/
enum wmi_sta_ps_param_pspoll_count {
WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
- /* Values greater than 0 indicate the maximum numer of PS-Poll frames
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
};
@@ -4820,9 +4821,9 @@ enum wmi_rate_preamble {
/**
* enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
- * @WMI_RTS_CTS_DISABLED : RTS/CTS protection is disabled.
- * @WMI_USE_RTS_CTS : RTS/CTS Enabled.
- * @WMI_USE_CTS2SELF : CTS to self protection Enabled.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
*/
enum wmi_rtscts_prot_mode {
WMI_RTS_CTS_DISABLED = 0,
@@ -4833,13 +4834,13 @@ enum wmi_rtscts_prot_mode {
/**
* enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
* protection mode.
- * @WMI_RTSCTS_FOR_NO_RATESERIES - Neither of rate-series should use RTS-CTS
- * @WMI_RTSCTS_FOR_SECOND_RATESERIES - Only second rate-series will use RTS-CTS
- * @WMI_RTSCTS_ACROSS_SW_RETRIES - Only the second rate-series will use RTS-CTS,
- * but if there's a sw retry, both the rate
- * series will use RTS-CTS.
- * @WMI_RTSCTS_ERP - RTS/CTS used for ERP protection for every PPDU.
- * @WMI_RTSCTS_FOR_ALL_RATESERIES - Enable RTS-CTS for all rate series.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
*/
enum wmi_rtscts_profile {
WMI_RTSCTS_FOR_NO_RATESERIES = 0,
@@ -4933,6 +4934,25 @@ struct wmi_wmm_params_all_arg {
#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+struct wmi_twt_enable_params {
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+};
+
struct wmi_twt_enable_params_cmd {
u32 tlv_header;
u32 pdev_id;
@@ -5350,6 +5370,26 @@ struct wmi_debug_log_config_cmd_fixed_param {
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+enum ath11k_wmi_peer_ps_state {
+ WMI_PEER_PS_STATE_OFF,
+ WMI_PEER_PS_STATE_ON,
+ WMI_PEER_PS_STATE_DISABLED,
+};
+
+enum wmi_peer_ps_supported_bitmap {
+ /* Used to indicate that power save state change is valid */
+ WMI_PEER_PS_VALID = 0x1,
+ WMI_PEER_PS_STATE_TIMESTAMP = 0x2,
+};
+
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_ps_state;
+ u32 ps_supported_bitmap;
+ u32 peer_ps_valid;
+ u32 peer_ps_timestamp;
+} __packed;
+
struct ath11k_wmi_base {
struct ath11k_base *ab;
struct ath11k_pdev_wmi wmi[MAX_RADIOS];
@@ -6039,7 +6079,9 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
struct ath11k_fw_stats *fw_stats, u32 stats_id,
char *buf);
int ath11k_wmi_simulate_radar(struct ath11k *ar);
-int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
struct wmi_twt_add_dialog_params *params);
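
With the two prototypes above, enabling TWT becomes a two-step call: fill the structure with the driver defaults, optionally adjust individual fields, then pass it to the send helper. A sketch of the caller side (the mac.c changes are not part of this excerpt, so the surrounding context here is assumed):

struct wmi_twt_enable_params twt_params = {};
int ret;

ath11k_wmi_fill_default_twt_params(&twt_params);
/* individual fields could be overridden here before sending */
ret = ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
if (ret)
	ath11k_warn(ar->ab, "failed to enable twt: %d\n", ret);
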
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index b3e65cd13d83..1dec23b0699c 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -67,6 +68,13 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
int ret;
+ /* In the case of WCN6750, WoW wakeup is done
+ * by sending SMP2P power save exit message
+ * to the target processor.
+ */
+ if (ab->hw_params.smp2p_wow_exit)
+ return 0;
+
reinit_completion(&ab->wow.wakeup_completed);
ret = ath11k_wmi_wow_host_wakeup_ind(ar);
@@ -664,6 +672,12 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
int ret;
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
mutex_lock(&ar->conf_mutex);
ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
@@ -695,13 +709,6 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
- ath11k_mac_drain_tx(ar);
- ret = ath11k_mac_wait_tx_complete(ar);
- if (ret) {
- ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
- goto cleanup;
- }
-
ret = ath11k_wow_set_hw_filter(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e11c7e9accc0..a20e0aeae284 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1124,7 +1124,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
@@ -1249,7 +1249,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr)
{
struct ath6kl *ar = ath6kl_priv(ndev);
@@ -1279,7 +1279,7 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback) (void *cookie,
struct key_params *))
@@ -1314,7 +1314,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 9b5c7d8f2b95..201e45554070 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1014,7 +1014,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
switch (ie_id) {
case ATH6KL_FW_IE_FW_VERSION:
- strlcpy(ar->wiphy->fw_version, data,
+ strscpy(ar->wiphy->fw_version, data,
min(sizeof(ar->wiphy->fw_version), ie_len+1));
ath6kl_dbg(ATH6KL_DBG_BOOT,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index dc0e5ea25673..090ff0600c81 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1744,7 +1744,7 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
- /* on AR93xx and newer, count = 0 will make the the chip send
+ /* on AR93xx and newer, count = 0 will make the chip send
* spectral samples endlessly. Check if this really was intended,
* and fix otherwise.
*/
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 6cf087522157..571062f2e82a 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
if (!avp->assoc)
return false;
- skb = ieee80211_nullfunc_get(sc->hw, vif, false);
+ skb = ieee80211_nullfunc_get(sc->hw, vif, -1, false);
if (!skb)
return false;
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 994ec48b2f66..ca05b07a45e6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -364,33 +364,27 @@ ret:
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 len)
{
uint32_t *pattern = (uint32_t *)skb->data;
- switch (*pattern) {
- case 0x33221199:
- {
+ if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) {
struct htc_panic_bad_vaddr *htc_panic;
htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
htc_panic->exccause, htc_panic->pc,
htc_panic->badvaddr);
- break;
- }
- case 0x33221299:
- {
+ return;
+ }
+ if (*pattern == 0x33221299) {
struct htc_panic_bad_epid *htc_panic;
htc_panic = (struct htc_panic_bad_epid *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"bad epid: 0x%08x\n", htc_panic->epid);
- break;
- }
- default:
- dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
- break;
+ return;
}
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
}
/*
@@ -411,16 +405,26 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
if (!htc_handle || !skb)
return;
+ /* A valid message requires len >= 8.
+ *
+ * sizeof(struct htc_frame_hdr) == 8
+ * sizeof(struct htc_ready_msg) == 8
+ * sizeof(struct htc_panic_bad_vaddr) == 16
+ * sizeof(struct htc_panic_bad_epid) == 8
+ */
+ if (unlikely(len < sizeof(struct htc_frame_hdr)))
+ goto invalid;
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
if (epid == 0x99) {
- ath9k_htc_fw_panic_report(htc_handle, skb);
+ ath9k_htc_fw_panic_report(htc_handle, skb, len);
kfree_skb(skb);
return;
}
if (epid < 0 || epid >= ENDPOINT_MAX) {
+invalid:
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
else
@@ -432,21 +436,30 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
/* Handle trailer */
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
- if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) {
/* Move past the Watchdog pattern */
htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ len -= 4;
+ }
}
/* Get the message ID */
+ if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16)))
+ goto invalid;
msg_id = (__be16 *) ((void *) htc_hdr +
sizeof(struct htc_frame_hdr));
/* Now process HTC messages */
switch (be16_to_cpu(*msg_id)) {
case HTC_MSG_READY_ID:
+ if (unlikely(len < sizeof(struct htc_ready_msg)))
+ goto invalid;
htc_process_target_rdy(htc_handle, htc_hdr);
break;
case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ if (unlikely(len < sizeof(struct htc_frame_hdr) +
+ sizeof(struct htc_conn_svc_rspmsg)))
+ goto invalid;
htc_process_conn_rsp(htc_handle, htc_hdr);
break;
default:
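
The htc_hst.c changes above stop trusting the endpoint and message IDs alone and check the received length against the size of each message layout before it is dereferenced (8 bytes for the frame header and ready message, 16 for the bad-vaddr panic record). A stand-alone sketch of that validate-before-cast pattern with simplified layouts:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frame_hdr {		/* 8 bytes, like struct htc_frame_hdr */
	uint8_t endpoint_id;
	uint8_t flags;
	uint16_t payload_len;
	uint8_t ctrl[4];
};

static int rx_msg(const uint8_t *buf, size_t len)
{
	struct frame_hdr hdr;

	if (len < sizeof(hdr))
		return -1;	/* too short to be any valid message */
	memcpy(&hdr, buf, sizeof(hdr));

	if (len < sizeof(hdr) + sizeof(uint16_t))
		return -1;	/* no room for the message id that follows */

	/* per-message checks would follow, comparing len against the size
	 * of the specific layout exactly as the hunk above does for
	 * HTC_MSG_READY_ID and the connect-service response
	 */
	return 0;
}

int main(void)
{
	uint8_t small[4] = { 0 };
	uint8_t ok[16] = { 0 };

	printf("%d %d\n", rx_msg(small, sizeof(small)), rx_msg(ok, sizeof(ok)));
	return 0;
}
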
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 096a206f49ed..450ab19b1d4e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -710,7 +710,7 @@ struct ath_spec_scan {
/**
* struct ath_hw_ops - callbacks used by hardware code and driver code
*
- * This structure contains callbacks designed to to be used internally by
+ * This structure contains callbacks designed to be used internally by
* hardware code and also by the lower level driver.
*
* @config_pci_powersave:
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index ba16a7f3e23d..ba271a10d4ab 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2160,7 +2160,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = an->ps_key;
else
fi->keyix = ATH9K_TXKEYIX_INVALID;
- fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
+ fi->dyn_smps = sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC;
fi->keytype = keytype;
fi->framelen = framelen;
fi->tx_power = txpower;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 1ab09e1c9ec5..4c1aecd1163c 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -105,7 +105,7 @@ static void carl9170_fw_info(struct ar9170 *ar)
CARL9170FW_GET_MONTH(fw_date),
CARL9170FW_GET_DAY(fw_date));
- strlcpy(ar->hw->wiphy->fw_version, motd_desc->release,
+ strscpy(ar->hw->wiphy->fw_version, motd_desc->release,
sizeof(ar->hw->wiphy->fw_version));
}
}
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index f1a43fd1d957..d3a9d00e65e1 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2677,7 +2677,7 @@ struct ani_global_security_stats {
* management information base (MIB) object is enabled */
u32 rx_wep_unencrypted_frm_cnt;
- /* The number of received MSDU packets that that the 802.11 station
+ /* The number of received MSDU packets that the 802.11 station
* discarded because of MIC failures */
u32 rx_mic_fail_cnt;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 8da3955995b6..0802ed728824 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/random.h>
#include "txrx.h"
static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
@@ -278,6 +279,7 @@ static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
struct ieee80211_supported_band *sband;
int idx;
int i;
+ u8 snr_sample = snr & 0xff;
idx = 0;
if (band == NL80211_BAND_5GHZ)
@@ -297,6 +299,8 @@ static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
wcn->chan_survey[idx].rssi = rssi;
wcn->chan_survey[idx].snr = snr;
spin_unlock(&wcn->survey_lock);
+
+ add_device_randomness(&snr_sample, sizeof(snr_sample));
}
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f93bdffa4d1d..40f9a7ef8980 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1620,7 +1620,7 @@ static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
@@ -1696,7 +1696,7 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
@@ -1723,7 +1723,7 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
/* Need to be present or wiphy_new() will WARN */
static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -2072,8 +2072,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
key_params.key = vif->gtk;
key_params.key_len = vif->gtk_len;
key_params.seq_len = IEEE80211_GCMP_PN_LEN;
- rc = wil_cfg80211_add_key(wiphy, ndev, vif->gtk_index, false,
- NULL, &key_params);
+ rc = wil_cfg80211_add_key(wiphy, ndev, -1, vif->gtk_index,
+ false, NULL, &key_params);
if (rc)
wil_err(wil, "vif %d recovery add key failed (%d)\n",
i, rc);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 7da87c9f363f..94e61dbe94f8 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1305,7 +1305,7 @@ void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
board_file = WIL_BOARD_FILE_NAME;
}
- strlcpy(buf, board_file, len);
+ strscpy(buf, board_file, len);
}
static int wil_get_bl_info(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 87a88f26233e..ee7d7e9c2718 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -445,7 +445,7 @@ int wil_if_add(struct wil6210_priv *wil)
wil_dbg_misc(wil, "entered");
- strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
rc = wiphy_register(wiphy);
if (rc < 0) {
@@ -456,14 +456,12 @@ int wil_if_add(struct wil6210_priv *wil)
init_dummy_netdev(&wil->napi_ndev);
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
- wil6210_netdev_poll_rx_edma,
- NAPI_POLL_WEIGHT);
+ wil6210_netdev_poll_rx_edma);
netif_napi_add_tx(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
- wil6210_netdev_poll_rx,
- NAPI_POLL_WEIGHT);
+ wil6210_netdev_poll_rx);
netif_napi_add_tx(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx);
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ea7bd403e706..6a5976a2944c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -780,7 +780,7 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
return; /* FW load will fail after timeout */
}
/* ignore MAC address, we already have it from the boot loader */
- strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) {
wil_dbg_wmi(wil, "rfc calibration result %d\n",
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 0361c8eb2008..45d079b93384 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1518,7 +1518,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->firmware = NULL;
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
- strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
+ strscpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index 982a772a9d87..bfe1be345844 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -118,7 +118,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index aa5c99465674..2c0c019a815d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -2479,11 +2479,7 @@ static void b43_nphy_gain_ctl_workarounds_rev19(struct b43_wldev *dev)
static void b43_nphy_gain_ctl_workarounds_rev7(struct b43_wldev *dev)
{
- struct b43_phy *phy = &dev->phy;
-
- switch (phy->rev) {
- /* TODO */
- }
+ /* TODO - should depend on phy->rev */
}
static void b43_nphy_gain_ctl_workarounds_rev3(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index 38b5be3a84e2..79e6fd205bfb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -88,7 +88,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
led->dev = dev;
led->index = led_index;
led->activelow = activelow;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 2c95a08a5871..9ec0c60b6da1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -87,6 +87,8 @@ struct brcmf_proto_bcdc_header {
* plus any space that might be needed
* for bus alignment padding.
*/
+#define ROUND_UP_MARGIN 2048
+
struct brcmf_bcdc {
u16 reqid;
u8 bus_header[BUS_HEADER_LEN];
@@ -368,8 +370,7 @@ brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
/* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(bcdc->fws)) {
- if (!success)
- brcmf_fws_bustxfail(bcdc->fws, txp);
+ brcmf_fws_bustxcomplete(bcdc->fws, txp, success);
} else {
if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp))
brcmu_pkt_buf_free_skb(txp);
@@ -471,7 +472,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
- sizeof(struct brcmf_proto_bcdc_dcmd);
+ sizeof(struct brcmf_proto_bcdc_dcmd) + ROUND_UP_MARGIN;
return 0;
fail:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index d639bb8b51ae..d0daef674e72 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -983,6 +983,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index ae5af76e2568..2208ab3aa795 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -6,6 +6,8 @@
#ifndef BRCMFMAC_BUS_H
#define BRCMFMAC_BUS_H
+#include <linux/kernel.h>
+#include <linux/firmware.h>
#include "debug.h"
/* IDs of the 6 default common rings of msgbuf protocol */
@@ -34,6 +36,11 @@ enum brcmf_bus_protocol_type {
BRCMF_PROTO_MSGBUF
};
+/* Firmware blobs that may be available */
+enum brcmf_blob_type {
+ BRCMF_BLOB_CLM,
+};
+
struct brcmf_mp_device;
struct brcmf_bus_dcmd {
@@ -60,7 +67,7 @@ struct brcmf_bus_dcmd {
* @wowl_config: specify if dongle is configured for wowl when going to suspend
* @get_ramsize: obtain size of device memory.
* @get_memdump: obtain device memory dump in provided buffer.
- * @get_fwname: obtain firmware name.
+ * @get_blob: obtain a firmware blob.
*
* This structure provides an abstract interface towards the
* bus specific driver. For control messages to common driver
@@ -77,8 +84,8 @@ struct brcmf_bus_ops {
void (*wowl_config)(struct device *dev, bool enabled);
size_t (*get_ramsize)(struct device *dev);
int (*get_memdump)(struct device *dev, void *data, size_t len);
- int (*get_fwname)(struct device *dev, const char *ext,
- unsigned char *fw_name);
+ int (*get_blob)(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type);
void (*debugfs_create)(struct device *dev);
int (*reset)(struct device *dev);
};
@@ -220,10 +227,10 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
}
static inline
-int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext,
- unsigned char *fw_name)
+int brcmf_bus_get_blob(struct brcmf_bus *bus, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
- return bus->ops->get_fwname(bus->dev, ext, fw_name);
+ return bus->ops->get_blob(bus->dev, fw, type);
}
static inline
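
The bus interface above replaces the name-based get_fwname hook with get_blob, which hands back a loaded firmware image for a given blob type. One possible shape of a bus backend's implementation is sketched below; the name-building helper is hypothetical and this is not the actual SDIO/PCIe/USB code from this series:

static int example_bus_get_blob(struct device *dev, const struct firmware **fw,
				enum brcmf_blob_type type)
{
	char name[BRCMF_FW_NAME_LEN];
	int err;

	switch (type) {
	case BRCMF_BLOB_CLM:
		/* hypothetical helper: derive "<fw base name>.clm_blob" */
		err = example_build_blob_name(dev, ".clm_blob",
					      name, sizeof(name));
		break;
	default:
		return -EINVAL;
	}
	if (err)
		return err;

	/* no warning if the blob is absent; the caller treats it as optional */
	return firmware_request_nowarn(fw, name, dev);
}
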
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index db45da33adfd..dfcfb3333369 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2361,7 +2361,8 @@ done:
static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool unicast, bool multicast)
+ int link_id, u8 key_idx, bool unicast,
+ bool multicast)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
@@ -2395,7 +2396,8 @@ done:
static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key *key;
@@ -2432,8 +2434,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -2457,8 +2459,8 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
if (params->key_len == 0)
- return brcmf_cfg80211_del_key(wiphy, ndev, key_idx, pairwise,
- mac_addr);
+ return brcmf_cfg80211_del_key(wiphy, ndev, -1, key_idx,
+ pairwise, mac_addr);
if (params->key_len > sizeof(key->data)) {
bphy_err(drvr, "Too long key length (%u)\n", params->key_len);
@@ -2553,8 +2555,9 @@ done:
}
static s32
-brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
- bool pairwise, const u8 *mac_addr, void *cookie,
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie,
struct key_params *params))
{
@@ -2610,7 +2613,8 @@ done:
static s32
brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_idx)
+ struct net_device *ndev, int link_id,
+ u8 key_idx)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -3160,10 +3164,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = cfg->pub;
- struct brcmf_bss_info_le *bi;
- const struct brcmf_tlv *tim;
- size_t ie_len;
- u8 *ie;
+ struct brcmf_bss_info_le *bi = NULL;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -3177,29 +3178,8 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
bphy_err(drvr, "Could not get bss info %d\n", err);
goto update_bss_info_out;
}
-
bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
err = brcmf_inform_single_bss(cfg, bi);
- if (err)
- goto update_bss_info_out;
-
- ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
- ie_len = le32_to_cpu(bi->ie_length);
-
- tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (!tim) {
- /*
- * active scan was done so we could not get dtim
- * information out of probe response.
- * so we speficially query dtim information to dongle.
- */
- u32 var;
- err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
- if (err) {
- bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err);
- goto update_bss_info_out;
- }
- }
update_bss_info_out:
brcmf_dbg(TRACE, "Exit");
@@ -3984,7 +3964,6 @@ brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
struct brcmf_pmk_list_le *pmk_list;
int i;
u32 npmk;
- s32 err;
pmk_list = &cfg->pmk_list;
npmk = le32_to_cpu(pmk_list->npmk);
@@ -3993,10 +3972,8 @@ brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
for (i = 0; i < npmk; i++)
brcmf_dbg(CONN, "PMK[%d]: %pM\n", i, &pmk_list->pmk[i].bssid);
- err = brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
- sizeof(*pmk_list));
-
- return err;
+ return brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
+ sizeof(*pmk_list));
}
static s32
@@ -5042,13 +5019,10 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_beacon_data *info)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err;
brcmf_dbg(TRACE, "Enter\n");
- err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
-
- return err;
+ return brcmf_config_ap_mgmt_ie(ifp->vif, info);
}
static int
@@ -6431,6 +6405,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
cfg->dongle_up = false; /* dongle down */
brcmf_abort_scanning(cfg);
brcmf_deinit_priv_mem(cfg);
+ brcmf_clear_assoc_ies(cfg);
}
static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
@@ -7485,6 +7460,7 @@ static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
return true;
switch (drvr->bus_if->chip) {
+ case BRCM_CC_43430_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
return true;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 4ec7773b6906..121893bbaa1d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -641,6 +641,7 @@ static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
*srsize = (32 * 1024);
break;
case BRCM_CC_43430_CHIP_ID:
+ case CY_CC_43439_CHIP_ID:
/* assume sr for now as we can not check
* firmware sr capability at this point.
*/
@@ -732,6 +733,10 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x160000;
case CY_CC_43752_CHIP_ID:
return 0x170000;
+ case BRCM_CC_4378_CHIP_ID:
+ return 0x352000;
+ case CY_CC_89459_CHIP_ID:
+ return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
@@ -1258,7 +1263,8 @@ brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
brcmf_chip_resetcore(core, 0, 0, 0);
/* disable bank #3 remap for this device */
- if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+ if (chip->pub.chip == BRCM_CC_43430_CHIP_ID ||
+ chip->pub.chip == CY_CC_43439_CHIP_ID) {
sr = container_of(core, struct brcmf_core_priv, pub);
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
@@ -1416,10 +1422,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
reg = chip->ops->read32(chip->ctx, addr);
return (reg & pmu_cc3_mask) != 0;
case BRCM_CC_43430_CHIP_ID:
+ case CY_CC_43439_CHIP_ID:
addr = CORE_CC_REG(base, sr_control1);
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
case CY_CC_4373_CHIP_ID:
+ case CY_CC_89459_CHIP_ID:
/* explicitly check SR engine enable bit */
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 7485e784be2a..74020fa10065 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -123,7 +123,6 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
struct brcmf_bus *bus = drvr->bus_if;
struct brcmf_dload_data_le *chunk_buf;
const struct firmware *clm = NULL;
- u8 clm_name[BRCMF_FW_NAME_LEN];
u32 chunk_len;
u32 datalen;
u32 cumulative_len;
@@ -133,15 +132,8 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
brcmf_dbg(TRACE, "Enter\n");
- memset(clm_name, 0, sizeof(clm_name));
- err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name);
- if (err) {
- bphy_err(drvr, "get CLM blob file name failed (%d)\n", err);
- return err;
- }
-
- err = firmware_request_nowarn(&clm, clm_name, bus->dev);
- if (err) {
+ err = brcmf_bus_get_blob(bus, &clm, BRCMF_BLOB_CLM);
+ if (err || !clm) {
brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
err);
return 0;
@@ -261,7 +253,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
&revinfo, sizeof(revinfo));
if (err < 0) {
bphy_err(drvr, "retrieving revision info failed, %d\n", err);
- strlcpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
+ strscpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
} else {
ri->vendorid = le32_to_cpu(revinfo.vendorid);
ri->deviceid = le32_to_cpu(revinfo.deviceid);
@@ -314,7 +306,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* locate firmware version number for ethtool */
ptr = strrchr(buf, ' ') + 1;
- strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+ strscpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
/* Query for 'clmver' to get CLM version info from firmware */
memset(buf, 0, sizeof(buf));
@@ -424,11 +416,11 @@ static void brcmf_mp_attach(void)
* if not set then if available use the platform data version. To make
* sure it gets initialized at all, always copy the module param version
*/
- strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
+ strscpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
BRCMF_FW_ALTPATH_LEN);
if ((brcmfmac_pdata) && (brcmfmac_pdata->fw_alternative_path) &&
(brcmf_mp_global.firmware_path[0] == '\0')) {
- strlcpy(brcmf_mp_global.firmware_path,
+ strscpy(brcmf_mp_global.firmware_path,
brcmfmac_pdata->fw_alternative_path,
BRCMF_FW_ALTPATH_LEN);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 6c5a22a32a96..aa25abffcc7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -53,6 +53,7 @@ struct brcmf_mp_device {
struct brcmfmac_pd_cc *country_codes;
const char *board_type;
unsigned char mac[ETH_ALEN];
+ const char *antenna_sku;
union {
struct brcmfmac_sdio_pd sdio;
} bus;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index bd164a0821f9..595ae3ae561e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -292,6 +292,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
int head_delta;
+ unsigned int tx_bytes = skb->len;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -366,7 +367,7 @@ done:
ndev->stats.tx_dropped++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += tx_bytes;
}
/* Return ok: we always eat the packet */
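
The start_xmit hunk above snapshots skb->len into tx_bytes before the packet is handed down, presumably because the skb may already have been consumed (and freed) by the time the byte counter is updated. The same defensive pattern in a stand-alone sketch:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	unsigned int len;
	unsigned char *data;
};

/* consumes (frees) the packet, like handing an skb to the lower layer */
static int queue_tx(struct pkt *p)
{
	free(p->data);
	free(p);
	return 0;
}

static void xmit(struct pkt *p, unsigned long *tx_bytes)
{
	unsigned int len = p->len;	/* snapshot before the packet is consumed */
	int ret = queue_tx(p);

	if (ret == 0)
		*tx_bytes += len;	/* safe: does not touch the freed packet */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned long tx_bytes = 0;

	if (!p)
		return 1;
	p->len = 1500;
	p->data = malloc(p->len);
	xmit(p, &tx_bytes);
	printf("tx_bytes=%lu\n", tx_bytes);
	return 0;
}
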
@@ -561,10 +562,10 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
if (drvr->revinfo.result == 0)
brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, drev, sizeof(info->version));
- strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
- strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, drev, sizeof(info->version));
+ strscpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
+ strscpy(info->bus_info, dev_name(drvr->bus_if->dev),
sizeof(info->bus_info));
}
@@ -1480,8 +1481,10 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
!brcmf_get_pend_8021x_cnt(ifp),
MAX_WAIT_FOR_8021X_TX);
- if (!err)
+ if (!err) {
bphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n");
+ atomic_set(&ifp->pend_8021x_cnt, 0);
+ }
return !err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 0af452dca766..86ff174936a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -24,6 +24,13 @@ static const struct brcmf_dmi_data acepc_t8_data = {
BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
};
+/* The Chuwi Hi8 Pro uses the same Ampak AP6212 module as the Chuwi Vi8 Plus
+ * and the nvram for the Vi8 Plus is already in linux-firmware, so use that.
+ */
+static const struct brcmf_dmi_data chuwi_hi8_pro_data = {
+ BRCM_CC_43430_CHIP_ID, 0, "ilife-S806"
+};
+
static const struct brcmf_dmi_data gpd_win_pocket_data = {
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
};
@@ -76,6 +83,17 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "MRD"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/10/2016"),
+ },
+ .driver_data = (void *)&chuwi_hi8_pro_data,
+ },
+ {
/* Cyberbook T116 rugged tablet */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index d2ac844e1e9f..2c2f3e026c13 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -249,7 +249,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
- drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID)
+ drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID &&
+ drvr->bus_if->chip != CY_CC_43439_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index b8379e4034a4..f2207793f6e2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -21,6 +21,8 @@
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
#define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff"
+#define BRCMF_FW_MACADDR_FMT "macaddr=%pM"
+#define BRCMF_FW_MACADDR_LEN (7 + ETH_ALEN * 3)
enum nvram_parser_state {
IDLE,
@@ -44,6 +46,7 @@ enum nvram_parser_state {
* @multi_dev_v1: detect pcie multi device v1 (compressed).
* @multi_dev_v2: detect pcie multi device v2.
* @boardrev_found: nvram contains boardrev information.
+ * @strip_mac: strip the MAC address.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -57,6 +60,7 @@ struct nvram_parser {
bool multi_dev_v1;
bool multi_dev_v2;
bool boardrev_found;
+ bool strip_mac;
};
/*
@@ -121,6 +125,10 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
nvp->multi_dev_v2 = true;
if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
nvp->boardrev_found = true;
+ /* strip macaddr if platform MAC overrides */
+ if (nvp->strip_mac &&
+ strncmp(&nvp->data[nvp->entry], "macaddr", 7) == 0)
+ st = COMMENT;
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
@@ -209,6 +217,7 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
size = data_len;
/* Add space for properties we may add */
size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
+ size += BRCMF_FW_MACADDR_LEN + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
@@ -368,22 +377,37 @@ static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
nvp->nvram_len++;
}
+static void brcmf_fw_add_macaddr(struct nvram_parser *nvp, u8 *mac)
+{
+ int len;
+
+ len = scnprintf(&nvp->nvram[nvp->nvram_len], BRCMF_FW_MACADDR_LEN + 1,
+ BRCMF_FW_MACADDR_FMT, mac);
+ WARN_ON(len != BRCMF_FW_MACADDR_LEN);
+ nvp->nvram_len += len + 1;
+}
+
/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
* End of buffer is completed with token identifying length of buffer.
*/
static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
- u32 *new_length, u16 domain_nr, u16 bus_nr)
+ u32 *new_length, u16 domain_nr, u16 bus_nr,
+ struct device *dev)
{
struct nvram_parser nvp;
u32 pad;
u32 token;
__le32 token_le;
+ u8 mac[ETH_ALEN];
if (brcmf_init_nvram_parser(&nvp, data, data_len) < 0)
return NULL;
+ if (eth_platform_get_mac_address(dev, mac) == 0)
+ nvp.strip_mac = true;
+
while (nvp.pos < data_len) {
nvp.state = nv_parser_states[nvp.state](&nvp);
if (nvp.state == END)
@@ -404,6 +428,9 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
brcmf_fw_add_defaults(&nvp);
+ if (nvp.strip_mac)
+ brcmf_fw_add_macaddr(&nvp, mac);
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
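The net effect of the strip_mac handling above: when eth_platform_get_mac_address() supplies a MAC, any "macaddr=..." line coming from the nvram file is demoted to a comment (dropped), and brcmf_fw_add_macaddr() appends a fresh entry after the defaults. A minimal userspace sketch of that behaviour, assuming plain C strings stand in for the parser's NUL-separated entries and snprintf() for the kernel's scnprintf("%pM"):

/*
 * Userspace sketch only -- not part of the patch. Entries whose key is
 * "macaddr" are skipped when a platform MAC exists, then a new entry
 * built from that MAC is emitted last.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static int keep_entry(const char *entry, int have_platform_mac)
{
	/* Drop the file-provided macaddr when the platform overrides it */
	return !(have_platform_mac && strncmp(entry, "macaddr=", 8) == 0);
}

int main(void)
{
	const char *entries[] = { "boardrev=0x1203", "macaddr=00:11:22:33:44:55" };
	unsigned char mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	char out[64];
	size_t i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		if (keep_entry(entries[i], 1))
			printf("%s\n", entries[i]);

	/* Equivalent of brcmf_fw_add_macaddr(): append the platform MAC */
	snprintf(out, sizeof(out), "macaddr=%02x:%02x:%02x:%02x:%02x:%02x",
		 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	printf("%s\n", out);
	return 0;
}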
@@ -430,6 +457,7 @@ struct brcmf_fw {
struct device *dev;
struct brcmf_fw_request *req;
u32 curpos;
+ unsigned int board_index;
void (*done)(struct device *dev, int err, struct brcmf_fw_request *req);
};
@@ -537,7 +565,8 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (data)
nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
fwctx->req->domain_nr,
- fwctx->req->bus_nr);
+ fwctx->req->bus_nr,
+ fwctx->dev);
if (free_bcm47xx_nvram)
bcm47xx_nvram_release_contents(data);
@@ -587,39 +616,50 @@ static int brcmf_fw_complete_request(const struct firmware *fw,
static char *brcm_alt_fw_path(const char *path, const char *board_type)
{
- char alt_path[BRCMF_FW_NAME_LEN];
- char suffix[5];
+ char base[BRCMF_FW_NAME_LEN];
+ const char *suffix;
+ char *ret;
+
+ if (!board_type)
+ return NULL;
- strscpy(alt_path, path, BRCMF_FW_NAME_LEN);
- /* At least one character + suffix */
- if (strlen(alt_path) < 5)
+ suffix = strrchr(path, '.');
+ if (!suffix || suffix == path)
return NULL;
- /* strip .txt or .bin at the end */
- strscpy(suffix, alt_path + strlen(alt_path) - 4, 5);
- alt_path[strlen(alt_path) - 4] = 0;
- strlcat(alt_path, ".", BRCMF_FW_NAME_LEN);
- strlcat(alt_path, board_type, BRCMF_FW_NAME_LEN);
- strlcat(alt_path, suffix, BRCMF_FW_NAME_LEN);
+ /* strip extension at the end */
+ strscpy(base, path, BRCMF_FW_NAME_LEN);
+ base[suffix - path] = 0;
- return kstrdup(alt_path, GFP_KERNEL);
+ ret = kasprintf(GFP_KERNEL, "%s.%s%s", base, board_type, suffix);
+ if (!ret)
+ brcmf_err("out of memory allocating firmware path for '%s'\n",
+ path);
+
+ brcmf_dbg(TRACE, "FW alt path: %s\n", ret);
+
+ return ret;
}
static int brcmf_fw_request_firmware(const struct firmware **fw,
struct brcmf_fw *fwctx)
{
struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+ unsigned int i;
int ret;
- /* Files can be board-specific, first try a board-specific path */
- if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
+ /* Files can be board-specific, first try board-specific paths */
+ for (i = 0; i < ARRAY_SIZE(fwctx->req->board_types); i++) {
char *alt_path;
- alt_path = brcm_alt_fw_path(cur->path, fwctx->req->board_type);
+ if (!fwctx->req->board_types[i])
+ goto fallback;
+ alt_path = brcm_alt_fw_path(cur->path,
+ fwctx->req->board_types[i]);
if (!alt_path)
goto fallback;
- ret = request_firmware(fw, alt_path, fwctx->dev);
+ ret = firmware_request_nowarn(fw, alt_path, fwctx->dev);
kfree(alt_path);
if (ret == 0)
return ret;
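For reference, the rewritten brcm_alt_fw_path() derives the board-specific name by splicing the board type in front of whatever extension the path carries, so it works for ".clm_blob" as well as ".bin"/".txt". A rough userspace sketch of the same string surgery, assuming a fixed-size buffer instead of kasprintf() and using the "acepc-t8" board type from the DMI table as the example input:

/*
 * Userspace sketch only -- mirrors the extension split done above:
 * "brcm/brcmfmac43455-sdio.txt" + "acepc-t8"
 *   -> "brcm/brcmfmac43455-sdio.acepc-t8.txt"
 */
#include <stdio.h>
#include <string.h>

static int alt_fw_path(char *out, size_t outsz,
		       const char *path, const char *board_type)
{
	const char *suffix = strrchr(path, '.');
	int n;

	if (!board_type || !suffix || suffix == path)
		return -1;

	/* Splice the board type between the basename and its extension */
	n = snprintf(out, outsz, "%.*s.%s%s",
		     (int)(suffix - path), path, board_type, suffix);

	return (n > 0 && (size_t)n < outsz) ? 0 : -1;
}

int main(void)
{
	char buf[128];

	if (!alt_fw_path(buf, sizeof(buf),
			 "brcm/brcmfmac43455-sdio.txt", "acepc-t8"))
		printf("%s\n", buf);
	return 0;
}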
@@ -653,15 +693,40 @@ static void brcmf_fw_request_done_alt_path(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
struct brcmf_fw_item *first = &fwctx->req->items[0];
+ const char *board_type, *alt_path;
int ret = 0;
- /* Fall back to canonical path if board firmware not found */
- if (!fw)
- ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ if (fw) {
+ brcmf_fw_request_done(fw, ctx);
+ return;
+ }
+
+ /* Try next board firmware */
+ if (fwctx->board_index < ARRAY_SIZE(fwctx->req->board_types)) {
+ board_type = fwctx->req->board_types[fwctx->board_index++];
+ if (!board_type)
+ goto fallback;
+ alt_path = brcm_alt_fw_path(first->path, board_type);
+ if (!alt_path)
+ goto fallback;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
- brcmf_fw_request_done);
+ brcmf_fw_request_done_alt_path);
+ kfree(alt_path);
+
+ if (ret < 0)
+ brcmf_fw_request_done(fw, ctx);
+ return;
+ }
- if (fw || ret < 0)
+fallback:
+ /* Fall back to canonical path if board firmware not found */
+ ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_done);
+
+ if (ret < 0)
brcmf_fw_request_done(fw, ctx);
}
@@ -705,10 +770,11 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- if (fwctx->req->board_type)
+ if (fwctx->req->board_types[0])
alt_path = brcm_alt_fw_path(first->path,
- fwctx->req->board_type);
+ fwctx->req->board_types[0]);
if (alt_path) {
+ fwctx->board_index++;
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_done_alt_path);
@@ -769,7 +835,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
fwnames[j].path[0] = '\0';
/* check if firmware path is provided by module parameter */
if (brcmf_mp_global.firmware_path[0] != '\0') {
- strlcpy(fwnames[j].path, mp_path,
+ strscpy(fwnames[j].path, mp_path,
BRCMF_FW_NAME_LEN);
if (end != '/') {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index e290dec9c53d..1266cbaee072 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -11,6 +11,8 @@
#define BRCMF_FW_DEFAULT_PATH "brcm/"
+#define BRCMF_FW_MAX_BOARD_TYPES 8
+
/**
* struct brcmf_firmware_mapping - Used to map chipid/revmask to firmware
* filename and nvram filename. Each bus type implementation should create
@@ -66,7 +68,7 @@ struct brcmf_fw_request {
u16 domain_nr;
u16 bus_nr;
u32 n_items;
- const char *board_type;
+ const char *board_types[BRCMF_FW_MAX_BOARD_TYPES];
struct brcmf_fw_item items[];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 096f6b969dd8..e1127d7e086d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -419,7 +419,6 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
flowid = flow->hash[i].flowid;
if (flow->rings[flowid]->status != RING_OPEN)
continue;
- flow->rings[flowid]->status = RING_CLOSING;
brcmf_msgbuf_delete_flowring(drvr, flowid);
}
}
@@ -458,10 +457,8 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
(hash[i].ifidx == ifidx)) {
flowid = flow->hash[i].flowid;
- if (flow->rings[flowid]->status == RING_OPEN) {
- flow->rings[flowid]->status = RING_CLOSING;
+ if (flow->rings[flowid]->status == RING_OPEN)
brcmf_msgbuf_delete_flowring(drvr, flowid);
- }
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index c87b829adb0d..f518e025d6e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -135,7 +135,7 @@
/* Link Down indication in WoWL mode: */
#define BRCMF_WOWL_LINKDOWN (1 << 31)
-#define BRCMF_WOWL_MAXPATTERNS 8
+#define BRCMF_WOWL_MAXPATTERNS 16
#define BRCMF_WOWL_MAXPATTERNSIZE 128
#define BRCMF_COUNTRY_BUF_SZ 4
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index d58525ebe618..36af81975855 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -688,7 +688,7 @@ static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
struct brcmf_fws_mac_descriptor *desc)
{
if (desc == &fws->desc.other)
- strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
+ strscpy(desc->name, "MAC-OTHER", sizeof(desc->name));
else if (desc->mac_handle)
scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
desc->mac_handle, desc->interface_id);
@@ -2475,7 +2475,8 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}
-void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
+void brcmf_fws_bustxcomplete(struct brcmf_fws_info *fws, struct sk_buff *skb,
+ bool success)
{
u32 hslot;
@@ -2483,11 +2484,14 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
brcmu_pkt_buf_free_skb(skb);
return;
}
- brcmf_fws_lock(fws);
- hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
- brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0,
- 1);
- brcmf_fws_unlock(fws);
+
+ if (!success) {
+ brcmf_fws_lock(fws);
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot,
+ 0, 0, 1);
+ brcmf_fws_unlock(fws);
+ }
}
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index b16a9d1c0508..f9c36cd8f1de 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -40,7 +40,8 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_fws_reset_interface(struct brcmf_if *ifp);
void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
-void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bustxcomplete(struct brcmf_fws_info *fws, struct sk_buff *skb,
+ bool success);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index b2d0f7570aa9..cec53f934940 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -71,6 +71,7 @@
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS 48
+#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES 10
struct msgbuf_common_hdr {
u8 msgtype;
@@ -806,8 +807,12 @@ static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
- if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ if (flowid == BRCMF_FLOWRING_INVALID_ID) {
return -ENOMEM;
+ } else {
+ brcmf_flowring_enqueue(flow, flowid, skb);
+ return 0;
+ }
}
queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
@@ -1395,9 +1400,27 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct msgbuf_tx_flowring_delete_req *delete;
struct brcmf_commonring *commonring;
+ struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
+ struct brcmf_flowring *flow = msgbuf->flow;
void *ret_ptr;
u8 ifidx;
int err;
+ int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;
+
+ /* make sure it is not in txflow */
+ brcmf_commonring_lock(commonring_del);
+ flow->rings[flowid]->status = RING_CLOSING;
+ brcmf_commonring_unlock(commonring_del);
+
+ /* wait for commonring txflow finished */
+ while (retry && atomic_read(&commonring_del->outstanding_tx)) {
+ usleep_range(5000, 10000);
+ retry--;
+ }
+ if (!retry) {
+ brcmf_err("timed out waiting for txstatus\n");
+ atomic_set(&commonring_del->outstanding_tx, 0);
+ }
/* no need to submit if firmware can not be reached */
if (drvr->bus_if->state != BRCMF_BUS_UP) {
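The new delete path marks the ring RING_CLOSING under the commonring lock and then polls outstanding_tx for at most BRCMF_MAX_TXSTATUS_WAIT_RETRIES sleeps before clamping the counter. A hedged userspace sketch of that bounded-wait pattern, assuming C11 atomics, usleep() and a pthread worker in place of atomic_read(), usleep_range() and the firmware returning txstatus:

/* Userspace sketch only -- demonstrates the bounded retry loop. */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

#define MAX_TXSTATUS_WAIT_RETRIES 10

static atomic_int outstanding_tx = 5;

static void *worker(void *arg)
{
	(void)arg;
	while (atomic_load(&outstanding_tx) > 0) {
		usleep(2000);			/* pretend a txstatus arrived */
		atomic_fetch_sub(&outstanding_tx, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int retry = MAX_TXSTATUS_WAIT_RETRIES;

	pthread_create(&t, NULL, worker, NULL);

	/* wait for the counter to drain, but only for a bounded time */
	while (retry && atomic_load(&outstanding_tx)) {
		usleep(5000);			/* usleep_range(5000, 10000) */
		retry--;
	}
	if (!retry) {
		fprintf(stderr, "timed out waiting for txstatus\n");
		atomic_store(&outstanding_tx, 0);
	}

	pthread_join(t, NULL);
	printf("outstanding_tx=%d\n", atomic_load(&outstanding_tx));
	return 0;
}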
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 2e322edbb907..6a849f4a94dd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 1024
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64
#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 1024
#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 79388d49c256..a83699de01ec 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -70,14 +70,24 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
+ const char *prop;
int irq;
int err;
u32 irqf;
u32 val;
+ /* Apple ARM64 platforms have their own idea of board type, passed in
+ * via the device tree. They also have an antenna SKU parameter.
+ */
+ if (!of_property_read_string(np, "brcm,board-type", &prop))
+ settings->board_type = prop;
+
+ if (!of_property_read_string(np, "apple,antenna-sku", &prop))
+ settings->antenna_sku = prop;
+
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
- if (root) {
+ if (root && !settings->board_type) {
char *board_type;
const char *tmp;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 97f0f13dfe50..80083f9ea311 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -59,6 +59,8 @@ BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
+BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
@@ -66,6 +68,7 @@ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.clm_blob");
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
@@ -87,6 +90,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
+ BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
+ BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
@@ -118,6 +123,12 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
+#define BRCMF_PCIE_64_PCIE2REG_INTMASK 0xC14
+#define BRCMF_PCIE_64_PCIE2REG_MAILBOXINT 0xC30
+#define BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK 0xC34
+#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0 0xA20
+#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1 0xA24
+
#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02
@@ -137,6 +148,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
+#define BRCMF_PCIE_MB_INT_FN0 (BRCMF_PCIE_MB_INT_FN0_0 | \
+ BRCMF_PCIE_MB_INT_FN0_1)
#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
BRCMF_PCIE_MB_INT_D2H0_DB1 | \
BRCMF_PCIE_MB_INT_D2H1_DB0 | \
@@ -146,6 +159,40 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
+#define BRCMF_PCIE_64_MB_INT_D2H0_DB0 0x1
+#define BRCMF_PCIE_64_MB_INT_D2H0_DB1 0x2
+#define BRCMF_PCIE_64_MB_INT_D2H1_DB0 0x4
+#define BRCMF_PCIE_64_MB_INT_D2H1_DB1 0x8
+#define BRCMF_PCIE_64_MB_INT_D2H2_DB0 0x10
+#define BRCMF_PCIE_64_MB_INT_D2H2_DB1 0x20
+#define BRCMF_PCIE_64_MB_INT_D2H3_DB0 0x40
+#define BRCMF_PCIE_64_MB_INT_D2H3_DB1 0x80
+#define BRCMF_PCIE_64_MB_INT_D2H4_DB0 0x100
+#define BRCMF_PCIE_64_MB_INT_D2H4_DB1 0x200
+#define BRCMF_PCIE_64_MB_INT_D2H5_DB0 0x400
+#define BRCMF_PCIE_64_MB_INT_D2H5_DB1 0x800
+#define BRCMF_PCIE_64_MB_INT_D2H6_DB0 0x1000
+#define BRCMF_PCIE_64_MB_INT_D2H6_DB1 0x2000
+#define BRCMF_PCIE_64_MB_INT_D2H7_DB0 0x4000
+#define BRCMF_PCIE_64_MB_INT_D2H7_DB1 0x8000
+
+#define BRCMF_PCIE_64_MB_INT_D2H_DB (BRCMF_PCIE_64_MB_INT_D2H0_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H0_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H1_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H1_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H2_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H2_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H3_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H3_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H4_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H4_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H5_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H5_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H6_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H6_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H7_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H7_DB1)
+
#define BRCMF_PCIE_SHARED_VERSION_7 7
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
@@ -255,12 +302,24 @@ struct brcmf_pcie_core_info {
u32 wrapbase;
};
+#define BRCMF_OTP_MAX_PARAM_LEN 16
+
+struct brcmf_otp_params {
+ char module[BRCMF_OTP_MAX_PARAM_LEN];
+ char vendor[BRCMF_OTP_MAX_PARAM_LEN];
+ char version[BRCMF_OTP_MAX_PARAM_LEN];
+ bool valid;
+};
+
struct brcmf_pciedev_info {
enum brcmf_pcie_state state;
bool in_irq;
struct pci_dev *pdev;
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
+ char clm_name[BRCMF_FW_NAME_LEN];
+ const struct firmware *clm_fw;
+ const struct brcmf_pcie_reginfo *reginfo;
void __iomem *regs;
void __iomem *tcm;
u32 ram_base;
@@ -280,6 +339,7 @@ struct brcmf_pciedev_info {
void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value);
struct brcmf_mp_device *settings;
+ struct brcmf_otp_params otp;
};
struct brcmf_pcie_ringbuf {
@@ -346,11 +406,49 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
+struct brcmf_pcie_reginfo {
+ u32 intmask;
+ u32 mailboxint;
+ u32 mailboxmask;
+ u32 h2d_mailbox_0;
+ u32 h2d_mailbox_1;
+ u32 int_d2h_db;
+ u32 int_fn0;
+};
+
+static const struct brcmf_pcie_reginfo brcmf_reginfo_default = {
+ .intmask = BRCMF_PCIE_PCIE2REG_INTMASK,
+ .mailboxint = BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ .mailboxmask = BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+ .h2d_mailbox_0 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0,
+ .h2d_mailbox_1 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1,
+ .int_d2h_db = BRCMF_PCIE_MB_INT_D2H_DB,
+ .int_fn0 = BRCMF_PCIE_MB_INT_FN0,
+};
+
+static const struct brcmf_pcie_reginfo brcmf_reginfo_64 = {
+ .intmask = BRCMF_PCIE_64_PCIE2REG_INTMASK,
+ .mailboxint = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT,
+ .mailboxmask = BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK,
+ .h2d_mailbox_0 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0,
+ .h2d_mailbox_1 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1,
+ .int_d2h_db = BRCMF_PCIE_64_MB_INT_D2H_DB,
+ .int_fn0 = 0,
+};
+
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq);
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
+static u16
+brcmf_pcie_read_reg16(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+{
+ void __iomem *address = devinfo->regs + reg_offset;
+
+ return ioread16(address);
+}
+
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@@ -496,6 +594,8 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
+#define READCC32(devinfo, reg) brcmf_pcie_read_reg32(devinfo, \
+ CHIPCREGOFFS(reg))
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
CHIPCREGOFFS(reg), value)
@@ -779,30 +879,29 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
{
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0);
}
static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
{
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
- BRCMF_PCIE_MB_INT_D2H_DB |
- BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask,
+ devinfo->reginfo->int_d2h_db |
+ devinfo->reginfo->int_fn0);
}
static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
{
if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
brcmf_pcie_write_reg32(devinfo,
- BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
+ devinfo->reginfo->h2d_mailbox_1, 1);
}
static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
- if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
+ if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) {
brcmf_pcie_intr_disable(devinfo);
brcmf_dbg(PCIE, "Enter\n");
return IRQ_WAKE_THREAD;
@@ -817,15 +916,14 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
u32 status;
devinfo->in_irq = true;
- status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
brcmf_dbg(PCIE, "Enter %x\n", status);
if (status) {
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint,
status);
- if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1))
+ if (status & devinfo->reginfo->int_fn0)
brcmf_pcie_handle_mb_data(devinfo);
- if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
+ if (status & devinfo->reginfo->int_d2h_db) {
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
brcmf_proto_msgbuf_rx_trigger(
&devinfo->pdev->dev);
@@ -884,8 +982,8 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
if (devinfo->in_irq)
brcmf_err(bus, "Still in IRQ (processing) !!!\n");
- status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
+ status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status);
devinfo->irq_allocated = false;
}
@@ -937,7 +1035,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
brcmf_dbg(PCIE, "RING !\n");
/* Any arbitrary value will do, let's use 1 */
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1);
return 0;
}
@@ -1382,23 +1480,25 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
return 0;
}
-static
-int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_pcie_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
- fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
- brcmf_pcie_fwnames,
- ARRAY_SIZE(brcmf_pcie_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
+ switch (type) {
+ case BRCMF_BLOB_CLM:
+ *fw = devinfo->clm_fw;
+ devinfo->clm_fw = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!*fw)
+ return -ENOENT;
- kfree(fwreq);
return 0;
}
@@ -1445,7 +1545,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.wowl_config = brcmf_pcie_wowl_config,
.get_ramsize = brcmf_pcie_get_ramsize,
.get_memdump = brcmf_pcie_get_memdump,
- .get_fwname = brcmf_pcie_get_fwname,
+ .get_blob = brcmf_pcie_get_blob,
.reset = brcmf_pcie_reset,
};
@@ -1698,15 +1798,22 @@ static int brcmf_pcie_buscoreprep(void *ctx)
static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
- u32 val;
+ struct brcmf_core *core;
+ u32 val, reg;
devinfo->ci = chip;
brcmf_pcie_reset_device(devinfo);
- val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ /* reginfo is not ready yet */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_PCIE2);
+ if (core->rev >= 64)
+ reg = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT;
+ else
+ reg = BRCMF_PCIE_PCIE2REG_MAILBOXINT;
+
+ val = brcmf_pcie_read_reg32(devinfo, reg);
if (val != 0xffffffff)
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
- val);
+ brcmf_pcie_write_reg32(devinfo, reg, val);
return 0;
}
@@ -1729,8 +1836,206 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.write32 = brcmf_pcie_buscore_write32,
};
+#define BRCMF_OTP_SYS_VENDOR 0x15
+#define BRCMF_OTP_BRCM_CIS 0x80
+
+#define BRCMF_OTP_VENDOR_HDR 0x00000008
+
+static int
+brcmf_pcie_parse_otp_sys_vendor(struct brcmf_pciedev_info *devinfo,
+ u8 *data, size_t size)
+{
+ int idx = 4;
+ const char *chip_params;
+ const char *board_params;
+ const char *p;
+
+ /* 4-byte header and two empty strings */
+ if (size < 6)
+ return -EINVAL;
+
+ if (get_unaligned_le32(data) != BRCMF_OTP_VENDOR_HDR)
+ return -EINVAL;
+
+ chip_params = &data[idx];
+
+ /* Skip first string, including terminator */
+ idx += strnlen(chip_params, size - idx) + 1;
+ if (idx >= size)
+ return -EINVAL;
+
+ board_params = &data[idx];
+
+ /* Skip to terminator of second string */
+ idx += strnlen(board_params, size - idx);
+ if (idx >= size)
+ return -EINVAL;
+
+ /* At this point both strings are guaranteed NUL-terminated */
+ brcmf_dbg(PCIE, "OTP: chip_params='%s' board_params='%s'\n",
+ chip_params, board_params);
+
+ p = skip_spaces(board_params);
+ while (*p) {
+ char tag = *p++;
+ const char *end;
+ size_t len;
+
+ if (*p++ != '=') /* implicit NUL check */
+ return -EINVAL;
+
+ /* *p might be NUL here, if so end == p and len == 0 */
+ end = strchrnul(p, ' ');
+ len = end - p;
+
+ /* leave 1 byte for NUL in destination string */
+ if (len > (BRCMF_OTP_MAX_PARAM_LEN - 1))
+ return -EINVAL;
+
+ /* Copy len characters plus a NUL terminator */
+ switch (tag) {
+ case 'M':
+ strscpy(devinfo->otp.module, p, len + 1);
+ break;
+ case 'V':
+ strscpy(devinfo->otp.vendor, p, len + 1);
+ break;
+ case 'm':
+ strscpy(devinfo->otp.version, p, len + 1);
+ break;
+ }
+
+ /* Skip to next arg, if any */
+ p = skip_spaces(end);
+ }
+
+ brcmf_dbg(PCIE, "OTP: module=%s vendor=%s version=%s\n",
+ devinfo->otp.module, devinfo->otp.vendor,
+ devinfo->otp.version);
+
+ if (!devinfo->otp.module[0] ||
+ !devinfo->otp.vendor[0] ||
+ !devinfo->otp.version[0])
+ return -EINVAL;
+
+ devinfo->otp.valid = true;
+ return 0;
+}
+
+static int
+brcmf_pcie_parse_otp(struct brcmf_pciedev_info *devinfo, u8 *otp, size_t size)
+{
+ int p = 0;
+ int ret = -EINVAL;
+
+ brcmf_dbg(PCIE, "parse_otp size=%zd\n", size);
+
+ while (p < (size - 1)) {
+ u8 type = otp[p];
+ u8 length = otp[p + 1];
+
+ if (type == 0)
+ break;
+
+ if ((p + 2 + length) > size)
+ break;
+
+ switch (type) {
+ case BRCMF_OTP_SYS_VENDOR:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): SYS_VENDOR\n",
+ p, length);
+ ret = brcmf_pcie_parse_otp_sys_vendor(devinfo,
+ &otp[p + 2],
+ length);
+ break;
+ case BRCMF_OTP_BRCM_CIS:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): BRCM_CIS\n",
+ p, length);
+ break;
+ default:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): Unknown type 0x%x\n",
+ p, length, type);
+ break;
+ }
+
+ p += 2 + length;
+ }
+
+ return ret;
+}
+
+static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
+{
+ const struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
+ u32 coreid, base, words, idx, sromctl;
+ u16 *otp;
+ struct brcmf_core *core;
+ int ret;
+
+ switch (devinfo->ci->chip) {
+ case BRCM_CC_4378_CHIP_ID:
+ coreid = BCMA_CORE_GCI;
+ base = 0x1120;
+ words = 0x170;
+ break;
+ default:
+ /* OTP not supported on this chip */
+ return 0;
+ }
+
+ core = brcmf_chip_get_core(devinfo->ci, coreid);
+ if (!core) {
+ brcmf_err(bus, "No OTP core\n");
+ return -ENODEV;
+ }
+
+ if (coreid == BCMA_CORE_CHIPCOMMON) {
+ /* Chips with OTP accessed via ChipCommon need additional
+ * handling to access the OTP
+ */
+ brcmf_pcie_select_core(devinfo, coreid);
+ sromctl = READCC32(devinfo, sromcontrol);
+
+ if (!(sromctl & BCMA_CC_SROM_CONTROL_OTP_PRESENT)) {
+ /* Chip lacks OTP, try without it... */
+ brcmf_err(bus,
+ "OTP unavailable, using default firmware\n");
+ return 0;
+ }
+
+ /* Map OTP to shadow area */
+ WRITECC32(devinfo, sromcontrol,
+ sromctl | BCMA_CC_SROM_CONTROL_OTPSEL);
+ }
+
+ otp = kcalloc(words, sizeof(u16), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ /* Map bus window to SROM/OTP shadow area in core */
+ base = brcmf_pcie_buscore_prep_addr(devinfo->pdev, base + core->base);
+
+ brcmf_dbg(PCIE, "OTP data:\n");
+ for (idx = 0; idx < words; idx++) {
+ otp[idx] = brcmf_pcie_read_reg16(devinfo, base + 2 * idx);
+ brcmf_dbg(PCIE, "[%8x] 0x%04x\n", base + 2 * idx, otp[idx]);
+ }
+
+ if (coreid == BCMA_CORE_CHIPCOMMON) {
+ brcmf_pcie_select_core(devinfo, coreid);
+ WRITECC32(devinfo, sromcontrol, sromctl);
+ }
+
+ ret = brcmf_pcie_parse_otp(devinfo, (u8 *)otp, 2 * words);
+ kfree(otp);
+
+ return ret;
+}
+
#define BRCMF_PCIE_FW_CODE 0
#define BRCMF_PCIE_FW_NVRAM 1
+#define BRCMF_PCIE_FW_CLM 2
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq)
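The SYS_VENDOR blob's board-params field is a space-separated list of single-letter "tag=value" pairs, with 'M' mapped to module, 'V' to vendor and 'm' to version. A small userspace sketch of that walk, assuming strchrnul() via _GNU_SOURCE and an invented example string "M=RASP V=m m=6.11" (values borrowed from the example comment later in this patch, not from real OTP data):

/* Userspace sketch only -- parses the assumed board-params string. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

#define MAX_PARAM_LEN 16

int main(void)
{
	const char *p = "M=RASP V=m m=6.11";
	char module[MAX_PARAM_LEN] = "", vendor[MAX_PARAM_LEN] = "",
	     version[MAX_PARAM_LEN] = "";

	while (*p) {
		char tag = *p++;
		const char *end;
		size_t len;
		char *dst;

		if (*p++ != '=')
			return 1;		/* malformed entry */

		end = strchrnul(p, ' ');	/* value runs to the next space */
		len = end - p;
		if (len > MAX_PARAM_LEN - 1)
			return 1;

		dst = tag == 'M' ? module :
		      tag == 'V' ? vendor :
		      tag == 'm' ? version : NULL;
		if (dst) {
			memcpy(dst, p, len);
			dst[len] = '\0';
		}

		p = end;
		while (*p == ' ')		/* skip_spaces() */
			p++;
	}

	printf("module=%s vendor=%s version=%s\n", module, vendor, version);
	return 0;
}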
@@ -1755,6 +2060,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
+ devinfo->clm_fw = fwreq->items[BRCMF_PCIE_FW_CLM].binary;
kfree(fwreq);
ret = brcmf_chip_get_raminfo(devinfo->ci);
@@ -1830,6 +2136,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
struct brcmf_fw_name fwnames[] = {
{ ".bin", devinfo->fw_name },
{ ".txt", devinfo->nvram_name },
+ { ".clm_blob", devinfo->clm_name },
};
fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
@@ -1842,11 +2149,51 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
- fwreq->board_type = devinfo->settings->board_type;
+ fwreq->items[BRCMF_PCIE_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_PCIE_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
+ /* Apple platforms with fancy firmware/NVRAM selection */
+ if (devinfo->settings->board_type &&
+ devinfo->settings->antenna_sku &&
+ devinfo->otp.valid) {
+ const struct brcmf_otp_params *otp = &devinfo->otp;
+ struct device *dev = &devinfo->pdev->dev;
+ const char **bt = fwreq->board_types;
+
+ brcmf_dbg(PCIE, "Apple board: %s\n",
+ devinfo->settings->board_type);
+
+ /* Example: apple,shikoku-RASP-m-6.11-X3 */
+ bt[0] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor, otp->version,
+ devinfo->settings->antenna_sku);
+ bt[1] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor, otp->version);
+ bt[2] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor);
+ bt[3] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ devinfo->settings->board_type,
+ otp->module);
+ bt[4] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ devinfo->settings->board_type,
+ devinfo->settings->antenna_sku);
+ bt[5] = devinfo->settings->board_type;
+
+ if (!bt[0] || !bt[1] || !bt[2] || !bt[3] || !bt[4]) {
+ kfree(fwreq);
+ return NULL;
+ }
+ } else {
+ brcmf_dbg(PCIE, "Board: %s\n", devinfo->settings->board_type);
+ fwreq->board_types[0] = devinfo->settings->board_type;
+ }
+
return fwreq;
}
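To make the fallback order explicit: the six board_types entries run from most to least specific, and the firmware request code walks them in that order before falling back to the canonical name. A throwaway userspace sketch printing the candidates, using the assumed example values from the comment above (board "apple,shikoku", module RASP, vendor m, version 6.11, antenna SKU X3):

/* Userspace sketch only -- lists the lookup order, most specific first. */
#include <stdio.h>

int main(void)
{
	const char *board = "apple,shikoku", *module = "RASP",
		   *vendor = "m", *version = "6.11", *sku = "X3";
	char names[6][64];
	int i;

	snprintf(names[0], 64, "%s-%s-%s-%s-%s", board, module, vendor, version, sku);
	snprintf(names[1], 64, "%s-%s-%s-%s", board, module, vendor, version);
	snprintf(names[2], 64, "%s-%s-%s", board, module, vendor);
	snprintf(names[3], 64, "%s-%s", board, module);
	snprintf(names[4], 64, "%s-%s", board, sku);
	snprintf(names[5], 64, "%s", board);

	for (i = 0; i < 6; i++)
		printf("board_types[%d] = %s\n", i, names[i]);
	return 0;
}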
@@ -1857,6 +2204,7 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_fw_request *fwreq;
struct brcmf_pciedev_info *devinfo;
struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_core *core;
struct brcmf_bus *bus;
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
@@ -1876,6 +2224,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
+ core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+ if (core->rev >= 64)
+ devinfo->reginfo = &brcmf_reginfo_64;
+ else
+ devinfo->reginfo = &brcmf_reginfo_default;
+
pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
if (pcie_bus_dev == NULL) {
ret = -ENOMEM;
@@ -1918,6 +2272,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
+ ret = brcmf_pcie_read_otp(devinfo);
+ if (ret) {
+ brcmf_err(bus, "failed to parse OTP\n");
+ goto fail_brcmf;
+ }
+
fwreq = brcmf_pcie_prepare_fw_request(devinfo);
if (!fwreq) {
ret = -ENOMEM;
@@ -1981,6 +2341,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
brcmf_pcie_release_ringbuffers(devinfo);
brcmf_pcie_reset_device(devinfo);
brcmf_pcie_release_resource(devinfo);
+ release_firmware(devinfo->clm_fw);
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
@@ -2038,7 +2399,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
/* Check if device is still up and running, if so we are ready */
- if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
+ if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) {
brcmf_dbg(PCIE, "Try to wakeup device....\n");
if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
goto cleanup;
@@ -2105,6 +2466,9 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index fabfbb0b40b0..d0a7465be586 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -158,12 +158,12 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
struct brcmf_pno_macaddr_le pfn_mac;
u8 *mac_addr = NULL;
u8 *mac_mask = NULL;
- int err, i;
+ int err, i, ri;
- for (i = 0; i < pi->n_reqs; i++)
- if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- mac_addr = pi->reqs[i]->mac_addr;
- mac_mask = pi->reqs[i]->mac_addr_mask;
+ for (ri = 0; ri < pi->n_reqs; ri++)
+ if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[ri]->mac_addr;
+ mac_mask = pi->reqs[ri]->mac_addr_mask;
break;
}
@@ -185,7 +185,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
pfn_mac.mac[0] |= 0x02;
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
- pi->reqs[i]->reqid, pfn_mac.mac);
+ pi->reqs[ri]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8968809399c7..465d95d83759 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -618,6 +618,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio");
BRCMF_FW_DEF(43430B0, "brcmfmac43430b0-sdio");
+BRCMF_FW_CLM_DEF(43439, "brcmfmac43439-sdio");
BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio");
@@ -657,6 +658,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
+ BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
@@ -4129,23 +4131,24 @@ brcmf_sdio_watchdog(struct timer_list *t)
}
}
-static
-int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_sdio_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
- brcmf_sdio_fwnames,
- ARRAY_SIZE(brcmf_sdio_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
+ switch (type) {
+ case BRCMF_BLOB_CLM:
+ *fw = sdiodev->clm_fw;
+ sdiodev->clm_fw = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!*fw)
+ return -ENOENT;
- kfree(fwreq);
return 0;
}
@@ -4180,13 +4183,14 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.wowl_config = brcmf_sdio_wowl_config,
.get_ramsize = brcmf_sdio_bus_get_ramsize,
.get_memdump = brcmf_sdio_bus_get_memdump,
- .get_fwname = brcmf_sdio_get_fwname,
+ .get_blob = brcmf_sdio_get_blob,
.debugfs_create = brcmf_sdio_debugfs_create,
.reset = brcmf_sdio_bus_reset
};
#define BRCMF_SDIO_FW_CODE 0
#define BRCMF_SDIO_FW_NVRAM 1
+#define BRCMF_SDIO_FW_CLM 2
static void brcmf_sdio_firmware_callback(struct device *dev, int err,
struct brcmf_fw_request *fwreq)
@@ -4209,6 +4213,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
code = fwreq->items[BRCMF_SDIO_FW_CODE].binary;
nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len;
+ sdiod->clm_fw = fwreq->items[BRCMF_SDIO_FW_CLM].binary;
kfree(fwreq);
/* try to download image and nvram to the dongle */
@@ -4407,6 +4412,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
struct brcmf_fw_name fwnames[] = {
{ ".bin", bus->sdiodev->fw_name },
{ ".txt", bus->sdiodev->nvram_name },
+ { ".clm_blob", bus->sdiodev->clm_name },
};
fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev,
@@ -4418,7 +4424,9 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
- fwreq->board_type = bus->sdiodev->settings->board_type;
+ fwreq->items[BRCMF_SDIO_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_SDIO_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
+ fwreq->board_types[0] = bus->sdiodev->settings->board_type;
return fwreq;
}
@@ -4574,6 +4582,8 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus->sdiodev->settings)
brcmf_release_module_param(bus->sdiodev->settings);
+ release_firmware(bus->sdiodev->clm_fw);
+ bus->sdiodev->clm_fw = NULL;
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
kfree(bus);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 47351ff458ca..b76d34d36bde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -186,9 +186,11 @@ struct brcmf_sdio_dev {
struct sg_table sgtable;
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
+ char clm_name[BRCMF_FW_NAME_LEN];
bool wowl_enabled;
enum brcmf_sdiod_state state;
struct brcmf_sdiod_freezer *freezer;
+ const struct firmware *clm_fw;
};
/* sdio core registers */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 9fb68c2dc7e3..85e18fb9c497 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1154,24 +1154,11 @@ error:
return NULL;
}
-static
-int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_usb_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
- struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
-
- fwreq = brcmf_fw_alloc_request(bus->chip, bus->chiprev,
- brcmf_usb_fwnames,
- ARRAY_SIZE(brcmf_usb_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
-
- kfree(fwreq);
- return 0;
+ /* No blobs for USB devices... */
+ return -ENOENT;
}
static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
@@ -1180,7 +1167,7 @@ static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
- .get_fwname = brcmf_usb_get_fwname,
+ .get_blob = brcmf_usb_get_blob,
};
#define BRCMF_USB_FW_CODE 0
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
index ae1f3ad40d45..2b0df07ced74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
@@ -123,7 +123,7 @@
*/
/********************************************************************
- * Phy/Core Configuration. Defines macros to to check core phy/rev *
+ * Phy/Core Configuration. Defines macros to check core phy/rev *
* compile-time configuration. Defines default core support. *
* ******************************************************************
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index ed0b707f0cdf..f4939cf62767 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -51,9 +51,12 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_4378_CHIP_ID 0x4378
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
+#define CY_CC_43439_CHIP_ID 43439
#define CY_CC_43752_CHIP_ID 43752
+#define CY_CC_89459_CHIP_ID 0x4355
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -87,7 +90,9 @@
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
#define BRCM_PCIE_4371_DEVICE_ID 0x440d
-
+#define BRCM_PCIE_4378_DEVICE_ID 0x4425
+#define CY_PCIE_89459_DEVICE_ID 0x4415
+#define CY_PCIE_89459_RAW_DEVICE_ID 0x4355
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 5234511dac78..b0f23cf1a621 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5907,8 +5907,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5916,7 +5916,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
- strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
}
@@ -6529,7 +6529,7 @@ static struct pci_driver ipw2100_pci_driver = {
.shutdown = ipw2100_shutdown,
};
-/**
+/*
* Initialize the ipw2100 driver/module
*
* @returns 0 if ok, < 0 errno code on error.
@@ -6561,7 +6561,7 @@ out:
return ret;
}
-/**
+/*
* Cleanup ipw2100 driver registration
*/
static void __exit ipw2100_exit(void)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 029dacebe751..5b483de18c81 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -10424,8 +10424,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10434,7 +10434,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
- strlcpy(info->bus_info, pci_name(p->pci_dev),
+ strscpy(info->bus_info, pci_name(p->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index 55cac934f4ee..09ddd21608d4 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -651,7 +651,7 @@ struct ipw_rx_notification {
struct notif_link_deterioration link_deterioration;
struct notif_calibration calibration;
struct notif_noise noise;
- u8 raw[0];
+ DECLARE_FLEX_ARRAY(u8, raw);
} u;
} __packed;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 7964ef7d15f0..bec7bc273748 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -405,7 +405,7 @@ struct libipw_auth {
__le16 transaction;
__le16 status;
/* challenge */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_channel_switch {
@@ -423,7 +423,6 @@ struct libipw_action {
union {
struct libipw_action_exchange {
u8 token;
- struct libipw_info_element info_element[0];
} exchange;
struct libipw_channel_switch channel_switch;
@@ -441,7 +440,7 @@ struct libipw_disassoc {
struct libipw_probe_request {
struct libipw_hdr_3addr header;
/* SSID, supported rates */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_probe_response {
@@ -451,7 +450,7 @@ struct libipw_probe_response {
__le16 capability;
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
/* Alias beacon for probe_response */
@@ -462,7 +461,7 @@ struct libipw_assoc_request {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_reassoc_request {
@@ -470,7 +469,7 @@ struct libipw_reassoc_request {
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_assoc_response {
@@ -479,7 +478,7 @@ struct libipw_assoc_response {
__le16 status;
__le16 aid;
/* supported rates */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_txb {
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 7a684b76f39b..48d6870bbf4e 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -1329,8 +1329,8 @@ static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_as
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
- if (libipw_parse_info_param
- (frame->info_element, stats->len - sizeof(*frame), network))
+ if (libipw_parse_info_param((void *)frame->variable,
+ stats->len - sizeof(*frame), network))
return 1;
network->mode = 0;
@@ -1389,8 +1389,8 @@ static int libipw_network_init(struct libipw_device *ieee, struct libipw_probe_r
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
- if (libipw_parse_info_param
- (beacon->info_element, stats->len - sizeof(*beacon), network))
+ if (libipw_parse_info_param((void *)beacon->variable,
+ stats->len - sizeof(*beacon), network))
return 1;
network->mode = 0;
@@ -1510,7 +1510,7 @@ static void libipw_process_probe_response(struct libipw_device
struct libipw_network *target;
struct libipw_network *oldest = NULL;
#ifdef CONFIG_LIBIPW_DEBUG
- struct libipw_info_element *info_element = beacon->info_element;
+ struct libipw_info_element *info_element = (void *)beacon->variable;
#endif
unsigned long flags;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 846138d6e33d..7352d5b2095f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3254,7 +3254,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strlcpy(buffer, buf, sizeof(buffer));
+ strscpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index c62f299b9e0a..718efb1aa1b0 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -1167,7 +1167,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
cpu_to_le32(new_rate);
repeat_rate--;
idx++;
- if (idx >= LINK_QUAL_MAX_RETRY_NUM)
- goto out;
}
il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
repeat_rate--;
}
-out:
lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 4a97310f8fee..28cf4e832152 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -1710,7 +1710,7 @@ struct il4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ DECLARE_FLEX_ARRAY(struct agg_tx_status, agg_status); /* for each agg frame */
} u;
} __packed;
@@ -3365,7 +3365,7 @@ struct il_rx_pkt {
struct il_compressed_ba_resp compressed_ba;
struct il_missed_beacon_notif missed_beacon;
__le32 status;
- u8 raw[0];
+ DECLARE_FLEX_ARRAY(u8, raw);
} u;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 04d27a26260b..341c17fe2af4 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1870,15 +1870,15 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
goto done;
D_ASSOC("spatial multiplexing power save mode: %s\n",
- (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
- (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
+ (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+ (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
"disabled");
sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
@@ -1888,7 +1888,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
case IEEE80211_SMPS_OFF:
break;
default:
- IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index a647a406b87b..b20409f8c13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -140,6 +140,7 @@ config IWLMEI
depends on INTEL_MEI
depends on PM
depends on CFG80211
+ depends on BROKEN
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 8ff967edc8f0..110fda65bd21 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,13 +56,16 @@
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
+#define IWL_BZ_A_FM4_A_FW_PRE "iwlwifi-bz-a0-fm4-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
+#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0-"
#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
+#define IWL_BNJ_B_FM_B_FW_PRE "iwlwifi-BzBnj-b0-fm-b0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
@@ -119,8 +122,12 @@
IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_FM4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \
+ IWL_GL_B_FM_B_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@@ -131,6 +138,8 @@
IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_B_FM_B_MODULE_FIRMWARE(api) \
+ IWL_BNJ_B_FM_B_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -240,7 +249,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-#define IWL_DEVICE_BZ_COMMON \
+#define IWL_DEVICE_BZ \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.led_mode = IWL_LED_RF_STATE, \
@@ -276,16 +285,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = LDBG_M2S_BUF_WRAP_CNT, \
.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
}, \
- }
-
-#define IWL_DEVICE_BZ \
- IWL_DEVICE_BZ_COMMON, \
+ }, \
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_BZ, \
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -926,6 +932,13 @@ const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0 = {
+ .fw_name_pre = IWL_BZ_A_FM4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.fw_name_pre = IWL_GL_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -933,6 +946,13 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_gl_b0_fm_b0 = {
+ .fw_name_pre = IWL_GL_B_FM_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
.fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
.uhb_supported = true,
@@ -974,6 +994,13 @@ const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+
+const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0 = {
+ .fw_name_pre = IWL_BNJ_B_FM_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -1007,3 +1034,6 @@ MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_B_FM_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
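For context, the firmware file names added above are assembled from the prefix macros plus the ucode API level; __stringify() (from linux/stringify.h) turns the numeric level into a string literal that is concatenated at compile time. A small sketch with a hypothetical prefix:

	#include <linux/stringify.h>

	#define DEMO_FW_PRE			"iwlwifi-demo-a0-fm-a0-"
	#define DEMO_MODULE_FIRMWARE(api)	DEMO_FW_PRE __stringify(api) ".ucode"

	/* DEMO_MODULE_FIRMWARE(72) expands to "iwlwifi-demo-a0-fm-a0-72.ucode",
	 * the string handed to MODULE_FIRMWARE() and later to request_firmware() */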
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 411a6f6638b4..fefaa414272b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -112,7 +112,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type);
int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_priv *priv,
- const struct iwl_calib_hdr *cmd, int len);
+ const struct iwl_calib_cmd *cmd, size_t len);
void iwl_calib_free_results(struct iwl_priv *priv);
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index a11884fa254b..f488620d2844 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -19,8 +19,7 @@
struct iwl_calib_result {
struct list_head list;
size_t cmd_len;
- struct iwl_calib_hdr hdr;
- /* data follows */
+ struct iwl_calib_cmd cmd;
};
struct statistics_general_data {
@@ -43,12 +42,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
int ret;
hcmd.len[0] = res->cmd_len;
- hcmd.data[0] = &res->hdr;
+ hcmd.data[0] = &res->cmd;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
ret = iwl_dvm_send_cmd(priv, &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d on calib cmd %d\n",
- ret, res->hdr.op_code);
+ ret, res->cmd.hdr.op_code);
return ret;
}
}
@@ -57,19 +56,22 @@ int iwl_send_calib_results(struct iwl_priv *priv)
}
int iwl_calib_set(struct iwl_priv *priv,
- const struct iwl_calib_hdr *cmd, int len)
+ const struct iwl_calib_cmd *cmd, size_t len)
{
struct iwl_calib_result *res, *tmp;
- res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
- GFP_ATOMIC);
+ if (check_sub_overflow(len, sizeof(*cmd), &len))
+ return -ENOMEM;
+
+ res = kmalloc(struct_size(res, cmd.data, len), GFP_ATOMIC);
if (!res)
return -ENOMEM;
- memcpy(&res->hdr, cmd, len);
- res->cmd_len = len;
+ res->cmd = *cmd;
+ memcpy(res->cmd.data, cmd->data, len);
+ res->cmd_len = struct_size(cmd, data, len);
list_for_each_entry(tmp, &priv->calib_results, list) {
- if (tmp->hdr.op_code == res->hdr.op_code) {
+ if (tmp->cmd.hdr.op_code == res->cmd.hdr.op_code) {
list_replace(&tmp->list, &res->list);
kfree(tmp);
return 0;
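The iwl_calib_set() rework above is an instance of the overflow-safe flexible-array allocation pattern: validate the incoming length with check_sub_overflow(), size the allocation with struct_size(), then copy the fixed part and the payload separately. A standalone sketch of that pattern with hypothetical names (the GFP flag differs from the ATOMIC one used above):

	#include <linux/overflow.h>	/* check_sub_overflow(), struct_size() */
	#include <linux/slab.h>
	#include <linux/string.h>

	struct demo_cmd {
		u8 op_code;
		u8 reserved[3];
		u8 data[];		/* variable-length payload */
	};

	struct demo_result {
		size_t cmd_len;
		struct demo_cmd cmd;	/* must stay the last member */
	};

	static struct demo_result *demo_store_cmd(const struct demo_cmd *cmd,
						  size_t len)
	{
		struct demo_result *res;

		/* 'len' covers the whole command; keep only the payload length */
		if (check_sub_overflow(len, sizeof(*cmd), &len))
			return NULL;

		res = kmalloc(struct_size(res, cmd.data, len), GFP_KERNEL);
		if (!res)
			return NULL;

		res->cmd = *cmd;			/* fixed part */
		memcpy(res->cmd.data, cmd->data, len);	/* payload */
		res->cmd_len = struct_size(cmd, data, len);
		return res;
	}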
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index bbd574091201..1a9eadace188 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -696,6 +696,7 @@ struct iwl_priv {
/* Scan related variables */
unsigned long scan_start;
unsigned long scan_start_tsf;
+ size_t scan_cmd_size;
void *scan_cmd;
enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index baffa1cbe8fc..687c906a9d72 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 - 2020 Intel Corporation
+ * Copyright (C) 2019 - 2020, 2022 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -1242,7 +1242,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1297,7 +1297,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 2d38227dfdd2..a7e85c5c8c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -626,7 +626,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 active_chains;
u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
int ret;
- int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
+ size_t scan_cmd_size = sizeof(struct iwl_scan_cmd) +
MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
priv->fw->ucode_capa.max_probe_length;
const u8 *ssid = NULL;
@@ -649,9 +649,15 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
"fail to allocate memory for scan\n");
return -ENOMEM;
}
+ priv->scan_cmd_size = scan_cmd_size;
+ }
+ if (priv->scan_cmd_size < scan_cmd_size) {
+ IWL_DEBUG_SCAN(priv,
+ "memory needed for scan grew unexpectedly\n");
+ return -ENOMEM;
}
scan = priv->scan_cmd;
- memset(scan, 0, scan_cmd_size);
+ memset(scan, 0, priv->scan_cmd_size);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
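The scan.c change above records the size of the lazily allocated scan command, refuses a later request that would need a larger buffer, and clears the buffer by its recorded size rather than the freshly recomputed one. The same defensive pattern as a standalone sketch (hypothetical names):

	#include <linux/slab.h>
	#include <linux/string.h>

	struct demo_priv {
		void *scan_cmd;
		size_t scan_cmd_size;
	};

	static int demo_prepare_scan_cmd(struct demo_priv *priv, size_t needed)
	{
		if (!priv->scan_cmd) {
			priv->scan_cmd = kmalloc(needed, GFP_KERNEL);
			if (!priv->scan_cmd)
				return -ENOMEM;
			priv->scan_cmd_size = needed;
		}

		/* the buffer is sized once at first use; growing later is a bug */
		if (priv->scan_cmd_size < needed)
			return -ENOMEM;

		memset(priv->scan_cmd, 0, priv->scan_cmd_size);
		return 0;
	}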
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 476068c0abb7..cef43cf80620 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2022 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -161,12 +161,12 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
sta->addr,
- (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
+ (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ?
"static" :
- (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
+ (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ?
"dynamic" : "disabled");
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_STATIC:
*flags |= STA_FLG_MIMO_DIS_MSK;
break;
@@ -176,7 +176,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
case IEEE80211_SMPS_OFF:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index 4b27a53d0bb4..bb13ca5d666c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -356,18 +356,18 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_priv *priv = data;
- struct iwl_calib_hdr *hdr;
+ struct iwl_calib_cmd *cmd;
if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
return true;
}
- hdr = (struct iwl_calib_hdr *)pkt->data;
+ cmd = (struct iwl_calib_cmd *)pkt->data;
- if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
+ if (iwl_calib_set(priv, cmd, iwl_rx_packet_payload_len(pkt)))
IWL_ERR(priv, "Failed to record calibration data %d\n",
- hdr->op_code);
+ cmd->hdr.op_code);
return false;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index c78d2f1c722c..0b052c2e563a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -20,6 +20,8 @@
* &enum iwl_phy_ops_subcmd_ids
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
+ * @SCAN_GROUP: scan group, uses command IDs from
+ * &enum iwl_scan_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @LOCATION_GROUP: location group, uses command IDs from
* &enum iwl_location_subcmd_ids
@@ -36,6 +38,7 @@ enum iwl_mvm_command_groups {
MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
+ SCAN_GROUP = 0x6,
NAN_GROUP = 0x7,
LOCATION_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 4cd9ab23954e..df0833890e55 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -766,6 +766,65 @@ struct iwl_wowlan_status_v12 {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+/**
+ * struct iwl_wowlan_info_notif - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @reserved1: reserved
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @reserved2: reserved
+ */
+struct iwl_wowlan_info_notif {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 reserved2[2];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
+ * @wake_packet_length: wakeup packet length
+ * @station_id: station id
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_wake_pkt_notif {
+ __le32 wake_packet_length;
+ u8 station_id;
+ u8 reserved[3];
+ u8 wake_packet[1];
+} __packed; /* WOWLAN_WAKE_PKT_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_d3_end_notif - d3 end notification
+ * @flags: See &enum iwl_d0i3_flags
+ */
+struct iwl_mvm_d3_end_notif {
+ __le32 flags;
+} __packed;
+
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 5204aa94e72a..a0123f81f5d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@@ -13,6 +13,21 @@
*/
enum iwl_prot_offload_subcmd_ids {
/**
+ * @WOWLAN_WAKE_PKT_NOTIFICATION: Notification in &struct iwl_wowlan_wake_pkt_notif
+ */
+ WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC,
+
+ /**
+ * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif
+ */
+ WOWLAN_INFO_NOTIFICATION = 0xFD,
+
+ /**
+ * @D3_END_NOTIFICATION: End D3 state notification
+ */
+ D3_END_NOTIFICATION = 0xFE,
+
+ /**
* @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
*/
STORED_BEACON_NTF = 0xFF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 1989b270862b..74a01888715b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -668,7 +668,7 @@ struct iwl_rx_no_data {
__le32 phy_info[2];
__le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
- TX_NO_DATA_NTFY_API_S_VER_2 */
+ RX_NO_DATA_NTFY_API_S_VER_2 */
struct iwl_frame_release {
u8 baid;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5543d9cb74c8..7ba0e3409199 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,6 +9,16 @@
/* Scan Commands, Responses, Notifications */
+/**
+ * enum iwl_scan_subcmd_ids - scan commands
+ */
+enum iwl_scan_subcmd_ids {
+ /**
+ * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
+ */
+ OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
+};
+
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
@@ -1188,7 +1198,7 @@ struct iwl_scan_offload_profile_match {
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */
/**
- * struct iwl_scan_offload_profiles_query - match results query response
+ * struct iwl_scan_offload_match_info - match results information
* @matched_profiles: bitmap of matched profiles, referencing the
* matches passed in the scan offload request
* @last_scan_age: age of the last offloaded scan
@@ -1200,7 +1210,7 @@ struct iwl_scan_offload_profile_match {
* @reserved: reserved
* @matches: array of match information, one for each match
*/
-struct iwl_scan_offload_profiles_query {
+struct iwl_scan_offload_match_info {
__le32 matched_profiles;
__le32 last_scan_age;
__le32 n_scans_done;
@@ -1210,7 +1220,9 @@ struct iwl_scan_offload_profiles_query {
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match matches[];
-} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 and
+ * SCAN_OFFLOAD_MATCH_INFO_NOTIFICATION_S_VER_1
+ */
/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f5b556a103e8..cfa5e1b3c3f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -649,13 +649,16 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_gl_b0_fm_b0;
extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
+extern const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index aeb0015b73d2..919b1f478b4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1427,7 +1427,7 @@ struct iwl_wowlan_status_data {
u8 flags;
} igtk;
- u8 wake_packet[];
+ u8 *wake_packet;
};
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
@@ -1480,7 +1480,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
- if (status->wake_packet_bufsize) {
+ if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
@@ -1944,57 +1944,6 @@ out:
return true;
}
-/* Occasionally, templates would be nice. This is one of those times ... */
-#define iwl_mvm_parse_wowlan_status_common(_ver) \
-static struct iwl_wowlan_status_data * \
-iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
- struct iwl_wowlan_status_ ##_ver *data,\
- int len) \
-{ \
- struct iwl_wowlan_status_data *status; \
- int data_size, i; \
- \
- if (len < sizeof(*data)) { \
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return NULL; \
- } \
- \
- data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
- if (len != sizeof(*data) + data_size) { \
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return NULL; \
- } \
- \
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \
- if (!status) \
- return NULL; \
- \
- /* copy all the common fields */ \
- status->replay_ctr = le64_to_cpu(data->replay_ctr); \
- status->pattern_number = le16_to_cpu(data->pattern_number); \
- status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
- for (i = 0; i < 8; i++) \
- status->qos_seq_ctr[i] = \
- le16_to_cpu(data->qos_seq_ctr[i]); \
- status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
- status->num_of_gtk_rekeys = \
- le32_to_cpu(data->num_of_gtk_rekeys); \
- status->received_beacons = le32_to_cpu(data->received_beacons); \
- status->wake_packet_length = \
- le32_to_cpu(data->wake_packet_length); \
- status->wake_packet_bufsize = \
- le32_to_cpu(data->wake_packet_bufsize); \
- memcpy(status->wake_packet, data->wake_packet, \
- status->wake_packet_bufsize); \
- \
- return status; \
-}
-
-iwl_mvm_parse_wowlan_status_common(v6)
-iwl_mvm_parse_wowlan_status_common(v7)
-iwl_mvm_parse_wowlan_status_common(v9)
-iwl_mvm_parse_wowlan_status_common(v12)
-
static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_gtk_status_v2 *data)
{
@@ -2054,6 +2003,96 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
((u64)ipn[0] << 40);
}
+static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 i;
+
+ if (len < sizeof(*data)) {
+ IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ status = NULL;
+ return;
+ }
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, &data->gtk[0]);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
+
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ status->qos_seq_ctr[i] =
+ le16_to_cpu(data->qos_seq_ctr[i]);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+}
+
+/* Occasionally, templates would be nice. This is one of those times ... */
+#define iwl_mvm_parse_wowlan_status_common(_ver) \
+static struct iwl_wowlan_status_data * \
+iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
+ struct iwl_wowlan_status_ ##_ver *data,\
+ int len) \
+{ \
+ struct iwl_wowlan_status_data *status; \
+ int data_size, i; \
+ \
+ if (len < sizeof(*data)) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
+ if (len != sizeof(*data) + data_size) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ status = kzalloc(sizeof(*status), GFP_KERNEL); \
+ if (!status) \
+ return NULL; \
+ \
+ /* copy all the common fields */ \
+ status->replay_ctr = le64_to_cpu(data->replay_ctr); \
+ status->pattern_number = le16_to_cpu(data->pattern_number); \
+ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
+ for (i = 0; i < 8; i++) \
+ status->qos_seq_ctr[i] = \
+ le16_to_cpu(data->qos_seq_ctr[i]); \
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
+ status->num_of_gtk_rekeys = \
+ le32_to_cpu(data->num_of_gtk_rekeys); \
+ status->received_beacons = le32_to_cpu(data->received_beacons); \
+ status->wake_packet_length = \
+ le32_to_cpu(data->wake_packet_length); \
+ status->wake_packet_bufsize = \
+ le32_to_cpu(data->wake_packet_bufsize); \
+ if (status->wake_packet_bufsize) { \
+ status->wake_packet = \
+ kmemdup(data->wake_packet, \
+ status->wake_packet_bufsize, \
+ GFP_KERNEL); \
+ if (!status->wake_packet) { \
+ kfree(status); \
+ return NULL; \
+ } \
+ } else { \
+ status->wake_packet = NULL; \
+ } \
+ \
+ return status; \
+}
+
+iwl_mvm_parse_wowlan_status_common(v6)
+iwl_mvm_parse_wowlan_status_common(v7)
+iwl_mvm_parse_wowlan_status_common(v9)
+iwl_mvm_parse_wowlan_status_common(v12)
+
static struct iwl_wowlan_status_data *
iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
{
@@ -2173,36 +2212,15 @@ out_free_resp:
return status;
}
-static struct iwl_wowlan_status_data *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
-{
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD,
- IWL_FW_CMD_VER_UNKNOWN);
- __le32 station_id = cpu_to_le32(sta_id);
- u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
-
- if (!mvm->net_detect) {
- /* only for tracing for now */
- int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
- cmd_size, &station_id);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
- }
-
- return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
-}
-
/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_wowlan_status_data *status;
int i;
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
if (!status)
goto out_unlock;
@@ -2212,7 +2230,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
/* still at hard-coded place 0 for D3 image */
mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
if (!mvm_ap_sta)
- goto out_free;
+ goto out_unlock;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status->qos_seq_ctr[i];
@@ -2235,11 +2253,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
- kfree(status);
return keep;
-out_free:
- kfree(status);
out_unlock:
mutex_unlock(&mvm->mutex);
return false;
@@ -2248,16 +2263,16 @@ out_unlock:
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
IWL_SCAN_MAX_PROFILES)
-struct iwl_mvm_nd_query_results {
+struct iwl_mvm_nd_results {
u32 matched_profiles;
u8 matches[ND_QUERY_BUF_LEN];
};
static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *results)
+ struct iwl_mvm_nd_results *results)
{
- struct iwl_scan_offload_profiles_query *query;
+ struct iwl_scan_offload_match_info *query;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
.flags = CMD_WANT_SKB,
@@ -2274,7 +2289,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
- query_len = sizeof(struct iwl_scan_offload_profiles_query);
+ query_len = sizeof(struct iwl_scan_offload_match_info);
matches_len = sizeof(struct iwl_scan_offload_profile_match) *
max_profiles;
} else {
@@ -2305,7 +2320,7 @@ out_free_resp:
}
static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *query,
+ struct iwl_mvm_nd_results *results,
int idx)
{
int n_chans = 0, i;
@@ -2313,13 +2328,13 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
- (struct iwl_scan_offload_profile_match *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
n_chans += hweight8(matches[idx].matching_channels[i]);
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
- (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
n_chans += hweight8(matches[idx].matching_channels[i]);
@@ -2329,7 +2344,7 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
}
static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *query,
+ struct iwl_mvm_nd_results *results,
struct cfg80211_wowlan_nd_match *match,
int idx)
{
@@ -2338,7 +2353,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
- (struct iwl_scan_offload_profile_match *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
@@ -2346,7 +2361,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
- (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
@@ -2355,25 +2370,50 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
}
}
+/**
+ * enum iwl_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF was received
+ */
+enum iwl_d3_notif {
+ IWL_D3_NOTIF_WOWLAN_INFO = BIT(0),
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1),
+ IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2),
+ IWL_D3_ND_MATCH_INFO = BIT(3),
+ IWL_D3_NOTIF_D3_END_NOTIF = BIT(4)
+};
+
+/* manage d3 resume data */
+struct iwl_d3_data {
+ struct iwl_wowlan_status_data *status;
+ bool test;
+ u32 d3_end_flags;
+ u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
+ u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
+ struct iwl_mvm_nd_results *nd_results;
+ bool nd_results_valid;
+};
+
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
{
struct cfg80211_wowlan_nd_info *net_detect = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- struct iwl_wowlan_status_data *status;
- struct iwl_mvm_nd_query_results query;
unsigned long matched_profiles;
u32 reasons = 0;
int i, n_matches, ret;
- status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
- if (status) {
- reasons = status->wakeup_reasons;
- kfree(status);
- }
+ if (WARN_ON(!d3_data || !d3_data->status))
+ goto out;
+
+ reasons = d3_data->status->wakeup_reasons;
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
@@ -2381,13 +2421,22 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
goto out;
- ret = iwl_mvm_netdetect_query_results(mvm, &query);
- if (ret || !query.matched_profiles) {
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0)) {
+ IWL_INFO(mvm, "Query FW for ND results\n");
+ ret = iwl_mvm_netdetect_query_results(mvm, d3_data->nd_results);
+
+ } else {
+ IWL_INFO(mvm, "Notification based ND results\n");
+ ret = d3_data->nd_results_valid ? 0 : -1;
+ }
+
+ if (ret || !d3_data->nd_results->matched_profiles) {
wakeup_report = NULL;
goto out;
}
- matched_profiles = query.matched_profiles;
+ matched_profiles = d3_data->nd_results->matched_profiles;
if (mvm->n_nd_match_sets) {
n_matches = hweight_long(matched_profiles);
} else {
@@ -2404,7 +2453,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct cfg80211_wowlan_nd_match *match;
int idx, n_channels = 0;
- n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
+ n_channels = iwl_mvm_query_num_match_chans(mvm,
+ d3_data->nd_results,
+ i);
match = kzalloc(struct_size(match, channels, n_channels),
GFP_KERNEL);
@@ -2424,7 +2475,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (mvm->n_nd_channels < n_channels)
continue;
- iwl_mvm_query_set_freqs(mvm, &query, match, i);
+ iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
}
out_report_nd:
@@ -2504,16 +2555,317 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
return false;
}
+/*
+ * This function assumes:
+ * 1. The mutex is already held.
+ * 2. The callee functions unlock the mutex.
+ */
+static bool
+iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* if FW uses status notification, status shouldn't be NULL here */
+ if (!d3_data->status) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : mvmvif->ap_sta_id;
+
+ d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
+ }
+
+ if (mvm->net_detect) {
+ iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
+ } else {
+ bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
+
+ return keep;
+ }
+ return false;
+}
+
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+ IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
+static int iwl_mvm_wowlan_store_wake_pkt(struct iwl_mvm *mvm,
+ struct iwl_wowlan_wake_pkt_notif *notif,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 data_size, packet_len = le32_to_cpu(notif->wake_packet_length);
+
+ if (len < sizeof(*notif)) {
+ IWL_ERR(mvm, "Invalid WoWLAN wake packet notification!\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!status)) {
+ IWL_ERR(mvm, "Got wake packet notification but wowlan status data is NULL\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!(status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT))) {
+ IWL_ERR(mvm, "Got wakeup packet but wakeup reason is %x\n",
+ status->wakeup_reasons);
+ return -EIO;
+ }
+
+ data_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, wake_packet);
+
+ /* data_size got the padding from the notification, remove it. */
+ if (packet_len < data_size)
+ data_size = packet_len;
+
+ status->wake_packet = kmemdup(notif->wake_packet, data_size,
+ GFP_ATOMIC);
+
+ if (!status->wake_packet)
+ return -ENOMEM;
+
+ status->wake_packet_length = packet_len;
+ status->wake_packet_bufsize = data_size;
+
+ return 0;
+}
+
+static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data,
+ struct iwl_scan_offload_match_info *notif,
+ u32 len)
+{
+ struct iwl_wowlan_status_data *status = d3_data->status;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_nd_results *results = d3_data->nd_results;
+ size_t i, matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ iwl_umac_scan_get_max_profiles(mvm->fw);
+
+ if (IS_ERR_OR_NULL(vif))
+ return;
+
+ if (len < sizeof(struct iwl_scan_offload_match_info)) {
+ IWL_ERR(mvm, "Invalid scan match info notification\n");
+ return;
+ }
+
+ if (!mvm->net_detect) {
+ IWL_ERR(mvm, "Unexpected scan match info notification\n");
+ return;
+ }
+
+ if (!status || status->wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ IWL_ERR(mvm,
+ "Ignore scan match info notification: no reason\n");
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(notif->n_scans_done);
+#endif
+
+ results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+ IWL_INFO(mvm, "number of matched profiles=%u\n",
+ results->matched_profiles);
+
+ if (results->matched_profiles) {
+ memcpy(results->matches, notif->matches, matches_len);
+ d3_data->nd_results_valid = TRUE;
+ }
+
+ /* no scan should be active at this point */
+ mvm->scan_status = 0;
+ for (i = 0; i < mvm->max_scans; i++)
+ mvm->scan_uid_status[i] = 0;
+}
+
+static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_d3_data *d3_data = data;
+ u32 len;
+ int ret;
+
+ switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+ struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ /* We might get two notifications due to dual bss */
+ IWL_DEBUG_WOWLAN(mvm,
+ "Got additional wowlan info notification\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
+ len);
+ if (d3_data->status &&
+ d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+ /* We are supposed to get also wake packet notif */
+ d3_data->notif_expected |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+ struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+ /* We shouldn't get two wake packet notifications */
+ IWL_ERR(mvm,
+ "Got additional wowlan wake packet notification\n");
+ } else {
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ len = iwl_rx_packet_payload_len(pkt);
+ ret = iwl_mvm_wowlan_store_wake_pkt(mvm, notif,
+ d3_data->status,
+ len);
+ if (ret)
+ IWL_ERR(mvm,
+ "Can't parse WOWLAN_WAKE_PKT_NOTIFICATION\n");
+ }
+
+ break;
+ }
+ case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+ struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_ND_MATCH_INFO) {
+ IWL_ERR(mvm,
+ "Got additional netdetect match info\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_ND_MATCH_INFO;
+
+ /* explicitly set this in the 'expected' as well */
+ d3_data->notif_expected |= IWL_D3_ND_MATCH_INFO;
+
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_nd_match_info_handler(mvm, d3_data, notif, len);
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+ struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data;
+
+ d3_data->d3_end_flags = __le32_to_cpu(notif->flags);
+ d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return d3_data->notif_received == d3_data->notif_expected;
+}
+
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+{
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
+ };
+ bool reset = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ if (ret)
+ return ret;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ return -ENOENT;
+ }
+
+ /*
+ * We should trigger resume flow using command only for 22000 family
+ * AX210 and above don't need the command since they have
+ * the doorbell interrupt.
+ */
+ if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
+ fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) {
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
+
+static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data)
+{
+ static const u16 d3_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+ WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ struct iwl_notification_wait wait_d3_notif;
+ int ret;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+
+ ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
+ return ret;
+ }
+
+ return iwl_wait_notification(&mvm->notif_wait, &wait_d3_notif,
+ IWL_MVM_D3_NOTIF_TIMEOUT);
+}
+
+static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_WAKE_PKT_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0);
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
int ret = 1;
- enum iwl_d3_status d3_status;
- bool keep = false;
+ struct iwl_mvm_nd_results results = {};
+ struct iwl_d3_data d3_data = {
+ .test = test,
+ .notif_expected =
+ IWL_D3_NOTIF_WOWLAN_INFO |
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ .nd_results_valid = false,
+ .nd_results = &results,
+ };
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+ bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ bool keep = false;
mutex_lock(&mvm->mutex);
@@ -2537,54 +2889,30 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
- if (ret)
- goto err;
-
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
- goto err;
- }
-
- if (d0i3_first) {
- struct iwl_host_cmd cmd = {
- .id = D0I3_END_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
- };
- int len;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret < 0) {
- IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
- ret);
+ if (resume_notif_based) {
+ d3_data.status = kzalloc(sizeof(*d3_data.status), GFP_KERNEL);
+ if (!d3_data.status) {
+ IWL_ERR(mvm, "Failed to allocate wowlan status\n");
+ ret = -ENOMEM;
goto err;
}
- switch (mvm->cmd_ver.d0i3_resp) {
- case 0:
- break;
- case 1:
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
- if (len != sizeof(u32)) {
- IWL_ERR(mvm,
- "Error with D0I3_END_CMD response size (%d)\n",
- len);
- goto err;
- }
- if (IWL_D0I3_RESET_REQUIRE &
- le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
- iwl_write32(mvm->trans, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- iwl_free_resp(&cmd);
- }
- break;
- default:
- WARN_ON(1);
- }
+
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ if (ret)
+ goto err;
+ } else {
+ ret = iwl_mvm_resume_firmware(mvm, test);
+ if (ret < 0)
+ goto err;
}
/* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ /* when reset is required we can't send these following commands */
+ if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
+ goto query_wakeup_reasons;
+
/*
* Query the current location and source from the D3 firmware so we
* can play it back when we re-intiailize the D0 firmware
@@ -2598,41 +2926,36 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* Re-configure default SAR profile */
iwl_mvm_sar_select_profile(mvm, 1, 1);
- if (mvm->net_detect) {
+ if (mvm->net_detect && unified_image) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that
* fails, continue and try to get the wake-up reasons,
* but trigger a HW restart by keeping a failure code
* in ret.
*/
- if (unified_image)
- ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
- false);
-
- iwl_mvm_query_netdetect_reasons(mvm, vif);
- /* has unlocked the mutex, so skip that */
- goto out;
- } else {
- keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
-#endif
- /* has unlocked the mutex, so skip that */
- goto out_iterate;
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+ false);
}
+query_wakeup_reasons:
+ keep = iwl_mvm_choose_query_wakeup_reasons(mvm, vif, &d3_data);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+
err:
- iwl_mvm_free_nd(mvm);
mutex_unlock(&mvm->mutex);
+out:
+ if (d3_data.status)
+ kfree(d3_data.status->wake_packet);
+ kfree(d3_data.status);
+ iwl_mvm_free_nd(mvm);
-out_iterate:
- if (!test)
+ if (!d3_data.test && !mvm->net_detect)
ieee80211_iterate_active_interfaces_mtx(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter,
+ keep ? vif : NULL);
-out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
/* no need to reset the device in unified images, if successful */
@@ -2641,9 +2964,14 @@ out:
if (d0i3_first)
return 0;
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
- if (!ret)
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0)) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (!ret)
+ return 0;
+ } else if (!(d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) {
return 0;
+ }
}
/*
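Much of the d3.c rework above revolves around the expected/received notification bitmaps in struct iwl_d3_data: the waiter completes only once every expected bit has been set, and parsing one notification (WOWLAN_INFO) may add another (the wake packet) to the expected set. A reduced standalone sketch of that handshake, with hypothetical names:

	#include <linux/bits.h>
	#include <linux/types.h>

	enum demo_d3_notif {
		DEMO_NOTIF_WOWLAN_INFO	= BIT(0),
		DEMO_NOTIF_WAKE_PKT	= BIT(1),
		DEMO_NOTIF_D3_END	= BIT(2),
	};

	struct demo_d3_data {
		u32 notif_expected;	/* starts as INFO | D3_END */
		u32 notif_received;
	};

	/* returns true when every expected notification has arrived */
	static bool demo_handle_d3_notif(struct demo_d3_data *d, u32 notif,
					 bool info_announces_wake_pkt)
	{
		d->notif_received |= notif;

		/* the info notification can announce that a wake packet follows */
		if (notif == DEMO_NOTIF_WOWLAN_INFO && info_announces_wake_pkt)
			d->notif_expected |= DEMO_NOTIF_WAKE_PKT;

		return d->notif_received == d->notif_expected;
	}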
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index c0bd697b080a..1e8123140973 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -430,14 +430,16 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
return -EBUSY;
if (amsdu_len) {
- mvmsta->orig_amsdu_len = sta->max_amsdu_len;
- sta->max_amsdu_len = amsdu_len;
- for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++)
- sta->max_tid_amsdu_len[i] = amsdu_len;
+ mvmsta->orig_amsdu_len = sta->cur->max_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = amsdu_len;
+ sta->deflink.agg.max_amsdu_len = amsdu_len;
+ for (i = 0; i < ARRAY_SIZE(sta->deflink.agg.max_tid_amsdu_len); i++)
+ sta->deflink.agg.max_tid_amsdu_len[i] = amsdu_len;
} else {
- sta->max_amsdu_len = mvmsta->orig_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = mvmsta->orig_amsdu_len;
mvmsta->orig_amsdu_len = 0;
}
+
return count;
}
@@ -451,7 +453,7 @@ static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file,
char buf[32];
int pos;
- pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len);
+ pos = scnprintf(buf, sizeof(buf), "current %d ", sta->cur->max_amsdu_len);
pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n",
mvmsta->orig_amsdu_len);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5eb28f8ee87e..8464c9b7baf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1833,8 +1833,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
+ IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
@@ -3193,7 +3193,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
NL80211_TDLS_SETUP);
}
- sta->max_rc_amsdu_len = 1;
+ sta->deflink.agg.max_rc_amsdu_len = 1;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
/*
@@ -4949,6 +4949,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
{
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 gi_ltf;
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
@@ -5019,9 +5020,12 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
RATE_HT_MCS_INDEX(rate_n_flags) :
u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
- if (format == RATE_MCS_HE_MSK) {
- u32 gi_ltf = u32_get_bits(rate_n_flags,
- RATE_MCS_HE_GI_LTF_MSK);
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (format) {
+ case RATE_MCS_HE_MSK:
+ gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -5060,19 +5064,14 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
- return;
- }
-
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-
- if (format == RATE_MCS_HT_MSK) {
+ break;
+ case RATE_MCS_HT_MSK:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
-
- } else if (format == RATE_MCS_VHT_MSK) {
+ break;
+ case RATE_MCS_VHT_MSK:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ break;
}
-
}
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bf35e130c876..97cba526e465 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -860,6 +860,7 @@ struct iwl_mvm {
/* Scan status, cmd (pre-allocated) and auxiliary station */
unsigned int scan_status;
+ size_t scan_cmd_size;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* For CDB this is low band scan type, for non-CDB - type. */
@@ -1079,7 +1080,6 @@ struct iwl_mvm {
struct list_head resp_pasn_list;
struct {
- u8 d0i3_resp;
u8 range_resp;
} cmd_ver;
@@ -1705,7 +1705,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_size(struct iwl_mvm *mvm);
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index db43c8a83a31..d2d42cd48af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -557,6 +557,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_CONFIG_CMD),
@@ -574,6 +581,9 @@ static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+ HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
+ HCMD_NAME(D3_END_NOTIFICATION),
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -593,6 +603,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
@@ -1065,7 +1076,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
- int scan_size;
+ size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
@@ -1188,13 +1199,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
- mvm->cmd_ver.d0i3_resp =
- iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
- 0);
- /* we only support version 1 */
- if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
- goto out_free;
-
mvm->cmd_ver.range_resp =
iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_RESPONSE_NOTIF, 5);
@@ -1299,6 +1303,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd)
goto out_free;
+ mvm->scan_cmd_size = scan_size;
/* invalidate ids to prevent accidental removal of sta_id 0 */
mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index d8c3d7ff4f44..2e9081cb6627 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -143,7 +143,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
};
/* the station support only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
max_nss = 1;
for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
@@ -205,7 +205,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
u8 nss = sta->deflink.rx_nss;
/* the station support only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
nss = 1;
for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
@@ -270,7 +270,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
0;
else
@@ -340,9 +340,9 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- if (sta->max_amsdu_len < size) {
+ if (sta->deflink.agg.max_amsdu_len < size) {
/*
- * In debug sta->max_amsdu_len < size
+ * In debug sta->deflink.agg.max_amsdu_len < size
* so also check with orig_amsdu_len which holds the
* original data before debugfs changed the value
*/
@@ -352,18 +352,18 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
- sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+ sta->deflink.agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled & BIT(i))
- sta->max_tid_amsdu_len[i] =
+ sta->deflink.agg.max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
- sta->max_tid_amsdu_len[i] = 1;
+ sta->deflink.agg.max_tid_amsdu_len[i] = 1;
}
IWL_DEBUG_RATE(mvm,
@@ -450,7 +450,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* since TLC offload works with one mode we can assume
* that only vht/ht is used and also set it as station max amsdu
*/
- sta->max_amsdu_len = max_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = max_amsdu_len;
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index a79043f30775..0b50b816684a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -138,7 +138,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (!sta->deflink.ht_cap.ht_supported)
return false;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return false;
if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
@@ -1491,7 +1491,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
- sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
+ sta->deflink.agg.max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
@@ -1506,22 +1506,23 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvmsta->vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ mvmsta->max_amsdu_len = sta->deflink.agg.max_amsdu_len;
else
- mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500);
+ mvmsta->max_amsdu_len =
+ min_t(int, sta->deflink.agg.max_amsdu_len, 8500);
- sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+ sta->deflink.agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled)
- sta->max_tid_amsdu_len[i] =
+ sta->deflink.agg.max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
- sta->max_tid_amsdu_len[i] = 1;
+ sta->deflink.agg.max_tid_amsdu_len[i] = 1;
}
}
@@ -2933,7 +2934,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = mvmsta->sta_id;
mvmsta->amsdu_enabled = 0;
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ mvmsta->max_amsdu_len = sta->cur->max_amsdu_len;
for (j = 0; j < LQ_SIZE; j++)
rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
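[Note] The rs-fw.c and rs.c hunks above are one mechanical migration: SMPS mode and the A-MSDU limits move from fields directly on struct ieee80211_sta to its per-link data (sta->deflink.smps_mode, sta->deflink.agg.*), so the rate-scaling code stays correct once a station can carry more than one link. A rough sketch of that layout, using simplified stand-in types rather than the mac80211 definitions:

/*
 * Sketch: per-link fields live in a link sub-structure; non-MLO code paths
 * read the default link ("deflink") instead of fields on the station itself.
 */
#include <stdbool.h>

struct link_sta_info {
        int smps_mode;                  /* per-link SMPS state */
        unsigned int max_amsdu_len;     /* per-link A-MSDU limit */
};

struct sta_info {
        bool mlo;                           /* multi-link operation active? */
        struct link_sta_info deflink;       /* default (only) link */
        struct link_sta_info *links[15];    /* other links, if any */
};

static unsigned int sta_max_amsdu(const struct sta_info *sta)
{
        /* legacy/non-MLO callers go through the default link */
        return sta->deflink.max_amsdu_len;
}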
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 2c43a9989783..1aadccd8841f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -1191,16 +1191,22 @@ struct iwl_mvm_rx_phy_data {
enum iwl_rx_phy_info_type info_type;
__le32 d0, d1, d2, d3;
__le16 d4;
+
+ u32 rate_n_flags;
+ u32 gp2_on_air_rise;
+ u16 phy_info;
+ u8 energy_a, energy_b;
+ u8 channel;
};
static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags,
struct ieee80211_radiotap_he_mu *he_mu)
{
u32 phy_data2 = le32_to_cpu(phy_data->d2);
u32 phy_data3 = le32_to_cpu(phy_data->d3);
u16 phy_data4 = le16_to_cpu(phy_data->d4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
he_mu->flags1 |=
@@ -1246,7 +1252,6 @@ static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
static void
iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status)
@@ -1260,6 +1265,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
* the TSF/timers are not be transmitted in HE-MU.
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
u8 offs = 0;
@@ -1331,7 +1337,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status,
- u32 rate_n_flags, int queue)
+ int queue)
{
switch (phy_data->info_type) {
case IWL_RX_PHY_INFO_TYPE_NONE:
@@ -1430,7 +1436,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
le16_encode_bits(le16_get_bits(phy_data->d4,
IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
- iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu);
+ iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu);
fallthrough;
case IWL_RX_PHY_INFO_TYPE_HE_MU:
he_mu->flags2 |=
@@ -1444,8 +1450,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
fallthrough;
case IWL_RX_PHY_INFO_TYPE_HE_TB:
case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
- iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags,
- he, he_mu, rx_status);
+ iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
break;
case IWL_RX_PHY_INFO_TYPE_HE_SU:
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
@@ -1461,13 +1466,14 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags, u16 phy_info, int queue)
+ int queue)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 rate_n_flags = phy_data->rate_n_flags;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
- u8 stbc, ltf;
+ u8 ltf;
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
@@ -1484,6 +1490,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
};
+ u16 phy_info = phy_data->phy_info;
he = skb_put_data(skb, &known, sizeof(known));
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
@@ -1504,7 +1511,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
- rate_n_flags, queue);
+ queue);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
@@ -1531,19 +1538,6 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
- stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_MCS_NSS_MSK) >>
- RATE_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_HE;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
-
- rx_status->he_dcm =
- !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
-
#define CHECK_TYPE(F) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
(RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
@@ -1661,6 +1655,107 @@ static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
rx_sta_csa->all_sta_unblocked = false;
}
+/*
+ * Note: requires also rx_status->band to be prefilled, as well
+ * as phy_data (apart from phy_data->info_type)
+ */
+static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ bool is_sgi;
+
+ phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
+ if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ phy_data->info_type =
+ le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ /* must be before L-SIG data */
+ if (format == RATE_MCS_HE_MSK)
+ iwl_mvm_rx_he(mvm, skb, phy_data, queue);
+
+ iwl_mvm_decode_lsig(skb, phy_data);
+
+ rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->band);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
+ phy_data->energy_a, phy_data->energy_b);
+
+ if (unlikely(mvm->monitor_on))
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
+
+ is_sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate_n_flags) :
+ rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
+ rx_status->encoding = RX_ENC_VHT;
+ break;
+ case RATE_MCS_HE_MSK:
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+ break;
+ }
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ case RATE_MCS_VHT_MSK:
+ case RATE_MCS_HE_MSK:
+ rx_status->nss =
+ u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ default: {
+ int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ rx_status->rate_idx = rate;
+
+ if (WARN_ONCE(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band))
+ rx_status->rate_idx = 0;
+ break;
+ }
+ }
+}
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -1670,17 +1765,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_hdr *hdr;
u32 len;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
- u32 rate_n_flags, gp2_on_air_rise;
- u16 phy_info;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0, channel, energy_a, energy_b;
+ u8 crypt_len = 0;
size_t desc_size;
- struct iwl_mvm_rx_phy_data phy_data = {
- .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
- };
+ struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
- bool is_sgi;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1696,35 +1786,37 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
- channel = desc->v3.channel;
- gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
- energy_a = desc->v3.energy_a;
- energy_b = desc->v3.energy_b;
+ phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data.channel = desc->v3.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ phy_data.energy_a = desc->v3.energy_a;
+ phy_data.energy_b = desc->v3.energy_b;
phy_data.d0 = desc->v3.phy_data0;
phy_data.d1 = desc->v3.phy_data1;
phy_data.d2 = desc->v3.phy_data2;
phy_data.d3 = desc->v3.phy_data3;
} else {
- rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
- channel = desc->v1.channel;
- gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
- energy_a = desc->v1.energy_a;
- energy_b = desc->v1.energy_b;
+ phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ phy_data.channel = desc->v1.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ phy_data.energy_a = desc->v1.energy_a;
+ phy_data.energy_b = desc->v1.energy_b;
phy_data.d0 = desc->v1.phy_data0;
phy_data.d1 = desc->v1.phy_data1;
phy_data.d2 = desc->v1.phy_data2;
phy_data.d3 = desc->v1.phy_data3;
}
+
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
REPLY_RX_MPDU_CMD, 0) < 4) {
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
- rate_n_flags);
+ phy_data.rate_n_flags);
}
- format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
len = le16_to_cpu(desc->mpdu_len);
@@ -1733,14 +1825,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
}
- phy_info = le16_to_cpu(desc->phy_info);
+ phy_data.phy_info = le16_to_cpu(desc->phy_info);
phy_data.d4 = desc->phy_data4;
- if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
- phy_data.info_type =
- le32_get_bits(phy_data.d1,
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
-
hdr = (void *)(pkt->data + desc_size);
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
@@ -1763,27 +1850,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
- /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (format == RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
- phy_info, queue);
-
- iwl_mvm_decode_lsig(skb, &phy_data);
-
/*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
@@ -1794,12 +1860,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
le32_to_cpu(desc->status));
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
+
/* set the preamble flag if appropriate */
if (format == RATE_MCS_CCK_MSK &&
- phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
u64 tsf_on_air_rise;
if (mvm->trans->trans_cfg->device_family >=
@@ -1813,24 +1880,20 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
}
- rx_status->device_timestamp = gp2_on_air_rise;
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
} else {
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
}
- rx_status->freq = ieee80211_channel_to_frequency(channel,
- rx_status->band);
- iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
- energy_b);
/* update aggregation data for monitor sake on default queue */
- if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
- bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit;
+ toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
/*
* Toggle is switched whenever new aggregation starts. Make
@@ -1846,9 +1909,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->ampdu_reference = mvm->ampdu_ref;
}
- if (unlikely(mvm->monitor_on))
- iwl_mvm_add_rtap_sniffer_config(mvm, skb);
-
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
@@ -1867,13 +1927,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
}
- if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc,
+ if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
kfree_skb(skb);
goto out;
}
+ iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
+
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
@@ -1971,43 +2033,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- is_sgi = format == RATE_MCS_HE_MSK ?
- iwl_he_is_sgi(rate_n_flags) :
- rate_n_flags & RATE_MCS_SGI_MSK;
-
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
- rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (format == RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (format == RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >>
- RATE_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_VHT;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (!(format == RATE_MCS_HE_MSK)) {
- int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
- rx_status->band);
-
- if (WARN(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band)) {
- kfree_skb(skb);
- goto out;
- }
- rx_status->rate_idx = rate;
- }
-
/* management stuff on default queue */
if (!queue) {
if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
@@ -2039,32 +2064,32 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_no_data *desc = (void *)pkt->data;
- u32 rate_n_flags = le32_to_cpu(desc->rate);
- u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
u32 rssi = le32_to_cpu(desc->rssi);
u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
- u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 channel, energy_a, energy_b;
- u32 format;
struct iwl_mvm_rx_phy_data phy_data = {
- .info_type = le32_get_bits(desc->phy_info[1],
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
.d0 = desc->phy_info[0],
.d1 = desc->phy_info[1],
+ .phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD,
+ .gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time),
+ .rate_n_flags = le32_to_cpu(desc->rate),
+ .energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK),
+ .energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK),
+ .channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK),
};
- bool is_sgi;
+ u32 format;
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
RX_NO_DATA_NOTIF, 0) < 2) {
IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
- rate_n_flags);
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ phy_data.rate_n_flags);
+ phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
- rate_n_flags);
+ phy_data.rate_n_flags);
}
- format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
return;
@@ -2072,10 +2097,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
- energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
- channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
-
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -2106,86 +2127,31 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
- /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (format == RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
- phy_info, queue);
-
- iwl_mvm_decode_lsig(skb, &phy_data);
-
- rx_status->device_timestamp = gp2_on_air_rise;
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(channel,
- rx_status->band);
- iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
- energy_b);
- rcu_read_lock();
+ iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
- is_sgi = format == RATE_MCS_HE_MSK ?
- iwl_he_is_sgi(rate_n_flags) :
- rate_n_flags & RATE_MCS_SGI_MSK;
-
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
- rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (format == RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (format == RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_VHT;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
- /*
- * take the nss from the rx_vec since the rate_n_flags has
- * only 2 bits for the nss which gives a max of 4 ss but
- * there may be up to 8 spatial streams
- */
+ /*
+ * Override the nss from the rx_vec since the rate_n_flags has
+ * only 2 bits for the nss which gives a max of 4 ss but there
+ * may be up to 8 spatial streams.
+ */
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
- } else if (format == RATE_MCS_HE_MSK) {
+ break;
+ case RATE_MCS_HE_MSK:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
- } else {
- int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
- rx_status->band);
-
- if (WARN(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band)) {
- kfree_skb(skb);
- goto out;
- }
- rx_status->rate_idx = rate;
+ break;
}
+ rcu_read_lock();
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
-out:
rcu_read_unlock();
}
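[Note] The rxmq.c rework above stops threading rate_n_flags, channel, the energy values and phy_info through every helper; they are parsed once into struct iwl_mvm_rx_phy_data, and a single iwl_mvm_rx_fill_status() derives the receive status from it, shared by the MPDU path and the no-data path. A stripped-down sketch of that shape (all names and the 2.4 GHz-only frequency math are illustrative, not the driver's):

/*
 * Sketch: collect the parsed PHY fields into one descriptor and fill the
 * receive status from it in a single place, so multiple RX paths share it.
 */
#include <stdint.h>

struct phy_fields {
        uint32_t rate_flags;
        uint32_t timestamp;
        uint8_t channel;
        uint8_t energy_a, energy_b;
};

struct rx_status {
        uint32_t device_timestamp;
        int freq;
        int signal;
};

static int chan_to_freq(uint8_t chan)
{
        /* 2.4 GHz only, for the sketch */
        return 2407 + chan * 5;
}

static void fill_rx_status(const struct phy_fields *phy, struct rx_status *st)
{
        st->device_timestamp = phy->timestamp;
        st->freq = chan_to_freq(phy->channel);
        /* report the stronger chain: smaller energy value => stronger signal */
        st->signal = -(int)(phy->energy_a < phy->energy_b ?
                            phy->energy_a : phy->energy_b);
}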
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 582a95ffc7ab..acd8803dbcdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2626,7 +2626,7 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
u8 scan_ver;
lockdep_assert_held(&mvm->mutex);
- memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
+ memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -3091,7 +3091,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
-static int iwl_scan_req_umac_get_size(u8 scan_ver)
+static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
case 12:
@@ -3104,7 +3104,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
return 0;
}
-int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ff0d3b3df140..cc92706b3d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -116,7 +116,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index f9e08b339e0c..86d20e13bf47 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -926,7 +926,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* Take the min of ieee80211 station and mvm station
*/
max_amsdu_len =
- min_t(unsigned int, sta->max_amsdu_len,
+ min_t(unsigned int, sta->cur->max_amsdu_len,
iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b16d4ae182d1..4f699862e7f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1155,10 +1155,20 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_NO_JACKET,
+ iwl_cfg_bz_a0_fm4_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
iwl_cfg_gl_a0_fm_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
+ iwl_cfg_gl_b0_fm_b0, iwl_bz_name),
/* BZ Z step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1169,11 +1179,16 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
/* BNJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_b0_fm_b0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
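[Note] The pcie/drv.c hunk splits the previous wildcard GL/FM rows into per-silicon-step rows (SILICON_A_STEP vs SILICON_B_STEP); adding more specific rows next to a generic one is the usual way these _IWL_DEV_INFO tables are narrowed. A generic, illustrative sketch of a wildcard match table (not the driver's actual lookup policy):

/*
 * Sketch: each row lists concrete values or a wildcard; lookup returns a row
 * whose non-wildcard fields all match the probed hardware. Ordering policy
 * (most-specific-wins, first-match, ...) is up to the caller.
 */
#include <stddef.h>
#include <stdint.h>

#define ANY 0xffff

struct dev_info {
        uint16_t mac_type;
        uint16_t mac_step;
        uint16_t rf_type;
        const char *name;
};

static const struct dev_info dev_table[] = {
        { 0x42, 0,   0x11, "chip A, A-step" },
        { 0x42, 1,   0x11, "chip A, B-step" },
        { 0x42, ANY, 0x11, "chip A, any step" },   /* catch-all last */
};

static const char *lookup(uint16_t mac, uint16_t step, uint16_t rf)
{
        for (size_t i = 0; i < sizeof(dev_table) / sizeof(dev_table[0]); i++) {
                const struct dev_info *d = &dev_table[i];

                if (d->mac_type != ANY && d->mac_type != mac)
                        continue;
                if (d->mac_step != ANY && d->mac_step != step)
                        continue;
                if (d->rf_type != ANY && d->rf_type != rf)
                        continue;
                return d->name;     /* first full match */
        }
        return NULL;
}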
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 68a4572cee53..9c9f87fe8377 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1110,7 +1110,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
poll = iwl_pcie_napi_poll_msix;
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
- poll, NAPI_POLL_WEIGHT);
+ poll);
napi_enable(&rxq->napi);
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 0a376f112db9..4e0a0c881697 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3848,7 +3848,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
- strlcpy(info->driver, "hostap", sizeof(info->driver));
+ strscpy(info->driver, "hostap", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index b925e327e091..e127453ab51a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -635,7 +635,7 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
/*
* hw/fw has not accumulated enough sample sets.
* Wait for 100ms, this ought to be enough to
- * to get at least one non-null set of channel
+ * get at least one non-null set of channel
* usage statistics.
*/
msleep(100);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e55f153ff26..df51b5b1f171 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -229,6 +229,7 @@ static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
struct hwsim_sta_priv {
u32 magic;
unsigned int last_link;
+ u16 active_links_rx;
};
#define HWSIM_STA_MAGIC 0x6d537749
@@ -652,7 +653,6 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
- struct ieee80211_chanctx_conf *chanctx;
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -1299,6 +1299,8 @@ static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw,
struct sk_buff *skb;
void *msg_head;
+ WARN_ON(!is_valid_ether_addr(addr));
+
if (!_portid && !hwsim_virtio_enabled)
return;
@@ -1561,6 +1563,42 @@ static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
#endif
}
+static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ (ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_data(hdr->frame_control))) {
+ struct ieee80211_sta *sta;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta_by_link_addrs(data->hw, hdr->addr2,
+ hdr->addr1, &link_id);
+ if (sta) {
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
+ if (ieee80211_has_pm(hdr->frame_control))
+ sp->active_links_rx &= ~BIT(link_id);
+ else
+ sp->active_links_rx |= BIT(link_id);
+ }
+ rcu_read_unlock();
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
+
+ mac80211_hwsim_add_vendor_rtap(skb);
+
+ data->rx_pkts++;
+ data->rx_bytes += skb->len;
+ ieee80211_rx_irqsafe(data->hw, skb);
+}
+
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan)
@@ -1688,13 +1726,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.mactime = now + data2->tsf_offset;
- memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
-
- mac80211_hwsim_add_vendor_rtap(nskb);
-
- data2->rx_pkts++;
- data2->rx_bytes += nskb->len;
- ieee80211_rx_irqsafe(data2->hw, nskb);
+ mac80211_hwsim_rx(data2, &rx_status, nskb);
}
spin_unlock(&hwsim_radio_lock);
@@ -1714,12 +1746,7 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
if (!vif->valid_links)
return &vif->bss_conf;
- /* FIXME: handle multicast TX properly */
- if (is_multicast_ether_addr(hdr->addr1) || WARN_ON_ONCE(!sta)) {
- unsigned int first_link = ffs(vif->valid_links) - 1;
-
- return rcu_dereference(vif->link_conf[first_link]);
- }
+ WARN_ON(is_multicast_ether_addr(hdr->addr1));
if (WARN_ON_ONCE(!sta->valid_links))
return &vif->bss_conf;
@@ -1731,6 +1758,12 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
/* round-robin the available link IDs */
link_id = (sp->last_link + i + 1) % ARRAY_SIZE(vif->link_conf);
+ if (!(vif->active_links & BIT(link_id)))
+ continue;
+
+ if (!(sp->active_links_rx & BIT(link_id)))
+ continue;
+
*link_sta = rcu_dereference(sta->link[link_id]);
if (!*link_sta)
continue;
@@ -1739,6 +1772,10 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
if (WARN_ON_ONCE(!bss_conf))
continue;
+ /* can happen while switching links */
+ if (!rcu_access_pointer(bss_conf->chanctx_conf))
+ continue;
+
sp->last_link = link_id;
return bss_conf;
}
@@ -2401,10 +2438,19 @@ static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
hwsim_check_magic(vif);
hwsim_set_sta_magic(sta);
mac80211_hwsim_sta_rc_update(hw, vif, sta, 0);
+ if (sta->valid_links) {
+ WARN(hweight16(sta->valid_links) > 1,
+ "expect to add STA with single link, have 0x%x\n",
+ sta->valid_links);
+ sp->active_links_rx = sta->valid_links;
+ }
+
return 0;
}
@@ -2430,6 +2476,14 @@ static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_NOTEXIST)
return mac80211_hwsim_sta_add(hw, vif, sta);
+ /*
+ * when client is authorized (AP station marked as such),
+ * enable all links
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
+ ieee80211_set_active_links_async(vif, vif->valid_links);
+
return 0;
}
@@ -2866,11 +2920,6 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = ctx;
- mutex_unlock(&hwsim->mutex);
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2882,11 +2931,6 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = NULL;
- mutex_unlock(&hwsim->mutex);
wiphy_dbg(hw->wiphy,
"remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
@@ -2899,11 +2943,6 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = ctx;
- mutex_unlock(&hwsim->mutex);
hwsim_check_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2919,6 +2958,18 @@ static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
hwsim_check_magic(vif);
hwsim_check_chanctx_magic(ctx);
+ /* if we activate a link while already associated wake it up */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true);
+ if (skb) {
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan);
+ local_bh_enable();
+ }
+ }
+
return 0;
}
@@ -2929,6 +2980,22 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
hwsim_check_magic(vif);
hwsim_check_chanctx_magic(ctx);
+
+ /* if we deactivate a link while associated suspend it first */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true);
+ if (skb) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan);
+ local_bh_enable();
+ }
+ }
}
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -2995,18 +3062,22 @@ static int mac80211_hwsim_change_vif_links(struct ieee80211_hw *hw,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
- unsigned long rem = old_links & ~new_links ?: BIT(0);
+ unsigned long rem = old_links & ~new_links;
unsigned long add = new_links & ~old_links;
int i;
+ if (!old_links)
+ rem |= BIT(0);
+ if (!new_links)
+ add |= BIT(0);
+
for_each_set_bit(i, &rem, IEEE80211_MLD_MAX_NUM_LINKS)
mac80211_hwsim_config_mac_nl(hw, old[i]->addr, false);
for_each_set_bit(i, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf;
- /* FIXME: figure out how to get the locking here */
- link_conf = rcu_dereference_protected(vif->link_conf[i], 1);
+ link_conf = link_conf_dereference_protected(vif, i);
if (WARN_ON(!link_conf))
continue;
@@ -3021,6 +3092,13 @@ static int mac80211_hwsim_change_sta_links(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links)
{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
+ hwsim_check_sta_magic(sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ sp->active_links_rx = new_links;
+
return 0;
}
@@ -3208,8 +3286,112 @@ out_err:
static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xffff),
+ .tx_mcs_160 = cpu_to_le16(0xffff),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * Since B0, B1, B2 and B3 are not set in
+ * the supported channel width set field in the
+ * HE PHY capabilities information field the
+ * device is a 20MHz only device on 2.4GHz band.
+ */
+ .only_20mhz = {
+ .rx_tx_mcs7_max_nss = 0x88,
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -3356,9 +3538,132 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P?*/
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field include all
+ * the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -3529,9 +3834,153 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P?*/
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_6ghz_capa = {
+ .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN |
+ IEEE80211_HE_6GHZ_CAP_SM_PS |
+ IEEE80211_HE_6GHZ_CAP_RD_RESPONDER |
+ IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS),
+ },
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -3896,7 +4345,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_remain_on_channel_duration = 1000;
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
- data->chanctx = NULL;
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
@@ -4471,13 +4919,9 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (data2->use_chanctx) {
if (data2->tmp_chan)
channel = data2->tmp_chan;
- else if (data2->chanctx)
- channel = data2->chanctx->def.chan;
} else {
channel = data2->channel;
}
- if (!channel)
- goto out;
if (!hwsim_virtio_enabled) {
if (hwsim_net_get_netgroup(genl_info_net(info)) !=
@@ -4508,6 +4952,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
rx_status.freq);
if (!iter_data.channel)
goto out;
+ rx_status.band = iter_data.channel->band;
mutex_lock(&data2->mutex);
if (!hwsim_chans_compat(iter_data.channel, channel)) {
@@ -4520,11 +4965,13 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
}
}
mutex_unlock(&data2->mutex);
+ } else if (!channel) {
+ goto out;
} else {
rx_status.freq = channel->center_freq;
+ rx_status.band = channel->band;
}
- rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
@@ -4534,10 +4981,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
ieee80211_is_probe_resp(hdr->frame_control))
rx_status.boottime_ns = ktime_get_boottime_ns();
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
- data2->rx_pkts++;
- data2->rx_bytes += skb->len;
- ieee80211_rx_irqsafe(data2->hw, skb);
+ mac80211_hwsim_rx(data2, &rx_status, skb);
return 0;
err:
@@ -4912,6 +5356,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = hwsim_ops,
.n_small_ops = ARRAY_SIZE(hwsim_ops),
+ .resv_start_op = HWSIM_CMD_DEL_MAC_ADDR + 1,
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
@@ -5060,6 +5505,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
nlh = nlmsg_hdr(skb);
gnlh = nlmsg_data(nlh);
+
+ if (skb->len < nlh->nlmsg_len)
+ return -EINVAL;
+
err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
hwsim_genl_policy, NULL);
if (err) {
@@ -5102,7 +5551,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
skb->data = skb->head;
- skb_set_tail_pointer(skb, len);
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, len);
hwsim_virtio_handle_cmd(skb);
spin_lock_irqsave(&hwsim_virtio_lock, flags);
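[Note] The hwsim changes above add per-station link tracking: mac80211_hwsim_rx() sets or clears a bit in active_links_rx according to the PM flag of the frame received on that link, and mac80211_hwsim_select_tx_link() only round-robins over links that are active on the vif and marked awake in that mask. A reduced sketch of the bookkeeping, with hypothetical names and no locking or frame parsing:

/*
 * Sketch: a per-peer bitmask of awake links, driven by the power-management
 * bit of received frames, consulted by the transmit link selector.
 */
#include <stdbool.h>
#include <stdint.h>

struct peer {
        uint16_t active_links_rx;   /* bit N set => link N is awake */
        unsigned int last_link;
};

static void note_rx(struct peer *p, unsigned int link_id, bool pm_bit_set)
{
        if (pm_bit_set)
                p->active_links_rx &= ~(1u << link_id);  /* link went to sleep */
        else
                p->active_links_rx |= 1u << link_id;     /* link is awake */
}

static int pick_tx_link(struct peer *p, uint16_t valid_links, unsigned int n_links)
{
        for (unsigned int i = 0; i < n_links; i++) {
                unsigned int link = (p->last_link + i + 1) % n_links;

                if (!(valid_links & (1u << link)))
                        continue;
                if (!(p->active_links_rx & (1u << link)))
                        continue;               /* peer is asleep on this link */
                p->last_link = link;
                return (int)link;
        }
        return -1;                              /* no usable link right now */
}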
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index b0b3f59dabc6..3e065cbb0af9 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -546,7 +546,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
pos = scanresp->bssdesc_and_tlvbuffer;
lbs_deb_hex(LBS_DEB_SCAN, "SCAN_RSP", scanresp->bssdesc_and_tlvbuffer,
- scanresp->bssdescriptsize);
+ bsssize);
tsfdesc = pos + bsssize;
tsfsize = 4 + 8 * scanresp->nr_sets;
@@ -1435,7 +1435,7 @@ static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
}
static int lbs_cfg_set_default_key(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -1455,8 +1455,8 @@ static int lbs_cfg_set_default_key(struct wiphy *wiphy,
static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 idx, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct lbs_private *priv = wiphy_priv(wiphy);
u16 key_info;
@@ -1516,7 +1516,8 @@ static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
diff --git a/drivers/net/wireless/marvell/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c
index d8e4f29b690d..9f53308a9935 100644
--- a/drivers/net/wireless/marvell/libertas/ethtool.c
+++ b/drivers/net/wireless/marvell/libertas/ethtool.c
@@ -20,8 +20,8 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
- strlcpy(info->driver, "libertas", sizeof(info->driver));
- strlcpy(info->version, lbs_driver_version, sizeof(info->version));
+ strscpy(info->driver, "libertas", sizeof(info->driver));
+ strscpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
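[Note] The ethtool.c hunk is part of the tree-wide strlcpy() to strscpy() conversion: strscpy() returns the number of characters copied, or -E2BIG when the destination was too small, instead of strlen(src), which makes truncation easy to detect. A userspace stand-in for that behaviour (simplified: unlike the kernel helper, it reads the whole source string):

#include <stddef.h>
#include <string.h>

#define E2BIG_ERR (-7)   /* stand-in for the kernel's -E2BIG */

static long my_strscpy(char *dst, const char *src, size_t size)
{
        size_t src_len = strlen(src);

        if (size == 0)
                return E2BIG_ERR;
        if (src_len >= size) {
                memcpy(dst, src, size - 1);
                dst[size - 1] = '\0';
                return E2BIG_ERR;       /* copied, but truncated */
        }
        memcpy(dst, src, src_len + 1);
        return (long)src_len;           /* characters copied */
}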
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 5c9f295536ea..8f5220cee112 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -39,8 +39,7 @@ unsigned int lbs_debug;
EXPORT_SYMBOL_GPL(lbs_debug);
module_param_named(libertas_debug, lbs_debug, int, 0644);
-unsigned int lbs_disablemesh;
-EXPORT_SYMBOL_GPL(lbs_disablemesh);
+static unsigned int lbs_disablemesh;
module_param_named(libertas_disablemesh, lbs_disablemesh, int, 0644);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index bd835288ce57..a04b66284af4 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -335,7 +335,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
struct mwifiex_sta_node *node;
/*
- * If we get a TID, ta pair which is already present dispatch all the
+ * If we get a TID, ta pair which is already present dispatch all
* the packets and move the window size until the ssn
*/
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 134114ac1ac0..535995e8279f 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -142,7 +142,8 @@ static void *mwifiex_cfg80211_get_adapter(struct wiphy *wiphy)
*/
static int
mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
static const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -431,7 +432,7 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast,
+ int link_id, u8 key_index, bool unicast,
bool multicast)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
@@ -456,8 +457,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
*/
static int
mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
struct mwifiex_wep_key *wep_key;
@@ -494,6 +495,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
struct net_device *netdev,
+ int link_id,
u8 key_index)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 26a48d8f49be..b4f945a549f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -2104,7 +2104,7 @@ struct mwifiex_fw_mef_entry {
struct host_cmd_ds_mef_cfg {
__le32 criteria;
__le16 num_entries;
- struct mwifiex_fw_mef_entry mef_entry[];
+ u8 mef_entry_data[];
} __packed;
#define CONNECTION_TYPE_INFRA 0
@@ -2254,7 +2254,7 @@ struct coalesce_receive_filt_rule {
struct host_cmd_ds_coalesce_cfg {
__le16 action;
__le16 num_of_rules;
- struct coalesce_receive_filt_rule rule[];
+ u8 rule_data[];
} __packed;
struct host_cmd_ds_multi_chan_policy {
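The two structure changes above replace typed flexible arrays with plain byte buffers: each MEF entry and coalesce rule is followed by variable-length data, so indexing a fixed-size struct array would not match the on-wire layout. A simplified sketch of appending one variable-sized entry into such a buffer (types and names are stand-ins, not the driver's):

#include <linux/string.h>
#include <linux/types.h>

struct example_cfg_hdr {
	__le16 action;
	__le16 num_entries;
	u8 entry_data[];		/* variable-length entries follow */
} __packed;

struct example_entry {
	u8 mode;
	u8 action;
	/* variable-length expression bytes follow the fixed part */
} __packed;

static u8 *example_append_entry(u8 *pos, u8 mode, u8 action,
				const u8 *expr, size_t expr_len)
{
	struct example_entry *ent = (struct example_entry *)pos;

	ent->mode = mode;
	ent->action = action;
	pos += sizeof(*ent);

	memcpy(pos, expr, expr_len);	/* variable-length tail */
	return pos + expr_len;
}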
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index fc77489cc511..7dddb4b5dea1 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -51,9 +51,10 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->if_ops.card_reset(adapter);
}
-static void fw_dump_timer_fn(struct timer_list *t)
+static void fw_dump_work(struct work_struct *work)
{
- struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+ struct mwifiex_adapter *adapter =
+ container_of(work, struct mwifiex_adapter, devdump_work.work);
mwifiex_upload_device_dump(adapter);
}
@@ -309,7 +310,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->active_scan_triggered = false;
timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
adapter->devdump_len = 0;
- timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
+ INIT_DELAYED_WORK(&adapter->devdump_work, fw_dump_work);
}
/*
@@ -388,7 +389,7 @@ static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
del_timer(&adapter->wakeup_timer);
- del_timer_sync(&adapter->devdump_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);
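The init.c hunks above convert the firmware-dump timer into a delayed work item, which lets the handler sleep (the upload path can block) and is torn down with cancel_delayed_work_sync(). A minimal sketch of the same conversion pattern, with illustrative names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_adapter {
	struct delayed_work dump_work;
};

static void example_dump_work(struct work_struct *work)
{
	struct example_adapter *ad =
		container_of(work, struct example_adapter, dump_work.work);

	/* may sleep here, unlike in a timer callback */
	(void)ad;
}

static void example_init(struct example_adapter *ad)
{
	INIT_DELAYED_WORK(&ad->dump_work, example_dump_work);
}

static void example_arm(struct example_adapter *ad)
{
	/* replaces mod_timer(..., jiffies + msecs_to_jiffies(10000)) */
	schedule_delayed_work(&ad->dump_work, msecs_to_jiffies(10000));
}

static void example_teardown(struct example_adapter *ad)
{
	cancel_delayed_work_sync(&ad->dump_work);
}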
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 87729d251fed..63f861e6b28a 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -37,6 +37,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
+#include <linux/workqueue.h>
#include "decl.h"
#include "ioctl.h"
@@ -1043,7 +1044,7 @@ struct mwifiex_adapter {
/* Device dump data/length */
void *devdump_data;
int devdump_len;
- struct timer_list devdump_timer;
+ struct delayed_work devdump_work;
bool ignore_btcoex_events;
};
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index f7f9277602a5..5dcf61761a16 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -644,7 +644,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- int retval;
+ int retval __maybe_unused;
mwifiex_dbg(adapter, EVENT,
"event: Wakeup device...\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 512b5bb9cf6f..e2800a831c8e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1435,7 +1435,7 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
mef_entry = (struct mwifiex_fw_mef_entry *)pos;
mef_entry->mode = mef->mef_entry[i].mode;
mef_entry->action = mef->mef_entry[i].action;
- pos += sizeof(*mef_cfg->mef_entry);
+ pos += sizeof(*mef_entry);
if (mwifiex_cmd_append_rpn_expression(priv,
&mef->mef_entry[i], &pos))
@@ -1631,7 +1631,7 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
coalesce_cfg->action = cpu_to_le16(cmd_action);
coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
- rule = coalesce_cfg->rule;
+ rule = (void *)coalesce_cfg->rule_data;
for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index b95e90a7d124..df9cdd10a494 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -611,8 +611,8 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
* transmission event gets lost; in this corner case, the
* user would still get a partial dump.
*/
- mod_timer(&adapter->devdump_timer,
- jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ schedule_delayed_work(&adapter->devdump_work,
+ msecs_to_jiffies(MWIFIEX_TIMER_10S));
}
/* Overflow check */
@@ -623,7 +623,7 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
adapter->event_skb->data, event_skb->len);
adapter->devdump_len += event_skb->len;
- if (le16_to_cpu(fw_dump_hdr->type == FW_DUMP_INFO_ENDED)) {
+ if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
mwifiex_dbg(adapter, MSG,
"receive end of transmission flag event!\n");
goto upload_dump;
@@ -631,7 +631,7 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
return;
upload_dump:
- del_timer_sync(&adapter->devdump_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_upload_device_dump(adapter);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index c2f2ce2a3f95..d3ab9572e711 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -911,14 +911,14 @@ static int mwifiex_usb_prepare_tx_aggr_skb(struct mwifiex_adapter *adapter,
memcpy(payload, skb_tmp->data, skb_tmp->len);
if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
/* do not pad the last packet */
- *(u16 *)payload = cpu_to_le16(skb_tmp->len);
- *(u16 *)&payload[2] =
+ *(__le16 *)payload = cpu_to_le16(skb_tmp->len);
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
skb_trim(skb_aggr, skb_aggr->len - pad);
} else {
/* add aggregation interface header */
- *(u16 *)payload = cpu_to_le16(skb_tmp->len + pad);
- *(u16 *)&payload[2] =
+ *(__le16 *)payload = cpu_to_le16(skb_tmp->len + pad);
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
}
@@ -1097,9 +1097,9 @@ send_aggr_buf:
}
payload = skb->data;
- *(u16 *)&payload[2] =
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
- *(u16 *)payload = cpu_to_le16(skb->len);
+ *(__le16 *)payload = cpu_to_le16(skb->len);
skb_send = skb;
context = &port->tx_data_list[port->tx_data_ix++];
return mwifiex_usb_construct_send_urb(adapter, port, ep,
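The usb.c hunks above only change the pointer casts: cpu_to_le16() yields an __le16, so storing it through a plain u16 pointer trips sparse's endianness checking even though the bytes written are the same. A short sketch of the annotated store, plus the put_unaligned_le16() alternative (illustrative helper):

#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/types.h>

static void example_write_aggr_hdr(u8 *payload, u16 pkt_len, u16 flags)
{
	/* annotated cast, as in the hunk above */
	*(__le16 *)payload = cpu_to_le16(pkt_len);
	*(__le16 *)&payload[2] = cpu_to_le16(flags);

	/* equivalent, and also safe on unaligned destinations */
	put_unaligned_le16(pkt_len, payload);
	put_unaligned_le16(flags, &payload[2]);
}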
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 40cb91097b2e..4901aa02b4fb 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -758,7 +758,7 @@ mt76_dma_init(struct mt76_dev *dev,
dev->napi_dev.threaded = 1;
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
napi_enable(&dev->napi[i]);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 253cbc1956d1..6de13d641438 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -267,7 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_highest |=
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4da77d47b0a6..87db9498dea4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -252,6 +252,30 @@ struct mt76_queue_ops {
void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+ MT_PHY_TYPE_VHT,
+ MT_PHY_TYPE_HE_SU = 8,
+ MT_PHY_TYPE_HE_EXT_SU,
+ MT_PHY_TYPE_HE_TB,
+ MT_PHY_TYPE_HE_MU,
+ __MT_PHY_TYPE_HE_MAX,
+};
+
+struct mt76_sta_stats {
+ u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
+ u64 tx_bw[4]; /* 20, 40, 80, 160 */
+ u64 tx_nss[4]; /* 1, 2, 3, 4 */
+ u64 tx_mcs[16]; /* mcs idx */
+ u64 tx_bytes;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+};
+
enum mt76_wcid_flags {
MT_WCID_FLAG_CHECK_PS,
MT_WCID_FLAG_PS,
@@ -299,6 +323,8 @@ struct mt76_wcid {
struct list_head list;
struct idr pktid;
+
+ struct mt76_sta_stats stats;
};
struct mt76_txq {
@@ -342,7 +368,8 @@ struct mt76_rx_tid {
#define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
-#define MT_PACKET_ID_FIRST 2
+#define MT_PACKET_ID_WED 2
+#define MT_PACKET_ID_FIRST 3
#define MT_PACKET_ID_HAS_RATE BIT(7)
/* This is timer for when to give up when waiting for TXS callback,
* with starting time being the time at which the DMA_DONE callback
@@ -527,7 +554,6 @@ struct mt76_usb {
struct mt76_reg_pair *rp;
int rp_len;
u32 base;
- bool burst;
} mcu;
};
@@ -815,26 +841,6 @@ struct mt76_power_limits {
s8 ru[7][12];
};
-enum mt76_phy_type {
- MT_PHY_TYPE_CCK,
- MT_PHY_TYPE_OFDM,
- MT_PHY_TYPE_HT,
- MT_PHY_TYPE_HT_GF,
- MT_PHY_TYPE_VHT,
- MT_PHY_TYPE_HE_SU = 8,
- MT_PHY_TYPE_HE_EXT_SU,
- MT_PHY_TYPE_HE_TB,
- MT_PHY_TYPE_HE_MU,
- __MT_PHY_TYPE_HE_MAX,
-};
-
-struct mt76_sta_stats {
- u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
- u64 tx_bw[4]; /* 20, 40, 80, 160 */
- u64 tx_nss[4]; /* 1, 2, 3, 4 */
- u64 tx_mcs[16]; /* mcs idx */
-};
-
struct mt76_ethtool_worker_info {
u64 *data;
int idx;
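The mt76.h hunks above move mt76_sta_stats into struct mt76_wcid and grow it with byte, packet, retry and fail counters, so the txwi path and the TXS handlers update a single per-station copy. A stripped-down sketch of the accumulation side (stand-in types, not the driver's):

#include <linux/types.h>

struct example_sta_stats {
	u64 tx_bytes;
	u32 tx_packets;
	u32 tx_retries;
	u32 tx_failed;
};

struct example_wcid {
	struct example_sta_stats stats;
};

/* host-built (non-offloaded) frames are counted while the txwi is written */
static void example_count_host_tx(struct example_wcid *wcid, unsigned int len)
{
	wcid->stats.tx_bytes += len;
	wcid->stats.tx_packets++;
}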
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 051715ed90dd..ca50feb0b3a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -658,7 +658,7 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
msta->rate_probe = false;
mt7603_wtbl_set_smps(dev, msta,
- sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
spin_unlock_bh(&dev->mt76.lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ad6c7d632eed..d6aae60c440d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1088,7 +1088,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 9bf8545c8c17..8d4733f87cda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1195,12 +1195,16 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ mt7615_mutex_acquire(dev);
+
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
+
+ mt7615_mutex_release(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 49ab3a1f3b9b..304212f5f8da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -83,6 +83,7 @@ static int mt7663s_probe(struct sdio_func *func,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
+ .rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
@@ -180,7 +181,6 @@ static void mt7663s_remove(struct sdio_func *func)
mt76_free_device(&dev->mt76);
}
-#ifdef CONFIG_PM
static int mt7663s_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -235,28 +235,20 @@ static int mt7663s_resume(struct device *dev)
return err;
}
-static const struct dev_pm_ops mt7663s_pm_ops = {
- .suspend = mt7663s_suspend,
- .resume = mt7663s_resume,
-};
-#endif
-
MODULE_DEVICE_TABLE(sdio, mt7663s_table);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7663s_pm_ops, mt7663s_suspend, mt7663s_resume);
+
static struct sdio_driver mt7663s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7663s_probe,
.remove = mt7663s_remove,
.id_table = mt7663s_table,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &mt7663s_pm_ops,
- }
-#endif
+ .drv.pm = pm_sleep_ptr(&mt7663s_pm_ops),
};
module_sdio_driver(mt7663s_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 967641aebf5f..f2d651d7adff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -119,6 +119,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
+ .rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 75afcb469d3c..635192c878cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -63,6 +63,12 @@ enum {
REPEATER_BSSID_MAX = 0x3f,
};
+struct mt76_connac_reg_map {
+ u32 phys;
+ u32 maps;
+ u32 size;
+};
+
struct mt76_connac_pm {
bool enable:1;
bool enable_user:1;
@@ -348,9 +354,10 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
+bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ __le32 *txs_data);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
- int pid, __le32 *txs_data,
- struct mt76_sta_stats *stats);
+ int pid, __le32 *txs_data);
void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
struct sk_buff *skb,
__le32 *rxv, u32 mode);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index 67ce216fb564..f33171bcd343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -158,6 +158,14 @@ enum {
#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
+/* PPDU based TXS */
+#define MT_TXS5_MPDU_TX_BYTE GENMASK(22, 0)
+#define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23)
+
+#define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23)
+
+#define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23)
+
/* RXD DW1 */
#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 18dea8e1fb20..34ac3d81a510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -490,6 +490,10 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
+
+ /* counting non-offloading skbs */
+ wcid->stats.tx_bytes += skb->len;
+ wcid->stats.tx_packets++;
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
@@ -550,35 +554,29 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
-bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
- int pid, __le32 *txs_data,
- struct mt76_sta_stats *stats)
+bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ __le32 *txs_data)
{
+ struct mt76_sta_stats *stats = &wcid->stats;
struct ieee80211_supported_band *sband;
struct mt76_phy *mphy;
- struct ieee80211_tx_info *info;
- struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
bool cck = false;
u32 txrate, txs, mode;
- mt76_tx_status_lock(dev, &list);
- skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
- if (!skb)
- goto out;
-
txs = le32_to_cpu(txs_data[0]);
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
-
- info->status.rates[0].idx = -1;
+ /* PPDU based reporting */
+ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
+ stats->tx_bytes +=
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+ stats->tx_packets +=
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
+ stats->tx_failed +=
+ le32_get_bits(txs_data[6], MT_TXS6_MPDU_FAIL_CNT);
+ stats->tx_retries +=
+ le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_CNT);
+ }
txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
@@ -613,7 +611,7 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
if (rate.mcs > 31)
- goto out;
+ return false;
rate.flags = RATE_INFO_FLAGS_MCS;
if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
@@ -621,7 +619,7 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
break;
case MT_PHY_TYPE_VHT:
if (rate.mcs > 9)
- goto out;
+ return false;
rate.flags = RATE_INFO_FLAGS_VHT_MCS;
break;
@@ -630,14 +628,14 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
case MT_PHY_TYPE_HE_TB:
case MT_PHY_TYPE_HE_MU:
if (rate.mcs > 11)
- goto out;
+ return false;
rate.he_gi = wcid->rate.he_gi;
rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
rate.flags = RATE_INFO_FLAGS_HE_MCS;
break;
default:
- goto out;
+ return false;
}
stats->tx_mode[mode]++;
@@ -662,10 +660,34 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
wcid->rate = rate;
-out:
- if (skb)
- mt76_tx_status_skb_done(dev, skb, &list);
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_txs);
+
+bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data)
+{
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ mt76_tx_status_lock(dev, &list);
+ skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool noacked = !(info->flags & IEEE80211_TX_STAT_ACK);
+
+ if (!(le32_to_cpu(txs_data[0]) & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !noacked;
+ info->status.rates[0].idx = -1;
+ wcid->stats.tx_failed += noacked;
+
+ mt76_connac2_mac_fill_txs(dev, wcid, txs_data);
+ mt76_tx_status_skb_done(dev, skb, &list);
+ }
mt76_tx_status_unlock(dev, &list);
return !!skb;
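The refactoring above splits the rate and counter parsing out of mt76_connac2_mac_add_txs_skb() so that PPDU-format TXS entries, which carry no per-skb status, can still feed the statistics: words 5..7 hold the per-PPDU byte, MPDU, fail and retry counts as bitfields read with le32_get_bits(). A self-contained sketch of that extraction, using stand-in field names that mirror the MT_TXS5/6/7 defines added earlier:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_TXS5_MPDU_TX_BYTE	GENMASK(22, 0)
#define EX_TXS5_MPDU_TX_CNT	GENMASK(31, 23)
#define EX_TXS6_MPDU_FAIL_CNT	GENMASK(31, 23)
#define EX_TXS7_MPDU_RETRY_CNT	GENMASK(31, 23)

struct example_stats {
	u64 tx_bytes;
	u32 tx_packets;
	u32 tx_failed;
	u32 tx_retries;
};

static void example_fill_ppdu_txs(struct example_stats *stats,
				  const __le32 *txs_data)
{
	stats->tx_bytes   += le32_get_bits(txs_data[5], EX_TXS5_MPDU_TX_BYTE);
	stats->tx_packets += le32_get_bits(txs_data[5], EX_TXS5_MPDU_TX_CNT);
	stats->tx_failed  += le32_get_bits(txs_data[6], EX_TXS6_MPDU_FAIL_CNT);
	stats->tx_retries += le32_get_bits(txs_data[7], EX_TXS7_MPDU_RETRY_CNT);
}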
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 9b17bd97ec09..011fc9729b38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -260,8 +260,10 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, len);
+ if (sta_hdr) {
+ len += le16_to_cpu(sta_hdr->len);
+ sta_hdr->len = cpu_to_le16(len);
+ }
return ptlv;
}
@@ -594,14 +596,14 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vif->type != NL80211_IFTYPE_STATION)
return;
- if (!sta->max_amsdu_len)
+ if (!sta->deflink.agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ amsdu->max_mpdu_size = sta->deflink.agg.max_amsdu_len >=
IEEE80211_MAX_MPDU_LEN_VHT_7991;
wcid->amsdu = true;
@@ -896,7 +898,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
- smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ smps->smps = (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
@@ -2648,7 +2650,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
+/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
{
@@ -2886,6 +2888,10 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
goto out;
}
+ snprintf(dev->hw->wiphy->fw_version,
+ sizeof(dev->hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
release_firmware(fw);
if (!fw_wa)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index f1d7c05bd794..718f427d8f6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -10,6 +10,7 @@
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define FW_FEATURE_ENCRY_MODE BIT(4)
#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+#define FW_FEATURE_NON_DL BIT(6)
#define DL_MODE_ENCRYPT BIT(0)
#define DL_MODE_KEY_IDX GENMASK(2, 1)
@@ -33,6 +34,12 @@
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
+enum {
+ FW_TYPE_DEFAULT = 0,
+ FW_TYPE_CLC = 2,
+ FW_TYPE_MAX_NUM = 255
+};
+
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
@@ -174,7 +181,8 @@ struct mt76_connac2_fw_region {
__le32 addr;
__le32 len;
u8 feature_set;
- u8 rsv1[15];
+ u8 type;
+ u8 rsv1[14];
} __packed;
struct tlv {
@@ -1172,6 +1180,7 @@ enum {
MCU_CE_CMD_SET_ROC = 0x1c,
MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
+ MCU_CE_CMD_SET_CLC = 0x5c,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index de30cf5e2d2f..93d96739f802 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -404,7 +404,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ if (nss > 1 && sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
txwi_flags |= MT_TXWI_FLAGS_MMPS;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index c6c16fe8ee85..02da543dfc5c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -21,29 +21,16 @@ static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- u32 reg, val;
int i;
- if (usb->mcu.burst) {
- WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
-
- reg = usb->mcu.rp[0].reg - usb->mcu.base;
- for (i = 0; i < usb->mcu.rp_len; i++) {
- val = get_unaligned_le32(data + 4 * i);
- usb->mcu.rp[i].reg = reg++;
- usb->mcu.rp[i].value = val;
- }
- } else {
- WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
-
- for (i = 0; i < usb->mcu.rp_len; i++) {
- reg = get_unaligned_le32(data + 8 * i) -
- usb->mcu.base;
- val = get_unaligned_le32(data + 8 * i + 4);
-
- WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
- usb->mcu.rp[i].value = val;
- }
+ WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ u32 reg = get_unaligned_le32(data + 8 * i) - usb->mcu.base;
+ u32 val = get_unaligned_le32(data + 8 * i + 4);
+
+ WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+ usb->mcu.rp[i].value = val;
}
}
@@ -207,7 +194,6 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = data;
usb->mcu.rp_len = n;
usb->mcu.base = base;
- usb->mcu.burst = false;
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index fd76db8f5269..6ef3431cad64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -23,9 +23,9 @@ mt7915_implicit_txbf_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
- return -EBUSY;
-
+ /* Existing connected stations need to reconnect to apply the
+ * new implicit txbf configuration.
+ */
dev->ibf = !!val;
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 60ae834d95a6..be97dede2634 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -176,7 +176,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
/*
* We don't support reading GI info from txs packets.
* For accurate tx status reporting and AQL improvement,
- we need to make sure that flags match so polling GI
+ * we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
rate = &msta->wcid.rate;
@@ -232,7 +232,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
- struct mt7915_sta *msta;
+ struct mt7915_sta *msta = NULL;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
@@ -1001,7 +1001,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
- if (pid < MT_PACKET_ID_FIRST)
+ if (pid < MT_PACKET_ID_WED)
return;
if (wcidx >= mt7915_wtbl_size(dev))
@@ -1015,8 +1015,11 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
msta = container_of(wcid, struct mt7915_sta, wcid);
- mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
- &msta->stats);
+ if (pid == MT_PACKET_ID_WED)
+ mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
+ else
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
+
if (!wcid->sta)
goto out;
@@ -1047,7 +1050,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
+ mt7915_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
mt7915_debugfs_rx_fw_monitor(dev, data, len);
@@ -1084,7 +1087,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
break;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
+ mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_FW_MONITOR:
@@ -2071,8 +2074,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
}
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
@@ -2122,8 +2126,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
(twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
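The TWT hunks above replace le16p_replace_bits() with an explicit clear of the field followed by le16_encode_bits(), keeping the __le16 annotation on every intermediate value. A minimal sketch of the pattern, with an illustrative field mask rather than the real IEEE80211_TWT_REQTYPE_* define:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_REQTYPE_FLOWID	GENMASK(9, 7)

static void example_set_flowid(__le16 *req_type, u8 flowid)
{
	*req_type &= ~cpu_to_le16(EX_REQTYPE_FLOWID);
	*req_type |= le16_encode_bits(flowid, EX_REQTYPE_FLOWID);
}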
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index bd3386bf0f8a..89b519cfd14c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1010,6 +1010,23 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* offloaded flows bypass the networking stack, so the driver counts and
+ * reports sta statistics via NL80211_STA_INFO when WED is active.
+ */
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+
+ sinfo->tx_packets = msta->wcid.stats.tx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+
+ sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
+ sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
}
static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
@@ -1224,7 +1241,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index f83067961945..8d297e4aa7d4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -925,7 +925,7 @@ mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->max_amsdu_len)
+ if (!sta->deflink.agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
@@ -934,7 +934,7 @@ mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
amsdu->amsdu_en = true;
msta->wcid.amsdu = true;
- switch (sta->max_amsdu_len) {
+ switch (sta->deflink.agg.max_amsdu_len) {
case IEEE80211_MAX_MPDU_LEN_VHT_11454:
if (!is_mt7915(&dev->mt76)) {
amsdu->max_mpdu_size =
@@ -1304,7 +1304,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
ra->phy = *phy;
break;
case RATE_PARAM_MMPS_UPDATE:
- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
break;
default:
break;
@@ -1360,7 +1360,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct sta_phy phy = {};
int ret, nrates = 0;
-#define __sta_phy_bitrate_mask_check(_mcs, _gi, _he) \
+#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \
do { \
u8 i, gi = mask->control[band]._gi; \
gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
@@ -1373,15 +1373,17 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
continue; \
nrates += hweight16(mask->control[band]._mcs[i]); \
phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ if (_ht) \
+ phy.mcs += 8 * i; \
} \
} while (0)
if (sta->deflink.he_cap.has_he) {
- __sta_phy_bitrate_mask_check(he_mcs, he_gi, 1);
+ __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
} else if (sta->deflink.vht_cap.vht_supported) {
- __sta_phy_bitrate_mask_check(vht_mcs, gi, 0);
+ __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
} else if (sta->deflink.ht_cap.ht_supported) {
- __sta_phy_bitrate_mask_check(ht_mcs, gi, 0);
+ __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
phy.mcs = ffs(mask->control[band].legacy) - 1;
@@ -1459,7 +1461,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->channel = chandef->chan->hw_value;
ra->bw = sta->deflink.bandwidth;
ra->phy.bw = sta->deflink.bandwidth;
- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
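The extra _ht argument in the __sta_phy_bitrate_mask_check() change above exists because HT MCS indices are global (0..31, eight per spatial stream) while VHT and HE indices restart per stream, so only the HT case needs the 8 * i offset when turning a per-stream mask into a fixed-rate index. A small stand-in helper showing the same mapping:

#include <linux/bitops.h>
#include <linux/types.h>

static int example_ht_fixed_mcs(const u16 *ht_mcs_mask, int n_streams)
{
	int i;

	for (i = 0; i < n_streams; i++) {
		if (!ht_mcs_mask[i])
			continue;
		/* lowest MCS selected for stream i, as a 0..31 HT index */
		return (ffs(ht_mcs_mask[i]) - 1) + 8 * i;
	}

	return -1;
}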
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 4499a630e8f1..7bd5f6725d7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -75,6 +75,7 @@ static const u32 mt7915_offs[] = {
[AGG_AWSCR0] = 0x05c,
[AGG_PCR0] = 0x06c,
[AGG_ACR0] = 0x084,
+ [AGG_ACR4] = 0x08c,
[AGG_MRCR] = 0x098,
[AGG_ATCR1] = 0x0f0,
[AGG_ATCR3] = 0x0f4,
@@ -148,6 +149,7 @@ static const u32 mt7916_offs[] = {
[AGG_AWSCR0] = 0x030,
[AGG_PCR0] = 0x040,
[AGG_ACR0] = 0x054,
+ [AGG_ACR4] = 0x05c,
[AGG_MRCR] = 0x068,
[AGG_ATCR1] = 0x1a8,
[AGG_ATCR3] = 0x080,
@@ -204,147 +206,147 @@ static const u32 mt7916_offs[] = {
[ETBF_PAR_RPT0] = 0x100,
};
-static const struct __map mt7915_reg_map[] = {
+static const struct mt76_connac_reg_map mt7915_reg_map[] = {
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
-static const struct __map mt7916_reg_map[] = {
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
- { 0x56000000, 0x04000, 0x1000 }, /* WFDMA_2 (Reserved) */
- { 0x57000000, 0x05000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
- { 0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure cr) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- { 0x820c4000, 0xa8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
- { 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
- { 0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+static const struct mt76_connac_reg_map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
+ { 0x820b0000, 0xae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
-static const struct __map mt7986_reg_map[] = {
- { 0x54000000, 0x402000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
- { 0x55000000, 0x403000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
- { 0x56000000, 0x404000, 0x1000 }, /* WFDMA_2 (Reserved) */
- { 0x57000000, 0x405000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
- { 0x58000000, 0x406000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
- { 0x59000000, 0x407000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
- { 0x820c0000, 0x408000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x40c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x40e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820e0000, 0x420000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x420400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x420800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x420c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x421000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x421400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820ce000, 0x421c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820e7000, 0x421e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820cf000, 0x422000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e9000, 0x423400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x424000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x424200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x424600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x424800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820ca000, 0x426000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
- { 0x820d0000, 0x430000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x00400000, 0x480000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x490000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x820f0000, 0x4a0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0x4a0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0x4a0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0x4a0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0x4a1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0x4a1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0x4a1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0x4a3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0x4a4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0x4a4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0x4a4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0x4a4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- { 0x820c4000, 0x4a8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
- { 0x820b0000, 0x4ae000, 0x1000 }, /* [APB2] WFSYS_ON */
- { 0x80020000, 0x4b0000, 0x10000}, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0x4c0000, 0x10000}, /* WF_TOP_MISC_ON */
- { 0x89000000, 0x4d0000, 0x1000 }, /* WF_MCU_CFG_ON */
- { 0x89010000, 0x4d1000, 0x1000 }, /* WF_MCU_CIRQ */
- { 0x89020000, 0x4d2000, 0x1000 }, /* WF_MCU_GPT */
- { 0x89030000, 0x4d3000, 0x1000 }, /* WF_MCU_WDT */
- { 0x80010000, 0x4d4000, 0x1000 }, /* WF_AXIDMA */
+static const struct mt76_connac_reg_map mt7986_reg_map[] = {
+ { 0x54000000, 0x402000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x403000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x404000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x405000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x406000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x407000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x408000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x40c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x40e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x420000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x420400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x420800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x420c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x421000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x421400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x421c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x421e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x422000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x423400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x424000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x424200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x424600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x424800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x426000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x430000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x480000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x490000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x820f0000, 0x4a0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0x4a0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0x4a0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0x4a0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0x4a1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0x4a1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0x4a1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0x4a3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0x4a4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0x4a4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0x4a4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0x4a4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0x4a8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
+ { 0x820b0000, 0x4ae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0x4b0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0x4c0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x89000000, 0x4d0000, 0x01000 }, /* WF_MCU_CFG_ON */
+ { 0x89010000, 0x4d1000, 0x01000 }, /* WF_MCU_CIRQ */
+ { 0x89020000, 0x4d2000, 0x01000 }, /* WF_MCU_GPT */
+ { 0x89030000, 0x4d3000, 0x01000 }, /* WF_MCU_WDT */
+ { 0x80010000, 0x4d4000, 0x01000 }, /* WF_AXIDMA */
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
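The tables above are { phys, maps, size } windows, now shared through struct mt76_connac_reg_map; a consumer walks them until the physical address falls inside a window and rebases it onto the mapped offset, with the all-zero entry terminating the search. A hedged sketch of such a lookup (the real drivers use their own helpers):

#include <linux/types.h>

struct example_reg_map {
	u32 phys;
	u32 maps;
	u32 size;
};

static u32 example_reg_map_l1(const struct example_reg_map *map, u32 addr)
{
	int i;

	for (i = 0; map[i].size; i++) {
		if (addr < map[i].phys || addr >= map[i].phys + map[i].size)
			continue;
		return map[i].maps + (addr - map[i].phys);
	}

	return 0;	/* no window found */
}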
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 54ef2a12a443..1eb11617a625 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -127,8 +127,6 @@ struct mt7915_sta {
unsigned long jiffies;
unsigned long ampdu_state;
- struct mt76_sta_stats stats;
-
struct mt76_connac_sta_key_conf bip;
struct {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index d74f609775d3..728a879c3b00 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -99,6 +99,7 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
int ret;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
@@ -112,18 +113,38 @@ static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
if (!ret)
return -EAGAIN;
+ phy = &dev->phy;
+ mt76_set(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_set(dev, MT_AGG_ACR4(phy->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
+
return 0;
}
static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = MT7915_TOKEN_SIZE;
spin_unlock_bh(&dev->mt76.token_lock);
+
+ /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
+ * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
+ */
+ phy = &dev->phy;
+ mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 2493c3ad3c56..5920e705835a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -4,17 +4,11 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
-struct __map {
- u32 phys;
- u32 maps;
- u32 size;
-};
-
/* used to differentiate between generations */
struct mt7915_reg_desc {
const u32 *reg_rev;
const u32 *offs_rev;
- const struct __map *map;
+ const struct mt76_connac_reg_map *map;
u32 map_size;
};
@@ -52,6 +46,7 @@ enum offs_rev {
AGG_AWSCR0,
AGG_PCR0,
AGG_ACR0,
+ AGG_ACR4,
AGG_MRCR,
AGG_ATCR1,
AGG_ATCR3,
@@ -471,6 +466,9 @@ enum offs_rev {
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
+#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR4))
+#define MT_AGG_ACR_PPDU_TXS2H BIT(1)
+
#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
index be4f07ad3af9..47e034a9b003 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -13,6 +13,7 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
acpi_handle root, handle;
acpi_status status;
u32 i = 0;
+ int ret;
root = ACPI_HANDLE(mdev->dev);
if (!root)
@@ -52,9 +53,11 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
*(*tbl + i) = (u8)sar_unit->integer.value;
}
free:
+ ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+
kfree(sar_root);
- return (i == sar_root->package.count) ? 0 : -EINVAL;
+ return ret;
}
/* MTCL : Country List Table for 6G band */
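The acpi_sar.c hunk above fixes a use-after-free: the success check that used to sit in the return statement read sar_root->package.count after kfree(sar_root), so the result is now computed into ret before the buffer is released. A minimal standalone sketch of the same compute-before-free pattern, with hypothetical names:

#include <stdlib.h>

struct package {
	unsigned int count;
};

/* Evaluate everything that still needs the object, then free it. */
static int finish_parse(struct package *pkg, unsigned int parsed)
{
	int ret = (parsed == pkg->count) ? 0 : -1;	/* read while pkg is live */

	free(pkg);	/* safe: ret no longer depends on pkg */
	return ret;
}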
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
index 54f30401343c..4b647278eb30 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
@@ -11,12 +11,15 @@ enum mt7921_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_WIFI_CONF = 0x07c,
- __MT_EE_MAX = 0x3bf
+ MT_EE_HW_TYPE = 0x55b,
+ __MT_EE_MAX = 0x9ff
};
#define MT_EE_WIFI_CONF_TX_MASK BIT(0)
#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(3, 2)
+#define MT_EE_HW_TYPE_ENCAP BIT(0)
+
enum mt7921_eeprom_band {
MT_EE_NA,
MT_EE_5GHZ,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index cd960e23770f..dcdb3cf04ac1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -39,6 +39,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
+ mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
mt76_connac_mcu_set_channel_domain(hw->priv);
mt7921_set_tx_sar_pwr(hw, NULL);
mt7921_mutex_release(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 47f0aa81ab02..e4868c492bc0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -235,7 +235,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- struct mt7921_sta *msta;
+ struct mt7921_sta *msta = NULL;
u16 seq_ctrl = 0;
__le16 fc = 0;
u8 mode = 0;
@@ -486,7 +486,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
return 0;
}
-void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
struct mt7921_sta *msta;
u16 fc, tid;
@@ -509,7 +509,6 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
-EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
@@ -539,8 +538,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
msta = container_of(wcid, struct mt7921_sta, wcid);
- mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
- &msta->stats);
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
@@ -552,7 +550,134 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
out:
rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
+
+void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt76_connac_txp_skb_unmap(mdev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+ if (sta) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ wcid_idx = wcid->idx;
+ } else {
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+out:
+ t->skb = NULL;
+ mt76_put_txwi(mdev, t);
+}
+EXPORT_SYMBOL_GPL(mt7921_txwi_free);
+
+static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
+{
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
+ LIST_HEAD(free_list);
+ bool wake = false;
+ u8 i, count;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ if (WARN_ON_ONCE((void *)&tx_info[count] > end))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(tx_info[i]);
+ u8 stat;
+
+ /* 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7921_sta *msta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ continue;
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
+ }
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+
+ rcu_read_lock();
+ mt7921_mac_sta_poll(dev);
+ rcu_read_unlock();
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
+ mt7921_mac_tx_free(dev, data, len); /* mmio */
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7921_rx_check);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
@@ -570,6 +695,11 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
type = PKT_TYPE_NORMAL_MCU;
switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
+ mt7921_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
case PKT_TYPE_RX_EVENT:
mt7921_mcu_rx_event(dev, skb);
break;
@@ -780,6 +910,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
void mt7921_reset(struct mt76_dev *mdev)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt76_connac_pm *pm = &dev->pm;
if (!dev->hw_init_done)
return;
@@ -787,8 +918,12 @@ void mt7921_reset(struct mt76_dev *mdev)
if (dev->hw_full_reset)
return;
+ if (pm->suspended)
+ return;
+
queue_work(dev->mt76.wq, &dev->reset_work);
}
+EXPORT_SYMBOL_GPL(mt7921_reset);
void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 1438a9f8d1fd..7e409ac7d9a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -752,6 +752,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
@@ -1045,7 +1046,7 @@ mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
@@ -1404,6 +1405,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ mt7921_mutex_acquire(dev);
+
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
@@ -1411,6 +1414,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
MCU_UNI_CMD(STA_REC_UPDATE));
+
+ mt7921_mutex_release(dev);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1526,17 +1531,23 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7921_dev *dev = mt7921_hw_dev(hw);
int err;
+ mt7921_mutex_acquire(dev);
+
err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
true);
if (err)
- return err;
+ goto out;
err = mt7921_mcu_set_bss_pm(dev, vif, true);
if (err)
- return err;
+ goto out;
+
+ err = mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+out:
+ mt7921_mutex_release(dev);
- return mt7921_mcu_sta_update(dev, NULL, vif, true,
- MT76_STA_INFO_STATE_NONE);
+ return err;
}
static void
@@ -1548,11 +1559,16 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7921_dev *dev = mt7921_hw_dev(hw);
int err;
+ mt7921_mutex_acquire(dev);
+
err = mt7921_mcu_set_bss_pm(dev, vif, false);
if (err)
- return;
+ goto out;
mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
+
+out:
+ mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index da12d0ae0835..67bf92969a7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -2,14 +2,20 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/fs.h>
+#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
+#include "eeprom.h"
#include "mcu.h"
#include "mac.h"
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
+static bool mt7921_disable_clc;
+module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
+MODULE_PARM_DESC(disable_clc, "disable CLC support");
+
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
@@ -84,6 +90,27 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
+static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
+{
+ struct mt7921_mcu_eeprom_info *res, req = {
+ .addr = cpu_to_le32(round_down(offset,
+ MT7921_EEPROM_BLOCK_SIZE)),
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct mt7921_mcu_eeprom_info *)skb->data;
+ *val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int
@@ -354,6 +381,90 @@ static char *mt7921_ram_name(struct mt7921_dev *dev)
return ret;
}
+static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
+{
+ const struct mt76_connac2_fw_trailer *hdr;
+ const struct mt76_connac2_fw_region *region;
+ const struct mt7921_clc *clc;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7921_phy *phy = &dev->phy;
+ const struct firmware *fw;
+ int ret, i, len, offset = 0;
+ u8 *clc_base = NULL, hw_encap = 0;
+
+ if (mt7921_disable_clc ||
+ mt76_is_usb(&dev->mt76))
+ return 0;
+
+ if (mt76_is_mmio(&dev->mt76)) {
+ ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+ if (ret)
+ return ret;
+ hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
+ }
+
+ ret = request_firmware(&fw, fw_name, mdev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(mdev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+ for (i = 0; i < hdr->n_region; i++) {
+ region = (const void *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ len = le32_to_cpu(region->len);
+
+ /* check if we have valid buffer size */
+ if (offset + len > fw->size) {
+ dev_err(mdev->dev, "Invalid firmware region\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((region->feature_set & FW_FEATURE_NON_DL) &&
+ region->type == FW_TYPE_CLC) {
+ clc_base = (u8 *)(fw->data + offset);
+ break;
+ }
+ offset += len;
+ }
+
+ if (!clc_base)
+ goto out;
+
+ for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
+ clc = (const struct mt7921_clc *)(clc_base + offset);
+
+ /* do not init buf again if chip reset triggered */
+ if (phy->clc[clc->idx])
+ continue;
+
+ /* header content sanity */
+ if (clc->idx == MT7921_CLC_POWER &&
+ u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+ continue;
+
+ phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
+ le32_to_cpu(clc->len),
+ GFP_KERNEL);
+
+ if (!phy->clc[clc->idx]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
static int mt7921_load_firmware(struct mt7921_dev *dev)
{
int ret;
@@ -423,6 +534,10 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
return err;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ err = mt7921_load_clc(dev, mt7921_ram_name(dev));
+ if (err)
+ return err;
+
return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
@@ -930,3 +1045,86 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&req, sizeof(req), true);
}
+
+static
+int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap,
+ struct mt7921_clc *clc,
+ u8 idx)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 ver;
+ u8 pad0;
+ __le16 len;
+ u8 idx;
+ u8 env;
+ u8 pad1[2];
+ u8 alpha2[2];
+ u8 type[2];
+ u8 rsvd[64];
+ } __packed req = {
+ .idx = idx,
+ .env = env_cap,
+ };
+ int ret, valid_cnt = 0;
+ u8 i, *pos;
+
+ if (!clc)
+ return 0;
+
+ pos = clc->data;
+ for (i = 0; i < clc->nr_country; i++) {
+ struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
+ u16 len = le16_to_cpu(rule->len);
+
+ pos += len + sizeof(*rule);
+ if (rule->alpha2[0] != alpha2[0] ||
+ rule->alpha2[1] != alpha2[1])
+ continue;
+
+ memcpy(req.alpha2, rule->alpha2, 2);
+ memcpy(req.type, rule->type, 2);
+
+ req.len = cpu_to_le16(sizeof(req) + len);
+ skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
+ le16_to_cpu(req.len),
+ sizeof(req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_put_data(skb, rule->data, len);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CE_CMD(SET_CLC), false);
+ if (ret < 0)
+ return ret;
+ valid_cnt++;
+ }
+
+ if (!valid_cnt)
+ return -ENOENT;
+
+ return 0;
+}
+
+int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap)
+{
+ struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
+ int i, ret;
+
+ /* submit all clc config */
+ for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
+ ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
+ phy->clc[i], i);
+
+ /* If no country found, set "00" as default */
+ if (ret == -ENOENT)
+ ret = __mt7921_mcu_set_clc(dev, "00",
+ ENVIRON_INDOOR,
+ phy->clc[i], i);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
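mt7921_load_clc() above locates the CLC blob by walking the firmware image from its trailer: the per-region descriptors sit immediately before the trailer at the end of the file, while the region payloads are packed from the start of the file in the same order, so payload offsets are accumulated as the descriptors are scanned. A standalone sketch of that walk, with the structures reduced to the fields the loop needs (the real mt76_connac2 layouts differ):

#include <stdint.h>
#include <stddef.h>

struct fw_region {	/* simplified stand-in for mt76_connac2_fw_region */
	uint32_t len;
	uint8_t  type;
	uint8_t  rsv[3];
};

struct fw_trailer {	/* simplified stand-in for mt76_connac2_fw_trailer */
	uint8_t n_region;
	uint8_t rsv[3];
};

/* Layout: [payload 0][payload 1]...[desc 0][desc 1]...[desc n-1][trailer] */
static const uint8_t *fw_find_region(const uint8_t *data, size_t size,
				     uint8_t wanted_type, uint32_t *len)
{
	const struct fw_trailer *hdr =
		(const void *)(data + size - sizeof(*hdr));
	size_t offset = 0;
	int i;

	for (i = 0; i < hdr->n_region; i++) {
		const struct fw_region *region =
			(const void *)((const uint8_t *)hdr -
				       (hdr->n_region - i) * sizeof(*region));

		if (offset + region->len > size)
			return NULL;	/* truncated image */

		if (region->type == wanted_type) {
			*len = region->len;
			return data + offset;
		}
		offset += region->len;
	}
	return NULL;
}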
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index 0d20f7d8d474..96dc870fd35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -41,7 +41,7 @@ enum {
struct mt7921_mcu_eeprom_info {
__le32 addr;
__le32 valid;
- u8 data[16];
+ u8 data[MT7921_EEPROM_BLOCK_SIZE];
} __packed;
#define MT_RA_RATE_NSS GENMASK(8, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index c161031ac62a..eaba114a9c7e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -41,6 +41,8 @@
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
+#define MT7921_EEPROM_BLOCK_SIZE 16
+
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
@@ -100,7 +102,6 @@ struct mt7921_sta {
unsigned long last_txs;
unsigned long ampdu_state;
- struct mt76_sta_stats stats;
struct mt76_connac_sta_key_conf bip;
};
@@ -149,6 +150,29 @@ struct mib_stats {
u32 tx_amsdu_cnt;
};
+enum {
+ MT7921_CLC_POWER,
+ MT7921_CLC_CHAN,
+ MT7921_CLC_MAX_NUM,
+};
+
+struct mt7921_clc_rule {
+ u8 alpha2[2];
+ u8 type[2];
+ __le16 len;
+ u8 data[];
+} __packed;
+
+struct mt7921_clc {
+ __le32 len;
+ u8 idx;
+ u8 ver;
+ u8 nr_country;
+ u8 type;
+ u8 rsv[8];
+ u8 data[];
+};
+
struct mt7921_phy {
struct mt76_phy *mt76;
struct mt7921_dev *dev;
@@ -174,6 +198,8 @@ struct mt7921_phy {
#ifdef CONFIG_ACPI
struct mt7921_acpi_sar *acpisar;
#endif
+
+ struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
@@ -380,6 +406,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_token_put(struct mt7921_dev *dev);
+bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -410,14 +437,13 @@ int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
-void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
+void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list);
void mt7921_mac_sta_poll(struct mt7921_dev *dev);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
-bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
-void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
int mt7921e_mac_reset(struct mt7921_dev *dev);
int mt7921e_mcu_init(struct mt7921_dev *dev);
@@ -479,4 +505,7 @@ mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
#endif
int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
+
+int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index ea3069d18c35..8a53d8f286db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -123,54 +123,51 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
+ static const struct mt76_connac_reg_map fixed_map[] = {
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x01000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x01000 }, /* WF_MDP_TOP */
+ { 0x74030000, 0x10000, 0x10000 }, /* PCIE_MAC_IREG */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;
@@ -187,7 +184,7 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
if (ofs > fixed_map[i].size)
continue;
- return fixed_map[i].mapped + ofs;
+ return fixed_map[i].maps + ofs;
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
@@ -238,8 +235,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt76_connac_tx_complete_skb,
- .rx_check = mt7921e_rx_check,
- .rx_skb = mt7921e_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
+ .rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
@@ -288,6 +285,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
goto err_free_pci_vec;
}
+ pci_set_drvdata(pdev, mdev);
+
dev = container_of(mdev, struct mt7921_dev, mt76);
dev->hif_ops = &mt7921_pcie_ops;
@@ -367,6 +366,7 @@ static int mt7921_pci_suspend(struct device *device)
int i, err;
pm->suspended = true;
+ flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -409,9 +409,6 @@ static int mt7921_pci_suspend(struct device *device)
if (err)
goto restore_napi;
- if (err)
- goto restore_napi;
-
return 0;
restore_napi:
@@ -428,6 +425,9 @@ restore_napi:
restore_suspend:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
@@ -441,7 +441,7 @@ static int mt7921_pci_resume(struct device *device)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto failed;
mt7921_wpdma_reinit_cond(dev);
@@ -471,11 +471,12 @@ static int mt7921_pci_resume(struct device *device)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
- if (err)
- return err;
-
+failed:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
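The reworked fixed_map[] above feeds a plain linear lookup: a bus address hits an entry when it lies within [phys, phys + size], and the translated offset is maps + (addr - phys). A standalone sketch of that translation, with hypothetical names and only a couple of table entries:

#include <stdint.h>
#include <stddef.h>

struct reg_map {
	uint32_t phys;	/* bus address of the register block */
	uint32_t maps;	/* offset the block is remapped to */
	uint32_t size;	/* length of the block */
};

static const struct reg_map fixed_map[] = {
	{ 0x820d0000, 0x30000, 0x10000 },	/* WF_LMAC_TOP (WF_WTBLON) */
	{ 0x54000000, 0x02000, 0x01000 },	/* WFDMA PCIE0 MCU DMA0 */
};

/* Translate a bus address to its remapped offset; 0 means "no mapping". */
static uint32_t reg_map_lookup(uint32_t addr)
{
	size_t i;

	for (i = 0; i < sizeof(fixed_map) / sizeof(fixed_map[0]); i++) {
		uint32_t ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		if (ofs > fixed_map[i].size)
			continue;

		return fixed_map[i].maps + ofs;
	}
	return 0;
}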
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index e1800674089a..8dd60408b117 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -53,154 +53,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
-static void
-mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
- struct list_head *free_list)
-{
- struct mt76_dev *mdev = &dev->mt76;
- __le32 *txwi;
- u16 wcid_idx;
-
- mt76_connac_txp_skb_unmap(mdev, t);
- if (!t->skb)
- goto out;
-
- txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
- if (sta) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- wcid_idx = wcid->idx;
- } else {
- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
- }
-
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-
-out:
- t->skb = NULL;
- mt76_put_txwi(mdev, t);
-}
-
-static void
-mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
-{
- struct mt76_connac_tx_free *free = data;
- __le32 *tx_info = (__le32 *)(data + sizeof(*free));
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
- struct sk_buff *skb, *tmp;
- void *end = data + len;
- LIST_HEAD(free_list);
- bool wake = false;
- u8 i, count;
-
- /* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
-
- count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
- if (WARN_ON_ONCE((void *)&tx_info[count] > end))
- return;
-
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(tx_info[i]);
- u8 stat;
-
- /* 1'b1: new wcid pair.
- * 1'b0: msdu_id with the same 'wcid pair' as above.
- */
- if (info & MT_TX_FREE_PAIR) {
- struct mt7921_sta *msta;
- struct mt76_wcid *wcid;
- u16 idx;
-
- count++;
- idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
- sta = wcid_to_sta(wcid);
- if (!sta)
- continue;
-
- msta = container_of(wcid, struct mt7921_sta, wcid);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
- continue;
- }
-
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- stat = FIELD_GET(MT_TX_FREE_STATUS, info);
-
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
- continue;
-
- mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
- }
-
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
-
- list_for_each_entry_safe(skb, tmp, &free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
- }
-
- rcu_read_lock();
- mt7921_mac_sta_poll(dev);
- rcu_read_unlock();
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
-bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- __le32 *rxd = (__le32 *)data;
- __le32 *end = (__le32 *)&rxd[len / 4];
- enum rx_pkt_type type;
-
- type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7921e_mac_tx_free(dev, data, len);
- return false;
- case PKT_TYPE_TXS:
- for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7921_mac_add_txs(dev, rxd);
- return false;
- default:
- return true;
- }
-}
-
-void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- __le32 *rxd = (__le32 *)skb->data;
- enum rx_pkt_type type;
-
- type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7921e_mac_tx_free(dev, skb->data, skb->len);
- napi_consume_skb(skb, 1);
- break;
- default:
- mt7921_queue_rx_skb(mdev, q, skb);
- break;
- }
-}
-
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
@@ -261,7 +113,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
err = mt7921e_driver_own(dev);
if (err)
- return err;
+ goto out;
err = mt7921_run_firmware(dev);
if (err)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index 5efda694fb9d..86340d3205c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -30,12 +30,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ mdev->mcu.timeout = 3 * HZ;
if (cmd == MCU_CMD(FW_SCATTER))
txq = MT_MCUQ_FWDL;
@@ -59,6 +54,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
if (err)
return err;
+ mt76_rmw_field(dev, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS, 1);
+
err = mt7921_run_firmware(dev);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index ea643260ceb6..c65582acfa55 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -440,6 +440,8 @@
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+#define MT_PCIE_MAC_PM MT_PCIE_MAC(0x194)
+#define MT_PCIE_MAC_PM_L0S_DIS BIT(8)
#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs))
#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 487acd6e2be8..3b25a06fd946 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -96,6 +96,7 @@ static int mt7921s_probe(struct sdio_func *func,
.tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
.tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
@@ -194,7 +195,6 @@ static void mt7921s_remove(struct sdio_func *func)
mt7921s_unregister_device(dev);
}
-#ifdef CONFIG_PM
static int mt7921s_suspend(struct device *__dev)
{
struct sdio_func *func = dev_to_sdio_func(__dev);
@@ -206,6 +206,7 @@ static int mt7921s_suspend(struct device *__dev)
pm->suspended = true;
set_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
+ flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -261,6 +262,9 @@ restore_suspend:
clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
@@ -276,7 +280,7 @@ static int mt7921s_resume(struct device *__dev)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto failed;
mt76_worker_enable(&mdev->tx_worker);
mt76_worker_enable(&mdev->sdio.txrx_worker);
@@ -288,34 +292,27 @@ static int mt7921s_resume(struct device *__dev)
mt76_connac_mcu_set_deep_sleep(mdev, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
- if (err)
- return err;
-
+failed:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
-static const struct dev_pm_ops mt7921s_pm_ops = {
- .suspend = mt7921s_suspend,
- .resume = mt7921s_resume,
-};
-#endif
-
MODULE_DEVICE_TABLE(sdio, mt7921s_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7921s_pm_ops, mt7921s_suspend, mt7921s_resume);
+
static struct sdio_driver mt7921s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7921s_probe,
.remove = mt7921s_remove,
.id_table = mt7921s_table,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &mt7921s_pm_ops,
- }
-#endif
+ .drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
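The sdio.c conversion above drops the hand-rolled #ifdef CONFIG_PM block in favour of DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr(): the macro builds the dev_pm_ops structure, and pm_sleep_ptr() turns the reference into NULL when sleep support is compiled out, letting the compiler discard the callbacks without any preprocessor guards. A minimal sketch of the pattern in an arbitrary driver (driver and function names hypothetical):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore the hardware */
	return 0;
}

/* Defines foo_pm_ops with the two sleep callbacks filled in. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM_SLEEP=n, so no #ifdef is needed */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};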
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index e038d7404323..5c1489766d9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -33,12 +33,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ mdev->mcu.timeout = 3 * HZ;
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index dd3b8884e162..29c0ee330dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -106,12 +106,7 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ mdev->mcu.timeout = 3 * HZ;
if (cmd != MCU_CMD(FW_SCATTER))
ep = MT_EP_OUT_INBAND_CMD;
@@ -183,6 +178,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
.tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
@@ -300,23 +296,34 @@ static void mt7921u_disconnect(struct usb_interface *usb_intf)
static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
+ struct mt76_connac_pm *pm = &dev->pm;
int err;
+ pm->suspended = true;
+ flush_work(&dev->reset_work);
+
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
if (err)
- return err;
+ goto failed;
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
- set_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
-
return 0;
+
+failed:
+ pm->suspended = false;
+
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
+ return err;
}
static int mt7921u_resume(struct usb_interface *intf)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
+ struct mt76_connac_pm *pm = &dev->pm;
bool reinit = true;
int err, i;
@@ -338,16 +345,21 @@ static int mt7921u_resume(struct usb_interface *intf)
if (reinit || mt7921_dma_need_reinit(dev)) {
err = mt7921u_dma_init(dev, true);
if (err)
- return err;
+ goto failed;
}
- clear_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
-
err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
- return err;
+ goto failed;
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+failed:
+ pm->suspended = false;
+
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
- return mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ return err;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index aba2a9865821..0ec308f99af5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -478,14 +478,14 @@ static void mt76s_status_worker(struct mt76_worker *w)
if (ndata_frames > 0)
resched = true;
- if (dev->drv->tx_status_data &&
+ if (dev->drv->tx_status_data && ndata_frames > 0 &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
!test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
- queue_work(dev->wq, &dev->sdio.stat_work);
+ ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
} while (nframes > 0);
if (resched)
- mt76_worker_schedule(&dev->sdio.txrx_worker);
+ mt76_worker_schedule(&dev->tx_worker);
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -508,7 +508,7 @@ static void mt76s_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
- queue_work(dev->wq, &sdio->stat_work);
+ ieee80211_queue_work(dev->hw, &sdio->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index a2601aa9e7b1..bfc4de50a4d2 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -85,7 +85,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i;
struct page *page;
- u8 *buf;
+ u8 *buf, *end;
for (i = 0; i < intr->rx.num[qid]; i++)
len += round_up(intr->rx.len[qid][i] + 4, 4);
@@ -112,20 +112,29 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
return err;
}
- for (i = 0; i < intr->rx.num[qid]; i++) {
+ end = buf + len;
+ i = 0;
+
+ while (i < intr->rx.num[qid] && buf < end) {
int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
__le32 *rxd = (__le32 *)buf;
/* parse rxd to get the actual packet length */
len = le32_get_bits(rxd[0], GENMASK(15, 0));
- e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
- if (!e->skb)
- break;
+ /* Optimized path for TXS */
+ if (!dev->drv->rx_check || dev->drv->rx_check(dev, buf, len)) {
+ e->skb = mt76s_build_rx_skb(buf, len,
+ round_up(len + 4, 4));
+ if (!e->skb)
+ break;
+
+ if (q->queued + i + 1 == q->ndesc)
+ break;
+ i++;
+ }
buf += round_up(len + 4, 4);
- if (q->queued + i + 1 == q->ndesc)
- break;
}
put_page(page);
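The sdio_txrx.c hunk above routes every received record through the new drv->rx_check hook before an skb is built: when the hook reports the record was consumed in place (for example a TXS batch), the walk simply advances by the 4-byte-aligned record length and no rx skb is allocated. A standalone sketch of that dispatch loop, assuming a little-endian host and hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Returns true when the record still needs an skb, false when consumed. */
typedef bool (*rx_check_fn)(void *priv, const uint8_t *buf, int len);

static size_t rx_walk(void *priv, const uint8_t *buf, size_t total,
		      rx_check_fn rx_check)
{
	const uint8_t *end = buf + total;
	size_t nr_skbs = 0;

	while (buf < end) {
		uint32_t rxd0;
		int len;

		memcpy(&rxd0, buf, sizeof(rxd0));	/* first RX descriptor word */
		len = rxd0 & 0xffff;			/* packet length, bits 15:0 */

		if (!rx_check || rx_check(priv, buf, len))
			nr_skbs++;	/* the real driver builds an skb here */

		buf += (len + 4 + 3) & ~3u;	/* round_up(len + 4, 4) */
	}
	return nr_skbs;
}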
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 71fd3fbfa7d2..0accc71a91c9 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/random.h>
#include "mt76.h"
const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
@@ -123,12 +125,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
if (!head)
return -ENOMEM;
- hdr = __skb_put_zero(head, head_len);
+ hdr = __skb_put_zero(head, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
skb_set_queue_mapping(head, IEEE80211_AC_BE);
+ get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
+ head_len - sizeof(*hdr));
info = IEEE80211_SKB_CB(head);
info->flags = IEEE80211_TX_CTL_INJECTED |
@@ -154,7 +158,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
return -ENOMEM;
}
- __skb_put_zero(frag, frag_len);
+ get_random_bytes(__skb_put(frag, frag_len), frag_len);
head->len += frag->len;
head->data_len += frag->len;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 6b8964c19f50..4c4033bb1bb3 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -528,6 +528,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
data_len = min_t(int, len, data_len - head_room);
+
+ if (len == data_len &&
+ dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
+ return 0;
+
skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 3ac373d29d93..b89047965e78 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -540,8 +540,9 @@ static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info,
return 0;
}
-static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise, const u8 *mac_addr, struct key_params *params)
+static int add_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
{
int ret = 0, keylen = params->key_len;
@@ -644,7 +645,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
return ret;
}
-static int del_key(struct wiphy *wiphy, struct net_device *netdev,
+static int del_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
u8 key_index,
bool pairwise,
const u8 *mac_addr)
@@ -685,8 +686,9 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
return 0;
}
-static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise, const u8 *mac_addr, void *cookie,
+static int get_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ void *cookie,
void (*callback)(void *cookie, struct key_params *))
{
struct wilc_vif *vif = netdev_priv(netdev);
@@ -723,13 +725,14 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
/* wiphy_new_nm() will WARNON if not present */
static int set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
return 0;
}
static int set_default_mgmt_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index)
+ int link_id, u8 key_index)
{
struct wilc_vif *vif = netdev_priv(netdev);
@@ -994,12 +997,11 @@ bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size)
{
struct wilc *wl = vif->wilc;
struct wilc_priv *priv = &vif->priv;
- int freq, ret;
+ int freq;
freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ);
- ret = cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
- return ret;
+ return cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
}
void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index b5a1b65c087c..03b7229a0ff5 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -229,7 +229,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
return NULL;
wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP;
- strlcpy(wl->monitor_dev->name, name, IFNAMSIZ);
+ strscpy(wl->monitor_dev->name, name, IFNAMSIZ);
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 43c085c74b7a..bb1a315a7b7e 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -245,6 +245,7 @@ struct wilc {
u8 *rx_buffer;
u32 rx_buffer_offset;
u8 *tx_buffer;
+ u32 *vmm_table;
struct txq_handle txq[NQUEUES];
int txq_entries;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 600cc57e9da2..7390f94cd4ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -28,6 +28,7 @@ struct wilc_sdio {
u32 block_size;
bool isinit;
int has_thrpt_enh3;
+ u8 *cmd53_buf;
};
struct sdio_cmd52 {
@@ -47,6 +48,7 @@ struct sdio_cmd53 {
u32 count: 9;
u8 *buffer;
u32 block_size;
+ bool use_global_buf;
};
static const struct wilc_hif_func wilc_hif_sdio;
@@ -91,6 +93,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+ u8 *buf = cmd->buffer;
sdio_claim_host(func);
@@ -101,12 +105,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
else
size = cmd->count;
+ if (cmd->use_global_buf) {
+ if (size > sizeof(u32))
+ return -EINVAL;
+
+ buf = sdio_priv->cmd53_buf;
+ }
+
if (cmd->read_write) { /* write */
- ret = sdio_memcpy_toio(func, cmd->address,
- (void *)cmd->buffer, size);
+ if (cmd->use_global_buf)
+ memcpy(buf, cmd->buffer, size);
+
+ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
} else { /* read */
- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
- cmd->address, size);
+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+ if (cmd->use_global_buf)
+ memcpy(cmd->buffer, buf, size);
}
sdio_release_host(func);
@@ -128,6 +143,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
if (!sdio_priv)
return -ENOMEM;
+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdio_priv->cmd53_buf) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret)
@@ -161,6 +182,7 @@ dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
free:
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
return ret;
}
@@ -172,6 +194,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
@@ -375,8 +398,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)&data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret)
@@ -414,6 +438,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -492,8 +517,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -535,6 +561,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
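The wilc1000 SDIO changes above add a small kmalloc'ed cmd53_buf plus a use_global_buf flag so that register-sized CMD53 transfers always go through DMA-safe memory: a write is staged into the bounce buffer before sdio_memcpy_toio(), and a read is copied back to the caller after sdio_memcpy_fromio(). A standalone sketch of the copy-in/copy-out pattern, with the SDIO transfer replaced by a stub and hypothetical names:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

struct bus_priv {
	uint8_t bounce[4];	/* small, DMA-safe scratch buffer */
};

/* Stand-in for the DMA-capable transfer the SDIO core would perform. */
static int dma_xfer(void *dma_buf, size_t len, bool write)
{
	(void)dma_buf; (void)len; (void)write;
	return 0;
}

static int reg_xfer(struct bus_priv *p, void *caller_buf, size_t len, bool write)
{
	int ret;

	if (len > sizeof(p->bounce))
		return -1;	/* only register-sized transfers use the bounce buffer */

	if (write)
		memcpy(p->bounce, caller_buf, len);	/* stage before the DMA */

	ret = dma_xfer(p->bounce, len, write);
	if (ret)
		return ret;

	if (!write)
		memcpy(caller_buf, p->bounce, len);	/* copy out after the DMA */

	return 0;
}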
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 947d9a0a494e..58bbf50081e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
int ret = 0;
int counter;
int timeout;
- u32 vmm_table[WILC_VMM_TBL_SIZE];
+ u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
@@ -1252,6 +1252,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
@@ -1489,6 +1491,14 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}
+ if (!wilc->vmm_table)
+ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
+ goto fail;
+ }
+
if (!wilc->tx_buffer)
wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
@@ -1513,7 +1523,8 @@ int wilc_wlan_init(struct net_device *dev)
return 0;
fail:
-
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 1593e810b3ca..bfdf03bfa6c5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -532,8 +532,8 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -548,7 +548,8 @@ static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -569,7 +570,8 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -585,7 +587,7 @@ static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
static int
qtnf_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index)
+ int link_id, u8 key_index)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -721,9 +723,8 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
return -EFAULT;
}
- if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- }
ret = qtnf_cmd_send_disconnect(vif, reason_code);
if (ret)
@@ -750,7 +751,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan;
int ret;
-
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -1223,7 +1223,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
mac->macinfo.extended_capabilities_len;
}
- strlcpy(wiphy->fw_version, hw_info->fw_version,
+ strscpy(wiphy->fw_version, hw_info->fw_version,
sizeof(wiphy->fw_version));
wiphy->hw_version = hw_info->hw_version;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 0fad53693292..b1b73478d89b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -967,7 +967,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_rx_chain, hwinfo->total_tx_chain,
hwinfo->fw_ver);
- strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
+ strscpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
hwinfo->hw_version = hw_ver;
return 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index d758e8874457..de2ee5ffc34e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -1016,6 +1016,8 @@
*/
#define MAC_STATUS_CFG 0x1200
#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
+#define MAC_STATUS_CFG_BBP_RF_BUSY_TX FIELD32(0x00000001)
+#define MAC_STATUS_CFG_BBP_RF_BUSY_RX FIELD32(0x00000002)
/*
* PWR_PIN_CFG:
@@ -2739,6 +2741,7 @@ enum rt2800_eeprom_word {
#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_EXTERNAL_PA FIELD16(0x8000)
/*
* EEPROM LNA
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 18102fbe36d6..cbbb1a4849cf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -198,6 +198,26 @@ static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value);
}
+static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ rt2800_bbp_write(rt2x00dev, 159, value);
+}
+
+static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ return rt2800_bbp_read(rt2x00dev, 159);
+}
+
+static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 195, reg);
+ rt2800_bbp_write(rt2x00dev, 196, value);
+}
+
static u8 rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word)
{
@@ -2143,6 +2163,48 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
+static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev,
+ const struct rt2x00_field32 mask)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
+ if (!rt2x00_get_field32(reg, mask))
+ return 0;
+
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
+ return -EACCES;
+}
+
+static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u8 value;
+
+ /*
+ * BBP was enabled after firmware was loaded,
+ * but we need to reactivate it now.
+ */
+ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ msleep(1);
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ value = rt2800_bbp_read(rt2x00dev, 0);
+ if ((value != 0xff) && (value != 0x00))
+ return 0;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
+ return -EACCES;
+}
+
static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -3793,16 +3855,23 @@ static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev,
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr);
}
+
+ if (conf_is_ht40(conf)) {
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2f);
+ } else {
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1a);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x40);
+ }
}
static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
struct ieee80211_channel *chan,
int power_level) {
u16 eeprom, target_power, max_power;
- u32 mac_sys_ctrl, mac_status;
+ u32 mac_sys_ctrl;
u32 reg;
u8 bbp;
- int i;
/* hardware unit is 0.5dBm, limited to 23.5dBm */
power_level *= 2;
@@ -3838,16 +3907,8 @@ static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
/* Disable Tx/Rx */
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
/* Check MAC Tx/Rx idle */
- for (i = 0; i < 10000; i++) {
- mac_status = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
- if (mac_status & 0x3)
- usleep_range(50, 200);
- else
- break;
- }
-
- if (i == 10000)
- rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n");
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
+ rt2x00_warn(rt2x00dev, "RF busy while configuring ALC\n");
if (chan->center_freq > 2457) {
bbp = rt2800_bbp_read(rt2x00dev, 30);
@@ -4164,7 +4225,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 86, 0);
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
}
if (rf->channel <= 14) {
@@ -4365,7 +4429,45 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
- rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ if (rt2x00_rt(rt2x00dev, RT5592))
+ rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT6352)) {
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags)) {
+ reg = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ reg |= 0x00000101;
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, reg);
+
+ reg = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+ reg |= 0x00000101;
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, reg);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
+
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT,
+ 0x36303636);
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN,
+ 0x6C6C6B6C);
+ rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN,
+ 0x6C6C6B6C);
+ }
}
bbp = rt2800_bbp_read(rt2x00dev, 4);
@@ -5644,7 +5746,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2x00_rt(rt2x00dev, RT3883) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5867,7 +5970,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
} else if (rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
- rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
@@ -6129,6 +6232,27 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_soc(rt2x00dev)) {
+ struct clk *clk = clk_get_sys("bus", NULL);
+ int rate;
+
+ if (IS_ERR(clk)) {
+ clk = clk_get_sys("cpu", NULL);
+
+ if (IS_ERR(clk)) {
+ rate = 125;
+ } else {
+ rate = clk_get_rate(clk) / 3000000;
+ clk_put(clk);
+ }
+ } else {
+ rate = clk_get_rate(clk) / 1000000;
+ clk_put(clk);
+ }
+
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
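US_CYC_CNT_CLOCK_CYCLE holds the number of bus-clock cycles per microsecond, so the SoC branch simply divides the clock rate in Hz by 1,000,000; the 3,000,000 divisor for the "cpu" clock assumes a 3:1 CPU-to-bus ratio on these SoCs. A worked example under that assumption (the clock rates are illustrative, not taken from the patch):

	/* Sketch: a 40 MHz "bus" clock gives 40000000 / 1000000 = 40 cycles
	 * per microsecond; a 580 MHz "cpu" clock with an assumed 3:1 CPU/bus
	 * ratio gives 580000000 / 3000000 = 193.  If neither clock can be
	 * obtained, the code falls back to the fixed value of 125.
	 */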
reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0);
@@ -6212,46 +6336,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
return 0;
}
-static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
- if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
- return 0;
-
- udelay(REGISTER_BUSY_DELAY);
- }
-
- rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
- return -EACCES;
-}
-
-static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u8 value;
-
- /*
- * BBP was enabled after firmware was loaded,
- * but we need to reactivate it now.
- */
- rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
- rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
- msleep(1);
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- value = rt2800_bbp_read(rt2x00dev, 0);
- if ((value != 0xff) && (value != 0x00))
- return 0;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
- return -EACCES;
-}
static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
{
@@ -6916,26 +7000,6 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
-static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
- const u8 reg, const u8 value)
-{
- rt2800_bbp_write(rt2x00dev, 195, reg);
- rt2800_bbp_write(rt2x00dev, 196, value);
-}
-
-static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
- const u8 reg, const u8 value)
-{
- rt2800_bbp_write(rt2x00dev, 158, reg);
- rt2800_bbp_write(rt2x00dev, 159, value);
-}
-
-static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
-{
- rt2800_bbp_write(rt2x00dev, 158, reg);
- return rt2800_bbp_read(rt2x00dev, 159);
-}
-
static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev)
{
u8 bbp;
@@ -8398,6 +8462,1519 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_led_open_drain_enable(rt2x00dev);
}
+static void rt2800_rf_self_txdc_cal(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfb5r1_org, rfb7r1_org, rfvalue;
+ u32 mac0518, mac051c, mac0528, mac052c;
+ u8 i;
+
+ mac0518 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ mac051c = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ mac0528 = rt2800_register_read(rt2x00dev, RF_CONTROL2);
+ mac052c = rt2800_register_read(rt2x00dev, RF_BYPASS2);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0xC);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x3306);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, 0x3330);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0xfffff);
+ rfb5r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ rfb7r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, 0x4);
+ for (i = 0; i < 100; ++i) {
+ usleep_range(50, 100);
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ if ((rfvalue & 0x04) != 0x4)
+ break;
+ }
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rfb5r1_org);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, 0x4);
+ for (i = 0; i < 100; ++i) {
+ usleep_range(50, 100);
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1);
+ if ((rfvalue & 0x04) != 0x4)
+ break;
+ }
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, rfb7r1_org);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, mac0518);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, mac051c);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, mac0528);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, mac052c);
+}
+
+static int rt2800_calcrcalibrationcode(struct rt2x00_dev *rt2x00dev, int d1, int d2)
+{
+ int calcode = ((d2 - d1) * 1000) / 43;
+
+ if ((calcode % 10) >= 5)
+ calcode += 10;
+ calcode = (calcode / 10);
+
+ return calcode;
+}
+
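The helper rounds (d2 - d1) * 1000 / 43 to the nearest multiple of ten before dropping the last digit, i.e. it returns roughly (d2 - d1) * 100 / 43. A worked example with illustrative measurement values:

	/* d1 = -4, d2 = 6: (6 - (-4)) * 1000 / 43 = 232; 232 % 10 = 2 < 5,
	 * so no round-up, and the returned code is 232 / 10 = 23.
	 * d1 = -5, d2 = 6: 11000 / 43 = 255; 255 % 10 = 5 >= 5, so the value
	 * becomes 265 and the returned code is 26.
	 * Note the round-up branch only fires for non-negative remainders,
	 * so negative differences are simply truncated.
	 */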
+static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u32 savemacsysctrl;
+ u8 saverfb0r1, saverfb0r34, saverfb0r35;
+ u8 saverfb5r4, saverfb5r17, saverfb5r18;
+ u8 saverfb5r19, saverfb5r20;
+ u8 savebbpr22, savebbpr47, savebbpr49;
+ u8 bytevalue = 0;
+ int rcalcode;
+ u8 r_cal_code = 0;
+ s8 d1 = 0, d2 = 0;
+ u8 rfvalue;
+ u32 MAC_RF_BYPASS0, MAC_RF_CONTROL0, MAC_PWR_PIN_CFG;
+ u32 maccfg;
+
+ saverfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ saverfb0r34 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 34);
+ saverfb0r35 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ saverfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ saverfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ saverfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+
+ savebbpr22 = rt2800_bbp_read(rt2x00dev, 22);
+ savebbpr47 = rt2800_bbp_read(rt2x00dev, 47);
+ savebbpr49 = rt2800_bbp_read(rt2x00dev, 49);
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ MAC_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ MAC_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ MAC_PWR_PIN_CFG = rt2800_register_read(rt2x00dev, PWR_PIN_CFG);
+
+ maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ maccfg &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "Wait MAC Tx Status to MAX !!!\n");
+
+ maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ maccfg &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "Wait MAC Rx Status to MAX !!!\n");
+
+ rfvalue = (MAC_RF_BYPASS0 | 0x3004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, rfvalue);
+ rfvalue = (MAC_RF_CONTROL0 | (~0x3002));
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, rfvalue);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x27);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0x83);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, 0x13);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x1);
+
+ rt2800_bbp_write(rt2x00dev, 47, 0x04);
+ rt2800_bbp_write(rt2x00dev, 22, 0x80);
+ usleep_range(100, 200);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 49);
+ if (bytevalue > 128)
+ d1 = bytevalue - 256;
+ else
+ d1 = (s8)bytevalue;
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x01);
+
+ rt2800_bbp_write(rt2x00dev, 22, 0x80);
+ usleep_range(100, 200);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 49);
+ if (bytevalue > 128)
+ d2 = bytevalue - 256;
+ else
+ d2 = (s8)bytevalue;
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+
+ rcalcode = rt2800_calcrcalibrationcode(rt2x00dev, d1, d2);
+ if (rcalcode < 0)
+ r_cal_code = 256 + rcalcode;
+ else
+ r_cal_code = (u8)rcalcode;
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 7, r_cal_code);
+
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+
+ bytevalue = rt2800_bbp_read(rt2x00dev, 21);
+ bytevalue |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bytevalue);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 21);
+ bytevalue &= (~0x1);
+ rt2800_bbp_write(rt2x00dev, 21, bytevalue);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, saverfb0r1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, saverfb0r34);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, saverfb0r35);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20);
+
+ rt2800_bbp_write(rt2x00dev, 22, savebbpr22);
+ rt2800_bbp_write(rt2x00dev, 47, savebbpr47);
+ rt2800_bbp_write(rt2x00dev, 49, savebbpr49);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, MAC_PWR_PIN_CFG);
+}
+
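The d1/d2 readback above converts the raw BBP 49 byte to a signed quantity by hand. A cast to s8 is the idiomatic equivalent, as in this sketch (the two forms differ only for the exact value 128, which the open-coded version maps to +128 rather than -128):

	bytevalue = rt2800_bbp_read(rt2x00dev, 49);
	d1 = (s8)bytevalue;	/* two's-complement interpretation of the byte */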
+static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u8 bbpreg = 0;
+ u32 macvalue = 0;
+ u8 saverfb0r2, saverfb5r4, saverfb7r4, rfvalue;
+ int i;
+
+ saverfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rfvalue = saverfb0r2;
+ rfvalue |= 0x03;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfvalue);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg |= 0x10;
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x8);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in RX RXDCOC calibration\n");
+
+ saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ saverfb5r4 = saverfb5r4 & (~0x40);
+ saverfb7r4 = saverfb7r4 & (~0x40);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x64);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+ bbpreg |= 0x48;
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ for (i = 0; i < 10000; i++) {
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ if ((bbpreg & 0x40) == 0)
+ break;
+ usleep_range(50, 100);
+ }
+
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg &= (~0x10);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, saverfb0r2);
+}
+
+static u32 rt2800_do_sqrt_accumulation(u32 si)
+{
+ u32 root, root_pre, bit;
+ s8 i;
+
+ bit = 1 << 15;
+ root = 0;
+ for (i = 15; i >= 0; i = i - 1) {
+ root_pre = root + bit;
+ if ((root_pre * root_pre) <= si)
+ root = root_pre;
+ bit = bit >> 1;
+ }
+
+ return root;
+}
+
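rt2800_do_sqrt_accumulation() is a bit-by-bit integer square root: it builds the result from bit 15 downwards and keeps a trial bit whenever the squared candidate still fits under the input, so it returns the floor of the square root. A standalone sketch of the same algorithm that can be compiled and tested outside the driver:

#include <stdint.h>

static uint32_t isqrt16(uint32_t si)
{
	uint32_t root = 0, bit;

	for (bit = 1u << 15; bit != 0; bit >>= 1) {
		uint32_t trial = root + bit;

		if (trial * trial <= si)	/* keep the bit if it still fits */
			root = trial;
	}
	return root;
}

/* isqrt16(10000) == 100, isqrt16(99) == 9, isqrt16(0) == 0 */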
+static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfb0r1, rfb0r2, rfb0r42;
+ u8 rfb4r0, rfb4r19;
+ u8 rfb5r3, rfb5r4, rfb5r17, rfb5r18, rfb5r19, rfb5r20;
+ u8 rfb6r0, rfb6r19;
+ u8 rfb7r3, rfb7r4, rfb7r17, rfb7r18, rfb7r19, rfb7r20;
+
+ u8 bbp1, bbp4;
+ u8 bbpr241, bbpr242;
+ u32 i;
+ u8 ch_idx;
+ u8 bbpval;
+ u8 rfval, vga_idx = 0;
+ int mi = 0, mq = 0, si = 0, sq = 0, riq = 0;
+ int sigma_i, sigma_q, r_iq, g_rx;
+ int g_imb;
+ int ph_rx;
+ u32 savemacsysctrl = 0;
+ u32 orig_RF_CONTROL0 = 0;
+ u32 orig_RF_BYPASS0 = 0;
+ u32 orig_RF_CONTROL1 = 0;
+ u32 orig_RF_BYPASS1 = 0;
+ u32 orig_RF_CONTROL3 = 0;
+ u32 orig_RF_BYPASS3 = 0;
+ u32 bbpval1 = 0;
+ static const u8 rf_vga_table[] = {0x20, 0x21, 0x22, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f};
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ orig_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ orig_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ orig_RF_CONTROL1 = rt2800_register_read(rt2x00dev, RF_CONTROL1);
+ orig_RF_BYPASS1 = rt2800_register_read(rt2x00dev, RF_BYPASS1);
+ orig_RF_CONTROL3 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ orig_RF_BYPASS3 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+
+ bbp1 = rt2800_bbp_read(rt2x00dev, 1);
+ bbp4 = rt2800_bbp_read(rt2x00dev, 4);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x0);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
+ rt2x00_warn(rt2x00dev, "Timeout waiting for MAC status in RXIQ calibration\n");
+
+ bbpval = bbp4 & (~0x18);
+ bbpval = bbp4 | 0x00;
+ rt2800_bbp_write(rt2x00dev, 4, bbpval);
+
+ bbpval = rt2800_bbp_read(rt2x00dev, 21);
+ bbpval = bbpval | 1;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+ bbpval = bbpval & 0xfe;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL1, 0x00000202);
+ rt2800_register_write(rt2x00dev, RF_BYPASS1, 0x00000303);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0101);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0000);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0xf1f1);
+
+ rfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rfb4r0 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0);
+ rfb4r19 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 19);
+ rfb5r3 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ rfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ rfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ rfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ rfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ rfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+
+ rfb6r0 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0);
+ rfb6r19 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 19);
+ rfb7r3 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3);
+ rfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ rfb7r17 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17);
+ rfb7r18 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18);
+ rfb7r19 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19);
+ rfb7r20 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x87);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0x27);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x38);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x38);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x80);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0xC1);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x60);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x0);
+ rt2800_bbp_write(rt2x00dev, 24, 0x0);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 5, 0x0);
+
+ bbpr241 = rt2800_bbp_read(rt2x00dev, 241);
+ bbpr242 = rt2800_bbp_read(rt2x00dev, 242);
+
+ rt2800_bbp_write(rt2x00dev, 241, 0x10);
+ rt2800_bbp_write(rt2x00dev, 242, 0x84);
+ rt2800_bbp_write(rt2x00dev, 244, 0x31);
+
+ bbpval = rt2800_bbp_dcoc_read(rt2x00dev, 3);
+ bbpval = bbpval & (~0x7);
+ rt2800_bbp_dcoc_write(rt2x00dev, 3, bbpval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006);
+ usleep_range(1, 200);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003376);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006);
+ udelay(1);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x06);
+ rt2800_bbp_write(rt2x00dev, 24, 0x06);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 23, 0x02);
+ rt2800_bbp_write(rt2x00dev, 24, 0x02);
+ }
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx = ch_idx + 1) {
+ if (ch_idx == 0) {
+ rfval = rfb0r1 & (~0x3);
+ rfval = rfb0r1 | 0x1;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
+ rfval = rfb0r2 & (~0x33);
+ rfval = rfb0r2 | 0x11;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
+ rfval = rfb0r42 & (~0x50);
+ rfval = rfb0r42 | 0x10;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006);
+ udelay(1);
+
+ bbpval = bbp1 & (~0x18);
+ bbpval = bbpval | 0x00;
+ rt2800_bbp_write(rt2x00dev, 1, bbpval);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x00);
+ } else {
+ rfval = rfb0r1 & (~0x3);
+ rfval = rfb0r1 | 0x2;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
+ rfval = rfb0r2 & (~0x33);
+ rfval = rfb0r2 | 0x22;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
+ rfval = rfb0r42 & (~0x50);
+ rfval = rfb0r42 | 0x40;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002006);
+ udelay(1);
+
+ bbpval = bbp1 & (~0x18);
+ bbpval = bbpval | 0x08;
+ rt2800_bbp_write(rt2x00dev, 1, bbpval);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x01);
+ }
+ usleep_range(500, 1500);
+
+ vga_idx = 0;
+ while (vga_idx < 11) {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rf_vga_table[vga_idx]);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rf_vga_table[vga_idx]);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x93);
+
+ for (i = 0; i < 10000; i++) {
+ bbpval = rt2800_bbp_read(rt2x00dev, 159);
+ if ((bbpval & 0xff) == 0x93)
+ usleep_range(50, 100);
+ else
+ break;
+ }
+
+ if ((bbpval & 0xff) == 0x93) {
+ rt2x00_warn(rt2x00dev, "Fatal Error: Calibration doesn't finish");
+ goto restore_value;
+ }
+ for (i = 0; i < 5; i++) {
+ u32 bbptemp = 0;
+ u8 value = 0;
+ int result = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x1e);
+ rt2800_bbp_write(rt2x00dev, 159, i);
+ rt2800_bbp_write(rt2x00dev, 158, 0x22);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 24);
+ rt2800_bbp_write(rt2x00dev, 158, 0x21);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 16);
+ rt2800_bbp_write(rt2x00dev, 158, 0x20);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 8);
+ rt2800_bbp_write(rt2x00dev, 158, 0x1f);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + value;
+
+ if (i < 2 && (bbptemp & 0x800000))
+ result = (bbptemp & 0xffffff) - 0x1000000;
+ else if (i == 4)
+ result = bbptemp;
+ else
+ result = bbptemp;
+
+ if (i == 0)
+ mi = result / 4096;
+ else if (i == 1)
+ mq = result / 4096;
+ else if (i == 2)
+ si = bbptemp / 4096;
+ else if (i == 3)
+ sq = bbptemp / 4096;
+ else
+ riq = result / 4096;
+ }
+
+ bbpval1 = si - mi * mi;
+ rt2x00_dbg(rt2x00dev,
+ "RXIQ si=%d, sq=%d, riq=%d, bbpval %d, vga_idx %d",
+ si, sq, riq, bbpval1, vga_idx);
+
+ if (bbpval1 >= (100 * 100))
+ break;
+
+ if (bbpval1 <= 100)
+ vga_idx = vga_idx + 9;
+ else if (bbpval1 <= 158)
+ vga_idx = vga_idx + 8;
+ else if (bbpval1 <= 251)
+ vga_idx = vga_idx + 7;
+ else if (bbpval1 <= 398)
+ vga_idx = vga_idx + 6;
+ else if (bbpval1 <= 630)
+ vga_idx = vga_idx + 5;
+ else if (bbpval1 <= 1000)
+ vga_idx = vga_idx + 4;
+ else if (bbpval1 <= 1584)
+ vga_idx = vga_idx + 3;
+ else if (bbpval1 <= 2511)
+ vga_idx = vga_idx + 2;
+ else
+ vga_idx = vga_idx + 1;
+ }
+
+ sigma_i = rt2800_do_sqrt_accumulation(100 * (si - mi * mi));
+ sigma_q = rt2800_do_sqrt_accumulation(100 * (sq - mq * mq));
+ r_iq = 10 * (riq - (mi * mq));
+
+ rt2x00_dbg(rt2x00dev, "Sigma_i=%d, Sigma_q=%d, R_iq=%d", sigma_i, sigma_q, r_iq);
+
+ if (sigma_i <= 1400 && sigma_i >= 1000 &&
+ (sigma_i - sigma_q) <= 112 &&
+ (sigma_i - sigma_q) >= -112 &&
+ mi <= 32 && mi >= -32 &&
+ mq <= 32 && mq >= -32) {
+ r_iq = 10 * (riq - (mi * mq));
+ rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n",
+ sigma_i, sigma_q, r_iq);
+
+ g_rx = (1000 * sigma_q) / sigma_i;
+ g_imb = ((-2) * 128 * (1000 - g_rx)) / (1000 + g_rx);
+ ph_rx = (r_iq * 2292) / (sigma_i * sigma_q);
+
+ if (ph_rx > 20 || ph_rx < -20) {
+ ph_rx = 0;
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+
+ if (g_imb > 12 || g_imb < -12) {
+ g_imb = 0;
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+ } else {
+ g_imb = 0;
+ ph_rx = 0;
+ rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n",
+ sigma_i, sigma_q, r_iq);
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+
+ if (ch_idx == 0) {
+ rt2800_bbp_write(rt2x00dev, 158, 0x37);
+ rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f);
+ rt2800_bbp_write(rt2x00dev, 158, 0x35);
+ rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 158, 0x55);
+ rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f);
+ rt2800_bbp_write(rt2x00dev, 158, 0x53);
+ rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f);
+ }
+ }
+
+restore_value:
+ rt2800_bbp_write(rt2x00dev, 158, 0x3);
+ bbpval = rt2800_bbp_read(rt2x00dev, 159);
+ rt2800_bbp_write(rt2x00dev, 159, (bbpval | 0x07));
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ rt2800_bbp_write(rt2x00dev, 1, bbp1);
+ rt2800_bbp_write(rt2x00dev, 4, bbp4);
+ rt2800_bbp_write(rt2x00dev, 241, bbpr241);
+ rt2800_bbp_write(rt2x00dev, 242, bbpr242);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+ bbpval = rt2800_bbp_read(rt2x00dev, 21);
+ bbpval |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+ usleep_range(10, 200);
+ bbpval &= 0xfe;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfb0r1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfb0r2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, rfb4r0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 19, rfb4r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rfb5r3);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, rfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, rfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, rfb5r20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, rfb6r0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 19, rfb6r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, rfb7r3);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, rfb7r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, rfb7r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, rfb7r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, rfb7r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, rfb7r20);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, orig_RF_CONTROL0);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, orig_RF_BYPASS0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL1, orig_RF_CONTROL1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS1, orig_RF_BYPASS1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, orig_RF_CONTROL3);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, orig_RF_BYPASS3);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+}
+
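The gain/phase correction above is derived from the accumulated statistics: sigma_i and sigma_q are scaled standard deviations of the I and Q samples, g_rx is their ratio in units of 1/1000, g_imb the resulting 6-bit gain-imbalance code and ph_rx the phase-error code. A standalone sketch of that fixed-point arithmetic, reusing the isqrt16() helper from the earlier sketch and adding a divide-by-zero guard that the driver itself does not need:

static void rxiq_imbalance(int mi, int mq, int si, int sq, int riq,
			   int *g_imb, int *ph_rx)
{
	int sigma_i = (int)isqrt16(100 * (si - mi * mi));
	int sigma_q = (int)isqrt16(100 * (sq - mq * mq));
	int r_iq = 10 * (riq - mi * mq);	/* scaled I/Q covariance */
	int g_rx;

	if (sigma_i == 0 || sigma_q == 0) {	/* guard for the sketch only */
		*g_imb = 0;
		*ph_rx = 0;
		return;
	}

	g_rx = (1000 * sigma_q) / sigma_i;	/* gain ratio, per mille */
	*g_imb = (-2 * 128 * (1000 - g_rx)) / (1000 + g_rx);
	*ph_rx = (r_iq * 2292) / (sigma_i * sigma_q);
}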
+static void rt2800_rf_configstore(struct rt2x00_dev *rt2x00dev,
+ struct rf_reg_pair rf_reg_record[][13], u8 chain)
+{
+ u8 rfvalue = 0;
+
+ if (chain == CHAIN_0) {
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rf_reg_record[CHAIN_0][0].bank = 0;
+ rf_reg_record[CHAIN_0][0].reg = 1;
+ rf_reg_record[CHAIN_0][0].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rf_reg_record[CHAIN_0][1].bank = 0;
+ rf_reg_record[CHAIN_0][1].reg = 2;
+ rf_reg_record[CHAIN_0][1].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ rf_reg_record[CHAIN_0][2].bank = 0;
+ rf_reg_record[CHAIN_0][2].reg = 35;
+ rf_reg_record[CHAIN_0][2].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rf_reg_record[CHAIN_0][3].bank = 0;
+ rf_reg_record[CHAIN_0][3].reg = 42;
+ rf_reg_record[CHAIN_0][3].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0);
+ rf_reg_record[CHAIN_0][4].bank = 4;
+ rf_reg_record[CHAIN_0][4].reg = 0;
+ rf_reg_record[CHAIN_0][4].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 2);
+ rf_reg_record[CHAIN_0][5].bank = 4;
+ rf_reg_record[CHAIN_0][5].reg = 2;
+ rf_reg_record[CHAIN_0][5].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 34);
+ rf_reg_record[CHAIN_0][6].bank = 4;
+ rf_reg_record[CHAIN_0][6].reg = 34;
+ rf_reg_record[CHAIN_0][6].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ rf_reg_record[CHAIN_0][7].bank = 5;
+ rf_reg_record[CHAIN_0][7].reg = 3;
+ rf_reg_record[CHAIN_0][7].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ rf_reg_record[CHAIN_0][8].bank = 5;
+ rf_reg_record[CHAIN_0][8].reg = 4;
+ rf_reg_record[CHAIN_0][8].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ rf_reg_record[CHAIN_0][9].bank = 5;
+ rf_reg_record[CHAIN_0][9].reg = 17;
+ rf_reg_record[CHAIN_0][9].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ rf_reg_record[CHAIN_0][10].bank = 5;
+ rf_reg_record[CHAIN_0][10].reg = 18;
+ rf_reg_record[CHAIN_0][10].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ rf_reg_record[CHAIN_0][11].bank = 5;
+ rf_reg_record[CHAIN_0][11].reg = 19;
+ rf_reg_record[CHAIN_0][11].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+ rf_reg_record[CHAIN_0][12].bank = 5;
+ rf_reg_record[CHAIN_0][12].reg = 20;
+ rf_reg_record[CHAIN_0][12].value = rfvalue;
+ } else if (chain == CHAIN_1) {
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rf_reg_record[CHAIN_1][0].bank = 0;
+ rf_reg_record[CHAIN_1][0].reg = 1;
+ rf_reg_record[CHAIN_1][0].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rf_reg_record[CHAIN_1][1].bank = 0;
+ rf_reg_record[CHAIN_1][1].reg = 2;
+ rf_reg_record[CHAIN_1][1].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ rf_reg_record[CHAIN_1][2].bank = 0;
+ rf_reg_record[CHAIN_1][2].reg = 35;
+ rf_reg_record[CHAIN_1][2].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rf_reg_record[CHAIN_1][3].bank = 0;
+ rf_reg_record[CHAIN_1][3].reg = 42;
+ rf_reg_record[CHAIN_1][3].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0);
+ rf_reg_record[CHAIN_1][4].bank = 6;
+ rf_reg_record[CHAIN_1][4].reg = 0;
+ rf_reg_record[CHAIN_1][4].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 2);
+ rf_reg_record[CHAIN_1][5].bank = 6;
+ rf_reg_record[CHAIN_1][5].reg = 2;
+ rf_reg_record[CHAIN_1][5].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 34);
+ rf_reg_record[CHAIN_1][6].bank = 6;
+ rf_reg_record[CHAIN_1][6].reg = 34;
+ rf_reg_record[CHAIN_1][6].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3);
+ rf_reg_record[CHAIN_1][7].bank = 7;
+ rf_reg_record[CHAIN_1][7].reg = 3;
+ rf_reg_record[CHAIN_1][7].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ rf_reg_record[CHAIN_1][8].bank = 7;
+ rf_reg_record[CHAIN_1][8].reg = 4;
+ rf_reg_record[CHAIN_1][8].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17);
+ rf_reg_record[CHAIN_1][9].bank = 7;
+ rf_reg_record[CHAIN_1][9].reg = 17;
+ rf_reg_record[CHAIN_1][9].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18);
+ rf_reg_record[CHAIN_1][10].bank = 7;
+ rf_reg_record[CHAIN_1][10].reg = 18;
+ rf_reg_record[CHAIN_1][10].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19);
+ rf_reg_record[CHAIN_1][11].bank = 7;
+ rf_reg_record[CHAIN_1][11].reg = 19;
+ rf_reg_record[CHAIN_1][11].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20);
+ rf_reg_record[CHAIN_1][12].bank = 7;
+ rf_reg_record[CHAIN_1][12].reg = 20;
+ rf_reg_record[CHAIN_1][12].value = rfvalue;
+ } else {
+ rt2x00_warn(rt2x00dev, "Unknown chain = %u\n", chain);
+ }
+}
+
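The store routine saves the same thirteen (bank, register) pairs per chain; only the bank numbers differ between chain 0 (banks 0/4/5) and chain 1 (banks 0/6/7). A table-driven equivalent is sketched below under the assumption that the register lists stay exactly as in the open-coded version; rf_configstore_tbl and the two tables are hypothetical names used only for this example:

static const u8 chain0_regs[13][2] = {
	{0, 1}, {0, 2}, {0, 35}, {0, 42}, {4, 0}, {4, 2}, {4, 34},
	{5, 3}, {5, 4}, {5, 17}, {5, 18}, {5, 19}, {5, 20},
};
static const u8 chain1_regs[13][2] = {
	{0, 1}, {0, 2}, {0, 35}, {0, 42}, {6, 0}, {6, 2}, {6, 34},
	{7, 3}, {7, 4}, {7, 17}, {7, 18}, {7, 19}, {7, 20},
};

static void rf_configstore_tbl(struct rt2x00_dev *rt2x00dev,
			       struct rf_reg_pair rec[][13], u8 chain)
{
	const u8 (*regs)[2] = (chain == CHAIN_0) ? chain0_regs : chain1_regs;
	int i;

	for (i = 0; i < 13; i++) {
		rec[chain][i].bank = regs[i][0];
		rec[chain][i].reg = regs[i][1];
		rec[chain][i].value = rt2800_rfcsr_read_bank(rt2x00dev,
							     regs[i][0],
							     regs[i][1]);
	}
}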
+static void rt2800_rf_configrecover(struct rt2x00_dev *rt2x00dev,
+ struct rf_reg_pair rf_record[][13])
+{
+ u8 chain_index = 0, record_index = 0;
+ u8 bank = 0, rf_register = 0, value = 0;
+
+ for (chain_index = 0; chain_index < 2; chain_index++) {
+ for (record_index = 0; record_index < 13; record_index++) {
+ bank = rf_record[chain_index][record_index].bank;
+ rf_register = rf_record[chain_index][record_index].reg;
+ value = rf_record[chain_index][record_index].value;
+ rt2800_rfcsr_write_bank(rt2x00dev, bank, rf_register, value);
+ rt2x00_dbg(rt2x00dev, "bank: %d, rf_register: %d, value: %x\n",
+ bank, rf_register, value);
+ }
+ }
+}
+
+static void rt2800_setbbptonegenerator(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 158, 0xAA);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAB);
+ rt2800_bbp_write(rt2x00dev, 159, 0x0A);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAC);
+ rt2800_bbp_write(rt2x00dev, 159, 0x3F);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAD);
+ rt2800_bbp_write(rt2x00dev, 159, 0x3F);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x40);
+}
+
+static u32 rt2800_do_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx, u8 read_neg)
+{
+ u32 macvalue = 0;
+ int fftout_i = 0, fftout_q = 0;
+ u32 ptmp = 0, pint = 0;
+ u8 bbp = 0;
+ u8 tidxi;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x9b);
+
+ bbp = 0x9b;
+
+ while (bbp == 0x9b) {
+ usleep_range(10, 50);
+ bbp = rt2800_bbp_read(rt2x00dev, 159);
+ bbp = bbp & 0xff;
+ }
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xba);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ pint = ptmp;
+ rt2x00_dbg(rt2x00dev, "I = %d, Q = %d, power = %x\n", fftout_i, fftout_q, pint);
+ if (read_neg) {
+ pint = pint >> 1;
+ tidxi = 0x40 - tidx;
+ tidxi = tidxi & 0x3f;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xba);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ ptmp = ptmp >> 1;
+ pint = pint + ptmp;
+ }
+
+ return pint;
+}
+
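Register 0x057C returns one FFT bin with the I sample in bits 31:16 and the Q sample in bits 15:0, each as 16-bit two's complement, and the accumulated value is I^2 + Q^2. A standalone sketch of that unpacking (the open-coded subtraction of 0x10000 above performs the same sign extension):

#include <stdint.h>

static uint32_t fft_bin_power(uint32_t macvalue)
{
	int32_t fft_i = (int16_t)(macvalue >> 16);	/* sign-extend bits 31:16 */
	int32_t fft_q = (int16_t)(macvalue & 0xffff);	/* sign-extend bits 15:0 */

	return (uint32_t)(fft_i * fft_i) + (uint32_t)(fft_q * fft_q);
}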
+static u32 rt2800_read_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx)
+{
+ u32 macvalue = 0;
+ int fftout_i = 0, fftout_q = 0;
+ u32 ptmp = 0, pint = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xBA);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ pint = ptmp;
+
+ return pint;
+}
+
+static void rt2800_write_dc(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 alc, u8 iorq, u8 dc)
+{
+ u8 bbp = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb0);
+ bbp = alc | 0x80;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ if (ch_idx == 0)
+ bbp = (iorq == 0) ? 0xb1 : 0xb2;
+ else
+ bbp = (iorq == 0) ? 0xb8 : 0xb9;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ bbp = dc;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+}
+
+static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
+ u8 alc_idx, u8 dc_result[][RF_ALC_NUM][2])
+{
+ u32 p0 = 0, p1 = 0, pf = 0;
+ s8 idx0 = 0, idx1 = 0;
+ u8 idxf[] = {0x00, 0x00};
+ u8 ibit = 0x20;
+ u8 iorq;
+ s8 bidx;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x80);
+
+ for (bidx = 5; bidx >= 0; bidx--) {
+ for (iorq = 0; iorq <= 1; iorq++) {
+ if (idxf[iorq] == 0x20) {
+ idx0 = 0x20;
+ p0 = pf;
+ } else {
+ idx0 = idxf[iorq] - ibit;
+ idx0 = idx0 & 0x3F;
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx0);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ }
+
+ idx1 = idxf[iorq] + (bidx == 5 ? 0 : ibit);
+ idx1 = idx1 & 0x3F;
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx1);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+
+ rt2x00_dbg(rt2x00dev, "alc=%u, IorQ=%u, idx_final=%2x\n",
+ alc_idx, iorq, idxf[iorq]);
+ rt2x00_dbg(rt2x00dev, "p0=%x, p1=%x, pf=%x, idx_0=%x, idx_1=%x, ibit=%x\n",
+ p0, p1, pf, idx0, idx1, ibit);
+
+ if (bidx != 5 && pf <= p0 && pf < p1) {
+ idxf[iorq] = idxf[iorq];
+ } else if (p0 < p1) {
+ pf = p0;
+ idxf[iorq] = idx0 & 0x3F;
+ } else {
+ pf = p1;
+ idxf[iorq] = idx1 & 0x3F;
+ }
+ rt2x00_dbg(rt2x00dev, "IorQ=%u, idx_final[%u]:%x, pf:%8x\n",
+ iorq, iorq, idxf[iorq], pf);
+
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idxf[iorq]);
+ }
+ ibit = ibit >> 1;
+ }
+ dc_result[ch_idx][alc_idx][0] = idxf[0];
+ dc_result[ch_idx][alc_idx][1] = idxf[1];
+}
+
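rt2800_loft_search() is a successive-approximation descent over the 6-bit DC offset code: around the current best value it probes one step below and one step above with a step that halves each round (0x20 down to 1), keeping whichever probe measured the lowest tone power. A simplified standalone sketch of that idea; it omits the first-iteration special cases of the driver code, and measure() stands in for rt2800_do_fft_accumulation():

#include <stdint.h>

static uint8_t sar_search(uint32_t (*measure)(uint8_t code))
{
	uint8_t best = 0x20, step;
	uint32_t best_p = measure(best);

	for (step = 0x20; step != 0; step >>= 1) {
		uint8_t lo = (best - step) & 0x3f;
		uint8_t hi = (best + step) & 0x3f;
		uint32_t p_lo = measure(lo);
		uint32_t p_hi = measure(hi);

		if (p_lo < best_p && p_lo <= p_hi) {
			best = lo;
			best_p = p_lo;
		} else if (p_hi < best_p) {
			best = hi;
			best_p = p_hi;
		}
	}
	return best;
}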
+static void rt2800_iq_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 *ges, u8 *pes)
+{
+ u32 p0 = 0, p1 = 0, pf = 0;
+ s8 perr = 0, gerr = 0, iq_err = 0;
+ s8 pef = 0, gef = 0;
+ s8 psta, pend;
+ s8 gsta, gend;
+
+ u8 ibit = 0x20;
+ u8 first_search = 0x00, touch_neg_max = 0x00;
+ s8 idx0 = 0, idx1 = 0;
+ u8 gop;
+ u8 bbp = 0;
+ s8 bidx;
+
+ for (bidx = 5; bidx >= 1; bidx--) {
+ for (gop = 0; gop < 2; gop++) {
+ if (gop == 1 || bidx < 4) {
+ if (gop == 0)
+ iq_err = gerr;
+ else
+ iq_err = perr;
+
+ first_search = (gop == 0) ? (bidx == 3) : (bidx == 5);
+ touch_neg_max = (gop) ? ((iq_err & 0x0F) == 0x08) :
+ ((iq_err & 0x3F) == 0x20);
+
+ if (touch_neg_max) {
+ p0 = pf;
+ idx0 = iq_err;
+ } else {
+ idx0 = iq_err - ibit;
+ bbp = (ch_idx == 0) ? ((gop == 0) ? 0x28 : 0x29) :
+ ((gop == 0) ? 0x46 : 0x47);
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, idx0);
+
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+ }
+
+ idx1 = iq_err + (first_search ? 0 : ibit);
+ idx1 = (gop == 0) ? (idx1 & 0x0F) : (idx1 & 0x3F);
+
+ bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 :
+ (gop == 0) ? 0x46 : 0x47;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, idx1);
+
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+
+ rt2x00_dbg(rt2x00dev,
+ "p0=%x, p1=%x, pwer_final=%x, idx0=%x, idx1=%x, iq_err=%x, gop=%d, ibit=%x\n",
+ p0, p1, pf, idx0, idx1, iq_err, gop, ibit);
+
+ if (!(!first_search && pf <= p0 && pf < p1)) {
+ if (p0 < p1) {
+ pf = p0;
+ iq_err = idx0;
+ } else {
+ pf = p1;
+ iq_err = idx1;
+ }
+ }
+
+ bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 :
+ (gop == 0) ? 0x46 : 0x47;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, iq_err);
+
+ if (gop == 0)
+ gerr = iq_err;
+ else
+ perr = iq_err;
+
+ rt2x00_dbg(rt2x00dev, "IQCalibration pf=%8x (%2x, %2x) !\n",
+ pf, gerr & 0x0F, perr & 0x3F);
+ }
+ }
+
+ if (bidx > 0)
+ ibit = (ibit >> 1);
+ }
+ gerr = (gerr & 0x08) ? (gerr & 0x0F) - 0x10 : (gerr & 0x0F);
+ perr = (perr & 0x20) ? (perr & 0x3F) - 0x40 : (perr & 0x3F);
+
+ gerr = (gerr < -0x07) ? -0x07 : (gerr > 0x05) ? 0x05 : gerr;
+ gsta = gerr - 1;
+ gend = gerr + 2;
+
+ perr = (perr < -0x1f) ? -0x1f : (perr > 0x1d) ? 0x1d : perr;
+ psta = perr - 1;
+ pend = perr + 2;
+
+ for (gef = gsta; gef <= gend; gef = gef + 1)
+ for (pef = psta; pef <= pend; pef = pef + 1) {
+ bbp = (ch_idx == 0) ? 0x28 : 0x46;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, gef & 0x0F);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, pef & 0x3F);
+
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+ if (gef == gsta && pef == psta) {
+ pf = p1;
+ gerr = gef;
+ perr = pef;
+ } else if (pf > p1) {
+ pf = p1;
+ gerr = gef;
+ perr = pef;
+ }
+ rt2x00_dbg(rt2x00dev, "Fine IQCalibration p1=%8x pf=%8x (%2x, %2x) !\n",
+ p1, pf, gef & 0x0F, pef & 0x3F);
+ }
+
+ ges[ch_idx] = gerr & 0x0F;
+ pes[ch_idx] = perr & 0x3F;
+}
+
+static void rt2800_rf_aux_tx0_loopback(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x21);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x10);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x1b);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 2, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 34, 0xee);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xd7);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0xa2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20);
+}
+
+static void rt2800_rf_aux_tx1_loopback(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x22);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x20);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x4b);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 2, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 34, 0xee);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, 0xd7);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, 0xa2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, 0x20);
+}
+
+static void rt2800_loft_iq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ struct rf_reg_pair rf_store[CHAIN_NUM][13];
+ u32 macorg1 = 0;
+ u32 macorg2 = 0;
+ u32 macorg3 = 0;
+ u32 macorg4 = 0;
+ u32 macorg5 = 0;
+ u32 orig528 = 0;
+ u32 orig52c = 0;
+
+ u32 savemacsysctrl = 0;
+ u32 macvalue = 0;
+ u32 mac13b8 = 0;
+ u32 p0 = 0, p1 = 0;
+ u32 p0_idx10 = 0, p1_idx10 = 0;
+
+ u8 rfvalue;
+ u8 loft_dc_search_result[CHAIN_NUM][RF_ALC_NUM][2];
+ u8 ger[CHAIN_NUM], per[CHAIN_NUM];
+
+ u8 vga_gain[] = {14, 14};
+ u8 bbp = 0, ch_idx = 0, rf_alc_idx = 0, idx = 0;
+ u8 bbpr30, rfb0r39, rfb0r42;
+ u8 bbpr1;
+ u8 bbpr4;
+ u8 bbpr241, bbpr242;
+ u8 count_step;
+
+ static const u8 rf_gain[] = {0x00, 0x01, 0x02, 0x04, 0x08, 0x0c};
+ static const u8 rfvga_gain_table[] = {0x24, 0x25, 0x26, 0x27, 0x28, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3F};
+ static const u8 bbp_2324gain[] = {0x16, 0x14, 0x12, 0x10, 0x0c, 0x08};
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+ mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8);
+ orig528 = rt2800_register_read(rt2x00dev, RF_CONTROL2);
+ orig52c = rt2800_register_read(rt2x00dev, RF_BYPASS2);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n");
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n");
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++)
+ rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx);
+
+ bbpr30 = rt2800_bbp_read(rt2x00dev, 30);
+ rfb0r39 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 39);
+ rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+
+ rt2800_bbp_write(rt2x00dev, 30, 0x1F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x5B);
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ rt2800_setbbptonegenerator(rt2x00dev);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306);
+ rt2800_register_write(rt2x00dev, 0x13b8, 0x10);
+ udelay(1);
+
+ if (ch_idx == 0)
+ rt2800_rf_aux_tx0_loopback(rt2x00dev);
+ else
+ rt2800_rf_aux_tx1_loopback(rt2x00dev);
+
+ udelay(1);
+
+ if (ch_idx == 0)
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x05);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ if (ch_idx == 0)
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ else
+ rt2800_bbp_write(rt2x00dev, 159, 0x01);
+
+ vga_gain[ch_idx] = 18;
+ for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, bbp_2324gain[rf_alc_idx]);
+ rt2800_bbp_write(rt2x00dev, 24, bbp_2324gain[rf_alc_idx]);
+
+ macvalue = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macvalue &= (~0x0000F1F1);
+ macvalue |= (rf_gain[rf_alc_idx] << 4);
+ macvalue |= (rf_gain[rf_alc_idx] << 12);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macvalue);
+ macvalue = (0x0000F1F1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macvalue);
+
+ if (rf_alc_idx == 0) {
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x21);
+ for (; vga_gain[ch_idx] > 0;
+ vga_gain[ch_idx] = vga_gain[ch_idx] - 2) {
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x21);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ rt2x00_dbg(rt2x00dev, "LOFT AGC %d %d\n", p0, p1);
+ if ((p0 < 7000 * 7000) && (p1 < (7000 * 7000)))
+ break;
+ }
+
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00);
+
+ rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx],
+ rfvga_gain_table[vga_gain[ch_idx]]);
+
+ if (vga_gain[ch_idx] < 0)
+ vga_gain[ch_idx] = 0;
+ }
+
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+
+ rt2800_loft_search(rt2x00dev, ch_idx, rf_alc_idx, loft_dc_search_result);
+ }
+ }
+
+ for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) {
+ for (idx = 0; idx < 4; idx++) {
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ bbp = (idx << 2) + rf_alc_idx;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " ALC %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb1);
+ bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x00];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " I0 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb2);
+ bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x01];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " Q0 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb8);
+ bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x00];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " I1 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb9);
+ bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x01];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " Q1 %2x\n", bbp);
+ }
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ bbp = 0x00;
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_rf_configrecover(rt2x00dev, rf_store);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, orig528);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, orig52c);
+ rt2800_register_write(rt2x00dev, 0x13b8, mac13b8);
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+
+ bbpr1 = rt2800_bbp_read(rt2x00dev, 1);
+ bbpr4 = rt2800_bbp_read(rt2x00dev, 4);
+ bbpr241 = rt2800_bbp_read(rt2x00dev, 241);
+ bbpr242 = rt2800_bbp_read(rt2x00dev, 242);
+ mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n");
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n");
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000101);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 4, bbpr4 & (~0x18));
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 241, 0x14);
+ rt2800_bbp_write(rt2x00dev, 242, 0x80);
+ rt2800_bbp_write(rt2x00dev, 244, 0x31);
+ } else {
+ rt2800_setbbptonegenerator(rt2x00dev);
+ }
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306);
+ udelay(1);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F);
+
+ if (!test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000000);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1);
+ }
+
+ rt2800_register_write(rt2x00dev, 0x13b8, 0x00000010);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++)
+ rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx);
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x3B);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x3B);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x03);
+ rt2800_bbp_write(rt2x00dev, 159, 0x60);
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x80);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ if (ch_idx == 0) {
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ bbp = bbpr1 & (~0x18);
+ bbp = bbp | 0x00;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+ }
+ rt2800_rf_aux_tx0_loopback(rt2x00dev);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ rt2800_bbp_write(rt2x00dev, 159, 0x01);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags)) {
+ bbp = bbpr1 & (~0x18);
+ bbp = bbp | 0x08;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+ }
+ rt2800_rf_aux_tx1_loopback(rt2x00dev);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x05);
+ rt2800_bbp_write(rt2x00dev, 159, 0x04);
+
+ bbp = (ch_idx == 0) ? 0x28 : 0x46;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x06);
+ rt2800_bbp_write(rt2x00dev, 24, 0x06);
+ count_step = 1;
+ } else {
+ rt2800_bbp_write(rt2x00dev, 23, 0x1F);
+ rt2800_bbp_write(rt2x00dev, 24, 0x1F);
+ count_step = 2;
+ }
+
+ for (; vga_gain[ch_idx] < 19; vga_gain[ch_idx] = (vga_gain[ch_idx] + count_step)) {
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ p0_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x21);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags))
+ p1_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A);
+
+ rt2x00_dbg(rt2x00dev, "IQ AGC %d %d\n", p0, p1);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2x00_dbg(rt2x00dev, "IQ AGC IDX 10 %d %d\n", p0_idx10, p1_idx10);
+ if ((p0_idx10 > 7000 * 7000) || (p1_idx10 > 7000 * 7000)) {
+ if (vga_gain[ch_idx] != 0)
+ vga_gain[ch_idx] = vga_gain[ch_idx] - 1;
+ break;
+ }
+ }
+
+ if ((p0 > 2500 * 2500) || (p1 > 2500 * 2500))
+ break;
+ }
+
+ if (vga_gain[ch_idx] > 18)
+ vga_gain[ch_idx] = 18;
+ rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx],
+ rfvga_gain_table[vga_gain[ch_idx]]);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_iq_search(rt2x00dev, ch_idx, ger, per);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x28);
+ bbp = ger[CHAIN_0] & 0x0F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x29);
+ bbp = per[CHAIN_0] & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x46);
+ bbp = ger[CHAIN_1] & 0x0F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x47);
+ bbp = per[CHAIN_1] & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 1, bbpr1);
+ rt2800_bbp_write(rt2x00dev, 241, bbpr241);
+ rt2800_bbp_write(rt2x00dev, 242, bbpr242);
+ }
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 30, bbpr30);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, rfb0r39);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ rt2800_bbp_write(rt2x00dev, 4, bbpr4);
+
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_rf_configrecover(rt2x00dev, rf_store);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, 0x13b8, mac13b8);
+}
+
static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev,
bool set_bw, bool is_ht40)
{
@@ -9005,8 +10582,13 @@ static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C);
+ rt2800_r_calibration(rt2x00dev);
+ rt2800_rf_self_txdc_cal(rt2x00dev);
+ rt2800_rxdcoc_calibration(rt2x00dev);
rt2800_bw_filter_calibration(rt2x00dev, true);
rt2800_bw_filter_calibration(rt2x00dev, false);
+ rt2800_loft_iq_calibration(rt2x00dev);
+ rt2800_rxiq_calibration(rt2x00dev);
}
static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -9073,7 +10655,7 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Wait BBP/RF to wake up.
*/
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
return -EIO;
/*
@@ -9435,6 +11017,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rf = RF3853;
else if (rt2x00_rt(rt2x00dev, RT5350))
rf = RF5350;
+ else if (rt2x00_rt(rt2x00dev, RT5592))
+ rf = RF5592;
else
rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -9564,7 +11148,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
- if (rt2x00_rt(rt2x00dev, RT3352)) {
+ if (rt2x00_rt(rt2x00dev, RT3352) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
__set_bit(CAPABILITY_EXTERNAL_PA_TX0,
@@ -9575,6 +11160,18 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
&rt2x00dev->cap_flags);
}
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF2);
+
+ if (rt2x00_rt(rt2x00dev, RT6352) && eeprom != 0 && eeprom != 0xffff) {
+ if (!rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF2_EXTERNAL_PA)) {
+ __clear_bit(CAPABILITY_EXTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags);
+ __clear_bit(CAPABILITY_EXTERNAL_PA_TX1,
+ &rt2x00dev->cap_flags);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index e1761f467b94..3cbef77b4bd3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -17,6 +17,16 @@
#define WCID_START 33
#define WCID_END 222
#define STA_IDS_SIZE (WCID_END - WCID_START + 2)
+#define CHAIN_0 0x0
+#define CHAIN_1 0x1
+#define RF_ALC_NUM 6
+#define CHAIN_NUM 2
+
+struct rf_reg_pair {
+ u8 bank;
+ u8 reg;
+ u8 value;
+};
/* RT2800 driver data structure */
struct rt2800_drv_data {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 8f5772b98f58..07a6a5a9ce13 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1309,8 +1309,11 @@ void rt2x00queue_unmap_skb(struct queue_entry *entry);
*/
static inline struct data_queue *
rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
+ enum data_queue_qid queue)
{
+ if (queue >= rt2x00dev->ops->tx_queues && queue < IEEE80211_NUM_ACS)
+ queue = rt2x00dev->ops->tx_queues - 1;
+
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index e95c101c2711..3a035afcf7f9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1093,6 +1093,19 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
kfree(rt2x00dev->spec.channels_info);
}
+static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 2 * 1024, .blink_time = 220 },
+ { .throughput = 5 * 1024, .blink_time = 190 },
+ { .throughput = 10 * 1024, .blink_time = 170 },
+ { .throughput = 25 * 1024, .blink_time = 150 },
+ { .throughput = 54 * 1024, .blink_time = 130 },
+ { .throughput = 120 * 1024, .blink_time = 110 },
+ { .throughput = 265 * 1024, .blink_time = 80 },
+ { .throughput = 586 * 1024, .blink_time = 50 },
+};
+
static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -1174,6 +1187,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
#undef RT2X00_TASKLET_INIT
+ ieee80211_create_tpt_led_trigger(rt2x00dev->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ rt2x00_tpt_blink,
+ ARRAY_SIZE(rt2x00_tpt_blink));
+
/*
* Register HW.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 4d06038afd83..98df0aef8168 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -318,7 +318,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* when using more than one tx stream (>MCS7).
*/
if (sta && txdesc->u.ht.mcs > 7 &&
- sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
index 49421d10e22b..f7d95c9624a0 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
@@ -143,7 +143,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
led->dev = dev;
led->ledpin = ledpin;
led->is_radio = is_radio;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7ddce3c3f0c4..782b089a2e1b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1425,7 +1425,7 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1511,9 +1511,9 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index c66f0726b253..ac641a56efb0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1878,13 +1878,6 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
/* We have 8 bits to indicate validity */
map_addr = offset * 8;
- if (map_addr >= EFUSE_MAP_LEN) {
- dev_warn(dev, "%s: Illegal map_addr (%04x), "
- "efuse corrupt!\n",
- __func__, map_addr);
- ret = -EINVAL;
- goto exit;
- }
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (word_mask & BIT(i)) {
@@ -1895,6 +1888,13 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
if (ret)
goto exit;
+ if (map_addr >= EFUSE_MAP_LEN - 1) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
+ ret = -EINVAL;
+ goto exit;
+ }
priv->efuse_wifi.raw[map_addr++] = val8;
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
@@ -2929,12 +2929,12 @@ bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
}
if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
- /* path B RX OK */
+ /* path B TX OK */
for (i = 4; i < 6; i++)
result[3][i] = result[c1][i];
}
- if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
+ if (!(simubitmap & 0xc0) && priv->tx_paths > 1) {
/* path B RX OK */
for (i = 6; i < 8; i++)
result[3][i] = result[c1][i];
@@ -4320,7 +4320,7 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
}
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
{
struct h2c_cmd h2c;
@@ -4340,10 +4340,15 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
{
struct h2c_cmd h2c;
- u8 bw = RTL8XXXU_CHANNEL_WIDTH_20;
+ u8 bw;
+
+ if (txbw_40mhz)
+ bw = RTL8XXXU_CHANNEL_WIDTH_40;
+ else
+ bw = RTL8XXXU_CHANNEL_WIDTH_20;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@@ -4353,15 +4358,14 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff;
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
- h2c.ramask.arg = 0x80;
h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
h2c.b_macid_cfg.data2 = bw;
- dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
- __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, rateid %02x, sgi %d, size %zi\n",
+ __func__, ramask, rateid, sgi, sizeof(h2c.b_macid_cfg));
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
}
@@ -4556,6 +4560,53 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
return network_type;
}
+static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
+{
+ u32 reg_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+ };
+ u32 val32;
+ u16 wireless_mode = 0;
+ u8 aifs, aifsn, sifs;
+ int i;
+
+ if (priv->vif) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ if (sta)
+ wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
+ rcu_read_unlock();
+ }
+
+ if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
+ (wireless_mode & WIRELESS_MODE_N_24G))
+ sifs = 16;
+ else
+ sifs = 10;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ val32 = rtl8xxxu_read32(priv, reg_edca_param[i]);
+
+ /* It was set in conf_tx. */
+ aifsn = val32 & 0xff;
+
+ /* aifsn not set yet or already fixed */
+ if (aifsn < 2 || aifsn > 15)
+ continue;
+
+ aifs = aifsn * slot_time + sifs;
+
+ val32 &= ~0xff;
+ val32 |= aifs;
+ rtl8xxxu_write32(priv, reg_edca_param[i], val32);
+ }
+}
+
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changed)
@@ -4622,7 +4673,11 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
RATE_INFO_FLAGS_SHORT_GI;
}
- rarpt->txrate.bw |= RATE_INFO_BW_20;
+ if (rtl8xxxu_ht40_2g &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ rarpt->txrate.bw = RATE_INFO_BW_40;
+ else
+ rarpt->txrate.bw = RATE_INFO_BW_20;
}
bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
rarpt->bit_rate = bit_rate;
@@ -4631,7 +4686,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
priv->vif = vif;
priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
- priv->fops->update_rate_mask(priv, ramask, 0, sgi);
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi, rarpt->txrate.bw == RATE_INFO_BW_40);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -4671,6 +4726,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
else
val8 = 20;
rtl8xxxu_write8(priv, REG_SLOT, val8);
+
+ rtl8xxxu_set_aifs(priv, val8);
}
if (changed & BSS_CHANGED_BSSID) {
@@ -4710,9 +4767,8 @@ static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
return rtlqueue;
}
-static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+static u32 rtl8xxxu_queue_select(struct ieee80211_hdr *hdr, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u32 queue;
if (ieee80211_is_mgmt(hdr->frame_control))
@@ -5062,6 +5118,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (control && control->sta)
sta = control->sta;
+ queue = rtl8xxxu_queue_select(hdr, skb);
+
tx_desc = skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
@@ -5074,7 +5132,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
- queue = rtl8xxxu_queue_select(hw, skb);
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
if (tx_info->control.hw_key) {
@@ -6344,7 +6401,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
}
priv->rssi_level = rssi_level;
- priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi);
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz);
}
}
@@ -6657,7 +6714,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
if (!hw) {
ret = -ENOMEM;
- priv = NULL;
goto err_put_dev;
}
@@ -6768,11 +6824,9 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
err_set_intfdata:
usb_set_intfdata(interface, NULL);
- if (priv) {
- kfree(priv->fw_data);
- mutex_destroy(&priv->usb_buf_mutex);
- mutex_destroy(&priv->h2c_mutex);
- }
+ kfree(priv->fw_data);
+ mutex_destroy(&priv->usb_buf_mutex);
+ mutex_destroy(&priv->h2c_mutex);
ieee80211_free_hw(hw);
err_put_dev:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 15e6a6aded31..d18c092b6142 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2386,11 +2386,10 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
"Just Read IQK Matrix reg for channel:%d....\n",
channel);
- _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
- rtlphy->iqk_matrix[
- indexforchannel].value, 0,
- (rtlphy->iqk_matrix[
- indexforchannel].value[0][2] == 0));
+ if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
+ _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
if (IS_92D_SINGLEPHY(rtlhal->version)) {
if ((rtlphy->iqk_matrix[
indexforchannel].value[0][4] != 0)
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index 76c7f3257dd3..038a30b170ef 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -30,11 +30,11 @@ void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
struct rtw_bfee *bfee = &rtwvif->bfee;
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
- struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_sta *sta;
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_sta_vht_cap *ic_vht_cap;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index cac053f485c3..6276ad624299 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -13,7 +13,7 @@
static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
u8 rssi, u8 rssi_thresh)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tol = chip->rssi_tolerance;
u8 next_state;
@@ -36,7 +36,7 @@ static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
static void rtw_coex_limited_tx(struct rtw_dev *rtwdev,
bool tx_limit_en, bool ampdu_limit_en)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 num_of_active_port = 1;
@@ -365,7 +365,7 @@ static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap,
void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u16 val = 0x2;
@@ -400,7 +400,7 @@ EXPORT_SYMBOL(rtw_coex_write_scbd);
static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->scbd_support)
return 0;
@@ -410,7 +410,7 @@ static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
static void rtw_coex_check_rfk(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_rfe *coex_rfe = &coex->rfe;
@@ -489,7 +489,7 @@ static void rtw_coex_monitor_bt_ctr(struct rtw_dev *rtwdev)
static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
@@ -524,10 +524,10 @@ static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_traffic_stats *stats = &rtwdev->stats;
bool is_5G = false;
bool wl_busy = false;
@@ -706,10 +706,10 @@ static const char *rtw_coex_get_bt_status_string(u8 bt_status)
static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 i;
u8 rssi_state;
u8 rssi_step;
@@ -806,7 +806,7 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
@@ -933,7 +933,7 @@ EXPORT_SYMBOL(rtw_coex_write_indirect_reg);
static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_hw_reg *btg_reg = chip->btg_reg;
if (wifi_control) {
@@ -981,7 +981,7 @@ static void rtw_coex_mimo_ps(struct rtw_dev *rtwdev, bool force, bool state)
static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force,
u8 table_case)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 h2c_para[6] = {0};
u32 table_wl = 0x5a5a5a5a;
@@ -1065,9 +1065,9 @@ static void rtw_coex_set_table(struct rtw_dev *rtwdev, bool force, u32 table0,
static void rtw_coex_table(struct rtw_dev *rtwdev, bool force, u8 type)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_stat *coex_stat = &coex->stat;
@@ -1135,9 +1135,9 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
u8 byte3, u8 byte4, u8 byte5)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 ps_type = COEX_PS_WIFI_NATIVE;
bool ap_enable = false;
@@ -1193,10 +1193,10 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 n, type;
bool turn_on;
@@ -1526,8 +1526,8 @@ static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev)
static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1549,11 +1549,11 @@ static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 level = 0;
bool bt_afh_loss = true;
@@ -1594,8 +1594,8 @@ static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1619,8 +1619,8 @@ static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1644,10 +1644,10 @@ static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1684,11 +1684,11 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_rfe *coex_rfe = &coex->rfe;
u8 table_case = 0xff, tdma_case = 0xff;
@@ -1753,10 +1753,10 @@ exit:
static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
bool wl_hi_pri = false;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1853,11 +1853,11 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1901,10 +1901,10 @@ static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1932,10 +1932,10 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
bool bt_multi_link_remain = false, is_toggle_table = false;
@@ -2015,11 +2015,11 @@ static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -2057,10 +2057,10 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
bool ap_enable = false;
@@ -2096,10 +2096,10 @@ static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2133,11 +2133,11 @@ static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case, interval = 0;
u32 slot_type = 0;
bool is_toggle_table = false;
@@ -2190,10 +2190,10 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
bool wl_cpt_test = false, bt_cpt_test = false;
@@ -2247,10 +2247,10 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2282,10 +2282,10 @@ static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2316,9 +2316,9 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
@@ -2348,8 +2348,8 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2372,9 +2372,9 @@ static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
@@ -2411,10 +2411,10 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -2451,8 +2451,8 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2528,8 +2528,8 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
bool rf4ce_en = false;
@@ -3002,9 +3002,9 @@ void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type)
void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
u32 bt_relink_time;
u8 i, rsp_source = 0, type;
@@ -3270,8 +3270,8 @@ static const u8 coex_bt_hidinfo_xb[] = {0x58, 0x62, 0x6f};
void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_hid *hidinfo;
struct rtw_coex_hid_info_a *hida;
@@ -3360,8 +3360,8 @@ void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_hid *hidinfo;
u8 i, handle;
@@ -3582,7 +3582,7 @@ static const char *rtw_coex_get_reason_string(u8 reason)
static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0,
u32 wl_reg_6c4)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 ans = 0xFF;
u8 n, i;
@@ -3618,8 +3618,8 @@ static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0,
static u8 rtw_coex_get_tdma_index(struct rtw_dev *rtwdev, u8 *tdma_para)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 ans = 0xFF;
u8 n, i, j;
u8 load_cur_tab_val;
@@ -3736,7 +3736,7 @@ static int rtw_coex_val_info(struct rtw_dev *rtwdev,
static void rtw_coex_set_coexinfo_hw(struct rtw_dev *rtwdev, struct seq_file *m)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_reg_domain *reg;
char addr_info[INFO_SIZE];
int n_addr = 0;
@@ -3910,7 +3910,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index 07fa7aa34d4b..57cf29da9ea4 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -327,7 +327,7 @@ struct coex_rf_para {
static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_init(rtwdev);
}
@@ -335,7 +335,7 @@ static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
static inline
void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->ops->coex_set_ant_switch)
return;
@@ -345,28 +345,28 @@ void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_gnt_fix(rtwdev);
}
static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_gnt_debug(rtwdev);
}
static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_rfe_type(rtwdev);
}
static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr);
}
@@ -374,7 +374,7 @@ static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
static inline
void rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain);
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 7cde6bcf253b..9ebe544e51d0 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -621,11 +621,13 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_hal *hal = &rtwdev->hal;
- u8 path, rate;
+ u8 path, rate, bw, ch, regd;
struct rtw_power_params pwr_param = {0};
- u8 bw = hal->current_band_width;
- u8 ch = hal->current_channel;
- u8 regd = rtw_regd_get(rtwdev);
+
+ mutex_lock(&rtwdev->mutex);
+ bw = hal->current_band_width;
+ ch = hal->current_channel;
+ regd = rtw_regd_get(rtwdev);
seq_printf(m, "channel: %u\n", ch);
seq_printf(m, "bandwidth: %u\n", bw);
@@ -667,6 +669,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
}
mutex_unlock(&hal->tx_power_mutex);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
index c266c84ef233..b85075cd68d0 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.c
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -86,7 +86,7 @@ static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map,
static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 size = rtwdev->efuse.physical_size;
u32 efuse_ctl;
u32 addr;
@@ -145,7 +145,7 @@ EXPORT_SYMBOL(rtw_read8_physical_efuse);
int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u32 phy_size = efuse->physical_size;
u32 log_size = efuse->logical_size;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 4fdab0329695..0b5f903c0f36 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -14,6 +14,8 @@
#include "util.h"
#include "wow.h"
#include "ps.h"
+#include "phy.h"
+#include "mac.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -116,7 +118,7 @@ legacy:
si->ra_report.desc_rate = rate;
si->ra_report.bit_rate = bit_rate;
- sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
+ sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}
static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
@@ -904,7 +906,7 @@ void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
struct rtw_nlo_info_hdr *nlo_hdr;
struct cfg80211_ssid *ssid;
@@ -959,7 +961,7 @@ static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
struct ieee80211_channel *channels = pno_req->channels;
struct sk_buff *skb;
@@ -993,7 +995,7 @@ static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
struct rtw_lps_pg_dpk_hdr *dpk_hdr;
struct sk_buff *skb;
@@ -1018,7 +1020,7 @@ static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_lps_pg_info_hdr *pg_info_hdr;
struct rtw_wow_param *rtw_wow = &rtwdev->wow;
@@ -1080,10 +1082,10 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
skb_new = ieee80211_proberesp_get(hw, vif);
break;
case RSVD_NULL:
- skb_new = ieee80211_nullfunc_get(hw, vif, false);
+ skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
break;
case RSVD_QOS_NULL:
- skb_new = ieee80211_nullfunc_get(hw, vif, true);
+ skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
break;
case RSVD_LPS_PG_DPK:
skb_new = rtw_lps_pg_dpk_get(hw);
@@ -1122,7 +1124,7 @@ static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
enum rtw_rsvd_packet_type type)
{
struct rtw_tx_pkt_info pkt_info = {0};
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 *pkt_desc;
rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
@@ -1433,7 +1435,7 @@ static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
struct ieee80211_hw *hw = rtwdev->hw;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *iter;
struct rtw_rsvd_page *rsvd_pkt;
u32 page = 0;
@@ -1647,7 +1649,7 @@ out:
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
u32 offset, u32 size, u32 *buf)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 start_pg, residue;
if (sel >= RTW_FW_FIFO_MAX) {
@@ -1706,7 +1708,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
u8 location)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
@@ -1818,8 +1820,8 @@ static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
struct sk_buff_head *list, u8 *bands,
struct rtw_vif *rtwvif)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
- struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *new;
u8 idx;
@@ -1841,16 +1843,23 @@ static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
struct sk_buff_head *probe_req_list)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb, *tmp;
u8 page_offset = 1, *buf, page_size = chip->page_size;
- u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
u16 buf_offset = page_size * page_offset;
u8 tx_desc_sz = chip->tx_pkt_desc_sz;
+ u8 page_cnt, pages;
unsigned int pkt_len;
int ret;
+ if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
+ page_cnt = RTW_OLD_PROBE_PG_CNT;
+ else
+ page_cnt = RTW_PROBE_PG_CNT;
+
+ pages = page_offset + num_probes * page_cnt;
+
buf = kzalloc(page_size * pages, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1859,7 +1868,7 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
skb_queue_walk_safe(probe_req_list, skb, tmp) {
skb_unlink(skb, probe_req_list);
rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
- if (skb->len > page_size * RTW_PROBE_PG_CNT) {
+ if (skb->len > page_size * page_cnt) {
ret = -EINVAL;
goto out;
}
@@ -1869,8 +1878,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);
- buf_offset += RTW_PROBE_PG_CNT * page_size;
- page_offset += RTW_PROBE_PG_CNT;
+ buf_offset += page_cnt * page_size;
+ page_offset += page_cnt;
kfree_skb(skb);
}
@@ -2048,6 +2057,9 @@ void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw_leave_lps_deep(rtwdev);
+ rtw_hci_flush_all_queues(rtwdev, false);
+ rtw_mac_flush_all_queues(rtwdev, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
req->mac_addr_mask);
@@ -2080,10 +2092,9 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_core_scan_complete(rtwdev, vif, true);
rtwvif = (struct rtw_vif *)vif->drv_priv;
- if (rtwvif->net_type == RTW_NET_MGD_LINKED) {
- hal->current_channel = chan;
- hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
- }
+ if (chan)
+ rtw_store_op_chan(rtwdev, false);
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
@@ -2124,6 +2135,7 @@ int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw_ch_switch_option cs_option = {0};
struct rtw_chan_list chan_list = {0};
int ret = 0;
@@ -2132,7 +2144,7 @@ int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
return -EINVAL;
cs_option.switch_en = enable;
- cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED;
+ cs_option.back_op_en = scan_info->op_chan != 0;
if (enable) {
ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
if (ret)
@@ -2171,14 +2183,33 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}
-void rtw_store_op_chan(struct rtw_dev *rtwdev)
+void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
{
struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw_hal *hal = &rtwdev->hal;
+ u8 band;
+
+ if (backup) {
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+ scan_info->op_pri_ch = hal->primary_channel;
+ } else {
+ band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ rtw_update_channel(rtwdev, scan_info->op_chan,
+ scan_info->op_pri_ch,
+ band, scan_info->op_bw);
+ }
+}
- scan_info->op_chan = hal->current_channel;
- scan_info->op_bw = hal->current_band_width;
- scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+void rtw_clear_op_chan(struct rtw_dev *rtwdev)
+{
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ scan_info->op_chan = 0;
+ scan_info->op_bw = 0;
+ scan_info->op_pri_ch_idx = 0;
+ scan_info->op_pri_ch = 0;
}
static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
@@ -2193,7 +2224,7 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_c2h_cmd *c2h;
enum rtw_scan_notify_id id;
- u8 chan, status;
+ u8 chan, band, status;
if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
return;
@@ -2204,10 +2235,13 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
status = GET_CHAN_SWITCH_STATUS(c2h->payload);
if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
- if (rtw_is_op_chan(rtwdev, chan))
+ band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ rtw_update_channel(rtwdev, chan, chan, band,
+ RTW_CHANNEL_WIDTH_20);
+ if (rtw_is_op_chan(rtwdev, chan)) {
+ rtw_store_op_chan(rtwdev, false);
ieee80211_wake_queues(rtwdev->hw);
- hal->current_channel = chan;
- hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ }
} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
if (IS_CH_5G_BAND(chan)) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
@@ -2220,7 +2254,12 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
rtw_coex_switchband_notify(rtwdev, chan_type);
}
- if (rtw_is_op_chan(rtwdev, chan))
+ /* The channel in the C2H RTW_SCAN_NOTIFY_ID_PRESWITCH is the next
+ * channel that the hardware will switch to. We need to stop the
+ * queues if the next channel is a non-op channel.
+ */
+ if (!rtw_is_op_chan(rtwdev, chan) &&
+ rtw_is_op_chan(rtwdev, hal->current_channel))
ieee80211_stop_queues(rtwdev->hw);
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 7a37675c61e8..a5a965803a3c 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -41,7 +41,8 @@
#define RTW_EX_CH_INFO_HDR_SIZE 2
#define RTW_SCAN_WIDTH 0
#define RTW_PRI_CH_IDX 1
-#define RTW_PROBE_PG_CNT 2
+#define RTW_OLD_PROBE_PG_CNT 2
+#define RTW_PROBE_PG_CNT 4
enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
@@ -120,6 +121,10 @@ enum rtw_fw_feature {
FW_FEATURE_MAX = BIT(31),
};
+enum rtw_fw_feature_ext {
+ FW_FEATURE_EXT_OLD_PAGE_NUM = BIT(0),
+};
+
enum rtw_beacon_filter_offload_mode {
BCN_FILTER_OFFLOAD_MODE_0 = 0,
BCN_FILTER_OFFLOAD_MODE_1,
@@ -323,6 +328,11 @@ struct rtw_fw_hdr_legacy {
__le32 rsvd5;
} __packed;
+#define RTW_FW_VER_CODE(ver, sub_ver, idx) \
+ (((ver) << 16) | ((sub_ver) << 8) | (idx))
+#define RTW_FW_SUIT_VER_CODE(s) \
+ RTW_FW_VER_CODE((s).version, (s).sub_version, (s).sub_index)
+
/* C2H */
#define GET_CCX_REPORT_SEQNUM_V0(c2h_payload) (c2h_payload[6] & 0xfc)
#define GET_CCX_REPORT_STATUS_V0(c2h_payload) (c2h_payload[0] & 0xc0)
@@ -770,6 +780,12 @@ static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
return !!(fw->feature & feature);
}
+static inline bool rtw_fw_feature_ext_check(struct rtw_fw_state *fw,
+ enum rtw_fw_feature_ext feature)
+{
+ return !!(fw->feature_ext & feature);
+}
+
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
struct sk_buff *skb);
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
@@ -831,7 +847,8 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
-void rtw_store_op_chan(struct rtw_dev *rtwdev);
+void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup);
+void rtw_clear_op_chan(struct rtw_dev *rtwdev);
void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index caf2603da2d6..52076e89d59a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -243,7 +243,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
@@ -587,7 +587,7 @@ static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
u32 src, u32 dst, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 desc_size = chip->tx_pkt_desc_sz;
u8 first_part;
u32 mem_offset;
@@ -934,7 +934,7 @@ static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
u32 prio_queue, bool drop)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_prioq_addr *addr;
bool wsize;
u16 avail_page, rsvd_page;
@@ -996,7 +996,7 @@ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
@@ -1037,8 +1037,8 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
u16 cur_pg_addr;
u8 csi_buf_pg_num = chip->csi_buf_pg_num;
@@ -1092,8 +1092,8 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev,
const struct rtw_page_table *pg_tbl,
u16 pubq_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
@@ -1123,8 +1123,8 @@ static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
const struct rtw_page_table *pg_tbl,
u16 pubq_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
u32 val32;
val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
@@ -1149,8 +1149,8 @@ static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
@@ -1277,7 +1277,7 @@ static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
int rtw_mac_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int ret;
ret = rtw_init_trx_cfg(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index c7b98a0599d5..07578ccc4bab 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -101,7 +101,8 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw_set_channel(rtwdev);
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (hw->conf.flags & IEEE80211_CONF_IDLE))
+ (hw->conf.flags & IEEE80211_CONF_IDLE) &&
+ !test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_enter_ips(rtwdev);
out:
@@ -377,7 +378,6 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
- rtw_store_op_chan(rtwdev);
} else {
rtw_leave_lps(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
@@ -395,6 +395,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
config |= PORT_SET_BSSID;
+ if (is_zero_ether_addr(rtwvif->bssid))
+ rtw_clear_op_chan(rtwdev);
+ else
+ rtw_store_op_chan(rtwdev, true);
}
if (changed & BSS_CHANGED_BEACON_INT) {
@@ -434,7 +438,7 @@ static int rtw_ops_start_ap(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
mutex_lock(&rtwdev->mutex);
chip->ops->phy_calibration(rtwdev);
@@ -752,7 +756,7 @@ static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
u32 rx_antenna)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int ret;
if (!chip->ops->set_antenna)
@@ -872,7 +876,9 @@ static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
+ mutex_lock(&rtwdev->mutex);
rtw_set_sar_specs(rtwdev, sar);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 76dc9da88f6c..67151dbf8384 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -353,7 +353,7 @@ struct rtw_fwcd_hdr {
static int rtw_fwcd_prep(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
const struct rtw_fwcd_segs *segs = chip->fwcd_segs;
u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr);
@@ -675,67 +675,126 @@ void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period)
rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1);
}
+void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
+ u8 primary_channel, enum rtw_supported_band band,
+ enum rtw_bandwidth bandwidth)
+{
+ enum nl80211_band nl_band = rtw_hw_to_nl80211_band(band);
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 *cch_by_bw = hal->cch_by_bw;
+ u32 center_freq, primary_freq;
+ enum rtw_sar_bands sar_band;
+ u8 primary_channel_idx;
+
+ center_freq = ieee80211_channel_to_frequency(center_channel, nl_band);
+ primary_freq = ieee80211_channel_to_frequency(primary_channel, nl_band);
+
+ /* assign the center channel used while 20M bw is selected */
+ cch_by_bw[RTW_CHANNEL_WIDTH_20] = primary_channel;
+
+ /* assign the center channel used while current bw is selected */
+ cch_by_bw[bandwidth] = center_channel;
+
+ switch (bandwidth) {
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ primary_channel_idx = RTW_SC_DONT_CARE;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (primary_freq > center_freq)
+ primary_channel_idx = RTW_SC_20_UPPER;
+ else
+ primary_channel_idx = RTW_SC_20_LOWER;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (primary_freq > center_freq) {
+ if (primary_freq - center_freq == 10)
+ primary_channel_idx = RTW_SC_20_UPPER;
+ else
+ primary_channel_idx = RTW_SC_20_UPMOST;
+
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel + 4;
+ } else {
+ if (center_freq - primary_freq == 10)
+ primary_channel_idx = RTW_SC_20_LOWER;
+ else
+ primary_channel_idx = RTW_SC_20_LOWEST;
+
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel - 4;
+ }
+ break;
+ }
+
+ switch (center_channel) {
+ case 1 ... 14:
+ sar_band = RTW_SAR_BAND_0;
+ break;
+ case 36 ... 64:
+ sar_band = RTW_SAR_BAND_1;
+ break;
+ case 100 ... 144:
+ sar_band = RTW_SAR_BAND_3;
+ break;
+ case 149 ... 177:
+ sar_band = RTW_SAR_BAND_4;
+ break;
+ default:
+ WARN(1, "unknown ch(%u) to SAR band\n", center_channel);
+ sar_band = RTW_SAR_BAND_0;
+ break;
+ }
+
+ hal->current_primary_channel_index = primary_channel_idx;
+ hal->current_band_width = bandwidth;
+ hal->primary_channel = primary_channel;
+ hal->current_channel = center_channel;
+ hal->current_band_type = band;
+ hal->sar_band = sar_band;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
- u8 *cch_by_bw = chan_params->cch_by_bw;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW_CHANNEL_WIDTH_20;
- u8 primary_chan_idx = 0;
- u8 i;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
center_freq = chandef->center_freq1;
- /* assign the center channel used while 20M bw is selected */
- cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value;
-
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
- primary_chan_idx = RTW_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
- if (primary_freq > center_freq) {
- primary_chan_idx = RTW_SC_20_UPPER;
+ if (primary_freq > center_freq)
center_chan -= 2;
- } else {
- primary_chan_idx = RTW_SC_20_LOWER;
+ else
center_chan += 2;
- }
break;
case NL80211_CHAN_WIDTH_80:
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
- if (primary_freq - center_freq == 10) {
- primary_chan_idx = RTW_SC_20_UPPER;
+ if (primary_freq - center_freq == 10)
center_chan -= 2;
- } else {
- primary_chan_idx = RTW_SC_20_UPMOST;
+ else
center_chan -= 6;
- }
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
- if (center_freq - primary_freq == 10) {
- primary_chan_idx = RTW_SC_20_LOWER;
+ if (center_freq - primary_freq == 10)
center_chan += 2;
- } else {
- primary_chan_idx = RTW_SC_20_LOWEST;
+ else
center_chan += 6;
- }
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4;
}
break;
default:
@@ -745,60 +804,30 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
chan_params->center_chan = center_chan;
chan_params->bandwidth = bandwidth;
- chan_params->primary_chan_idx = primary_chan_idx;
-
- /* assign the center channel used while current bw is selected */
- cch_by_bw[bandwidth] = center_chan;
-
- for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++)
- cch_by_bw[i] = 0;
+ chan_params->primary_chan = channel->hw_value;
}
void rtw_set_channel(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_channel_params ch_param;
- u8 center_chan, bandwidth, primary_chan_idx;
- u8 i;
+ u8 center_chan, primary_chan, bandwidth, band;
rtw_get_channel_params(&hw->conf.chandef, &ch_param);
if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
return;
center_chan = ch_param.center_chan;
+ primary_chan = ch_param.primary_chan;
bandwidth = ch_param.bandwidth;
- primary_chan_idx = ch_param.primary_chan_idx;
-
- hal->current_band_width = bandwidth;
- hal->current_channel = center_chan;
- hal->current_primary_channel_index = primary_chan_idx;
- hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->sar_band = RTW_SAR_BAND_0;
- break;
- case 36 ... 64:
- hal->sar_band = RTW_SAR_BAND_1;
- break;
- case 100 ... 144:
- hal->sar_band = RTW_SAR_BAND_3;
- break;
- case 149 ... 177:
- hal->sar_band = RTW_SAR_BAND_4;
- break;
- default:
- WARN(1, "unknown ch(%u) to SAR band\n", center_chan);
- hal->sar_band = RTW_SAR_BAND_0;
- break;
- }
+ band = ch_param.center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
- for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++)
- hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
+ rtw_update_channel(rtwdev, center_chan, primary_chan, band, bandwidth);
- chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+ chip->ops->set_channel(rtwdev, center_chan, bandwidth,
+ hal->current_primary_channel_index);
if (hal->current_band_type == RTW_BAND_5G) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
@@ -821,7 +850,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
void rtw_chip_prepare_tx(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (rtwdev->need_rfk) {
rtwdev->need_rfk = false;
@@ -890,8 +919,8 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
hw_ant_num >= hal->rf_path_num)
@@ -1240,7 +1269,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw;
fw = &rtwdev->fw;
@@ -1261,7 +1290,7 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported ||
!fw->feature)
@@ -1280,7 +1309,7 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
static int rtw_power_on(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw = &rtwdev->fw;
bool wifi_only;
int ret;
@@ -1469,8 +1498,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
@@ -1552,8 +1581,23 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->vht_mcs.tx_highest = highest;
}
+static u16 rtw_get_max_scan_ie_len(struct rtw_dev *rtwdev)
+{
+ u16 len;
+
+ len = rtwdev->chip->max_scan_ie_len;
+
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) &&
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822C)
+ len = IEEE80211_MAX_DATA_LEN;
+ else if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
+ len -= RTW_OLD_PROBE_PG_CNT * TX_PAGE_SIZE;
+
+ return len;
+}
+
static void rtw_set_supported_band(struct ieee80211_hw *hw,
- struct rtw_chip_info *chip)
+ const struct rtw_chip_info *chip)
{
struct rtw_dev *rtwdev = hw->priv;
struct ieee80211_supported_band *sband;
@@ -1585,7 +1629,7 @@ err_out:
}
static void rtw_unset_supported_band(struct ieee80211_hw *hw,
- struct rtw_chip_info *chip)
+ const struct rtw_chip_info *chip)
{
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
@@ -1607,7 +1651,7 @@ static void rtw_vif_smps_iter(void *data, u8 *mac,
void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss)
@@ -1631,6 +1675,10 @@ static void __update_firmware_feature(struct rtw_dev *rtwdev,
feature = le32_to_cpu(fw_hdr->feature);
fw->feature = feature & FW_FEATURE_SIG ? feature : 0;
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C &&
+ RTW_FW_SUIT_VER_CODE(rtwdev->fw) < RTW_FW_VER_CODE(9, 9, 13))
+ fw->feature_ext |= FW_FEATURE_EXT_OLD_PAGE_NUM;
}
static void __update_firmware_info(struct rtw_dev *rtwdev,
@@ -1724,7 +1772,7 @@ static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type)
static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
@@ -1982,7 +2030,7 @@ static void rtw_stats_init(struct rtw_dev *rtwdev)
int rtw_core_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
int ret;
@@ -2045,7 +2093,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
- return ret;
+ goto out;
}
if (chip->wow_fw_name) {
@@ -2055,11 +2103,15 @@ int rtw_core_init(struct rtw_dev *rtwdev)
wait_for_completion(&rtwdev->fw.completion);
if (rtwdev->fw.firmware)
release_firmware(rtwdev->fw.firmware);
- return ret;
+ goto out;
}
}
return 0;
+
+out:
+ destroy_workqueue(rtwdev->tx_wq);
+ return ret;
}
EXPORT_SYMBOL(rtw_core_init);
@@ -2136,7 +2188,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
- hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN;
+ hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
@@ -2180,7 +2232,7 @@ EXPORT_SYMBOL(rtw_register_hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
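
For reference, the center-channel to SAR-band mapping that the new rtw_update_channel() above performs can be exercised on its own. The sketch below is a stand-alone user-space rendering of the same switch; the enum values are illustrative stand-ins, not the driver's RTW_SAR_BAND_* definitions, and the channel ranges are copied from the hunk above.

#include <stdio.h>

/* Illustrative stand-in for the driver's enum rtw_sar_bands. */
enum sar_band {
	SAR_BAND_0 = 0,
	SAR_BAND_1 = 1,
	SAR_BAND_3 = 3,
	SAR_BAND_4 = 4,
	SAR_BAND_INVALID = -1,
};

/* Mirror of the center_channel -> SAR band switch added in rtw_update_channel(). */
static enum sar_band chan_to_sar_band(unsigned int center_channel)
{
	switch (center_channel) {
	case 1 ... 14:		/* 2.4 GHz */
		return SAR_BAND_0;
	case 36 ... 64:		/* lower 5 GHz */
		return SAR_BAND_1;
	case 100 ... 144:	/* middle 5 GHz */
		return SAR_BAND_3;
	case 149 ... 177:	/* upper 5 GHz */
		return SAR_BAND_4;
	default:
		return SAR_BAND_INVALID;	/* the driver WARNs and falls back to band 0 */
	}
}

int main(void)
{
	printf("ch 6   -> SAR band %d\n", chan_to_sar_band(6));	/* 0 */
	printf("ch 149 -> SAR band %d\n", chan_to_sar_band(149));	/* 4 */
	return 0;
}
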
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 7db627fc26be..bccd7b28f60c 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -22,7 +22,6 @@
#define MAX_PG_CAM_BACKUP_NUM 8
#define RTW_SCAN_MAX_SSIDS 4
-#define RTW_SCAN_MAX_IE_LEN 128
#define RTW_MAX_PATTERN_NUM 12
#define RTW_MAX_PATTERN_MASK_SIZE 16
@@ -33,6 +32,7 @@
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
#define TX_PAGE_SIZE_SHIFT 7
+#define TX_PAGE_SIZE (1 << TX_PAGE_SIZE_SHIFT)
#define RTW_CHANNEL_WIDTH_MAX 3
#define RTW_RF_PATH_MAX 4
@@ -510,12 +510,8 @@ struct rtw_timer_list {
struct rtw_channel_params {
u8 center_chan;
+ u8 primary_chan;
u8 bandwidth;
- u8 primary_chan_idx;
- /* center channel by different available bandwidth,
- * val of (bw > current bandwidth) is invalid
- */
- u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1];
};
struct rtw_hw_reg {
@@ -1232,6 +1228,7 @@ struct rtw_chip_info {
const char *wow_fw_name;
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
+ const u16 max_scan_ie_len;
/* coex paras */
u32 coex_para_ver;
@@ -1853,6 +1850,7 @@ struct rtw_fw_state {
u8 sub_index;
u16 h2c_version;
u32 feature;
+ u32 feature_ext;
};
enum rtw_sar_sources {
@@ -1896,6 +1894,7 @@ struct rtw_hal {
u8 current_primary_channel_index;
u8 current_band_width;
u8 current_band_type;
+ u8 primary_channel;
/* center channel for different available bandwidth,
* val of (bw > current_band_width) is invalid
@@ -1967,6 +1966,7 @@ struct rtw_hw_scan_info {
struct ieee80211_vif *scanning_vif;
u8 probe_pg_size;
u8 op_pri_ch_idx;
+ u8 op_pri_ch;
u8 op_chan;
u8 op_bw;
};
@@ -1978,7 +1978,7 @@ struct rtw_dev {
struct rtw_hci hci;
struct rtw_hw_scan_info scan_info;
- struct rtw_chip_info *chip;
+ const struct rtw_chip_info *chip;
struct rtw_hal hal;
struct rtw_fifo_conf fifo;
struct rtw_fw_state fw;
@@ -2132,6 +2132,20 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
return 0;
}
+static inline
+enum nl80211_band rtw_hw_to_nl80211_band(enum rtw_supported_band hw_band)
+{
+ switch (hw_band) {
+ default:
+ case RTW_BAND_2G:
+ return NL80211_BAND_2GHZ;
+ case RTW_BAND_5G:
+ return NL80211_BAND_5GHZ;
+ case RTW_BAND_60G:
+ return NL80211_BAND_60GHZ;
+ }
+}
+
void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel);
void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period);
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -2173,4 +2187,7 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
u32 fwcd_item);
int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss);
+void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
+ u8 primary_channel, enum rtw_supported_band band,
+ enum rtw_bandwidth bandwidth);
#endif
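
The new rtw_get_max_scan_ie_len() in main.c above combines the per-chip limit with the firmware feature bits, using the TX_PAGE_SIZE definition added here (1 << 7 = 128 bytes). A minimal sketch of that calculation follows; the probe page counts are passed in as plain parameters because the driver's RTW_PROBE_PG_CNT / RTW_OLD_PROBE_PG_CNT values are not part of this diff, and the figures in main() are purely illustrative.

#include <stdbool.h>
#include <stdio.h>

#define TX_PAGE_SIZE_SHIFT	7
#define TX_PAGE_SIZE		(1 << TX_PAGE_SIZE_SHIFT)	/* 128 bytes, as in main.h above */
#define IEEE80211_MAX_DATA_LEN	2304

/* Stand-alone rendering of the rtw_get_max_scan_ie_len() logic. */
static unsigned int max_scan_ie_len(unsigned int chip_max, bool has_scan_offload,
				    bool is_8822c, bool old_page_num_fw,
				    unsigned int old_probe_pg_cnt)
{
	unsigned int len = chip_max;

	if (!has_scan_offload && is_8822c)
		len = IEEE80211_MAX_DATA_LEN;		/* fall back to the generic 802.11 limit */
	else if (old_page_num_fw)
		len -= old_probe_pg_cnt * TX_PAGE_SIZE;	/* older firmware reserves extra pages */

	return len;
}

int main(void)
{
	/* hypothetical chip limit of (5 - 1) * TX_PAGE_SIZE = 512 bytes */
	unsigned int chip_max = 4 * TX_PAGE_SIZE;

	printf("new fw:          %u bytes\n", max_scan_ie_len(chip_max, true, true, false, 2));
	printf("old page-num fw: %u bytes\n", max_scan_ie_len(chip_max, true, true, true, 2));
	printf("no scan offload: %u bytes\n", max_scan_ie_len(chip_max, false, true, false, 2));
	return 0;
}
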
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 24d5695363d3..0975d27240e4 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -322,7 +322,7 @@ static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
struct rtw_pci_rx_ring *rx_ring;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
int tx_desc_size, rx_desc_size;
u32 len;
@@ -721,7 +721,7 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
u32 idx)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_rx_buffer_desc *buf_desc;
u32 desc_sz = chip->rx_buf_desc_sz;
u16 total_pkt_size;
@@ -834,7 +834,7 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
struct sk_buff *skb, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_tx_ring *ring;
struct rtw_pci_tx_data *tx_data;
dma_addr_t dma;
@@ -1073,7 +1073,7 @@ static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u8 hw_queue, u32 limit)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct napi_struct *napi = &rtwpci->napi;
struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
struct rtw_rx_pkt_stat pkt_stat;
@@ -1425,7 +1425,7 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u16 link_ctrl;
@@ -1467,7 +1467,7 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
switch (chip->id) {
case RTW_CHIP_TYPE_8822C:
@@ -1483,7 +1483,7 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
@@ -1538,7 +1538,7 @@ static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
@@ -1550,7 +1550,7 @@ static int __maybe_unused rtw_pci_resume(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
@@ -1717,8 +1717,7 @@ static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
init_dummy_netdev(&rtwpci->netdev);
- netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
}
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
@@ -1848,7 +1847,7 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw_dev *rtwdev;
- struct rtw_chip_info *chip;
+ const struct rtw_chip_info *chip;
if (!hw)
return;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8982e0c98dac..bd7d05e08084 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(rtw_phy_set_edcca_th);
void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
/* turn off in debugfs for debug usage */
@@ -165,7 +165,7 @@ void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
rtw_phy_adaptivity_set_mode(rtwdev);
if (chip->ops->adaptivity_init)
@@ -180,7 +180,7 @@ static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->cfo_init)
chip->ops->cfo_init(rtwdev);
@@ -199,7 +199,7 @@ static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
void rtw_phy_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 addr, mask;
@@ -226,7 +226,7 @@ EXPORT_SYMBOL(rtw_phy_init);
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u32 addr, mask;
u8 path;
@@ -245,7 +245,7 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->false_alarm_statistics(rtwdev);
}
@@ -603,7 +603,7 @@ static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev)
static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->dpk_track)
chip->ops->dpk_track(rtwdev);
@@ -659,7 +659,7 @@ EXPORT_SYMBOL(rtw_phy_parsing_cfo);
static void rtw_phy_cfo_track(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->cfo_track)
chip->ops->cfo_track(rtwdev);
@@ -720,8 +720,8 @@ static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- struct rtw_chip_info *chip = rtwdev->chip;
u32 cck_fa = dm_info->cck_fa_cnt;
u8 level;
@@ -816,23 +816,18 @@ static u8 rtw_phy_linear_2_db(u64 linear)
u8 j;
u32 dB;
- if (linear >= db_invert_table[11][7])
- return 96; /* maximum 96 dB */
-
for (i = 0; i < 12; i++) {
- if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
- break;
- else if (i > 2 && linear <= db_invert_table[i][7])
- break;
+ for (j = 0; j < 8; j++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
+ goto cnt;
+ else if (i > 2 && linear <= db_invert_table[i][j])
+ goto cnt;
+ }
}
- for (j = 0; j < 8; j++) {
- if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
- break;
- else if (i > 2 && linear <= db_invert_table[i][j])
- break;
- }
+ return 96; /* maximum 96 dB */
+cnt:
if (j == 0 && i == 0)
goto end;
@@ -900,7 +895,7 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 val, direct_addr;
@@ -923,7 +918,7 @@ u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_rf_sipi_addr *rf_sipi_addr;
const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
u32 val32;
@@ -972,8 +967,8 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
- u32 *sipi_addr = chip->rf_sipi_addr;
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *sipi_addr = chip->rf_sipi_addr;
u32 data_and_addr;
u32 old_data = 0;
u32 shift;
@@ -1012,7 +1007,7 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 direct_addr;
@@ -1747,7 +1742,7 @@ EXPORT_SYMBOL(rtw_phy_cfg_rf);
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
if (!chip->rfk_init_tbl)
@@ -1766,7 +1761,7 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
@@ -1875,7 +1870,7 @@ static u8 rtw_get_channel_group(u8 channel, u8 rate)
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
s8 dpd_diff = 0;
if (!chip->en_dis_dpd)
@@ -1909,7 +1904,7 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
bool mcs_rate;
bool above_2ss;
@@ -1956,7 +1951,7 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
u8 upper, lower;
bool mcs_rate;
@@ -2209,7 +2204,7 @@ static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u8 path;
@@ -2484,7 +2479,7 @@ static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
{
struct rtw_path_div *path_div = &rtwdev->dm_path_div;
enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (tx_path_sel_1ss == path_div->current_tx_path)
return;
@@ -2539,7 +2534,7 @@ static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->path_div_supported)
return;
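
The rtw_phy_linear_2_db() rework earlier in this file replaces two separate index searches with one nested loop that jumps out as soon as a matching table entry is found, so both indices are guaranteed to be in range when they are used. The sketch below shows the same lookup pattern on a small illustrative table; it is not the driver's db_invert_table or its dB math.

#include <stdio.h>

#define ROWS 3
#define COLS 4

/* Monotonically increasing 2-D table, illustrative values only. */
static const unsigned int table[ROWS][COLS] = {
	{   1,   2,    4,    8 },
	{  16,  32,   64,  128 },
	{ 256, 512, 1024, 2048 },
};

static unsigned int lookup(unsigned int linear)
{
	unsigned int i, j;

	for (i = 0; i < ROWS; i++)
		for (j = 0; j < COLS; j++)
			if (linear <= table[i][j])
				goto found;

	return ROWS * COLS;	/* saturate, like the "maximum 96 dB" fallback */

found:
	return i * COLS + j;	/* both i and j are valid here */
}

int main(void)
{
	printf("lookup(3)    = %u\n", lookup(3));	/* row 0, col 2 -> 2 */
	printf("lookup(100)  = %u\n", lookup(100));	/* row 1, col 3 -> 7 */
	printf("lookup(9999) = %u\n", lookup(9999));	/* saturates -> 12 */
	return 0;
}
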
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index b6c5ae60a462..ccfcbd3ced03 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -114,7 +114,7 @@ const struct rtw_table name ## _tbl = { \
static inline const struct rtw_rfe_def *rtw_get_rfe_def(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct rtw_rfe_def *rfe_def = NULL;
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index bfa64c038f5f..c93da743681f 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -19,14 +19,14 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
- clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
+ if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ return 0;
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
@@ -50,6 +50,9 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
+ if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ return 0;
+
rtw_hci_link_ps(rtwdev, false);
ret = rtw_ips_pwr_up(rtwdev);
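
The guards added to rtw_enter_ips()/rtw_leave_ips() above use test_and_set_bit()/test_and_clear_bit() to make the pair idempotent: a second "enter" or a "leave" without a prior "enter" becomes a no-op instead of running the power sequence twice. A minimal user-space analogue of that pattern, using C11 atomics in place of the kernel bitops, might look like this:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool inactive_ps;	/* stands in for RTW_FLAG_INACTIVE_PS */

static int enter_ips(void)
{
	/* already in IPS? then there is nothing to do */
	if (atomic_exchange(&inactive_ps, true))
		return 0;

	puts("powering down (enter IPS)");
	return 0;
}

static int leave_ips(void)
{
	/* not in IPS? then there is nothing to undo */
	if (!atomic_exchange(&inactive_ps, false))
		return 0;

	puts("powering up (leave IPS)");
	return 0;
}

int main(void)
{
	enter_ips();	/* powers down */
	enter_ips();	/* no-op */
	leave_ips();	/* powers up */
	leave_ips();	/* no-op */
	return 0;
}
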
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index 315c2b193e92..2f547cbcf6da 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -479,6 +479,7 @@ void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n",
rtwdev->regd.state, next_regd.state);
+ mutex_lock(&rtwdev->mutex);
rtwdev->regd = next_regd;
rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ",
request->alpha2[0],
@@ -487,6 +488,7 @@ void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
rtw_phy_adaptivity_set_mode(rtwdev);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+ mutex_unlock(&rtwdev->mutex);
}
u8 rtw_regd_get(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 993bd6b1d723..0a4f770fcbb7 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2720,7 +2720,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = false,
@@ -2748,6 +2748,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl,
.iqk_threshold = 8,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x2007022f,
.bt_desired_ver = 0x2f,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 025262a8970e..9afdc5ce86b4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1898,7 +1898,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
@@ -1926,6 +1926,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x19092746,
.bt_desired_ver = 0x46,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 321848870561..690e35c98f6e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2517,7 +2517,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
@@ -2549,6 +2549,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
.l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 09f9e4adcf34..fccb15dfb959 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5330,7 +5330,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.max_power_index = 0x7f,
.csi_buf_pg_num = 50,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
.default_1ss_tx_path = BB_PATH_A,
.path_div_supported = true,
@@ -5375,6 +5375,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.wowlan_stub = &rtw_wowlan_stub_8822c,
.max_sched_scan_ssids = 4,
#endif
+ .max_scan_ie_len = (RTW_PROBE_PG_CNT - 1) * TX_PAGE_SIZE,
.coex_para_ver = 0x22020720,
.bt_desired_ver = 0x20,
.scbd_support = true,
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 60d40a5c2c6a..ab39245e9c2f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -384,7 +384,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtw_sta_info *si;
@@ -424,7 +424,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct sk_buff *skb,
enum rtw_rsvd_packet_type type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool bmc;
@@ -475,7 +475,7 @@ rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
u32 tx_pkt_desc_sz;
u32 length;
@@ -501,7 +501,7 @@ rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
u32 tx_pkt_desc_sz;
u32 length;
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 2c515af214e7..cdfd66a85075 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(check_hw_ready);
bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
@@ -37,7 +37,7 @@ bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 3006482d25c7..a87f2aff4def 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -12,6 +12,7 @@ rtw89_core-y += core.o \
sar.o \
coex.o \
ps.o \
+ chan.o \
ser.o
obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
new file mode 100644
index 000000000000..a4f61c2f6512
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+
+static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
+ u8 center_chan)
+{
+ switch (band) {
+ default:
+ case RTW89_BAND_2G:
+ switch (center_chan) {
+ default:
+ case 1 ... 14:
+ return RTW89_CH_2G;
+ }
+ case RTW89_BAND_5G:
+ switch (center_chan) {
+ default:
+ case 36 ... 64:
+ return RTW89_CH_5G_BAND_1;
+ case 100 ... 144:
+ return RTW89_CH_5G_BAND_3;
+ case 149 ... 177:
+ return RTW89_CH_5G_BAND_4;
+ }
+ case RTW89_BAND_6G:
+ switch (center_chan) {
+ default:
+ case 1 ... 29:
+ return RTW89_CH_6G_BAND_IDX0;
+ case 33 ... 61:
+ return RTW89_CH_6G_BAND_IDX1;
+ case 65 ... 93:
+ return RTW89_CH_6G_BAND_IDX2;
+ case 97 ... 125:
+ return RTW89_CH_6G_BAND_IDX3;
+ case 129 ... 157:
+ return RTW89_CH_6G_BAND_IDX4;
+ case 161 ... 189:
+ return RTW89_CH_6G_BAND_IDX5;
+ case 193 ... 221:
+ return RTW89_CH_6G_BAND_IDX6;
+ case 225 ... 253:
+ return RTW89_CH_6G_BAND_IDX7;
+ }
+ }
+}
+
+static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw,
+ u32 center_freq,
+ u32 primary_freq)
+{
+ u8 primary_chan_idx;
+ u32 offset;
+
+ switch (bw) {
+ default:
+ case RTW89_CHANNEL_WIDTH_20:
+ primary_chan_idx = RTW89_SC_DONT_CARE;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ if (primary_freq > center_freq)
+ primary_chan_idx = RTW89_SC_20_UPPER;
+ else
+ primary_chan_idx = RTW89_SC_20_LOWER;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_160:
+ if (primary_freq > center_freq) {
+ offset = (primary_freq - center_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
+ } else {
+ offset = (center_freq - primary_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
+ }
+ break;
+ }
+
+ return primary_chan_idx;
+}
+
+void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
+ enum rtw89_band band, enum rtw89_bandwidth bandwidth)
+{
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ u32 center_freq, primary_freq;
+
+ memset(chan, 0, sizeof(*chan));
+ chan->channel = center_chan;
+ chan->primary_channel = primary_chan;
+ chan->band_type = band;
+ chan->band_width = bandwidth;
+
+ center_freq = ieee80211_channel_to_frequency(center_chan, nl_band);
+ primary_freq = ieee80211_channel_to_frequency(primary_chan, nl_band);
+
+ chan->freq = center_freq;
+ chan->subband_type = rtw89_get_subband_type(band, center_chan);
+ chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq,
+ primary_freq);
+}
+
+bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct rtw89_chan *new)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chan *chan = &hal->chan[idx];
+ struct rtw89_chan_rcd *rcd = &hal->chan_rcd[idx];
+ bool band_changed;
+
+ rcd->prev_primary_channel = chan->primary_channel;
+ rcd->prev_band_type = chan->band_type;
+ band_changed = new->band_type != chan->band_type;
+
+ *chan = *new;
+ return band_changed;
+}
+
+static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef,
+ bool from_stack)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ hal->chandef[idx] = *chandef;
+
+ if (from_stack)
+ set_bit(idx, hal->entity_map);
+}
+
+void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef)
+{
+ __rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
+}
+
+static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
+{
+ struct cfg80211_chan_def chandef = {0};
+
+ rtw89_get_default_chandef(&chandef);
+ __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false);
+}
+
+void rtw89_entity_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_config_default_chandef(rtwdev);
+}
+
+enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_entity_mode mode;
+ u8 weight;
+
+ weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ switch (weight) {
+ default:
+ rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ fallthrough;
+ case 0:
+ rtw89_config_default_chandef(rtwdev);
+ fallthrough;
+ case 1:
+ mode = RTW89_ENTITY_MODE_SCC;
+ break;
+ }
+
+ rtw89_set_entity_mode(rtwdev, mode);
+ return mode;
+}
+
+int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 idx;
+
+ idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ if (idx >= chip->support_chanctx_num)
+ return -ENOENT;
+
+ rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
+ rtw89_set_channel(rtwdev);
+ cfg->idx = idx;
+ return 0;
+}
+
+void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+
+ clear_bit(cfg->idx, hal->entity_map);
+ rtw89_set_channel(rtwdev);
+}
+
+void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ u8 idx = cfg->idx;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+ rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
+ rtw89_set_channel(rtwdev);
+ }
+}
+
+int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return 0;
+}
+
+void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+}
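
The 80/160 MHz branch of rtw89_get_primary_chan_idx() above derives the secondary-channel index from the frequency distance between the primary 20 MHz channel and the band center: each additional 20 MHz of distance moves the index up by two on the upper or lower side. The sketch below reproduces that arithmetic with plain integers; the SC_* constants are illustrative stand-ins for RTW89_SC_20_UPPER / RTW89_SC_20_LOWER, and frequencies are in MHz.

#include <stdio.h>

#define SC_20_UPPER 1	/* illustrative stand-in */
#define SC_20_LOWER 2	/* illustrative stand-in */

/* Same arithmetic as the 80/160 MHz case of rtw89_get_primary_chan_idx(). */
static unsigned int primary_chan_idx(unsigned int center_freq, unsigned int primary_freq)
{
	unsigned int offset;

	if (primary_freq > center_freq) {
		offset = (primary_freq - center_freq - 10) / 20;
		return SC_20_UPPER + offset * 2;
	}

	offset = (center_freq - primary_freq - 10) / 20;
	return SC_20_LOWER + offset * 2;
}

int main(void)
{
	/* 80 MHz block centered on channel 42 (5210 MHz) */
	printf("primary 5220 MHz -> idx %u\n", primary_chan_idx(5210, 5220));	/* upper, offset 0 */
	printf("primary 5240 MHz -> idx %u\n", primary_chan_idx(5210, 5240));	/* upper, offset 1 */
	printf("primary 5180 MHz -> idx %u\n", primary_chan_idx(5210, 5180));	/* lower, offset 1 */
	return 0;
}
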
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
new file mode 100644
index 000000000000..ecbd4503bead
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ * Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_CHAN_H__
+#define __RTW89_CHAN_H__
+
+#include "core.h"
+
+static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return READ_ONCE(hal->entity_active);
+}
+
+static inline void rtw89_set_entity_state(struct rtw89_dev *rtwdev, bool active)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ WRITE_ONCE(hal->entity_active, active);
+}
+
+static inline
+enum rtw89_entity_mode rtw89_get_entity_mode(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return READ_ONCE(hal->entity_mode);
+}
+
+static inline void rtw89_set_entity_mode(struct rtw89_dev *rtwdev,
+ enum rtw89_entity_mode mode)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ WRITE_ONCE(hal->entity_mode, mode);
+}
+
+void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
+ enum rtw89_band band, enum rtw89_bandwidth bandwidth);
+bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct rtw89_chan *new);
+void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef);
+void rtw89_entity_init(struct rtw89_dev *rtwdev);
+enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
+int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 683854bba217..bbdfa9ac203c 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -9,6 +9,7 @@
#include "ps.h"
#include "reg.h"
+#define RTW89_COEX_VERSION 0x06030013
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
enum btc_fbtc_tdma_template {
@@ -77,21 +78,21 @@ static const struct rtw89_btc_fbtc_tdma t_def[] = {
static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
- [CXST_B2W] = __DEF_FBTC_SLOT(5, 0x5a5a5a5a, SLOT_ISO),
- [CXST_W1] = __DEF_FBTC_SLOT(70, 0x5a5a5a5a, SLOT_ISO),
- [CXST_W2] = __DEF_FBTC_SLOT(70, 0x5a5a5aaa, SLOT_ISO),
- [CXST_W2B] = __DEF_FBTC_SLOT(15, 0x5a5a5a5a, SLOT_ISO),
- [CXST_B1] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
- [CXST_B2] = __DEF_FBTC_SLOT(7, 0x6a5a5a5a, SLOT_MIX),
- [CXST_B3] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
- [CXST_B4] = __DEF_FBTC_SLOT(50, 0x55555555, SLOT_MIX),
- [CXST_LK] = __DEF_FBTC_SLOT(20, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_B2W] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_ISO),
+ [CXST_W1] = __DEF_FBTC_SLOT(70, 0xea5a5a5a, SLOT_ISO),
+ [CXST_W2] = __DEF_FBTC_SLOT(70, 0xea5a5aaa, SLOT_ISO),
+ [CXST_W2B] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
+ [CXST_B1] = __DEF_FBTC_SLOT(100, 0xe5555555, SLOT_MIX),
+ [CXST_B2] = __DEF_FBTC_SLOT(7, 0xea5a5a5a, SLOT_MIX),
+ [CXST_B3] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
+ [CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
+ [CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
[CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(20, 0x6a5a5a5a, SLOT_MIX),
+ [CXST_E2G] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX),
- [CXST_EBT] = __DEF_FBTC_SLOT(20, 0x55555555, SLOT_MIX),
+ [CXST_EBT] = __DEF_FBTC_SLOT(20, 0xe5555555, SLOT_MIX),
[CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO),
- [CXST_WLK] = __DEF_FBTC_SLOT(250, 0x6a5a6a5a, SLOT_MIX),
+ [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX),
};
@@ -99,13 +100,13 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
static const u32 cxtbl[] = {
0xffffffff, /* 0 */
0xaaaaaaaa, /* 1 */
- 0x55555555, /* 2 */
- 0x66555555, /* 3 */
- 0x66556655, /* 4 */
+ 0xe5555555, /* 2 */
+ 0xee555555, /* 3 */
+ 0xd5555555, /* 4 */
0x5a5a5a5a, /* 5 */
- 0x5a5a5aaa, /* 6 */
- 0xaa5a5a5a, /* 7 */
- 0x6a5a5a5a, /* 8 */
+ 0xfa5a5a5a, /* 6 */
+ 0xda5a5a5a, /* 7 */
+ 0xea5a5a5a, /* 8 */
0x6a5a5aaa, /* 9 */
0x6a5a6a5a, /* 10 */
0x6a5a6aaa, /* 11 */
@@ -261,6 +262,12 @@ enum btc_cx_poicy_type {
/* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+ /* TDMA off + pri: WL_Hi-Tx > BT, BT_Hi > other-WL > BT_Lo */
+ BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 8,
+
+ /* TDMA off + pri: WL_Hi-Tx = BT */
+ BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 9,
+
/* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/
BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
@@ -270,6 +277,21 @@ enum btc_cx_poicy_type {
/* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
BTC_CXP_OFFE_DEF2 = (BTC_CXP_OFFE << 8) | 1,
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_2GBWISOB = (BTC_CXP_OFFE << 8) | 2,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
+ BTC_CXP_OFFE_2GISOB = (BTC_CXP_OFFE << 8) | 3,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot WL > BT */
+ BTC_CXP_OFFE_2GBWMIXB = (BTC_CXP_OFFE << 8) | 4,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G/EBT-slot WL > BT */
+ BTC_CXP_OFFE_WL = (BTC_CXP_OFFE << 8) | 5,
+
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_2GBWMIXB2 = (BTC_CXP_OFFE << 8) | 6,
+
/* TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_FIX_TD3030 = (BTC_CXP_FIX << 8) | 0,
@@ -300,6 +322,9 @@ enum btc_cx_poicy_type {
/* TDMA Fix slot-9: W1:B1 = 40:20 */
BTC_CXP_FIX_TD4020 = (BTC_CXP_FIX << 8) | 9,
+ /* TDMA Fix slot-9: W1:B1 = 40:10 */
+ BTC_CXP_FIX_TD4010ISO = (BTC_CXP_FIX << 8) | 10,
+
/* PS-TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_PFIX_TD3030 = (BTC_CXP_PFIX << 8) | 0,
@@ -322,25 +347,25 @@ enum btc_cx_poicy_type {
BTC_CXP_PFIX_TDW1B1 = (BTC_CXP_PFIX << 8) | 6,
/* TDMA Auto slot-0: W1:B1 = 50:200 */
- BTC_CXP_AUTO_TD50200 = (BTC_CXP_AUTO << 8) | 0,
+ BTC_CXP_AUTO_TD50B1 = (BTC_CXP_AUTO << 8) | 0,
/* TDMA Auto slot-1: W1:B1 = 60:200 */
- BTC_CXP_AUTO_TD60200 = (BTC_CXP_AUTO << 8) | 1,
+ BTC_CXP_AUTO_TD60B1 = (BTC_CXP_AUTO << 8) | 1,
/* TDMA Auto slot-2: W1:B1 = 20:200 */
- BTC_CXP_AUTO_TD20200 = (BTC_CXP_AUTO << 8) | 2,
+ BTC_CXP_AUTO_TD20B1 = (BTC_CXP_AUTO << 8) | 2,
/* TDMA Auto slot-3: W1:B1 = user-define */
BTC_CXP_AUTO_TDW1B1 = (BTC_CXP_AUTO << 8) | 3,
/* PS-TDMA Auto slot-0: W1:B1 = 50:200 */
- BTC_CXP_PAUTO_TD50200 = (BTC_CXP_PAUTO << 8) | 0,
+ BTC_CXP_PAUTO_TD50B1 = (BTC_CXP_PAUTO << 8) | 0,
/* PS-TDMA Auto slot-1: W1:B1 = 60:200 */
- BTC_CXP_PAUTO_TD60200 = (BTC_CXP_PAUTO << 8) | 1,
+ BTC_CXP_PAUTO_TD60B1 = (BTC_CXP_PAUTO << 8) | 1,
/* PS-TDMA Auto slot-2: W1:B1 = 20:200 */
- BTC_CXP_PAUTO_TD20200 = (BTC_CXP_PAUTO << 8) | 2,
+ BTC_CXP_PAUTO_TD20B1 = (BTC_CXP_PAUTO << 8) | 2,
/* PS-TDMA Auto slot-3: W1:B1 = user-define */
BTC_CXP_PAUTO_TDW1B1 = (BTC_CXP_PAUTO << 8) | 3,
@@ -412,7 +437,7 @@ enum btc_w2b_scoreboard {
BTC_WSCB_TDMA = BIT(9),
BTC_WSCB_FIX2M = BIT(10),
BTC_WSCB_WLRFK = BIT(11),
- BTC_WSCB_BTRFK_GNT = BIT(12), /* not used, use mailbox to inform BT */
+ BTC_WSCB_RXSCAN_PRI = BIT(12),
BTC_WSCB_BT_HILNA = BIT(13),
BTC_WSCB_BTLOG = BIT(14),
BTC_WSCB_ALL = GENMASK(23, 0),
@@ -434,6 +459,16 @@ enum btc_wl_link_mode {
BTC_WLINK_MAX
};
+enum btc_wl_mrole_type {
+ BTC_WLMROLE_NONE = 0x0,
+ BTC_WLMROLE_STA_GC,
+ BTC_WLMROLE_STA_GC_NOA,
+ BTC_WLMROLE_STA_GO,
+ BTC_WLMROLE_STA_GO_NOA,
+ BTC_WLMROLE_STA_STA,
+ BTC_WLMROLE_MAX
+};
+
enum btc_bt_hid_type {
BTC_HID_218 = BIT(0),
BTC_HID_418 = BIT(1),
@@ -460,6 +495,11 @@ enum btc_gnt_state {
BTC_GNT_MAX
};
+enum btc_ctr_path {
+ BTC_CTRL_BY_BT = 0,
+ BTC_CTRL_BY_WL
+};
+
enum btc_wl_max_tx_time {
BTC_MAX_TX_TIME_L1 = 500,
BTC_MAX_TX_TIME_L2 = 1000,
@@ -531,6 +571,7 @@ enum btc_reason_and_action {
#define BTC_FREERUN_ANTISO_MIN 30
#define BTC_TDMA_BTHID_MAX 2
#define BTC_BLINK_NOCONNECT 0
+#define BTC_B1_MAX 250 /* unit ms */
static void _run_coex(struct rtw89_dev *rtwdev,
enum btc_reason_and_action reason);
@@ -551,8 +592,10 @@ static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
"[BTC], %s(): return by btc not init!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
return;
- } else if ((wl->status.map.rf_off_pre == 1 && wl->status.map.rf_off == 1) ||
- (wl->status.map.lps_pre == 1 && wl->status.map.lps == 1)) {
+ } else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
+ wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
+ (wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
+ wl->status.map.lps == BTC_LPS_RF_OFF)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by wl off!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
@@ -616,8 +659,6 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
memset(&btc->mdinfo, 0, sizeof(btc->mdinfo));
}
-#define BTC_FWINFO_BUF 1024
-
#define BTC_RPT_HDR_SIZE 3
#define BTC_CHK_WLSLOT_DRIFT_MAX 15
#define BTC_CHK_HANG_MAX 3
@@ -869,18 +910,24 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *prptbuf, u32 index)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_fbtc_rpt_ctrl *prpt = NULL;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_fbtc_rpt_ctrl *prpt;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prpt_v1;
struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1 = NULL;
struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
struct rtw89_btc_prpt *btc_prpt = NULL;
struct rtw89_btc_fbtc_slot *rtp_slot = NULL;
- u8 rpt_type = 0, *rpt_content = NULL, *pfinfo = NULL;
- u16 wl_slot_set = 0;
+ void *rpt_content = NULL, *pfinfo = NULL;
+ u8 rpt_type = 0;
+ u16 wl_slot_set = 0, wl_slot_real = 0;
u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t;
+ u32 cnt_leak_slot = 0, bt_slot_real = 0, cnt_rx_imr = 0;
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -904,100 +951,129 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_ctrl.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
- pcinfo->req_fver = BTCRPT_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxbtcrpt_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_TDMA:
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_tdma.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
- pcinfo->req_fver = FCXTDMA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxtdma_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_slots.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
- pcinfo->req_fver = FCXSLOTS_VER;
+ pcinfo->req_fver = chip->fcxslots_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_CYSTA:
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_cysta.finfo);
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
- pcinfo->req_fver = FCXCYSTA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo;
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
+ rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxcysta_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_step.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
- trace_step + 8;
- pcinfo->req_fver = FCXSTEP_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
+ trace_step +
+ offsetof(struct rtw89_btc_fbtc_steps, step);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo_v1.step[0]) *
+ trace_step +
+ offsetof(struct rtw89_btc_fbtc_steps_v1, step);
+ }
+ pcinfo->req_fver = chip->fcxstep_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_NULLSTA:
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_nullsta.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
- pcinfo->req_fver = FCXNULLSTA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxnullsta_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_MREG:
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_mregval.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo);
- pcinfo->req_fver = FCXMREG_VER;
+ pcinfo->req_fver = chip->fcxmreg_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
- pcinfo->req_fver = FCXGPIODBG_VER;
+ pcinfo->req_fver = chip->fcxgpiodbg_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btver.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
- pcinfo->req_fver = FCX_BTVER_VER;
+ pcinfo->req_fver = chip->fcxbtver_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_SCAN:
pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btscan.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo);
- pcinfo->req_fver = FCX_BTSCAN_VER;
+ pcinfo->req_fver = chip->fcxbtscan_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_AFH:
pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btafh.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo);
- pcinfo->req_fver = FCX_BTAFH_VER;
+ pcinfo->req_fver = chip->fcxbtafh_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_DEVICE:
pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btdev.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btdev.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
- pcinfo->req_fver = FCX_BTDEVINFO_VER;
+ pcinfo->req_fver = chip->fcxbtdevinfo_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
@@ -1026,7 +1102,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcpy(pfinfo, rpt_content, pcinfo->req_len);
pcinfo->valid = 1;
- if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ if (rpt_type == BTC_RPT_TYPE_TDMA && chip->chip_id == RTL8852A) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): check %d %zu\n", __func__,
BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
@@ -1039,7 +1115,8 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
dm->tdma_now.type, dm->tdma_now.rxflctrl,
dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rsvd0, dm->tdma_now.rsvd1);
+ dm->tdma_now.rxflctrl_role,
+ dm->tdma_now.option_ctrl);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
@@ -1050,14 +1127,46 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n,
pfwinfo->rpt_fbtc_tdma.finfo.leak_n,
pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.rsvd0,
- pfwinfo->rpt_fbtc_tdma.finfo.rsvd1);
+ pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl_role,
+ pfwinfo->rpt_fbtc_tdma.finfo.option_ctrl);
}
_chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo,
sizeof(dm->tdma_now)));
+ } else if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n", __func__,
+ BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
+
+ if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
+ sizeof(dm->tdma_now)) != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ dm->tdma_now.type, dm->tdma_now.rxflctrl,
+ dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
+ dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
+ dm->tdma_now.rxflctrl_role,
+ dm->tdma_now.option_ctrl);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.type,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.txpause,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.wtgle_n,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.leak_n,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.ext_ctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl_role,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.option_ctrl);
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
+ sizeof(dm->tdma_now)));
}
if (rpt_type == BTC_RPT_TYPE_SLOT) {
@@ -1097,7 +1206,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
sizeof(dm->slot_now)));
}
- if (rpt_type == BTC_RPT_TYPE_CYSTA &&
+ if (rpt_type == BTC_RPT_TYPE_CYSTA && chip->chip_id == RTL8852A &&
pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) {
/* Check Leak-AP */
if (pcysta->slot_cnt[CXST_LK] != 0 &&
@@ -1120,16 +1229,55 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
}
_chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]);
_chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
+ } else if (rpt_type == BTC_RPT_TYPE_CYSTA && pcysta_v1 &&
+ le16_to_cpu(pcysta_v1->cycles) >= BTC_CYSTA_CHK_PERIOD) {
+ cnt_leak_slot = le32_to_cpu(pcysta_v1->slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta_v1->leak_slot.cnt_rximr);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
+ }
+
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_BT]);
+
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta_v1->slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le32_to_cpu(pcysta_v1->slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ (u32)le16_to_cpu(pcysta_v1->cycles));
}
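The v1 cycle checks above feed _chk_btc_err() with the drift between the configured W1 slot and the averaged real WL slot time. Below is a minimal editorial sketch of that figure, not part of the patch; for example, a slot configured for 50 ms that really averages 58 ms yields a drift of 8 ms.

/* Editorial sketch (not part of the patch): the drift value handed to
 * _chk_btc_err(..., BTC_DCNT_WL_SLOT_DRIFT, ...) above.  The patch only
 * performs the check when the measured time exceeds the configured one;
 * wl_slot_drift(50, 58) == 8.
 */
static u16 wl_slot_drift(u16 wl_slot_set, u16 wl_slot_real)
{
	return wl_slot_real > wl_slot_set ? wl_slot_real - wl_slot_set : 0;
}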
- if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ if (rpt_type == BTC_RPT_TYPE_CTRL && chip->chip_id == RTL8852A) {
prpt = &pfwinfo->rpt_ctrl.finfo;
btc->fwinfo.rpt_en_map = prpt->rpt_enable;
wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
wl->ver_info.fw = prpt->wl_fw_ver;
- dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload);
+ dm->wl_fw_cx_offload = !!prpt->wl_fw_cx_offload;
_chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
pfwinfo->event[BTF_EVNT_RPT]);
@@ -1142,6 +1290,33 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
btc->cx.cnt_bt[BTC_BCNT_POLUT] =
rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
}
+ } else if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ prpt_v1 = &pfwinfo->rpt_ctrl.finfo_v1;
+ btc->fwinfo.rpt_en_map = le32_to_cpu(prpt_v1->rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt_v1->wl_fw_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt_v1->wl_fw_info.fw_ver);
+ dm->wl_fw_cx_offload = !!le32_to_cpu(prpt_v1->wl_fw_info.cx_offload);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt_v1->gnt_val[i],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_POLLUTED]);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+ if (le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
}
if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
@@ -1155,6 +1330,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *pbuf, u32 buf_len)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc_prpt *btc_prpt = NULL;
u32 index = 0, rpt_len = 0;
@@ -1164,7 +1340,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
while (pbuf) {
btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index];
- if (index + 2 >= BTC_FWINFO_BUF)
+ if (index + 2 >= chip->btc_fwinfo_buf)
break;
/* At least 3 bytes: type(1) & len(2) */
rpt_len = le16_to_cpu(btc_prpt->len);
@@ -1182,10 +1358,12 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
static void _append_tdma(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_btf_tlv *tlv = NULL;
- struct rtw89_btc_fbtc_tdma *v = NULL;
+ struct rtw89_btc_btf_tlv *tlv;
+ struct rtw89_btc_fbtc_tdma *v;
+ struct rtw89_btc_fbtc_tdma_v1 *v1;
u16 len = btc->policy_len;
if (!btc->update_policy_force &&
@@ -1197,12 +1375,19 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
}
tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
- v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
tlv->type = CXPOLICY_TDMA;
- tlv->len = sizeof(*v);
-
- memcpy(v, &dm->tdma, sizeof(*v));
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ if (chip->chip_id == RTL8852A) {
+ v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
+ tlv->len = sizeof(*v);
+ memcpy(v, &dm->tdma, sizeof(*v));
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ } else {
+ tlv->len = sizeof(*v1);
+ v1 = (struct rtw89_btc_fbtc_tdma_v1 *)&tlv->val[0];
+ v1->fver = chip->fcxtdma_ver;
+ v1->tdma = dm->tdma;
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v1);
+ }
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): type:%d, rxflctrl=%d, txpause=%d, wtgle_n=%d, leak_n=%d, ext_ctrl=%d\n",
@@ -1408,12 +1593,17 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
switch (type) {
case CXDRVINFO_INIT:
rtw89_fw_h2c_cxdrv_init(rtwdev);
break;
case CXDRVINFO_ROLE:
- rtw89_fw_h2c_cxdrv_role(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ rtw89_fw_h2c_cxdrv_role(rtwdev);
+ else
+ rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
break;
case CXDRVINFO_CTRL:
rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
@@ -1448,7 +1638,7 @@ void btc_fw_event(struct rtw89_dev *rtwdev, u8 evt_id, void *data, u32 len)
}
}
-static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
+static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -1462,7 +1652,7 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
if (!(phy_map & BIT(i)))
continue;
- switch (state) {
+ switch (wl_state) {
case BTC_GNT_HW:
g[i].gnt_wl_sw_en = 0;
g[i].gnt_wl = 0;
@@ -1476,6 +1666,21 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
g[i].gnt_wl = 1;
break;
}
+
+ switch (bt_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_bt_sw_en = 0;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ break;
+ }
}
rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
@@ -1534,6 +1739,7 @@ static void _set_wl_tx_power(struct rtw89_dev *rtwdev, u32 level)
static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -1546,6 +1752,8 @@ static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): level = %d\n",
__func__, level);
+
+ chip->ops->btc_set_wl_rx_gain(rtwdev, level);
}
static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
@@ -1683,28 +1891,45 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *b = &bt->link_info;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ struct rtw89_btc_wl_active_role *r;
+ struct rtw89_btc_wl_active_role_v1 *r1;
u8 en = 0, i, ch = 0, bw = 0;
+ u8 mode, connect_cnt;
if (btc->ctrl.manual || wl->status.map.scan)
return;
- /* TODO if include module->ant.type == BTC_ANT_SHARED */
+ if (chip->chip_id == RTL8852A) {
+ mode = wl_rinfo->link_mode;
+ connect_cnt = wl_rinfo->connect_cnt;
+ } else {
+ mode = wl_rinfo_v1->link_mode;
+ connect_cnt = wl_rinfo_v1->connect_cnt;
+ }
+
if (wl->status.map.rf_off || bt->whql_test ||
- wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- wl_rinfo->link_mode == BTC_WLINK_5G ||
- wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ mode == BTC_WLINK_NOLINK || mode == BTC_WLINK_5G ||
+ connect_cnt > BTC_TDMA_WLROLE_MAX) {
en = false;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
+ } else if (mode == BTC_WLINK_2G_MCC || mode == BTC_WLINK_2G_SCC) {
en = true;
/* get p2p channel */
for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (wl_rinfo->active_role[i].role ==
- RTW89_WIFI_ROLE_P2P_GO ||
- wl_rinfo->active_role[i].role ==
- RTW89_WIFI_ROLE_P2P_CLIENT) {
- ch = wl_rinfo->active_role[i].ch;
- bw = wl_rinfo->active_role[i].bw;
+ r = &wl_rinfo->active_role[i];
+ r1 = &wl_rinfo_v1->active_role_v1[i];
+
+ if (chip->chip_id == RTL8852A &&
+ (r->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r->ch;
+ bw = r->bw;
+ break;
+ } else if (chip->chip_id != RTL8852A &&
+ (r1->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r1->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r1->ch;
+ bw = r1->bw;
break;
}
}
@@ -1712,10 +1937,18 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
en = true;
/* get 2g channel */
for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (wl_rinfo->active_role[i].connected &&
- wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
- ch = wl_rinfo->active_role[i].ch;
- bw = wl_rinfo->active_role[i].bw;
+ r = &wl_rinfo->active_role[i];
+ r1 = &wl_rinfo_v1->active_role_v1[i];
+
+ if (chip->chip_id == RTL8852A &&
+ r->connected && r->band == RTW89_BAND_2G) {
+ ch = r->ch;
+ bw = r->bw;
+ break;
+ } else if (chip->chip_id != RTL8852A &&
+ r1->connected && r1->band == RTW89_BAND_2G) {
+ ch = r1->ch;
+ bw = r1->bw;
break;
}
}
@@ -1768,6 +2001,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -1777,7 +2011,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
/* The below is dedicated antenna case */
- if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX ||
+ wl_rinfo_v1->connect_cnt > BTC_TDMA_WLROLE_MAX) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -1826,6 +2061,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
#define _tdma_set_flctrl(btc, flc) ({(btc)->dm.tdma.rxflctrl = flc; })
+#define _tdma_set_flctrl_role(btc, role) ({(btc)->dm.tdma.rxflctrl_role = role; })
#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; })
#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; })
@@ -1904,6 +2140,15 @@ union btc_btinfo {
static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
enum btc_reason_and_action action)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->btc_set_policy(rtwdev, policy_type);
+ _fw_set_policy(rtwdev, policy_type, action);
+}
+
+#define BTC_B1_MAX 250 /* unit ms */
+void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
+{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
@@ -1964,6 +2209,9 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
case BTC_CXP_OFF_BWB1:
_slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
break;
+ case BTC_CXP_OFF_BWB3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[6]);
+ break;
}
break;
case BTC_CXP_OFFB: /* TDMA off + beacon protect */
@@ -2080,17 +2328,361 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_AUTO];
switch (policy_type) {
- case BTC_CXP_AUTO_TD50200:
+ case BTC_CXP_AUTO_TD50B1:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD60B1:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD20B1:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO_TD50B1:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD60B1:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD20B1:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TDW1B1:
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO2];
+ switch (policy_type) {
+ case BTC_CXP_AUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TDW1B4: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO2];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TDW1B4: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL(rtw89_btc_set_policy);
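Both policy builders are exported because the dispatch now goes through the per-chip ops table: _set_policy() calls chip->ops->btc_set_policy() before handing the result to the firmware. A minimal sketch of the expected per-chip wiring follows, assuming the usual designated-initializer style of struct rtw89_chip_ops; every member other than .btc_set_policy is a placeholder.

/* Editorial sketch (not part of the patch): each chip descriptor picks
 * the policy builder matching its firmware format.  Only the
 * .btc_set_policy assignments are taken from the patch; the rest of the
 * ops tables is omitted.
 */
static const struct rtw89_chip_ops rtw8852a_ops_sketch = {
	.btc_set_policy = rtw89_btc_set_policy,		/* legacy 8852A layout */
	/* ... remaining chip ops ... */
};

static const struct rtw89_chip_ops rtw8852c_ops_sketch = {
	.btc_set_policy = rtw89_btc_set_policy_v1,	/* v1 firmware structures */
	/* ... remaining chip ops ... */
};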
+
+void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
+ struct rtw89_btc_fbtc_slot *s = dm->slot;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
+ struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
+ struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
+ u8 type, null_role;
+ u32 tbl_w1, tbl_b1, tbl_b4;
+
+ type = FIELD_GET(BTC_CXP_MASK, policy_type);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ tbl_w1 = cxtbl[1];
+ else if (hid->exist && hid->type == BTC_HID_218)
+ tbl_w1 = cxtbl[7]; /* Ack/BA no break bt Hi-Pri-rx */
+ else
+ tbl_w1 = cxtbl[8];
+
+ if (dm->leak_ap &&
+ (type == BTC_CXP_PFIX || type == BTC_CXP_PAUTO2)) {
+ tbl_b1 = cxtbl[3];
+ tbl_b4 = cxtbl[3];
+ } else if (hid->exist && hid->type == BTC_HID_218) {
+ tbl_b1 = cxtbl[4]; /* Ack/BA no break bt Hi-Pri-rx */
+ tbl_b4 = cxtbl[4];
+ } else {
+ tbl_b1 = cxtbl[2];
+ tbl_b4 = cxtbl[2];
+ }
+ } else {
+ tbl_w1 = cxtbl[16];
+ tbl_b1 = cxtbl[17];
+ tbl_b4 = cxtbl[17];
+ }
+
+ btc->bt_req_en = false;
+
+ switch (type) {
+ case BTC_CXP_USERDEF0:
+ btc->update_policy_force = true;
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF: /* TDMA off */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFF_BT:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF_WL:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[1]);
+ break;
+ case BTC_CXP_OFF_EQ0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
+ break;
+ case BTC_CXP_OFF_EQ1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[16]);
+ break;
+ case BTC_CXP_OFF_EQ2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[17]);
+ break;
+ case BTC_CXP_OFF_EQ3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[18]);
+ break;
+ case BTC_CXP_OFF_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[5]);
+ break;
+ case BTC_CXP_OFF_BWB1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ case BTC_CXP_OFF_BWB2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[7]);
+ break;
+ case BTC_CXP_OFF_BWB3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[6]);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_OFFB: /* TDMA off + beacon protect */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF_B2];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFFB_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_OFFE: /* TDMA off + beacon protect + Ext_control */
+ btc->bt_req_en = true;
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_OFF_EXT];
+
+ /* To avoid wl-s0 tx break by hid/hfp tx */
+ if (hid->exist || hfp->exist)
+ tbl_w1 = cxtbl[16];
+
+ switch (policy_type) {
+ case BTC_CXP_OFFE_DEF:
+ s[CXST_E2G] = s_def[CXST_E2G];
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ case BTC_CXP_OFFE_DEF2:
+ _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_FIX: /* TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_FIX];
+
+ switch (policy_type) {
+ case BTC_CXP_FIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010:
+ _slot_set(btc, CXST_W1, 40, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010ISO:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD7010:
+ _slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD3060:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_PFIX: /* PS-TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PFIX];
+
+ switch (policy_type) {
+ case BTC_CXP_PFIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO: /* TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO];
+
+ switch (policy_type) {
+ case BTC_CXP_AUTO_TD50B1:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_AUTO_TD60200:
+ case BTC_CXP_AUTO_TD60B1:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_AUTO_TD20200:
+ case BTC_CXP_AUTO_TD20B1:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
@@ -2098,23 +2690,26 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
tbl_b1, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_PAUTO];
+
switch (policy_type) {
- case BTC_CXP_PAUTO_TD50200:
+ case BTC_CXP_PAUTO_TD50B1:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_PAUTO_TD60200:
+ case BTC_CXP_PAUTO_TD60B1:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_PAUTO_TD20200:
+ case BTC_CXP_PAUTO_TD20B1:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
case BTC_CXP_PAUTO_TDW1B1:
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
@@ -2122,119 +2717,112 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
tbl_b1, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_AUTO2];
+
switch (policy_type) {
case BTC_CXP_AUTO2_TD3050:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD3070:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD5050:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD6060:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD2080:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TDW1B4: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
tbl_b4, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_PAUTO2];
+
switch (policy_type) {
case BTC_CXP_PAUTO2_TD3050:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD3070:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD5050:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD6060:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD2080:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TDW1B4: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
tbl_b4, SLOT_MIX);
break;
+ default:
+ break;
}
break;
}
- _fw_set_policy(rtwdev, policy_type, action);
-}
-
-static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_mac_ax_gnt *g = dm->gnt.band;
- u8 i;
-
- if (phy_map > BTC_PHY_ALL)
- return;
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC && dm->tdma.rxflctrl) {
+ null_role = FIELD_PREP(0x0f, dm->wl_scc.null_role1) |
+ FIELD_PREP(0xf0, dm->wl_scc.null_role2);
+ _tdma_set_flctrl_role(btc, null_role);
+ }
- for (i = 0; i < RTW89_PHY_MAX; i++) {
- if (!(phy_map & BIT(i)))
- continue;
+ /* enter leak_slot after each null-1 */
+ if (dm->leak_ap && dm->tdma.leak_n > 1)
+ _tdma_set_lek(btc, 1);
- switch (state) {
- case BTC_GNT_HW:
- g[i].gnt_bt_sw_en = 0;
- g[i].gnt_bt = 0;
- break;
- case BTC_GNT_SW_LO:
- g[i].gnt_bt_sw_en = 1;
- g[i].gnt_bt = 0;
- break;
- case BTC_GNT_SW_HI:
- g[i].gnt_bt_sw_en = 1;
- g[i].gnt_bt = 1;
- break;
- }
+ if (dm->tdma_instant_excute) {
+ btc->dm.tdma.option_ctrl |= BIT(0);
+ btc->update_policy_force = true;
}
-
- rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
+EXPORT_SYMBOL(rtw89_btc_set_policy_v1);
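At the tail of the v1 builder the two SCC null-data roles are packed into the single rxflctrl_role byte via FIELD_PREP(0x0f, ...) | FIELD_PREP(0xf0, ...). Here is a self-contained sketch of that packing with a worked value; the helper name and the sample numbers are illustrative, not from the patch.

#include <linux/bitfield.h>
#include <linux/types.h>

/* Editorial sketch (not part of the patch): null_role1 lands in the low
 * nibble, null_role2 in the high nibble, so pack_null_role(0x2, 0x5)
 * returns 0x52.
 */
static u8 pack_null_role(u8 null_role1, u8 null_role2)
{
	return FIELD_PREP(0x0f, null_role1) | FIELD_PREP(0xf0, null_role2);
}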
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
u8 tx_val, u8 rx_val)
@@ -2300,86 +2888,74 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
switch (type) {
case BTC_ANT_WPOWERON:
- rtw89_chip_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
break;
case BTC_ANT_WINIT:
- if (bt->enable.now) {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
- } else {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- }
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
+ if (bt->enable.now)
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI);
+ else
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
+
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
break;
case BTC_ANT_WONLY:
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WOFF:
- rtw89_chip_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W2G:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
if (rtwdev->dbcc_en) {
for (i = 0; i < RTW89_PHY_MAX; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
- _set_gnt_wl(rtwdev, BIT(i), gnt_wl_ctrl);
-
gnt_bt_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
/* BT should control by GNT_BT if WL_2G at S0 */
if (i == 1 &&
wl_dinfo->real_band[0] == RTW89_BAND_2G &&
wl_dinfo->real_band[1] == RTW89_BAND_5G)
gnt_bt_ctrl = BTC_GNT_HW;
- _set_gnt_bt(rtwdev, BIT(i), gnt_bt_ctrl);
-
+ _set_gnt(rtwdev, BIT(i), gnt_wl_ctrl, gnt_bt_ctrl);
plt_ctrl = b2g ? BTC_PLT_BT : BTC_PLT_NONE;
_set_bt_plut(rtwdev, BIT(i),
plt_ctrl, plt_ctrl);
}
} else {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_HW, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_BT, BTC_PLT_BT);
}
break;
case BTC_ANT_W5G:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W25G:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_HW, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
break;
case BTC_ANT_FREERUN:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WRFK:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_BRFK:
- rtw89_chip_cfg_ctrl_path(rtwdev, false);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
default:
@@ -2491,14 +3067,19 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
static void _action_bt_hfp(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
- if (btc->cx.wl.status.map._4way)
+ if (btc->cx.wl.status.map._4way) {
_set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HFP);
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ btc->cx.bt.scan_rx_low_pri = true;
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB2, BTC_ACT_BT_HFP);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
+ }
} else {
_set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
}
@@ -2506,17 +3087,37 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
static void _action_bt_hid(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_hid_desc *hid = &bt->link_info.hid_desc;
+ u16 policy_type = BTC_CXP_OFF_BT;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) /* shared-antenna */
- if (btc->cx.wl.status.map._4way)
- _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HID);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HID);
- else /* dedicated-antenna */
- _set_policy(rtwdev, BTC_CXP_OFF_EQ3, BTC_ACT_BT_HID);
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (wl->status.map._4way) {
+ policy_type = BTC_CXP_OFF_WL;
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ btc->cx.bt.scan_rx_low_pri = true;
+ if (hid->type & BTC_HID_BLE)
+ policy_type = BTC_CXP_OFF_BWB0;
+ else
+ policy_type = BTC_CXP_OFF_BWB2;
+ } else if (hid->type == BTC_HID_218) {
+ bt->scan_rx_low_pri = true;
+ policy_type = BTC_CXP_OFF_BWB2;
+ } else if (chip->para_ver == 0x1) {
+ policy_type = BTC_CXP_OFF_BWB3;
+ } else {
+ policy_type = BTC_CXP_OFF_BWB1;
+ }
+ } else { /* dedicated-antenna */
+ policy_type = BTC_CXP_OFF_EQ3;
+ }
+
+ _set_policy(rtwdev, policy_type, BTC_ACT_BT_HID);
}
static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
@@ -2537,7 +3138,7 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
} else {
_set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP);
+ BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP);
}
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
@@ -2554,12 +3155,12 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
BTC_ACT_BT_A2DP);
} else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
BTC_ACT_BT_A2DP);
}
break;
case BTC_WIDLE: /* wl-idle + bt-A2DP */
- _set_policy(rtwdev, BTC_CXP_AUTO_TD20200, BTC_ACT_BT_A2DP);
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP);
break;
}
}
@@ -2639,7 +3240,7 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
} else {
_set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP_HID);
+ BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID);
}
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
@@ -2657,7 +3258,7 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
BTC_ACT_BT_A2DP_HID);
} else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
BTC_ACT_BT_A2DP_HID);
}
break;
@@ -2792,19 +3393,27 @@ static void _action_wl_rfk(struct rtw89_dev *rtwdev)
static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- bool is_btg = false;
+ bool is_btg;
+ u8 mode;
if (btc->ctrl.manual)
return;
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
/* notify halbb ignore GNT_BT or not for WL BB Rx-AGC control */
- if (wl_rinfo->link_mode == BTC_WLINK_5G) /* always 0 if 5G */
+ if (mode == BTC_WLINK_5G) /* always 0 if 5G */
is_btg = false;
- else if (wl_rinfo->link_mode == BTC_WLINK_25G_DBCC &&
+ else if (mode == BTC_WLINK_25G_DBCC &&
wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
is_btg = false;
else
@@ -2816,7 +3425,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
btc->dm.wl_btg_rx = is_btg;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (mode == BTC_WLINK_25G_MCC)
return;
rtw89_ctrl_btg(rtwdev, is_btg);
@@ -2889,6 +3498,7 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -2898,16 +3508,22 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc;
struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
- u8 mode = wl_rinfo->link_mode;
- u8 tx_retry = 0;
- u32 tx_time = 0;
- u16 enable = 0;
+ u8 mode;
+ u8 tx_retry;
+ u32 tx_time;
+ u16 enable;
bool reenable = false;
if (btc->ctrl.manual)
return;
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
enable = 0;
@@ -2951,13 +3567,21 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
+ u8 mode;
+
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
- if (wl_rinfo->link_mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
+ if (mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
bt_hi_lna_rx = true;
if (bt_hi_lna_rx == bt->hi_lna_rx)
@@ -2966,14 +3590,34 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
_write_scbd(rtwdev, BTC_WSCB_BT_HILNA, bt_hi_lna_rx);
}
+static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+
+ _write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, (bool)(!!bt->scan_rx_low_pri));
+}
+
/* TODO add these functions */
static void _action_common(struct rtw89_dev *rtwdev)
{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
_set_btg_ctrl(rtwdev);
_set_wl_tx_limit(rtwdev);
_set_bt_afh_info(rtwdev);
_set_bt_rx_agc(rtwdev);
_set_rf_trx_para(rtwdev);
+ _set_bt_rx_scan_pri(rtwdev);
+
+ if (wl->scbd_change) {
+ rtw89_mac_cfg_sb(rtwdev, wl->scbd);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
+ wl->scbd);
+ wl->scbd_change = false;
+ btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+ }
}
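Scoreboard updates become two-stage with this patch: _write_scbd() (changed further below) only caches the merged value and raises wl->scbd_change, while the single rtw89_mac_cfg_sb() write happens here in _action_common() once per coex run. A condensed sketch of that cache-then-flush pattern follows, using hypothetical helper names; the force_exec handling for the ACTIVE/ON bits is omitted.

/* Editorial sketch (not part of the patch): the deferred scoreboard
 * write reduced to two hypothetical helpers.
 */
static void scbd_cache(struct rtw89_btc_wl_info *wl, u32 val, bool state)
{
	u32 scbd_val = state ? wl->scbd | val : wl->scbd & ~val;

	if (scbd_val == wl->scbd)
		return;				/* unchanged, nothing to flush */

	wl->scbd = scbd_val;
	wl->scbd_change = true;
}

static void scbd_flush(struct rtw89_dev *rtwdev, struct rtw89_btc_wl_info *wl)
{
	if (!wl->scbd_change)
		return;

	rtw89_mac_cfg_sb(rtwdev, wl->scbd);	/* one hardware write per run */
	wl->scbd_change = false;
}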
static void _action_by_bt(struct rtw89_dev *rtwdev)
@@ -3145,6 +3789,68 @@ static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
}
}
+static void _action_wl_2g_scc_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &wl->role_info_v1;
+ u16 policy_type = BTC_CXP_OFF_BT;
+ u32 dur;
+
+ if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ policy_type = BTC_CXP_OFF_EQ0;
+ } else {
+ /* shared-antenna */
+ switch (wl_rinfo->mrole_type) {
+ case BTC_WLMROLE_STA_GC:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
+ dm->wl_scc.ebt_null = 0; /* no ext-slot-control */
+ _action_by_bt(rtwdev);
+ return;
+ case BTC_WLMROLE_STA_STA:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.ebt_null = 0; /* no ext-slot-control */
+ _action_by_bt(rtwdev);
+ return;
+ case BTC_WLMROLE_STA_GC_NOA:
+ case BTC_WLMROLE_STA_GO:
+ case BTC_WLMROLE_STA_GO_NOA:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
+ dur = wl_rinfo->mrole_noa_duration;
+
+ if (wl->status.map._4way) {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_WL;
+ } else if (bt->link_info.status.map.connect == 0) {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ } else if (bt->link_info.a2dp_desc.exist &&
+ dur < btc->bt_req_len) {
+ dm->wl_scc.ebt_null = 1; /* tx null at EBT */
+ policy_type = BTC_CXP_OFFE_2GBWMIXB2;
+ } else if (bt->link_info.a2dp_desc.exist ||
+ bt->link_info.pan_desc.exist) {
+ dm->wl_scc.ebt_null = 1; /* tx null at EBT */
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
+}
+
static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -3234,20 +3940,20 @@ static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 scbd_val = 0;
+ u8 force_exec = false;
if (!chip->scbd)
return;
scbd_val = state ? wl->scbd | val : wl->scbd & ~val;
- if (scbd_val == wl->scbd)
- return;
- rtw89_mac_cfg_sb(rtwdev, scbd_val);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
- scbd_val);
- wl->scbd = scbd_val;
+ if (val & BTC_WSCB_ACTIVE || val & BTC_WSCB_ON)
+ force_exec = true;
- btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+ if (scbd_val != wl->scbd || force_exec) {
+ wl->scbd = scbd_val;
+ wl->scbd_change = true;
+ }
}
static u8
@@ -3428,8 +4134,158 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], cnt_connect = %d, link_mode = %d\n",
- cnt_connect, wl_rinfo->link_mode);
+ "[BTC], cnt_connect = %d, connecting = %d, link_mode = %d\n",
+ cnt_connect, cnt_connecting, wl_rinfo->link_mode);
+
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+}
+
+static void _update_wl_info_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &wl->role_info_v1;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ u8 cnt_connect = 0, cnt_connecting = 0, cnt_active = 0;
+ u8 cnt_2g = 0, cnt_5g = 0, phy;
+ u32 wl_2g_ch[2] = {}, wl_5g_ch[2] = {};
+ bool b2g = false, b5g = false, client_joined = false;
+ u8 i;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active)
+ continue;
+
+ cnt_active++;
+ wl_rinfo->active_role_v1[cnt_active - 1].role = wl_linfo[i].role;
+ wl_rinfo->active_role_v1[cnt_active - 1].pid = wl_linfo[i].pid;
+ wl_rinfo->active_role_v1[cnt_active - 1].phy = wl_linfo[i].phy;
+ wl_rinfo->active_role_v1[cnt_active - 1].band = wl_linfo[i].band;
+ wl_rinfo->active_role_v1[cnt_active - 1].noa = (u8)wl_linfo[i].noa;
+ wl_rinfo->active_role_v1[cnt_active - 1].connected = 0;
+
+ wl->port_id[wl_linfo[i].role] = wl_linfo[i].pid;
+
+ phy = wl_linfo[i].phy;
+
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ wl_dinfo->role[phy] = wl_linfo[i].role;
+ wl_dinfo->op_band[phy] = wl_linfo[i].band;
+ _update_dbcc_band(rtwdev, phy);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ cnt_connecting++;
+ } else {
+ cnt_connect++;
+ if ((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ }
+
+ wl_rinfo->role_map.val |= BIT(wl_linfo[i].role);
+ wl_rinfo->active_role_v1[cnt_active - 1].ch = wl_linfo[i].ch;
+ wl_rinfo->active_role_v1[cnt_active - 1].bw = wl_linfo[i].bw;
+ wl_rinfo->active_role_v1[cnt_active - 1].connected = 1;
+
+ /* only care 2 roles + BT coex */
+ if (wl_linfo[i].band != RTW89_BAND_2G) {
+ if (cnt_5g <= ARRAY_SIZE(wl_5g_ch) - 1)
+ wl_5g_ch[cnt_5g] = wl_linfo[i].ch;
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (cnt_2g <= ARRAY_SIZE(wl_2g_ch) - 1)
+ wl_2g_ch[cnt_2g] = wl_linfo[i].ch;
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+
+ wl_rinfo->connect_cnt = cnt_connect;
+
+ /* Be careful to change the following sequence!! */
+ if (cnt_connect == 0) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map.role.none = 1;
+ } else if (!b2g && b5g) {
+ wl_rinfo->link_mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map.role.nan) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_NAN;
+ } else if (cnt_connect > BTC_TDMA_WLROLE_MAX) {
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ } else if (b2g && b5g && cnt_connect == 2) {
+ if (rtwdev->dbcc_en) {
+ switch (wl_dinfo->role[RTW89_PHY_0]) {
+ case RTW89_WIFI_ROLE_STATION:
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ break;
+ case RTW89_WIFI_ROLE_P2P_GO:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ break;
+ case RTW89_WIFI_ROLE_P2P_CLIENT:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ break;
+ case RTW89_WIFI_ROLE_AP:
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ break;
+ default:
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ break;
+ }
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_25G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 2) {
+ if (wl_rinfo->role_map.role.station &&
+ (wl_rinfo->role_map.role.p2p_go ||
+ wl_rinfo->role_map.role.p2p_gc ||
+ wl_rinfo->role_map.role.ap)) {
+ if (wl_2g_ch[0] == wl_2g_ch[1])
+ wl_rinfo->link_mode = BTC_WLINK_2G_SCC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 1) {
+ if (wl_rinfo->role_map.role.station)
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ else if (wl_rinfo->role_map.role.ap)
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ else if (wl_rinfo->role_map.role.p2p_go)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ else if (wl_rinfo->role_map.role.p2p_gc)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ }
+
+ /* if no client_joined, don't care P2P-GO/AP role */
+ if (wl_rinfo->role_map.role.p2p_go || wl_rinfo->role_map.role.ap) {
+ if (!client_joined) {
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_MCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ wl_rinfo->connect_cnt = 1;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->connect_cnt = 0;
+ }
+ }
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], cnt_connect = %d, connecting = %d, link_mode = %d\n",
+ cnt_connect, cnt_connecting, wl_rinfo->link_mode);
_fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
@@ -3584,23 +4440,32 @@ static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
static
void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
- u8 mode = wl_rinfo->link_mode;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ u8 mode;
lockdep_assert_held(&rtwdev->mutex);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
- __func__, reason, mode);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
- __func__, dm->wl_only, dm->bt_only);
dm->run_reason = reason;
_update_dm_step(rtwdev, reason);
_update_btc_state_map(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
+ __func__, reason, mode);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
+ __func__, dm->wl_only, dm->bt_only);
+
/* Be careful to change the following function sequence!! */
if (btc->ctrl.manual) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -3657,6 +4522,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
btc->ctrl.igno_bt = false;
dm->freerun = false;
+ bt->scan_rx_low_pri = false;
if (reason == BTC_RSN_NTFY_INIT) {
_action_wl_init(rtwdev);
@@ -3699,21 +4565,30 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_sta(rtwdev);
break;
case BTC_WLINK_2G_AP:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_ap(rtwdev);
break;
case BTC_WLINK_2G_GO:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_go(rtwdev);
break;
case BTC_WLINK_2G_GC:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_gc(rtwdev);
break;
case BTC_WLINK_2G_SCC:
- _action_wl_2g_scc(rtwdev);
+ bt->scan_rx_low_pri = true;
+ if (chip->chip_id == RTL8852A)
+ _action_wl_2g_scc(rtwdev);
+ else if (chip->chip_id == RTL8852C)
+ _action_wl_2g_scc_v1(rtwdev);
break;
case BTC_WLINK_2G_MCC:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_mcc(rtwdev);
break;
case BTC_WLINK_25G_MCC:
+ bt->scan_rx_low_pri = true;
_action_wl_25g_mcc(rtwdev);
break;
case BTC_WLINK_5G:
@@ -3743,11 +4618,14 @@ void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev)
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
btc->dm.cnt_notify[BTC_NCNT_POWER_OFF]++;
btc->cx.wl.status.map.rf_off = 1;
+ btc->cx.wl.status.map.busy = 0;
+ wl->status.map.lps = BTC_LPS_OFF;
_write_scbd(rtwdev, BTC_WSCB_ALL, false);
_run_coex(rtwdev, BTC_RSN_NTFY_POWEROFF);
@@ -3807,7 +4685,7 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_write_scbd(rtwdev,
BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true);
_update_bt_scbd(rtwdev, true);
- if (rtw89_mac_get_ctrl_path(rtwdev)) {
+ if (rtw89_mac_get_ctrl_path(rtwdev) && chip->chip_id == RTL8852A) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): PTA owner warning!!\n",
__func__);
@@ -4150,7 +5028,8 @@ enum btc_wl_mode {
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4165,8 +5044,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
vif->type == NL80211_IFTYPE_STATION);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif->port);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], band=%d ch=%d bw=%d\n",
- hal->current_band_type, hal->current_channel,
- hal->current_band_width);
+ chan->band_type, chan->channel, chan->band_width);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], associated=%d\n",
state == BTC_ROLE_MSTS_STA_CONN_END);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -4205,9 +5083,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
r.connected = MLME_LINKED;
r.bcn_period = vif->bss_conf.beacon_int;
r.dtim_period = vif->bss_conf.dtim_period;
- r.band = hal->current_band_type;
- r.ch = hal->current_channel;
- r.bw = hal->current_band_width;
+ r.band = chan->band_type;
+ r.ch = chan->channel;
+ r.bw = chan->band_width;
ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
@@ -4218,7 +5096,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
memcpy(wlinfo, &r, sizeof(*wlinfo));
- _update_wl_info(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ _update_wl_info(rtwdev);
+ else
+ _update_wl_info_v1(rtwdev);
if (wlinfo->role == RTW89_WIFI_ROLE_STATION &&
wlinfo->connected == MLME_NO_LINK)
@@ -4240,6 +5121,7 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ u32 val;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): rf_state = %d\n",
__func__, rf_state);
@@ -4249,10 +5131,12 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
case BTC_RFCTRL_WL_OFF:
wl->status.map.rf_off = 1;
wl->status.map.lps = BTC_LPS_OFF;
+ wl->status.map.busy = 0;
break;
case BTC_RFCTRL_FW_CTRL:
wl->status.map.rf_off = 0;
wl->status.map.lps = BTC_LPS_RF_OFF;
+ wl->status.map.busy = 0;
break;
case BTC_RFCTRL_WL_ON:
default:
@@ -4262,14 +5146,17 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
}
if (rf_state == BTC_RFCTRL_WL_ON) {
+ btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
rtw89_btc_fw_en_rpt(rtwdev,
RPT_EN_MREG | RPT_EN_BT_VER_INFO, true);
- _write_scbd(rtwdev, BTC_WSCB_ACTIVE, true);
+ val = BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG;
+ _write_scbd(rtwdev, val, true);
_update_bt_scbd(rtwdev, true);
chip->ops->btc_init_cfg(rtwdev);
} else {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, false);
- _write_scbd(rtwdev, BTC_WSCB_ACTIVE | BTC_WSCB_WLBUSY, false);
+ if (rf_state == BTC_RFCTRL_WL_OFF)
+ _write_scbd(rtwdev, BTC_WSCB_ALL, false);
}
_run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
@@ -4609,10 +5496,10 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n",
chip->chip_id);
- ver_main = FIELD_GET(GENMASK(31, 24), chip->para_ver);
- ver_sub = FIELD_GET(GENMASK(23, 16), chip->para_ver);
- ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->para_ver);
- id_branch = FIELD_GET(GENMASK(7, 0), chip->para_ver);
+ ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION);
+ ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), RTW89_COEX_VERSION);
+ id_branch = FIELD_GET(GENMASK(7, 0), RTW89_COEX_VERSION);
seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ",
"[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch);
@@ -4726,23 +5613,29 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
return;
seq_puts(m, "========== [WL Status] ==========\n");
- seq_printf(m, " %-15s : link_mode:%d, ",
- "[status]", (u32)wl_rinfo->link_mode);
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ seq_printf(m, " %-15s : link_mode:%d, ", "[status]", mode);
seq_printf(m,
- "rf_off:%s, power_save:%s, scan:%s(band:%d/phy_map:0x%x), ",
- wl->status.map.rf_off ? "Y" : "N",
- wl->status.map.lps ? "Y" : "N",
+ "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
+ wl->status.map.rf_off, wl->status.map.lps,
wl->status.map.scan ? "Y" : "N",
wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
@@ -4908,6 +5801,7 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_ACT_STR(e) case BTC_ACT_ ## e | BTC_ACT_EXT_BIT: return #e
#define CASE_BTC_POLICY_STR(e) \
case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
+#define CASE_BTC_SLOT_STR(e) case CXST_ ## e: return #e
static const char *steps_to_str(u16 step)
{
@@ -4969,9 +5863,16 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(OFF_EQ3);
CASE_BTC_POLICY_STR(OFF_BWB0);
CASE_BTC_POLICY_STR(OFF_BWB1);
+ CASE_BTC_POLICY_STR(OFF_BWB2);
+ CASE_BTC_POLICY_STR(OFF_BWB3);
CASE_BTC_POLICY_STR(OFFB_BWB0);
CASE_BTC_POLICY_STR(OFFE_DEF);
CASE_BTC_POLICY_STR(OFFE_DEF2);
+ CASE_BTC_POLICY_STR(OFFE_2GBWISOB);
+ CASE_BTC_POLICY_STR(OFFE_2GISOB);
+ CASE_BTC_POLICY_STR(OFFE_2GBWMIXB);
+ CASE_BTC_POLICY_STR(OFFE_WL);
+ CASE_BTC_POLICY_STR(OFFE_2GBWMIXB2);
CASE_BTC_POLICY_STR(FIX_TD3030);
CASE_BTC_POLICY_STR(FIX_TD5050);
CASE_BTC_POLICY_STR(FIX_TD2030);
@@ -4982,6 +5883,7 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(FIX_TD2080);
CASE_BTC_POLICY_STR(FIX_TDW1B1);
CASE_BTC_POLICY_STR(FIX_TD4020);
+ CASE_BTC_POLICY_STR(FIX_TD4010ISO);
CASE_BTC_POLICY_STR(PFIX_TD3030);
CASE_BTC_POLICY_STR(PFIX_TD5050);
CASE_BTC_POLICY_STR(PFIX_TD2030);
@@ -4989,13 +5891,13 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(PFIX_TD3070);
CASE_BTC_POLICY_STR(PFIX_TD2080);
CASE_BTC_POLICY_STR(PFIX_TDW1B1);
- CASE_BTC_POLICY_STR(AUTO_TD50200);
- CASE_BTC_POLICY_STR(AUTO_TD60200);
- CASE_BTC_POLICY_STR(AUTO_TD20200);
+ CASE_BTC_POLICY_STR(AUTO_TD50B1);
+ CASE_BTC_POLICY_STR(AUTO_TD60B1);
+ CASE_BTC_POLICY_STR(AUTO_TD20B1);
CASE_BTC_POLICY_STR(AUTO_TDW1B1);
- CASE_BTC_POLICY_STR(PAUTO_TD50200);
- CASE_BTC_POLICY_STR(PAUTO_TD60200);
- CASE_BTC_POLICY_STR(PAUTO_TD20200);
+ CASE_BTC_POLICY_STR(PAUTO_TD50B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD60B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD20B1);
CASE_BTC_POLICY_STR(PAUTO_TDW1B1);
CASE_BTC_POLICY_STR(AUTO2_TD3050);
CASE_BTC_POLICY_STR(AUTO2_TD3070);
@@ -5014,6 +5916,32 @@ static const char *steps_to_str(u16 step)
}
}
+static const char *id_to_slot(u32 id)
+{
+ switch (id) {
+ CASE_BTC_SLOT_STR(OFF);
+ CASE_BTC_SLOT_STR(B2W);
+ CASE_BTC_SLOT_STR(W1);
+ CASE_BTC_SLOT_STR(W2);
+ CASE_BTC_SLOT_STR(W2B);
+ CASE_BTC_SLOT_STR(B1);
+ CASE_BTC_SLOT_STR(B2);
+ CASE_BTC_SLOT_STR(B3);
+ CASE_BTC_SLOT_STR(B4);
+ CASE_BTC_SLOT_STR(LK);
+ CASE_BTC_SLOT_STR(BLK);
+ CASE_BTC_SLOT_STR(E2G);
+ CASE_BTC_SLOT_STR(E5G);
+ CASE_BTC_SLOT_STR(EBT);
+ CASE_BTC_SLOT_STR(ENULL);
+ CASE_BTC_SLOT_STR(WLK);
+ CASE_BTC_SLOT_STR(W1FDD);
+ CASE_BTC_SLOT_STR(B1FDD);
+ default:
+ return "unknown";
+ }
+}
+
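id_to_slot() is generated from the CASE_BTC_SLOT_STR macro defined above, which pastes the CXST_ prefix onto the argument for the case label and stringizes the same argument for the return value. A minimal standalone sketch of that pattern, with a hypothetical enum and prefix (not the driver's):

#include <stdio.h>

#define CASE_SLOT_STR(e) case SLOT_ ## e: return #e	/* same shape as CASE_BTC_SLOT_STR */

enum demo_slot { SLOT_OFF, SLOT_W1, SLOT_B1 };		/* invented for illustration */

static const char *slot_name(enum demo_slot id)
{
	switch (id) {
	CASE_SLOT_STR(OFF);
	CASE_SLOT_STR(W1);
	CASE_SLOT_STR(B1);
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", slot_name(SLOT_W1));	/* prints "W1" */
	return 0;
}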
static
void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
@@ -5105,21 +6033,31 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
(bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
seq_printf(m,
- " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU\n",
+ " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
"[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time,
- dm->wl_tx_limit.tx_retry, btc->bt_req_len);
+ dm->wl_tx_limit.tx_retry, btc->bt_req_len, bt->scan_rx_low_pri);
}
static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_cysta *pcysta = NULL;
-
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ struct rtw89_btc_fbtc_cysta *pcysta;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1;
+ u32 except_cnt, exception_map;
+
+ if (chip->chip_id == RTL8852A) {
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ except_cnt = le32_to_cpu(pcysta->except_cnt);
+ exception_map = le32_to_cpu(pcysta->exception);
+ } else {
+ pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ except_cnt = le32_to_cpu(pcysta_v1->except_cnt);
+ exception_map = le32_to_cpu(pcysta_v1->except_map);
+ }
- if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 &&
- pcysta->except_cnt == 0 &&
+ if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
!pfwinfo->len_mismch && !pfwinfo->fver_mismch)
return;
@@ -5144,16 +6082,17 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
}
/* cycle statistics exceptions */
- if (pcysta->exception || pcysta->except_cnt) {
+ if (exception_map || except_cnt) {
seq_printf(m,
"exception-type: 0x%x, exception-cnt = %d",
- pcysta->exception, pcysta->except_cnt);
+ exception_map, except_cnt);
}
seq_puts(m, "\n");
}
static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
@@ -5166,7 +6105,10 @@ static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ if (chip->chip_id == RTL8852A)
+ t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ else
+ t = &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma;
seq_printf(m,
" %-15s : ", "[tdma_policy]");
@@ -5369,12 +6311,145 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
}
}
+static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_a2dp_trx_stat *a2dp_trx;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u8 i, cnt = 0, slot_pair, divide_cnt;
+ u16 cycle, c_begin, c_end, store_index;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ seq_printf(m,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le32_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+
+ seq_printf(m, ", %s:%d", id_to_slot(i),
+ le32_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d", le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (le32_to_cpu(pcysta->collision_cnt))
+ seq_printf(m, ", collision:%d", le32_to_cpu(pcysta->collision_cnt));
+
+ if (le32_to_cpu(pcysta->skip_cnt))
+ seq_printf(m, ", skip:%d", le32_to_cpu(pcysta->skip_cnt));
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ seq_printf(m,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+
+ cycle = le16_to_cpu(pcysta->cycles);
+ if (cycle == 0)
+ return;
+
+ /* 1 cycle records 1 wl-slot and 1 bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (cycle <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = cycle - slot_pair + 1;
+
+ c_end = cycle;
+
+ if (a2dp->exist)
+ divide_cnt = 3;
+ else
+ divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ store_index = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1) {
+ seq_printf(m, "\n\r %-15s : ", "[cycle_step]");
+ } else {
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ }
+ if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
+ seq_puts(m, "\n");
+ }
+
+ if (a2dp->exist) {
+ seq_printf(m, "%-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+
+ seq_puts(m, "\n");
+ }
+}
+
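The per-cycle dump above keeps only the newest BTC_CYCLE_SLOT_MAX/2 wl/bt slot pairs: each cycle maps to a pair of u16 entries via store_index = ((cycle - 1) % slot_pair) * 2, and c_begin is pulled back so at most slot_pair cycles are printed. A standalone sketch of that ring indexing, with an arbitrary cycle count:

#include <stdio.h>

#define CYCLE_SLOT_MAX 48	/* mirrors BTC_CYCLE_SLOT_MAX (even, non-zero) */

int main(void)
{
	unsigned short slot_pair = CYCLE_SLOT_MAX / 2;	/* 1 cycle = 1 bt-slot + 1 wl-slot */
	unsigned short cycles = 60;			/* hypothetical total cycle count */
	unsigned short c_begin, cycle;

	/* only the most recent slot_pair cycles are still held in the report */
	c_begin = (cycles <= slot_pair) ? 1 : cycles - slot_pair + 1;

	for (cycle = c_begin; cycle <= cycles; cycle++) {
		unsigned short store_index = ((cycle - 1) % slot_pair) * 2;

		printf("cycle %2u -> bt step[%2u], wl step[%2u]\n",
		       cycle, store_index, store_index + 1);
	}
	return 0;
}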
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_cynullsta *ns = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_fbtc_cynullsta *ns;
+ struct rtw89_btc_fbtc_cynullsta_v1 *ns_v1;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
@@ -5384,25 +6459,58 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ if (chip->chip_id == RTL8852A) {
+ ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
- seq_printf(m, " %-15s : ", "[null_sta]");
+ seq_printf(m, " %-15s : ", "[null_sta]");
- for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[ok:%d/", le32_to_cpu(ns->result[i][1]));
- seq_printf(m, "fail:%d/", le32_to_cpu(ns->result[i][0]));
- seq_printf(m, "on_time:%d/", le32_to_cpu(ns->result[i][2]));
- seq_printf(m, "retry:%d/", le32_to_cpu(ns->result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->avg_t[i]) / 1000,
- le32_to_cpu(ns->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns->max_t[i]) / 1000,
- le32_to_cpu(ns->max_t[i]) % 1000);
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns->result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns->result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns->result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->avg_t[i]) / 1000,
+ le32_to_cpu(ns->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns->max_t[i]) / 1000,
+ le32_to_cpu(ns->max_t[i]) % 1000);
+ }
+ } else {
+ ns_v1 = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
+
+ seq_printf(m, " %-15s : ", "[null_sta]");
+
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[Tx:%d/",
+ le32_to_cpu(ns_v1->result[i][4]));
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns_v1->result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns_v1->result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns_v1->result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns_v1->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns_v1->avg_t[i]) / 1000,
+ le32_to_cpu(ns_v1->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns_v1->max_t[i]) / 1000,
+ le32_to_cpu(ns_v1->max_t[i]) % 1000);
+ }
}
seq_puts(m, "\n");
}
@@ -5478,6 +6586,7 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
@@ -5486,11 +6595,57 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_error(rtwdev, m);
_show_fbtc_tdma(rtwdev, m);
_show_fbtc_slots(rtwdev, m);
- _show_fbtc_cysta(rtwdev, m);
+
+ if (chip->chip_id == RTL8852A)
+ _show_fbtc_cysta(rtwdev, m);
+ else
+ _show_fbtc_cysta_v1(rtwdev, m);
+
_show_fbtc_nullsta(rtwdev, m);
_show_fbtc_step(rtwdev, m);
}
+static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_ax_gnt *gnt;
+ u32 val, status;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) {
+ rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
+ rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
+
+ gnt = &gnt_cfg->band[0];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S0_SW_CTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S0_STA);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S0_SW_CTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S0_STA);
+
+ gnt = &gnt_cfg->band[1];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S1_SW_CTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S1_STA);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S1_SW_CTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S1_STA);
+ } else if (chip->chip_id == RTL8852C) {
+ val = rtw89_read32(rtwdev, R_AX_GNT_SW_CTRL);
+ status = rtw89_read32(rtwdev, R_AX_GNT_VAL_V1);
+
+ gnt = &gnt_cfg->band[0];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S0_SWCTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S0);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S0_SWCTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S0);
+
+ gnt = &gnt_cfg->band[1];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S1_SWCTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S1);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S1_SWCTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S1);
+ } else {
+ return;
+ }
+}
+
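For each RF path, _get_gnt() reads one software-control register and one status register and folds the relevant bit into a 0/1 field with the double-negation idiom. A standalone sketch of that normalization, with invented register values and bit positions:

#include <stdio.h>
#include <stdint.h>

#define GNT_BT_S0_SW_CTRL	(1u << 0)	/* hypothetical bit layout */
#define GNT_BT_S0_STA		(1u << 4)

int main(void)
{
	uint32_t cfg = 0x00000001;	/* pretend software-control register readout */
	uint32_t status = 0x00000010;	/* pretend status register readout */
	uint8_t gnt_bt_sw_en, gnt_bt;

	/* !! collapses "any bit set" into exactly 0 or 1 before storing in a u8 */
	gnt_bt_sw_en = !!(cfg & GNT_BT_S0_SW_CTRL);
	gnt_bt = !!(status & GNT_BT_S0_STA);

	printf("sw_en=%u gnt_bt=%u\n", gnt_bt_sw_en, gnt_bt);
	return 0;
}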
static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5502,7 +6657,8 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_mac_ax_gnt gnt[2] = {0};
+ struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
+ struct rtw89_mac_ax_gnt gnt;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
@@ -5519,45 +6675,28 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
/* To avoid I/O if WL LPS or power-off */
if (!wl->status.map.lps && !wl->status.map.rf_off) {
- rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
- if (val & (B_AX_GNT_BT_RFC_S0_SW_VAL |
- B_AX_GNT_BT_BB_S0_SW_VAL))
- gnt[0].gnt_bt = true;
- if (val & (B_AX_GNT_BT_RFC_S0_SW_CTRL |
- B_AX_GNT_BT_BB_S0_SW_CTRL))
- gnt[0].gnt_bt_sw_en = true;
- if (val & (B_AX_GNT_WL_RFC_S0_SW_VAL |
- B_AX_GNT_WL_BB_S0_SW_VAL))
- gnt[0].gnt_wl = true;
- if (val & (B_AX_GNT_WL_RFC_S0_SW_CTRL |
- B_AX_GNT_WL_BB_S0_SW_CTRL))
- gnt[0].gnt_wl_sw_en = true;
-
- if (val & (B_AX_GNT_BT_RFC_S1_SW_VAL |
- B_AX_GNT_BT_BB_S1_SW_VAL))
- gnt[1].gnt_bt = true;
- if (val & (B_AX_GNT_BT_RFC_S1_SW_CTRL |
- B_AX_GNT_BT_BB_S1_SW_CTRL))
- gnt[1].gnt_bt_sw_en = true;
- if (val & (B_AX_GNT_WL_RFC_S1_SW_VAL |
- B_AX_GNT_WL_BB_S1_SW_VAL))
- gnt[1].gnt_wl = true;
- if (val & (B_AX_GNT_WL_RFC_S1_SW_CTRL |
- B_AX_GNT_WL_BB_S1_SW_CTRL))
- gnt[1].gnt_wl_sw_en = true;
+ if (chip->chip_id == RTL8852A)
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ else if (chip->chip_id == RTL8852C)
+ btc->dm.pta_owner = 0;
+ _get_gnt(rtwdev, &gnt_cfg);
+ gnt = gnt_cfg.band[0];
seq_printf(m,
" %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
"[gnt_status]",
- (rtw89_mac_get_ctrl_path(rtwdev) ? "WL" : "BT"),
- (gnt[0].gnt_wl_sw_en ? "SW" : "HW"), gnt[0].gnt_wl,
- (gnt[0].gnt_bt_sw_en ? "SW" : "HW"), gnt[0].gnt_bt);
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ gnt = gnt_cfg.band[1];
seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- (gnt[1].gnt_wl_sw_en ? "SW" : "HW"), gnt[1].gnt_wl,
- (gnt[1].gnt_bt_sw_en ? "SW" : "HW"), gnt[1].gnt_bt);
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
}
-
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -5714,8 +6853,121 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_puts(m, "========== [Statistics] ==========\n");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo_v1;
+
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
+
+ seq_printf(m,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le32_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en),
+ dm->error.val);
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ seq_printf(m,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
+
+ if (le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
+ } else {
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ seq_puts(m, " (WL FW report invalid!!)\n");
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
@@ -5746,5 +6998,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_info(rtwdev, m);
_show_fw_dm_msg(rtwdev, m);
_show_mreg(rtwdev, m);
- _show_summary(rtwdev, m);
+ if (chip->chip_id == RTL8852A)
+ _show_summary(rtwdev, m);
+ else
+ _show_summary_v1(rtwdev, m);
}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index c3a722d259d7..ca16afa97ec0 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -162,17 +162,19 @@ void rtw89_coex_act1_work(struct work_struct *work);
void rtw89_coex_bt_devinfo_work(struct work_struct *work);
void rtw89_coex_rfk_chk_work(struct work_struct *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
+void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
+void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
enum rtw89_rf_path_bit paths)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 phy_map;
phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) |
FIELD_PREP(BTC_RFK_PHY_MAP, BIT(phy_idx)) |
- FIELD_PREP(BTC_RFK_BAND_MAP, hal->current_band_type);
+ FIELD_PREP(BTC_RFK_BAND_MAP, chan->band_type);
return phy_map;
}
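rtw89_btc_phymap() composes a one-byte map from three bitfields with FIELD_PREP(): the RF-path bitmap, a BIT(phy_idx) phy bitmap, and the band taken from the current channel. A standalone sketch of the same packing with the masks open-coded (field widths and positions here are illustrative, not the driver's BTC_RFK_*_MAP values):

#include <stdio.h>
#include <stdint.h>

#define PATH_MAP_SHIFT	0	/* assume bits 3:0 */
#define PHY_MAP_SHIFT	4	/* assume bits 5:4 */
#define BAND_MAP_SHIFT	6	/* assume bits 7:6 */

int main(void)
{
	uint8_t paths = 0x3;	/* e.g. RF path A + B */
	uint8_t phy_idx = 0;	/* PHY 0 */
	uint8_t band = 1;	/* e.g. 5 GHz */

	/* the driver does this with FIELD_PREP() against GENMASK() masks */
	uint8_t phy_map = (uint8_t)((paths << PATH_MAP_SHIFT) |
				    ((1u << phy_idx) << PHY_MAP_SHIFT) |
				    (band << BAND_MAP_SHIFT));

	printf("phy_map = 0x%02x\n", phy_map);
	return 0;
}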
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a5880a54812e..bc2994865372 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -5,6 +5,7 @@
#include <linux/udp.h>
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "core.h"
#include "efuse.h"
@@ -224,18 +225,22 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
- struct rtw89_channel_params *chan_param)
+void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef)
+{
+ cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0],
+ NL80211_CHAN_NO_HT);
+}
+
+static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
+ struct rtw89_chan *chan)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
- u8 primary_chan_idx = 0;
u32 offset;
u8 band;
- u8 subband;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
@@ -245,15 +250,12 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW89_CHANNEL_WIDTH_20;
- primary_chan_idx = RTW89_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW89_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
- primary_chan_idx = RTW89_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = RTW89_SC_20_LOWER;
center_chan += 2;
}
break;
@@ -262,11 +264,9 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
bandwidth = nl_to_rtw89_bandwidth(width);
if (primary_freq > center_freq) {
offset = (primary_freq - center_freq - 10) / 20;
- primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
center_chan -= 2 + offset * 4;
} else {
offset = (center_freq - primary_freq - 10) / 20;
- primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
center_chan += 2 + offset * 4;
}
break;
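The center-channel arithmetic above starts from the primary 20 MHz channel number (channel->hw_value) and walks it to the wide channel's center: channel numbers are spaced 5 MHz apart, so the fixed 10 MHz half-slot costs 2 numbers and every further 20 MHz of offset costs 4. A standalone check of that math for an example 80 MHz chandef (channel 36 primary, centered on channel 42):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t primary_freq = 5180;	/* channel 36 */
	uint32_t center_freq = 5210;	/* channel 42, 80 MHz block 36-48 */
	uint8_t center_chan = 36;	/* starts as the primary channel number */
	uint32_t offset;

	if (primary_freq > center_freq) {
		offset = (primary_freq - center_freq - 10) / 20;
		center_chan -= 2 + offset * 4;
	} else {
		offset = (center_freq - primary_freq - 10) / 20;
		center_chan += 2 + offset * 4;
	}

	printf("center channel = %u\n", center_chan);	/* prints 42 */
	return 0;
}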
@@ -288,110 +288,76 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
break;
}
- switch (band) {
- default:
- case RTW89_BAND_2G:
- switch (center_chan) {
- default:
- case 1 ... 14:
- subband = RTW89_CH_2G;
- break;
- }
- break;
- case RTW89_BAND_5G:
- switch (center_chan) {
- default:
- case 36 ... 64:
- subband = RTW89_CH_5G_BAND_1;
- break;
- case 100 ... 144:
- subband = RTW89_CH_5G_BAND_3;
- break;
- case 149 ... 177:
- subband = RTW89_CH_5G_BAND_4;
- break;
- }
- break;
- case RTW89_BAND_6G:
- switch (center_chan) {
- default:
- case 1 ... 29:
- subband = RTW89_CH_6G_BAND_IDX0;
- break;
- case 33 ... 61:
- subband = RTW89_CH_6G_BAND_IDX1;
- break;
- case 65 ... 93:
- subband = RTW89_CH_6G_BAND_IDX2;
- break;
- case 97 ... 125:
- subband = RTW89_CH_6G_BAND_IDX3;
- break;
- case 129 ... 157:
- subband = RTW89_CH_6G_BAND_IDX4;
- break;
- case 161 ... 189:
- subband = RTW89_CH_6G_BAND_IDX5;
- break;
- case 193 ... 221:
- subband = RTW89_CH_6G_BAND_IDX6;
- break;
- case 225 ... 253:
- subband = RTW89_CH_6G_BAND_IDX7;
- break;
- }
- break;
- }
+ rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth);
+}
- chan_param->center_chan = center_chan;
- chan_param->center_freq = center_freq;
- chan_param->primary_chan = channel->hw_value;
- chan_param->bandwidth = bandwidth;
- chan_param->pri_ch_idx = primary_chan_idx;
- chan_param->band_type = band;
- chan_param->subband_type = subband;
+void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_phy_idx phy_idx;
+ enum rtw89_entity_mode mode;
+ bool entity_active;
+
+ entity_active = rtw89_get_entity_state(rtwdev);
+ if (!entity_active)
+ return;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
+ return;
+
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ phy_idx = RTW89_PHY_0;
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ if (chip->ops->set_txpwr)
+ chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
void rtw89_set_channel(struct rtw89_dev *rtwdev)
{
- struct ieee80211_hw *hw = rtwdev->hw;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_channel_params ch_param;
+ const struct cfg80211_chan_def *chandef;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_mac_idx mac_idx;
+ enum rtw89_phy_idx phy_idx;
+ struct rtw89_chan chan;
struct rtw89_channel_help_params bak;
- u8 center_chan, bandwidth;
+ enum rtw89_entity_mode mode;
bool band_changed;
+ bool entity_active;
- rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
- if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
+ entity_active = rtw89_get_entity_state(rtwdev);
+
+ mode = rtw89_entity_recalc(rtwdev);
+ if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
return;
- center_chan = ch_param.center_chan;
- bandwidth = ch_param.bandwidth;
- band_changed = hal->current_band_type != ch_param.band_type ||
- hal->current_channel == 0;
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ mac_idx = RTW89_MAC_0;
+ phy_idx = RTW89_PHY_0;
+ chandef = rtw89_chandef_get(rtwdev, sub_entity_idx);
+ rtw89_get_channel_params(chandef, &chan);
+ if (WARN(chan.channel == 0, "Invalid channel\n"))
+ return;
- hal->current_band_width = bandwidth;
- hal->current_channel = center_chan;
- hal->current_freq = ch_param.center_freq;
- hal->prev_primary_channel = hal->current_primary_channel;
- hal->prev_band_type = hal->current_band_type;
- hal->current_primary_channel = ch_param.primary_chan;
- hal->current_band_type = ch_param.band_type;
- hal->current_subband = ch_param.subband_type;
+ band_changed = rtw89_assign_entity_chan(rtwdev, sub_entity_idx, &chan);
- rtw89_chip_set_channel_prepare(rtwdev, &bak);
+ rtw89_chip_set_channel_prepare(rtwdev, &bak, &chan, mac_idx, phy_idx);
- chip->ops->set_channel(rtwdev, &ch_param);
+ chip->ops->set_channel(rtwdev, &chan, mac_idx, phy_idx);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
- rtw89_chip_set_channel_done(rtwdev, &bak);
+ rtw89_chip_set_channel_done(rtwdev, &bak, &chan, mac_idx, phy_idx);
- if (band_changed) {
- rtw89_btc_ntfy_switch_band(rtwdev, RTW89_PHY_0, hal->current_band_type);
- rtw89_chip_rfk_band_changed(rtwdev);
+ if (!entity_active || band_changed) {
+ rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan.band_type);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
}
+
+ rtw89_set_entity_state(rtwdev, true);
}
static enum rtw89_core_tx_type
@@ -529,9 +495,15 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = tx_info->control.vif;
- struct rtw89_hal *hal = &rtwdev->hal;
- u16 lowest_rate = hal->current_band_type == RTW89_BAND_2G ?
- RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u16 lowest_rate;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)
return lowest_rate;
@@ -546,6 +518,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 qsel, ch_dma;
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
@@ -564,9 +537,9 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req);
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
- "tx mgmt frame with rate 0x%x on channel %d (bw %d)\n",
- desc_info->data_rate, rtwdev->hal.current_channel,
- rtwdev->hal.current_band_width);
+ "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
+ desc_info->data_rate, chan->channel, chan->band_type,
+ chan->band_width);
}
static void
@@ -591,15 +564,16 @@ static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc
};
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 om_bandwidth;
if (!chip->dis_2g_40m_ul_ofdma ||
- hal->current_band_type != RTW89_BAND_2G ||
- hal->current_band_width != RTW89_CHANNEL_WIDTH_40)
+ chan->band_type != RTW89_BAND_2G ||
+ chan->band_width != RTW89_CHANNEL_WIDTH_40)
return;
- om_bandwidth = hal->current_band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
- rtw89_bandwidth_to_om[hal->current_band_width] : 0;
+ om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
+ rtw89_bandwidth_to_om[chan->band_width] : 0;
*htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) |
@@ -617,6 +591,7 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
enum btc_pkt_type pkt_type)
{
struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct sk_buff *skb = tx_req->skb;
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
@@ -634,6 +609,9 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
return false;
+ if (rtwsta && rtwsta->ra_report.might_fallback_legacy)
+ return false;
+
return true;
}
@@ -713,7 +691,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 tid, tid_indicate;
@@ -736,9 +714,11 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
if (IEEE80211_SKB_CB(skb)->control.hw_key)
rtw89_core_tx_update_sec_key(rtwdev, tx_req);
- if (rate_pattern->enable)
+ if (vif->p2p)
+ desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (rate_pattern->enable)
desc_info->data_retry_lowest_rate = rate_pattern->rate;
- else if (hal->current_band_type == RTW89_BAND_2G)
+ else if (chan->band_type == RTW89_BAND_2G)
desc_info->data_retry_lowest_rate = RTW89_HW_RATE_CCK1;
else
desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
@@ -796,13 +776,16 @@ static void
rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
return;
if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
return;
- if (tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
+ if (chip->chip_id != RTL8852C &&
+ tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
return;
rtw89_mac_notify_wake(rtwdev);
@@ -872,6 +855,7 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_FW,
"ignore h2c due to power is off with firmware state=%d\n",
test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags));
+ dev_kfree_skb(skb);
return 0;
}
@@ -1021,7 +1005,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
}
@@ -1171,9 +1156,14 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ int i;
- if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self)
+ if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self) {
ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
+ }
}
#define VAR_LEN 0xff
@@ -1229,15 +1219,15 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, u8 *addr,
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- s8 *rssi = phy_ppdu->rssi;
+ u8 *rssi = phy_ppdu->rssi;
u8 *buf = phy_ppdu->buf;
phy_ppdu->ie = RTW89_GET_PHY_STS_IE_MAP(buf);
phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf);
- rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf));
- rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf));
- rssi[RF_PATH_C] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_C(buf));
- rssi[RF_PATH_D] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_D(buf));
+ rssi[RF_PATH_A] = RTW89_GET_PHY_STS_RSSI_A(buf);
+ rssi[RF_PATH_B] = RTW89_GET_PHY_STS_RSSI_B(buf);
+ rssi[RF_PATH_C] = RTW89_GET_PHY_STS_RSSI_C(buf);
+ rssi[RF_PATH_D] = RTW89_GET_PHY_STS_RSSI_D(buf);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
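rtw89_core_update_phy_ppdu() now stores the raw per-path RSSI values; converting to dBm is left to the readers via RTW89_RSSI_RAW_TO_DBM, which core.h defines with RSSI_FACTOR 1 and MAX_RSSI 110. A standalone check of that conversion, using an arbitrary raw sample:

#include <stdio.h>

typedef signed char s8;

#define RSSI_FACTOR 1
#define MAX_RSSI 110
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)

int main(void)
{
	unsigned char raw = 140;	/* example raw PHY status report */

	/* (140 >> 1) - 110 = -40 dBm */
	printf("%d dBm\n", RTW89_RSSI_RAW_TO_DBM(raw));
	return 0;
}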
@@ -1448,8 +1438,11 @@ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
- u16 chan = rtwdev->hal.prev_primary_channel;
- u8 band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ const struct rtw89_chan_rcd *rcd =
+ rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u16 chan = rcd->prev_primary_channel;
+ u8 band = rcd->prev_band_type == RTW89_BAND_2G ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
if (status->band != NL80211_BAND_2GHZ &&
status->encoding == RX_ENC_LEGACY &&
@@ -1661,19 +1654,20 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_hw *hw = rtwdev->hw;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct cfg80211_chan_def *chandef =
+ rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u16 data_rate;
u8 data_rate_mode;
/* currently using single PHY */
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = chandef->chan->center_freq;
+ rx_status->band = chandef->chan->band;
if (rtwdev->scanning &&
RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
- u8 chan = hal->current_primary_channel;
- u8 band = hal->current_band_type;
+ u8 chan = cur->primary_channel;
+ u8 band = cur->band_type;
enum nl80211_band nl_band;
nl_band = rtw89_hw_to_nl80211_band(band);
@@ -1727,7 +1721,8 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (rtw89_disable_ps_mode || !chip->ps_mode_supported)
+ if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
+ RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED))
@@ -1810,7 +1805,7 @@ void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
{
init_dummy_netdev(&rtwdev->netdev);
netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
- rtwdev->hci.ops->napi_poll, NAPI_POLL_WEIGHT);
+ rtwdev->hci.ops->napi_poll);
}
EXPORT_SYMBOL(rtw89_core_napi_init);
@@ -1907,21 +1902,14 @@ static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
return;
spin_lock_bh(&rtwdev->ba_lock);
- if (!list_empty(&rtwtxq->list)) {
- list_del_init(&rtwtxq->list);
- goto out;
- }
-
- set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ if (!test_and_set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
+ spin_unlock_bh(&rtwdev->ba_lock);
- list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
ieee80211_stop_tx_ba_session(sta, txq->tid);
cancel_delayed_work(&rtwdev->forbid_ba_work);
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
RTW89_FORBID_BA_TIMER);
-
-out:
- spin_unlock_bh(&rtwdev->ba_lock);
}
static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
@@ -1933,6 +1921,9 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta = txq->sta;
struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
+ if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ return;
+
if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
return;
@@ -1941,9 +1932,6 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
if (unlikely(!sta))
return;
- if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
- return;
-
if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))
return;
@@ -2179,12 +2167,13 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE)
- rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ rtw89_enter_lps(rtwdev, rtwvif);
}
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
@@ -2237,6 +2226,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_chip_rfk_track(rtwdev);
rtw89_phy_ra_update(rtwdev);
rtw89_phy_cfo_track(rtwdev);
+ rtw89_phy_tx_path_div_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -2266,45 +2256,69 @@ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
bitmap_zero(addr, nbits);
}
-int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
{
- struct rtw89_ba_cam_entry *entry;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_ba_cam_entry *entry = NULL, *tmp;
u8 idx;
+ int i;
- idx = rtw89_core_acquire_bit_map(rtwsta->ba_cam_map, RTW89_BA_CAM_NUM);
- if (idx == RTW89_BA_CAM_NUM) {
- /* allocate a static BA CAM to tid=0, so replace the existing
+ lockdep_assert_held(&rtwdev->mutex);
+
+ idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
+ if (idx == chip->bacam_num) {
+ /* allocate a static BA CAM to tid=0/5, so replace the existing
* one if BA CAM is full. Hardware will process the original tid
* automatically.
*/
- if (tid != 0)
+ if (tid != 0 && tid != 5)
return -ENOSPC;
- idx = 0;
+ for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) {
+ tmp = &cam_info->ba_cam_entry[i];
+ if (tmp->tid == 0 || tmp->tid == 5)
+ continue;
+
+ idx = i;
+ entry = tmp;
+ list_del(&entry->list);
+ break;
+ }
+
+ if (!entry)
+ return -ENOSPC;
+ } else {
+ entry = &cam_info->ba_cam_entry[idx];
}
- entry = &rtwsta->ba_cam_entry[idx];
entry->tid = tid;
+ list_add_tail(&entry->list, &rtwsta->ba_cam_list);
+
*cam_idx = idx;
return 0;
}
-int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
{
- struct rtw89_ba_cam_entry *entry;
- int i;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_ba_cam_entry *entry = NULL, *tmp;
+ u8 idx;
- for (i = 0; i < RTW89_BA_CAM_NUM; i++) {
- if (!test_bit(i, rtwsta->ba_cam_map))
- continue;
+ lockdep_assert_held(&rtwdev->mutex);
- entry = &rtwsta->ba_cam_entry[i];
+ list_for_each_entry_safe(entry, tmp, &rtwsta->ba_cam_list, list) {
if (entry->tid != tid)
continue;
- rtw89_core_release_bit_map(rtwsta->ba_cam_map, i);
- *cam_idx = i;
+ idx = entry - cam_info->ba_cam_entry;
+ list_del(&entry->list);
+
+ rtw89_core_release_bit_map(cam_info->ba_cam_map, idx);
+ *cam_idx = idx;
return 0;
}
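In the release path above, the BA CAM index is recovered by pointer subtraction (entry - cam_info->ba_cam_entry), which works because every entry now lives in the per-device ba_cam_entry array and the list nodes point back into it. A standalone sketch of that index recovery, with an arbitrary array size:

#include <stdio.h>
#include <stddef.h>

struct ba_cam_entry {
	unsigned char tid;
};

int main(void)
{
	struct ba_cam_entry cam[8] = { { .tid = 0 } };	/* hypothetical CAM array */
	struct ba_cam_entry *entry = &cam[5];		/* pretend this came off a list walk */

	/* element pointer minus array base yields the element's index */
	ptrdiff_t idx = entry - cam;

	printf("idx = %td\n", idx);	/* prints 5 */
	return 0;
}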
@@ -2320,9 +2334,19 @@ void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
+ else
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_STATION;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
+ else
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_AP;
+ break;
RTW89_TYPE_MAPPING(ADHOC);
- RTW89_TYPE_MAPPING(STATION);
- RTW89_TYPE_MAPPING(AP);
RTW89_TYPE_MAPPING(MONITOR);
RTW89_TYPE_MAPPING(MESH_POINT);
default:
@@ -2365,13 +2389,17 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int i;
+ rtwsta->rtwdev = rtwdev;
rtwsta->rtwvif = rtwvif;
rtwsta->prev_rssi = 0;
+ INIT_LIST_HEAD(&rtwsta->ba_cam_list);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw89_core_txq_init(rtwdev, sta->txq[i]);
ewma_rssi_init(&rtwsta->avg_rssi);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_rssi_init(&rtwsta->rssi[i]);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
@@ -2541,6 +2569,60 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
return 0;
}
+static void _rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_cfg *tid_conf)
+{
+ struct ieee80211_txq *txq;
+ struct rtw89_txq *rtwtxq;
+ u32 mask = tid_conf->mask;
+ u8 tids = tid_conf->tids;
+ int tids_nbit = BITS_PER_BYTE;
+ int i;
+
+ for (i = 0; i < tids_nbit; i++, tids >>= 1) {
+ if (!tids)
+ break;
+
+ if (!(tids & BIT(0)))
+ continue;
+
+ txq = sta->txq[i];
+ rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+
+ if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (tid_conf->ampdu == NL80211_TID_CONFIG_ENABLE) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ } else {
+ if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags))
+ ieee80211_stop_tx_ba_session(sta, txq->tid);
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_del_init(&rtwtxq->list);
+ set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ spin_unlock_bh(&rtwdev->ba_lock);
+ }
+ }
+
+ if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL) && tids == 0xff) {
+ if (tid_conf->amsdu == NL80211_TID_CONFIG_ENABLE)
+ sta->max_amsdu_subframes = 0;
+ else
+ sta->max_amsdu_subframes = 1;
+ }
+ }
+}
+
+void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ int i;
+
+ for (i = 0; i < tid_config->n_tid_conf; i++)
+ _rtw89_core_set_tid_config(rtwdev, sta,
+ &tid_config->tid_conf[i]);
+}
+
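The per-TID walk above shifts the tids bitmask right by one each iteration and stops as soon as no requested bits remain, so only the TIDs named in the cfg80211 request are touched. A standalone sketch of that iteration pattern, with an example bitmask:

#include <stdio.h>

int main(void)
{
	unsigned char tids = 0x05;	/* example: TID 0 and TID 2 requested */
	int i;

	for (i = 0; i < 8; i++, tids >>= 1) {
		if (!tids)		/* no requested TIDs left above this one */
			break;
		if (!(tids & 0x01))	/* this TID was not requested */
			continue;
		printf("configure TID %d\n", i);
	}
	return 0;
}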
static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
@@ -2669,8 +2751,7 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
he_cap->has_he = true;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
if (i == NL80211_IFTYPE_STATION)
mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
@@ -2706,6 +2787,8 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bw160)
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
phy_cap_info[5] = no_ng16 ? 0 :
IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
@@ -2866,7 +2949,9 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
/* efuse process */
/* pre-config BB/RF, BB reset/RFC reset */
- rtw89_chip_disable_bb_rf(rtwdev);
+ ret = rtw89_chip_disable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
ret = rtw89_chip_enable_bb_rf(rtwdev);
if (ret)
return ret;
@@ -2894,6 +2979,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable);
+ rtw89_fw_h2c_init_ba_cam(rtwdev);
return 0;
}
@@ -2987,6 +3073,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
return ret;
}
rtw89_ser_init(rtwdev);
+ rtw89_entity_init(rtwdev);
return 0;
}
@@ -3007,7 +3094,7 @@ EXPORT_SYMBOL(rtw89_core_deinit);
void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
const u8 *mac_addr, bool hw_scan)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
@@ -3015,7 +3102,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
rtw89_leave_ips(rtwdev);
ether_addr_copy(rtwvif->mac_addr, mac_addr);
- rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
+ rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
rtw89_chip_rfk_scan(rtwdev, true);
rtw89_hci_recalc_int_mit(rtwdev);
@@ -3141,6 +3228,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->vif_data_size = sizeof(struct rtw89_vif);
hw->sta_data_size = sizeof(struct rtw89_sta);
hw->txq_data_size = sizeof(struct rtw89_txq);
+ hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);
SET_IEEE80211_PERM_ADDR(hw, efuse->addr);
@@ -3148,6 +3236,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->queues = IEEE80211_NUM_ACS;
hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
+ hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -3164,17 +3253,26 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
- WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
+ WIPHY_FLAG_AP_UAPSD;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+ hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
+ hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
+
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
ret = rtw89_core_set_supported_band(rtwdev);
@@ -3234,6 +3332,63 @@ void rtw89_core_unregister(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_unregister);
+struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ u32 bus_data_size,
+ const struct rtw89_chip_info *chip)
+{
+ struct ieee80211_hw *hw;
+ struct rtw89_dev *rtwdev;
+ struct ieee80211_ops *ops;
+ u32 driver_data_size;
+ u32 early_feat_map = 0;
+ bool no_chanctx;
+
+ rtw89_early_fw_feature_recognize(device, chip, &early_feat_map);
+
+ ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL);
+ if (!ops)
+ goto err;
+
+ no_chanctx = chip->support_chanctx_num == 0 ||
+ !(early_feat_map & BIT(RTW89_FW_FEATURE_SCAN_OFFLOAD));
+
+ if (no_chanctx) {
+ ops->add_chanctx = NULL;
+ ops->remove_chanctx = NULL;
+ ops->change_chanctx = NULL;
+ ops->assign_vif_chanctx = NULL;
+ ops->unassign_vif_chanctx = NULL;
+ }
+
+ driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
+ hw = ieee80211_alloc_hw(driver_data_size, ops);
+ if (!hw)
+ goto err;
+
+ rtwdev = hw->priv;
+ rtwdev->hw = hw;
+ rtwdev->dev = device;
+ rtwdev->ops = ops;
+ rtwdev->chip = chip;
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ no_chanctx ? "without" : "with");
+
+ return rtwdev;
+
+err:
+ kfree(ops);
+ return NULL;
+}
+EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);
+
+void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->ops);
+ ieee80211_free_hw(rtwdev->hw);
+}
+EXPORT_SYMBOL(rtw89_free_ieee80211_hw);
+
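rtw89_alloc_ieee80211_hw() duplicates the shared rtw89_ops table per device so the chanctx callbacks can be cleared when the chip or firmware cannot support channel contexts; mac80211 then takes its non-chanctx path. A minimal standalone sketch of that per-instance ops copy, with invented types in place of ieee80211_ops and kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct demo_ops {			/* stand-in for ieee80211_ops */
	void (*start)(void);
	void (*add_chanctx)(void);
};

static void demo_start(void) { puts("start"); }
static void demo_add_chanctx(void) { puts("add_chanctx"); }

static const struct demo_ops template_ops = {
	.start = demo_start,
	.add_chanctx = demo_add_chanctx,
};

int main(void)
{
	bool no_chanctx = true;		/* e.g. firmware lacks scan offload */
	struct demo_ops *ops;

	/* per-device copy, like kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL) */
	ops = malloc(sizeof(*ops));
	if (!ops)
		return 1;
	memcpy(ops, &template_ops, sizeof(*ops));

	if (no_chanctx)
		ops->add_chanctx = NULL;	/* advertise no chanctx support */

	printf("chanctx supported: %s\n", ops->add_chanctx ? "yes" : "no");
	free(ops);
	return 0;
}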
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 7a9d6f5d8a51..db041b32a8c2 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -34,6 +34,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define MAX_RSSI 110
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
+#define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR)
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
@@ -522,7 +523,7 @@ struct rtw89_rx_phy_ppdu {
u8 *buf;
u32 len;
u8 rssi_avg;
- s8 rssi[RF_PATH_MAX];
+ u8 rssi[RF_PATH_MAX];
u8 mac_id;
u8 chan_idx;
u8 ie;
@@ -542,6 +543,12 @@ enum rtw89_phy_idx {
RTW89_PHY_MAX
};
+enum rtw89_sub_entity_idx {
+ RTW89_SUB_ENTITY_0 = 0,
+
+ NUM_OF_RTW89_SUB_ENTITY,
+};
+
enum rtw89_rf_path {
RF_PATH_A = 0,
RF_PATH_B = 1,
@@ -624,14 +631,23 @@ enum rtw89_sc_offset {
RTW89_SC_40_LOWER = 10,
};
-struct rtw89_channel_params {
- u8 center_chan;
- u32 center_freq;
- u8 primary_chan;
- u8 bandwidth;
- u8 pri_ch_idx;
- u8 band_type;
- u8 subband_type;
+struct rtw89_chan {
+ u8 channel;
+ u8 primary_channel;
+ enum rtw89_band band_type;
+ enum rtw89_bandwidth band_width;
+
+ /* The following fields are derived from the above. We must ensure
+ * that they are assigned correctly in rtw89_chan_create() if a new
+ * one is added.
+ */
+ u32 freq;
+ enum rtw89_subband subband_type;
+ enum rtw89_sc_offset pri_ch_idx;
+};
+
+struct rtw89_chan_rcd {
+ u8 prev_primary_channel;
+ enum rtw89_band prev_band_type;
};
struct rtw89_channel_help_params {
@@ -793,7 +809,7 @@ struct rtw89_mac_ax_gnt {
u8 gnt_bt;
u8 gnt_wl_sw_en;
u8 gnt_wl;
-};
+} __packed;
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
@@ -848,6 +864,7 @@ enum rtw89_btc_dcnt {
BTC_DCNT_SLOT_NONSYNC,
BTC_DCNT_BTCNT_FREEZE,
BTC_DCNT_WL_SLOT_DRIFT,
+ BTC_DCNT_BT_SLOT_DRIFT,
BTC_DCNT_WL_STA_LAST,
BTC_DCNT_NUM,
};
@@ -920,12 +937,12 @@ struct rtw89_btc_wl_smap {
u32 roaming: 1;
u32 _4way: 1;
u32 rf_off: 1;
- u32 lps: 1;
+ u32 lps: 2;
u32 ips: 1;
u32 init_ok: 1;
u32 traffic_dir : 2;
u32 rf_off_pre: 1;
- u32 lps_pre: 1;
+ u32 lps_pre: 2;
};
enum rtw89_tfc_lv {
@@ -1108,6 +1125,27 @@ struct rtw89_btc_wl_active_role {
u16 rx_rate;
};
+struct rtw89_btc_wl_active_role_v1 {
+ u8 connected: 1;
+ u8 pid: 3;
+ u8 phy: 1;
+ u8 noa: 1;
+ u8 band: 2;
+
+ u8 client_ps: 1;
+ u8 bw: 7;
+
+ u8 role;
+ u8 ch;
+
+ u16 tx_lvl;
+ u16 rx_lvl;
+ u16 tx_rate;
+ u16 rx_rate;
+
+ u32 noa_duration; /* ms */
+};
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1123,6 +1161,12 @@ struct rtw89_btc_wl_role_info_bpos {
u16 nan: 1;
};
+struct rtw89_btc_wl_scc_ctrl {
+ u8 null_role1;
+ u8 null_role2;
+ u8 ebt_null; /* if tx null at EBT slot */
+};
+
union rtw89_btc_wl_role_info_map {
u16 val;
struct rtw89_btc_wl_role_info_bpos role;
@@ -1135,6 +1179,21 @@ struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */
struct rtw89_btc_wl_active_role active_role[RTW89_PORT_NUM];
};
+struct rtw89_btc_wl_role_info_v1 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ union rtw89_btc_wl_role_info_map role_map;
+ struct rtw89_btc_wl_active_role_v1 active_role_v1[RTW89_PORT_NUM];
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+
+ u32 dbcc_en: 1;
+ u32 dbcc_chg: 1;
+ u32 dbcc_2g_phy: 2; /* which phy operates in 2G, HW_PHY_0 or HW_PHY_1 */
+ u32 link_mode_chg: 1;
+ u32 rsvd: 27;
+};
+
struct rtw89_btc_wl_ver_info {
u32 fw_coex; /* match with which coex_ver */
u32 fw;
@@ -1240,6 +1299,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
struct rtw89_btc_wl_role_info role_info;
+ struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
struct rtw89_btc_rf_para rf_para;
@@ -1248,6 +1308,7 @@ struct rtw89_btc_wl_info {
u8 port_id[RTW89_WIFI_ROLE_MLME_MAX];
u8 rssi_level;
+ bool scbd_change;
u32 scbd;
};
@@ -1333,7 +1394,8 @@ struct rtw89_btc_bt_info {
u32 pag: 1;
u32 run_patch_code: 1;
u32 hi_lna_rx: 1;
- u32 rsvd: 22;
+ u32 scan_rx_low_pri: 1;
+ u32 rsvd: 21;
};
struct rtw89_btc_cx {
@@ -1346,32 +1408,43 @@ struct rtw89_btc_cx {
};
struct rtw89_btc_fbtc_tdma {
- u8 type;
+ u8 type; /* chip_info::fcxtdma_ver */
u8 rxflctrl;
u8 txpause;
u8 wtgle_n;
u8 leak_n;
u8 ext_ctrl;
- u8 rsvd0;
- u8 rsvd1;
+ u8 rxflctrl_role;
+ u8 option_ctrl;
+} __packed;
+
+struct rtw89_btc_fbtc_tdma_v1 {
+ u8 fver; /* chip_info::fcxtdma_ver */
+ u8 rsvd;
+ __le16 rsvd1;
+ struct rtw89_btc_fbtc_tdma tdma;
} __packed;
#define CXMREG_MAX 30
#define FCXMAX_STEP 255 /*STEP trace record cnt, Max:65535, default:255*/
-#define BTCRPT_VER 1
#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */
-enum rtw89_btc_bt_rfk_counter {
+enum rtw89_btc_bt_sta_counter {
BTC_BCNT_RFK_REQ = 0,
BTC_BCNT_RFK_GO = 1,
BTC_BCNT_RFK_REJECT = 2,
BTC_BCNT_RFK_FAIL = 3,
BTC_BCNT_RFK_TIMEOUT = 4,
- BTC_BCNT_RFK_MAX
+ BTC_BCNT_HI_TX = 5,
+ BTC_BCNT_HI_RX = 6,
+ BTC_BCNT_LO_TX = 7,
+ BTC_BCNT_LO_RX = 8,
+ BTC_BCNT_POLLUTED = 9,
+ BTC_BCNT_STA_MAX
};
struct rtw89_btc_fbtc_rpt_ctrl {
- u16 fver;
+ u16 fver; /* chip_info::fcxbtcrpt_ver */
u16 rpt_cnt; /* tmr counters */
u32 wl_fw_coex_ver; /* match which driver's coex version */
u32 wl_fw_cx_offload;
@@ -1384,11 +1457,56 @@ struct rtw89_btc_fbtc_rpt_ctrl {
u32 mb_a2dp_empty_cnt; /* a2dp empty count */
u32 mb_a2dp_flct_cnt; /* a2dp empty flow control counter */
u32 mb_a2dp_full_cnt; /* a2dp empty full counter */
- u32 bt_rfk_cnt[BTC_BCNT_RFK_MAX];
+ u32 bt_rfk_cnt[BTC_BCNT_HI_TX];
u32 c2h_cnt; /* fw send c2h counter */
u32 h2c_cnt; /* fw recv h2c counter */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info {
+ __le32 cnt; /* fw report counter */
+ __le32 en; /* report map */
+ __le32 para; /* not used */
+
+ __le32 cnt_c2h; /* fw send c2h counter */
+ __le32 cnt_h2c; /* fw recv h2c counter */
+ __le32 len_c2h; /* The total length of the last C2H */
+
+ __le32 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le32 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 cx_offload;
+ __le32 fw_ver;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty {
+ __le32 cnt_empty; /* a2dp empty count */
+ __le32 cnt_flowctrl; /* a2dp empty flow control counter */
+ __le32 cnt_tx;
+ __le32 cnt_ack;
+ __le32 cnt_nack;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox {
+ __le32 cnt_send_ok; /* fw send mailbox ok counter */
+ __le32 cnt_send_fail; /* fw send mailbox fail counter */
+ __le32 cnt_recv; /* fw recv mailbox counter */
+ struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty a2dp;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd1;
+ struct rtw89_btc_fbtc_rpt_ctrl_info rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info wl_fw_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+ __le32 bt_cnt[BTC_BCNT_STA_MAX];
+ struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
+} __packed;
+
enum rtw89_fbtc_ext_ctrl_type {
CXECTL_OFF = 0x0, /* tdma off */
CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */
@@ -1457,10 +1575,9 @@ enum { /* STEP TYPE */
CXSTEP_MAX,
};
-#define FCXGPIODBG_VER 1
#define BTC_DBG_MAX1 32
struct rtw89_btc_fbtc_gpio_dbg {
- u8 fver;
+ u8 fver; /* chip_info::fcxgpiodbg_ver */
u8 rsvd;
u16 rsvd2;
u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */
@@ -1468,9 +1585,8 @@ struct rtw89_btc_fbtc_gpio_dbg {
u8 gpio_map[BTC_DBG_MAX1]; /*the debug signals to GPIO-Position */
} __packed;
-#define FCXMREG_VER 1
struct rtw89_btc_fbtc_mreg_val {
- u8 fver;
+ u8 fver; /* chip_info::fcxmreg_ver */
u8 reg_num;
__le16 rsvd;
__le32 mreg_val[CXMREG_MAX];
@@ -1492,16 +1608,14 @@ struct rtw89_btc_fbtc_slot {
__le16 cxtype;
} __packed;
-#define FCXSLOTS_VER 1
struct rtw89_btc_fbtc_slots {
- u8 fver;
+ u8 fver; /* chip_info::fcxslots_ver */
u8 tbl_num;
__le16 rsvd;
__le32 update_map;
struct rtw89_btc_fbtc_slot slot[CXST_MAX];
} __packed;
-#define FCXSTEP_VER 2
struct rtw89_btc_fbtc_step {
u8 type;
u8 val;
@@ -1509,7 +1623,7 @@ struct rtw89_btc_fbtc_step {
} __packed;
struct rtw89_btc_fbtc_steps {
- u8 fver;
+ u8 fver; /* chip_info::fcxstep_ver */
u8 rsvd;
__le16 cnt;
__le16 pos_old;
@@ -1517,9 +1631,16 @@ struct rtw89_btc_fbtc_steps {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-#define FCXCYSTA_VER 2
-struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+struct rtw89_btc_fbtc_steps_v1 {
u8 fver;
+ u8 en;
+ __le16 rsvd;
+ __le32 cnt;
+ struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
+} __packed;
+
+struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+ u8 fver; /* chip_info::fcxcysta_ver */
u8 rsvd;
__le16 cycles; /* total cycle number */
__le16 cycles_a2dp[CXT_FLCTRL_MAX];
@@ -1544,19 +1665,80 @@ struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
__le16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
} __packed;
-#define FCXNULLSTA_VER 1
-struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+struct rtw89_btc_fbtc_fdd_try_info {
+ __le16 cycles[CXT_FLCTRL_MAX];
+ __le16 tavg[CXT_FLCTRL_MAX]; /* avg try BT-Slot-TDD/BT-slot-FDD time */
+ __le16 tmax[CXT_FLCTRL_MAX]; /* max try BT-Slot-TDD/BT-slot-FDD time */
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_time_info {
+ __le16 tavg[CXT_MAX]; /* avg wl/bt cycle time */
+ __le16 tmax[CXT_MAX]; /* max wl/bt cycle time */
+ __le16 tmaxdiff[CXT_MAX]; /* max wl-wl bt-bt cycle diff time */
+} __packed;
+
+struct rtw89_btc_fbtc_a2dp_trx_stat {
+ u8 empty_cnt;
+ u8 retry_cnt;
+ u8 tx_rate;
+ u8 tx_cnt;
+ u8 ack_cnt;
+ u8 nack_cnt;
+ u8 rsvd1;
+ u8 rsvd2;
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_a2dp_empty_info {
+ __le16 cnt; /* a2dp empty cnt */
+ __le16 cnt_timeout; /* a2dp empty timeout cnt */
+ __le16 tavg; /* avg a2dp empty time */
+ __le16 tmax; /* max a2dp empty time */
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_leak_info {
+ __le32 cnt_rximr; /* the rximr occur at leak slot */
+ __le16 tavg; /* avg leak-slot time */
+ __le16 tmax; /* max leak-slot time */
+} __packed;
+
+struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
u8 fver;
u8 rsvd;
+ __le16 cycles; /* total cycle number */
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_time_info cycle_time;
+ struct rtw89_btc_fbtc_fdd_try_info fdd_try;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_a2dp_trx_stat a2dp_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_leak_info leak_slot;
+ __le32 slot_cnt[CXST_MAX]; /* slot count */
+ __le32 bcn_cnt[CXBCN_MAX];
+ __le32 collision_cnt; /* counter for event/timer occur at the same time */
+ __le32 skip_cnt;
+ __le32 except_cnt;
+ __le32 except_map;
+} __packed;
+
+struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+ u8 fver; /* chip_info::fcxnullsta_ver */
+ u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
__le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
__le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */
} __packed;
-#define FCX_BTVER_VER 1
+struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
+ u8 fver; /* chip_info::fcxnullsta_ver */
+ u8 rsvd;
+ __le16 rsvd2;
+ __le32 max_t[2]; /* max_t for 0:null0/1:null1 */
+ __le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
+ __le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
+} __packed;
+
struct rtw89_btc_fbtc_btver {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtver_ver */
u8 rsvd;
__le16 rsvd2;
__le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */
@@ -1564,17 +1746,15 @@ struct rtw89_btc_fbtc_btver {
__le32 feature;
} __packed;
-#define FCX_BTSCAN_VER 1
struct rtw89_btc_fbtc_btscan {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtscan_ver */
u8 rsvd;
__le16 rsvd2;
u8 scan[6];
} __packed;
-#define FCX_BTAFH_VER 1
struct rtw89_btc_fbtc_btafh {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtafh_ver */
u8 rsvd;
__le16 rsvd2;
u8 afh_l[4]; /*bit0:2402, bit1: 2403.... bit31:2433 */
@@ -1582,9 +1762,8 @@ struct rtw89_btc_fbtc_btafh {
u8 afh_h[4]; /*bit0:2466, bit1:2467......bit14:2480 */
} __packed;
-#define FCX_BTDEVINFO_VER 1
struct rtw89_btc_fbtc_btdevinfo {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtdevinfo_ver */
u8 rsvd;
__le16 vendor_id;
__le32 dev_name; /* only 24 bits valid */
@@ -1609,6 +1788,7 @@ struct rtw89_btc_dm {
struct rtw89_btc_rf_trx_para rf_trx_para;
struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
struct rtw89_btc_dm_step dm_step;
+ struct rtw89_btc_wl_scc_ctrl wl_scc;
union rtw89_btc_dm_error_map error;
u32 cnt_dm[BTC_DCNT_NUM];
u32 cnt_notify[BTC_NCNT_NUM];
@@ -1628,7 +1808,9 @@ struct rtw89_btc_dm {
u32 wl_btg_rx: 1;
u32 trx_para_level: 8;
u32 wl_stb_chg: 1;
- u32 rsvd: 3;
+ u32 pta_owner: 1;
+ u32 tdma_instant_excute: 1;
+ u32 rsvd: 1;
u16 slot_dur[CXST_MAX];
@@ -1650,8 +1832,6 @@ struct rtw89_btc_dbg {
u32 rb_val;
};
-#define FCXTDMA_VER 1
-
enum rtw89_btc_btf_fw_event {
BTF_EVNT_RPT = 0,
BTF_EVNT_BT_INFO = 1,
@@ -1704,12 +1884,18 @@ struct rtw89_btc_rpt_cmn_info {
struct rtw89_btc_report_ctrl_state {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw for 52A */
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+ struct rtw89_btc_fbtc_tdma_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_slots {
@@ -1719,17 +1905,26 @@ struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_fbtc_cysta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_cysta finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_cysta finfo; /* info from fw for 52A */
+ struct rtw89_btc_fbtc_cysta_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_step {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+ struct rtw89_btc_fbtc_steps_v1 finfo_v1; /* info from fw */
+ };
};
struct rtw89_btc_rpt_fbtc_nullsta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+ struct rtw89_btc_fbtc_cynullsta_v1 finfo_v1; /* info from fw */
+ };
};
struct rtw89_btc_rpt_fbtc_mreg {
@@ -1887,7 +2082,9 @@ struct rtw89_ra_info {
u8 ra_csi_rate_en:1;
u8 fixed_csi_rate_en:1;
u8 cr_tbl_sel:1;
- u8 rsvd2:5;
+ u8 fix_giltf_en:1;
+ u8 fix_giltf:3;
+ u8 rsvd2:1;
u8 csi_mcs_ss_idx;
u8 csi_mode:2;
u8 csi_gi_ltf:3;
@@ -1911,19 +2108,20 @@ struct rtw89_ra_report {
struct rate_info txrate;
u32 bit_rate;
u16 hw_rate;
+ bool might_fallback_legacy;
};
DECLARE_EWMA(rssi, 10, 16);
-#define RTW89_BA_CAM_NUM 2
-
struct rtw89_ba_cam_entry {
+ struct list_head list;
u8 tid;
};
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
+#define RTW89_MAX_BA_CAM_NUM 8
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -1967,18 +2165,21 @@ struct rtw89_sec_cam_entry {
struct rtw89_sta {
u8 mac_id;
bool disassoc;
+ struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
struct rtw89_ra_info ra;
struct rtw89_ra_report ra_report;
int max_agg_wait;
u8 prev_rssi;
struct ewma_rssi avg_rssi;
+ struct ewma_rssi rssi[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
struct rtw89_addr_cam_entry addr_cam; /* AP mode or TDLS peer only */
struct rtw89_bssid_cam_entry bssid_cam; /* TDLS peer only */
+ struct list_head ba_cam_list;
bool use_cfg_mask;
struct cfg80211_bitrate_mask mask;
@@ -1987,9 +2188,6 @@ struct rtw89_sta {
u32 ampdu_max_time:4;
bool cctl_tx_retry_limit;
u32 data_tx_cnt_lmt:6;
-
- DECLARE_BITMAP(ba_cam_map, RTW89_BA_CAM_NUM);
- struct rtw89_ba_cam_entry ba_cam_entry[RTW89_BA_CAM_NUM];
};
struct rtw89_efuse {
@@ -2007,6 +2205,8 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
+#define RTW89_P2P_MAX_NOA_NUM 2
+
struct rtw89_vif {
struct list_head list;
struct rtw89_dev *rtwdev;
@@ -2022,6 +2222,7 @@ struct rtw89_vif {
u8 wmm;
u8 bcn_hit_cond;
u8 hit_rule;
+ u8 last_noa_nr;
bool trigger;
bool lsig_txop;
u8 tgt_ind;
@@ -2091,7 +2292,7 @@ struct rtw89_hci_info {
struct rtw89_chip_ops {
int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
- void (*disable_bb_rf)(struct rtw89_dev *rtwdev);
+ int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -2100,20 +2301,29 @@ struct rtw89_chip_ops {
bool (*write_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void (*set_channel)(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param);
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx);
void (*set_channel_help)(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p);
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
- void (*rfk_band_changed)(struct rtw89_dev *rtwdev);
+ void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
void (*power_trim)(struct rtw89_dev *rtwdev);
- void (*set_txpwr)(struct rtw89_dev *rtwdev);
- void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg);
@@ -2150,6 +2360,8 @@ struct rtw89_chip_ops {
void (*btc_bt_aci_imp)(struct rtw89_dev *rtwdev);
void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
+ void (*btc_set_policy)(struct rtw89_dev *rtwdev, u16 policy_type);
+ void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level);
};
enum rtw89_dma_ch {
@@ -2351,6 +2563,7 @@ struct rtw89_imr_info {
u32 cpu_disp_imr_set;
u32 other_disp_imr_clr;
u32 other_disp_imr_set;
+ u32 bbrpt_com_err_imr_reg;
u32 bbrpt_chinfo_err_imr_reg;
u32 bbrpt_err_imr_set;
u32 bbrpt_dfs_err_imr_reg;
@@ -2373,17 +2586,40 @@ struct rtw89_imr_info {
u32 tmac_imr_set;
};
+struct rtw89_rrsr_cfgs {
+ struct rtw89_reg3_def ref_rate;
+ struct rtw89_reg3_def rsc;
+};
+
+struct rtw89_dig_regs {
+ u32 seg0_pd_reg;
+ u32 pd_lower_bound_mask;
+ u32 pd_spatial_reuse_en;
+ struct rtw89_reg_def p0_lna_init;
+ struct rtw89_reg_def p1_lna_init;
+ struct rtw89_reg_def p0_tia_init;
+ struct rtw89_reg_def p1_tia_init;
+ struct rtw89_reg_def p0_rxb_init;
+ struct rtw89_reg_def p1_rxb_init;
+ struct rtw89_reg_def p0_p20_pagcugc_en;
+ struct rtw89_reg_def p0_s20_pagcugc_en;
+ struct rtw89_reg_def p1_p20_pagcugc_en;
+ struct rtw89_reg_def p1_s20_pagcugc_en;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
const char *fw_name;
u32 fifo_size;
+ u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
bool dis_2g_40m_ul_ofdma;
u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
+ u8 support_chanctx_num;
u8 support_bands;
bool support_bw160;
bool hw_sec_hdr;
@@ -2393,6 +2629,9 @@ struct rtw89_chip_info {
u8 acam_num;
u8 bcam_num;
u8 scam_num;
+ u8 bacam_num;
+ u8 bacam_dynamic_num;
+ bool bacam_v1;
u8 sec_ctrl_efuse_size;
u32 physical_efuse_size;
@@ -2411,6 +2650,7 @@ struct rtw89_chip_info {
const struct rtw89_phy_table *nctl_table;
const struct rtw89_txpwr_table *byr_table;
const struct rtw89_phy_dig_gain_table *dig_table;
+ const struct rtw89_dig_regs *dig_regs;
const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table;
const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
@@ -2436,6 +2676,20 @@ struct rtw89_chip_info {
u8 btcx_desired;
u8 scbd;
u8 mailbox;
+ u16 btc_fwinfo_buf;
+
+ u8 fcxbtcrpt_ver;
+ u8 fcxtdma_ver;
+ u8 fcxslots_ver;
+ u8 fcxcysta_ver;
+ u8 fcxstep_ver;
+ u8 fcxnullsta_ver;
+ u8 fcxmreg_ver;
+ u8 fcxgpiodbg_ver;
+ u8 fcxbtver_ver;
+ u8 fcxbtscan_ver;
+ u8 fcxbtafh_ver;
+ u8 fcxbtdevinfo_ver;
u8 afh_guard_ch;
const u8 *wl_rssi_thres;
@@ -2463,6 +2717,8 @@ struct rtw89_chip_info {
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
const struct rtw89_imr_info *imr_info;
+ const struct rtw89_rrsr_cfgs *rrsr_cfgs;
+ u32 dma_ch_mask;
};
union rtw89_bus_info {
@@ -2514,6 +2770,8 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD,
RTW89_FW_FEATURE_TX_WAKE,
RTW89_FW_FEATURE_CRASH_TRIGGER,
+ RTW89_FW_FEATURE_PACKET_DROP,
+ RTW89_FW_FEATURE_NO_DEEP_PS,
};
struct rtw89_fw_suit {
@@ -2536,6 +2794,18 @@ struct rtw89_fw_suit {
#define RTW89_FW_SUIT_VER_CODE(s) \
RTW89_FW_VER_CODE((s)->major_ver, (s)->minor_ver, (s)->sub_ver, (s)->sub_idex)
+#define RTW89_MFW_HDR_VER_CODE(mfw_hdr) \
+ RTW89_FW_VER_CODE((mfw_hdr)->ver.major, \
+ (mfw_hdr)->ver.minor, \
+ (mfw_hdr)->ver.sub, \
+ (mfw_hdr)->ver.idx)
+
+#define RTW89_FW_HDR_VER_CODE(fw_hdr) \
+ RTW89_FW_VER_CODE(GET_FW_HDR_MAJOR_VERSION(fw_hdr), \
+ GET_FW_HDR_MINOR_VERSION(fw_hdr), \
+ GET_FW_HDR_SUBVERSION(fw_hdr), \
+ GET_FW_HDR_SUBINDEX(fw_hdr))
+
struct rtw89_fw_info {
const struct firmware *firmware;
struct rtw89_dev *rtwdev;
@@ -2558,6 +2828,8 @@ struct rtw89_cam_info {
DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM);
DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM);
DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+ DECLARE_BITMAP(ba_cam_map, RTW89_MAX_BA_CAM_NUM);
+ struct rtw89_ba_cam_entry ba_cam_entry[RTW89_MAX_BA_CAM_NUM];
};
enum rtw89_sar_sources {
@@ -2599,24 +2871,34 @@ struct rtw89_sar_info {
};
};
+struct rtw89_chanctx_cfg {
+ enum rtw89_sub_entity_idx idx;
+};
+
+enum rtw89_entity_mode {
+ RTW89_ENTITY_MODE_SCC,
+};
+
struct rtw89_hal {
u32 rx_fltr;
u8 cv;
- u8 current_channel;
- u32 current_freq;
- u8 prev_primary_channel;
- u8 current_primary_channel;
- enum rtw89_subband current_subband;
- u8 current_band_width;
- u8 prev_band_type;
- u8 current_band_type;
u32 sw_amsdu_max_size;
u32 antenna_tx;
u32 antenna_rx;
u8 tx_nss;
u8 rx_nss;
+ bool tx_path_diversity;
bool support_cckpd;
bool support_igi;
+
+ DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ struct cfg80211_chan_def chandef[NUM_OF_RTW89_SUB_ENTITY];
+
+ bool entity_active;
+ enum rtw89_entity_mode entity_mode;
+
+ struct rtw89_chan chan[NUM_OF_RTW89_SUB_ENTITY];
+ struct rtw89_chan_rcd chan_rcd[NUM_OF_RTW89_SUB_ENTITY];
};
#define RTW89_MAX_MAC_ID_NUM 128
@@ -2632,11 +2914,37 @@ enum rtw89_flags {
RTW89_FLAG_LEISURE_PS,
RTW89_FLAG_LOW_POWER_MODE,
RTW89_FLAG_INACTIVE_PS,
- RTW89_FLAG_RESTART_TRIGGER,
+ RTW89_FLAG_CRASH_SIMULATING,
NUM_OF_RTW89_FLAGS,
};
+enum rtw89_pkt_drop_sel {
+ RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_ALL,
+ RTW89_PKT_DROP_SEL_MG0_ONCE,
+ RTW89_PKT_DROP_SEL_HIQ_ONCE,
+ RTW89_PKT_DROP_SEL_HIQ_PORT,
+ RTW89_PKT_DROP_SEL_HIQ_MBSSID,
+ RTW89_PKT_DROP_SEL_BAND,
+ RTW89_PKT_DROP_SEL_BAND_ONCE,
+ RTW89_PKT_DROP_SEL_REL_MACID,
+ RTW89_PKT_DROP_SEL_REL_HIQ_PORT,
+ RTW89_PKT_DROP_SEL_REL_HIQ_MBSSID,
+};
+
+struct rtw89_pkt_drop_params {
+ enum rtw89_pkt_drop_sel sel;
+ enum rtw89_mac_idx mac_band;
+ u8 macid;
+ u8 port;
+ u8 mbssid;
+ bool tf_trs;
+};
+
struct rtw89_pkt_stat {
u16 beacon_nr;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
@@ -3073,6 +3381,7 @@ struct rtw89_hw_scan_info {
u8 op_chan;
u8 op_bw;
u8 op_band;
+ u32 last_chan_idx;
};
enum rtw89_phy_bb_gain_band {
@@ -3119,6 +3428,7 @@ struct rtw89_phy_efuse_gain {
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
+ const struct ieee80211_ops *ops;
bool dbcc_en;
struct rtw89_hw_scan_info scan_info;
@@ -3498,6 +3808,16 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw89_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
+static inline struct ieee80211_vif *rtwvif_to_vif_safe(struct rtw89_vif *rtwvif)
+{
+ return rtwvif ? rtwvif_to_vif(rtwvif) : NULL;
+}
+
+static inline struct rtw89_vif *vif_to_rtwvif_safe(struct ieee80211_vif *vif)
+{
+ return vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+}
+
static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta)
{
void *p = rtwsta;
@@ -3542,6 +3862,20 @@ enum nl80211_band rtw89_hw_to_nl80211_band(enum rtw89_band hw_band)
}
static inline
+enum rtw89_band rtw89_nl80211_to_hw_band(enum nl80211_band nl_band)
+{
+ switch (nl_band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ return RTW89_BAND_2G;
+ case NL80211_BAND_5GHZ:
+ return RTW89_BAND_5G;
+ case NL80211_BAND_6GHZ:
+ return RTW89_BAND_6G;
+ }
+}
+
+static inline
enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
{
switch (width) {
@@ -3588,16 +3922,51 @@ struct rtw89_bssid_cam_entry *rtw89_get_bssid_cam_of(struct rtw89_vif *rtwvif,
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtwdev->chip->ops->set_channel_help(rtwdev, true, p);
+ rtwdev->chip->ops->set_channel_help(rtwdev, true, p, chan,
+ mac_idx, phy_idx);
}
static inline
void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtwdev->chip->ops->set_channel_help(rtwdev, false, p, chan,
+ mac_idx, phy_idx);
+}
+
+static inline
+const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
{
- rtwdev->chip->ops->set_channel_help(rtwdev, false, p);
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chandef[idx];
+}
+
+static inline
+const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chan[idx];
+}
+
+static inline
+const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chan_rcd[idx];
}
static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
@@ -3632,12 +4001,13 @@ static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
chip->ops->rfk_channel(rtwdev);
}
-static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev)
+static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx);
}
static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -3661,19 +4031,7 @@ static inline void rtw89_chip_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->set_txpwr_ctrl)
- chip->ops->set_txpwr_ctrl(rtwdev);
-}
-
-static inline void rtw89_chip_set_txpwr(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch = rtwdev->hal.current_channel;
-
- if (!ch)
- return;
-
- if (chip->ops->set_txpwr)
- chip->ops->set_txpwr(rtwdev);
+ chip->ops->set_txpwr_ctrl(rtwdev, RTW89_PHY_0);
}
static inline void rtw89_chip_power_trim(struct rtw89_dev *rtwdev)
@@ -3902,16 +4260,27 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_register(struct rtw89_dev *rtwdev);
void rtw89_core_unregister(struct rtw89_dev *rtwdev);
+struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ u32 bus_data_size,
+ const struct rtw89_chip_info *chip);
+void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
+void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_set_channel(struct rtw89_dev *rtwdev);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
-int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
-int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 829c61da99bb..730e83d54257 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -525,7 +525,8 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev)
{
- u8 band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
switch (regd) {
@@ -2189,6 +2190,37 @@ out:
return count;
}
+static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true);
+ switch (pkt_id) {
+ case 0xffff:
+ return -ETIMEDOUT;
+ case 0xfff:
+ return -ENOMEM;
+ default:
+ break;
+ }
+
+ /* intentionally enqueue two packets, but with only one packet ID */
+ ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
+ ctrl_para.start_pktid = pkt_id;
+ ctrl_para.end_pktid = pkt_id;
+ ctrl_para.pkt_num = 1; /* start from 0 */
+ ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
+ ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
+
+ if (rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true))
+ return -EFAULT;
+
+ return 0;
+}
+
static int
rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
{
@@ -2196,10 +2228,15 @@ rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
seq_printf(m, "%d\n",
- test_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags));
+ test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
return 0;
}
+enum rtw89_dbg_crash_simulation_type {
+ RTW89_DBG_SIM_CPU_EXCEPTION = 1,
+ RTW89_DBG_SIM_CTRL_ERROR = 2,
+};
+
static ssize_t
rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
size_t count, loff_t *loff)
@@ -2207,22 +2244,30 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
struct seq_file *m = (struct seq_file *)filp->private_data;
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- bool fw_crash;
+ int (*sim)(struct rtw89_dev *rtwdev);
+ u8 crash_type;
int ret;
- if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
- return -EOPNOTSUPP;
-
- ret = kstrtobool_from_user(user_buf, count, &fw_crash);
+ ret = kstrtou8_from_user(user_buf, count, 0, &crash_type);
if (ret)
return -EINVAL;
- if (!fw_crash)
+ switch (crash_type) {
+ case RTW89_DBG_SIM_CPU_EXCEPTION:
+ if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
+ return -EOPNOTSUPP;
+ sim = rtw89_fw_h2c_trigger_cpu_exception;
+ break;
+ case RTW89_DBG_SIM_CTRL_ERROR:
+ sim = rtw89_dbg_trigger_ctrl_error;
+ break;
+ default:
return -EINVAL;
+ }
mutex_lock(&rtwdev->mutex);
- set_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
- ret = rtw89_fw_h2c_trigger_cpu_exception(rtwdev);
+ set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+ ret = sim(rtwdev);
mutex_unlock(&rtwdev->mutex);
if (ret)
@@ -2289,7 +2334,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
struct rate_info *rate = &rtwsta->ra_report.txrate;
struct ieee80211_rx_status *status = &rtwsta->rx_status;
struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_hal *hal = &rtwdev->hal;
u8 rssi;
+ int i;
seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
@@ -2305,9 +2353,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
he_gi_str[rate->he_gi] : "N/A");
else
seq_printf(m, "Legacy %d", rate->legacy);
+ seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : "");
seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
- sta->max_rc_amsdu_len);
+ sta->deflink.agg.max_rc_amsdu_len);
seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id);
@@ -2333,8 +2382,15 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d)\n",
+ seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
+ rssi = ewma_rssi_read(&rtwsta->rssi[i]);
+ seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
+ hal->tx_path_diversity && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ i + 1 == rtwdev->chip->rf_path_num ? "" : ", ");
+ }
+ seq_puts(m, "]\n");
}
static void
@@ -2433,6 +2489,26 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
}
+static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
+{
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_ba_cam_entry *entry;
+ bool first = true;
+
+ list_for_each_entry(entry, &rtwsta->ba_cam_list, list) {
+ if (first) {
+ seq_puts(m, "\tba_cam ");
+ first = false;
+ } else {
+ seq_puts(m, ", ");
+ }
+ seq_printf(m, "tid[%u]=%d", entry->tid,
+ (int)(entry - rtwdev->cam_info.ba_cam_entry));
+ }
+ seq_puts(m, "\n");
+}
+
static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -2441,6 +2517,7 @@ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
sta->tdls ? "(TDLS)" : "");
rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+ rtw89_dump_ba_cam(m, rtwsta);
}
static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
@@ -2449,6 +2526,8 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ mutex_lock(&rtwdev->mutex);
+
seq_puts(m, "map:\n");
seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
rtwdev->mac_id_map);
@@ -2458,12 +2537,16 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
cam_info->bssid_cam_map);
seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
cam_info->sec_cam_map);
+ seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
+ cam_info->ba_cam_map);
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+ mutex_unlock(&rtwdev->mutex);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 6176152dbf6b..ee243aadde87 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -25,6 +25,7 @@ enum rtw89_debug_mask {
RTW89_DBG_BF = BIT(14),
RTW89_DBG_HW_SCAN = BIT(15),
RTW89_DBG_SAR = BIT(16),
+ RTW89_DBG_STATE = BIT(17),
RTW89_DBG_UNEXP = BIT(31),
};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 6473015a6b2a..d57e3610fb88 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -224,6 +225,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
};
static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
@@ -247,6 +254,46 @@ static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
}
}
+void rtw89_early_fw_feature_recognize(struct device *device,
+ const struct rtw89_chip_info *chip,
+ u32 *early_feat_map)
+{
+ union {
+ struct rtw89_mfw_hdr mfw_hdr;
+ u8 fw_hdr[RTW89_FW_HDR_SIZE];
+ } buf = {};
+ const struct firmware *firmware;
+ u32 ver_code;
+ int ret;
+ int i;
+
+ ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
+ device, &buf, sizeof(buf), 0);
+ if (ret) {
+ dev_err(device, "failed to early request firmware: %d\n", ret);
+ return;
+ }
+
+ ver_code = buf.mfw_hdr.sig != RTW89_MFW_SIG ?
+ RTW89_FW_HDR_VER_CODE(&buf.fw_hdr) :
+ RTW89_MFW_HDR_VER_CODE(&buf.mfw_hdr);
+ if (!ver_code)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
+ const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
+
+ if (chip->chip_id != ent->chip_id)
+ continue;
+
+ if (ent->cond(ver_code, ent->ver_code))
+ *early_feat_map |= BIT(ent->feature);
+ }
+
+out:
+ release_firmware(firmware);
+}
+
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
int ret;
@@ -571,6 +618,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
if (!skb) {
@@ -587,7 +635,8 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
H2C_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -596,7 +645,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_DCTL_SEC_CAM_LEN 68
@@ -605,6 +654,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
if (!skb) {
@@ -621,7 +671,8 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
H2C_DCTL_SEC_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -630,7 +681,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
@@ -638,14 +689,16 @@ EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
u8 macid = rtwsta->mac_id;
struct sk_buff *skb;
u8 entry_idx;
int ret;
ret = valid ?
- rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) :
- rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx);
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
if (ret) {
/* it still works even if we don't have static BA CAM, because
* hardware can create dynamic BA CAM automatically.
@@ -663,7 +716,10 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
- SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
+ if (chip->bacam_v1)
+ SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ else
+ SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
if (!valid)
goto end;
SET_BA_CAM_VALID(skb->data, valid);
@@ -676,6 +732,11 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
+ if (chip->bacam_v1) {
+ SET_BA_CAM_STD_EN(skb->data, 1);
+ SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ }
+
end:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
@@ -683,7 +744,8 @@ end:
H2C_FUNC_MAC_BA_CAM, 0, 1,
H2C_BA_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -692,7 +754,59 @@ end:
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
+ u8 entry_idx, u8 uid)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BA_CAM_LEN);
+
+ SET_BA_CAM_VALID(skb->data, 1);
+ SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ SET_BA_CAM_UID(skb->data, uid);
+ SET_BA_CAM_BAND(skb->data, 0);
+ SET_BA_CAM_STD_EN(skb->data, 0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM, 0, 1,
+ H2C_BA_CAM_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 entry_idx = chip->bacam_num;
+ u8 uid = 0;
+ int i;
+
+ for (i = 0; i < chip->bacam_dynamic_num; i++) {
+ rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
+ entry_idx++;
+ uid++;
+ }
}
#define H2C_LOG_CFG_LEN 12
@@ -701,6 +815,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
struct sk_buff *skb;
u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
@@ -720,7 +835,8 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
H2C_FUNC_LOG_CFG, 0, 0,
H2C_LOG_CFG_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -729,7 +845,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_GENERAL_PKT_LEN 6
@@ -737,6 +853,7 @@ fail:
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
if (!skb) {
@@ -757,7 +874,8 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
H2C_GENERAL_PKT_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -766,7 +884,7 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LPS_PARM_LEN 8
@@ -774,6 +892,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
if (!skb) {
@@ -799,7 +918,8 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_LPS_PARM, 0, 1,
H2C_LPS_PARM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -808,7 +928,73 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+#define H2C_P2P_ACT_LEN 20
+int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_p2p_noa_desc *desc,
+ u8 act, u8 noa_id)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
+ u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
+ struct sk_buff *skb;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_P2P_ACT_LEN);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
+ RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
+ RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
+ RTW89_SET_FWCMD_P2P_ACT(cmd, act);
+ RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
+ RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
+ if (desc) {
+ RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
+ RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
+ RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
+ RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
+ RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_P2P_ACT, 0, 0,
+ H2C_P2P_ACT_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
+ u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+
+ SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
+ SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
+ SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}
#define H2C_CMC_TBL_LEN 68
@@ -816,11 +1002,9 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
struct sk_buff *skb;
- u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
- u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
u8 macid = rtwvif->mac_id;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
@@ -832,11 +1016,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
SET_CTRL_INFO_OPERATION(skb->data, 1);
if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
- SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
- SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
- SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
+ __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
SET_CMC_TBL_ANTSEL_A(skb->data, 0);
SET_CMC_TBL_ANTSEL_B(skb->data, 0);
SET_CMC_TBL_ANTSEL_C(skb->data, 0);
@@ -852,7 +1032,8 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -861,7 +1042,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
@@ -926,17 +1107,26 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u16 lowest_rate;
+ int ret;
memset(pads, 0, sizeof(pads));
if (sta)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
@@ -947,10 +1137,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
SET_CTRL_INFO_OPERATION(skb->data, 1);
SET_CMC_TBL_DISRTSFB(skb->data, 1);
SET_CMC_TBL_DISDATAFB(skb->data, 1);
- if (hal->current_band_type == RTW89_BAND_2G)
- SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1);
- else
- SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6);
+ SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -980,7 +1167,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -989,7 +1177,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
@@ -997,6 +1185,7 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
@@ -1020,7 +1209,47 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CMC_TBL_LEN);
+ SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_OPERATION(skb->data, 1);
+
+ __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ H2C_CMC_TBL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1029,19 +1258,28 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- struct rtw89_hal *hal = &rtwdev->hal;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct sk_buff *skb;
struct sk_buff *skb_beacon;
u16 tim_offset;
int bcn_total_len;
+ u16 beacon_rate;
+ int ret;
+
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
NULL, 0);
@@ -1066,8 +1304,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
- SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ?
- RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6);
+ SET_BCN_UPD_RATE(skb->data, beacon_rate);
skb_put_data(skb, skb_beacon->data, skb_beacon->len);
dev_kfree_skb_any(skb_beacon);
@@ -1077,10 +1314,11 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_BCN_UPD, 0, 1,
bcn_total_len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
return 0;
@@ -1095,6 +1333,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role;
+ int ret;
if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
if (rtwsta)
@@ -1121,7 +1360,8 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
H2C_ROLE_MAINTAIN_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1130,7 +1370,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_JOIN_INFO_LEN 4
@@ -1141,6 +1381,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role = rtwvif->self_role;
u8 net_type = rtwvif->net_type;
+ int ret;
if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
self_role = RTW89_SELF_ROLE_AP_CLIENT;
@@ -1172,7 +1413,8 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_MAC_JOININFO, 0, 1,
H2C_JOIN_INFO_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1181,7 +1423,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
@@ -1190,6 +1432,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
struct rtw89_fw_macid_pause_grp h2c = {{0}};
u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
if (!skb) {
@@ -1206,7 +1449,8 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1215,7 +1459,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_EDCA_LEN 12
@@ -1223,6 +1467,7 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u8 ac, u32 val)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
if (!skb) {
@@ -1241,7 +1486,8 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_USR_EDCA, 0, 1,
H2C_EDCA_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1250,7 +1496,47 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+#define H2C_TSF32_TOGL_LEN 4
+int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool en)
+{
+ struct sk_buff *skb;
+ u16 early_us = en ? 2000 : 0;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_TSF32_TOGL_LEN);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
+ RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
+ RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
+ RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_TSF32_TOGL, 0, 0,
+ H2C_TSF32_TOGL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
#define H2C_OFLD_CFG_LEN 8
@@ -1258,6 +1544,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
if (!skb) {
@@ -1271,7 +1558,8 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
H2C_FUNC_OFLD_CFG, 0, 1,
H2C_OFLD_CFG_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1280,7 +1568,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_RA_LEN 16
@@ -1288,6 +1576,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
{
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
if (!skb) {
@@ -1318,6 +1607,8 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
+ RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);
if (csi) {
RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
@@ -1336,7 +1627,8 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
H2C_RA_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1345,7 +1637,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_CXDRVHDR 2
@@ -1359,6 +1651,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
struct rtw89_btc_ant_info *ant = &module->ant;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
if (!skb) {
@@ -1395,7 +1688,8 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_INIT);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1404,10 +1698,15 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
+#define PORT_DATA_OFFSET 4
+#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
+ H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
+ H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1416,7 +1715,9 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role *active = role_info->active_role;
struct sk_buff *skb;
+ u8 offset = 0;
u8 *cmd;
+ int ret;
int i;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
@@ -1447,19 +1748,19 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
- RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
- RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
- RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
- RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i);
- RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i);
- RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i);
- RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i);
- RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i);
- RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i);
- RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i);
- RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i);
- RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i);
- RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1467,7 +1768,8 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_ROLE);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1476,16 +1778,101 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
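+/* Descriptive comment (editorial): like rtw89_fw_h2c_cxdrv_role(), but each
+ * per-port record is PORT_DATA_OFFSET bytes wider and the final
+ * H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN bytes carry the mrole/DBCC fields.
+ */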
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
+ struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
+ struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
+ struct sk_buff *skb;
+ u8 *cmd, offset;
+ int ret;
+ int i;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
+
+ RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
+ RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
+ RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
+ RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
+
+ offset = PORT_DATA_OFFSET;
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
+ }
+
+ offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
+ RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
+ RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_ROLE_V1);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
if (!skb) {
@@ -1501,14 +1888,16 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
- RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
+ if (chip->chip_id == RTL8852A)
+ RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_CTRL);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1517,7 +1906,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
@@ -1528,6 +1917,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
if (!skb) {
@@ -1551,7 +1941,8 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_RFK);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1560,7 +1951,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_PKT_OFLD 4
@@ -1568,6 +1959,7 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
{
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
if (!skb) {
@@ -1585,7 +1977,8 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
H2C_FUNC_PACKET_OFLD, 1, 1,
H2C_LEN_PKT_OFLD);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1594,7 +1987,7 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
@@ -1603,6 +1996,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb;
u8 *cmd;
u8 alloc_id;
+ int ret;
alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
RTW89_MAX_PKT_OFLD_NUM);
@@ -1629,7 +2023,8 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
H2C_FUNC_PACKET_OFLD, 1, 1,
H2C_LEN_PKT_OFLD + skb_ofld->len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1638,7 +2033,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_SCAN_LIST_OFFLOAD 4
@@ -1649,6 +2044,7 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
struct sk_buff *skb;
int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
@@ -1693,7 +2089,8 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1702,10 +2099,10 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
-#define H2C_LEN_SCAN_OFFLOAD 20
+#define H2C_LEN_SCAN_OFFLOAD 28
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
struct rtw89_vif *rtwvif)
@@ -1713,6 +2110,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
if (!skb) {
@@ -1736,6 +2134,8 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
scan_info->op_pri_ch);
RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
scan_info->op_chan);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
+ scan_info->op_band);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1743,7 +2143,8 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
H2C_LEN_SCAN_OFFLOAD);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1752,7 +2153,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
@@ -1762,6 +2163,7 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
u8 class = info->rf_path == RF_PATH_A ?
H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -1774,7 +2176,8 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
H2C_CAT_OUTSRC, class, page, 0, 0,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1783,14 +2186,16 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
struct rtw89_fw_h2c_rf_get_mccch *mccch;
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
if (!skb) {
@@ -1804,15 +2209,16 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
- mccch->current_channel = cpu_to_le32(rtwdev->hal.current_channel);
- mccch->current_band_type = cpu_to_le32(rtwdev->hal.current_band_type);
+ mccch->current_channel = cpu_to_le32(chan->channel);
+ mccch->current_band_type = cpu_to_le32(chan->band_type);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
sizeof(*mccch));
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1821,7 +2227,7 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
@@ -1830,6 +2236,7 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
bool rack, bool dack)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -1842,7 +2249,8 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1851,12 +2259,13 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
if (!skb) {
@@ -1865,7 +2274,8 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
}
skb_put_data(skb, buf, len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1874,7 +2284,7 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
@@ -2169,7 +2579,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
if (ssid_num) {
ch_info->num_pkt = ssid_num;
- band = ch_info->ch_band;
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
list_for_each_entry(info, &scan_info->pkt_list[band], list) {
ch_info->probe_id = info->id;
@@ -2211,13 +2621,16 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
struct ieee80211_channel *channel;
struct list_head chan_list;
bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
- int list_len = req->n_channels, off_chan_time = 0;
+ int list_len, off_chan_time = 0;
enum rtw89_chan_type type;
- int ret = 0, i;
+ int ret = 0;
+ u32 idx;
INIT_LIST_HEAD(&chan_list);
- for (i = 0; i < req->n_channels; i++) {
- channel = req->channels[i];
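+ /* resume from the channel index where the previous scan-list H2C
+  * stopped, sending at most RTW89_SCAN_LIST_LIMIT channels per command
+  */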
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
if (!ch_info) {
ret = -ENOMEM;
@@ -2226,7 +2639,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
ch_info->period = req->duration_mandatory ?
req->duration : RTW89_CHANNEL_TIME;
- ch_info->ch_band = channel->band;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
ch_info->central_ch = channel->hw_value;
ch_info->pri_ch = channel->hw_value;
ch_info->rand_seq_num = random_seq;
@@ -2258,6 +2671,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
list_add_tail(&ch_info->list, &chan_list);
off_chan_time += ch_info->period;
}
+ rtwdev->scan_info.last_chan_idx = idx;
ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
out:
@@ -2289,9 +2703,11 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct cfg80211_scan_request *req = &scan_req->req;
+ u32 rx_fltr = rtwdev->hal.rx_fltr;
u8 mac_addr[ETH_ALEN];
rtwdev->scan_info.scanning_vif = vif;
+ rtwdev->scan_info.last_chan_idx = 0;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
@@ -2303,13 +2719,13 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
ether_addr_copy(mac_addr, vif->addr);
rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
- rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
- rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
- rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+ rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rx_fltr &= ~B_AX_A_BC;
+ rx_fltr &= ~B_AX_A_A1_MATCH;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
B_AX_RX_FLTR_CFG_MASK,
- rtwdev->hal.rx_fltr);
+ rx_fltr);
}
void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
@@ -2323,9 +2739,6 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (!vif)
return;
- rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
- rtwdev->hal.rx_fltr |= B_AX_A_BC;
- rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
B_AX_RX_FLTR_CFG_MASK,
@@ -2339,6 +2752,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
+ rtwdev->scan_info.last_chan_idx = 0;
rtwdev->scan_info.scanning_vif = NULL;
if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
@@ -2377,18 +2791,18 @@ out:
void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_chan new;
if (backup) {
- scan_info->op_pri_ch = hal->current_primary_channel;
- scan_info->op_chan = hal->current_channel;
- scan_info->op_bw = hal->current_band_width;
- scan_info->op_band = hal->current_band_type;
+ scan_info->op_pri_ch = cur->primary_channel;
+ scan_info->op_chan = cur->channel;
+ scan_info->op_bw = cur->band_width;
+ scan_info->op_band = cur->band_type;
} else {
- hal->current_primary_channel = scan_info->op_pri_ch;
- hal->current_channel = scan_info->op_chan;
- hal->current_band_width = scan_info->op_bw;
- hal->current_band_type = scan_info->op_band;
+ rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
+ scan_info->op_band, scan_info->op_bw);
+ rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
}
}
@@ -2397,6 +2811,7 @@ void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
if (!skb) {
@@ -2415,7 +2830,62 @@ int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
H2C_FUNC_CPU_EXCEPTION, 0, 0,
H2C_FW_CPU_EXCEPTION_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+#define H2C_PKT_DROP_LEN 24
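+/* Descriptive comment (editorial): ask firmware to drop queued packets that
+ * match the given selection (per-MACID AC queues, band, port, mbssid).
+ */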
+int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ const struct rtw89_pkt_drop_params *params)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for packet drop\n");
+ return -ENOMEM;
+ }
+
+ switch (params->sel) {
+ case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "H2C of pkt drop might not fully support sel: %d yet\n",
+ params->sel);
+ break;
+ }
+
+ skb_put(skb, H2C_PKT_DROP_LEN);
+ RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
+ RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
+ RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
+ RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
+ RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
+ RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PKT_DROP, 0, 0,
+ H2C_PKT_DROP_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -2424,5 +2894,5 @@ int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index e75ad22aa85d..0047d5d0e9b1 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -63,21 +63,32 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
};
-struct rtw89_c2h_phy_cap {
- u32 func:7;
- u32 ack:1;
- u32 len:4;
- u32 seq:4;
- u32 rx_nss:8;
- u32 bw:8;
-
- u32 tx_nss:8;
- u32 prot:8;
- u32 nic:8;
- u32 wl_func:8;
-
- u32 hw_type:8;
-} __packed;
+#define RTW89_GET_C2H_PHYCAP_FUNC(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(6, 0))
+#define RTW89_GET_C2H_PHYCAP_ACK(info) \
+ u32_get_bits(*((const u32 *)(info)), BIT(7))
+#define RTW89_GET_C2H_PHYCAP_LEN(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(11, 8))
+#define RTW89_GET_C2H_PHYCAP_SEQ(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(15, 12))
+#define RTW89_GET_C2H_PHYCAP_RX_NSS(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(23, 16))
+#define RTW89_GET_C2H_PHYCAP_BW(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(31, 24))
+#define RTW89_GET_C2H_PHYCAP_TX_NSS(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(7, 0))
+#define RTW89_GET_C2H_PHYCAP_PROT(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(15, 8))
+#define RTW89_GET_C2H_PHYCAP_NIC(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(23, 16))
+#define RTW89_GET_C2H_PHYCAP_WL_FUNC(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(31, 24))
+#define RTW89_GET_C2H_PHYCAP_HW_TYPE(info) \
+ u32_get_bits(*((const u32 *)(info) + 2), GENMASK(7, 0))
+#define RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(info) \
+ u32_get_bits(*((const u32 *)(info) + 3), GENMASK(15, 8))
+#define RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(info) \
+ u32_get_bits(*((const u32 *)(info) + 3), GENMASK(23, 16))
enum rtw89_fw_c2h_category {
RTW89_C2H_CAT_TEST,
@@ -144,6 +155,13 @@ enum rtw89_chan_type {
RTW89_CHAN_DFS,
};
+enum rtw89_p2pps_action {
+ RTW89_P2P_ACT_INIT = 0,
+ RTW89_P2P_ACT_UPDATE = 1,
+ RTW89_P2P_ACT_REMOVE = 2,
+ RTW89_P2P_ACT_TERMINATE = 3,
+};
+
#define FWDL_SECTION_MAX_NUM 10
#define FWDL_SECTION_CHKSUM_LEN 8
#define FWDL_SECTION_PER_PKT_LEN 2020
@@ -177,6 +195,7 @@ struct rtw89_h2creg_sch_tx_en {
u16 rsvd:15;
} __packed;
+#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_DFS_CHAN_TIME 105
#define RTW89_OFF_CHAN_TIME 100
@@ -186,7 +205,10 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_SCANOFLD_MAX_IE_LEN 512
#define RTW89_SCANOFLD_PKT_NONE 0xFF
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
-#define RTW89_MAC_CHINFO_SIZE 20
+#define RTW89_MAC_CHINFO_SIZE 24
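+/* Limit channels per scan-list H2C so the payload stays within
+ * RTW89_H2C_MAX_SIZE, leaving RTW89_SCAN_LIST_GUARD entries of headroom.
+ */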
+#define RTW89_SCAN_LIST_GUARD 4
+#define RTW89_SCAN_LIST_LIMIT \
+ ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
struct rtw89_mac_chinfo {
u8 period;
@@ -346,6 +368,16 @@ static inline void RTW89_SET_FWCMD_RA_CR_TBL_SEL(void *cmd, u32 val)
le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10));
}
+static inline void RTW89_SET_FWCMD_RA_FIX_GILTF_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(11));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIX_GILTF(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(14, 12));
+}
+
static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16));
@@ -1798,6 +1830,36 @@ static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
}
+static inline void RTW89_SET_FWCMD_PKT_DROP_SEL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_PORT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MBSSID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(15, 8));
+}
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -2006,69 +2068,104 @@ static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NAN(void *cmd, u16 val)
le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, GENMASK(3, 1));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(4));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(5));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, GENMASK(7, 6));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0));
+ u8p_replace_bits((u8 *)cmd + (7 + (12 + offset) * n), val, BIT(0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1));
+ u8p_replace_bits((u8 *)cmd + (7 + (12 + offset) * n), val, GENMASK(7, 1));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0));
+ u8p_replace_bits((u8 *)cmd + (8 + (12 + offset) * n), val, GENMASK(7, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0));
+ u8p_replace_bits((u8 *)cmd + (9 + (12 + offset) * n), val, GENMASK(7, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (10 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (12 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (14 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (16 + (12 + offset) * n)), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(void *cmd, u32 val, int n, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + (20 + (12 + offset) * n)), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_MROLE_NOA(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 4), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_EN(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_CHG(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, GENMASK(3, 2));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(4));
}
static inline void RTW89_SET_FWCMD_CXCTRL_MANUAL(void *cmd, u32 val)
@@ -2352,6 +2449,86 @@ static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(31, 0));
}
+static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_P2PID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_NOAID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 12));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_ACT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(19, 16));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(20));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_ALL_SLEP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(21));
+}
+
+static inline void RTW89_SET_FWCMD_NOA_START_TIME(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 1) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_INTERVAL(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 2) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_DURATION(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 3) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_COUNT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_NOA_CTWINDOW(void *cmd, u32 val)
+{
+ u8 ctwnd;
+
+ if (!(val & IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return;
+ ctwnd = FIELD_GET(IEEE80211_P2P_OPPPS_CTWINDOW_MASK, val);
+ le32p_replace_bits((__le32 *)(cmd) + 4, ctwnd, GENMASK(23, 8));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_PORT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(4, 2));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_EARLY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 16));
+}
+
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
@@ -2421,6 +2598,8 @@ static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
+#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
@@ -2446,7 +2625,14 @@ struct rtw89_mfw_info {
struct rtw89_mfw_hdr {
u8 sig; /* RTW89_MFW_SIG */
u8 fw_nr;
- u8 rsvd[14];
+ u8 rsvd0[2];
+ struct {
+ u8 major;
+ u8 minor;
+ u8 sub;
+ u8 idx;
+ } ver;
+ u8 rsvd1[8];
struct rtw89_mfw_info info[];
} __packed;
@@ -2493,6 +2679,7 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
#define H2C_FUNC_MAC_LPS_PARM 0x0
+#define H2C_FUNC_P2P_ACT 0x1
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -2519,9 +2706,11 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_PACKET_OFLD 0x1
#define H2C_FUNC_MAC_MACID_PAUSE 0x8
#define H2C_FUNC_USR_EDCA 0xF
+#define H2C_FUNC_TSF32_TOGL 0x10
#define H2C_FUNC_OFLD_CFG 0x14
#define H2C_FUNC_ADD_SCANOFLD_CH 0x16
#define H2C_FUNC_SCANOFLD 0x17
+#define H2C_FUNC_PKT_DROP 0x1b
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
@@ -2552,7 +2741,7 @@ struct rtw89_fw_h2c_rf_get_mccch {
#define RTW89_FW_RSVD_PLE_SIZE 0x800
-#define RTW89_WCPU_BASE_ADDR 0xA0000000
+#define RTW89_WCPU_BASE_MASK GENMASK(27, 0)
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
#define RTW89_VALID_FW_BACKTRACE_SIZE(_size) \
@@ -2563,6 +2752,9 @@ struct rtw89_fw_h2c_rf_get_mccch {
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev);
int rtw89_fw_recognize(struct rtw89_dev *rtwdev);
+void rtw89_early_fw_feature_recognize(struct device *device,
+ const struct rtw89_chip_info *chip,
+ u32 *early_feat_map);
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type);
int rtw89_load_firmware(struct rtw89_dev *rtwdev);
void rtw89_unload_firmware(struct rtw89_dev *rtwdev);
@@ -2577,6 +2769,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
@@ -2600,6 +2794,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
@@ -2623,6 +2818,7 @@ void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
+void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
@@ -2642,5 +2838,20 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ const struct rtw89_pkt_drop_params *params);
+int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_p2p_noa_desc *desc,
+ u8 act, u8 noa_id);
+int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool en);
+
+static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->bacam_v1)
+ rtw89_fw_h2c_init_ba_cam_v1(rtwdev);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 93124b815825..0508dfca8edf 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
@@ -826,6 +827,8 @@ static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 dma_ch_mask = chip->dma_ch_mask;
u8 ch;
u32 ret = 0;
@@ -847,6 +850,8 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ if (dma_ch_mask & BIT(ch))
+ continue;
ret = hfc_ch_ctrl(rtwdev, ch);
if (ret)
return ret;
@@ -862,6 +867,8 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
udelay(10);
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ if (dma_ch_mask & BIT(ch))
+ continue;
ret = hfc_upd_ch_info(rtwdev, ch);
if (ret)
return ret;
@@ -1053,18 +1060,29 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
enum rtw89_rpwm_req_pwr_state state;
unsigned long delay = enter ? 10 : 150;
int ret;
+ int i;
if (enter)
state = rtw89_mac_get_req_pwr_state(rtwdev);
else
state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
- rtw89_mac_send_rpwm(rtwdev, state, false);
- ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
- delay, 15000, false, rtwdev, state);
- if (ret)
- rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
- enter ? "entering" : "leaving");
+ for (i = 0; i < RPWM_TRY_CNT; i++) {
+ rtw89_mac_send_rpwm(rtwdev, state, false);
+ ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret,
+ !ret, delay, 15000, false,
+ rtwdev, state);
+ if (!ret)
+ break;
+
+ if (i == RPWM_TRY_CNT - 1)
+ rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
+ enter ? "entering" : "leaving");
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "%d time firmware failed to ack for %s ps mode\n",
+ i + 1, enter ? "entering" : "leaving");
+ }
}
void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
@@ -1081,7 +1099,6 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_pwr_cfg * const *cfg_seq;
int (*cfg_func)(struct rtw89_dev *rtwdev);
- struct rtw89_hal *hal = &rtwdev->hal;
int ret;
u8 val;
@@ -1113,7 +1130,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
- hal->current_channel = 0;
+ rtw89_set_entity_state(rtwdev, false);
}
return 0;
@@ -1207,8 +1224,8 @@ static int chip_func_en(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id == RTL8852A)
- rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0,
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
B_AX_OCP_L1_MASK);
return 0;
@@ -1239,6 +1256,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
/* DLFW */
.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
+ /* PCIE 64 */
+ .wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+ /* DLFW */
+ .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
/* 8852C DLFW */
.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
/* 8852C PCIE SCC */
@@ -1247,6 +1268,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
+ /* PCIE 64 */
+ .ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
+ /* DLFW */
+ .ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
/* 8852C DLFW */
.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
/* 8852C PCIE SCC */
@@ -1255,6 +1280,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt0 = {3792, 196, 0, 107,},
/* DLFW */
.wde_qt4 = {0, 0, 0, 0,},
+ /* PCIE 64 */
+ .wde_qt6 = {448, 48, 0, 16,},
/* 8852C DLFW */
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
@@ -1265,6 +1292,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
/* DLFW */
.ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
+ /* PCIE 64 */
+ .ple_qt18 = {147, 0, 16, 20, 17, 13, 89, 0, 32, 14, 8, 0,},
/* DLFW 52C */
.ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
/* DLFW 52C */
@@ -1273,6 +1302,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
/* 8852C PCIE SCC */
.ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
+ /* PCIE 64 */
+ .ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
};
EXPORT_SYMBOL(rtw89_mac_size);
@@ -1307,6 +1338,17 @@ static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
}
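+/* Descriptive comment (editorial): in SCC mode, dle_scc_rsvd_size bytes of
+ * the FIFO are reserved and not covered by the DLE quota, so subtract them
+ * from the expected used size.
+ */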
+static u32 dle_expected_used_size(struct rtw89_dev *rtwdev,
+ enum rtw89_qta_mode mode)
+{
+ u32 size = rtwdev->chip->fifo_size;
+
+ if (mode == RTW89_QTA_SCC)
+ size -= rtwdev->chip->dle_scc_rsvd_size;
+
+ return size;
+}
+
static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
{
if (enable)
@@ -1474,7 +1516,8 @@ static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
+ dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
ret = -EINVAL;
goto error;
@@ -1734,7 +1777,7 @@ static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32(rtwdev, reg, val);
ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR),
- 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR);
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, reg);
if (ret) {
rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n");
return ret;
@@ -1747,13 +1790,19 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 ret;
u32 reg;
+ u32 val;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
return ret;
reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
+ SIFS_MACTXEN_T1_V1);
+ else
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
+ SIFS_MACTXEN_T1);
if (rtwdev->chip->chip_id == RTL8852B) {
reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx);
@@ -1764,7 +1813,16 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN);
reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US);
+ if (rtwdev->chip->chip_id == RTL8852C) {
+ val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
+ B_AX_TX_PARTIAL_MODE);
+ if (!val)
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+ } else {
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+ }
return 0;
}
@@ -1910,7 +1968,7 @@ static int nav_ctrl_init(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN |
B_AX_WMAC_TF_UP_NAV_EN |
B_AX_WMAC_NAV_UPPER_EN);
- rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_12MS);
+ rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_25MS);
return 0;
}
@@ -1953,6 +2011,8 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs;
u32 reg, val, sifs;
int ret;
@@ -1983,6 +2043,11 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN);
+ reg = rtw89_mac_reg_by_idx(rrsr->ref_rate.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data);
+ reg = rtw89_mac_reg_by_idx(rrsr->rsc.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data);
+
return 0;
}
@@ -2061,6 +2126,7 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -2075,6 +2141,11 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
rtw89_write32(rtwdev, reg, val);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_RRSR1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
+ }
+
return 0;
}
@@ -2134,6 +2205,25 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
+static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg;
+ int ret;
+
+ if (chip_id != RTL8852A && chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXDMA_CTRL_0, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE);
+
+ return 0;
+}
+
static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
int ret;
@@ -2209,6 +2299,12 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = cmac_dma_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret);
+ return ret;
+ }
+
return ret;
}
@@ -2236,23 +2332,42 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_mac_c2h_info c2h_info = {0};
- struct rtw89_c2h_phy_cap *cap =
- (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0];
+ u8 tx_nss;
+ u8 rx_nss;
+ u8 tx_ant;
+ u8 rx_ant;
u32 ret;
ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
if (ret)
return ret;
- hal->tx_nss = cap->tx_nss ?
- min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss;
- hal->rx_nss = cap->rx_nss ?
- min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss;
+ tx_nss = RTW89_GET_C2H_PHYCAP_TX_NSS(c2h_info.c2hreg);
+ rx_nss = RTW89_GET_C2H_PHYCAP_RX_NSS(c2h_info.c2hreg);
+ tx_ant = RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(c2h_info.c2hreg);
+ rx_ant = RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(c2h_info.c2hreg);
+
+ hal->tx_nss = tx_nss ? min_t(u8, tx_nss, chip->tx_nss) : chip->tx_nss;
+ hal->rx_nss = rx_nss ? min_t(u8, rx_nss, chip->rx_nss) : chip->rx_nss;
+
+ if (tx_ant == 1)
+ hal->antenna_tx = RF_B;
+ if (rx_ant == 1)
+ hal->antenna_rx = RF_B;
+
+ if (tx_nss == 1 && tx_ant == 2 && rx_ant == 2) {
+ hal->antenna_tx = RF_B;
+ hal->tx_path_diversity = true;
+ }
rtw89_debug(rtwdev, RTW89_DBG_FW,
"phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n",
- hal->tx_nss, cap->tx_nss, chip->tx_nss,
- hal->rx_nss, cap->rx_nss, chip->rx_nss);
+ hal->tx_nss, tx_nss, chip->tx_nss,
+ hal->rx_nss, rx_nss, chip->rx_nss);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n",
+ tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx);
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity);
return 0;
}
@@ -2429,8 +2544,7 @@ int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
-static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
- bool wd)
+u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd)
{
u32 val, reg;
int ret;
@@ -2450,9 +2564,8 @@ static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val);
}
-static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
- struct rtw89_cpuio_ctrl *ctrl_para,
- bool wd)
+int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd)
{
u32 val, cmd_type, reg;
int ret;
@@ -2517,7 +2630,8 @@ static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
return -EINVAL;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
+ dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
return -EINVAL;
}
@@ -2766,7 +2880,7 @@ static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev)
{
const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
- rtw89_write32_set(rtwdev, R_AX_BBRPT_COM_ERR_IMR,
+ rtw89_write32_set(rtwdev, imr->bbrpt_com_err_imr_reg,
B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN);
rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
B_AX_BBRPT_CHINFO_IMR_CLR);
@@ -3026,6 +3140,8 @@ static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_H2C, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_C2H, 0);
rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
@@ -3103,14 +3219,6 @@ dle:
return ret;
}
-static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
- B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
-}
-
int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
@@ -3124,7 +3232,7 @@ int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_mac_enable_bb_rf);
-void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -3132,6 +3240,8 @@ void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+
+ return 0;
}
EXPORT_SYMBOL(rtw89_mac_disable_bb_rf);
@@ -3147,7 +3257,7 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_mac_hci_func_en(rtwdev);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
ret = rtw89_mac_dmac_pre_init(rtwdev);
if (ret)
@@ -3524,6 +3634,26 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
BCN_ERLY_DEF);
}
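+/* Descriptive comment (editorial): RTL8852C only - program a signed
+ * one-unit TBTT shift offset for STA and P2P-client ports.
+ */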
+static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u16 val;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ return;
+
+ val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
+ B_AX_TBTT_SHIFT_OFST_SIGN;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_shift,
+ B_AX_TBTT_SHIFT_OFST_MASK, val);
+}
+
int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
@@ -3598,6 +3728,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
@@ -3607,6 +3738,50 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
}
+static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *data)
+{
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+ bool *tolerated = data;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+ if (!elem || elem->datalen <= 10 ||
+ !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+ *tolerated = false;
+ rcu_read_unlock();
+}
+
+void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_hw *hw = rtwdev->hw;
+ bool tolerated = true;
+ u32 reg;
+
+ if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ rtw89_mac_check_he_obss_narrow_bw_ru_iter,
+ &tolerated);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ if (tolerated)
+ rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ else
+ rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+}
+
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
@@ -3655,22 +3830,26 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
u32 len)
{
struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
- struct rtw89_hal *hal = &rtwdev->hal;
- u8 reason, status, tx_fail, band;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ struct rtw89_chan new;
+ u8 reason, status, tx_fail, band, actual_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx;
u16 chan;
+ int ret;
tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
+ actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d\n",
- band, chan, reason, status, tx_fail);
+ "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ band, chan, reason, status, tx_fail, actual_period);
switch (reason) {
case RTW89_SCAN_LEAVE_CH_NOTIFY:
@@ -3678,15 +3857,20 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
ieee80211_stop_queues(rtwdev->hw);
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
- rtw89_hw_scan_complete(rtwdev, vif, false);
+ if (rtwvif && rtwvif->scan_req &&
+ last_chan < rtwvif->scan_req->n_channels) {
+ ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
+ }
+ } else {
+ rtw89_hw_scan_complete(rtwdev, vif, false);
+ }
break;
case RTW89_SCAN_ENTER_CH_NOTIFY:
- hal->prev_band_type = hal->current_band_type;
- hal->current_band_type = band;
- hal->prev_primary_channel = hal->current_primary_channel;
- hal->current_primary_channel = chan;
- hal->current_channel = chan;
- hal->current_band_width = RTW89_CHANNEL_WIDTH_20;
+ rtw89_chan_create(&new, chan, chan, band, RTW89_CHANNEL_WIDTH_20);
+ rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_store_op_chan(rtwdev, false);
ieee80211_wake_queues(rtwdev->hw);
@@ -3738,6 +3922,12 @@ rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
{
}
+static void
+rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3747,6 +3937,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
+ [RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt,
};
static
@@ -4628,3 +4819,48 @@ int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
return 0;
}
+
+static
+void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
+{
+ static const enum rtw89_pkt_drop_sel sels[] = {
+ RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ };
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_pkt_drop_params params = {0};
+ int i;
+
+ params.mac_band = RTW89_MAC_0;
+ params.macid = rtwsta->mac_id;
+ params.port = rtwvif->port;
+ params.mbssid = 0;
+ params.tf_trs = rtwvif->trigger;
+
+ for (i = 0; i < ARRAY_SIZE(sels); i++) {
+ params.sel = sels[i];
+ rtw89_fw_h2c_pkt_drop(rtwdev, &params);
+ }
+}
+
+static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif *target = data;
+
+ if (rtwvif != target)
+ return;
+
+ rtw89_mac_pkt_drop_sta(rtwdev, rtwsta);
+}
+
+void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_mac_pkt_drop_vif_iter,
+ rtwvif);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index f66619354734..6f4ada1869a1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -6,11 +6,13 @@
#define __RTW89_MAC_H__
#include "core.h"
+#include "reg.h"
#define MAC_MEM_DUMP_PAGE_SIZE 0x40000
#define ADDR_CAM_ENT_SIZE 0x40
#define BSSID_CAM_ENT_SIZE 0x08
#define HFC_PAGE_UNIT 64
+#define RPWM_TRY_CNT 3
enum rtw89_mac_hwmod_sel {
RTW89_DMAC_SEL = 0,
@@ -304,6 +306,7 @@ enum rtw89_mac_c2h_ofld_func {
RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
RTW89_MAC_C2H_FUNC_BCN_RESEND,
RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+ RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT = 0x6,
RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9,
RTW89_MAC_C2H_FUNC_OFLD_MAX,
};
@@ -688,23 +691,30 @@ struct rtw89_mac_size_set {
const struct rtw89_hfc_prec_cfg hfc_preccfg_pcie;
const struct rtw89_dle_size wde_size0;
const struct rtw89_dle_size wde_size4;
+ const struct rtw89_dle_size wde_size6;
+ const struct rtw89_dle_size wde_size9;
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
const struct rtw89_dle_size ple_size0;
const struct rtw89_dle_size ple_size4;
+ const struct rtw89_dle_size ple_size6;
+ const struct rtw89_dle_size ple_size8;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
const struct rtw89_wde_quota wde_qt0;
const struct rtw89_wde_quota wde_qt4;
+ const struct rtw89_wde_quota wde_qt6;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
const struct rtw89_ple_quota ple_qt4;
const struct rtw89_ple_quota ple_qt5;
const struct rtw89_ple_quota ple_qt13;
+ const struct rtw89_ple_quota ple_qt18;
const struct rtw89_ple_quota ple_qt44;
const struct rtw89_ple_quota ple_qt45;
const struct rtw89_ple_quota ple_qt46;
const struct rtw89_ple_quota ple_qt47;
+ const struct rtw89_ple_quota ple_qt58;
};
extern const struct rtw89_mac_size_set rtw89_mac_size;
@@ -798,9 +808,11 @@ int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
-void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev)
{
@@ -809,11 +821,11 @@ static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev)
return chip->ops->enable_bb_rf(rtwdev);
}
-static inline void rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
+static inline int rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- chip->ops->disable_bb_rf(rtwdev);
+ return chip->ops->disable_bb_rf(rtwdev);
}
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
@@ -911,6 +923,45 @@ static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev,
return 0;
}
+static inline void rtw89_mac_ctrl_hci_dma_tx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN);
+}
+
+static inline void rtw89_mac_ctrl_hci_dma_rx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_RXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_RXDMA_EN);
+}
+
+static inline void rtw89_mac_ctrl_hci_dma_trx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+}
+
int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool resume, u32 tx_time);
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
@@ -944,8 +995,10 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_HIGH_ADDR_MASK GENMASK(2, 0)
XTAL_SI_READ_VAL = 0x7A,
XTAL_SI_WL_RFC_S0 = 0x80,
+#define XTAL_SI_RF00S_EN GENMASK(2, 0)
#define XTAL_SI_RF00 BIT(0)
XTAL_SI_WL_RFC_S1 = 0x81,
+#define XTAL_SI_RF10S_EN GENMASK(2, 0)
#define XTAL_SI_RF10 BIT(0)
XTAL_SI_ANAPAR_WL = 0x90,
#define XTAL_SI_SRAM2RFC BIT(7)
@@ -962,5 +1015,9 @@ enum rtw89_mac_xtal_si_offset {
int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd);
+int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index cef27e781ae2..a296bfa8188f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -12,6 +13,7 @@
#include "reg.h"
#include "sar.h"
#include "ser.h"
+#include "util.h"
static void rtw89_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -85,8 +87,11 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
+ &hw->conf.chandef);
rtw89_set_channel(rtwdev);
+ }
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
(hw->conf.flags & IEEE80211_CONF_IDLE))
@@ -104,6 +109,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
int ret = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n",
+ vif->addr, vif->type, vif->p2p);
+
mutex_lock(&rtwdev->mutex);
rtwvif->rtwdev = rtwdev;
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
@@ -146,6 +154,9 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n",
+ vif->addr, vif->type, vif->p2p);
+
cancel_work_sync(&rtwvif->update_beacon_work);
mutex_lock(&rtwdev->mutex);
@@ -157,6 +168,23 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype type, bool p2p)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+ vif->addr, vif->type, type, vif->p2p, p2p);
+
+ rtw89_ops_remove_interface(hw, vif);
+
+ vif->type = type;
+ vif->p2p = p2p;
+
+ return rtw89_ops_add_interface(hw, vif);
+}
+
static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -235,11 +263,12 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 aifsn)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 slot_time;
u8 sifs;
slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
- sifs = rtwdev->hal.current_band_type == RTW89_BAND_5G ? 16 : 10;
+ sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
return aifsn * slot_time + sifs;
}
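
A quick worked example of the formula above (illustration only), using AIFSN = 2:

	AIFS = aifsn * slot_time + sifs
	     = 2 * 9 us  + 16 us = 34 us   (5 GHz, short slot)
	     = 2 * 20 us + 10 us = 50 us   (2.4 GHz, long slot)
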
@@ -350,6 +379,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, vif);
rtw89_store_op_chan(rtwdev, true);
} else {
/* Abort ongoing scan if cancel_scan isn't issued
@@ -378,6 +408,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MU_GROUPS)
rtw89_mac_bf_set_gid_table(rtwdev, vif, conf);
+ if (changed & BSS_CHANGED_P2P_PS)
+ rtw89_process_p2p_ps(rtwdev, vif);
+
mutex_unlock(&rtwdev->mutex);
}
@@ -605,6 +638,20 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
+static
+void __rtw89_drop_packets(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif;
+
+ if (vif) {
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ } else {
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ }
+}
+
static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -613,7 +660,12 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
rtw89_hci_flush_queues(rtwdev, queues, drop);
- rtw89_mac_flush_txq(rtwdev, queues, drop);
+
+ if (drop && RTW89_CHK_FW_FEATURE(PACKET_DROP, &rtwdev->fw))
+ __rtw89_drop_packets(rtwdev, vif);
+ else
+ rtw89_mac_flush_txq(rtwdev, queues, drop);
+
mutex_unlock(&rtwdev->mutex);
}
@@ -629,7 +681,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
- if (vif != br_data->vif)
+ if (vif != br_data->vif || vif->p2p)
return;
rtwsta->use_cfg_mask = true;
@@ -669,12 +721,13 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
- if (rx_ant != hw->wiphy->available_antennas_rx)
+ if (rx_ant != hw->wiphy->available_antennas_rx && rx_ant != hal->antenna_rx)
return -EINVAL;
mutex_lock(&rtwdev->mutex);
hal->antenna_tx = tx_ant;
hal->antenna_rx = rx_ant;
+ hal->tx_path_diversity = false;
mutex_unlock(&rtwdev->mutex);
return 0;
@@ -772,6 +825,97 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
rtw89_phy_ra_updata_sta(rtwdev, sta, changed);
}
+static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_chanctx_ops_add(rtwdev, ctx);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_remove(rtwdev, ctx);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_change(rtwdev, ctx, changed);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif, ctx);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif, ctx);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_set_tid_config_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct cfg80211_tid_config *tid_config = data;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwvif->rtwdev;
+
+ rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+}
+
+static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ if (sta)
+ rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+ else
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_set_tid_config_iter,
+ tid_config);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -779,6 +923,7 @@ const struct ieee80211_ops rtw89_ops = {
.stop = rtw89_ops_stop,
.config = rtw89_ops_config,
.add_interface = rtw89_ops_add_interface,
+ .change_interface = rtw89_ops_change_interface,
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
.bss_info_changed = rtw89_ops_bss_info_changed,
@@ -800,7 +945,13 @@ const struct ieee80211_ops rtw89_ops = {
.reconfig_complete = rtw89_ops_reconfig_complete,
.hw_scan = rtw89_ops_hw_scan,
.cancel_hw_scan = rtw89_ops_cancel_hw_scan,
+ .add_chanctx = rtw89_ops_add_chanctx,
+ .remove_chanctx = rtw89_ops_remove_chanctx,
+ .change_chanctx = rtw89_ops_change_chanctx,
+ .assign_vif_chanctx = rtw89_ops_assign_vif_chanctx,
+ .unassign_vif_chanctx = rtw89_ops_unassign_vif_chanctx,
.set_sar_specs = rtw89_ops_set_sar_specs,
.sta_rc_update = rtw89_ops_sta_rc_update,
+ .set_tid_config = rtw89_ops_set_tid_config,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index c68fec9eb5a6..5f8e19639362 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -169,6 +169,23 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
+ const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
+
+ if (enable) {
+ rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
+ if (dma_stop2->addr)
+ rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
+ } else {
+ rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
+ if (dma_stop2->addr)
+ rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
+ }
+}
+
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
struct sk_buff *new,
@@ -760,7 +777,8 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
enable_intr:
spin_lock_irqsave(&rtwpci->irq_lock, flags);
- rtw89_chip_enable_intr(rtwdev, rtwpci);
+ if (likely(rtwpci->running))
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return IRQ_HANDLED;
}
@@ -925,10 +943,12 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ cnt = min(cnt, wd_ring->curr_num);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
@@ -1073,12 +1093,15 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
bool drop)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
u8 i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
/* It may be unnecessary to flush FWCMD queue. */
if (i == RTW89_TXCH_CH12)
continue;
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
if (txchs & BIT(i))
__pci_flush_txch(rtwdev, i, drop);
@@ -1357,6 +1380,7 @@ static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
@@ -1368,6 +1392,9 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
+
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = &bd_ram_table[i];
@@ -1411,12 +1438,15 @@ static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
int txch;
rtw89_pci_reset_trx_rings(rtwdev);
spin_lock_bh(&rtwpci->trx_lock);
for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
+ if (info->tx_dma_ch_mask & BIT(txch))
+ continue;
if (txch == RTW89_TXCH_CH12) {
rtw89_pci_release_fwcmd(rtwdev, rtwpci,
skb_queue_len(&rtwpci->h2c_queue), true);
@@ -1604,33 +1634,41 @@ static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
writel(data, rtwpci->mmap + addr);
}
-static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
const struct rtw89_pci_info *info = rtwdev->pci_info;
- u32 txhci_en = info->txhci_en_bit;
- u32 rxhci_en = info->rxhci_en_bit;
- if (enable) {
- if (chip_id != RTL8852C)
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
- B_AX_STOP_PCIEIO);
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- txhci_en | rxhci_en);
- if (chip_id == RTL8852C)
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_STOP_AXI_MST);
+ if (enable)
+ rtw89_write32_set(rtwdev, info->init_cfg_reg,
+ info->rxhci_en_bit | info->txhci_en_bit);
+ else
+ rtw89_write32_clr(rtwdev, info->init_cfg_reg,
+ info->rxhci_en_bit | info->txhci_en_bit);
+}
+
+static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg, mask;
+
+ if (chip_id == RTL8852C) {
+ reg = R_AX_HAXI_INIT_CFG1;
+ mask = B_AX_STOP_AXI_MST;
} else {
- if (chip_id != RTL8852C)
- rtw89_write32_set(rtwdev, info->dma_stop1_reg,
- B_AX_STOP_PCIEIO);
- else
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_STOP_AXI_MST);
- if (chip_id == RTL8852C)
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_STOP_AXI_MST);
+ reg = R_AX_PCIE_DMA_STOP1;
+ mask = B_AX_STOP_PCIEIO;
}
+
+ if (enable)
+ rtw89_write32_clr(rtwdev, reg, mask);
+ else
+ rtw89_write32_set(rtwdev, reg, mask);
+}
+
+static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_pci_ctrl_dma_io(rtwdev, enable);
+ rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}
static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
@@ -1836,6 +1874,18 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate
return 0;
}
+static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
+ PCIE_AUTOK_4, PCIE_PHY_GEN1);
+ return ret;
+}
+
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
enum rtw89_pcie_phy phy_rate;
@@ -2049,7 +2099,7 @@ static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id != RTL8852A)
+ if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
return;
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
@@ -2234,19 +2284,19 @@ static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
- u32 dma_busy1 = info->dma_busy1_reg;
+ u32 dma_busy1 = info->dma_busy1.addr;
u32 dma_busy2 = info->dma_busy2_reg;
- check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY |
- B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY |
- B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY |
- B_AX_CH9_BUSY | B_AX_CH12_BUSY;
+ check = info->dma_busy1.mask;
ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
10, 100, false, rtwdev, dma_busy1);
if (ret)
return ret;
+ if (!dma_busy2)
+ return 0;
+
check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
@@ -2414,6 +2464,12 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
rtw89_pci_hci_ldo(rtwdev);
rtw89_pci_dphy_delay(rtwdev);
+ ret = rtw89_pci_autok_x(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
+ return ret;
+ }
+
ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
@@ -2432,7 +2488,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
rtw89_pci_set_dbg(rtwdev);
rtw89_pci_set_keep_reg(rtwdev);
- rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA);
+ rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
/* stop DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, false);
@@ -2455,10 +2511,9 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return ret;
}
- /* enable FW CMD queue to download firmware */
- rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12);
- rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
+ /* disable all channels except the FW CMD channel to download firmware */
+ rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
+ rtw89_write32_clr(rtwdev, info->dma_stop1.addr, B_AX_STOP_CH12);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -2486,15 +2541,15 @@ int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
if (rtw89_pci_ltr_is_err_reg_val(val))
return -EINVAL;
- rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
- rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
+ B_AX_LTR_WD_NOEMP_CHK);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
PCI_LTR_SPC_500US);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
- PCI_LTR_IDLE_TIMER_800US);
+ PCI_LTR_IDLE_TIMER_3_2MS);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
- rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
+ rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
return 0;
@@ -2571,11 +2626,10 @@ static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
}
/* enable DMA for all queues */
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
+ rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
/* Release PCI IO */
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
+ rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
return 0;
@@ -2696,10 +2750,13 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
tx_ring = &rtwpci->tx_rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
@@ -2887,6 +2944,7 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
u32 desc_size;
u32 len;
@@ -2894,6 +2952,8 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
int ret;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
tx_ring = &rtwpci->tx_rings[i];
desc_size = sizeof(struct rtw89_pci_tx_bd_32);
len = RTW89_PCI_TXBD_NUM_MAX;
@@ -3219,8 +3279,79 @@ static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
pci_free_irq_vectors(pdev);
}
+static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
+{
+ u16 bin = 0, gray_bit;
+ u32 bit_idx;
+
+ for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
+ gray_bit = (gray_code >> bit_idx) & 0x1;
+ if (bit_num - bit_idx > 1)
+ gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
+ bin |= (gray_bit << bit_idx);
+ }
+
+ return bin;
+}
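
A userspace sketch (not part of the patch) that exercises the same per-bit transform as gray_code_to_bin() above: each output bit is the input bit XORed with its next-higher neighbour, and the most significant bit passes through unchanged.

#include <stdint.h>
#include <stdio.h>

static uint16_t ex_gray_to_bin(uint16_t gray, unsigned int bit_num)
{
	uint16_t bin = 0;
	unsigned int i;

	for (i = 0; i < bit_num; i++) {
		uint16_t bit = (gray >> i) & 0x1;

		if (bit_num - i > 1)
			bit ^= (gray >> (i + 1)) & 0x1;
		bin |= bit << i;
	}

	return bin;
}

int main(void)
{
	/* 0b0110 with bit_num = 4 -> 0b0101 */
	printf("0x%x\n", ex_gray_to_bin(0x6, 4));
	return 0;
}
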
+
+static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 val16, filter_out_val;
+ u32 val, phy_offset;
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return 0;
+
+ val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
+ if (val == B_AX_ASPM_CTRL_L1)
+ return 0;
+
+ ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
+ if (val == RTW89_PCIE_GEN1_SPEED) {
+ phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ } else if (val == RTW89_PCIE_GEN2_SPEED) {
+ phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
+ val16 | B_PCIE_BIT_PINOUT_DIS);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
+ val16 & ~B_PCIE_BIT_RD_SEL);
+
+ val16 = rtw89_read16_mask(rtwdev,
+ phy_offset + RAC_ANA1F * RAC_MULT,
+ FILTER_OUT_EQ_MASK);
+ val16 = gray_code_to_bin(val16, hweight16(val16));
+ filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
+ RAC_MULT);
+ filter_out_val &= ~REG_FILTER_OUT_MASK;
+ filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
+
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
+ filter_out_val);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
+ B_BAC_EQ_SEL);
+ rtw89_write16_set(rtwdev,
+ R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
+ B_PCIE_BIT_PSAVE);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
+ B_PCIE_BIT_PSAVE);
+
+ return 0;
+}
+
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
if (rtw89_pci_disable_clkreq)
@@ -3231,19 +3362,33 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (enable)
- ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
- else
- ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
- if (ret)
- rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
- enable ? "set" : "unset", ret);
+ if (chip_id == RTL8852A) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
+ enable ? "set" : "unset", ret);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
+ B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
+ B_AX_CLK_REQ_N);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
+ B_AX_CLK_REQ_N);
+ }
}
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u8 value = 0;
int ret;
@@ -3262,12 +3407,23 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
- if (enable)
- ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
- else
- ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ } else if (chip_id == RTL8852C) {
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_ASPM_CTRL_L1);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_ASPM_CTRL_L1);
+ }
if (ret)
rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -3328,17 +3484,34 @@ static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (enable)
- ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
- else
- ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
- if (ret)
- rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
- enable ? "set" : "unset", ret);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
+ enable ? "set" : "unset", ret);
+ } else if (chip_id == RTL8852C) {
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
+ RTW89_PCIE_BIT_ASPM_L11 |
+ RTW89_PCIE_BIT_PCI_L11);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
+ if (enable)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_L1SUB_DISABLE);
+ else
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_L1SUB_DISABLE);
+ }
}
static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
@@ -3360,26 +3533,6 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
rtw89_pci_l1ss_set(rtwdev, true);
}
-static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
-{
- const struct rtw89_pci_info *info = rtwdev->pci_info;
- u32 val32;
-
- if (en == MAC_AX_FUNC_EN) {
- val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32);
-
- val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
- } else {
- val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32);
-
- val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
- }
-}
-
static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
{
int ret = 0;
@@ -3399,10 +3552,13 @@ static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
{
- u32 val, dma_rst = 0;
+ u32 val;
int ret;
- rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ rtw89_pci_ctrl_dma_all(rtwdev, false);
ret = rtw89_pci_poll_io_idle(rtwdev);
if (ret) {
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
@@ -3410,12 +3566,10 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
"[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
R_AX_DBG_ERR_FLAG, val);
if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
- dma_rst |= B_AX_HCI_TXDMA_EN;
+ rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
if (val & B_AX_RX_STUCK)
- dma_rst |= B_AX_HCI_RXDMA_EN;
- val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
- rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
- rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
+ rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
ret = rtw89_pci_poll_io_idle(rtwdev);
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
rtw89_debug(rtwdev, RTW89_DBG_HCI,
@@ -3426,18 +3580,7 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
return ret;
}
-static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
-{
- u32 val32;
- if (en == MAC_AX_FUNC_EN) {
- val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
- rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
- } else {
- val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
- rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
- }
-}
static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
{
@@ -3457,15 +3600,18 @@ static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
{
u32 ret;
- rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
- rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
rtw89_pci_clr_idx_all(rtwdev);
ret = rtw89_pci_rst_bdram(rtwdev);
if (ret)
return ret;
- rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
+ rtw89_pci_ctrl_dma_all(rtwdev, true);
return ret;
}
@@ -3535,14 +3681,20 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw89_dev *rtwdev = hw->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ } else {
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
+ }
return 0;
}
@@ -3563,15 +3715,24 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw89_dev *rtwdev = hw->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ } else {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_SEL_REQ_ENTR_L1);
+ }
rtw89_pci_l2_hci_ldo(rtwdev);
+ rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -3614,27 +3775,23 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
const struct rtw89_driver_info *info;
const struct rtw89_pci_info *pci_info;
- int driver_data_size;
int ret;
- driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci);
- hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops);
- if (!hw) {
+ info = (const struct rtw89_driver_info *)id->driver_data;
+
+ rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
+ sizeof(struct rtw89_pci),
+ info->chip);
+ if (!rtwdev) {
dev_err(&pdev->dev, "failed to allocate hw\n");
return -ENOMEM;
}
- info = (const struct rtw89_driver_info *)id->driver_data;
pci_info = info->bus.pci;
- rtwdev = hw->priv;
- rtwdev->hw = hw;
- rtwdev->dev = &pdev->dev;
- rtwdev->chip = info->chip;
rtwdev->pci_info = info->bus.pci;
rtwdev->hci.ops = &rtw89_pci_ops;
rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
@@ -3667,6 +3824,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_clear_resource;
}
+ rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -3696,7 +3854,7 @@ err_declaim_pci:
err_core_deinit:
rtw89_core_deinit(rtwdev);
err_release_hw:
- ieee80211_free_hw(hw);
+ rtw89_free_ieee80211_hw(rtwdev);
return ret;
}
@@ -3715,7 +3873,7 @@ void rtw89_pci_remove(struct pci_dev *pdev)
rtw89_pci_clear_resource(rtwdev, pdev);
rtw89_pci_declaim_device(rtwdev, pdev);
rtw89_core_deinit(rtwdev);
- ieee80211_free_hw(hw);
+ rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index a118647213e3..179740607778 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -11,11 +11,21 @@
#define MDIO_PG1_G1 1
#define MDIO_PG0_G2 2
#define MDIO_PG1_G2 3
+#define RAC_CTRL_PPR 0x00
+#define RAC_ANA0A 0x0A
+#define B_BAC_EQ_SEL BIT(5)
+#define RAC_ANA0C 0x0C
+#define B_PCIE_BIT_PSAVE BIT(15)
#define RAC_ANA10 0x10
+#define B_PCIE_BIT_PINOUT_DIS BIT(3)
#define RAC_REG_REV2 0x1B
#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12)
#define PCIE_DPHY_DLY_25US 0x1
#define RAC_ANA19 0x19
+#define B_PCIE_BIT_RD_SEL BIT(2)
+#define RAC_REG_FLD_0 0x1D
+#define BAC_AUTOK_N_MASK GENMASK(3, 2)
+#define PCIE_AUTOK_4 0x3
#define RAC_ANA1F 0x1F
#define RAC_ANA24 0x24
#define B_AX_DEGLITCH GENMASK(11, 8)
@@ -45,9 +55,26 @@
#define B_AX_SEL_REQ_ENTR_L1 BIT(2)
#define B_AX_SEL_REQ_EXIT_L1 BIT(0)
+#define R_AX_PCIE_MIX_CFG_V1 0x300C
+#define B_AX_ASPM_CTRL_L1 BIT(17)
+#define B_AX_ASPM_CTRL_L0 BIT(16)
+#define B_AX_ASPM_CTRL_MASK GENMASK(17, 16)
+#define B_AX_XFER_PENDING_FW BIT(11)
+#define B_AX_XFER_PENDING BIT(10)
+#define B_AX_REQ_EXIT_L1 BIT(9)
+#define B_AX_REQ_ENTR_L1 BIT(8)
+#define B_AX_L1SUB_DISABLE BIT(0)
+
+#define R_AX_L1_CLK_CTRL 0x3010
+#define B_AX_CLK_REQ_N BIT(1)
+
#define R_AX_PCIE_BG_CLR 0x303C
#define B_AX_BG_CLR_ASYNC_M3 BIT(4)
+#define R_AX_PCIE_LAT_CTRL 0x3044
+#define B_AX_CLK_REQ_SEL_OPT BIT(1)
+#define B_AX_CLK_REQ_SEL BIT(0)
+
#define R_AX_PCIE_IO_RCY_M1 0x3100
#define B_AX_PCIE_IO_RCY_P_M1 BIT(5)
#define B_AX_PCIE_IO_RCY_WDT_P_M1 BIT(4)
@@ -88,7 +115,10 @@
#define B_AX_PCIE_WDT_TIMER_S1_MASK GENMASK(31, 0)
#define R_RAC_DIRECT_OFFSET_G1 0x3800
+#define FILTER_OUT_EQ_MASK GENMASK(14, 10)
#define R_RAC_DIRECT_OFFSET_G2 0x3880
+#define REG_FILTER_OUT_MASK GENMASK(6, 2)
+#define RAC_MULT 2
#define RTW89_PCI_WR_RETRY_CNT 20
@@ -383,6 +413,16 @@
#define B_AX_STOP_RPQ BIT(1)
#define B_AX_STOP_RXQ BIT(0)
#define B_AX_TX_STOP1_ALL GENMASK(18, 8)
+#define B_AX_TX_STOP1_MASK (B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | \
+ B_AX_STOP_ACH2 | B_AX_STOP_ACH3 | \
+ B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | \
+ B_AX_STOP_ACH6 | B_AX_STOP_ACH7 | \
+ B_AX_STOP_CH8 | B_AX_STOP_CH9 | \
+ B_AX_STOP_CH12)
+#define B_AX_TX_STOP1_MASK_V1 (B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | \
+ B_AX_STOP_ACH2 | B_AX_STOP_ACH3 | \
+ B_AX_STOP_CH8 | B_AX_STOP_CH9 | \
+ B_AX_STOP_CH12)
#define R_AX_PCIE_DMA_STOP2 0x1310
#define B_AX_STOP_CH11 BIT(1)
@@ -431,6 +471,13 @@
#define B_AX_ACH0_BUSY BIT(8)
#define B_AX_RPQ_BUSY BIT(1)
#define B_AX_RXQ_BUSY BIT(0)
+#define DMA_BUSY1_CHECK (B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | \
+ B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | \
+ B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | \
+ B_AX_CH9_BUSY | B_AX_CH12_BUSY)
+#define DMA_BUSY1_CHECK_V1 (B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | \
+ B_AX_ACH3_BUSY | B_AX_CH8_BUSY | B_AX_CH9_BUSY | \
+ B_AX_CH12_BUSY)
#define R_AX_PCIE_DMA_BUSY2 0x131C
#define B_AX_CH11_BUSY BIT(1)
@@ -505,6 +552,17 @@
#define RTW89_PCI_MULTITAG 8
/* PCIE CFG register */
+#define RTW89_PCIE_L1_STS_V1 0x80
+#define RTW89_BCFG_LINK_SPEED_MASK GENMASK(19, 16)
+#define RTW89_PCIE_GEN1_SPEED 0x01
+#define RTW89_PCIE_GEN2_SPEED 0x02
+#define RTW89_PCIE_PHY_RATE 0x82
+#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+#define RTW89_PCIE_L1SS_STS_V1 0x0168
+#define RTW89_PCIE_BIT_ASPM_L11 BIT(3)
+#define RTW89_PCIE_BIT_ASPM_L12 BIT(2)
+#define RTW89_PCIE_BIT_PCI_L11 BIT(1)
+#define RTW89_PCIE_BIT_PCI_L12 BIT(0)
#define RTW89_PCIE_ASPM_CTRL 0x070F
#define RTW89_L1DLY_MASK GENMASK(5, 3)
#define RTW89_L0DLY_MASK GENMASK(2, 0)
@@ -516,8 +574,7 @@
#define RTW89_PCIE_CLK_CTRL 0x0725
#define RTW89_PCIE_RST_MSTATE 0x0B48
#define RTW89_PCIE_BIT_CFG_RST_MSTATE BIT(0)
-#define RTW89_PCIE_PHY_RATE 0x82
-#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+
#define INTF_INTGRA_MINREF_V1 90
#define INTF_INTGRA_HOSTREF_V1 100
@@ -527,11 +584,6 @@ enum rtw89_pcie_phy {
PCIE_PHY_GEN1_UNDEFINE = 0x7F,
};
-enum mac_ax_func_sw {
- MAC_AX_FUNC_DIS,
- MAC_AX_FUNC_EN,
-};
-
enum rtw89_pcie_l0sdly {
PCIE_L0SDLY_1US = 0,
PCIE_L0SDLY_2US = 1,
@@ -710,14 +762,15 @@ struct rtw89_pci_info {
u32 max_tag_num_mask;
u32 rxbd_rwptr_clr_reg;
u32 txbd_rwptr_clr2_reg;
- u32 dma_stop1_reg;
- u32 dma_stop2_reg;
- u32 dma_busy1_reg;
+ struct rtw89_reg_def dma_stop1;
+ struct rtw89_reg_def dma_stop2;
+ struct rtw89_reg_def dma_busy1;
u32 dma_busy2_reg;
u32 dma_busy3_reg;
u32 rpwm_addr;
u32 cpwm_addr;
+ u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 1532c0a6bbc4..6a6bdc652e09 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -14,23 +14,14 @@
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
- const struct rate_info *txrate = &report->txrate;
u32 bit_rate = report->bit_rate;
- u8 mcs;
/* lower than ofdm, do not aggregate */
if (bit_rate < 550)
return 1;
- /* prevent hardware rate fallback to G mode rate */
- if (txrate->flags & RATE_INFO_FLAGS_MCS)
- mcs = txrate->mcs & 0x07;
- else if (txrate->flags & (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS))
- mcs = txrate->mcs;
- else
- mcs = 0;
-
- if (mcs <= 2)
+ /* avoid AMSDU for legacy rate */
+ if (report->might_fallback_legacy)
return 1;
/* lower than 20M vht 2ss mcs8, make it small */
@@ -142,8 +133,8 @@ static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
- struct rtw89_hal *hal = &rtwdev->hal;
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
enum nl80211_band band;
u64 cfg_mask;
@@ -151,7 +142,7 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
if (!rtwsta->use_cfg_mask)
return -1;
- switch (hal->current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
band = NL80211_BAND_2GHZ;
cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
@@ -168,7 +159,7 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
RA_MASK_OFDM_RATES);
break;
default:
- rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
+ rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
return -1;
}
@@ -202,6 +193,40 @@ static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
+static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ bool *fix_giltf_en, u8 *fix_giltf)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
+ u8 band = chan->band_type;
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ u8 he_gi = mask->control[nl_band].he_gi;
+ u8 he_ltf = mask->control[nl_band].he_ltf;
+
+ if (!rtwsta->use_cfg_mask)
+ return;
+
+ if (he_ltf == 2 && he_gi == 2) {
+ *fix_giltf = RTW89_GILTF_LGI_4XHE32;
+ } else if (he_ltf == 2 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_SGI_4XHE08;
+ } else if (he_ltf == 1 && he_gi == 1) {
+ *fix_giltf = RTW89_GILTF_2XHE16;
+ } else if (he_ltf == 1 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_2XHE08;
+ } else if (he_ltf == 0 && he_gi == 1) {
+ *fix_giltf = RTW89_GILTF_1XHE16;
+ } else if (he_ltf == 0 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_1XHE08;
+ } else {
+ *fix_giltf_en = false;
+ return;
+ }
+
+ *fix_giltf_en = true;
+}
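
For reference, a sketch (not part of the patch) of the same (he_ltf, he_gi) -> GI/LTF mapping expressed as a lookup table; the RTW89_GILTF_* values are the ones used in the function above, and any pair not listed leaves fix_giltf_en false.

struct ex_giltf_map {
	u8 he_ltf;
	u8 he_gi;
	u8 giltf;
};

static const struct ex_giltf_map ex_giltf_tbl[] = {
	{2, 2, RTW89_GILTF_LGI_4XHE32},
	{2, 0, RTW89_GILTF_SGI_4XHE08},
	{1, 1, RTW89_GILTF_2XHE16},
	{1, 0, RTW89_GILTF_2XHE08},
	{0, 1, RTW89_GILTF_1XHE16},
	{0, 0, RTW89_GILTF_1XHE08},
};

static bool ex_lookup_giltf(u8 he_ltf, u8 he_gi, u8 *giltf)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ex_giltf_tbl); i++) {
		if (ex_giltf_tbl[i].he_ltf == he_ltf &&
		    ex_giltf_tbl[i].he_gi == he_gi) {
			*giltf = ex_giltf_tbl[i].giltf;
			return true;
		}
	}

	return false;	/* caller keeps fix_giltf_en = false */
}
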
+
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, bool csi)
{
@@ -209,6 +234,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
struct rtw89_ra_info *ra = &rtwsta->ra;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
u64 ra_mask = 0;
@@ -218,8 +245,10 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
u8 bw_mode = 0;
u8 stbc_en = 0;
u8 ldpc_en = 0;
+ u8 fix_giltf = 0;
u8 i;
bool sgi = false;
+ bool fix_giltf_en = false;
memset(ra, 0, sizeof(*ra));
/* Set the ra mask from sta's capability */
@@ -234,6 +263,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, &fix_giltf_en, &fix_giltf);
} else if (sta->deflink.vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
@@ -260,13 +290,13 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ldpc_en = 1;
}
- switch (rtwdev->hal.current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
- if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] <= 0xf)
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
mode |= RTW89_RA_MODE_CCK;
- else
- mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
+ mode |= RTW89_RA_MODE_OFDM;
break;
case RTW89_BAND_5G:
ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
@@ -329,7 +359,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
ra->dcm_cap = 1;
- if (rate_pattern->enable) {
+ if (rate_pattern->enable && !vif->p2p) {
ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
ra_mask &= rate_pattern->ra_mask;
mode = rate_pattern->ra_mode;
@@ -343,6 +373,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
ra->en_sgi = sgi;
ra->ra_mask = ra_mask;
+ ra->fix_giltf_en = fix_giltf_en;
+ ra->fix_giltf = fix_giltf;
if (!csi)
return;
@@ -416,6 +448,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_supported_band *sband;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern next_pattern = {0};
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u16 hw_rate_he[] = {RTW89_HW_RATE_HE_NSS1_MCS0,
RTW89_HW_RATE_HE_NSS2_MCS0,
RTW89_HW_RATE_HE_NSS3_MCS0,
@@ -428,7 +461,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
RTW89_HW_RATE_MCS8,
RTW89_HW_RATE_MCS16,
RTW89_HW_RATE_MCS24};
- u8 band = rtwdev->hal.current_band_type;
+ u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
u8 tx_nss = rtwdev->hal.tx_nss;
u8 i;
@@ -542,12 +575,12 @@ void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
}
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw)
{
- enum rtw89_bandwidth cbw = param->bandwidth;
- u8 pri_ch = param->primary_chan;
- u8 central_ch = param->center_chan;
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
u8 txsc_idx = 0;
u8 tmp = 0;
@@ -1468,10 +1501,9 @@ EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
(txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \
})
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
s8 *byr;
u8 idx;
@@ -1538,11 +1570,10 @@ static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
}
}
-s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 band = rtwdev->hal.current_band_type;
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt = 0, sar;
@@ -1578,11 +1609,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
-#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
+#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch) \
do { \
u8 __i; \
for (__i = 0; __i < RTW89_BF_NUM; __i++) \
ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \
+ band, \
bw, ntx, \
rs, __i, \
(ch)); \
@@ -1590,64 +1622,75 @@ EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch, u8 pri_ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
- __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, pri_ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch, u8 pri_ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
u8 i;
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, pri_ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
+ RTW89_CHANNEL_WIDTH_80,
ntx, RTW89_RS_MCS, ch);
- __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
for (i = 0; i < RTW89_BF_NUM; i++)
@@ -1656,7 +1699,7 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch, u8 pri_ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
@@ -1665,60 +1708,75 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
u8 i;
/* fill ofdm section */
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, pri_ch);
/* fill mcs 20m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 14);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 10);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 10);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 14);
/* fill mcs 40m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 12);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 12);
/* fill mcs 80m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
+ RTW89_CHANNEL_WIDTH_80,
ntx, RTW89_RS_MCS, ch - 8);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
+ RTW89_CHANNEL_WIDTH_80,
ntx, RTW89_RS_MCS, ch + 8);
/* fill mcs 160m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, RTW89_CHANNEL_WIDTH_160,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
+ RTW89_CHANNEL_WIDTH_160,
ntx, RTW89_RS_MCS, ch);
/* fill mcs 40m 0p5 section */
- __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
for (i = 0; i < RTW89_BF_NUM; i++)
lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
/* fill mcs 40m 2p5 section */
- __fill_txpwr_limit_nonbf_bf(val_2p5_n, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 8);
- __fill_txpwr_limit_nonbf_bf(val_2p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 8);
for (i = 0; i < RTW89_BF_NUM; i++)
@@ -1726,37 +1784,41 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
}
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
u8 ntx)
{
- u8 pri_ch = rtwdev->hal.current_primary_channel;
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 band = chan->band_type;
+ u8 pri_ch = chan->primary_channel;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
memset(lmt, 0, sizeof(*lmt));
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch, pri_ch);
+ rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch, pri_ch);
+ rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
case RTW89_CHANNEL_WIDTH_160:
- rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, ntx, ch, pri_ch);
+ rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
}
}
EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
-static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 band = rtwdev->hal.current_band_type;
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt_ru = 0, sar;
@@ -1794,85 +1856,106 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
static void
rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch);
}
static void
rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 2);
- lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 2);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 2);
- lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 2);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 2);
- lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 2);
}
static void
rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 6);
- lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 2);
- lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 2);
- lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 6);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 6);
- lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 2);
- lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 2);
- lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 6);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 6);
- lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 2);
- lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 2);
- lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 6);
}
static void
rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
int i;
static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
- lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
RTW89_RU26,
ntx,
ch + ofst[i]);
- lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
RTW89_RU52,
ntx,
ch + ofst[i]);
- lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
RTW89_RU106,
ntx,
ch + ofst[i]);
@@ -1880,26 +1963,32 @@ rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
}
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx)
{
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
memset(lmt_ru, 0, sizeof(*lmt_ru));
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_160:
- rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
}
}
@@ -1920,6 +2009,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
u8 mode, rate, bw, giltf, mac_id;
u16 legacy_bitrate;
bool valid;
+ u8 mcs = 0;
mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data);
if (mac_id != rtwsta->mac_id)
@@ -1936,7 +2026,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
return;
}
- memset(ra_report, 0, sizeof(*ra_report));
+ memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));
switch (mode) {
case RTW89_RA_RPT_MODE_LEGACY:
@@ -1952,6 +2042,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.mcs = rate;
if (giltf)
ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ mcs = ra_report->txrate.mcs & 0x07;
break;
case RTW89_RA_RPT_MODE_VHT:
ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
@@ -1959,6 +2050,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1;
if (giltf)
ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ mcs = ra_report->txrate.mcs;
break;
case RTW89_RA_RPT_MODE_HE:
ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -1970,6 +2062,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
else
ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ mcs = ra_report->txrate.mcs;
break;
}
@@ -1977,8 +2070,9 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
- sta->max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
- rtwsta->max_agg_wait = sta->max_rc_amsdu_len / 1500 - 1;
+ ra_report->might_fallback_legacy = mcs <= 2;
+ sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
+ rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
}
static void
@@ -3247,10 +3341,11 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
- switch (rtwdev->hal.current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
dig->lna_gain = dig->lna_gain_g;
dig->tia_gain = dig->tia_gain_g;
@@ -3410,26 +3505,32 @@ static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_LNA_INIT,
- B_PATH0_LNA_INIT_IDX_MSK, lna_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_LNA_INIT,
- B_PATH1_LNA_INIT_IDX_MSK, lna_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
+ dig_regs->p0_lna_init.mask, lna_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
+ dig_regs->p1_lna_init.mask, lna_idx);
}
static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_TIA_INIT,
- B_PATH0_TIA_INIT_IDX_MSK, tia_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_TIA_INIT,
- B_PATH1_TIA_INIT_IDX_MSK, tia_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
+ dig_regs->p0_tia_init.mask, tia_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
+ dig_regs->p1_tia_init.mask, tia_idx);
}
static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_RXB_INIT,
- B_PATH0_RXB_INIT_IDX_MSK, rxb_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_RXB_INIT,
- B_PATH1_RXB_INIT_IDX_MSK, rxb_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
+ dig_regs->p0_rxb_init.mask, rxb_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
+ dig_regs->p1_rxb_init.mask, rxb_idx);
}
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
@@ -3443,21 +3544,19 @@ static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
set.lna_idx, set.tia_idx, set.rxb_idx);
}
-static const struct rtw89_reg_def sdagc_config[4] = {
- {R_PATH0_P20_FOLLOW_BY_PAGCUGC, B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH0_S20_FOLLOW_BY_PAGCUGC, B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH1_P20_FOLLOW_BY_PAGCUGC, B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH1_S20_FOLLOW_BY_PAGCUGC, B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
-};
-
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
bool enable)
{
- u8 i = 0;
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- for (i = 0; i < ARRAY_SIZE(sdagc_config); i++)
- rtw89_phy_write32_mask(rtwdev, sdagc_config[i].addr,
- sdagc_config[i].mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
+ dig_regs->p0_p20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
+ dig_regs->p0_s20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
+ dig_regs->p1_p20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
+ dig_regs->p1_s20_pagcugc_en.mask, enable);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
@@ -3483,7 +3582,9 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
bool enable)
{
- enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ enum rtw89_bandwidth cbw = chan->band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
u8 ofdm_cca_th;
@@ -3525,10 +3626,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
- rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
- pd_val);
- rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
- B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_lower_bound_mask, pd_val);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_spatial_reuse_en, enable);
if (!rtwdev->hal.support_cckpd)
return;
@@ -3604,6 +3705,62 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}
+static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool *done = data;
+ u8 rssi_a, rssi_b;
+ u32 candidate;
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
+ return;
+
+ if (*done)
+ return;
+
+ *done = true;
+
+ rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
+ rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);
+
+ if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_A;
+ else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_B;
+ else
+ return;
+
+ if (hal->antenna_tx == candidate)
+ return;
+
+ hal->antenna_tx = candidate;
+ rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);
+
+ if (hal->antenna_tx == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
+ } else if (hal->antenna_tx == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
+ }
+}
+
+void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool done = false;
+
+ if (!hal->tx_path_diversity)
+ return;
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_tx_path_div_sta_iter,
+ &done);
+}
+
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
rtw89_phy_ccx_top_setting_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index e20636f54b55..ee3bc5e111e1 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -56,7 +56,7 @@
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
-#define CFO_BOUND 32
+#define CFO_BOUND 64
#define CFO_TP_UPPER 100
#define CFO_TP_LOWER 50
#define CFO_COMP_PERIOD 250
@@ -439,7 +439,7 @@ rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl);
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
@@ -460,15 +460,17 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc);
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
u8 ntx);
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx);
-s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch);
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
@@ -489,6 +491,7 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val);
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
+void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx mac_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index a90b33720588..bf41a1141679 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -59,8 +59,11 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
-static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
+static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ if (rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ return;
+
if (!rtwdev->ps_mode)
return;
@@ -111,23 +114,23 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, mac_id);
- __rtw89_enter_ps_mode(rtwdev);
+ __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_ps_mode(rtwdev, rtwvif);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_ps_mode(rtwdev);
__rtw89_leave_lps(rtwdev, rtwvif->mac_id);
}
@@ -140,6 +143,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
+ __rtw89_leave_ps_mode(rtwdev);
+
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_leave_lps_vif(rtwdev, rtwvif);
}
@@ -178,3 +183,64 @@ void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl)
if (btc_ctrl)
rtw89_leave_lps(rtwdev);
}
+
+static void rtw89_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ enum rtw89_p2pps_action act)
+{
+ if (act == RTW89_P2P_ACT_UPDATE || act == RTW89_P2P_ACT_REMOVE)
+ return;
+
+ if (act == RTW89_P2P_ACT_INIT)
+ rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, true);
+ else if (act == RTW89_P2P_ACT_TERMINATE)
+ rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, false);
+}
+
+static void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ enum rtw89_p2pps_action act;
+ u8 noa_id;
+
+ if (rtwvif->last_noa_nr == 0)
+ return;
+
+ for (noa_id = 0; noa_id < rtwvif->last_noa_nr; noa_id++) {
+ if (noa_id == rtwvif->last_noa_nr - 1)
+ act = RTW89_P2P_ACT_TERMINATE;
+ else
+ act = RTW89_P2P_ACT_REMOVE;
+ rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+ rtw89_fw_h2c_p2p_act(rtwdev, vif, NULL, act, noa_id);
+ }
+}
+
+static void rtw89_p2p_update_noa(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_p2p_noa_desc *desc;
+ enum rtw89_p2pps_action act;
+ u8 noa_id;
+
+ for (noa_id = 0; noa_id < RTW89_P2P_MAX_NOA_NUM; noa_id++) {
+ desc = &vif->bss_conf.p2p_noa_attr.desc[noa_id];
+ if (!desc->count || !desc->duration)
+ break;
+
+ if (noa_id == 0)
+ act = RTW89_P2P_ACT_INIT;
+ else
+ act = RTW89_P2P_ACT_UPDATE;
+ rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+ rtw89_fw_h2c_p2p_act(rtwdev, vif, desc, act, noa_id);
+ }
+ rtwvif->last_noa_nr = noa_id;
+}
+
+void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ rtw89_p2p_disable_all_noa(rtwdev, vif);
+ rtw89_p2p_update_noa(rtwdev, vif);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index a184b68994aa..0feae3991623 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -5,12 +5,13 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id);
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
+void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index ebf28719d935..ca20bb024b40 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -51,9 +51,6 @@
#define B_AX_EF_POR BIT(10)
#define B_AX_EF_CELL_SEL_MASK GENMASK(9, 8)
-#define R_AX_SPSLDO_ON_CTRL0 0x0200
-#define B_AX_OCP_L1_MASK GENMASK(15, 13)
-
#define R_AX_EFUSE_CTRL 0x0030
#define B_AX_EF_MODE_SEL_MASK GENMASK(31, 30)
#define B_AX_EF_RDY BIT(29)
@@ -143,6 +140,18 @@
#define R_AX_PMC_DBG_CTRL2 0x00CC
#define B_AX_SYSON_DIS_PMCR_AX_WRMSK BIT(2)
+#define R_AX_PCIE_MIO_INTF 0x00E4
+#define B_AX_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
+#define B_AX_PCIE_MIO_BYIOREG BIT(13)
+#define B_AX_PCIE_MIO_RE BIT(12)
+#define B_AX_PCIE_MIO_WE_MASK GENMASK(11, 8)
+#define MIO_WRITE_BYTE_ALL 0xF
+#define B_AX_PCIE_MIO_ADDR_MASK GENMASK(7, 0)
+#define MIO_ADDR_PAGE_MASK GENMASK(12, 8)
+
+#define R_AX_PCIE_MIO_INTD 0x00E8
+#define B_AX_PCIE_MIO_DATA_MASK GENMASK(31, 0)
+
#define R_AX_SYS_CFG1 0x00F0
#define B_AX_CHIP_VER_MASK GENMASK(15, 12)
@@ -191,6 +200,12 @@
#define R_AX_UDM2 0x01F8
#define R_AX_UDM3 0x01FC
+#define R_AX_SPS_DIG_ON_CTRL0 0x0200
+#define B_AX_VREFPFM_L_MASK GENMASK(25, 22)
+#define B_AX_REG_ZCDC_H_MASK GENMASK(18, 17)
+#define B_AX_OCP_L1_MASK GENMASK(15, 13)
+#define B_AX_VOL_L1_MASK GENMASK(3, 0)
+
#define R_AX_LDO_AON_CTRL0 0x0218
#define B_AX_PD_REGU_L BIT(16)
@@ -383,6 +398,7 @@
#define R_AX_PHYREG_SET 0x8040
#define PHYREG_SET_ALL_CYCLE 0x8
+#define PHYREG_SET_XYN_CYCLE 0xE
#define R_AX_HD0IMR 0x8110
#define B_AX_WDT_PTFM_INT_EN BIT(5)
@@ -467,6 +483,7 @@
#define R_AX_LTR_CTRL_0 0x8410
#define B_AX_LTR_SPACE_IDX_MASK GENMASK(13, 12)
#define B_AX_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8)
+#define B_AX_LTR_WD_NOEMP_CHK BIT(6)
#define B_AX_APP_LTR_ACT BIT(5)
#define B_AX_APP_LTR_IDLE BIT(4)
#define B_AX_LTR_EN BIT(1)
@@ -1024,15 +1041,13 @@
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
- B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
- B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
- B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
- B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
- B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
@@ -1043,10 +1058,7 @@
B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
- B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
- B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
- B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
- B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -1826,6 +1838,13 @@
#define B_AX_TXSC_40M_MASK GENMASK(7, 4)
#define B_AX_TXSC_20M_MASK GENMASK(3, 0)
+#define R_AX_PTCL_RRSR1 0xC090
+#define R_AX_PTCL_RRSR1_C1 0xE090
+#define B_AX_RRSR_RATE_EN_MASK GENMASK(11, 8)
+#define RRSR_OFDM_CCK_EN 3
+#define B_AX_RSC_MASK GENMASK(7, 6)
+#define B_AX_RRSR_CCK_MASK GENMASK(3, 0)
+
#define R_AX_CMAC_ERR_IMR 0xC160
#define R_AX_CMAC_ERR_IMR_C1 0xE160
#define B_AX_WMAC_TX_ERR_IND_EN BIT(7)
@@ -1882,6 +1901,7 @@
#define B_AX_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
#define B_AX_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
#define SIFS_MACTXEN_T1 0x47
+#define SIFS_MACTXEN_T1_V1 0x41
#define R_AX_CCA_CFG_0 0xC340
#define R_AX_CCA_CFG_0_C1 0xE340
@@ -2098,6 +2118,8 @@
#define R_AX_TBTT_SHIFT_P3 0xC4E8
#define R_AX_TBTT_SHIFT_P4 0xC528
#define B_AX_TBTT_SHIFT_OFST_MASK GENMASK(11, 0)
+#define B_AX_TBTT_SHIFT_OFST_SIGN BIT(11)
+#define B_AX_TBTT_SHIFT_OFST_MAG GENMASK(10, 0)
#define R_AX_BCN_CNT_TMR_P0 0xC434
#define R_AX_BCN_CNT_TMR_P1 0xC474
@@ -2258,6 +2280,7 @@
#define B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN BIT(8)
#define B_AX_FSM1_TIMEOUT_ERR_INT_EN BIT(1)
#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_AX_PTCL_IMR_CLR_ALL GENMASK(31, 0)
#define B_AX_PTCL_IMR_CLR (B_AX_FSM_TIMEOUT_ERR_INT_EN | \
B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN | \
B_AX_TXPRT_FULL_DROP_ERR_INT_EN | \
@@ -2315,6 +2338,28 @@
#define B_AX_DLE_IMR_SET (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
B_AX_RXDATA_FSM_HANG_ERROR_IMR)
+#define R_AX_RXDMA_CTRL_0 0xC804
+#define R_AX_RXDMA_CTRL_0_C1 0xE804
+#define B_AX_RXDMA_DBGOUT_EN BIT(31)
+#define B_AX_RXDMA_DBG_SEL_MASK GENMASK(30, 29)
+#define B_AX_RXDMA_FIFO_DBG_SEL_MASK GENMASK(28, 25)
+#define B_AX_RXDMA_DEFAULT_PAGE_MASK GENMASK(22, 21)
+#define B_AX_RXDMA_BUFF_REQ_PRI_MASK GENMASK(20, 19)
+#define B_AX_RXDMA_TGT_QUEID_MASK GENMASK(18, 13)
+#define B_AX_RXDMA_TGT_PRID_MASK GENMASK(12, 10)
+#define B_AX_RXDMA_DIS_CSI_RELEASE BIT(9)
+#define B_AX_RXDMA_DIS_RXSTS_WAIT_PTR_CLR BIT(7)
+#define B_AX_RXDMA_DIS_CSI_WAIT_PTR_CLR BIT(6)
+#define B_AX_RXSTS_PTR_FULL_MODE BIT(5)
+#define B_AX_CSI_PTR_FULL_MODE BIT(4)
+#define B_AX_RU3_PTR_FULL_MODE BIT(3)
+#define B_AX_RU2_PTR_FULL_MODE BIT(2)
+#define B_AX_RU1_PTR_FULL_MODE BIT(1)
+#define B_AX_RU0_PTR_FULL_MODE BIT(0)
+#define RX_FULL_MODE (B_AX_RU0_PTR_FULL_MODE | B_AX_RU1_PTR_FULL_MODE | \
+ B_AX_RU2_PTR_FULL_MODE | B_AX_RU3_PTR_FULL_MODE | \
+ B_AX_CSI_PTR_FULL_MODE | B_AX_RXSTS_PTR_FULL_MODE)
+
#define R_AX_RXDMA_PKT_INFO_0 0xC814
#define R_AX_RXDMA_PKT_INFO_1 0xC818
#define R_AX_RXDMA_PKT_INFO_2 0xC81C
@@ -2553,6 +2598,20 @@
#define WMAC_SPEC_SIFS_OFDM_52C 0x11
#define WMAC_SPEC_SIFS_CCK 0xA
+#define R_AX_TRXPTCL_RRSR_CTL_0 0xCC08
+#define R_AX_TRXPTCL_RRSR_CTL_0_C1 0xEC08
+#define B_AX_RESP_TX_MACID_CCA_TH_EN BIT(31)
+#define B_AX_RESP_TX_PWRMODE_MASK GENMASK(30, 28)
+#define B_AX_FTM_RRSR_RATE_EN_MASK GENMASK(27, 24)
+#define B_AX_NESS_MASK GENMASK(23, 22)
+#define B_AX_WMAC_RESP_DOPPLEB_AX_EN BIT(21)
+#define B_AX_WMAC_RESP_DCM_EN BIT(20)
+#define B_AX_WMAC_RRSB_AX_CCK_MASK GENMASK(19, 16)
+#define B_AX_WMAC_RESP_RATE_EN_MASK GENMASK(15, 12)
+#define B_AX_WMAC_RESP_RSC_MASK GENMASK(11, 10)
+#define B_AX_WMAC_RESP_REF_RATE_SEL BIT(9)
+#define B_AX_WMAC_RESP_REF_RATE_MASK GENMASK(8, 0)
+
#define R_AX_MAC_LOOPBACK 0xCC20
#define R_AX_MAC_LOOPBACK_C1 0xEC20
#define B_AX_MACLBK_EN BIT(0)
@@ -2565,6 +2624,7 @@
#define B_AX_WMAC_TF_UP_NAV_EN BIT(16)
#define B_AX_WMAC_NAV_UPPER_MASK GENMASK(15, 8)
#define NAV_12MS 0xBC
+#define NAV_25MS 0xC4
#define B_AX_WMAC_RTS_RST_DUR_MASK GENMASK(7, 0)
#define R_AX_RXTRIG_TEST_USER_2 0xCCB0
@@ -2968,18 +3028,18 @@
#define R_AX_PATH_COM0 0xD800
#define AX_PATH_COM0_DFVAL 0x00000000
-#define AX_PATH_COM0_PATHA 0x08888880
-#define AX_PATH_COM0_PATHB 0x11111100
+#define AX_PATH_COM0_PATHA 0x08889880
+#define AX_PATH_COM0_PATHB 0x11111900
#define AX_PATH_COM0_PATHAB 0x19999980
#define R_AX_PATH_COM1 0xD804
#define AX_PATH_COM1_DFVAL 0x00000000
-#define AX_PATH_COM1_PATHA 0x11111111
-#define AX_PATH_COM1_PATHB 0x22222222
+#define AX_PATH_COM1_PATHA 0x13111111
+#define AX_PATH_COM1_PATHB 0x23222222
#define AX_PATH_COM1_PATHAB 0x33333333
#define R_AX_PATH_COM2 0xD808
#define AX_PATH_COM2_DFVAL 0x00000000
-#define AX_PATH_COM2_PATHA 0x01209111
-#define AX_PATH_COM2_PATHB 0x01209222
+#define AX_PATH_COM2_PATHA 0x01209313
+#define AX_PATH_COM2_PATHB 0x01209323
#define AX_PATH_COM2_PATHAB 0x01209333
#define R_AX_PATH_COM3 0xD80C
#define AX_PATH_COM3_DFVAL 0x49249249
@@ -3125,6 +3185,18 @@
#define B_AX_GNT_WL_BB_VAL BIT(1)
#define B_AX_GNT_WL_BB_SWCTRL BIT(0)
+#define R_AX_GNT_VAL 0x0054
+#define B_AX_GNT_BT_RFC_S1_STA BIT(5)
+#define B_AX_GNT_WL_RFC_S1_STA BIT(4)
+#define B_AX_GNT_BT_RFC_S0_STA BIT(3)
+#define B_AX_GNT_WL_RFC_S0_STA BIT(2)
+
+#define R_AX_GNT_VAL_V1 0xDA4C
+#define B_AX_GNT_BT_RFC_S1 BIT(4)
+#define B_AX_GNT_BT_RFC_S0 BIT(3)
+#define B_AX_GNT_WL_RFC_S1 BIT(2)
+#define B_AX_GNT_WL_RFC_S0 BIT(1)
+
#define R_AX_TDMA_MODE 0xDA4C
#define R_AX_TDMA_MODE_C1 0xFA4C
#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
@@ -3356,6 +3428,7 @@
#define RR_DCK_FINE BIT(1)
#define RR_DCK_LV BIT(0)
#define RR_DCK1 0x93
+#define RR_DCK1_DONE BIT(5)
#define RR_DCK1_CLR GENMASK(3, 0)
#define RR_DCK1_SEL BIT(3)
#define RR_DCK2 0x94
@@ -3431,8 +3504,9 @@
#define R_MAC_PIN_SEL 0x0734
#define B_CH_IDX_SEG0 GENMASK(23, 16)
#define R_PLCP_HISTOGRAM 0x0738
-#define B_STS_DIS_TRIG_BY_BRK BIT(2)
+#define B_STS_PARSING_TIME GENMASK(19, 16)
#define B_STS_DIS_TRIG_BY_FAIL BIT(3)
+#define B_STS_DIS_TRIG_BY_BRK BIT(2)
#define R_PHY_STS_BITMAP_ADDR_START R_PHY_STS_BITMAP_SEARCH_FAIL
#define B_PHY_STS_BITMAP_ADDR_MASK GENMASK(6, 2)
#define R_PHY_STS_BITMAP_SEARCH_FAIL 0x073C
@@ -3542,6 +3616,9 @@
#define B_P0_RXCK_VAL GENMASK(18, 16)
#define B_P0_TXCK_ON BIT(15)
#define B_P0_TXCK_VAL GENMASK(14, 12)
+#define R_P0_RFMODE 0x12AC
+#define B_P0_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
+#define B_P0_RFMODE_MUX GENMASK(11, 4)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
#define R_S0_RXDC 0x12D4
@@ -3648,6 +3725,9 @@
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_S1_HW_SI_DIS 0x3200
#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
+#define R_P1_RFMODE 0x32AC
+#define B_P1_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
+#define B_P1_RFMODE_MUX GENMASK(11, 4)
#define R_P1_DBGMOD 0x32B8
#define B_P1_DBGMOD_ON BIT(30)
#define R_S1_RXDC 0x32D4
@@ -3663,6 +3743,8 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_MUIC 0x40F8
+#define B_MUIC_EN BIT(0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(1, 0)
#define R_SEG0CSI 0x42AC
@@ -3745,15 +3827,22 @@
#define R_PATH0_RXB_INIT 0x4658
#define B_PATH0_RXB_INIT_IDX_MSK GENMASK(9, 5)
#define R_PATH0_LNA_INIT 0x4668
+#define R_PATH0_LNA_INIT_V1 0x472C
#define B_PATH0_LNA_INIT_IDX_MSK GENMASK(26, 24)
#define R_PATH0_BTG 0x466C
#define B_PATH0_BTG_SHEN GENMASK(18, 17)
#define R_PATH0_TIA_INIT 0x4674
#define B_PATH0_TIA_INIT_IDX_MSK BIT(17)
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH0_RXB_INIT_V1 0x46A8
+#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
#define R_PATH0_G_LNA6_OP1DB_V1 0x4688
#define B_PATH0_G_LNA6_OP1DB_V1 GENMASK(31, 24)
#define R_PATH0_G_TIA0_LNA6_OP1DB_V1 0x4694
@@ -3780,7 +3869,10 @@
#define R_P0_AGC_CTL 0x4730
#define B_P0_AGC_EN BIT(31)
#define R_PATH1_LNA_INIT 0x473C
+#define R_PATH1_LNA_INIT_V1 0x4A80
#define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24)
+#define R_PATH0_TIA_INIT_V1 0x473C
+#define B_PATH0_TIA_INIT_IDX_MSK_V1 BIT(9)
#define R_PATH1_TIA_INIT 0x4748
#define B_PATH1_TIA_INIT_IDX_MSK BIT(17)
#define R_PATH1_BTG 0x4740
@@ -3790,8 +3882,12 @@
#define R_PATH1_G_LNA6_OP1DB_V1 0x476C
#define B_PATH1_G_LNA6_OP1DB_V1 GENMASK(31, 24)
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
@@ -3807,6 +3903,8 @@
#define B_P1_NBIIDX_VAL GENMASK(11, 0)
#define B_P1_NBIIDX_NOTCH_EN BIT(12)
#define R_SEG0R_PD 0x481C
+#define R_SEG0R_PD_V1 0x4860
+#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
#define R_2P4G_BAND 0x4970
@@ -3830,8 +3928,12 @@
#define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0)
#define R_CCK_FC0_INV_V1 0x4A20
#define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0)
+#define R_PATH1_RXB_INIT_V1 0x4A5C
+#define B_PATH1_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
#define R_P1_AGC_CTL 0x4A9C
#define B_P1_AGC_EN BIT(31)
+#define R_PATH1_TIA_INIT_V1 0x4AA8
+#define B_PATH1_TIA_INIT_IDX_MSK_V1 BIT(9)
#define R_PATH0_RXBB_V1 0x4AD4
#define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0)
#define R_PATH1_RXBB_V1 0x4AE0
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 20c7afd3e70f..6e5a740b128f 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -346,7 +346,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
rtw89_debug_regd(rtwdev, rtwdev->regd, "get from initiator %d, alpha2",
request->initiator);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
exit:
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 81bd0c4fe21b..784147680353 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -431,6 +431,7 @@ static const struct rtw89_imr_info rtw8852a_imr_info = {
.cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
.other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
.other_disp_imr_set = 0,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR,
.bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
.bbrpt_err_imr_set = 0,
.bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
@@ -453,6 +454,31 @@ static const struct rtw89_imr_info rtw8852a_imr_info = {
.tmac_imr_set = B_AX_TMAC_IMR_SET,
};
+static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK,
+ .p0_lna_init = {R_PATH0_LNA_INIT, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT, B_PATH0_TIA_INIT_IDX_MSK},
+ .p1_tia_init = {R_PATH1_TIA_INIT, B_PATH1_TIA_INIT_IDX_MSK},
+ .p0_rxb_init = {R_PATH0_RXB_INIT, B_PATH0_RXB_INIT_IDX_MSK},
+ .p1_rxb_init = {R_PATH1_RXB_INIT, B_PATH1_RXB_INIT_IDX_MSK},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
@@ -660,7 +686,7 @@ static void rtw8852a_power_trim(struct rtw89_dev *rtwdev)
}
static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 mac_idx)
{
u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
@@ -669,20 +695,20 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
u8 txsc20 = 0, txsc40 = 0;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_40);
fallthrough;
case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_20);
break;
default:
break;
}
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_80:
rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
@@ -699,7 +725,7 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
break;
}
- if (param->center_chan > 14)
+ if (chan->channel > 14)
rtw89_write8_set(rtwdev, chk_rate,
B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
else
@@ -1102,11 +1128,12 @@ static void rtw8852a_bb_sethw(struct rtw89_dev *rtwdev)
if (rtwdev->hal.cv <= CHIP_CCV) {
rtw89_phy_write32_set(rtwdev, R_RSTB_WATCH_DOG, B_P0_RSTB_WATCH_DOG);
rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_1, 0x864FA000);
- rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x3F);
+ rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x43F);
rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_3, 0x7FFF);
rtw89_phy_write32_set(rtwdev, R_SPOOF_ASYNC_RST, B_SPOOF_ASYNC_RST);
rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, B_STS_PARSING_TIME);
}
rtw89_phy_write32_mask(rtwdev, R_CFO_TRK0, B_CFO_TRK_MSK, 0x1f);
rtw89_phy_write32_mask(rtwdev, R_CFO_TRK1, B_CFO_TRK_MSK, 0x0c);
@@ -1130,35 +1157,38 @@ static void rtw8852a_bbrst_for_rfk(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- bool cck_en = param->center_chan <= 14;
- u8 pri_ch_idx = param->pri_ch_idx;
+ bool cck_en = chan->channel <= 14;
+ u8 pri_ch_idx = chan->pri_ch_idx;
if (cck_en)
- rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan,
- param->primary_chan, param->bandwidth);
+ rtw8852a_ctrl_sco_cck(rtwdev, chan->channel,
+ chan->primary_channel,
+ chan->band_width);
- rtw8852a_ctrl_ch(rtwdev, param->center_chan, phy_idx);
- rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ rtw8852a_ctrl_ch(rtwdev, chan->channel, phy_idx);
+ rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
if (cck_en) {
rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
} else {
rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
rtw8852a_bbrst_for_rfk(rtwdev, phy_idx);
}
- rtw8852a_spur_elimination(rtwdev, param->center_chan);
+ rtw8852a_spur_elimination(rtwdev, chan->channel);
rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0,
- param->primary_chan);
+ chan->primary_channel);
rtw8852a_bb_reset_all(rtwdev, phy_idx);
}
static void rtw8852a_set_channel(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *params)
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_channel_mac(rtwdev, params, RTW89_MAC_0);
- rtw8852a_set_channel_bb(rtwdev, params, RTW89_PHY_0);
+ rtw8852a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852a_set_channel_bb(rtwdev, chan, phy_idx);
}
static void rtw8852a_dfs_en(struct rtw89_dev *rtwdev, bool en)
@@ -1209,25 +1239,27 @@ static void rtw8852a_adc_en(struct rtw89_dev *rtwdev, bool en)
}
static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- u8 phy_idx = RTW89_PHY_0;
-
if (enter) {
- rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, &p->tx_en,
+ RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852a_dfs_en(rtwdev, false);
- rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
rtw8852a_adc_en(rtwdev, false);
fsleep(40);
rtw8852a_bb_reset_en(rtwdev, phy_idx, false);
} else {
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852a_adc_en(rtwdev, true);
rtw8852a_dfs_en(rtwdev, true);
- rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
}
@@ -1277,9 +1309,10 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev)
rtw8852a_dpk(rtwdev, phy_idx);
}
-static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev)
+static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_tssi_scan(rtwdev, RTW89_PHY_0);
+ rtw8852a_tssi_scan(rtwdev, phy_idx);
}
static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -1378,9 +1411,11 @@ static void rtw8852a_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 ch = rtwdev->hal.current_channel;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -1406,7 +1441,8 @@ static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
cur.idx = j;
shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
+ &cur);
val |= (tmp << shf);
if ((j + 1) % 4)
@@ -1421,8 +1457,10 @@ static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 band = chan->band_type;
struct rtw89_rate_desc desc = {
.nss = RTW89_NSS_1,
.rs = RTW89_RS_OFFSET,
@@ -1433,7 +1471,7 @@ static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
val |= ((v & 0xf) << (4 * desc.idx));
}
@@ -1442,29 +1480,31 @@ static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit lmt[NTX_NUM_8852A];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
ptr = (s8 *)&lmt[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -1473,30 +1513,32 @@ static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852A];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_RU_LMT + j +
__MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
ptr = (s8 *)&lmt_ru[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -1505,17 +1547,20 @@ static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
}
-static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev)
+static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_limit(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
-static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_ref(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_offset(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_ref(rtwdev, phy_idx);
}
static int
@@ -1592,10 +1637,12 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
enum rtw89_phy_idx idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
- if (rtwdev->hal.current_band_type == RTW89_BAND_2G)
+ if (chan->band_type == RTW89_BAND_2G)
rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
return;
}
@@ -1797,6 +1844,9 @@ static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
} else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
@@ -2010,6 +2060,51 @@ void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
}
+static void rtw8852a_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ /* level=0 Default: TIA 1/0= (LNA2,TIAN6) = (7,1)/(5,1) = 21dB/12dB
+ * level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
+ * To improve BT ACI in co-rx
+ */
+
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void rtw8852a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ switch (level) {
+ case 0: /* original */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852a_set_wl_lna2(rtwdev, 0);
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, true);
+ rtw8852a_set_wl_lna2(rtwdev, 0);
+ break;
+ case 2: /* for BTG Co-Rx */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852a_set_wl_lna2(rtwdev, 1);
+ break;
+ }
+}
+
static void rtw8852a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -2030,12 +2125,12 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
u8 path;
- s8 *rx_power = phy_ppdu->rssi;
+ u8 *rx_power = phy_ppdu->rssi;
- status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
- status->chain_signal[path] = rx_power[path];
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
}
if (phy_ppdu->valid)
rtw8852a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
@@ -2086,6 +2181,8 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.btc_bt_aci_imp = rtw8852a_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852a_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852a_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852a_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy,
};
const struct rtw89_chip_info rtw8852a_chip_info = {
@@ -2093,6 +2190,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ops = &rtw8852a_chip_ops,
.fw_name = "rtw89/rtw8852a_fw.bin",
.fifo_size = 458752,
+ .dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x6f800,
@@ -2114,7 +2212,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
+ .support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bw160 = false,
@@ -2125,6 +2225,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.acam_num = 128,
.bcam_num = 10,
.scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_v1 = false,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
@@ -2133,11 +2236,26 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dav_log_efuse_size = 0,
.phycap_addr = 0x580,
.phycap_size = 128,
- .para_ver = 0x05050864,
- .wlcx_desired = 0x05050000,
- .btcx_desired = 0x5,
+ .para_ver = 0x0,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
+ .btc_fwinfo_buf = 1024,
+
+ .fcxbtcrpt_ver = 1,
+ .fcxtdma_ver = 1,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 2,
+ .fcxstep_ver = 2,
+ .fcxnullsta_ver = 1,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852a_bt_rssi_thres,
@@ -2163,7 +2281,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.page_regs = &rtw8852a_page_regs,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 3,
- .imr_info = &rtw8852a_imr_info
+ .imr_info = &rtw8852a_imr_info,
+ .rrsr_cfgs = &rtw8852a_rrsr_cfgs,
+ .dma_ch_mask = 0,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index 3d60feb78312..582ff0d3a9ea 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -1359,7 +1359,7 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, u8 path)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1380,9 +1380,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);
- iqk_info->iqk_band[path] = hal->current_band_type;
- iqk_info->iqk_bw[path] = hal->current_band_width;
- iqk_info->iqk_ch[path] = hal->current_channel;
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
@@ -1879,13 +1879,12 @@ static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- struct rtw89_hal *hal = &rtwdev->hal;
-
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 kidx = dpk->cur_idx[path];
- dpk->bp[path][kidx].band = hal->current_band_type;
- dpk->bp[path][kidx].ch = hal->current_channel;
- dpk->bp[path][kidx].bw = hal->current_band_width;
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
@@ -2358,6 +2357,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2404,7 +2404,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
"[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
tmp_rxbb);
if (offset != 0 || agc_cnt == 0) {
- if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80)
+ if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
_dpk_lbk_rxiqk(rtwdev, phy, path);
@@ -2548,11 +2548,12 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
- cur_band = rtwdev->hal.current_band_type;
- cur_ch = rtwdev->hal.current_channel;
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
if (cur_band != dpk->bp[path][idx].band ||
@@ -2681,12 +2682,13 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
return true;
@@ -2842,7 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
@@ -2852,7 +2855,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2863,7 +2867,8 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
&rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
@@ -2905,8 +2910,9 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
const s8 *thm_down_a = NULL;
const s8 *thm_up_b = NULL;
@@ -3099,7 +3105,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 subband = chan->subband_type;
switch (subband) {
default:
@@ -3275,7 +3282,8 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
s8 de_2nd = 0;
@@ -3312,7 +3320,8 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
s8 tde_2nd = 0;
@@ -3350,6 +3359,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3358,7 +3368,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
- u8 ch = rtwdev->hal.current_channel;
+ u8 ch = chan->channel;
u8 i, gidx;
s8 ofdm_de;
s8 trim_de;
@@ -3478,9 +3488,11 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel, ch_tmp;
- u8 bw = rtwdev->hal.current_band_width;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel, ch_tmp;
+ u8 bw = chan->band_width;
+ u8 band = chan->band_type;
+ u8 subband = chan->subband_type;
s8 power;
s32 xdbm;
@@ -3491,7 +3503,7 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
else
ch_tmp = ch;
- power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX,
+ power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
xdbm = power * 100 / 4;
@@ -3523,9 +3535,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
- u8 ch = rtwdev->hal.current_channel, ch_tmp;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel, ch_tmp;
+ u8 bw = chan->band_width;
+ u8 band = chan->band_type;
u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
s8 power;
@@ -3539,8 +3553,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
else
ch_tmp = ch;
- power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX,
- RTW89_RS_OFDM, RTW89_NONBF, ch_tmp);
+ power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
+ RTW89_1TX, RTW89_RS_OFDM,
+ RTW89_NONBF, ch_tmp);
xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 190c4aefb02e..0cd8c0c44d19 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -33,14 +33,15 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2,
- .dma_stop1_reg = R_AX_PCIE_DMA_STOP1,
- .dma_stop2_reg = R_AX_PCIE_DMA_STOP2,
- .dma_busy1_reg = R_AX_PCIE_DMA_BUSY1,
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK},
+ .dma_stop2 = {R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK},
.dma_busy2_reg = R_AX_PCIE_DMA_BUSY2,
.dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
+ .tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
new file mode 100644
index 000000000000..9f9908418ee4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "core.h"
+#include "mac.h"
+#include "reg.h"
+
+static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
+ &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ &rtw89_mac_size.ple_qt58},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
+
+ return 0;
+}
+
+static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ u8 wl_rfc_s0;
+ u8 wl_rfc_s1;
+ int ret;
+
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
+ if (ret)
+ return ret;
+ wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
+ if (ret)
+ return ret;
+ wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
+ FULL_BIT_MASK);
+ return ret;
+}
+
+static const struct rtw89_chip_ops rtw8852b_chip_ops = {
+ .enable_bb_rf = rtw8852b_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852b_mac_disable_bb_rf,
+};
+
+const struct rtw89_chip_info rtw8852b_chip_info = {
+ .chip_id = RTL8852B,
+ .fifo_size = 196608,
+ .dle_scc_rsvd_size = 98304,
+ .dle_mem = rtw8852b_dle_mem_pcie,
+ .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
+ BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
+ BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
+};
+EXPORT_SYMBOL(rtw8852b_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852b_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
new file mode 100644
index 000000000000..7bf95c38d3eb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+
+static const struct rtw89_pci_info rtw8852b_pci_info = {
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
+ BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
+ BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+};
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index b697aef2faf2..67653b3e1a35 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -109,6 +109,7 @@ static const struct rtw89_imr_info rtw8852c_imr_info = {
.cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET_V1,
.other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR_V1,
.other_disp_imr_set = B_AX_OTHER_DISP_IMR_SET_V1,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR,
.bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR,
.bbrpt_err_imr_set = R_AX_BBRPT_CHINFO_IMR_SET_V1,
.bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR,
@@ -131,7 +132,34 @@ static const struct rtw89_imr_info rtw8852c_imr_info = {
.tmac_imr_set = B_AX_TMAC_IMR_SET_V1,
};
+static const struct rtw89_rrsr_cfgs rtw8852c_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_PTCL_RRSR1, B_AX_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852c_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg);
+static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
+ enum rtw89_mac_idx mac_idx);
static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
{
@@ -567,7 +595,7 @@ static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
}
static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 mac_idx)
{
u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
@@ -578,24 +606,24 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
u8 rf_mod_val = 0, chk_rate_mask = 0;
u32 txsc;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_160:
- txsc80 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc80 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_80);
fallthrough;
case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_40);
fallthrough;
case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_20);
break;
default:
break;
}
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_160:
rf_mod_val = AX_WMAC_RFMOD_160M;
txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) |
@@ -620,7 +648,7 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, rf_mod_val);
rtw89_write32(rtwdev, sub_carr, txsc);
- switch (param->band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
chk_rate_mask = B_AX_BAND_MODE;
break;
@@ -629,7 +657,7 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
chk_rate_mask = B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6;
break;
default:
- rtw89_warn(rtwdev, "Invalid band_type:%d\n", param->band_type);
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
return;
}
rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE | B_AX_CHECK_CCK_EN |
@@ -920,7 +948,7 @@ static void rtw8852c_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
}
static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
- const struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx,
enum rtw89_rf_path path)
{
@@ -939,7 +967,7 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
if (rtwdev->dbcc_en && path == RF_PATH_B)
phy_idx = RTW89_PHY_1;
- if (param->band_type == RTW89_BAND_2G) {
+ if (chan->band_type == RTW89_BAND_2G) {
offset_q0 = efuse_gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK];
offset_base_q4 = efuse_gain->offset_base[phy_idx];
@@ -948,7 +976,7 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_RPL_OFST, B_RPL_OFST_MASK, tmp & 0x7f);
}
- switch (param->subband_type) {
+ switch (chan->subband_type) {
default:
case RTW89_CH_2G:
gain_band = RTW89_GAIN_OFFSET_2G_OFDM;
@@ -977,14 +1005,14 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
- const struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
u8 sco;
- u16 central_freq = param->center_freq;
- u8 central_ch = param->center_chan;
- u8 band = param->band_type;
- u8 subband = param->subband_type;
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ u8 subband = chan->subband_type;
bool is_2g = band == RTW89_BAND_2G;
u8 chan_idx;
@@ -996,7 +1024,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
if (phy_idx == RTW89_PHY_0) {
/* Path A */
rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_A);
- rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_A);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_A);
if (is_2g)
rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
@@ -1009,7 +1037,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
/* Path B */
if (!rtwdev->dbcc_en) {
rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
- rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_B);
if (is_2g)
rtw89_phy_write32_idx(rtwdev,
@@ -1038,7 +1066,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
} else {
/* Path B */
rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
- rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_B);
if (is_2g)
rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
@@ -1095,7 +1123,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
}
}
- chan_idx = rtw8852c_encode_chan_idx(rtwdev, param->primary_chan, band);
+ chan_idx = rtw8852c_encode_chan_idx(rtwdev, chan->primary_channel, band);
rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
}
@@ -1246,12 +1274,12 @@ rtw8852c_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
}
static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param)
+ const struct rtw89_chan *chan)
{
- u8 center_chan = param->center_chan;
- u8 bw = param->bandwidth;
+ u8 center_chan = chan->channel;
+ u8 bw = chan->band_width;
- switch (param->band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
if (bw == RTW89_CHANNEL_WIDTH_20) {
if (center_chan >= 5 && center_chan <= 8)
@@ -1285,19 +1313,19 @@ static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev,
#define MAX_TONE_NUM 2048
static void rtw8852c_set_csi_tone_idx(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
u32 spur_freq;
s32 freq_diff, csi_idx, csi_tone_idx;
- spur_freq = rtw8852c_spur_freq(rtwdev, param);
+ spur_freq = rtw8852c_spur_freq(rtwdev, chan);
if (spur_freq == 0) {
rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 0, phy_idx);
return;
}
- freq_diff = (spur_freq - param->center_freq) * 1000000;
+ freq_diff = (spur_freq - chan->freq) * 1000000;
csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
@@ -1325,7 +1353,7 @@ static const struct rtw89_nbi_reg_def rtw8852c_nbi_reg_def[] = {
};
static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_rf_path path)
{
const struct rtw89_nbi_reg_def *nbi = &rtw8852c_nbi_reg_def[path];
@@ -1335,34 +1363,37 @@ static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
s32 nbi_frac_idx, nbi_frac_tone_idx;
bool notch2_chk = false;
- spur_freq = rtw8852c_spur_freq(rtwdev, param);
+ spur_freq = rtw8852c_spur_freq(rtwdev, chan);
if (spur_freq == 0) {
rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
return;
}
- fc = param->center_freq;
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160) {
+ fc = chan->freq;
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
fc = (spur_freq > fc) ? fc + 40 : fc - 40;
- if ((fc > spur_freq && param->center_chan < param->primary_chan) ||
- (fc < spur_freq && param->center_chan > param->primary_chan))
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
notch2_chk = true;
}
freq_diff = (spur_freq - fc) * 1000000;
nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5, &nbi_frac_idx);
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_20) {
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
} else {
- u16 tone_para = (param->bandwidth == RTW89_CHANNEL_WIDTH_40) ? 128 : 256;
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
}
nbi_frac_tone_idx = s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
rtw89_phy_write32_mask(rtwdev, nbi->notch2_idx.addr,
nbi->notch2_idx.mask, nbi_tone_idx);
rtw89_phy_write32_mask(rtwdev, nbi->notch2_frac_idx.addr,
@@ -1404,42 +1435,42 @@ static void rtw8852c_spur_notch(struct rtw89_dev *rtwdev, u32 val,
}
static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 pri_ch_idx,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_csi_tone_idx(rtwdev, param, phy_idx);
+ rtw8852c_set_csi_tone_idx(rtwdev, chan, phy_idx);
if (phy_idx == RTW89_PHY_0) {
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_LOWER ||
pri_ch_idx == RTW89_SC_20_UP3X)) {
rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_0);
if (!rtwdev->dbcc_en)
rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
- } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ } else if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_UPPER ||
pri_ch_idx == RTW89_SC_20_LOW3X)) {
rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_0);
if (!rtwdev->dbcc_en)
rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
} else {
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A);
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A);
if (!rtwdev->dbcc_en)
- rtw8852c_set_nbi_tone_idx(rtwdev, param,
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan,
RF_PATH_B);
}
} else {
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_LOWER ||
pri_ch_idx == RTW89_SC_20_UP3X)) {
rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
- } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ } else if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_UPPER ||
pri_ch_idx == RTW89_SC_20_LOW3X)) {
rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
} else {
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B);
}
}
@@ -1450,14 +1481,14 @@ static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
}
static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 pri_ch = param->primary_chan;
+ u8 pri_ch = chan->primary_channel;
bool mask_5m_low;
bool mask_5m_en;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_40:
mask_5m_en = true;
mask_5m_low = pri_ch == 2;
@@ -1526,11 +1557,9 @@ static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev,
phy_idx);
}
-static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev,
+static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
enum rtw89_phy_idx phy_idx, bool en)
{
- struct rtw89_hal *hal = &rtwdev->hal;
-
if (en) {
rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
@@ -1538,7 +1567,7 @@ static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev,
B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
phy_idx);
- if (hal->current_band_type == RTW89_BAND_2G)
+ if (band == RTW89_BAND_2G)
rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x0);
rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
} else {
@@ -1690,21 +1719,24 @@ static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev)
}
static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- bool cck_en = param->band_type == RTW89_BAND_2G;
- u8 pri_ch_idx = param->pri_ch_idx;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_ch_idx = chan->pri_ch_idx;
u32 mask, reg;
u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0,
B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1};
+ u8 ntx_path;
- if (param->band_type == RTW89_BAND_2G)
- rtw8852c_ctrl_sco_cck(rtwdev, param->center_chan,
- param->primary_chan, param->bandwidth);
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8852c_ctrl_sco_cck(rtwdev, chan->channel,
+ chan->primary_channel,
+ chan->band_width);
- rtw8852c_ctrl_ch(rtwdev, param, phy_idx);
- rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ rtw8852c_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
if (cck_en) {
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0);
@@ -1717,17 +1749,17 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
B_PD_ARBITER_OFF, 0x1, phy_idx);
}
- rtw8852c_spur_elimination(rtwdev, param, pri_ch_idx, phy_idx);
- rtw8852c_ctrl_btg(rtwdev, param->band_type == RTW89_BAND_2G);
- rtw8852c_5m_mask(rtwdev, param, phy_idx);
+ rtw8852c_spur_elimination(rtwdev, chan, pri_ch_idx, phy_idx);
+ rtw8852c_ctrl_btg(rtwdev, chan->band_type == RTW89_BAND_2G);
+ rtw8852c_5m_mask(rtwdev, chan, phy_idx);
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
rtwdev->hal.cv != CHIP_CAV) {
rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ,
B_P80_AT_HIGH_FREQ, 0x0, phy_idx);
reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP,
phy_idx);
- if (param->primary_chan > param->center_chan) {
+ if (chan->primary_channel > chan->channel) {
rtw89_phy_write32_mask(rtwdev,
R_P80_AT_HIGH_FREQ_RU_ALLOC,
ru_alloc_msk[phy_idx], 1);
@@ -1742,8 +1774,8 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
}
}
- if (param->band_type == RTW89_BAND_6G &&
- param->bandwidth == RTW89_CHANNEL_WIDTH_160)
+ if (chan->band_type == RTW89_BAND_6G &&
+ chan->band_width == RTW89_CHANNEL_WIDTH_160)
rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN,
B_CDD_EVM_CHK_EN, 0, phy_idx);
else
@@ -1769,15 +1801,29 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
}
}
+ if (chan->band_type == RTW89_BAND_6G)
+ rtw89_phy_write32_set(rtwdev, R_MUIC, B_MUIC_EN);
+ else
+ rtw89_phy_write32_clr(rtwdev, R_MUIC, B_MUIC_EN);
+
+ if (hal->antenna_tx)
+ ntx_path = hal->antenna_tx;
+ else
+ ntx_path = chan->band_type == RTW89_BAND_6G ? RF_B : RF_AB;
+
+ rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, (enum rtw89_mac_idx)phy_idx);
+
rtw8852c_bb_reset_all(rtwdev, phy_idx);
}
static void rtw8852c_set_channel(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *params)
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_channel_mac(rtwdev, params, RTW89_MAC_0);
- rtw8852c_set_channel_bb(rtwdev, params, RTW89_PHY_0);
- rtw8852c_set_channel_rf(rtwdev, params, RTW89_PHY_0);
+ rtw8852c_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852c_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852c_set_channel_rf(rtwdev, chan, phy_idx);
}
static void rtw8852c_dfs_en(struct rtw89_dev *rtwdev, bool en)
@@ -1799,25 +1845,27 @@ static void rtw8852c_adc_en(struct rtw89_dev *rtwdev, bool en)
}
static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- u8 phy_idx = RTW89_PHY_0;
-
if (enter) {
- rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, &p->tx_en,
+ RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852c_dfs_en(rtwdev, false);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
rtw8852c_adc_en(rtwdev, false);
fsleep(40);
- rtw8852c_bb_reset_en(rtwdev, phy_idx, false);
+ rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
} else {
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852c_adc_en(rtwdev, true);
rtw8852c_dfs_en(rtwdev, true);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
- rtw8852c_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
}
@@ -1847,9 +1895,10 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
-static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev)
+static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_tssi_scan(rtwdev, RTW89_PHY_0);
+ rtw8852c_tssi_scan(rtwdev, phy_idx);
}
static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -1958,9 +2007,11 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 ch = rtwdev->hal.current_channel;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -1986,7 +2037,8 @@ static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
cur.idx = j;
shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
+ &cur);
val |= (tmp << shf);
if ((j + 1) % 4)
@@ -2001,8 +2053,10 @@ static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 band = chan->band_type;
struct rtw89_rate_desc desc = {
.nss = RTW89_NSS_1,
.rs = RTW89_RS_OFFSET,
@@ -2013,7 +2067,7 @@ static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
val |= ((v & 0xf) << (4 * desc.idx));
}
@@ -2045,7 +2099,8 @@ static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
__DECL_DFIR_ADDR(filter,
0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0,
0x45C4, 0x45C8);
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
const u32 *param;
int i;
@@ -2076,9 +2131,10 @@ static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 band = rtwdev->hal.current_band_type;
+ u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd];
u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd];
@@ -2092,29 +2148,31 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit lmt[NTX_NUM_8852C];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852C; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
ptr = (s8 *)&lmt[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -2123,30 +2181,32 @@ static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852C];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852C; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_RU_LMT + j +
__MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
ptr = (s8 *)&lmt_ru[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -2155,18 +2215,21 @@ static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
}
-static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev)
+static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
- rtw8852c_set_txpwr_offset(rtwdev, RTW89_PHY_0);
- rtw8852c_set_tx_shape(rtwdev, RTW89_PHY_0);
- rtw8852c_set_txpwr_limit(rtwdev, RTW89_PHY_0);
- rtw8852c_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
-static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_ref(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx);
}
static void
@@ -2222,7 +2285,8 @@ rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
@@ -2316,7 +2380,7 @@ static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
1);
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
1);
- rtw8852c_ctrl_btg(rtwdev, hal->current_band_type == RTW89_BAND_2G);
+ rtw8852c_ctrl_btg(rtwdev, band == RTW89_BAND_2G);
rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
rst_mask0, 1);
rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
@@ -2458,7 +2522,6 @@ static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
- u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB);
@@ -2473,8 +2536,6 @@ static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
}
-
- rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, RTW89_MAC_0);
}
static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
@@ -2773,23 +2834,7 @@ void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
static
void rtw8852c_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_cx *cx = &btc->cx;
- u32 val;
-
- val = rtw89_read32(rtwdev, R_BTC_BT_CNT_HIGH);
- cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val);
- cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val);
-
- val = rtw89_read32(rtwdev, R_BTC_BT_CNT_LOW);
- cx->cnt_bt[BTC_BCNT_LOPRI_TX] = FIELD_GET(B_AX_STATIS_BT_LO_TX_1_MASK, val);
- cx->cnt_bt[BTC_BCNT_LOPRI_RX] = FIELD_GET(B_AX_STATIS_BT_LO_RX_1_MASK, val);
-
- /* clock-gate off before reset counter*/
- rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
- rtw89_write32_clr(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST);
- rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST);
- rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
+ /* Feature moved to firmware */
}
static
@@ -2810,6 +2855,59 @@ void rtw8852c_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
}
+static void rtw8852c_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ /* level=0 Default: TIA 1/0= (LNA2,TIAN6) = (7,1)/(5,1) = 21dB/12dB
+ * level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
+ * To improve BT ACI in co-rx
+ */
+
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void rtw8852c_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ switch (level) {
+ case 0: /* original */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852c_set_wl_lna2(rtwdev, 0);
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, true);
+ rtw8852c_set_wl_lna2(rtwdev, 0);
+ break;
+ case 2: /* for BTG Co-Rx */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852c_set_wl_lna2(rtwdev, 1);
+ break;
+ }
+}
+
static void rtw8852c_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -2831,12 +2929,12 @@ static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
u8 path;
- s8 *rx_power = phy_ppdu->rssi;
+ u8 *rx_power = phy_ppdu->rssi;
- status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
- status->chain_signal[path] = rx_power[path];
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
}
if (phy_ppdu->valid)
rtw8852c_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
@@ -2879,10 +2977,12 @@ static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
return 0;
}
-static void rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ return 0;
}
static const struct rtw89_chip_ops rtw8852c_chip_ops = {
@@ -2930,6 +3030,8 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.btc_bt_aci_imp = rtw8852c_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852c_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852c_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852c_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
};
const struct rtw89_chip_info rtw8852c_chip_info = {
@@ -2937,6 +3039,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.ops = &rtw8852c_chip_ops,
.fw_name = "rtw89/rtw8852c_fw.bin",
.fifo_size = 458752,
+ .dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
.dis_2g_40m_ul_ofdma = false,
.rsvd_ple_ofst = 0x6f800,
@@ -2960,7 +3063,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
+ .dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
@@ -2972,6 +3077,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.acam_num = 128,
.bcam_num = 20,
.scam_num = 128,
+ .bacam_num = 8,
+ .bacam_dynamic_num = 8,
+ .bacam_v1 = true,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
@@ -2980,11 +3088,26 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dav_log_efuse_size = 16,
.phycap_addr = 0x590,
.phycap_size = 0x60,
- .para_ver = 0x05050764,
- .wlcx_desired = 0x05050000,
- .btcx_desired = 0x5,
+ .para_ver = 0x1,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
+ .btc_fwinfo_buf = 1280,
+
+ .fcxbtcrpt_ver = 4,
+ .fcxtdma_ver = 3,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 3,
+ .fcxstep_ver = 3,
+ .fcxnullsta_ver = 2,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852c_bt_rssi_thres,
@@ -2995,7 +3118,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rf_para_ulink = rtw89_btc_8852c_rf_ul,
.rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_dl),
.rf_para_dlink = rtw89_btc_8852c_rf_dl,
- .ps_mode_supported = 0,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
.low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1,
@@ -3009,7 +3134,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.page_regs = &rtw8852c_page_regs,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 5,
- .imr_info = &rtw8852c_imr_info
+ .imr_info = &rtw8852c_imr_info,
+ .rrsr_cfgs = &rtw8852c_rrsr_cfgs,
+ .dma_ch_mask = 0,
};
EXPORT_SYMBOL(rtw8852c_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 4186d825d19b..006c2cf93111 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -1294,14 +1294,14 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, u8 path)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- struct rtw89_hal *hal = &rtwdev->hal;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- iqk_info->iqk_band[path] = hal->current_band_type;
- iqk_info->iqk_bw[path] = hal->current_band_width;
- iqk_info->iqk_ch[path] = hal->current_channel;
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
@@ -1546,7 +1546,8 @@ static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
- 2, 1000, false, rtwdev, path, 0x93, BIT(5));
+ 2, 2000, false, rtwdev, path,
+ RR_DCK1, RR_DCK1_DONE);
if (ret)
rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
else
@@ -1691,14 +1692,14 @@ static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- struct rtw89_hal *hal = &rtwdev->hal;
u8 kidx = dpk->cur_idx[path];
- dpk->bp[path][kidx].band = hal->current_band_type;
- dpk->bp[path][kidx].ch = hal->current_channel;
- dpk->bp[path][kidx].bw = hal->current_band_width;
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
@@ -2272,12 +2273,13 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
- cur_band = rtwdev->hal.current_band_type;
- cur_ch = rtwdev->hal.current_channel;
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
if (cur_band != dpk->bp[path][idx].band ||
@@ -2530,17 +2532,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
- if (rtwdev->hal.cv == CHIP_CAV && rtwdev->hal.current_band_type != RTW89_BAND_2G) {
+ if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
return true;
- } else if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ } else if (fem->epa_2g && band == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ } else if (fem->epa_5g && band == RTW89_BAND_5G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_6g && rtwdev->hal.current_band_type == RTW89_BAND_6G) {
+ } else if (fem->epa_6g && band == RTW89_BAND_6G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
return true;
}
@@ -2663,7 +2667,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
@@ -2697,7 +2702,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
@@ -2735,8 +2741,9 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
const s8 *thm_down_a = NULL;
const s8 *thm_up_b = NULL;
@@ -2908,7 +2915,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2924,7 +2932,8 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl;
if (path == RF_PATH_A) {
@@ -3335,8 +3344,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- enum rtw89_band band = rtwdev->hal.current_band_type;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
s8 de_2nd;
@@ -3398,8 +3408,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- enum rtw89_band band = rtwdev->hal.current_band_type;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
s8 tde_2nd = 0;
@@ -3462,7 +3473,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
s8 trim_de;
@@ -3802,15 +3814,17 @@ void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}

void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, param->center_chan, param->band_type,
- param->bandwidth);
+ rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
+ chan->band_type,
+ chan->band_width);
}

void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
u8 idx = mcc_info->table_idx;
int i;
@@ -3823,8 +3837,8 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i
}

mcc_info->table_idx = idx;
- mcc_info->ch[idx] = rtwdev->hal.current_channel;
- mcc_info->band[idx] = rtwdev->hal.current_band_type;
+ mcc_info->ch[idx] = chan->channel;
+ mcc_info->band[idx] = chan->band_type;
}

void rtw8852c_rck(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index 5118a49da8d3..928a587cdd05 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -21,7 +21,7 @@ void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
void rtw8852c_lck_init(struct rtw89_dev *rtwdev);
void rtw8852c_lck_track(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index feaa83b16171..11f35e7a7f0e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -1767,7 +1767,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
{0x3070103, 0x34343C3C},
};
-static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
+static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0xF0010000, 0x00000000},
{0xF0020000, 0x00000001},
{0xF0320000, 0x00000002},
@@ -1777,13 +1777,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0xF0360000, 0x00000006},
{0xF0010001, 0x00000007},
{0xF0020001, 0x00000008},
- {0xF0320001, 0x00000009},
- {0xF0330001, 0x0000000A},
- {0xF0340001, 0x0000000B},
- {0xF0350001, 0x0000000C},
- {0xF0360001, 0x0000000D},
- {0xF03F0001, 0x0000000E},
- {0xF0400001, 0x0000000F},
+ {0xF0030001, 0x00000009},
+ {0xF0040001, 0x0000000A},
+ {0xF0050001, 0x0000000B},
+ {0xF0070001, 0x0000000C},
+ {0xF0320001, 0x0000000D},
+ {0xF0330001, 0x0000000E},
+ {0xF0340001, 0x0000000F},
+ {0xF0350001, 0x00000010},
+ {0xF0360001, 0x00000011},
+ {0xF03F0001, 0x00000012},
+ {0xF0400001, 0x00000013},
{0x005, 0x00000000},
{0x10005, 0x00000000},
{0x000, 0x00030001},
@@ -1795,7 +1799,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03E, 0x00000620},
{0x03F, 0x0000020C},
{0x0EF, 0x00000000},
- {0x05F, 0x00000032},
+ {0x05F, 0x00000038},
{0x097, 0x00043200},
{0x0A6, 0x00066DB7},
{0x0EF, 0x00004000},
@@ -1821,8 +1825,8 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x000, 0x00033C01},
{0x10000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x096, 0x00015200},
+ {0x10055, 0x00080080},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0004D000},
{0x0DA, 0x000D4009},
@@ -1850,6 +1854,18 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
@@ -1922,6 +1938,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000CC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000CC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -1958,6 +1982,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000C4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000C4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000C4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -1994,6 +2026,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000BC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2030,6 +2070,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000B4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2066,6 +2114,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000AC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2102,6 +2158,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2138,6 +2202,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000009C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2174,6 +2246,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000094},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2210,6 +2290,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000008C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2246,6 +2334,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000084},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2282,6 +2378,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000BC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2318,6 +2422,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000B4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2354,6 +2466,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000AC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2390,6 +2510,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2426,6 +2554,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000009C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2462,6 +2598,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000094},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2498,6 +2642,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000008C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2534,6 +2686,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000084},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2570,6 +2730,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000003C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2606,6 +2774,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000034},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000034},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000034},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2642,6 +2818,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000002C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000002C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000002C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2678,6 +2862,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000024},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000024},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000024},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2714,6 +2906,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000001C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000001C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2750,6 +2950,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000014},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000014},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000014},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2786,6 +2994,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000000C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000000C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000000C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2822,6 +3038,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000004},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000004},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000004},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2871,6 +3095,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x08F, 0x000D1352},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2905,6 +3137,52 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000015},
{0x033, 0x00000001},
{0x03F, 0x00000017},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x0EF, 0x00008000},
{0x033, 0x00000020},
@@ -3416,6 +3694,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000EFFF},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -3522,7 +3808,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000005},
{0x03F, 0x00004344},
{0x033, 0x00000006},
- {0x03F, 0x00004324},
+ {0x03F, 0x00004344},
{0x033, 0x00000007},
{0x03F, 0x00004344},
{0x033, 0x00000008},
@@ -3585,6 +3871,33 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000200},
{0x0EF, 0x00000000},
{0x0EF, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000084DC},
{0x030, 0x000103C9},
{0x030, 0x00018399},
@@ -3597,6 +3910,241 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x030, 0x00050011},
{0x030, 0x00058000},
{0x030, 0x00060000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0xB0000000, 0x00000000},
{0x030, 0x00068000},
{0x030, 0x00070000},
{0x0EF, 0x00000000},
@@ -3831,6 +4379,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x030, 0x000300FF},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000300FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000300FF},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -3901,6 +4457,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x095, 0x00000008},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -3920,101 +4484,2033 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0xB0000000, 0x00000000},
{0x0EE, 0x00001000},
{0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x03F, 0x000002E7},
{0x033, 0x0000003C},
{0x03F, 0x000003E7},
{0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000063},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
@@ -4034,20 +6530,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000152},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0xA0000000, 0x00000000},
{0x03F, 0x00000052},
{0xB0000000, 0x00000000},
@@ -4070,20 +6574,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000015A},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
{0xB0000000, 0x00000000},
@@ -4106,20 +6618,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000019C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
{0xB0000000, 0x00000000},
@@ -4142,20 +6662,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000001A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
{0xB0000000, 0x00000000},
@@ -4178,20 +6706,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000001E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
{0xB0000000, 0x00000000},
@@ -4214,20 +6750,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000002E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0xA0000000, 0x00000000},
{0x03F, 0x000001E6},
{0xB0000000, 0x00000000},
@@ -5271,131 +7815,131 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
{0x10030, 0x000781EF},
{0x10030, 0x000785E9},
{0x10030, 0x000789E3},
- {0x10030, 0x00078DA3},
- {0x10030, 0x00079161},
- {0x10030, 0x0007955B},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
{0x10030, 0x00079921},
{0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -5416,131 +7960,711 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
{0x10030, 0x000781EF},
{0x10030, 0x000785E9},
{0x10030, 0x000789E3},
- {0x10030, 0x00078DA3},
- {0x10030, 0x00079161},
- {0x10030, 0x0007955B},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
{0x10030, 0x00079921},
{0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -5561,1002 +8685,1002 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
{0x10030, 0x000781EF},
{0x10030, 0x000785E9},
{0x10030, 0x000789E3},
- {0x10030, 0x00078DA3},
- {0x10030, 0x00079161},
- {0x10030, 0x0007955B},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
{0x10030, 0x00079921},
{0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0xA0000000, 0x00000000},
{0x10030, 0x000001EF},
{0x10030, 0x000005E9},
@@ -6724,6 +9848,1150 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00004017},
{0x100EE, 0x00000000},
{0x100EE, 0x00002000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x000600F6},
+ {0x10030, 0x000604F3},
+ {0x10030, 0x000608F0},
+ {0x10030, 0x00060CED},
+ {0x10030, 0x000610EA},
+ {0x10030, 0x000614E7},
+ {0x10030, 0x000618E4},
+ {0x10030, 0x00061CE1},
+ {0x10030, 0x000620DE},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628D8},
+ {0x10030, 0x00062CD5},
+ {0x10030, 0x000630D2},
+ {0x10030, 0x000634CF},
+ {0x10030, 0x000638CC},
+ {0x10030, 0x00063C09},
+ {0x10030, 0x00064006},
+ {0x10030, 0x000680F5},
+ {0x10030, 0x000684F2},
+ {0x10030, 0x000688EF},
+ {0x10030, 0x00068CEC},
+ {0x10030, 0x000690E9},
+ {0x10030, 0x000694E6},
+ {0x10030, 0x000698E3},
+ {0x10030, 0x00069CE0},
+ {0x10030, 0x0006A0DD},
+ {0x10030, 0x0006A4DA},
+ {0x10030, 0x0006A8D7},
+ {0x10030, 0x0006ACD4},
+ {0x10030, 0x0006B0D1},
+ {0x10030, 0x0006B4CE},
+ {0x10030, 0x0006B8CB},
+ {0x10030, 0x0006BC08},
+ {0x10030, 0x0006C005},
+ {0x10030, 0x000700F5},
+ {0x10030, 0x000704F2},
+ {0x10030, 0x000708EF},
+ {0x10030, 0x00070CEC},
+ {0x10030, 0x000710E9},
+ {0x10030, 0x000714E6},
+ {0x10030, 0x000718E3},
+ {0x10030, 0x00071CE0},
+ {0x10030, 0x000720DD},
+ {0x10030, 0x000724DA},
+ {0x10030, 0x000728D7},
+ {0x10030, 0x00072CD4},
+ {0x10030, 0x000730D1},
+ {0x10030, 0x000734CE},
+ {0x10030, 0x000738CB},
+ {0x10030, 0x00073C08},
+ {0x10030, 0x00074005},
{0x10030, 0x000780F4},
{0x10030, 0x000784F1},
{0x10030, 0x000788EE},
@@ -6777,9 +11045,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000025},
{0x03F, 0x00008002},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
{0x03F, 0x00050002},
{0x033, 0x00000029},
@@ -6793,9 +11145,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000002D},
{0x03F, 0x00008002},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x03F, 0x00050002},
{0x033, 0x00000031},
@@ -6809,9 +11245,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000035},
{0x03F, 0x00008002},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
{0x03F, 0x00050002},
{0x033, 0x00000061},
@@ -6825,9 +11345,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000065},
{0x03F, 0x00008002},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
{0x03F, 0x00050002},
{0x033, 0x00000069},
@@ -6841,9 +11445,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000006D},
{0x03F, 0x00008002},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
{0x03F, 0x00050002},
{0x033, 0x00000071},
@@ -6857,9 +11545,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000075},
{0x03F, 0x00008002},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
{0x03F, 0x00050002},
{0x033, 0x00000079},
@@ -6873,9 +11645,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000007D},
{0x03F, 0x00008002},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A0},
{0x03F, 0x00050002},
{0x033, 0x000000A1},
@@ -6889,9 +11745,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000A5},
{0x03F, 0x00008002},
{0x033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A8},
{0x03F, 0x00050002},
{0x033, 0x000000A9},
@@ -6905,9 +11845,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000AD},
{0x03F, 0x00008002},
{0x033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B0},
{0x03F, 0x00050002},
{0x033, 0x000000B1},
@@ -6921,9 +11945,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000B5},
{0x03F, 0x00008002},
{0x033, 0x000000B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E0},
{0x03F, 0x00050002},
{0x033, 0x000000E1},
@@ -6937,9 +12045,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000E5},
{0x03F, 0x00008002},
{0x033, 0x000000E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E8},
{0x03F, 0x00050002},
{0x033, 0x000000E9},
@@ -6953,9 +12145,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000ED},
{0x03F, 0x00008002},
{0x033, 0x000000EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F0},
{0x03F, 0x00050002},
{0x033, 0x000000F1},
@@ -6969,9 +12245,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000F5},
{0x03F, 0x00008002},
{0x033, 0x000000F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F8},
{0x03F, 0x00050002},
{0x033, 0x000000F9},
@@ -6985,9 +12345,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000FD},
{0x03F, 0x00008002},
{0x033, 0x000000FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000120},
{0x03F, 0x00050002},
{0x033, 0x00000121},
@@ -7001,9 +12445,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000125},
{0x03F, 0x00008002},
{0x033, 0x00000126},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000127},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000128},
{0x03F, 0x00050002},
{0x033, 0x00000129},
@@ -7017,9 +12545,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000012D},
{0x03F, 0x00008002},
{0x033, 0x0000012E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000012F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000130},
{0x03F, 0x00050002},
{0x033, 0x00000131},
@@ -7033,9 +12645,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000135},
{0x03F, 0x00008002},
{0x033, 0x00000136},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000137},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000160},
{0x03F, 0x00050002},
{0x033, 0x00000161},
@@ -7049,9 +12745,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000165},
{0x03F, 0x00008002},
{0x033, 0x00000166},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000167},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000168},
{0x03F, 0x00050002},
{0x033, 0x00000169},
@@ -7065,9 +12845,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000016D},
{0x03F, 0x00008002},
{0x033, 0x0000016E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000016F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000170},
{0x03F, 0x00050002},
{0x033, 0x00000171},
@@ -7081,9 +12945,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000175},
{0x03F, 0x00008002},
{0x033, 0x00000176},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000177},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000178},
{0x03F, 0x00050002},
{0x033, 0x00000179},
@@ -7097,9 +13045,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000017D},
{0x03F, 0x00008002},
{0x033, 0x0000017E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000017F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A0},
{0x03F, 0x00050002},
{0x033, 0x000001A1},
@@ -7113,9 +13145,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001A5},
{0x03F, 0x00008002},
{0x033, 0x000001A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A8},
{0x03F, 0x00050002},
{0x033, 0x000001A9},
@@ -7129,9 +13245,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001AD},
{0x03F, 0x00008002},
{0x033, 0x000001AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B0},
{0x03F, 0x00050002},
{0x033, 0x000001B1},
@@ -7145,9 +13345,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001B5},
{0x03F, 0x00008002},
{0x033, 0x000001B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E0},
{0x03F, 0x00050002},
{0x033, 0x000001E1},
@@ -7161,9 +13445,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001E5},
{0x03F, 0x00008002},
{0x033, 0x000001E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E8},
{0x03F, 0x00050002},
{0x033, 0x000001E9},
@@ -7177,9 +13545,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001ED},
{0x03F, 0x00008002},
{0x033, 0x000001EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F0},
{0x03F, 0x00050002},
{0x033, 0x000001F1},
@@ -7193,9 +13645,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001F5},
{0x03F, 0x00008002},
{0x033, 0x000001F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F8},
{0x03F, 0x00050002},
{0x033, 0x000001F9},
@@ -7209,9 +13745,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001FD},
{0x03F, 0x00008002},
{0x033, 0x000001FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x005, 0x00000001},
{0x10005, 0x00000001},
@@ -7253,7 +13873,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00022000},
{0x10030, 0x00023000},
{0x10030, 0x00024000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00025000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00026003},
{0x10030, 0x00027003},
{0x10030, 0x00028000},
@@ -7261,7 +13923,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x0002A000},
{0x10030, 0x0002B000},
{0x10030, 0x0002C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0002D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0002E003},
{0x10030, 0x0002F003},
{0x10030, 0x00030000},
@@ -7269,7 +13973,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00032000},
{0x10030, 0x00033000},
{0x10030, 0x00034000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00035000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00036003},
{0x10030, 0x00037003},
{0x10030, 0x00038000},
@@ -7277,7 +14023,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x0003A000},
{0x10030, 0x0003B000},
{0x10030, 0x0003C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0003D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0003E003},
{0x10030, 0x0003F003},
{0x10030, 0x00060000},
@@ -7285,35 +14073,283 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00062000},
{0x10030, 0x00063000},
{0x10030, 0x00064000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00065000},
{0x10030, 0x00066000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00067003},
{0x10030, 0x00068000},
{0x10030, 0x00069000},
{0x10030, 0x0006A000},
{0x10030, 0x0006B000},
{0x10030, 0x0006C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0006D000},
{0x10030, 0x0006E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0006F003},
{0x10030, 0x00070000},
{0x10030, 0x00071000},
{0x10030, 0x00072000},
{0x10030, 0x00073000},
{0x10030, 0x00074000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0xA0000000, 0x00000000},
{0x10030, 0x00075000},
{0x10030, 0x00076000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00077003},
{0x10030, 0x00078000},
{0x10030, 0x00079000},
{0x10030, 0x0007A000},
{0x10030, 0x0007B000},
{0x10030, 0x0007C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0007D000},
{0x10030, 0x0007E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0007F003},
{0x100EE, 0x00000000},
- {0x0FE, 0x00000031},
+ {0x0FE, 0x00000048},
};
static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
@@ -7326,13 +14362,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0xF0360000, 0x00000006},
{0xF0010001, 0x00000007},
{0xF0020001, 0x00000008},
- {0xF0320001, 0x00000009},
- {0xF0330001, 0x0000000A},
- {0xF0340001, 0x0000000B},
- {0xF0350001, 0x0000000C},
- {0xF0360001, 0x0000000D},
- {0xF03F0001, 0x0000000E},
- {0xF0400001, 0x0000000F},
+ {0xF0030001, 0x00000009},
+ {0xF0040001, 0x0000000A},
+ {0xF0050001, 0x0000000B},
+ {0xF0070001, 0x0000000C},
+ {0xF0320001, 0x0000000D},
+ {0xF0330001, 0x0000000E},
+ {0xF0340001, 0x0000000F},
+ {0xF0350001, 0x00000010},
+ {0xF0360001, 0x00000011},
+ {0xF03F0001, 0x00000012},
+ {0xF0400001, 0x00000013},
{0x005, 0x00000000},
{0x10005, 0x00000000},
{0x0B9, 0x00020440},
@@ -7340,42 +14380,69 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10000, 0x00030000},
{0x018, 0x00011124},
{0x10018, 0x00011124},
- {0x05F, 0x00000032},
+ {0x05F, 0x00000038},
{0x097, 0x00043200},
{0x0A6, 0x00066DB7},
{0x0EF, 0x00004000},
{0x033, 0x00000005},
{0x03E, 0x00000000},
{0x03F, 0x00010500},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
{0x033, 0x00000003},
{0x03E, 0x00000000},
{0x03F, 0x00028B00},
{0x033, 0x00000002},
{0x03E, 0x00000000},
{0x03F, 0x0009AB00},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000000},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
{0x033, 0x0000000D},
{0x03E, 0x00000000},
{0x03F, 0x00010500},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
{0x033, 0x0000000B},
{0x03E, 0x00000000},
{0x03F, 0x00028B00},
{0x033, 0x0000000A},
{0x03E, 0x00000000},
{0x03F, 0x0009AB00},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
{0x033, 0x00000015},
{0x03E, 0x00000000},
{0x03F, 0x00010500},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
{0x033, 0x00000013},
{0x03E, 0x00000000},
{0x03F, 0x00028B00},
{0x033, 0x00000012},
{0x03E, 0x00000000},
{0x03F, 0x0009AB00},
+ {0x033, 0x00000011},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
{0x0EF, 0x00000000},
+ {0x10055, 0x00080080},
{0x000, 0x00033C01},
{0x10000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x096, 0x00015200},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0004D000},
@@ -7404,6 +14471,18 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
@@ -7430,7 +14509,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x0DA, 0x000D4009},
{0xB0000000, 0x00000000},
{0x057, 0x0000D589},
- {0x05A, 0x0007FFFF},
+ {0x05A, 0x0007F0F8},
{0x043, 0x00005000},
{0x018, 0x00001001},
{0x10018, 0x00001001},
@@ -7462,6 +14541,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x08F, 0x000D1352},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -7496,6 +14583,52 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x00000015},
{0x033, 0x00000001},
{0x03F, 0x00000017},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x0EF, 0x00008000},
{0x033, 0x00000020},
@@ -8007,6 +15140,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000EFFF},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -8113,7 +15254,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000005},
{0x03F, 0x00004344},
{0x033, 0x00000006},
- {0x03F, 0x00004324},
+ {0x03F, 0x00004344},
{0x033, 0x00000007},
{0x03F, 0x00004344},
{0x033, 0x00000008},
@@ -8176,6 +15317,85 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x00000200},
{0x0EF, 0x00000000},
{0x0EF, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000084DC},
{0x030, 0x000103C9},
{0x030, 0x00018399},
@@ -8188,6 +15408,189 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x030, 0x00050011},
{0x030, 0x00058000},
{0x030, 0x00060000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0xB0000000, 0x00000000},
{0x030, 0x00068000},
{0x030, 0x00070000},
{0x0EF, 0x00000000},
@@ -8458,6 +15861,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x095, 0x00000008},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -8477,101 +15888,2117 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0xB0000000, 0x00000000},
{0x0EE, 0x00001000},
{0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000063},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
@@ -8591,20 +18018,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x00000152},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0xA0000000, 0x00000000},
{0x03F, 0x00000052},
{0xB0000000, 0x00000000},
@@ -8627,20 +18062,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000015A},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
{0xB0000000, 0x00000000},
@@ -8663,20 +18106,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000019C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
{0xB0000000, 0x00000000},
@@ -8699,20 +18150,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x000001A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
{0xB0000000, 0x00000000},
@@ -8735,20 +18194,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x000001E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
{0xB0000000, 0x00000000},
@@ -8771,20 +18238,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x000002E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0xA0000000, 0x00000000},
{0x03F, 0x000001E6},
{0xB0000000, 0x00000000},
@@ -9828,131 +19303,131 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
- {0x10030, 0x000781EF},
- {0x10030, 0x000785E9},
- {0x10030, 0x000789E3},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
{0x10030, 0x00078DA3},
{0x10030, 0x00079161},
{0x10030, 0x0007955B},
- {0x10030, 0x00079921},
- {0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -9973,131 +19448,711 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
- {0x10030, 0x000781EF},
- {0x10030, 0x000785E9},
- {0x10030, 0x000789E3},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
{0x10030, 0x00078DA3},
{0x10030, 0x00079161},
{0x10030, 0x0007955B},
- {0x10030, 0x00079921},
- {0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -10118,1002 +20173,1002 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
- {0x10030, 0x000781EF},
- {0x10030, 0x000785E9},
- {0x10030, 0x000789E3},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
{0x10030, 0x00078DA3},
{0x10030, 0x00079161},
{0x10030, 0x0007955B},
- {0x10030, 0x00079921},
- {0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0xA0000000, 0x00000000},
{0x10030, 0x000001EF},
{0x10030, 0x000005E9},
@@ -11281,6 +21336,1150 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00004017},
{0x100EE, 0x00000000},
{0x100EE, 0x00002000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x000600F6},
+ {0x10030, 0x000604F3},
+ {0x10030, 0x000608F0},
+ {0x10030, 0x00060CED},
+ {0x10030, 0x000610EA},
+ {0x10030, 0x000614E7},
+ {0x10030, 0x000618E4},
+ {0x10030, 0x00061CE1},
+ {0x10030, 0x000620DE},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628D8},
+ {0x10030, 0x00062CD5},
+ {0x10030, 0x000630D2},
+ {0x10030, 0x000634CF},
+ {0x10030, 0x000638CC},
+ {0x10030, 0x00063C09},
+ {0x10030, 0x00064006},
+ {0x10030, 0x000680F5},
+ {0x10030, 0x000684F2},
+ {0x10030, 0x000688EF},
+ {0x10030, 0x00068CEC},
+ {0x10030, 0x000690E9},
+ {0x10030, 0x000694E6},
+ {0x10030, 0x000698E3},
+ {0x10030, 0x00069CE0},
+ {0x10030, 0x0006A0DD},
+ {0x10030, 0x0006A4DA},
+ {0x10030, 0x0006A8D7},
+ {0x10030, 0x0006ACD4},
+ {0x10030, 0x0006B0D1},
+ {0x10030, 0x0006B4CE},
+ {0x10030, 0x0006B8CB},
+ {0x10030, 0x0006BC08},
+ {0x10030, 0x0006C005},
+ {0x10030, 0x000700F5},
+ {0x10030, 0x000704F2},
+ {0x10030, 0x000708EF},
+ {0x10030, 0x00070CEC},
+ {0x10030, 0x000710E9},
+ {0x10030, 0x000714E6},
+ {0x10030, 0x000718E3},
+ {0x10030, 0x00071CE0},
+ {0x10030, 0x000720DD},
+ {0x10030, 0x000724DA},
+ {0x10030, 0x000728D7},
+ {0x10030, 0x00072CD4},
+ {0x10030, 0x000730D1},
+ {0x10030, 0x000734CE},
+ {0x10030, 0x000738CB},
+ {0x10030, 0x00073C08},
+ {0x10030, 0x00074005},
{0x10030, 0x000780F4},
{0x10030, 0x000784F1},
{0x10030, 0x000788EE},
@@ -11334,9 +22533,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000025},
{0x03F, 0x00008002},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
{0x03F, 0x00050002},
{0x033, 0x00000029},
@@ -11350,9 +22633,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000002D},
{0x03F, 0x00008002},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x03F, 0x00050002},
{0x033, 0x00000031},
@@ -11366,9 +22733,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000035},
{0x03F, 0x00008002},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
{0x03F, 0x00050002},
{0x033, 0x00000061},
@@ -11382,9 +22833,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000065},
{0x03F, 0x00008002},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
{0x03F, 0x00050002},
{0x033, 0x00000069},
@@ -11398,9 +22933,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000006D},
{0x03F, 0x00008002},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
{0x03F, 0x00050002},
{0x033, 0x00000071},
@@ -11414,9 +23033,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000075},
{0x03F, 0x00008002},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
{0x03F, 0x00050002},
{0x033, 0x00000079},
@@ -11430,9 +23133,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000007D},
{0x03F, 0x00008002},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A0},
{0x03F, 0x00050002},
{0x033, 0x000000A1},
@@ -11446,9 +23233,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000A5},
{0x03F, 0x00008002},
{0x033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A8},
{0x03F, 0x00050002},
{0x033, 0x000000A9},
@@ -11462,9 +23333,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000AD},
{0x03F, 0x00008002},
{0x033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B0},
{0x03F, 0x00050002},
{0x033, 0x000000B1},
@@ -11478,9 +23433,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000B5},
{0x03F, 0x00008002},
{0x033, 0x000000B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E0},
{0x03F, 0x00050002},
{0x033, 0x000000E1},
@@ -11494,9 +23533,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000E5},
{0x03F, 0x00008002},
{0x033, 0x000000E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E8},
{0x03F, 0x00050002},
{0x033, 0x000000E9},
@@ -11510,9 +23633,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000ED},
{0x03F, 0x00008002},
{0x033, 0x000000EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F0},
{0x03F, 0x00050002},
{0x033, 0x000000F1},
@@ -11526,9 +23733,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000F5},
{0x03F, 0x00008002},
{0x033, 0x000000F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F8},
{0x03F, 0x00050002},
{0x033, 0x000000F9},
@@ -11542,9 +23833,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000FD},
{0x03F, 0x00008002},
{0x033, 0x000000FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000120},
{0x03F, 0x00050002},
{0x033, 0x00000121},
@@ -11558,9 +23933,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000125},
{0x03F, 0x00008002},
{0x033, 0x00000126},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000127},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000128},
{0x03F, 0x00050002},
{0x033, 0x00000129},
@@ -11574,9 +24033,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000012D},
{0x03F, 0x00008002},
{0x033, 0x0000012E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000012F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000130},
{0x03F, 0x00050002},
{0x033, 0x00000131},
@@ -11590,9 +24133,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000135},
{0x03F, 0x00008002},
{0x033, 0x00000136},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000137},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000160},
{0x03F, 0x00050002},
{0x033, 0x00000161},
@@ -11606,9 +24233,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000165},
{0x03F, 0x00008002},
{0x033, 0x00000166},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000167},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000168},
{0x03F, 0x00050002},
{0x033, 0x00000169},
@@ -11622,9 +24333,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000016D},
{0x03F, 0x00008002},
{0x033, 0x0000016E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000016F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000170},
{0x03F, 0x00050002},
{0x033, 0x00000171},
@@ -11638,9 +24433,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000175},
{0x03F, 0x00008002},
{0x033, 0x00000176},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000177},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000178},
{0x03F, 0x00050002},
{0x033, 0x00000179},
@@ -11654,9 +24533,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000017D},
{0x03F, 0x00008002},
{0x033, 0x0000017E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000017F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A0},
{0x03F, 0x00050002},
{0x033, 0x000001A1},
@@ -11670,9 +24633,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001A5},
{0x03F, 0x00008002},
{0x033, 0x000001A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A8},
{0x03F, 0x00050002},
{0x033, 0x000001A9},
@@ -11686,9 +24733,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001AD},
{0x03F, 0x00008002},
{0x033, 0x000001AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B0},
{0x03F, 0x00050002},
{0x033, 0x000001B1},
@@ -11702,9 +24833,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001B5},
{0x03F, 0x00008002},
{0x033, 0x000001B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E0},
{0x03F, 0x00050002},
{0x033, 0x000001E1},
@@ -11718,9 +24933,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001E5},
{0x03F, 0x00008002},
{0x033, 0x000001E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E8},
{0x03F, 0x00050002},
{0x033, 0x000001E9},
@@ -11734,9 +25033,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001ED},
{0x03F, 0x00008002},
{0x033, 0x000001EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F0},
{0x03F, 0x00050002},
{0x033, 0x000001F1},
@@ -11750,9 +25133,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001F5},
{0x03F, 0x00008002},
{0x033, 0x000001F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F8},
{0x03F, 0x00050002},
{0x033, 0x000001F9},
@@ -11766,9 +25233,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001FD},
{0x03F, 0x00008002},
{0x033, 0x000001FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x005, 0x00000001},
{0x10005, 0x00000001},
@@ -11810,7 +25361,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00022000},
{0x10030, 0x00023000},
{0x10030, 0x00024000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00025000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00026003},
{0x10030, 0x00027003},
{0x10030, 0x00028000},
@@ -11818,7 +25411,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x0002A000},
{0x10030, 0x0002B000},
{0x10030, 0x0002C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0002D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0002E003},
{0x10030, 0x0002F003},
{0x10030, 0x00030000},
@@ -11826,7 +25461,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00032000},
{0x10030, 0x00033000},
{0x10030, 0x00034000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0xA0000000, 0x00000000},
{0x10030, 0x00035000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00036003},
{0x10030, 0x00037003},
{0x10030, 0x00038000},
@@ -11834,7 +25511,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x0003A000},
{0x10030, 0x0003B000},
{0x10030, 0x0003C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0003D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0003E003},
{0x10030, 0x0003F003},
{0x10030, 0x00060000},
@@ -11842,32 +25561,280 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00062000},
{0x10030, 0x00063000},
{0x10030, 0x00064000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00065000},
{0x10030, 0x00066000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00067003},
{0x10030, 0x00068000},
{0x10030, 0x00069000},
{0x10030, 0x0006A000},
{0x10030, 0x0006B000},
{0x10030, 0x0006C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0006D000},
{0x10030, 0x0006E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0006F003},
{0x10030, 0x00070000},
{0x10030, 0x00071000},
{0x10030, 0x00072000},
{0x10030, 0x00073000},
{0x10030, 0x00074000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00075000},
{0x10030, 0x00076000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00077003},
{0x10030, 0x00078000},
{0x10030, 0x00079000},
{0x10030, 0x0007A000},
{0x10030, 0x0007B000},
{0x10030, 0x0007C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0007D000},
{0x10030, 0x0007E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0007F003},
{0x0ED, 0x00000010},
{0x033, 0x00000001},
@@ -11884,7 +25851,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000000A},
{0x0ED, 0x00000000},
{0x100EE, 0x00000000},
- {0x0FE, 0x00000031},
+ {0x0FE, 0x00000048},
};
static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = {
@@ -13825,1207 +27792,1722 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
[RTW89_REGD_NUM] = {
[0][0][RTW89_ACMA] = 0,
+ [0][0][RTW89_CN] = 0,
[0][0][RTW89_ETSI] = 0,
[0][0][RTW89_FCC] = 1,
[0][0][RTW89_IC] = 1,
+ [0][0][RTW89_KCC] = 0,
[0][0][RTW89_MKK] = 0,
+ [0][0][RTW89_UK] = 0,
[0][1][RTW89_ACMA] = 0,
+ [0][1][RTW89_CN] = 0,
[0][1][RTW89_ETSI] = 0,
[0][1][RTW89_FCC] = 3,
[0][1][RTW89_IC] = 3,
+ [0][1][RTW89_KCC] = 0,
[0][1][RTW89_MKK] = 0,
+ [0][1][RTW89_UK] = 0,
[1][1][RTW89_ACMA] = 0,
+ [1][1][RTW89_CN] = 0,
[1][1][RTW89_ETSI] = 0,
[1][1][RTW89_FCC] = 3,
[1][1][RTW89_IC] = 3,
+ [1][1][RTW89_KCC] = 0,
[1][1][RTW89_MKK] = 0,
- [2][1][RTW89_FCC] = 1,
+ [1][1][RTW89_UK] = 0,
+ [2][1][RTW89_ETSI] = 0,
+ [2][1][RTW89_FCC] = 0,
+ [2][1][RTW89_KCC] = 0,
};
const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
- [0][0][0][0][RTW89_WW][0] = 60,
- [0][0][0][0][RTW89_WW][1] = 60,
- [0][0][0][0][RTW89_WW][2] = 60,
- [0][0][0][0][RTW89_WW][3] = 60,
- [0][0][0][0][RTW89_WW][4] = 60,
- [0][0][0][0][RTW89_WW][5] = 60,
- [0][0][0][0][RTW89_WW][6] = 60,
- [0][0][0][0][RTW89_WW][7] = 60,
- [0][0][0][0][RTW89_WW][8] = 60,
- [0][0][0][0][RTW89_WW][9] = 60,
- [0][0][0][0][RTW89_WW][10] = 60,
- [0][0][0][0][RTW89_WW][11] = 60,
- [0][0][0][0][RTW89_WW][12] = 48,
+ [0][0][0][0][RTW89_WW][0] = 58,
+ [0][0][0][0][RTW89_WW][1] = 58,
+ [0][0][0][0][RTW89_WW][2] = 58,
+ [0][0][0][0][RTW89_WW][3] = 58,
+ [0][0][0][0][RTW89_WW][4] = 58,
+ [0][0][0][0][RTW89_WW][5] = 58,
+ [0][0][0][0][RTW89_WW][6] = 58,
+ [0][0][0][0][RTW89_WW][7] = 58,
+ [0][0][0][0][RTW89_WW][8] = 58,
+ [0][0][0][0][RTW89_WW][9] = 58,
+ [0][0][0][0][RTW89_WW][10] = 58,
+ [0][0][0][0][RTW89_WW][11] = 58,
+ [0][0][0][0][RTW89_WW][12] = 46,
[0][0][0][0][RTW89_WW][13] = 72,
- [0][1][0][0][RTW89_WW][0] = 48,
- [0][1][0][0][RTW89_WW][1] = 48,
- [0][1][0][0][RTW89_WW][2] = 48,
- [0][1][0][0][RTW89_WW][3] = 48,
- [0][1][0][0][RTW89_WW][4] = 48,
- [0][1][0][0][RTW89_WW][5] = 48,
- [0][1][0][0][RTW89_WW][6] = 48,
- [0][1][0][0][RTW89_WW][7] = 48,
- [0][1][0][0][RTW89_WW][8] = 48,
- [0][1][0][0][RTW89_WW][9] = 48,
- [0][1][0][0][RTW89_WW][10] = 48,
- [0][1][0][0][RTW89_WW][11] = 46,
- [0][1][0][0][RTW89_WW][12] = 34,
+ [0][1][0][0][RTW89_WW][0] = 42,
+ [0][1][0][0][RTW89_WW][1] = 42,
+ [0][1][0][0][RTW89_WW][2] = 42,
+ [0][1][0][0][RTW89_WW][3] = 42,
+ [0][1][0][0][RTW89_WW][4] = 42,
+ [0][1][0][0][RTW89_WW][5] = 42,
+ [0][1][0][0][RTW89_WW][6] = 42,
+ [0][1][0][0][RTW89_WW][7] = 42,
+ [0][1][0][0][RTW89_WW][8] = 42,
+ [0][1][0][0][RTW89_WW][9] = 42,
+ [0][1][0][0][RTW89_WW][10] = 42,
+ [0][1][0][0][RTW89_WW][11] = 42,
+ [0][1][0][0][RTW89_WW][12] = 18,
[0][1][0][0][RTW89_WW][13] = 60,
[1][0][0][0][RTW89_WW][0] = 0,
[1][0][0][0][RTW89_WW][1] = 0,
- [1][0][0][0][RTW89_WW][2] = 42,
- [1][0][0][0][RTW89_WW][3] = 42,
- [1][0][0][0][RTW89_WW][4] = 42,
+ [1][0][0][0][RTW89_WW][2] = 44,
+ [1][0][0][0][RTW89_WW][3] = 58,
+ [1][0][0][0][RTW89_WW][4] = 58,
[1][0][0][0][RTW89_WW][5] = 58,
- [1][0][0][0][RTW89_WW][6] = 42,
- [1][0][0][0][RTW89_WW][7] = 42,
- [1][0][0][0][RTW89_WW][8] = 42,
- [1][0][0][0][RTW89_WW][9] = 34,
- [1][0][0][0][RTW89_WW][10] = 22,
+ [1][0][0][0][RTW89_WW][6] = 46,
+ [1][0][0][0][RTW89_WW][7] = 46,
+ [1][0][0][0][RTW89_WW][8] = 28,
+ [1][0][0][0][RTW89_WW][9] = 26,
+ [1][0][0][0][RTW89_WW][10] = 26,
[1][0][0][0][RTW89_WW][11] = 0,
[1][0][0][0][RTW89_WW][12] = 0,
[1][0][0][0][RTW89_WW][13] = 0,
[1][1][0][0][RTW89_WW][0] = 0,
[1][1][0][0][RTW89_WW][1] = 0,
- [1][1][0][0][RTW89_WW][2] = 38,
- [1][1][0][0][RTW89_WW][3] = 38,
- [1][1][0][0][RTW89_WW][4] = 38,
- [1][1][0][0][RTW89_WW][5] = 48,
- [1][1][0][0][RTW89_WW][6] = 26,
- [1][1][0][0][RTW89_WW][7] = 26,
- [1][1][0][0][RTW89_WW][8] = 26,
- [1][1][0][0][RTW89_WW][9] = 22,
- [1][1][0][0][RTW89_WW][10] = 22,
+ [1][1][0][0][RTW89_WW][2] = 46,
+ [1][1][0][0][RTW89_WW][3] = 46,
+ [1][1][0][0][RTW89_WW][4] = 46,
+ [1][1][0][0][RTW89_WW][5] = 46,
+ [1][1][0][0][RTW89_WW][6] = 40,
+ [1][1][0][0][RTW89_WW][7] = 40,
+ [1][1][0][0][RTW89_WW][8] = 14,
+ [1][1][0][0][RTW89_WW][9] = 14,
+ [1][1][0][0][RTW89_WW][10] = 12,
[1][1][0][0][RTW89_WW][11] = 0,
[1][1][0][0][RTW89_WW][12] = 0,
[1][1][0][0][RTW89_WW][13] = 0,
- [0][0][1][0][RTW89_WW][0] = 60,
- [0][0][1][0][RTW89_WW][1] = 60,
- [0][0][1][0][RTW89_WW][2] = 60,
- [0][0][1][0][RTW89_WW][3] = 60,
- [0][0][1][0][RTW89_WW][4] = 60,
- [0][0][1][0][RTW89_WW][5] = 60,
- [0][0][1][0][RTW89_WW][6] = 60,
- [0][0][1][0][RTW89_WW][7] = 60,
- [0][0][1][0][RTW89_WW][8] = 60,
- [0][0][1][0][RTW89_WW][9] = 60,
- [0][0][1][0][RTW89_WW][10] = 60,
- [0][0][1][0][RTW89_WW][11] = 46,
- [0][0][1][0][RTW89_WW][12] = 42,
+ [0][0][1][0][RTW89_WW][0] = 58,
+ [0][0][1][0][RTW89_WW][1] = 58,
+ [0][0][1][0][RTW89_WW][2] = 58,
+ [0][0][1][0][RTW89_WW][3] = 58,
+ [0][0][1][0][RTW89_WW][4] = 58,
+ [0][0][1][0][RTW89_WW][5] = 58,
+ [0][0][1][0][RTW89_WW][6] = 58,
+ [0][0][1][0][RTW89_WW][7] = 58,
+ [0][0][1][0][RTW89_WW][8] = 58,
+ [0][0][1][0][RTW89_WW][9] = 58,
+ [0][0][1][0][RTW89_WW][10] = 58,
+ [0][0][1][0][RTW89_WW][11] = 58,
+ [0][0][1][0][RTW89_WW][12] = 58,
[0][0][1][0][RTW89_WW][13] = 0,
- [0][1][1][0][RTW89_WW][0] = 48,
- [0][1][1][0][RTW89_WW][1] = 48,
- [0][1][1][0][RTW89_WW][2] = 48,
- [0][1][1][0][RTW89_WW][3] = 48,
- [0][1][1][0][RTW89_WW][4] = 48,
- [0][1][1][0][RTW89_WW][5] = 48,
- [0][1][1][0][RTW89_WW][6] = 48,
- [0][1][1][0][RTW89_WW][7] = 48,
- [0][1][1][0][RTW89_WW][8] = 48,
- [0][1][1][0][RTW89_WW][9] = 48,
- [0][1][1][0][RTW89_WW][10] = 48,
- [0][1][1][0][RTW89_WW][11] = 38,
- [0][1][1][0][RTW89_WW][12] = 34,
+ [0][1][1][0][RTW89_WW][0] = 46,
+ [0][1][1][0][RTW89_WW][1] = 46,
+ [0][1][1][0][RTW89_WW][2] = 46,
+ [0][1][1][0][RTW89_WW][3] = 46,
+ [0][1][1][0][RTW89_WW][4] = 46,
+ [0][1][1][0][RTW89_WW][5] = 46,
+ [0][1][1][0][RTW89_WW][6] = 46,
+ [0][1][1][0][RTW89_WW][7] = 46,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][9] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][11] = 46,
+ [0][1][1][0][RTW89_WW][12] = 36,
[0][1][1][0][RTW89_WW][13] = 0,
- [0][0][2][0][RTW89_WW][0] = 60,
- [0][0][2][0][RTW89_WW][1] = 60,
- [0][0][2][0][RTW89_WW][2] = 60,
- [0][0][2][0][RTW89_WW][3] = 60,
- [0][0][2][0][RTW89_WW][4] = 60,
- [0][0][2][0][RTW89_WW][5] = 60,
- [0][0][2][0][RTW89_WW][6] = 60,
- [0][0][2][0][RTW89_WW][7] = 60,
- [0][0][2][0][RTW89_WW][8] = 60,
- [0][0][2][0][RTW89_WW][9] = 60,
- [0][0][2][0][RTW89_WW][10] = 60,
- [0][0][2][0][RTW89_WW][11] = 46,
- [0][0][2][0][RTW89_WW][12] = 42,
+ [0][0][2][0][RTW89_WW][0] = 58,
+ [0][0][2][0][RTW89_WW][1] = 58,
+ [0][0][2][0][RTW89_WW][2] = 58,
+ [0][0][2][0][RTW89_WW][3] = 58,
+ [0][0][2][0][RTW89_WW][4] = 58,
+ [0][0][2][0][RTW89_WW][5] = 58,
+ [0][0][2][0][RTW89_WW][6] = 58,
+ [0][0][2][0][RTW89_WW][7] = 58,
+ [0][0][2][0][RTW89_WW][8] = 58,
+ [0][0][2][0][RTW89_WW][9] = 58,
+ [0][0][2][0][RTW89_WW][10] = 58,
+ [0][0][2][0][RTW89_WW][11] = 58,
+ [0][0][2][0][RTW89_WW][12] = 38,
[0][0][2][0][RTW89_WW][13] = 0,
- [0][1][2][0][RTW89_WW][0] = 48,
- [0][1][2][0][RTW89_WW][1] = 48,
- [0][1][2][0][RTW89_WW][2] = 48,
- [0][1][2][0][RTW89_WW][3] = 48,
- [0][1][2][0][RTW89_WW][4] = 48,
- [0][1][2][0][RTW89_WW][5] = 48,
- [0][1][2][0][RTW89_WW][6] = 48,
- [0][1][2][0][RTW89_WW][7] = 48,
- [0][1][2][0][RTW89_WW][8] = 48,
- [0][1][2][0][RTW89_WW][9] = 48,
- [0][1][2][0][RTW89_WW][10] = 48,
- [0][1][2][0][RTW89_WW][11] = 38,
- [0][1][2][0][RTW89_WW][12] = 34,
+ [0][1][2][0][RTW89_WW][0] = 46,
+ [0][1][2][0][RTW89_WW][1] = 46,
+ [0][1][2][0][RTW89_WW][2] = 46,
+ [0][1][2][0][RTW89_WW][3] = 46,
+ [0][1][2][0][RTW89_WW][4] = 46,
+ [0][1][2][0][RTW89_WW][5] = 46,
+ [0][1][2][0][RTW89_WW][6] = 46,
+ [0][1][2][0][RTW89_WW][7] = 46,
+ [0][1][2][0][RTW89_WW][8] = 46,
+ [0][1][2][0][RTW89_WW][9] = 46,
+ [0][1][2][0][RTW89_WW][10] = 46,
+ [0][1][2][0][RTW89_WW][11] = 46,
+ [0][1][2][0][RTW89_WW][12] = 16,
[0][1][2][0][RTW89_WW][13] = 0,
[0][1][2][1][RTW89_WW][0] = 36,
- [0][1][2][1][RTW89_WW][1] = 36,
- [0][1][2][1][RTW89_WW][2] = 36,
- [0][1][2][1][RTW89_WW][3] = 36,
- [0][1][2][1][RTW89_WW][4] = 36,
- [0][1][2][1][RTW89_WW][5] = 36,
- [0][1][2][1][RTW89_WW][6] = 36,
- [0][1][2][1][RTW89_WW][7] = 36,
- [0][1][2][1][RTW89_WW][8] = 36,
- [0][1][2][1][RTW89_WW][9] = 36,
- [0][1][2][1][RTW89_WW][10] = 36,
- [0][1][2][1][RTW89_WW][11] = 36,
- [0][1][2][1][RTW89_WW][12] = 34,
+ [0][1][2][1][RTW89_WW][1] = 34,
+ [0][1][2][1][RTW89_WW][2] = 34,
+ [0][1][2][1][RTW89_WW][3] = 34,
+ [0][1][2][1][RTW89_WW][4] = 34,
+ [0][1][2][1][RTW89_WW][5] = 34,
+ [0][1][2][1][RTW89_WW][6] = 34,
+ [0][1][2][1][RTW89_WW][7] = 34,
+ [0][1][2][1][RTW89_WW][8] = 34,
+ [0][1][2][1][RTW89_WW][9] = 34,
+ [0][1][2][1][RTW89_WW][10] = 34,
+ [0][1][2][1][RTW89_WW][11] = 34,
+ [0][1][2][1][RTW89_WW][12] = 16,
[0][1][2][1][RTW89_WW][13] = 0,
[1][0][2][0][RTW89_WW][0] = 0,
[1][0][2][0][RTW89_WW][1] = 0,
- [1][0][2][0][RTW89_WW][2] = 60,
- [1][0][2][0][RTW89_WW][3] = 60,
- [1][0][2][0][RTW89_WW][4] = 60,
- [1][0][2][0][RTW89_WW][5] = 60,
- [1][0][2][0][RTW89_WW][6] = 60,
- [1][0][2][0][RTW89_WW][7] = 60,
- [1][0][2][0][RTW89_WW][8] = 60,
- [1][0][2][0][RTW89_WW][9] = 60,
- [1][0][2][0][RTW89_WW][10] = 58,
+ [1][0][2][0][RTW89_WW][2] = 58,
+ [1][0][2][0][RTW89_WW][3] = 58,
+ [1][0][2][0][RTW89_WW][4] = 58,
+ [1][0][2][0][RTW89_WW][5] = 58,
+ [1][0][2][0][RTW89_WW][6] = 58,
+ [1][0][2][0][RTW89_WW][7] = 58,
+ [1][0][2][0][RTW89_WW][8] = 58,
+ [1][0][2][0][RTW89_WW][9] = 58,
+ [1][0][2][0][RTW89_WW][10] = 56,
[1][0][2][0][RTW89_WW][11] = 0,
[1][0][2][0][RTW89_WW][12] = 0,
[1][0][2][0][RTW89_WW][13] = 0,
[1][1][2][0][RTW89_WW][0] = 0,
[1][1][2][0][RTW89_WW][1] = 0,
- [1][1][2][0][RTW89_WW][2] = 46,
- [1][1][2][0][RTW89_WW][3] = 46,
- [1][1][2][0][RTW89_WW][4] = 48,
- [1][1][2][0][RTW89_WW][5] = 48,
- [1][1][2][0][RTW89_WW][6] = 48,
- [1][1][2][0][RTW89_WW][7] = 46,
- [1][1][2][0][RTW89_WW][8] = 46,
+ [1][1][2][0][RTW89_WW][2] = 34,
+ [1][1][2][0][RTW89_WW][3] = 34,
+ [1][1][2][0][RTW89_WW][4] = 34,
+ [1][1][2][0][RTW89_WW][5] = 34,
+ [1][1][2][0][RTW89_WW][6] = 34,
+ [1][1][2][0][RTW89_WW][7] = 34,
+ [1][1][2][0][RTW89_WW][8] = 34,
[1][1][2][0][RTW89_WW][9] = 34,
- [1][1][2][0][RTW89_WW][10] = 30,
+ [1][1][2][0][RTW89_WW][10] = 34,
[1][1][2][0][RTW89_WW][11] = 0,
[1][1][2][0][RTW89_WW][12] = 0,
[1][1][2][0][RTW89_WW][13] = 0,
[1][1][2][1][RTW89_WW][0] = 0,
[1][1][2][1][RTW89_WW][1] = 0,
- [1][1][2][1][RTW89_WW][2] = 36,
- [1][1][2][1][RTW89_WW][3] = 36,
- [1][1][2][1][RTW89_WW][4] = 36,
- [1][1][2][1][RTW89_WW][5] = 36,
- [1][1][2][1][RTW89_WW][6] = 36,
- [1][1][2][1][RTW89_WW][7] = 36,
- [1][1][2][1][RTW89_WW][8] = 36,
+ [1][1][2][1][RTW89_WW][2] = 34,
+ [1][1][2][1][RTW89_WW][3] = 34,
+ [1][1][2][1][RTW89_WW][4] = 34,
+ [1][1][2][1][RTW89_WW][5] = 34,
+ [1][1][2][1][RTW89_WW][6] = 34,
+ [1][1][2][1][RTW89_WW][7] = 34,
+ [1][1][2][1][RTW89_WW][8] = 34,
[1][1][2][1][RTW89_WW][9] = 34,
- [1][1][2][1][RTW89_WW][10] = 30,
+ [1][1][2][1][RTW89_WW][10] = 36,
[1][1][2][1][RTW89_WW][11] = 0,
[1][1][2][1][RTW89_WW][12] = 0,
[1][1][2][1][RTW89_WW][13] = 0,
- [0][0][0][0][RTW89_FCC][0] = 70,
+ [0][0][0][0][RTW89_FCC][0] = 76,
[0][0][0][0][RTW89_ETSI][0] = 60,
[0][0][0][0][RTW89_MKK][0] = 68,
- [0][0][0][0][RTW89_IC][0] = 74,
+ [0][0][0][0][RTW89_IC][0] = 76,
+ [0][0][0][0][RTW89_KCC][0] = 68,
[0][0][0][0][RTW89_ACMA][0] = 60,
- [0][0][0][0][RTW89_FCC][1] = 70,
+ [0][0][0][0][RTW89_CN][0] = 58,
+ [0][0][0][0][RTW89_UK][0] = 60,
+ [0][0][0][0][RTW89_FCC][1] = 76,
[0][0][0][0][RTW89_ETSI][1] = 60,
[0][0][0][0][RTW89_MKK][1] = 68,
- [0][0][0][0][RTW89_IC][1] = 74,
+ [0][0][0][0][RTW89_IC][1] = 76,
+ [0][0][0][0][RTW89_KCC][1] = 68,
[0][0][0][0][RTW89_ACMA][1] = 60,
- [0][0][0][0][RTW89_FCC][2] = 70,
+ [0][0][0][0][RTW89_CN][1] = 58,
+ [0][0][0][0][RTW89_UK][1] = 60,
+ [0][0][0][0][RTW89_FCC][2] = 76,
[0][0][0][0][RTW89_ETSI][2] = 60,
[0][0][0][0][RTW89_MKK][2] = 68,
- [0][0][0][0][RTW89_IC][2] = 74,
+ [0][0][0][0][RTW89_IC][2] = 76,
+ [0][0][0][0][RTW89_KCC][2] = 68,
[0][0][0][0][RTW89_ACMA][2] = 60,
- [0][0][0][0][RTW89_FCC][3] = 70,
+ [0][0][0][0][RTW89_CN][2] = 58,
+ [0][0][0][0][RTW89_UK][2] = 60,
+ [0][0][0][0][RTW89_FCC][3] = 76,
[0][0][0][0][RTW89_ETSI][3] = 60,
[0][0][0][0][RTW89_MKK][3] = 68,
- [0][0][0][0][RTW89_IC][3] = 74,
+ [0][0][0][0][RTW89_IC][3] = 76,
+ [0][0][0][0][RTW89_KCC][3] = 68,
[0][0][0][0][RTW89_ACMA][3] = 60,
- [0][0][0][0][RTW89_FCC][4] = 70,
+ [0][0][0][0][RTW89_CN][3] = 58,
+ [0][0][0][0][RTW89_UK][3] = 60,
+ [0][0][0][0][RTW89_FCC][4] = 76,
[0][0][0][0][RTW89_ETSI][4] = 60,
[0][0][0][0][RTW89_MKK][4] = 68,
- [0][0][0][0][RTW89_IC][4] = 74,
+ [0][0][0][0][RTW89_IC][4] = 76,
+ [0][0][0][0][RTW89_KCC][4] = 68,
[0][0][0][0][RTW89_ACMA][4] = 60,
- [0][0][0][0][RTW89_FCC][5] = 70,
+ [0][0][0][0][RTW89_CN][4] = 58,
+ [0][0][0][0][RTW89_UK][4] = 60,
+ [0][0][0][0][RTW89_FCC][5] = 76,
[0][0][0][0][RTW89_ETSI][5] = 60,
[0][0][0][0][RTW89_MKK][5] = 68,
- [0][0][0][0][RTW89_IC][5] = 74,
+ [0][0][0][0][RTW89_IC][5] = 76,
+ [0][0][0][0][RTW89_KCC][5] = 68,
[0][0][0][0][RTW89_ACMA][5] = 60,
- [0][0][0][0][RTW89_FCC][6] = 70,
+ [0][0][0][0][RTW89_CN][5] = 58,
+ [0][0][0][0][RTW89_UK][5] = 60,
+ [0][0][0][0][RTW89_FCC][6] = 76,
[0][0][0][0][RTW89_ETSI][6] = 60,
[0][0][0][0][RTW89_MKK][6] = 68,
- [0][0][0][0][RTW89_IC][6] = 74,
+ [0][0][0][0][RTW89_IC][6] = 76,
+ [0][0][0][0][RTW89_KCC][6] = 68,
[0][0][0][0][RTW89_ACMA][6] = 60,
- [0][0][0][0][RTW89_FCC][7] = 70,
+ [0][0][0][0][RTW89_CN][6] = 58,
+ [0][0][0][0][RTW89_UK][6] = 60,
+ [0][0][0][0][RTW89_FCC][7] = 76,
[0][0][0][0][RTW89_ETSI][7] = 60,
[0][0][0][0][RTW89_MKK][7] = 68,
- [0][0][0][0][RTW89_IC][7] = 74,
+ [0][0][0][0][RTW89_IC][7] = 76,
+ [0][0][0][0][RTW89_KCC][7] = 68,
[0][0][0][0][RTW89_ACMA][7] = 60,
- [0][0][0][0][RTW89_FCC][8] = 70,
+ [0][0][0][0][RTW89_CN][7] = 58,
+ [0][0][0][0][RTW89_UK][7] = 60,
+ [0][0][0][0][RTW89_FCC][8] = 76,
[0][0][0][0][RTW89_ETSI][8] = 60,
[0][0][0][0][RTW89_MKK][8] = 68,
- [0][0][0][0][RTW89_IC][8] = 74,
+ [0][0][0][0][RTW89_IC][8] = 76,
+ [0][0][0][0][RTW89_KCC][8] = 68,
[0][0][0][0][RTW89_ACMA][8] = 60,
- [0][0][0][0][RTW89_FCC][9] = 70,
+ [0][0][0][0][RTW89_CN][8] = 58,
+ [0][0][0][0][RTW89_UK][8] = 60,
+ [0][0][0][0][RTW89_FCC][9] = 76,
[0][0][0][0][RTW89_ETSI][9] = 60,
[0][0][0][0][RTW89_MKK][9] = 68,
- [0][0][0][0][RTW89_IC][9] = 74,
+ [0][0][0][0][RTW89_IC][9] = 76,
+ [0][0][0][0][RTW89_KCC][9] = 70,
[0][0][0][0][RTW89_ACMA][9] = 60,
- [0][0][0][0][RTW89_FCC][10] = 70,
+ [0][0][0][0][RTW89_CN][9] = 58,
+ [0][0][0][0][RTW89_UK][9] = 60,
+ [0][0][0][0][RTW89_FCC][10] = 76,
[0][0][0][0][RTW89_ETSI][10] = 60,
[0][0][0][0][RTW89_MKK][10] = 68,
- [0][0][0][0][RTW89_IC][10] = 74,
+ [0][0][0][0][RTW89_IC][10] = 76,
+ [0][0][0][0][RTW89_KCC][10] = 70,
[0][0][0][0][RTW89_ACMA][10] = 60,
- [0][0][0][0][RTW89_FCC][11] = 62,
+ [0][0][0][0][RTW89_CN][10] = 58,
+ [0][0][0][0][RTW89_UK][10] = 60,
+ [0][0][0][0][RTW89_FCC][11] = 58,
[0][0][0][0][RTW89_ETSI][11] = 60,
[0][0][0][0][RTW89_MKK][11] = 68,
- [0][0][0][0][RTW89_IC][11] = 72,
+ [0][0][0][0][RTW89_IC][11] = 58,
+ [0][0][0][0][RTW89_KCC][11] = 70,
[0][0][0][0][RTW89_ACMA][11] = 60,
- [0][0][0][0][RTW89_FCC][12] = 48,
+ [0][0][0][0][RTW89_CN][11] = 58,
+ [0][0][0][0][RTW89_UK][11] = 60,
+ [0][0][0][0][RTW89_FCC][12] = 46,
[0][0][0][0][RTW89_ETSI][12] = 60,
[0][0][0][0][RTW89_MKK][12] = 68,
- [0][0][0][0][RTW89_IC][12] = 58,
+ [0][0][0][0][RTW89_IC][12] = 46,
+ [0][0][0][0][RTW89_KCC][12] = 70,
[0][0][0][0][RTW89_ACMA][12] = 60,
+ [0][0][0][0][RTW89_CN][12] = 58,
+ [0][0][0][0][RTW89_UK][12] = 60,
[0][0][0][0][RTW89_FCC][13] = 127,
[0][0][0][0][RTW89_ETSI][13] = 127,
[0][0][0][0][RTW89_MKK][13] = 72,
[0][0][0][0][RTW89_IC][13] = 127,
+ [0][0][0][0][RTW89_KCC][13] = 127,
[0][0][0][0][RTW89_ACMA][13] = 127,
- [0][1][0][0][RTW89_FCC][0] = 66,
+ [0][0][0][0][RTW89_CN][13] = 127,
+ [0][0][0][0][RTW89_UK][13] = 127,
+ [0][1][0][0][RTW89_FCC][0] = 76,
[0][1][0][0][RTW89_ETSI][0] = 48,
[0][1][0][0][RTW89_MKK][0] = 58,
- [0][1][0][0][RTW89_IC][0] = 74,
+ [0][1][0][0][RTW89_IC][0] = 76,
+ [0][1][0][0][RTW89_KCC][0] = 56,
[0][1][0][0][RTW89_ACMA][0] = 48,
- [0][1][0][0][RTW89_FCC][1] = 66,
+ [0][1][0][0][RTW89_CN][0] = 42,
+ [0][1][0][0][RTW89_UK][0] = 48,
+ [0][1][0][0][RTW89_FCC][1] = 76,
[0][1][0][0][RTW89_ETSI][1] = 48,
[0][1][0][0][RTW89_MKK][1] = 58,
- [0][1][0][0][RTW89_IC][1] = 74,
+ [0][1][0][0][RTW89_IC][1] = 76,
+ [0][1][0][0][RTW89_KCC][1] = 56,
[0][1][0][0][RTW89_ACMA][1] = 48,
- [0][1][0][0][RTW89_FCC][2] = 66,
+ [0][1][0][0][RTW89_CN][1] = 42,
+ [0][1][0][0][RTW89_UK][1] = 48,
+ [0][1][0][0][RTW89_FCC][2] = 76,
[0][1][0][0][RTW89_ETSI][2] = 48,
[0][1][0][0][RTW89_MKK][2] = 58,
- [0][1][0][0][RTW89_IC][2] = 74,
+ [0][1][0][0][RTW89_IC][2] = 76,
+ [0][1][0][0][RTW89_KCC][2] = 56,
[0][1][0][0][RTW89_ACMA][2] = 48,
- [0][1][0][0][RTW89_FCC][3] = 66,
+ [0][1][0][0][RTW89_CN][2] = 42,
+ [0][1][0][0][RTW89_UK][2] = 48,
+ [0][1][0][0][RTW89_FCC][3] = 76,
[0][1][0][0][RTW89_ETSI][3] = 48,
[0][1][0][0][RTW89_MKK][3] = 58,
- [0][1][0][0][RTW89_IC][3] = 74,
+ [0][1][0][0][RTW89_IC][3] = 76,
+ [0][1][0][0][RTW89_KCC][3] = 56,
[0][1][0][0][RTW89_ACMA][3] = 48,
- [0][1][0][0][RTW89_FCC][4] = 66,
+ [0][1][0][0][RTW89_CN][3] = 42,
+ [0][1][0][0][RTW89_UK][3] = 48,
+ [0][1][0][0][RTW89_FCC][4] = 76,
[0][1][0][0][RTW89_ETSI][4] = 48,
[0][1][0][0][RTW89_MKK][4] = 58,
- [0][1][0][0][RTW89_IC][4] = 74,
+ [0][1][0][0][RTW89_IC][4] = 76,
+ [0][1][0][0][RTW89_KCC][4] = 56,
[0][1][0][0][RTW89_ACMA][4] = 48,
- [0][1][0][0][RTW89_FCC][5] = 66,
+ [0][1][0][0][RTW89_CN][4] = 42,
+ [0][1][0][0][RTW89_UK][4] = 48,
+ [0][1][0][0][RTW89_FCC][5] = 76,
[0][1][0][0][RTW89_ETSI][5] = 48,
[0][1][0][0][RTW89_MKK][5] = 58,
- [0][1][0][0][RTW89_IC][5] = 74,
+ [0][1][0][0][RTW89_IC][5] = 76,
+ [0][1][0][0][RTW89_KCC][5] = 56,
[0][1][0][0][RTW89_ACMA][5] = 48,
- [0][1][0][0][RTW89_FCC][6] = 66,
+ [0][1][0][0][RTW89_CN][5] = 42,
+ [0][1][0][0][RTW89_UK][5] = 48,
+ [0][1][0][0][RTW89_FCC][6] = 76,
[0][1][0][0][RTW89_ETSI][6] = 48,
[0][1][0][0][RTW89_MKK][6] = 58,
- [0][1][0][0][RTW89_IC][6] = 74,
+ [0][1][0][0][RTW89_IC][6] = 76,
+ [0][1][0][0][RTW89_KCC][6] = 56,
[0][1][0][0][RTW89_ACMA][6] = 48,
- [0][1][0][0][RTW89_FCC][7] = 66,
+ [0][1][0][0][RTW89_CN][6] = 42,
+ [0][1][0][0][RTW89_UK][6] = 48,
+ [0][1][0][0][RTW89_FCC][7] = 76,
[0][1][0][0][RTW89_ETSI][7] = 48,
[0][1][0][0][RTW89_MKK][7] = 58,
- [0][1][0][0][RTW89_IC][7] = 74,
+ [0][1][0][0][RTW89_IC][7] = 76,
+ [0][1][0][0][RTW89_KCC][7] = 56,
[0][1][0][0][RTW89_ACMA][7] = 48,
- [0][1][0][0][RTW89_FCC][8] = 66,
+ [0][1][0][0][RTW89_CN][7] = 42,
+ [0][1][0][0][RTW89_UK][7] = 48,
+ [0][1][0][0][RTW89_FCC][8] = 76,
[0][1][0][0][RTW89_ETSI][8] = 48,
[0][1][0][0][RTW89_MKK][8] = 58,
- [0][1][0][0][RTW89_IC][8] = 74,
+ [0][1][0][0][RTW89_IC][8] = 76,
+ [0][1][0][0][RTW89_KCC][8] = 56,
[0][1][0][0][RTW89_ACMA][8] = 48,
- [0][1][0][0][RTW89_FCC][9] = 66,
+ [0][1][0][0][RTW89_CN][8] = 42,
+ [0][1][0][0][RTW89_UK][8] = 48,
+ [0][1][0][0][RTW89_FCC][9] = 70,
[0][1][0][0][RTW89_ETSI][9] = 48,
[0][1][0][0][RTW89_MKK][9] = 58,
- [0][1][0][0][RTW89_IC][9] = 74,
+ [0][1][0][0][RTW89_IC][9] = 70,
+ [0][1][0][0][RTW89_KCC][9] = 56,
[0][1][0][0][RTW89_ACMA][9] = 48,
- [0][1][0][0][RTW89_FCC][10] = 66,
+ [0][1][0][0][RTW89_CN][9] = 42,
+ [0][1][0][0][RTW89_UK][9] = 48,
+ [0][1][0][0][RTW89_FCC][10] = 72,
[0][1][0][0][RTW89_ETSI][10] = 48,
[0][1][0][0][RTW89_MKK][10] = 58,
- [0][1][0][0][RTW89_IC][10] = 74,
+ [0][1][0][0][RTW89_IC][10] = 72,
+ [0][1][0][0][RTW89_KCC][10] = 56,
[0][1][0][0][RTW89_ACMA][10] = 48,
- [0][1][0][0][RTW89_FCC][11] = 46,
+ [0][1][0][0][RTW89_CN][10] = 42,
+ [0][1][0][0][RTW89_UK][10] = 48,
+ [0][1][0][0][RTW89_FCC][11] = 44,
[0][1][0][0][RTW89_ETSI][11] = 48,
[0][1][0][0][RTW89_MKK][11] = 58,
- [0][1][0][0][RTW89_IC][11] = 56,
+ [0][1][0][0][RTW89_IC][11] = 44,
+ [0][1][0][0][RTW89_KCC][11] = 56,
[0][1][0][0][RTW89_ACMA][11] = 48,
- [0][1][0][0][RTW89_FCC][12] = 34,
+ [0][1][0][0][RTW89_CN][11] = 42,
+ [0][1][0][0][RTW89_UK][11] = 48,
+ [0][1][0][0][RTW89_FCC][12] = 18,
[0][1][0][0][RTW89_ETSI][12] = 48,
[0][1][0][0][RTW89_MKK][12] = 58,
- [0][1][0][0][RTW89_IC][12] = 44,
+ [0][1][0][0][RTW89_IC][12] = 18,
+ [0][1][0][0][RTW89_KCC][12] = 56,
[0][1][0][0][RTW89_ACMA][12] = 48,
+ [0][1][0][0][RTW89_CN][12] = 42,
+ [0][1][0][0][RTW89_UK][12] = 48,
[0][1][0][0][RTW89_FCC][13] = 127,
[0][1][0][0][RTW89_ETSI][13] = 127,
[0][1][0][0][RTW89_MKK][13] = 60,
[0][1][0][0][RTW89_IC][13] = 127,
+ [0][1][0][0][RTW89_KCC][13] = 127,
[0][1][0][0][RTW89_ACMA][13] = 127,
+ [0][1][0][0][RTW89_CN][13] = 127,
+ [0][1][0][0][RTW89_UK][13] = 127,
[1][0][0][0][RTW89_FCC][0] = 127,
[1][0][0][0][RTW89_ETSI][0] = 127,
[1][0][0][0][RTW89_MKK][0] = 127,
[1][0][0][0][RTW89_IC][0] = 127,
+ [1][0][0][0][RTW89_KCC][0] = 127,
[1][0][0][0][RTW89_ACMA][0] = 127,
+ [1][0][0][0][RTW89_CN][0] = 127,
+ [1][0][0][0][RTW89_UK][0] = 127,
[1][0][0][0][RTW89_FCC][1] = 127,
[1][0][0][0][RTW89_ETSI][1] = 127,
[1][0][0][0][RTW89_MKK][1] = 127,
[1][0][0][0][RTW89_IC][1] = 127,
+ [1][0][0][0][RTW89_KCC][1] = 127,
[1][0][0][0][RTW89_ACMA][1] = 127,
- [1][0][0][0][RTW89_FCC][2] = 42,
+ [1][0][0][0][RTW89_CN][1] = 127,
+ [1][0][0][0][RTW89_UK][1] = 127,
+ [1][0][0][0][RTW89_FCC][2] = 44,
[1][0][0][0][RTW89_ETSI][2] = 60,
[1][0][0][0][RTW89_MKK][2] = 66,
- [1][0][0][0][RTW89_IC][2] = 52,
+ [1][0][0][0][RTW89_IC][2] = 44,
+ [1][0][0][0][RTW89_KCC][2] = 68,
[1][0][0][0][RTW89_ACMA][2] = 60,
- [1][0][0][0][RTW89_FCC][3] = 42,
+ [1][0][0][0][RTW89_CN][2] = 58,
+ [1][0][0][0][RTW89_UK][2] = 60,
+ [1][0][0][0][RTW89_FCC][3] = 60,
[1][0][0][0][RTW89_ETSI][3] = 60,
[1][0][0][0][RTW89_MKK][3] = 66,
- [1][0][0][0][RTW89_IC][3] = 52,
+ [1][0][0][0][RTW89_IC][3] = 60,
+ [1][0][0][0][RTW89_KCC][3] = 68,
[1][0][0][0][RTW89_ACMA][3] = 60,
- [1][0][0][0][RTW89_FCC][4] = 42,
+ [1][0][0][0][RTW89_CN][3] = 58,
+ [1][0][0][0][RTW89_UK][3] = 60,
+ [1][0][0][0][RTW89_FCC][4] = 60,
[1][0][0][0][RTW89_ETSI][4] = 60,
[1][0][0][0][RTW89_MKK][4] = 66,
- [1][0][0][0][RTW89_IC][4] = 52,
+ [1][0][0][0][RTW89_IC][4] = 60,
+ [1][0][0][0][RTW89_KCC][4] = 68,
[1][0][0][0][RTW89_ACMA][4] = 60,
- [1][0][0][0][RTW89_FCC][5] = 58,
+ [1][0][0][0][RTW89_CN][4] = 58,
+ [1][0][0][0][RTW89_UK][4] = 60,
+ [1][0][0][0][RTW89_FCC][5] = 62,
[1][0][0][0][RTW89_ETSI][5] = 60,
[1][0][0][0][RTW89_MKK][5] = 66,
- [1][0][0][0][RTW89_IC][5] = 68,
+ [1][0][0][0][RTW89_IC][5] = 62,
+ [1][0][0][0][RTW89_KCC][5] = 68,
[1][0][0][0][RTW89_ACMA][5] = 60,
- [1][0][0][0][RTW89_FCC][6] = 42,
+ [1][0][0][0][RTW89_CN][5] = 58,
+ [1][0][0][0][RTW89_UK][5] = 60,
+ [1][0][0][0][RTW89_FCC][6] = 46,
[1][0][0][0][RTW89_ETSI][6] = 60,
[1][0][0][0][RTW89_MKK][6] = 66,
- [1][0][0][0][RTW89_IC][6] = 52,
+ [1][0][0][0][RTW89_IC][6] = 46,
+ [1][0][0][0][RTW89_KCC][6] = 68,
[1][0][0][0][RTW89_ACMA][6] = 60,
- [1][0][0][0][RTW89_FCC][7] = 42,
+ [1][0][0][0][RTW89_CN][6] = 58,
+ [1][0][0][0][RTW89_UK][6] = 60,
+ [1][0][0][0][RTW89_FCC][7] = 46,
[1][0][0][0][RTW89_ETSI][7] = 60,
[1][0][0][0][RTW89_MKK][7] = 66,
- [1][0][0][0][RTW89_IC][7] = 52,
+ [1][0][0][0][RTW89_IC][7] = 46,
+ [1][0][0][0][RTW89_KCC][7] = 68,
[1][0][0][0][RTW89_ACMA][7] = 60,
- [1][0][0][0][RTW89_FCC][8] = 42,
+ [1][0][0][0][RTW89_CN][7] = 58,
+ [1][0][0][0][RTW89_UK][7] = 60,
+ [1][0][0][0][RTW89_FCC][8] = 28,
[1][0][0][0][RTW89_ETSI][8] = 60,
[1][0][0][0][RTW89_MKK][8] = 66,
- [1][0][0][0][RTW89_IC][8] = 52,
+ [1][0][0][0][RTW89_IC][8] = 28,
+ [1][0][0][0][RTW89_KCC][8] = 70,
[1][0][0][0][RTW89_ACMA][8] = 60,
- [1][0][0][0][RTW89_FCC][9] = 34,
+ [1][0][0][0][RTW89_CN][8] = 58,
+ [1][0][0][0][RTW89_UK][8] = 60,
+ [1][0][0][0][RTW89_FCC][9] = 26,
[1][0][0][0][RTW89_ETSI][9] = 60,
[1][0][0][0][RTW89_MKK][9] = 66,
- [1][0][0][0][RTW89_IC][9] = 44,
+ [1][0][0][0][RTW89_IC][9] = 26,
+ [1][0][0][0][RTW89_KCC][9] = 70,
[1][0][0][0][RTW89_ACMA][9] = 60,
- [1][0][0][0][RTW89_FCC][10] = 22,
+ [1][0][0][0][RTW89_CN][9] = 58,
+ [1][0][0][0][RTW89_UK][9] = 60,
+ [1][0][0][0][RTW89_FCC][10] = 26,
[1][0][0][0][RTW89_ETSI][10] = 60,
[1][0][0][0][RTW89_MKK][10] = 66,
- [1][0][0][0][RTW89_IC][10] = 32,
+ [1][0][0][0][RTW89_IC][10] = 26,
+ [1][0][0][0][RTW89_KCC][10] = 70,
[1][0][0][0][RTW89_ACMA][10] = 60,
+ [1][0][0][0][RTW89_CN][10] = 58,
+ [1][0][0][0][RTW89_UK][10] = 60,
[1][0][0][0][RTW89_FCC][11] = 127,
[1][0][0][0][RTW89_ETSI][11] = 127,
[1][0][0][0][RTW89_MKK][11] = 127,
[1][0][0][0][RTW89_IC][11] = 127,
+ [1][0][0][0][RTW89_KCC][11] = 127,
[1][0][0][0][RTW89_ACMA][11] = 127,
+ [1][0][0][0][RTW89_CN][11] = 127,
+ [1][0][0][0][RTW89_UK][11] = 127,
[1][0][0][0][RTW89_FCC][12] = 127,
[1][0][0][0][RTW89_ETSI][12] = 127,
[1][0][0][0][RTW89_MKK][12] = 127,
[1][0][0][0][RTW89_IC][12] = 127,
+ [1][0][0][0][RTW89_KCC][12] = 127,
[1][0][0][0][RTW89_ACMA][12] = 127,
+ [1][0][0][0][RTW89_CN][12] = 127,
+ [1][0][0][0][RTW89_UK][12] = 127,
[1][0][0][0][RTW89_FCC][13] = 127,
[1][0][0][0][RTW89_ETSI][13] = 127,
[1][0][0][0][RTW89_MKK][13] = 127,
[1][0][0][0][RTW89_IC][13] = 127,
+ [1][0][0][0][RTW89_KCC][13] = 127,
[1][0][0][0][RTW89_ACMA][13] = 127,
+ [1][0][0][0][RTW89_CN][13] = 127,
+ [1][0][0][0][RTW89_UK][13] = 127,
[1][1][0][0][RTW89_FCC][0] = 127,
[1][1][0][0][RTW89_ETSI][0] = 127,
[1][1][0][0][RTW89_MKK][0] = 127,
[1][1][0][0][RTW89_IC][0] = 127,
+ [1][1][0][0][RTW89_KCC][0] = 127,
[1][1][0][0][RTW89_ACMA][0] = 127,
+ [1][1][0][0][RTW89_CN][0] = 127,
+ [1][1][0][0][RTW89_UK][0] = 127,
[1][1][0][0][RTW89_FCC][1] = 127,
[1][1][0][0][RTW89_ETSI][1] = 127,
[1][1][0][0][RTW89_MKK][1] = 127,
[1][1][0][0][RTW89_IC][1] = 127,
+ [1][1][0][0][RTW89_KCC][1] = 127,
[1][1][0][0][RTW89_ACMA][1] = 127,
- [1][1][0][0][RTW89_FCC][2] = 38,
+ [1][1][0][0][RTW89_CN][1] = 127,
+ [1][1][0][0][RTW89_UK][1] = 127,
+ [1][1][0][0][RTW89_FCC][2] = 46,
[1][1][0][0][RTW89_ETSI][2] = 48,
[1][1][0][0][RTW89_MKK][2] = 58,
- [1][1][0][0][RTW89_IC][2] = 48,
+ [1][1][0][0][RTW89_IC][2] = 46,
+ [1][1][0][0][RTW89_KCC][2] = 56,
[1][1][0][0][RTW89_ACMA][2] = 48,
- [1][1][0][0][RTW89_FCC][3] = 38,
+ [1][1][0][0][RTW89_CN][2] = 46,
+ [1][1][0][0][RTW89_UK][2] = 48,
+ [1][1][0][0][RTW89_FCC][3] = 46,
[1][1][0][0][RTW89_ETSI][3] = 48,
[1][1][0][0][RTW89_MKK][3] = 58,
- [1][1][0][0][RTW89_IC][3] = 48,
+ [1][1][0][0][RTW89_IC][3] = 46,
+ [1][1][0][0][RTW89_KCC][3] = 56,
[1][1][0][0][RTW89_ACMA][3] = 48,
- [1][1][0][0][RTW89_FCC][4] = 38,
+ [1][1][0][0][RTW89_CN][3] = 46,
+ [1][1][0][0][RTW89_UK][3] = 48,
+ [1][1][0][0][RTW89_FCC][4] = 46,
[1][1][0][0][RTW89_ETSI][4] = 48,
[1][1][0][0][RTW89_MKK][4] = 58,
- [1][1][0][0][RTW89_IC][4] = 48,
+ [1][1][0][0][RTW89_IC][4] = 46,
+ [1][1][0][0][RTW89_KCC][4] = 56,
[1][1][0][0][RTW89_ACMA][4] = 48,
- [1][1][0][0][RTW89_FCC][5] = 54,
+ [1][1][0][0][RTW89_CN][4] = 46,
+ [1][1][0][0][RTW89_UK][4] = 48,
+ [1][1][0][0][RTW89_FCC][5] = 48,
[1][1][0][0][RTW89_ETSI][5] = 48,
[1][1][0][0][RTW89_MKK][5] = 58,
- [1][1][0][0][RTW89_IC][5] = 64,
+ [1][1][0][0][RTW89_IC][5] = 48,
+ [1][1][0][0][RTW89_KCC][5] = 56,
[1][1][0][0][RTW89_ACMA][5] = 48,
- [1][1][0][0][RTW89_FCC][6] = 26,
+ [1][1][0][0][RTW89_CN][5] = 46,
+ [1][1][0][0][RTW89_UK][5] = 48,
+ [1][1][0][0][RTW89_FCC][6] = 40,
[1][1][0][0][RTW89_ETSI][6] = 48,
[1][1][0][0][RTW89_MKK][6] = 58,
- [1][1][0][0][RTW89_IC][6] = 36,
+ [1][1][0][0][RTW89_IC][6] = 40,
+ [1][1][0][0][RTW89_KCC][6] = 56,
[1][1][0][0][RTW89_ACMA][6] = 48,
- [1][1][0][0][RTW89_FCC][7] = 26,
+ [1][1][0][0][RTW89_CN][6] = 46,
+ [1][1][0][0][RTW89_UK][6] = 48,
+ [1][1][0][0][RTW89_FCC][7] = 40,
[1][1][0][0][RTW89_ETSI][7] = 48,
[1][1][0][0][RTW89_MKK][7] = 58,
- [1][1][0][0][RTW89_IC][7] = 36,
+ [1][1][0][0][RTW89_IC][7] = 40,
+ [1][1][0][0][RTW89_KCC][7] = 56,
[1][1][0][0][RTW89_ACMA][7] = 48,
- [1][1][0][0][RTW89_FCC][8] = 26,
+ [1][1][0][0][RTW89_CN][7] = 46,
+ [1][1][0][0][RTW89_UK][7] = 48,
+ [1][1][0][0][RTW89_FCC][8] = 14,
[1][1][0][0][RTW89_ETSI][8] = 48,
[1][1][0][0][RTW89_MKK][8] = 58,
- [1][1][0][0][RTW89_IC][8] = 36,
+ [1][1][0][0][RTW89_IC][8] = 14,
+ [1][1][0][0][RTW89_KCC][8] = 58,
[1][1][0][0][RTW89_ACMA][8] = 48,
- [1][1][0][0][RTW89_FCC][9] = 22,
+ [1][1][0][0][RTW89_CN][8] = 46,
+ [1][1][0][0][RTW89_UK][8] = 48,
+ [1][1][0][0][RTW89_FCC][9] = 14,
[1][1][0][0][RTW89_ETSI][9] = 48,
[1][1][0][0][RTW89_MKK][9] = 58,
- [1][1][0][0][RTW89_IC][9] = 32,
+ [1][1][0][0][RTW89_IC][9] = 14,
+ [1][1][0][0][RTW89_KCC][9] = 58,
[1][1][0][0][RTW89_ACMA][9] = 48,
- [1][1][0][0][RTW89_FCC][10] = 22,
+ [1][1][0][0][RTW89_CN][9] = 46,
+ [1][1][0][0][RTW89_UK][9] = 48,
+ [1][1][0][0][RTW89_FCC][10] = 12,
[1][1][0][0][RTW89_ETSI][10] = 48,
[1][1][0][0][RTW89_MKK][10] = 56,
- [1][1][0][0][RTW89_IC][10] = 32,
+ [1][1][0][0][RTW89_IC][10] = 12,
+ [1][1][0][0][RTW89_KCC][10] = 58,
[1][1][0][0][RTW89_ACMA][10] = 48,
+ [1][1][0][0][RTW89_CN][10] = 46,
+ [1][1][0][0][RTW89_UK][10] = 48,
[1][1][0][0][RTW89_FCC][11] = 127,
[1][1][0][0][RTW89_ETSI][11] = 127,
[1][1][0][0][RTW89_MKK][11] = 127,
[1][1][0][0][RTW89_IC][11] = 127,
+ [1][1][0][0][RTW89_KCC][11] = 127,
[1][1][0][0][RTW89_ACMA][11] = 127,
+ [1][1][0][0][RTW89_CN][11] = 127,
+ [1][1][0][0][RTW89_UK][11] = 127,
[1][1][0][0][RTW89_FCC][12] = 127,
[1][1][0][0][RTW89_ETSI][12] = 127,
[1][1][0][0][RTW89_MKK][12] = 127,
[1][1][0][0][RTW89_IC][12] = 127,
+ [1][1][0][0][RTW89_KCC][12] = 127,
[1][1][0][0][RTW89_ACMA][12] = 127,
+ [1][1][0][0][RTW89_CN][12] = 127,
+ [1][1][0][0][RTW89_UK][12] = 127,
[1][1][0][0][RTW89_FCC][13] = 127,
[1][1][0][0][RTW89_ETSI][13] = 127,
[1][1][0][0][RTW89_MKK][13] = 127,
[1][1][0][0][RTW89_IC][13] = 127,
+ [1][1][0][0][RTW89_KCC][13] = 127,
[1][1][0][0][RTW89_ACMA][13] = 127,
- [0][0][1][0][RTW89_FCC][0] = 68,
+ [1][1][0][0][RTW89_CN][13] = 127,
+ [1][1][0][0][RTW89_UK][13] = 127,
+ [0][0][1][0][RTW89_FCC][0] = 66,
[0][0][1][0][RTW89_ETSI][0] = 60,
[0][0][1][0][RTW89_MKK][0] = 76,
- [0][0][1][0][RTW89_IC][0] = 78,
+ [0][0][1][0][RTW89_IC][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 68,
[0][0][1][0][RTW89_ACMA][0] = 60,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 60,
[0][0][1][0][RTW89_FCC][1] = 68,
[0][0][1][0][RTW89_ETSI][1] = 60,
[0][0][1][0][RTW89_MKK][1] = 78,
- [0][0][1][0][RTW89_IC][1] = 78,
+ [0][0][1][0][RTW89_IC][1] = 68,
+ [0][0][1][0][RTW89_KCC][1] = 68,
[0][0][1][0][RTW89_ACMA][1] = 60,
- [0][0][1][0][RTW89_FCC][2] = 70,
+ [0][0][1][0][RTW89_CN][1] = 58,
+ [0][0][1][0][RTW89_UK][1] = 60,
+ [0][0][1][0][RTW89_FCC][2] = 72,
[0][0][1][0][RTW89_ETSI][2] = 60,
[0][0][1][0][RTW89_MKK][2] = 78,
- [0][0][1][0][RTW89_IC][2] = 78,
+ [0][0][1][0][RTW89_IC][2] = 72,
+ [0][0][1][0][RTW89_KCC][2] = 68,
[0][0][1][0][RTW89_ACMA][2] = 60,
- [0][0][1][0][RTW89_FCC][3] = 70,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 60,
+ [0][0][1][0][RTW89_FCC][3] = 76,
[0][0][1][0][RTW89_ETSI][3] = 60,
[0][0][1][0][RTW89_MKK][3] = 78,
- [0][0][1][0][RTW89_IC][3] = 78,
+ [0][0][1][0][RTW89_IC][3] = 76,
+ [0][0][1][0][RTW89_KCC][3] = 68,
[0][0][1][0][RTW89_ACMA][3] = 60,
- [0][0][1][0][RTW89_FCC][4] = 70,
+ [0][0][1][0][RTW89_CN][3] = 58,
+ [0][0][1][0][RTW89_UK][3] = 60,
+ [0][0][1][0][RTW89_FCC][4] = 80,
[0][0][1][0][RTW89_ETSI][4] = 60,
[0][0][1][0][RTW89_MKK][4] = 78,
- [0][0][1][0][RTW89_IC][4] = 78,
+ [0][0][1][0][RTW89_IC][4] = 80,
+ [0][0][1][0][RTW89_KCC][4] = 76,
[0][0][1][0][RTW89_ACMA][4] = 60,
- [0][0][1][0][RTW89_FCC][5] = 70,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 60,
+ [0][0][1][0][RTW89_FCC][5] = 80,
[0][0][1][0][RTW89_ETSI][5] = 60,
[0][0][1][0][RTW89_MKK][5] = 78,
- [0][0][1][0][RTW89_IC][5] = 78,
+ [0][0][1][0][RTW89_IC][5] = 80,
+ [0][0][1][0][RTW89_KCC][5] = 76,
[0][0][1][0][RTW89_ACMA][5] = 60,
- [0][0][1][0][RTW89_FCC][6] = 70,
+ [0][0][1][0][RTW89_CN][5] = 58,
+ [0][0][1][0][RTW89_UK][5] = 60,
+ [0][0][1][0][RTW89_FCC][6] = 80,
[0][0][1][0][RTW89_ETSI][6] = 60,
[0][0][1][0][RTW89_MKK][6] = 76,
- [0][0][1][0][RTW89_IC][6] = 78,
+ [0][0][1][0][RTW89_IC][6] = 80,
+ [0][0][1][0][RTW89_KCC][6] = 76,
[0][0][1][0][RTW89_ACMA][6] = 60,
- [0][0][1][0][RTW89_FCC][7] = 70,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 60,
+ [0][0][1][0][RTW89_FCC][7] = 80,
[0][0][1][0][RTW89_ETSI][7] = 60,
[0][0][1][0][RTW89_MKK][7] = 78,
- [0][0][1][0][RTW89_IC][7] = 78,
+ [0][0][1][0][RTW89_IC][7] = 80,
+ [0][0][1][0][RTW89_KCC][7] = 76,
[0][0][1][0][RTW89_ACMA][7] = 60,
- [0][0][1][0][RTW89_FCC][8] = 70,
+ [0][0][1][0][RTW89_CN][7] = 58,
+ [0][0][1][0][RTW89_UK][7] = 60,
+ [0][0][1][0][RTW89_FCC][8] = 80,
[0][0][1][0][RTW89_ETSI][8] = 60,
[0][0][1][0][RTW89_MKK][8] = 78,
- [0][0][1][0][RTW89_IC][8] = 78,
+ [0][0][1][0][RTW89_IC][8] = 80,
+ [0][0][1][0][RTW89_KCC][8] = 76,
[0][0][1][0][RTW89_ACMA][8] = 60,
- [0][0][1][0][RTW89_FCC][9] = 66,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 60,
+ [0][0][1][0][RTW89_FCC][9] = 76,
[0][0][1][0][RTW89_ETSI][9] = 60,
[0][0][1][0][RTW89_MKK][9] = 78,
[0][0][1][0][RTW89_IC][9] = 76,
+ [0][0][1][0][RTW89_KCC][9] = 70,
[0][0][1][0][RTW89_ACMA][9] = 60,
+ [0][0][1][0][RTW89_CN][9] = 58,
+ [0][0][1][0][RTW89_UK][9] = 60,
[0][0][1][0][RTW89_FCC][10] = 66,
[0][0][1][0][RTW89_ETSI][10] = 60,
[0][0][1][0][RTW89_MKK][10] = 78,
- [0][0][1][0][RTW89_IC][10] = 76,
+ [0][0][1][0][RTW89_IC][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 70,
[0][0][1][0][RTW89_ACMA][10] = 60,
- [0][0][1][0][RTW89_FCC][11] = 46,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 60,
+ [0][0][1][0][RTW89_FCC][11] = 62,
[0][0][1][0][RTW89_ETSI][11] = 60,
[0][0][1][0][RTW89_MKK][11] = 78,
- [0][0][1][0][RTW89_IC][11] = 56,
+ [0][0][1][0][RTW89_IC][11] = 62,
+ [0][0][1][0][RTW89_KCC][11] = 70,
[0][0][1][0][RTW89_ACMA][11] = 60,
- [0][0][1][0][RTW89_FCC][12] = 42,
+ [0][0][1][0][RTW89_CN][11] = 58,
+ [0][0][1][0][RTW89_UK][11] = 60,
+ [0][0][1][0][RTW89_FCC][12] = 60,
[0][0][1][0][RTW89_ETSI][12] = 60,
[0][0][1][0][RTW89_MKK][12] = 78,
- [0][0][1][0][RTW89_IC][12] = 52,
+ [0][0][1][0][RTW89_IC][12] = 60,
+ [0][0][1][0][RTW89_KCC][12] = 70,
[0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 60,
[0][0][1][0][RTW89_FCC][13] = 127,
[0][0][1][0][RTW89_ETSI][13] = 127,
[0][0][1][0][RTW89_MKK][13] = 127,
[0][0][1][0][RTW89_IC][13] = 127,
+ [0][0][1][0][RTW89_KCC][13] = 127,
[0][0][1][0][RTW89_ACMA][13] = 127,
- [0][1][1][0][RTW89_FCC][0] = 54,
+ [0][0][1][0][RTW89_CN][13] = 127,
+ [0][0][1][0][RTW89_UK][13] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 66,
[0][1][1][0][RTW89_ETSI][0] = 48,
[0][1][1][0][RTW89_MKK][0] = 66,
- [0][1][1][0][RTW89_IC][0] = 64,
+ [0][1][1][0][RTW89_IC][0] = 66,
+ [0][1][1][0][RTW89_KCC][0] = 64,
[0][1][1][0][RTW89_ACMA][0] = 48,
- [0][1][1][0][RTW89_FCC][1] = 54,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 48,
+ [0][1][1][0][RTW89_FCC][1] = 68,
[0][1][1][0][RTW89_ETSI][1] = 48,
[0][1][1][0][RTW89_MKK][1] = 66,
- [0][1][1][0][RTW89_IC][1] = 64,
+ [0][1][1][0][RTW89_IC][1] = 68,
+ [0][1][1][0][RTW89_KCC][1] = 64,
[0][1][1][0][RTW89_ACMA][1] = 48,
- [0][1][1][0][RTW89_FCC][2] = 58,
+ [0][1][1][0][RTW89_CN][1] = 46,
+ [0][1][1][0][RTW89_UK][1] = 48,
+ [0][1][1][0][RTW89_FCC][2] = 72,
[0][1][1][0][RTW89_ETSI][2] = 48,
[0][1][1][0][RTW89_MKK][2] = 66,
- [0][1][1][0][RTW89_IC][2] = 68,
+ [0][1][1][0][RTW89_IC][2] = 72,
+ [0][1][1][0][RTW89_KCC][2] = 64,
[0][1][1][0][RTW89_ACMA][2] = 48,
- [0][1][1][0][RTW89_FCC][3] = 62,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 48,
+ [0][1][1][0][RTW89_FCC][3] = 76,
[0][1][1][0][RTW89_ETSI][3] = 48,
[0][1][1][0][RTW89_MKK][3] = 66,
- [0][1][1][0][RTW89_IC][3] = 72,
+ [0][1][1][0][RTW89_IC][3] = 76,
+ [0][1][1][0][RTW89_KCC][3] = 64,
[0][1][1][0][RTW89_ACMA][3] = 48,
- [0][1][1][0][RTW89_FCC][4] = 70,
+ [0][1][1][0][RTW89_CN][3] = 46,
+ [0][1][1][0][RTW89_UK][3] = 48,
+ [0][1][1][0][RTW89_FCC][4] = 80,
[0][1][1][0][RTW89_ETSI][4] = 48,
[0][1][1][0][RTW89_MKK][4] = 66,
- [0][1][1][0][RTW89_IC][4] = 78,
+ [0][1][1][0][RTW89_IC][4] = 80,
+ [0][1][1][0][RTW89_KCC][4] = 66,
[0][1][1][0][RTW89_ACMA][4] = 48,
- [0][1][1][0][RTW89_FCC][5] = 70,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 48,
+ [0][1][1][0][RTW89_FCC][5] = 80,
[0][1][1][0][RTW89_ETSI][5] = 48,
[0][1][1][0][RTW89_MKK][5] = 66,
- [0][1][1][0][RTW89_IC][5] = 78,
+ [0][1][1][0][RTW89_IC][5] = 80,
+ [0][1][1][0][RTW89_KCC][5] = 66,
[0][1][1][0][RTW89_ACMA][5] = 48,
- [0][1][1][0][RTW89_FCC][6] = 70,
+ [0][1][1][0][RTW89_CN][5] = 46,
+ [0][1][1][0][RTW89_UK][5] = 48,
+ [0][1][1][0][RTW89_FCC][6] = 80,
[0][1][1][0][RTW89_ETSI][6] = 48,
[0][1][1][0][RTW89_MKK][6] = 66,
- [0][1][1][0][RTW89_IC][6] = 78,
+ [0][1][1][0][RTW89_IC][6] = 80,
+ [0][1][1][0][RTW89_KCC][6] = 66,
[0][1][1][0][RTW89_ACMA][6] = 48,
- [0][1][1][0][RTW89_FCC][7] = 62,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 48,
+ [0][1][1][0][RTW89_FCC][7] = 78,
[0][1][1][0][RTW89_ETSI][7] = 48,
[0][1][1][0][RTW89_MKK][7] = 66,
- [0][1][1][0][RTW89_IC][7] = 72,
+ [0][1][1][0][RTW89_IC][7] = 78,
+ [0][1][1][0][RTW89_KCC][7] = 66,
[0][1][1][0][RTW89_ACMA][7] = 48,
- [0][1][1][0][RTW89_FCC][8] = 58,
+ [0][1][1][0][RTW89_CN][7] = 46,
+ [0][1][1][0][RTW89_UK][7] = 48,
+ [0][1][1][0][RTW89_FCC][8] = 74,
[0][1][1][0][RTW89_ETSI][8] = 48,
[0][1][1][0][RTW89_MKK][8] = 66,
- [0][1][1][0][RTW89_IC][8] = 68,
+ [0][1][1][0][RTW89_IC][8] = 74,
+ [0][1][1][0][RTW89_KCC][8] = 66,
[0][1][1][0][RTW89_ACMA][8] = 48,
- [0][1][1][0][RTW89_FCC][9] = 54,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 48,
+ [0][1][1][0][RTW89_FCC][9] = 70,
[0][1][1][0][RTW89_ETSI][9] = 48,
[0][1][1][0][RTW89_MKK][9] = 66,
- [0][1][1][0][RTW89_IC][9] = 64,
+ [0][1][1][0][RTW89_IC][9] = 70,
+ [0][1][1][0][RTW89_KCC][9] = 64,
[0][1][1][0][RTW89_ACMA][9] = 48,
- [0][1][1][0][RTW89_FCC][10] = 54,
+ [0][1][1][0][RTW89_CN][9] = 46,
+ [0][1][1][0][RTW89_UK][9] = 48,
+ [0][1][1][0][RTW89_FCC][10] = 62,
[0][1][1][0][RTW89_ETSI][10] = 48,
[0][1][1][0][RTW89_MKK][10] = 66,
- [0][1][1][0][RTW89_IC][10] = 64,
+ [0][1][1][0][RTW89_IC][10] = 62,
+ [0][1][1][0][RTW89_KCC][10] = 64,
[0][1][1][0][RTW89_ACMA][10] = 48,
- [0][1][1][0][RTW89_FCC][11] = 38,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 48,
+ [0][1][1][0][RTW89_FCC][11] = 60,
[0][1][1][0][RTW89_ETSI][11] = 48,
[0][1][1][0][RTW89_MKK][11] = 66,
- [0][1][1][0][RTW89_IC][11] = 48,
+ [0][1][1][0][RTW89_IC][11] = 60,
+ [0][1][1][0][RTW89_KCC][11] = 64,
[0][1][1][0][RTW89_ACMA][11] = 48,
- [0][1][1][0][RTW89_FCC][12] = 34,
+ [0][1][1][0][RTW89_CN][11] = 46,
+ [0][1][1][0][RTW89_UK][11] = 48,
+ [0][1][1][0][RTW89_FCC][12] = 36,
[0][1][1][0][RTW89_ETSI][12] = 48,
[0][1][1][0][RTW89_MKK][12] = 66,
- [0][1][1][0][RTW89_IC][12] = 44,
+ [0][1][1][0][RTW89_IC][12] = 36,
+ [0][1][1][0][RTW89_KCC][12] = 64,
[0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 48,
[0][1][1][0][RTW89_FCC][13] = 127,
[0][1][1][0][RTW89_ETSI][13] = 127,
[0][1][1][0][RTW89_MKK][13] = 127,
[0][1][1][0][RTW89_IC][13] = 127,
+ [0][1][1][0][RTW89_KCC][13] = 127,
[0][1][1][0][RTW89_ACMA][13] = 127,
- [0][0][2][0][RTW89_FCC][0] = 68,
+ [0][1][1][0][RTW89_CN][13] = 127,
+ [0][1][1][0][RTW89_UK][13] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 66,
[0][0][2][0][RTW89_ETSI][0] = 60,
[0][0][2][0][RTW89_MKK][0] = 78,
- [0][0][2][0][RTW89_IC][0] = 78,
+ [0][0][2][0][RTW89_IC][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 70,
[0][0][2][0][RTW89_ACMA][0] = 60,
- [0][0][2][0][RTW89_FCC][1] = 68,
+ [0][0][2][0][RTW89_CN][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 60,
+ [0][0][2][0][RTW89_FCC][1] = 70,
[0][0][2][0][RTW89_ETSI][1] = 60,
[0][0][2][0][RTW89_MKK][1] = 78,
- [0][0][2][0][RTW89_IC][1] = 78,
+ [0][0][2][0][RTW89_IC][1] = 70,
+ [0][0][2][0][RTW89_KCC][1] = 70,
[0][0][2][0][RTW89_ACMA][1] = 60,
- [0][0][2][0][RTW89_FCC][2] = 70,
+ [0][0][2][0][RTW89_CN][1] = 58,
+ [0][0][2][0][RTW89_UK][1] = 60,
+ [0][0][2][0][RTW89_FCC][2] = 74,
[0][0][2][0][RTW89_ETSI][2] = 60,
[0][0][2][0][RTW89_MKK][2] = 78,
- [0][0][2][0][RTW89_IC][2] = 78,
+ [0][0][2][0][RTW89_IC][2] = 74,
+ [0][0][2][0][RTW89_KCC][2] = 70,
[0][0][2][0][RTW89_ACMA][2] = 60,
- [0][0][2][0][RTW89_FCC][3] = 70,
+ [0][0][2][0][RTW89_CN][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 60,
+ [0][0][2][0][RTW89_FCC][3] = 78,
[0][0][2][0][RTW89_ETSI][3] = 60,
[0][0][2][0][RTW89_MKK][3] = 78,
[0][0][2][0][RTW89_IC][3] = 78,
+ [0][0][2][0][RTW89_KCC][3] = 70,
[0][0][2][0][RTW89_ACMA][3] = 60,
- [0][0][2][0][RTW89_FCC][4] = 70,
+ [0][0][2][0][RTW89_CN][3] = 58,
+ [0][0][2][0][RTW89_UK][3] = 60,
+ [0][0][2][0][RTW89_FCC][4] = 80,
[0][0][2][0][RTW89_ETSI][4] = 60,
[0][0][2][0][RTW89_MKK][4] = 78,
- [0][0][2][0][RTW89_IC][4] = 78,
+ [0][0][2][0][RTW89_IC][4] = 80,
+ [0][0][2][0][RTW89_KCC][4] = 78,
[0][0][2][0][RTW89_ACMA][4] = 60,
- [0][0][2][0][RTW89_FCC][5] = 70,
+ [0][0][2][0][RTW89_CN][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 60,
+ [0][0][2][0][RTW89_FCC][5] = 80,
[0][0][2][0][RTW89_ETSI][5] = 60,
[0][0][2][0][RTW89_MKK][5] = 78,
- [0][0][2][0][RTW89_IC][5] = 78,
+ [0][0][2][0][RTW89_IC][5] = 80,
+ [0][0][2][0][RTW89_KCC][5] = 78,
[0][0][2][0][RTW89_ACMA][5] = 60,
- [0][0][2][0][RTW89_FCC][6] = 70,
+ [0][0][2][0][RTW89_CN][5] = 58,
+ [0][0][2][0][RTW89_UK][5] = 60,
+ [0][0][2][0][RTW89_FCC][6] = 80,
[0][0][2][0][RTW89_ETSI][6] = 60,
[0][0][2][0][RTW89_MKK][6] = 78,
- [0][0][2][0][RTW89_IC][6] = 78,
+ [0][0][2][0][RTW89_IC][6] = 80,
+ [0][0][2][0][RTW89_KCC][6] = 78,
[0][0][2][0][RTW89_ACMA][6] = 60,
- [0][0][2][0][RTW89_FCC][7] = 70,
+ [0][0][2][0][RTW89_CN][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 60,
+ [0][0][2][0][RTW89_FCC][7] = 80,
[0][0][2][0][RTW89_ETSI][7] = 60,
[0][0][2][0][RTW89_MKK][7] = 78,
- [0][0][2][0][RTW89_IC][7] = 78,
+ [0][0][2][0][RTW89_IC][7] = 80,
+ [0][0][2][0][RTW89_KCC][7] = 78,
[0][0][2][0][RTW89_ACMA][7] = 60,
- [0][0][2][0][RTW89_FCC][8] = 68,
+ [0][0][2][0][RTW89_CN][7] = 58,
+ [0][0][2][0][RTW89_UK][7] = 60,
+ [0][0][2][0][RTW89_FCC][8] = 78,
[0][0][2][0][RTW89_ETSI][8] = 60,
[0][0][2][0][RTW89_MKK][8] = 78,
[0][0][2][0][RTW89_IC][8] = 78,
+ [0][0][2][0][RTW89_KCC][8] = 78,
[0][0][2][0][RTW89_ACMA][8] = 60,
- [0][0][2][0][RTW89_FCC][9] = 64,
+ [0][0][2][0][RTW89_CN][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 60,
+ [0][0][2][0][RTW89_FCC][9] = 74,
[0][0][2][0][RTW89_ETSI][9] = 60,
[0][0][2][0][RTW89_MKK][9] = 78,
[0][0][2][0][RTW89_IC][9] = 74,
+ [0][0][2][0][RTW89_KCC][9] = 66,
[0][0][2][0][RTW89_ACMA][9] = 60,
- [0][0][2][0][RTW89_FCC][10] = 64,
+ [0][0][2][0][RTW89_CN][9] = 58,
+ [0][0][2][0][RTW89_UK][9] = 60,
+ [0][0][2][0][RTW89_FCC][10] = 62,
[0][0][2][0][RTW89_ETSI][10] = 60,
[0][0][2][0][RTW89_MKK][10] = 78,
- [0][0][2][0][RTW89_IC][10] = 74,
+ [0][0][2][0][RTW89_IC][10] = 62,
+ [0][0][2][0][RTW89_KCC][10] = 66,
[0][0][2][0][RTW89_ACMA][10] = 60,
- [0][0][2][0][RTW89_FCC][11] = 46,
+ [0][0][2][0][RTW89_CN][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 60,
+ [0][0][2][0][RTW89_FCC][11] = 60,
[0][0][2][0][RTW89_ETSI][11] = 60,
[0][0][2][0][RTW89_MKK][11] = 78,
- [0][0][2][0][RTW89_IC][11] = 56,
+ [0][0][2][0][RTW89_IC][11] = 60,
+ [0][0][2][0][RTW89_KCC][11] = 66,
[0][0][2][0][RTW89_ACMA][11] = 60,
- [0][0][2][0][RTW89_FCC][12] = 42,
+ [0][0][2][0][RTW89_CN][11] = 58,
+ [0][0][2][0][RTW89_UK][11] = 60,
+ [0][0][2][0][RTW89_FCC][12] = 38,
[0][0][2][0][RTW89_ETSI][12] = 60,
[0][0][2][0][RTW89_MKK][12] = 78,
- [0][0][2][0][RTW89_IC][12] = 52,
+ [0][0][2][0][RTW89_IC][12] = 38,
+ [0][0][2][0][RTW89_KCC][12] = 66,
[0][0][2][0][RTW89_ACMA][12] = 60,
+ [0][0][2][0][RTW89_CN][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 60,
[0][0][2][0][RTW89_FCC][13] = 127,
[0][0][2][0][RTW89_ETSI][13] = 127,
[0][0][2][0][RTW89_MKK][13] = 127,
[0][0][2][0][RTW89_IC][13] = 127,
+ [0][0][2][0][RTW89_KCC][13] = 127,
[0][0][2][0][RTW89_ACMA][13] = 127,
- [0][1][2][0][RTW89_FCC][0] = 50,
+ [0][0][2][0][RTW89_CN][13] = 127,
+ [0][0][2][0][RTW89_UK][13] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 64,
[0][1][2][0][RTW89_ETSI][0] = 48,
[0][1][2][0][RTW89_MKK][0] = 68,
- [0][1][2][0][RTW89_IC][0] = 60,
+ [0][1][2][0][RTW89_IC][0] = 64,
+ [0][1][2][0][RTW89_KCC][0] = 66,
[0][1][2][0][RTW89_ACMA][0] = 48,
- [0][1][2][0][RTW89_FCC][1] = 50,
+ [0][1][2][0][RTW89_CN][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 48,
+ [0][1][2][0][RTW89_FCC][1] = 70,
[0][1][2][0][RTW89_ETSI][1] = 48,
[0][1][2][0][RTW89_MKK][1] = 68,
- [0][1][2][0][RTW89_IC][1] = 60,
+ [0][1][2][0][RTW89_IC][1] = 70,
+ [0][1][2][0][RTW89_KCC][1] = 66,
[0][1][2][0][RTW89_ACMA][1] = 48,
- [0][1][2][0][RTW89_FCC][2] = 54,
+ [0][1][2][0][RTW89_CN][1] = 46,
+ [0][1][2][0][RTW89_UK][1] = 48,
+ [0][1][2][0][RTW89_FCC][2] = 74,
[0][1][2][0][RTW89_ETSI][2] = 48,
[0][1][2][0][RTW89_MKK][2] = 68,
- [0][1][2][0][RTW89_IC][2] = 64,
+ [0][1][2][0][RTW89_IC][2] = 74,
+ [0][1][2][0][RTW89_KCC][2] = 66,
[0][1][2][0][RTW89_ACMA][2] = 48,
- [0][1][2][0][RTW89_FCC][3] = 58,
+ [0][1][2][0][RTW89_CN][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 48,
+ [0][1][2][0][RTW89_FCC][3] = 78,
[0][1][2][0][RTW89_ETSI][3] = 48,
[0][1][2][0][RTW89_MKK][3] = 68,
- [0][1][2][0][RTW89_IC][3] = 68,
+ [0][1][2][0][RTW89_IC][3] = 78,
+ [0][1][2][0][RTW89_KCC][3] = 66,
[0][1][2][0][RTW89_ACMA][3] = 48,
- [0][1][2][0][RTW89_FCC][4] = 64,
+ [0][1][2][0][RTW89_CN][3] = 46,
+ [0][1][2][0][RTW89_UK][3] = 48,
+ [0][1][2][0][RTW89_FCC][4] = 80,
[0][1][2][0][RTW89_ETSI][4] = 48,
[0][1][2][0][RTW89_MKK][4] = 68,
- [0][1][2][0][RTW89_IC][4] = 74,
+ [0][1][2][0][RTW89_IC][4] = 80,
+ [0][1][2][0][RTW89_KCC][4] = 66,
[0][1][2][0][RTW89_ACMA][4] = 48,
- [0][1][2][0][RTW89_FCC][5] = 70,
+ [0][1][2][0][RTW89_CN][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 48,
+ [0][1][2][0][RTW89_FCC][5] = 80,
[0][1][2][0][RTW89_ETSI][5] = 48,
[0][1][2][0][RTW89_MKK][5] = 68,
- [0][1][2][0][RTW89_IC][5] = 78,
+ [0][1][2][0][RTW89_IC][5] = 80,
+ [0][1][2][0][RTW89_KCC][5] = 66,
[0][1][2][0][RTW89_ACMA][5] = 48,
- [0][1][2][0][RTW89_FCC][6] = 66,
+ [0][1][2][0][RTW89_CN][5] = 46,
+ [0][1][2][0][RTW89_UK][5] = 48,
+ [0][1][2][0][RTW89_FCC][6] = 80,
[0][1][2][0][RTW89_ETSI][6] = 48,
[0][1][2][0][RTW89_MKK][6] = 68,
- [0][1][2][0][RTW89_IC][6] = 76,
+ [0][1][2][0][RTW89_IC][6] = 80,
+ [0][1][2][0][RTW89_KCC][6] = 66,
[0][1][2][0][RTW89_ACMA][6] = 48,
- [0][1][2][0][RTW89_FCC][7] = 58,
+ [0][1][2][0][RTW89_CN][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 48,
+ [0][1][2][0][RTW89_FCC][7] = 74,
[0][1][2][0][RTW89_ETSI][7] = 48,
[0][1][2][0][RTW89_MKK][7] = 68,
- [0][1][2][0][RTW89_IC][7] = 68,
+ [0][1][2][0][RTW89_IC][7] = 74,
+ [0][1][2][0][RTW89_KCC][7] = 66,
[0][1][2][0][RTW89_ACMA][7] = 48,
- [0][1][2][0][RTW89_FCC][8] = 54,
+ [0][1][2][0][RTW89_CN][7] = 46,
+ [0][1][2][0][RTW89_UK][7] = 48,
+ [0][1][2][0][RTW89_FCC][8] = 70,
[0][1][2][0][RTW89_ETSI][8] = 48,
[0][1][2][0][RTW89_MKK][8] = 68,
- [0][1][2][0][RTW89_IC][8] = 64,
+ [0][1][2][0][RTW89_IC][8] = 70,
+ [0][1][2][0][RTW89_KCC][8] = 66,
[0][1][2][0][RTW89_ACMA][8] = 48,
- [0][1][2][0][RTW89_FCC][9] = 50,
+ [0][1][2][0][RTW89_CN][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 48,
+ [0][1][2][0][RTW89_FCC][9] = 66,
[0][1][2][0][RTW89_ETSI][9] = 48,
[0][1][2][0][RTW89_MKK][9] = 68,
- [0][1][2][0][RTW89_IC][9] = 60,
+ [0][1][2][0][RTW89_IC][9] = 66,
+ [0][1][2][0][RTW89_KCC][9] = 64,
[0][1][2][0][RTW89_ACMA][9] = 48,
- [0][1][2][0][RTW89_FCC][10] = 50,
+ [0][1][2][0][RTW89_CN][9] = 46,
+ [0][1][2][0][RTW89_UK][9] = 48,
+ [0][1][2][0][RTW89_FCC][10] = 58,
[0][1][2][0][RTW89_ETSI][10] = 48,
[0][1][2][0][RTW89_MKK][10] = 68,
- [0][1][2][0][RTW89_IC][10] = 60,
+ [0][1][2][0][RTW89_IC][10] = 58,
+ [0][1][2][0][RTW89_KCC][10] = 64,
[0][1][2][0][RTW89_ACMA][10] = 48,
- [0][1][2][0][RTW89_FCC][11] = 38,
+ [0][1][2][0][RTW89_CN][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 48,
+ [0][1][2][0][RTW89_FCC][11] = 58,
[0][1][2][0][RTW89_ETSI][11] = 48,
[0][1][2][0][RTW89_MKK][11] = 68,
- [0][1][2][0][RTW89_IC][11] = 48,
+ [0][1][2][0][RTW89_IC][11] = 58,
+ [0][1][2][0][RTW89_KCC][11] = 64,
[0][1][2][0][RTW89_ACMA][11] = 48,
- [0][1][2][0][RTW89_FCC][12] = 34,
+ [0][1][2][0][RTW89_CN][11] = 46,
+ [0][1][2][0][RTW89_UK][11] = 48,
+ [0][1][2][0][RTW89_FCC][12] = 16,
[0][1][2][0][RTW89_ETSI][12] = 48,
[0][1][2][0][RTW89_MKK][12] = 68,
- [0][1][2][0][RTW89_IC][12] = 44,
+ [0][1][2][0][RTW89_IC][12] = 16,
+ [0][1][2][0][RTW89_KCC][12] = 64,
[0][1][2][0][RTW89_ACMA][12] = 48,
+ [0][1][2][0][RTW89_CN][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 48,
[0][1][2][0][RTW89_FCC][13] = 127,
[0][1][2][0][RTW89_ETSI][13] = 127,
[0][1][2][0][RTW89_MKK][13] = 127,
[0][1][2][0][RTW89_IC][13] = 127,
+ [0][1][2][0][RTW89_KCC][13] = 127,
[0][1][2][0][RTW89_ACMA][13] = 127,
- [0][1][2][1][RTW89_FCC][0] = 50,
+ [0][1][2][0][RTW89_CN][13] = 127,
+ [0][1][2][0][RTW89_UK][13] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 64,
[0][1][2][1][RTW89_ETSI][0] = 36,
[0][1][2][1][RTW89_MKK][0] = 68,
- [0][1][2][1][RTW89_IC][0] = 60,
+ [0][1][2][1][RTW89_IC][0] = 64,
+ [0][1][2][1][RTW89_KCC][0] = 66,
[0][1][2][1][RTW89_ACMA][0] = 36,
- [0][1][2][1][RTW89_FCC][1] = 50,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 36,
+ [0][1][2][1][RTW89_FCC][1] = 70,
[0][1][2][1][RTW89_ETSI][1] = 36,
[0][1][2][1][RTW89_MKK][1] = 68,
- [0][1][2][1][RTW89_IC][1] = 60,
+ [0][1][2][1][RTW89_IC][1] = 70,
+ [0][1][2][1][RTW89_KCC][1] = 66,
[0][1][2][1][RTW89_ACMA][1] = 36,
- [0][1][2][1][RTW89_FCC][2] = 54,
+ [0][1][2][1][RTW89_CN][1] = 34,
+ [0][1][2][1][RTW89_UK][1] = 36,
+ [0][1][2][1][RTW89_FCC][2] = 74,
[0][1][2][1][RTW89_ETSI][2] = 36,
[0][1][2][1][RTW89_MKK][2] = 68,
- [0][1][2][1][RTW89_IC][2] = 64,
+ [0][1][2][1][RTW89_IC][2] = 74,
+ [0][1][2][1][RTW89_KCC][2] = 66,
[0][1][2][1][RTW89_ACMA][2] = 36,
- [0][1][2][1][RTW89_FCC][3] = 58,
+ [0][1][2][1][RTW89_CN][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 36,
+ [0][1][2][1][RTW89_FCC][3] = 78,
[0][1][2][1][RTW89_ETSI][3] = 36,
[0][1][2][1][RTW89_MKK][3] = 68,
- [0][1][2][1][RTW89_IC][3] = 68,
+ [0][1][2][1][RTW89_IC][3] = 78,
+ [0][1][2][1][RTW89_KCC][3] = 66,
[0][1][2][1][RTW89_ACMA][3] = 36,
- [0][1][2][1][RTW89_FCC][4] = 64,
+ [0][1][2][1][RTW89_CN][3] = 34,
+ [0][1][2][1][RTW89_UK][3] = 36,
+ [0][1][2][1][RTW89_FCC][4] = 80,
[0][1][2][1][RTW89_ETSI][4] = 36,
[0][1][2][1][RTW89_MKK][4] = 68,
- [0][1][2][1][RTW89_IC][4] = 74,
+ [0][1][2][1][RTW89_IC][4] = 80,
+ [0][1][2][1][RTW89_KCC][4] = 66,
[0][1][2][1][RTW89_ACMA][4] = 36,
- [0][1][2][1][RTW89_FCC][5] = 70,
+ [0][1][2][1][RTW89_CN][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 36,
+ [0][1][2][1][RTW89_FCC][5] = 80,
[0][1][2][1][RTW89_ETSI][5] = 36,
[0][1][2][1][RTW89_MKK][5] = 68,
- [0][1][2][1][RTW89_IC][5] = 78,
+ [0][1][2][1][RTW89_IC][5] = 80,
+ [0][1][2][1][RTW89_KCC][5] = 66,
[0][1][2][1][RTW89_ACMA][5] = 36,
- [0][1][2][1][RTW89_FCC][6] = 66,
+ [0][1][2][1][RTW89_CN][5] = 34,
+ [0][1][2][1][RTW89_UK][5] = 36,
+ [0][1][2][1][RTW89_FCC][6] = 80,
[0][1][2][1][RTW89_ETSI][6] = 36,
[0][1][2][1][RTW89_MKK][6] = 68,
- [0][1][2][1][RTW89_IC][6] = 76,
+ [0][1][2][1][RTW89_IC][6] = 80,
+ [0][1][2][1][RTW89_KCC][6] = 66,
[0][1][2][1][RTW89_ACMA][6] = 36,
- [0][1][2][1][RTW89_FCC][7] = 58,
+ [0][1][2][1][RTW89_CN][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 36,
+ [0][1][2][1][RTW89_FCC][7] = 74,
[0][1][2][1][RTW89_ETSI][7] = 36,
[0][1][2][1][RTW89_MKK][7] = 68,
- [0][1][2][1][RTW89_IC][7] = 68,
+ [0][1][2][1][RTW89_IC][7] = 74,
+ [0][1][2][1][RTW89_KCC][7] = 66,
[0][1][2][1][RTW89_ACMA][7] = 36,
- [0][1][2][1][RTW89_FCC][8] = 54,
+ [0][1][2][1][RTW89_CN][7] = 34,
+ [0][1][2][1][RTW89_UK][7] = 36,
+ [0][1][2][1][RTW89_FCC][8] = 70,
[0][1][2][1][RTW89_ETSI][8] = 36,
[0][1][2][1][RTW89_MKK][8] = 68,
- [0][1][2][1][RTW89_IC][8] = 64,
+ [0][1][2][1][RTW89_IC][8] = 70,
+ [0][1][2][1][RTW89_KCC][8] = 66,
[0][1][2][1][RTW89_ACMA][8] = 36,
- [0][1][2][1][RTW89_FCC][9] = 50,
+ [0][1][2][1][RTW89_CN][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 36,
+ [0][1][2][1][RTW89_FCC][9] = 66,
[0][1][2][1][RTW89_ETSI][9] = 36,
[0][1][2][1][RTW89_MKK][9] = 68,
- [0][1][2][1][RTW89_IC][9] = 60,
+ [0][1][2][1][RTW89_IC][9] = 66,
+ [0][1][2][1][RTW89_KCC][9] = 64,
[0][1][2][1][RTW89_ACMA][9] = 36,
- [0][1][2][1][RTW89_FCC][10] = 50,
+ [0][1][2][1][RTW89_CN][9] = 34,
+ [0][1][2][1][RTW89_UK][9] = 36,
+ [0][1][2][1][RTW89_FCC][10] = 58,
[0][1][2][1][RTW89_ETSI][10] = 36,
[0][1][2][1][RTW89_MKK][10] = 68,
- [0][1][2][1][RTW89_IC][10] = 60,
+ [0][1][2][1][RTW89_IC][10] = 58,
+ [0][1][2][1][RTW89_KCC][10] = 64,
[0][1][2][1][RTW89_ACMA][10] = 36,
- [0][1][2][1][RTW89_FCC][11] = 38,
+ [0][1][2][1][RTW89_CN][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 36,
+ [0][1][2][1][RTW89_FCC][11] = 58,
[0][1][2][1][RTW89_ETSI][11] = 36,
[0][1][2][1][RTW89_MKK][11] = 68,
- [0][1][2][1][RTW89_IC][11] = 48,
+ [0][1][2][1][RTW89_IC][11] = 58,
+ [0][1][2][1][RTW89_KCC][11] = 64,
[0][1][2][1][RTW89_ACMA][11] = 36,
- [0][1][2][1][RTW89_FCC][12] = 34,
+ [0][1][2][1][RTW89_CN][11] = 34,
+ [0][1][2][1][RTW89_UK][11] = 36,
+ [0][1][2][1][RTW89_FCC][12] = 16,
[0][1][2][1][RTW89_ETSI][12] = 36,
[0][1][2][1][RTW89_MKK][12] = 68,
- [0][1][2][1][RTW89_IC][12] = 44,
+ [0][1][2][1][RTW89_IC][12] = 16,
+ [0][1][2][1][RTW89_KCC][12] = 64,
[0][1][2][1][RTW89_ACMA][12] = 36,
+ [0][1][2][1][RTW89_CN][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 36,
[0][1][2][1][RTW89_FCC][13] = 127,
[0][1][2][1][RTW89_ETSI][13] = 127,
[0][1][2][1][RTW89_MKK][13] = 127,
[0][1][2][1][RTW89_IC][13] = 127,
+ [0][1][2][1][RTW89_KCC][13] = 127,
[0][1][2][1][RTW89_ACMA][13] = 127,
+ [0][1][2][1][RTW89_CN][13] = 127,
+ [0][1][2][1][RTW89_UK][13] = 127,
[1][0][2][0][RTW89_FCC][0] = 127,
[1][0][2][0][RTW89_ETSI][0] = 127,
[1][0][2][0][RTW89_MKK][0] = 127,
[1][0][2][0][RTW89_IC][0] = 127,
+ [1][0][2][0][RTW89_KCC][0] = 127,
[1][0][2][0][RTW89_ACMA][0] = 127,
+ [1][0][2][0][RTW89_CN][0] = 127,
+ [1][0][2][0][RTW89_UK][0] = 127,
[1][0][2][0][RTW89_FCC][1] = 127,
[1][0][2][0][RTW89_ETSI][1] = 127,
[1][0][2][0][RTW89_MKK][1] = 127,
[1][0][2][0][RTW89_IC][1] = 127,
+ [1][0][2][0][RTW89_KCC][1] = 127,
[1][0][2][0][RTW89_ACMA][1] = 127,
- [1][0][2][0][RTW89_FCC][2] = 62,
+ [1][0][2][0][RTW89_CN][1] = 127,
+ [1][0][2][0][RTW89_UK][1] = 127,
+ [1][0][2][0][RTW89_FCC][2] = 64,
[1][0][2][0][RTW89_ETSI][2] = 60,
[1][0][2][0][RTW89_MKK][2] = 74,
- [1][0][2][0][RTW89_IC][2] = 72,
+ [1][0][2][0][RTW89_IC][2] = 64,
+ [1][0][2][0][RTW89_KCC][2] = 68,
[1][0][2][0][RTW89_ACMA][2] = 60,
- [1][0][2][0][RTW89_FCC][3] = 62,
+ [1][0][2][0][RTW89_CN][2] = 58,
+ [1][0][2][0][RTW89_UK][2] = 60,
+ [1][0][2][0][RTW89_FCC][3] = 64,
[1][0][2][0][RTW89_ETSI][3] = 60,
[1][0][2][0][RTW89_MKK][3] = 74,
- [1][0][2][0][RTW89_IC][3] = 72,
+ [1][0][2][0][RTW89_IC][3] = 64,
+ [1][0][2][0][RTW89_KCC][3] = 68,
[1][0][2][0][RTW89_ACMA][3] = 60,
- [1][0][2][0][RTW89_FCC][4] = 64,
+ [1][0][2][0][RTW89_CN][3] = 58,
+ [1][0][2][0][RTW89_UK][3] = 60,
+ [1][0][2][0][RTW89_FCC][4] = 68,
[1][0][2][0][RTW89_ETSI][4] = 60,
[1][0][2][0][RTW89_MKK][4] = 74,
- [1][0][2][0][RTW89_IC][4] = 74,
+ [1][0][2][0][RTW89_IC][4] = 68,
+ [1][0][2][0][RTW89_KCC][4] = 68,
[1][0][2][0][RTW89_ACMA][4] = 60,
- [1][0][2][0][RTW89_FCC][5] = 64,
+ [1][0][2][0][RTW89_CN][4] = 58,
+ [1][0][2][0][RTW89_UK][4] = 60,
+ [1][0][2][0][RTW89_FCC][5] = 68,
[1][0][2][0][RTW89_ETSI][5] = 60,
[1][0][2][0][RTW89_MKK][5] = 74,
- [1][0][2][0][RTW89_IC][5] = 74,
+ [1][0][2][0][RTW89_IC][5] = 68,
+ [1][0][2][0][RTW89_KCC][5] = 74,
[1][0][2][0][RTW89_ACMA][5] = 60,
- [1][0][2][0][RTW89_FCC][6] = 64,
+ [1][0][2][0][RTW89_CN][5] = 58,
+ [1][0][2][0][RTW89_UK][5] = 60,
+ [1][0][2][0][RTW89_FCC][6] = 66,
[1][0][2][0][RTW89_ETSI][6] = 60,
[1][0][2][0][RTW89_MKK][6] = 74,
- [1][0][2][0][RTW89_IC][6] = 74,
+ [1][0][2][0][RTW89_IC][6] = 66,
+ [1][0][2][0][RTW89_KCC][6] = 74,
[1][0][2][0][RTW89_ACMA][6] = 60,
- [1][0][2][0][RTW89_FCC][7] = 60,
+ [1][0][2][0][RTW89_CN][6] = 58,
+ [1][0][2][0][RTW89_UK][6] = 60,
+ [1][0][2][0][RTW89_FCC][7] = 62,
[1][0][2][0][RTW89_ETSI][7] = 60,
[1][0][2][0][RTW89_MKK][7] = 74,
- [1][0][2][0][RTW89_IC][7] = 70,
+ [1][0][2][0][RTW89_IC][7] = 62,
+ [1][0][2][0][RTW89_KCC][7] = 74,
[1][0][2][0][RTW89_ACMA][7] = 60,
- [1][0][2][0][RTW89_FCC][8] = 60,
+ [1][0][2][0][RTW89_CN][7] = 58,
+ [1][0][2][0][RTW89_UK][7] = 60,
+ [1][0][2][0][RTW89_FCC][8] = 62,
[1][0][2][0][RTW89_ETSI][8] = 60,
[1][0][2][0][RTW89_MKK][8] = 74,
- [1][0][2][0][RTW89_IC][8] = 70,
+ [1][0][2][0][RTW89_IC][8] = 62,
+ [1][0][2][0][RTW89_KCC][8] = 68,
[1][0][2][0][RTW89_ACMA][8] = 60,
+ [1][0][2][0][RTW89_CN][8] = 58,
+ [1][0][2][0][RTW89_UK][8] = 60,
[1][0][2][0][RTW89_FCC][9] = 60,
[1][0][2][0][RTW89_ETSI][9] = 60,
[1][0][2][0][RTW89_MKK][9] = 74,
- [1][0][2][0][RTW89_IC][9] = 70,
+ [1][0][2][0][RTW89_IC][9] = 60,
+ [1][0][2][0][RTW89_KCC][9] = 68,
[1][0][2][0][RTW89_ACMA][9] = 60,
- [1][0][2][0][RTW89_FCC][10] = 58,
+ [1][0][2][0][RTW89_CN][9] = 58,
+ [1][0][2][0][RTW89_UK][9] = 60,
+ [1][0][2][0][RTW89_FCC][10] = 56,
[1][0][2][0][RTW89_ETSI][10] = 60,
[1][0][2][0][RTW89_MKK][10] = 74,
- [1][0][2][0][RTW89_IC][10] = 68,
+ [1][0][2][0][RTW89_IC][10] = 56,
+ [1][0][2][0][RTW89_KCC][10] = 68,
[1][0][2][0][RTW89_ACMA][10] = 60,
+ [1][0][2][0][RTW89_CN][10] = 58,
+ [1][0][2][0][RTW89_UK][10] = 60,
[1][0][2][0][RTW89_FCC][11] = 127,
[1][0][2][0][RTW89_ETSI][11] = 127,
[1][0][2][0][RTW89_MKK][11] = 127,
[1][0][2][0][RTW89_IC][11] = 127,
+ [1][0][2][0][RTW89_KCC][11] = 127,
[1][0][2][0][RTW89_ACMA][11] = 127,
+ [1][0][2][0][RTW89_CN][11] = 127,
+ [1][0][2][0][RTW89_UK][11] = 127,
[1][0][2][0][RTW89_FCC][12] = 127,
[1][0][2][0][RTW89_ETSI][12] = 127,
[1][0][2][0][RTW89_MKK][12] = 127,
[1][0][2][0][RTW89_IC][12] = 127,
+ [1][0][2][0][RTW89_KCC][12] = 127,
[1][0][2][0][RTW89_ACMA][12] = 127,
+ [1][0][2][0][RTW89_CN][12] = 127,
+ [1][0][2][0][RTW89_UK][12] = 127,
[1][0][2][0][RTW89_FCC][13] = 127,
[1][0][2][0][RTW89_ETSI][13] = 127,
[1][0][2][0][RTW89_MKK][13] = 127,
[1][0][2][0][RTW89_IC][13] = 127,
+ [1][0][2][0][RTW89_KCC][13] = 127,
[1][0][2][0][RTW89_ACMA][13] = 127,
+ [1][0][2][0][RTW89_CN][13] = 127,
+ [1][0][2][0][RTW89_UK][13] = 127,
[1][1][2][0][RTW89_FCC][0] = 127,
[1][1][2][0][RTW89_ETSI][0] = 127,
[1][1][2][0][RTW89_MKK][0] = 127,
[1][1][2][0][RTW89_IC][0] = 127,
+ [1][1][2][0][RTW89_KCC][0] = 127,
[1][1][2][0][RTW89_ACMA][0] = 127,
+ [1][1][2][0][RTW89_CN][0] = 127,
+ [1][1][2][0][RTW89_UK][0] = 127,
[1][1][2][0][RTW89_FCC][1] = 127,
[1][1][2][0][RTW89_ETSI][1] = 127,
[1][1][2][0][RTW89_MKK][1] = 127,
[1][1][2][0][RTW89_IC][1] = 127,
+ [1][1][2][0][RTW89_KCC][1] = 127,
[1][1][2][0][RTW89_ACMA][1] = 127,
- [1][1][2][0][RTW89_FCC][2] = 46,
+ [1][1][2][0][RTW89_CN][1] = 127,
+ [1][1][2][0][RTW89_UK][1] = 127,
+ [1][1][2][0][RTW89_FCC][2] = 60,
[1][1][2][0][RTW89_ETSI][2] = 48,
[1][1][2][0][RTW89_MKK][2] = 68,
- [1][1][2][0][RTW89_IC][2] = 56,
+ [1][1][2][0][RTW89_IC][2] = 60,
+ [1][1][2][0][RTW89_KCC][2] = 64,
[1][1][2][0][RTW89_ACMA][2] = 48,
- [1][1][2][0][RTW89_FCC][3] = 46,
+ [1][1][2][0][RTW89_CN][2] = 34,
+ [1][1][2][0][RTW89_UK][2] = 48,
+ [1][1][2][0][RTW89_FCC][3] = 60,
[1][1][2][0][RTW89_ETSI][3] = 48,
[1][1][2][0][RTW89_MKK][3] = 68,
- [1][1][2][0][RTW89_IC][3] = 56,
+ [1][1][2][0][RTW89_IC][3] = 60,
+ [1][1][2][0][RTW89_KCC][3] = 64,
[1][1][2][0][RTW89_ACMA][3] = 48,
- [1][1][2][0][RTW89_FCC][4] = 50,
+ [1][1][2][0][RTW89_CN][3] = 34,
+ [1][1][2][0][RTW89_UK][3] = 48,
+ [1][1][2][0][RTW89_FCC][4] = 60,
[1][1][2][0][RTW89_ETSI][4] = 48,
[1][1][2][0][RTW89_MKK][4] = 68,
[1][1][2][0][RTW89_IC][4] = 60,
+ [1][1][2][0][RTW89_KCC][4] = 64,
[1][1][2][0][RTW89_ACMA][4] = 48,
- [1][1][2][0][RTW89_FCC][5] = 58,
+ [1][1][2][0][RTW89_CN][4] = 34,
+ [1][1][2][0][RTW89_UK][4] = 48,
+ [1][1][2][0][RTW89_FCC][5] = 60,
[1][1][2][0][RTW89_ETSI][5] = 48,
[1][1][2][0][RTW89_MKK][5] = 68,
- [1][1][2][0][RTW89_IC][5] = 68,
+ [1][1][2][0][RTW89_IC][5] = 60,
+ [1][1][2][0][RTW89_KCC][5] = 66,
[1][1][2][0][RTW89_ACMA][5] = 48,
- [1][1][2][0][RTW89_FCC][6] = 50,
+ [1][1][2][0][RTW89_CN][5] = 34,
+ [1][1][2][0][RTW89_UK][5] = 48,
+ [1][1][2][0][RTW89_FCC][6] = 58,
[1][1][2][0][RTW89_ETSI][6] = 48,
[1][1][2][0][RTW89_MKK][6] = 68,
- [1][1][2][0][RTW89_IC][6] = 60,
+ [1][1][2][0][RTW89_IC][6] = 58,
+ [1][1][2][0][RTW89_KCC][6] = 66,
[1][1][2][0][RTW89_ACMA][6] = 48,
- [1][1][2][0][RTW89_FCC][7] = 46,
+ [1][1][2][0][RTW89_CN][6] = 34,
+ [1][1][2][0][RTW89_UK][6] = 48,
+ [1][1][2][0][RTW89_FCC][7] = 54,
[1][1][2][0][RTW89_ETSI][7] = 48,
[1][1][2][0][RTW89_MKK][7] = 68,
- [1][1][2][0][RTW89_IC][7] = 56,
+ [1][1][2][0][RTW89_IC][7] = 54,
+ [1][1][2][0][RTW89_KCC][7] = 66,
[1][1][2][0][RTW89_ACMA][7] = 48,
- [1][1][2][0][RTW89_FCC][8] = 46,
+ [1][1][2][0][RTW89_CN][7] = 34,
+ [1][1][2][0][RTW89_UK][7] = 48,
+ [1][1][2][0][RTW89_FCC][8] = 54,
[1][1][2][0][RTW89_ETSI][8] = 48,
[1][1][2][0][RTW89_MKK][8] = 68,
- [1][1][2][0][RTW89_IC][8] = 56,
+ [1][1][2][0][RTW89_IC][8] = 54,
+ [1][1][2][0][RTW89_KCC][8] = 64,
[1][1][2][0][RTW89_ACMA][8] = 48,
- [1][1][2][0][RTW89_FCC][9] = 34,
+ [1][1][2][0][RTW89_CN][8] = 34,
+ [1][1][2][0][RTW89_UK][8] = 48,
+ [1][1][2][0][RTW89_FCC][9] = 54,
[1][1][2][0][RTW89_ETSI][9] = 48,
[1][1][2][0][RTW89_MKK][9] = 68,
- [1][1][2][0][RTW89_IC][9] = 44,
+ [1][1][2][0][RTW89_IC][9] = 54,
+ [1][1][2][0][RTW89_KCC][9] = 64,
[1][1][2][0][RTW89_ACMA][9] = 48,
- [1][1][2][0][RTW89_FCC][10] = 30,
+ [1][1][2][0][RTW89_CN][9] = 34,
+ [1][1][2][0][RTW89_UK][9] = 48,
+ [1][1][2][0][RTW89_FCC][10] = 46,
[1][1][2][0][RTW89_ETSI][10] = 48,
[1][1][2][0][RTW89_MKK][10] = 68,
- [1][1][2][0][RTW89_IC][10] = 40,
+ [1][1][2][0][RTW89_IC][10] = 46,
+ [1][1][2][0][RTW89_KCC][10] = 64,
[1][1][2][0][RTW89_ACMA][10] = 48,
+ [1][1][2][0][RTW89_CN][10] = 34,
+ [1][1][2][0][RTW89_UK][10] = 48,
[1][1][2][0][RTW89_FCC][11] = 127,
[1][1][2][0][RTW89_ETSI][11] = 127,
[1][1][2][0][RTW89_MKK][11] = 127,
[1][1][2][0][RTW89_IC][11] = 127,
+ [1][1][2][0][RTW89_KCC][11] = 127,
[1][1][2][0][RTW89_ACMA][11] = 127,
+ [1][1][2][0][RTW89_CN][11] = 127,
+ [1][1][2][0][RTW89_UK][11] = 127,
[1][1][2][0][RTW89_FCC][12] = 127,
[1][1][2][0][RTW89_ETSI][12] = 127,
[1][1][2][0][RTW89_MKK][12] = 127,
[1][1][2][0][RTW89_IC][12] = 127,
+ [1][1][2][0][RTW89_KCC][12] = 127,
[1][1][2][0][RTW89_ACMA][12] = 127,
+ [1][1][2][0][RTW89_CN][12] = 127,
+ [1][1][2][0][RTW89_UK][12] = 127,
[1][1][2][0][RTW89_FCC][13] = 127,
[1][1][2][0][RTW89_ETSI][13] = 127,
[1][1][2][0][RTW89_MKK][13] = 127,
[1][1][2][0][RTW89_IC][13] = 127,
+ [1][1][2][0][RTW89_KCC][13] = 127,
[1][1][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][0][RTW89_CN][13] = 127,
+ [1][1][2][0][RTW89_UK][13] = 127,
[1][1][2][1][RTW89_FCC][0] = 127,
[1][1][2][1][RTW89_ETSI][0] = 127,
[1][1][2][1][RTW89_MKK][0] = 127,
[1][1][2][1][RTW89_IC][0] = 127,
+ [1][1][2][1][RTW89_KCC][0] = 127,
[1][1][2][1][RTW89_ACMA][0] = 127,
+ [1][1][2][1][RTW89_CN][0] = 127,
+ [1][1][2][1][RTW89_UK][0] = 127,
[1][1][2][1][RTW89_FCC][1] = 127,
[1][1][2][1][RTW89_ETSI][1] = 127,
[1][1][2][1][RTW89_MKK][1] = 127,
[1][1][2][1][RTW89_IC][1] = 127,
+ [1][1][2][1][RTW89_KCC][1] = 127,
[1][1][2][1][RTW89_ACMA][1] = 127,
- [1][1][2][1][RTW89_FCC][2] = 46,
+ [1][1][2][1][RTW89_CN][1] = 127,
+ [1][1][2][1][RTW89_UK][1] = 127,
+ [1][1][2][1][RTW89_FCC][2] = 60,
[1][1][2][1][RTW89_ETSI][2] = 36,
[1][1][2][1][RTW89_MKK][2] = 68,
- [1][1][2][1][RTW89_IC][2] = 56,
+ [1][1][2][1][RTW89_IC][2] = 60,
+ [1][1][2][1][RTW89_KCC][2] = 64,
[1][1][2][1][RTW89_ACMA][2] = 36,
- [1][1][2][1][RTW89_FCC][3] = 46,
+ [1][1][2][1][RTW89_CN][2] = 34,
+ [1][1][2][1][RTW89_UK][2] = 36,
+ [1][1][2][1][RTW89_FCC][3] = 60,
[1][1][2][1][RTW89_ETSI][3] = 36,
[1][1][2][1][RTW89_MKK][3] = 68,
- [1][1][2][1][RTW89_IC][3] = 56,
+ [1][1][2][1][RTW89_IC][3] = 60,
+ [1][1][2][1][RTW89_KCC][3] = 64,
[1][1][2][1][RTW89_ACMA][3] = 36,
- [1][1][2][1][RTW89_FCC][4] = 50,
+ [1][1][2][1][RTW89_CN][3] = 34,
+ [1][1][2][1][RTW89_UK][3] = 36,
+ [1][1][2][1][RTW89_FCC][4] = 60,
[1][1][2][1][RTW89_ETSI][4] = 36,
[1][1][2][1][RTW89_MKK][4] = 68,
[1][1][2][1][RTW89_IC][4] = 60,
+ [1][1][2][1][RTW89_KCC][4] = 64,
[1][1][2][1][RTW89_ACMA][4] = 36,
- [1][1][2][1][RTW89_FCC][5] = 58,
+ [1][1][2][1][RTW89_CN][4] = 34,
+ [1][1][2][1][RTW89_UK][4] = 36,
+ [1][1][2][1][RTW89_FCC][5] = 60,
[1][1][2][1][RTW89_ETSI][5] = 36,
[1][1][2][1][RTW89_MKK][5] = 68,
- [1][1][2][1][RTW89_IC][5] = 68,
+ [1][1][2][1][RTW89_IC][5] = 60,
+ [1][1][2][1][RTW89_KCC][5] = 66,
[1][1][2][1][RTW89_ACMA][5] = 36,
- [1][1][2][1][RTW89_FCC][6] = 50,
+ [1][1][2][1][RTW89_CN][5] = 34,
+ [1][1][2][1][RTW89_UK][5] = 36,
+ [1][1][2][1][RTW89_FCC][6] = 58,
[1][1][2][1][RTW89_ETSI][6] = 36,
[1][1][2][1][RTW89_MKK][6] = 68,
- [1][1][2][1][RTW89_IC][6] = 60,
+ [1][1][2][1][RTW89_IC][6] = 58,
+ [1][1][2][1][RTW89_KCC][6] = 66,
[1][1][2][1][RTW89_ACMA][6] = 36,
- [1][1][2][1][RTW89_FCC][7] = 46,
+ [1][1][2][1][RTW89_CN][6] = 34,
+ [1][1][2][1][RTW89_UK][6] = 36,
+ [1][1][2][1][RTW89_FCC][7] = 54,
[1][1][2][1][RTW89_ETSI][7] = 36,
[1][1][2][1][RTW89_MKK][7] = 68,
- [1][1][2][1][RTW89_IC][7] = 56,
+ [1][1][2][1][RTW89_IC][7] = 54,
+ [1][1][2][1][RTW89_KCC][7] = 66,
[1][1][2][1][RTW89_ACMA][7] = 36,
- [1][1][2][1][RTW89_FCC][8] = 46,
+ [1][1][2][1][RTW89_CN][7] = 34,
+ [1][1][2][1][RTW89_UK][7] = 36,
+ [1][1][2][1][RTW89_FCC][8] = 54,
[1][1][2][1][RTW89_ETSI][8] = 36,
[1][1][2][1][RTW89_MKK][8] = 68,
- [1][1][2][1][RTW89_IC][8] = 56,
+ [1][1][2][1][RTW89_IC][8] = 54,
+ [1][1][2][1][RTW89_KCC][8] = 64,
[1][1][2][1][RTW89_ACMA][8] = 36,
- [1][1][2][1][RTW89_FCC][9] = 34,
+ [1][1][2][1][RTW89_CN][8] = 34,
+ [1][1][2][1][RTW89_UK][8] = 36,
+ [1][1][2][1][RTW89_FCC][9] = 54,
[1][1][2][1][RTW89_ETSI][9] = 36,
[1][1][2][1][RTW89_MKK][9] = 68,
- [1][1][2][1][RTW89_IC][9] = 44,
+ [1][1][2][1][RTW89_IC][9] = 54,
+ [1][1][2][1][RTW89_KCC][9] = 64,
[1][1][2][1][RTW89_ACMA][9] = 36,
- [1][1][2][1][RTW89_FCC][10] = 30,
+ [1][1][2][1][RTW89_CN][9] = 34,
+ [1][1][2][1][RTW89_UK][9] = 36,
+ [1][1][2][1][RTW89_FCC][10] = 46,
[1][1][2][1][RTW89_ETSI][10] = 36,
[1][1][2][1][RTW89_MKK][10] = 68,
- [1][1][2][1][RTW89_IC][10] = 40,
+ [1][1][2][1][RTW89_IC][10] = 46,
+ [1][1][2][1][RTW89_KCC][10] = 64,
[1][1][2][1][RTW89_ACMA][10] = 36,
+ [1][1][2][1][RTW89_CN][10] = 36,
+ [1][1][2][1][RTW89_UK][10] = 36,
[1][1][2][1][RTW89_FCC][11] = 127,
[1][1][2][1][RTW89_ETSI][11] = 127,
[1][1][2][1][RTW89_MKK][11] = 127,
[1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_KCC][11] = 127,
[1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_CN][11] = 127,
+ [1][1][2][1][RTW89_UK][11] = 127,
[1][1][2][1][RTW89_FCC][12] = 127,
[1][1][2][1][RTW89_ETSI][12] = 127,
[1][1][2][1][RTW89_MKK][12] = 127,
[1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_KCC][12] = 127,
[1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_CN][12] = 127,
+ [1][1][2][1][RTW89_UK][12] = 127,
[1][1][2][1][RTW89_FCC][13] = 127,
[1][1][2][1][RTW89_ETSI][13] = 127,
[1][1][2][1][RTW89_MKK][13] = 127,
[1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_KCC][13] = 127,
[1][1][2][1][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_CN][13] = 127,
+ [1][1][2][1][RTW89_UK][13] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][1][0][RTW89_WW][0] = 60,
- [0][0][1][0][RTW89_WW][2] = 60,
- [0][0][1][0][RTW89_WW][4] = 60,
- [0][0][1][0][RTW89_WW][6] = 60,
- [0][0][1][0][RTW89_WW][8] = 60,
- [0][0][1][0][RTW89_WW][10] = 60,
- [0][0][1][0][RTW89_WW][12] = 60,
- [0][0][1][0][RTW89_WW][14] = 60,
- [0][0][1][0][RTW89_WW][15] = 60,
- [0][0][1][0][RTW89_WW][17] = 60,
- [0][0][1][0][RTW89_WW][19] = 60,
- [0][0][1][0][RTW89_WW][21] = 60,
- [0][0][1][0][RTW89_WW][23] = 60,
+ [0][0][1][0][RTW89_WW][0] = 50,
+ [0][0][1][0][RTW89_WW][2] = 50,
+ [0][0][1][0][RTW89_WW][4] = 50,
+ [0][0][1][0][RTW89_WW][6] = 50,
+ [0][0][1][0][RTW89_WW][8] = 50,
+ [0][0][1][0][RTW89_WW][10] = 50,
+ [0][0][1][0][RTW89_WW][12] = 50,
+ [0][0][1][0][RTW89_WW][14] = 50,
+ [0][0][1][0][RTW89_WW][15] = 66,
+ [0][0][1][0][RTW89_WW][17] = 66,
+ [0][0][1][0][RTW89_WW][19] = 66,
+ [0][0][1][0][RTW89_WW][21] = 66,
+ [0][0][1][0][RTW89_WW][23] = 66,
[0][0][1][0][RTW89_WW][25] = 66,
[0][0][1][0][RTW89_WW][27] = 66,
[0][0][1][0][RTW89_WW][29] = 66,
- [0][0][1][0][RTW89_WW][31] = 60,
- [0][0][1][0][RTW89_WW][33] = 60,
+ [0][0][1][0][RTW89_WW][31] = 66,
+ [0][0][1][0][RTW89_WW][33] = 66,
[0][0][1][0][RTW89_WW][35] = 60,
- [0][0][1][0][RTW89_WW][37] = 70,
+ [0][0][1][0][RTW89_WW][37] = 64,
[0][0][1][0][RTW89_WW][38] = 30,
[0][0][1][0][RTW89_WW][40] = 30,
[0][0][1][0][RTW89_WW][42] = 30,
[0][0][1][0][RTW89_WW][44] = 30,
[0][0][1][0][RTW89_WW][46] = 30,
- [0][0][1][0][RTW89_WW][48] = 70,
- [0][0][1][0][RTW89_WW][50] = 70,
- [0][0][1][0][RTW89_WW][52] = 70,
- [0][1][1][0][RTW89_WW][0] = 42,
- [0][1][1][0][RTW89_WW][2] = 42,
- [0][1][1][0][RTW89_WW][4] = 42,
- [0][1][1][0][RTW89_WW][6] = 42,
- [0][1][1][0][RTW89_WW][8] = 48,
- [0][1][1][0][RTW89_WW][10] = 48,
- [0][1][1][0][RTW89_WW][12] = 48,
- [0][1][1][0][RTW89_WW][14] = 48,
- [0][1][1][0][RTW89_WW][15] = 48,
- [0][1][1][0][RTW89_WW][17] = 48,
- [0][1][1][0][RTW89_WW][19] = 48,
- [0][1][1][0][RTW89_WW][21] = 48,
- [0][1][1][0][RTW89_WW][23] = 48,
+ [0][0][1][0][RTW89_WW][48] = 72,
+ [0][0][1][0][RTW89_WW][50] = 72,
+ [0][0][1][0][RTW89_WW][52] = 72,
+ [0][1][1][0][RTW89_WW][0] = 34,
+ [0][1][1][0][RTW89_WW][2] = 34,
+ [0][1][1][0][RTW89_WW][4] = 34,
+ [0][1][1][0][RTW89_WW][6] = 36,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][12] = 46,
+ [0][1][1][0][RTW89_WW][14] = 46,
+ [0][1][1][0][RTW89_WW][15] = 54,
+ [0][1][1][0][RTW89_WW][17] = 54,
+ [0][1][1][0][RTW89_WW][19] = 54,
+ [0][1][1][0][RTW89_WW][21] = 54,
+ [0][1][1][0][RTW89_WW][23] = 54,
[0][1][1][0][RTW89_WW][25] = 54,
[0][1][1][0][RTW89_WW][27] = 54,
[0][1][1][0][RTW89_WW][29] = 54,
- [0][1][1][0][RTW89_WW][31] = 48,
- [0][1][1][0][RTW89_WW][33] = 48,
- [0][1][1][0][RTW89_WW][35] = 48,
- [0][1][1][0][RTW89_WW][37] = 60,
+ [0][1][1][0][RTW89_WW][31] = 54,
+ [0][1][1][0][RTW89_WW][33] = 54,
+ [0][1][1][0][RTW89_WW][35] = 52,
+ [0][1][1][0][RTW89_WW][37] = 52,
[0][1][1][0][RTW89_WW][38] = 18,
- [0][1][1][0][RTW89_WW][40] = 16,
+ [0][1][1][0][RTW89_WW][40] = 18,
[0][1][1][0][RTW89_WW][42] = 18,
- [0][1][1][0][RTW89_WW][44] = 16,
+ [0][1][1][0][RTW89_WW][44] = 18,
[0][1][1][0][RTW89_WW][46] = 18,
[0][1][1][0][RTW89_WW][48] = 48,
[0][1][1][0][RTW89_WW][50] = 48,
[0][1][1][0][RTW89_WW][52] = 48,
- [0][0][2][0][RTW89_WW][0] = 62,
- [0][0][2][0][RTW89_WW][2] = 62,
- [0][0][2][0][RTW89_WW][4] = 62,
- [0][0][2][0][RTW89_WW][6] = 60,
- [0][0][2][0][RTW89_WW][8] = 58,
- [0][0][2][0][RTW89_WW][10] = 62,
- [0][0][2][0][RTW89_WW][12] = 62,
- [0][0][2][0][RTW89_WW][14] = 62,
- [0][0][2][0][RTW89_WW][15] = 62,
- [0][0][2][0][RTW89_WW][17] = 62,
- [0][0][2][0][RTW89_WW][19] = 62,
- [0][0][2][0][RTW89_WW][21] = 62,
- [0][0][2][0][RTW89_WW][23] = 62,
+ [0][0][2][0][RTW89_WW][0] = 52,
+ [0][0][2][0][RTW89_WW][2] = 52,
+ [0][0][2][0][RTW89_WW][4] = 52,
+ [0][0][2][0][RTW89_WW][6] = 52,
+ [0][0][2][0][RTW89_WW][8] = 52,
+ [0][0][2][0][RTW89_WW][10] = 52,
+ [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][14] = 52,
+ [0][0][2][0][RTW89_WW][15] = 66,
+ [0][0][2][0][RTW89_WW][17] = 66,
+ [0][0][2][0][RTW89_WW][19] = 66,
+ [0][0][2][0][RTW89_WW][21] = 66,
+ [0][0][2][0][RTW89_WW][23] = 66,
[0][0][2][0][RTW89_WW][25] = 66,
[0][0][2][0][RTW89_WW][27] = 66,
[0][0][2][0][RTW89_WW][29] = 66,
- [0][0][2][0][RTW89_WW][31] = 62,
- [0][0][2][0][RTW89_WW][33] = 62,
- [0][0][2][0][RTW89_WW][35] = 62,
- [0][0][2][0][RTW89_WW][37] = 70,
+ [0][0][2][0][RTW89_WW][31] = 66,
+ [0][0][2][0][RTW89_WW][33] = 66,
+ [0][0][2][0][RTW89_WW][35] = 56,
+ [0][0][2][0][RTW89_WW][37] = 64,
[0][0][2][0][RTW89_WW][38] = 30,
[0][0][2][0][RTW89_WW][40] = 30,
[0][0][2][0][RTW89_WW][42] = 30,
[0][0][2][0][RTW89_WW][44] = 30,
[0][0][2][0][RTW89_WW][46] = 30,
- [0][0][2][0][RTW89_WW][48] = 70,
- [0][0][2][0][RTW89_WW][50] = 70,
- [0][0][2][0][RTW89_WW][52] = 70,
- [0][1][2][0][RTW89_WW][0] = 44,
- [0][1][2][0][RTW89_WW][2] = 44,
- [0][1][2][0][RTW89_WW][4] = 44,
- [0][1][2][0][RTW89_WW][6] = 44,
- [0][1][2][0][RTW89_WW][8] = 42,
- [0][1][2][0][RTW89_WW][10] = 50,
- [0][1][2][0][RTW89_WW][12] = 50,
- [0][1][2][0][RTW89_WW][14] = 50,
- [0][1][2][0][RTW89_WW][15] = 50,
- [0][1][2][0][RTW89_WW][17] = 50,
- [0][1][2][0][RTW89_WW][19] = 50,
- [0][1][2][0][RTW89_WW][21] = 50,
- [0][1][2][0][RTW89_WW][23] = 50,
+ [0][0][2][0][RTW89_WW][48] = 72,
+ [0][0][2][0][RTW89_WW][50] = 72,
+ [0][0][2][0][RTW89_WW][52] = 72,
+ [0][1][2][0][RTW89_WW][0] = 36,
+ [0][1][2][0][RTW89_WW][2] = 36,
+ [0][1][2][0][RTW89_WW][4] = 36,
+ [0][1][2][0][RTW89_WW][6] = 38,
+ [0][1][2][0][RTW89_WW][8] = 40,
+ [0][1][2][0][RTW89_WW][10] = 40,
+ [0][1][2][0][RTW89_WW][12] = 40,
+ [0][1][2][0][RTW89_WW][14] = 40,
+ [0][1][2][0][RTW89_WW][15] = 54,
+ [0][1][2][0][RTW89_WW][17] = 54,
+ [0][1][2][0][RTW89_WW][19] = 54,
+ [0][1][2][0][RTW89_WW][21] = 54,
+ [0][1][2][0][RTW89_WW][23] = 54,
[0][1][2][0][RTW89_WW][25] = 54,
[0][1][2][0][RTW89_WW][27] = 54,
[0][1][2][0][RTW89_WW][29] = 54,
- [0][1][2][0][RTW89_WW][31] = 50,
- [0][1][2][0][RTW89_WW][33] = 50,
- [0][1][2][0][RTW89_WW][35] = 50,
- [0][1][2][0][RTW89_WW][37] = 62,
+ [0][1][2][0][RTW89_WW][31] = 54,
+ [0][1][2][0][RTW89_WW][33] = 54,
+ [0][1][2][0][RTW89_WW][35] = 46,
+ [0][1][2][0][RTW89_WW][37] = 52,
[0][1][2][0][RTW89_WW][38] = 18,
[0][1][2][0][RTW89_WW][40] = 18,
[0][1][2][0][RTW89_WW][42] = 18,
[0][1][2][0][RTW89_WW][44] = 18,
[0][1][2][0][RTW89_WW][46] = 18,
- [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][48] = 48,
[0][1][2][0][RTW89_WW][50] = 50,
- [0][1][2][0][RTW89_WW][52] = 50,
- [0][1][2][1][RTW89_WW][0] = 38,
- [0][1][2][1][RTW89_WW][2] = 38,
- [0][1][2][1][RTW89_WW][4] = 38,
- [0][1][2][1][RTW89_WW][6] = 38,
- [0][1][2][1][RTW89_WW][8] = 38,
- [0][1][2][1][RTW89_WW][10] = 38,
- [0][1][2][1][RTW89_WW][12] = 38,
- [0][1][2][1][RTW89_WW][14] = 38,
- [0][1][2][1][RTW89_WW][15] = 38,
- [0][1][2][1][RTW89_WW][17] = 38,
- [0][1][2][1][RTW89_WW][19] = 38,
- [0][1][2][1][RTW89_WW][21] = 38,
- [0][1][2][1][RTW89_WW][23] = 38,
+ [0][1][2][0][RTW89_WW][52] = 48,
+ [0][1][2][1][RTW89_WW][0] = 36,
+ [0][1][2][1][RTW89_WW][2] = 36,
+ [0][1][2][1][RTW89_WW][4] = 36,
+ [0][1][2][1][RTW89_WW][6] = 36,
+ [0][1][2][1][RTW89_WW][8] = 36,
+ [0][1][2][1][RTW89_WW][10] = 36,
+ [0][1][2][1][RTW89_WW][12] = 36,
+ [0][1][2][1][RTW89_WW][14] = 36,
+ [0][1][2][1][RTW89_WW][15] = 40,
+ [0][1][2][1][RTW89_WW][17] = 40,
+ [0][1][2][1][RTW89_WW][19] = 40,
+ [0][1][2][1][RTW89_WW][21] = 40,
+ [0][1][2][1][RTW89_WW][23] = 40,
[0][1][2][1][RTW89_WW][25] = 40,
[0][1][2][1][RTW89_WW][27] = 40,
[0][1][2][1][RTW89_WW][29] = 40,
- [0][1][2][1][RTW89_WW][31] = 38,
- [0][1][2][1][RTW89_WW][33] = 38,
- [0][1][2][1][RTW89_WW][35] = 38,
- [0][1][2][1][RTW89_WW][37] = 60,
+ [0][1][2][1][RTW89_WW][31] = 40,
+ [0][1][2][1][RTW89_WW][33] = 40,
+ [0][1][2][1][RTW89_WW][35] = 40,
+ [0][1][2][1][RTW89_WW][37] = 40,
[0][1][2][1][RTW89_WW][38] = 6,
[0][1][2][1][RTW89_WW][40] = 6,
[0][1][2][1][RTW89_WW][42] = 6,
[0][1][2][1][RTW89_WW][44] = 6,
[0][1][2][1][RTW89_WW][46] = 6,
- [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][48] = 48,
[0][1][2][1][RTW89_WW][50] = 50,
- [0][1][2][1][RTW89_WW][52] = 50,
- [1][0][2][0][RTW89_WW][1] = 58,
- [1][0][2][0][RTW89_WW][5] = 66,
- [1][0][2][0][RTW89_WW][9] = 66,
- [1][0][2][0][RTW89_WW][13] = 58,
+ [0][1][2][1][RTW89_WW][52] = 48,
+ [1][0][2][0][RTW89_WW][1] = 54,
+ [1][0][2][0][RTW89_WW][5] = 54,
+ [1][0][2][0][RTW89_WW][9] = 54,
+ [1][0][2][0][RTW89_WW][13] = 52,
[1][0][2][0][RTW89_WW][16] = 56,
- [1][0][2][0][RTW89_WW][20] = 66,
- [1][0][2][0][RTW89_WW][24] = 66,
+ [1][0][2][0][RTW89_WW][20] = 56,
+ [1][0][2][0][RTW89_WW][24] = 56,
[1][0][2][0][RTW89_WW][28] = 66,
- [1][0][2][0][RTW89_WW][32] = 66,
- [1][0][2][0][RTW89_WW][36] = 66,
+ [1][0][2][0][RTW89_WW][32] = 62,
+ [1][0][2][0][RTW89_WW][36] = 64,
[1][0][2][0][RTW89_WW][39] = 30,
[1][0][2][0][RTW89_WW][43] = 30,
[1][0][2][0][RTW89_WW][47] = 68,
[1][0][2][0][RTW89_WW][51] = 68,
- [1][1][2][0][RTW89_WW][1] = 48,
- [1][1][2][0][RTW89_WW][5] = 52,
- [1][1][2][0][RTW89_WW][9] = 52,
- [1][1][2][0][RTW89_WW][13] = 52,
- [1][1][2][0][RTW89_WW][16] = 48,
+ [1][1][2][0][RTW89_WW][1] = 42,
+ [1][1][2][0][RTW89_WW][5] = 42,
+ [1][1][2][0][RTW89_WW][9] = 42,
+ [1][1][2][0][RTW89_WW][13] = 42,
+ [1][1][2][0][RTW89_WW][16] = 54,
[1][1][2][0][RTW89_WW][20] = 54,
[1][1][2][0][RTW89_WW][24] = 54,
[1][1][2][0][RTW89_WW][28] = 54,
[1][1][2][0][RTW89_WW][32] = 54,
- [1][1][2][0][RTW89_WW][36] = 66,
+ [1][1][2][0][RTW89_WW][36] = 52,
[1][1][2][0][RTW89_WW][39] = 18,
[1][1][2][0][RTW89_WW][43] = 18,
- [1][1][2][0][RTW89_WW][47] = 60,
- [1][1][2][0][RTW89_WW][51] = 58,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 60,
[1][1][2][1][RTW89_WW][1] = 40,
[1][1][2][1][RTW89_WW][5] = 40,
[1][1][2][1][RTW89_WW][9] = 40,
@@ -15035,2082 +29517,3694 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][24] = 40,
[1][1][2][1][RTW89_WW][28] = 40,
[1][1][2][1][RTW89_WW][32] = 40,
- [1][1][2][1][RTW89_WW][36] = 60,
+ [1][1][2][1][RTW89_WW][36] = 40,
[1][1][2][1][RTW89_WW][39] = 6,
[1][1][2][1][RTW89_WW][43] = 6,
- [1][1][2][1][RTW89_WW][47] = 60,
- [1][1][2][1][RTW89_WW][51] = 58,
- [2][0][2][0][RTW89_WW][3] = 56,
- [2][0][2][0][RTW89_WW][11] = 58,
- [2][0][2][0][RTW89_WW][18] = 54,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 60,
+ [2][0][2][0][RTW89_WW][3] = 54,
+ [2][0][2][0][RTW89_WW][11] = 50,
+ [2][0][2][0][RTW89_WW][18] = 56,
[2][0][2][0][RTW89_WW][26] = 60,
[2][0][2][0][RTW89_WW][34] = 60,
[2][0][2][0][RTW89_WW][41] = 30,
- [2][0][2][0][RTW89_WW][49] = 56,
- [2][1][2][0][RTW89_WW][3] = 48,
- [2][1][2][0][RTW89_WW][11] = 52,
- [2][1][2][0][RTW89_WW][18] = 48,
- [2][1][2][0][RTW89_WW][26] = 54,
- [2][1][2][0][RTW89_WW][34] = 60,
+ [2][0][2][0][RTW89_WW][49] = 62,
+ [2][1][2][0][RTW89_WW][3] = 46,
+ [2][1][2][0][RTW89_WW][11] = 38,
+ [2][1][2][0][RTW89_WW][18] = 50,
+ [2][1][2][0][RTW89_WW][26] = 52,
+ [2][1][2][0][RTW89_WW][34] = 52,
[2][1][2][0][RTW89_WW][41] = 18,
- [2][1][2][0][RTW89_WW][49] = 50,
+ [2][1][2][0][RTW89_WW][49] = 62,
[2][1][2][1][RTW89_WW][3] = 40,
- [2][1][2][1][RTW89_WW][11] = 40,
+ [2][1][2][1][RTW89_WW][11] = 38,
[2][1][2][1][RTW89_WW][18] = 40,
[2][1][2][1][RTW89_WW][26] = 42,
- [2][1][2][1][RTW89_WW][34] = 60,
+ [2][1][2][1][RTW89_WW][34] = 40,
[2][1][2][1][RTW89_WW][41] = 6,
- [2][1][2][1][RTW89_WW][49] = 50,
- [3][0][2][0][RTW89_WW][7] = 38,
- [3][0][2][0][RTW89_WW][22] = 50,
- [3][0][2][0][RTW89_WW][45] = 0,
- [3][1][2][0][RTW89_WW][7] = 26,
- [3][1][2][0][RTW89_WW][22] = 42,
- [3][1][2][0][RTW89_WW][45] = 0,
- [3][1][2][1][RTW89_WW][7] = 14,
- [3][1][2][1][RTW89_WW][22] = 30,
- [3][1][2][1][RTW89_WW][45] = 0,
- [0][0][1][0][RTW89_FCC][0] = 70,
+ [2][1][2][1][RTW89_WW][49] = 62,
+ [3][0][2][0][RTW89_WW][7] = 40,
+ [3][0][2][0][RTW89_WW][22] = 42,
+ [3][0][2][0][RTW89_WW][45] = 52,
+ [3][1][2][0][RTW89_WW][7] = 32,
+ [3][1][2][0][RTW89_WW][22] = 36,
+ [3][1][2][0][RTW89_WW][45] = 46,
+ [3][1][2][1][RTW89_WW][7] = 32,
+ [3][1][2][1][RTW89_WW][22] = 36,
+ [3][1][2][1][RTW89_WW][45] = 46,
+ [0][0][1][0][RTW89_FCC][0] = 72,
[0][0][1][0][RTW89_ETSI][0] = 66,
[0][0][1][0][RTW89_MKK][0] = 66,
- [0][0][1][0][RTW89_IC][0] = 62,
- [0][0][1][0][RTW89_ACMA][0] = 60,
- [0][0][1][0][RTW89_FCC][2] = 70,
+ [0][0][1][0][RTW89_IC][0] = 60,
+ [0][0][1][0][RTW89_KCC][0] = 52,
+ [0][0][1][0][RTW89_ACMA][0] = 66,
+ [0][0][1][0][RTW89_CN][0] = 50,
+ [0][0][1][0][RTW89_UK][0] = 66,
+ [0][0][1][0][RTW89_FCC][2] = 72,
[0][0][1][0][RTW89_ETSI][2] = 66,
[0][0][1][0][RTW89_MKK][2] = 66,
- [0][0][1][0][RTW89_IC][2] = 62,
- [0][0][1][0][RTW89_ACMA][2] = 60,
- [0][0][1][0][RTW89_FCC][4] = 70,
+ [0][0][1][0][RTW89_IC][2] = 60,
+ [0][0][1][0][RTW89_KCC][2] = 52,
+ [0][0][1][0][RTW89_ACMA][2] = 66,
+ [0][0][1][0][RTW89_CN][2] = 50,
+ [0][0][1][0][RTW89_UK][2] = 66,
+ [0][0][1][0][RTW89_FCC][4] = 72,
[0][0][1][0][RTW89_ETSI][4] = 66,
[0][0][1][0][RTW89_MKK][4] = 66,
- [0][0][1][0][RTW89_IC][4] = 62,
- [0][0][1][0][RTW89_ACMA][4] = 60,
- [0][0][1][0][RTW89_FCC][6] = 70,
+ [0][0][1][0][RTW89_IC][4] = 60,
+ [0][0][1][0][RTW89_KCC][4] = 52,
+ [0][0][1][0][RTW89_ACMA][4] = 66,
+ [0][0][1][0][RTW89_CN][4] = 50,
+ [0][0][1][0][RTW89_UK][4] = 66,
+ [0][0][1][0][RTW89_FCC][6] = 72,
[0][0][1][0][RTW89_ETSI][6] = 66,
[0][0][1][0][RTW89_MKK][6] = 66,
- [0][0][1][0][RTW89_IC][6] = 62,
- [0][0][1][0][RTW89_ACMA][6] = 60,
- [0][0][1][0][RTW89_FCC][8] = 70,
+ [0][0][1][0][RTW89_IC][6] = 58,
+ [0][0][1][0][RTW89_KCC][6] = 62,
+ [0][0][1][0][RTW89_ACMA][6] = 66,
+ [0][0][1][0][RTW89_CN][6] = 50,
+ [0][0][1][0][RTW89_UK][6] = 66,
+ [0][0][1][0][RTW89_FCC][8] = 72,
[0][0][1][0][RTW89_ETSI][8] = 66,
[0][0][1][0][RTW89_MKK][8] = 66,
- [0][0][1][0][RTW89_IC][8] = 66,
- [0][0][1][0][RTW89_ACMA][8] = 60,
- [0][0][1][0][RTW89_FCC][10] = 70,
+ [0][0][1][0][RTW89_IC][8] = 64,
+ [0][0][1][0][RTW89_KCC][8] = 70,
+ [0][0][1][0][RTW89_ACMA][8] = 66,
+ [0][0][1][0][RTW89_CN][8] = 50,
+ [0][0][1][0][RTW89_UK][8] = 66,
+ [0][0][1][0][RTW89_FCC][10] = 72,
[0][0][1][0][RTW89_ETSI][10] = 66,
[0][0][1][0][RTW89_MKK][10] = 66,
- [0][0][1][0][RTW89_IC][10] = 66,
- [0][0][1][0][RTW89_ACMA][10] = 60,
- [0][0][1][0][RTW89_FCC][12] = 70,
+ [0][0][1][0][RTW89_IC][10] = 64,
+ [0][0][1][0][RTW89_KCC][10] = 70,
+ [0][0][1][0][RTW89_ACMA][10] = 66,
+ [0][0][1][0][RTW89_CN][10] = 50,
+ [0][0][1][0][RTW89_UK][10] = 66,
+ [0][0][1][0][RTW89_FCC][12] = 72,
[0][0][1][0][RTW89_ETSI][12] = 66,
[0][0][1][0][RTW89_MKK][12] = 66,
- [0][0][1][0][RTW89_IC][12] = 66,
- [0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_IC][12] = 64,
+ [0][0][1][0][RTW89_KCC][12] = 66,
+ [0][0][1][0][RTW89_ACMA][12] = 66,
+ [0][0][1][0][RTW89_CN][12] = 50,
+ [0][0][1][0][RTW89_UK][12] = 66,
[0][0][1][0][RTW89_FCC][14] = 70,
[0][0][1][0][RTW89_ETSI][14] = 66,
[0][0][1][0][RTW89_MKK][14] = 66,
- [0][0][1][0][RTW89_IC][14] = 66,
- [0][0][1][0][RTW89_ACMA][14] = 60,
- [0][0][1][0][RTW89_FCC][15] = 68,
+ [0][0][1][0][RTW89_IC][14] = 64,
+ [0][0][1][0][RTW89_KCC][14] = 66,
+ [0][0][1][0][RTW89_ACMA][14] = 66,
+ [0][0][1][0][RTW89_CN][14] = 50,
+ [0][0][1][0][RTW89_UK][14] = 66,
+ [0][0][1][0][RTW89_FCC][15] = 72,
[0][0][1][0][RTW89_ETSI][15] = 66,
[0][0][1][0][RTW89_MKK][15] = 70,
- [0][0][1][0][RTW89_IC][15] = 70,
- [0][0][1][0][RTW89_ACMA][15] = 60,
- [0][0][1][0][RTW89_FCC][17] = 70,
+ [0][0][1][0][RTW89_IC][15] = 72,
+ [0][0][1][0][RTW89_KCC][15] = 70,
+ [0][0][1][0][RTW89_ACMA][15] = 66,
+ [0][0][1][0][RTW89_CN][15] = 127,
+ [0][0][1][0][RTW89_UK][15] = 66,
+ [0][0][1][0][RTW89_FCC][17] = 72,
[0][0][1][0][RTW89_ETSI][17] = 66,
[0][0][1][0][RTW89_MKK][17] = 70,
- [0][0][1][0][RTW89_IC][17] = 70,
- [0][0][1][0][RTW89_ACMA][17] = 60,
- [0][0][1][0][RTW89_FCC][19] = 70,
+ [0][0][1][0][RTW89_IC][17] = 72,
+ [0][0][1][0][RTW89_KCC][17] = 70,
+ [0][0][1][0][RTW89_ACMA][17] = 66,
+ [0][0][1][0][RTW89_CN][17] = 127,
+ [0][0][1][0][RTW89_UK][17] = 66,
+ [0][0][1][0][RTW89_FCC][19] = 72,
[0][0][1][0][RTW89_ETSI][19] = 66,
[0][0][1][0][RTW89_MKK][19] = 70,
- [0][0][1][0][RTW89_IC][19] = 70,
- [0][0][1][0][RTW89_ACMA][19] = 60,
- [0][0][1][0][RTW89_FCC][21] = 70,
+ [0][0][1][0][RTW89_IC][19] = 72,
+ [0][0][1][0][RTW89_KCC][19] = 70,
+ [0][0][1][0][RTW89_ACMA][19] = 66,
+ [0][0][1][0][RTW89_CN][19] = 127,
+ [0][0][1][0][RTW89_UK][19] = 66,
+ [0][0][1][0][RTW89_FCC][21] = 72,
[0][0][1][0][RTW89_ETSI][21] = 66,
[0][0][1][0][RTW89_MKK][21] = 70,
- [0][0][1][0][RTW89_IC][21] = 70,
- [0][0][1][0][RTW89_ACMA][21] = 60,
- [0][0][1][0][RTW89_FCC][23] = 70,
+ [0][0][1][0][RTW89_IC][21] = 72,
+ [0][0][1][0][RTW89_KCC][21] = 70,
+ [0][0][1][0][RTW89_ACMA][21] = 66,
+ [0][0][1][0][RTW89_CN][21] = 127,
+ [0][0][1][0][RTW89_UK][21] = 66,
+ [0][0][1][0][RTW89_FCC][23] = 72,
[0][0][1][0][RTW89_ETSI][23] = 66,
[0][0][1][0][RTW89_MKK][23] = 70,
- [0][0][1][0][RTW89_IC][23] = 70,
- [0][0][1][0][RTW89_ACMA][23] = 60,
- [0][0][1][0][RTW89_FCC][25] = 70,
+ [0][0][1][0][RTW89_IC][23] = 72,
+ [0][0][1][0][RTW89_KCC][23] = 70,
+ [0][0][1][0][RTW89_ACMA][23] = 66,
+ [0][0][1][0][RTW89_CN][23] = 127,
+ [0][0][1][0][RTW89_UK][23] = 66,
+ [0][0][1][0][RTW89_FCC][25] = 72,
[0][0][1][0][RTW89_ETSI][25] = 66,
[0][0][1][0][RTW89_MKK][25] = 70,
[0][0][1][0][RTW89_IC][25] = 127,
+ [0][0][1][0][RTW89_KCC][25] = 70,
[0][0][1][0][RTW89_ACMA][25] = 127,
- [0][0][1][0][RTW89_FCC][27] = 70,
+ [0][0][1][0][RTW89_CN][25] = 127,
+ [0][0][1][0][RTW89_UK][25] = 66,
+ [0][0][1][0][RTW89_FCC][27] = 72,
[0][0][1][0][RTW89_ETSI][27] = 66,
[0][0][1][0][RTW89_MKK][27] = 70,
[0][0][1][0][RTW89_IC][27] = 127,
+ [0][0][1][0][RTW89_KCC][27] = 70,
[0][0][1][0][RTW89_ACMA][27] = 127,
- [0][0][1][0][RTW89_FCC][29] = 70,
+ [0][0][1][0][RTW89_CN][27] = 127,
+ [0][0][1][0][RTW89_UK][27] = 66,
+ [0][0][1][0][RTW89_FCC][29] = 72,
[0][0][1][0][RTW89_ETSI][29] = 66,
[0][0][1][0][RTW89_MKK][29] = 70,
[0][0][1][0][RTW89_IC][29] = 127,
+ [0][0][1][0][RTW89_KCC][29] = 70,
[0][0][1][0][RTW89_ACMA][29] = 127,
- [0][0][1][0][RTW89_FCC][31] = 70,
+ [0][0][1][0][RTW89_CN][29] = 127,
+ [0][0][1][0][RTW89_UK][29] = 66,
+ [0][0][1][0][RTW89_FCC][31] = 72,
[0][0][1][0][RTW89_ETSI][31] = 66,
[0][0][1][0][RTW89_MKK][31] = 70,
- [0][0][1][0][RTW89_IC][31] = 70,
- [0][0][1][0][RTW89_ACMA][31] = 60,
- [0][0][1][0][RTW89_FCC][33] = 70,
+ [0][0][1][0][RTW89_IC][31] = 72,
+ [0][0][1][0][RTW89_KCC][31] = 70,
+ [0][0][1][0][RTW89_ACMA][31] = 66,
+ [0][0][1][0][RTW89_CN][31] = 127,
+ [0][0][1][0][RTW89_UK][31] = 66,
+ [0][0][1][0][RTW89_FCC][33] = 72,
[0][0][1][0][RTW89_ETSI][33] = 66,
[0][0][1][0][RTW89_MKK][33] = 70,
- [0][0][1][0][RTW89_IC][33] = 70,
- [0][0][1][0][RTW89_ACMA][33] = 60,
- [0][0][1][0][RTW89_FCC][35] = 62,
+ [0][0][1][0][RTW89_IC][33] = 72,
+ [0][0][1][0][RTW89_KCC][33] = 70,
+ [0][0][1][0][RTW89_ACMA][33] = 66,
+ [0][0][1][0][RTW89_CN][33] = 127,
+ [0][0][1][0][RTW89_UK][33] = 66,
+ [0][0][1][0][RTW89_FCC][35] = 60,
[0][0][1][0][RTW89_ETSI][35] = 66,
[0][0][1][0][RTW89_MKK][35] = 70,
- [0][0][1][0][RTW89_IC][35] = 70,
- [0][0][1][0][RTW89_ACMA][35] = 60,
- [0][0][1][0][RTW89_FCC][37] = 70,
+ [0][0][1][0][RTW89_IC][35] = 60,
+ [0][0][1][0][RTW89_KCC][35] = 70,
+ [0][0][1][0][RTW89_ACMA][35] = 66,
+ [0][0][1][0][RTW89_CN][35] = 127,
+ [0][0][1][0][RTW89_UK][35] = 66,
+ [0][0][1][0][RTW89_FCC][37] = 72,
[0][0][1][0][RTW89_ETSI][37] = 127,
[0][0][1][0][RTW89_MKK][37] = 70,
- [0][0][1][0][RTW89_IC][37] = 70,
+ [0][0][1][0][RTW89_IC][37] = 72,
+ [0][0][1][0][RTW89_KCC][37] = 70,
[0][0][1][0][RTW89_ACMA][37] = 70,
- [0][0][1][0][RTW89_FCC][38] = 70,
+ [0][0][1][0][RTW89_CN][37] = 127,
+ [0][0][1][0][RTW89_UK][37] = 64,
+ [0][0][1][0][RTW89_FCC][38] = 72,
[0][0][1][0][RTW89_ETSI][38] = 30,
[0][0][1][0][RTW89_MKK][38] = 127,
- [0][0][1][0][RTW89_IC][38] = 70,
+ [0][0][1][0][RTW89_IC][38] = 72,
+ [0][0][1][0][RTW89_KCC][38] = 62,
[0][0][1][0][RTW89_ACMA][38] = 70,
- [0][0][1][0][RTW89_FCC][40] = 70,
+ [0][0][1][0][RTW89_CN][38] = 68,
+ [0][0][1][0][RTW89_UK][38] = 64,
+ [0][0][1][0][RTW89_FCC][40] = 72,
[0][0][1][0][RTW89_ETSI][40] = 30,
[0][0][1][0][RTW89_MKK][40] = 127,
- [0][0][1][0][RTW89_IC][40] = 70,
+ [0][0][1][0][RTW89_IC][40] = 72,
+ [0][0][1][0][RTW89_KCC][40] = 62,
[0][0][1][0][RTW89_ACMA][40] = 70,
- [0][0][1][0][RTW89_FCC][42] = 70,
+ [0][0][1][0][RTW89_CN][40] = 68,
+ [0][0][1][0][RTW89_UK][40] = 64,
+ [0][0][1][0][RTW89_FCC][42] = 72,
[0][0][1][0][RTW89_ETSI][42] = 30,
[0][0][1][0][RTW89_MKK][42] = 127,
- [0][0][1][0][RTW89_IC][42] = 70,
+ [0][0][1][0][RTW89_IC][42] = 72,
+ [0][0][1][0][RTW89_KCC][42] = 62,
[0][0][1][0][RTW89_ACMA][42] = 70,
- [0][0][1][0][RTW89_FCC][44] = 70,
+ [0][0][1][0][RTW89_CN][42] = 68,
+ [0][0][1][0][RTW89_UK][42] = 64,
+ [0][0][1][0][RTW89_FCC][44] = 72,
[0][0][1][0][RTW89_ETSI][44] = 30,
[0][0][1][0][RTW89_MKK][44] = 127,
- [0][0][1][0][RTW89_IC][44] = 70,
+ [0][0][1][0][RTW89_IC][44] = 72,
+ [0][0][1][0][RTW89_KCC][44] = 62,
[0][0][1][0][RTW89_ACMA][44] = 70,
- [0][0][1][0][RTW89_FCC][46] = 70,
+ [0][0][1][0][RTW89_CN][44] = 68,
+ [0][0][1][0][RTW89_UK][44] = 64,
+ [0][0][1][0][RTW89_FCC][46] = 72,
[0][0][1][0][RTW89_ETSI][46] = 30,
[0][0][1][0][RTW89_MKK][46] = 127,
- [0][0][1][0][RTW89_IC][46] = 70,
+ [0][0][1][0][RTW89_IC][46] = 72,
+ [0][0][1][0][RTW89_KCC][46] = 62,
[0][0][1][0][RTW89_ACMA][46] = 70,
- [0][0][1][0][RTW89_FCC][48] = 70,
+ [0][0][1][0][RTW89_CN][46] = 68,
+ [0][0][1][0][RTW89_UK][46] = 64,
+ [0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
[0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
- [0][0][1][0][RTW89_FCC][50] = 70,
+ [0][0][1][0][RTW89_CN][48] = 127,
+ [0][0][1][0][RTW89_UK][48] = 127,
+ [0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
[0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
- [0][0][1][0][RTW89_FCC][52] = 70,
+ [0][0][1][0][RTW89_CN][50] = 127,
+ [0][0][1][0][RTW89_UK][50] = 127,
+ [0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
[0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
+ [0][0][1][0][RTW89_CN][52] = 127,
+ [0][0][1][0][RTW89_UK][52] = 127,
[0][1][1][0][RTW89_FCC][0] = 60,
[0][1][1][0][RTW89_ETSI][0] = 54,
[0][1][1][0][RTW89_MKK][0] = 54,
- [0][1][1][0][RTW89_IC][0] = 42,
- [0][1][1][0][RTW89_ACMA][0] = 48,
+ [0][1][1][0][RTW89_IC][0] = 34,
+ [0][1][1][0][RTW89_KCC][0] = 40,
+ [0][1][1][0][RTW89_ACMA][0] = 54,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 54,
[0][1][1][0][RTW89_FCC][2] = 60,
[0][1][1][0][RTW89_ETSI][2] = 54,
[0][1][1][0][RTW89_MKK][2] = 54,
- [0][1][1][0][RTW89_IC][2] = 42,
- [0][1][1][0][RTW89_ACMA][2] = 48,
+ [0][1][1][0][RTW89_IC][2] = 34,
+ [0][1][1][0][RTW89_KCC][2] = 40,
+ [0][1][1][0][RTW89_ACMA][2] = 54,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 54,
[0][1][1][0][RTW89_FCC][4] = 60,
[0][1][1][0][RTW89_ETSI][4] = 54,
[0][1][1][0][RTW89_MKK][4] = 54,
- [0][1][1][0][RTW89_IC][4] = 42,
- [0][1][1][0][RTW89_ACMA][4] = 48,
+ [0][1][1][0][RTW89_IC][4] = 34,
+ [0][1][1][0][RTW89_KCC][4] = 40,
+ [0][1][1][0][RTW89_ACMA][4] = 54,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 54,
[0][1][1][0][RTW89_FCC][6] = 60,
[0][1][1][0][RTW89_ETSI][6] = 54,
[0][1][1][0][RTW89_MKK][6] = 54,
- [0][1][1][0][RTW89_IC][6] = 42,
- [0][1][1][0][RTW89_ACMA][6] = 48,
- [0][1][1][0][RTW89_FCC][8] = 60,
+ [0][1][1][0][RTW89_IC][6] = 36,
+ [0][1][1][0][RTW89_KCC][6] = 60,
+ [0][1][1][0][RTW89_ACMA][6] = 54,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 54,
+ [0][1][1][0][RTW89_FCC][8] = 62,
[0][1][1][0][RTW89_ETSI][8] = 54,
[0][1][1][0][RTW89_MKK][8] = 52,
- [0][1][1][0][RTW89_IC][8] = 54,
- [0][1][1][0][RTW89_ACMA][8] = 48,
- [0][1][1][0][RTW89_FCC][10] = 60,
+ [0][1][1][0][RTW89_IC][8] = 52,
+ [0][1][1][0][RTW89_KCC][8] = 60,
+ [0][1][1][0][RTW89_ACMA][8] = 54,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 54,
+ [0][1][1][0][RTW89_FCC][10] = 62,
[0][1][1][0][RTW89_ETSI][10] = 54,
[0][1][1][0][RTW89_MKK][10] = 54,
- [0][1][1][0][RTW89_IC][10] = 54,
- [0][1][1][0][RTW89_ACMA][10] = 48,
- [0][1][1][0][RTW89_FCC][12] = 60,
+ [0][1][1][0][RTW89_IC][10] = 52,
+ [0][1][1][0][RTW89_KCC][10] = 60,
+ [0][1][1][0][RTW89_ACMA][10] = 54,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 54,
+ [0][1][1][0][RTW89_FCC][12] = 62,
[0][1][1][0][RTW89_ETSI][12] = 54,
[0][1][1][0][RTW89_MKK][12] = 54,
- [0][1][1][0][RTW89_IC][12] = 54,
- [0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_IC][12] = 52,
+ [0][1][1][0][RTW89_KCC][12] = 60,
+ [0][1][1][0][RTW89_ACMA][12] = 54,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 54,
[0][1][1][0][RTW89_FCC][14] = 60,
[0][1][1][0][RTW89_ETSI][14] = 54,
[0][1][1][0][RTW89_MKK][14] = 54,
- [0][1][1][0][RTW89_IC][14] = 54,
- [0][1][1][0][RTW89_ACMA][14] = 48,
- [0][1][1][0][RTW89_FCC][15] = 58,
+ [0][1][1][0][RTW89_IC][14] = 52,
+ [0][1][1][0][RTW89_KCC][14] = 60,
+ [0][1][1][0][RTW89_ACMA][14] = 54,
+ [0][1][1][0][RTW89_CN][14] = 46,
+ [0][1][1][0][RTW89_UK][14] = 54,
+ [0][1][1][0][RTW89_FCC][15] = 60,
[0][1][1][0][RTW89_ETSI][15] = 54,
[0][1][1][0][RTW89_MKK][15] = 70,
- [0][1][1][0][RTW89_IC][15] = 68,
- [0][1][1][0][RTW89_ACMA][15] = 48,
+ [0][1][1][0][RTW89_IC][15] = 60,
+ [0][1][1][0][RTW89_KCC][15] = 60,
+ [0][1][1][0][RTW89_ACMA][15] = 54,
+ [0][1][1][0][RTW89_CN][15] = 127,
+ [0][1][1][0][RTW89_UK][15] = 54,
[0][1][1][0][RTW89_FCC][17] = 60,
[0][1][1][0][RTW89_ETSI][17] = 54,
[0][1][1][0][RTW89_MKK][17] = 70,
- [0][1][1][0][RTW89_IC][17] = 70,
- [0][1][1][0][RTW89_ACMA][17] = 48,
+ [0][1][1][0][RTW89_IC][17] = 60,
+ [0][1][1][0][RTW89_KCC][17] = 60,
+ [0][1][1][0][RTW89_ACMA][17] = 54,
+ [0][1][1][0][RTW89_CN][17] = 127,
+ [0][1][1][0][RTW89_UK][17] = 54,
[0][1][1][0][RTW89_FCC][19] = 60,
[0][1][1][0][RTW89_ETSI][19] = 54,
[0][1][1][0][RTW89_MKK][19] = 70,
- [0][1][1][0][RTW89_IC][19] = 70,
- [0][1][1][0][RTW89_ACMA][19] = 48,
+ [0][1][1][0][RTW89_IC][19] = 60,
+ [0][1][1][0][RTW89_KCC][19] = 60,
+ [0][1][1][0][RTW89_ACMA][19] = 54,
+ [0][1][1][0][RTW89_CN][19] = 127,
+ [0][1][1][0][RTW89_UK][19] = 54,
[0][1][1][0][RTW89_FCC][21] = 60,
[0][1][1][0][RTW89_ETSI][21] = 54,
[0][1][1][0][RTW89_MKK][21] = 70,
- [0][1][1][0][RTW89_IC][21] = 70,
- [0][1][1][0][RTW89_ACMA][21] = 48,
+ [0][1][1][0][RTW89_IC][21] = 60,
+ [0][1][1][0][RTW89_KCC][21] = 60,
+ [0][1][1][0][RTW89_ACMA][21] = 54,
+ [0][1][1][0][RTW89_CN][21] = 127,
+ [0][1][1][0][RTW89_UK][21] = 54,
[0][1][1][0][RTW89_FCC][23] = 60,
[0][1][1][0][RTW89_ETSI][23] = 54,
[0][1][1][0][RTW89_MKK][23] = 70,
- [0][1][1][0][RTW89_IC][23] = 70,
- [0][1][1][0][RTW89_ACMA][23] = 48,
+ [0][1][1][0][RTW89_IC][23] = 60,
+ [0][1][1][0][RTW89_KCC][23] = 60,
+ [0][1][1][0][RTW89_ACMA][23] = 54,
+ [0][1][1][0][RTW89_CN][23] = 127,
+ [0][1][1][0][RTW89_UK][23] = 54,
[0][1][1][0][RTW89_FCC][25] = 60,
[0][1][1][0][RTW89_ETSI][25] = 54,
[0][1][1][0][RTW89_MKK][25] = 70,
[0][1][1][0][RTW89_IC][25] = 127,
+ [0][1][1][0][RTW89_KCC][25] = 60,
[0][1][1][0][RTW89_ACMA][25] = 127,
+ [0][1][1][0][RTW89_CN][25] = 127,
+ [0][1][1][0][RTW89_UK][25] = 54,
[0][1][1][0][RTW89_FCC][27] = 60,
[0][1][1][0][RTW89_ETSI][27] = 54,
[0][1][1][0][RTW89_MKK][27] = 70,
[0][1][1][0][RTW89_IC][27] = 127,
+ [0][1][1][0][RTW89_KCC][27] = 60,
[0][1][1][0][RTW89_ACMA][27] = 127,
+ [0][1][1][0][RTW89_CN][27] = 127,
+ [0][1][1][0][RTW89_UK][27] = 54,
[0][1][1][0][RTW89_FCC][29] = 60,
[0][1][1][0][RTW89_ETSI][29] = 54,
[0][1][1][0][RTW89_MKK][29] = 70,
[0][1][1][0][RTW89_IC][29] = 127,
+ [0][1][1][0][RTW89_KCC][29] = 60,
[0][1][1][0][RTW89_ACMA][29] = 127,
+ [0][1][1][0][RTW89_CN][29] = 127,
+ [0][1][1][0][RTW89_UK][29] = 54,
[0][1][1][0][RTW89_FCC][31] = 60,
[0][1][1][0][RTW89_ETSI][31] = 54,
[0][1][1][0][RTW89_MKK][31] = 70,
- [0][1][1][0][RTW89_IC][31] = 70,
- [0][1][1][0][RTW89_ACMA][31] = 48,
+ [0][1][1][0][RTW89_IC][31] = 60,
+ [0][1][1][0][RTW89_KCC][31] = 58,
+ [0][1][1][0][RTW89_ACMA][31] = 54,
+ [0][1][1][0][RTW89_CN][31] = 127,
+ [0][1][1][0][RTW89_UK][31] = 54,
[0][1][1][0][RTW89_FCC][33] = 60,
[0][1][1][0][RTW89_ETSI][33] = 54,
[0][1][1][0][RTW89_MKK][33] = 70,
- [0][1][1][0][RTW89_IC][33] = 70,
- [0][1][1][0][RTW89_ACMA][33] = 48,
- [0][1][1][0][RTW89_FCC][35] = 58,
+ [0][1][1][0][RTW89_IC][33] = 60,
+ [0][1][1][0][RTW89_KCC][33] = 58,
+ [0][1][1][0][RTW89_ACMA][33] = 54,
+ [0][1][1][0][RTW89_CN][33] = 127,
+ [0][1][1][0][RTW89_UK][33] = 54,
+ [0][1][1][0][RTW89_FCC][35] = 52,
[0][1][1][0][RTW89_ETSI][35] = 54,
[0][1][1][0][RTW89_MKK][35] = 70,
- [0][1][1][0][RTW89_IC][35] = 68,
- [0][1][1][0][RTW89_ACMA][35] = 48,
- [0][1][1][0][RTW89_FCC][37] = 60,
+ [0][1][1][0][RTW89_IC][35] = 52,
+ [0][1][1][0][RTW89_KCC][35] = 58,
+ [0][1][1][0][RTW89_ACMA][35] = 54,
+ [0][1][1][0][RTW89_CN][35] = 127,
+ [0][1][1][0][RTW89_UK][35] = 54,
+ [0][1][1][0][RTW89_FCC][37] = 62,
[0][1][1][0][RTW89_ETSI][37] = 127,
[0][1][1][0][RTW89_MKK][37] = 70,
- [0][1][1][0][RTW89_IC][37] = 70,
- [0][1][1][0][RTW89_ACMA][37] = 70,
- [0][1][1][0][RTW89_FCC][38] = 70,
+ [0][1][1][0][RTW89_IC][37] = 62,
+ [0][1][1][0][RTW89_KCC][37] = 58,
+ [0][1][1][0][RTW89_ACMA][37] = 64,
+ [0][1][1][0][RTW89_CN][37] = 127,
+ [0][1][1][0][RTW89_UK][37] = 52,
+ [0][1][1][0][RTW89_FCC][38] = 72,
[0][1][1][0][RTW89_ETSI][38] = 18,
[0][1][1][0][RTW89_MKK][38] = 127,
- [0][1][1][0][RTW89_IC][38] = 70,
+ [0][1][1][0][RTW89_IC][38] = 72,
+ [0][1][1][0][RTW89_KCC][38] = 60,
[0][1][1][0][RTW89_ACMA][38] = 70,
- [0][1][1][0][RTW89_FCC][40] = 70,
+ [0][1][1][0][RTW89_CN][38] = 64,
+ [0][1][1][0][RTW89_UK][38] = 52,
+ [0][1][1][0][RTW89_FCC][40] = 72,
[0][1][1][0][RTW89_ETSI][40] = 18,
[0][1][1][0][RTW89_MKK][40] = 127,
- [0][1][1][0][RTW89_IC][40] = 70,
- [0][1][1][0][RTW89_ACMA][40] = 16,
- [0][1][1][0][RTW89_FCC][42] = 70,
+ [0][1][1][0][RTW89_IC][40] = 72,
+ [0][1][1][0][RTW89_KCC][40] = 60,
+ [0][1][1][0][RTW89_ACMA][40] = 70,
+ [0][1][1][0][RTW89_CN][40] = 64,
+ [0][1][1][0][RTW89_UK][40] = 52,
+ [0][1][1][0][RTW89_FCC][42] = 72,
[0][1][1][0][RTW89_ETSI][42] = 18,
[0][1][1][0][RTW89_MKK][42] = 127,
- [0][1][1][0][RTW89_IC][42] = 70,
+ [0][1][1][0][RTW89_IC][42] = 72,
+ [0][1][1][0][RTW89_KCC][42] = 60,
[0][1][1][0][RTW89_ACMA][42] = 70,
- [0][1][1][0][RTW89_FCC][44] = 70,
+ [0][1][1][0][RTW89_CN][42] = 64,
+ [0][1][1][0][RTW89_UK][42] = 52,
+ [0][1][1][0][RTW89_FCC][44] = 72,
[0][1][1][0][RTW89_ETSI][44] = 18,
[0][1][1][0][RTW89_MKK][44] = 127,
- [0][1][1][0][RTW89_IC][44] = 70,
- [0][1][1][0][RTW89_ACMA][44] = 16,
- [0][1][1][0][RTW89_FCC][46] = 70,
+ [0][1][1][0][RTW89_IC][44] = 72,
+ [0][1][1][0][RTW89_KCC][44] = 60,
+ [0][1][1][0][RTW89_ACMA][44] = 70,
+ [0][1][1][0][RTW89_CN][44] = 60,
+ [0][1][1][0][RTW89_UK][44] = 52,
+ [0][1][1][0][RTW89_FCC][46] = 72,
[0][1][1][0][RTW89_ETSI][46] = 18,
[0][1][1][0][RTW89_MKK][46] = 127,
- [0][1][1][0][RTW89_IC][46] = 70,
+ [0][1][1][0][RTW89_IC][46] = 72,
+ [0][1][1][0][RTW89_KCC][46] = 60,
[0][1][1][0][RTW89_ACMA][46] = 70,
+ [0][1][1][0][RTW89_CN][46] = 60,
+ [0][1][1][0][RTW89_UK][46] = 52,
[0][1][1][0][RTW89_FCC][48] = 48,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
[0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
+ [0][1][1][0][RTW89_CN][48] = 127,
+ [0][1][1][0][RTW89_UK][48] = 127,
[0][1][1][0][RTW89_FCC][50] = 48,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
[0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
+ [0][1][1][0][RTW89_CN][50] = 127,
+ [0][1][1][0][RTW89_UK][50] = 127,
[0][1][1][0][RTW89_FCC][52] = 48,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
[0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
+ [0][1][1][0][RTW89_CN][52] = 127,
+ [0][1][1][0][RTW89_UK][52] = 127,
[0][0][2][0][RTW89_FCC][0] = 70,
[0][0][2][0][RTW89_ETSI][0] = 66,
[0][0][2][0][RTW89_MKK][0] = 68,
- [0][0][2][0][RTW89_IC][0] = 66,
- [0][0][2][0][RTW89_ACMA][0] = 62,
- [0][0][2][0][RTW89_FCC][2] = 70,
+ [0][0][2][0][RTW89_IC][0] = 60,
+ [0][0][2][0][RTW89_KCC][0] = 54,
+ [0][0][2][0][RTW89_ACMA][0] = 66,
+ [0][0][2][0][RTW89_CN][0] = 52,
+ [0][0][2][0][RTW89_UK][0] = 66,
+ [0][0][2][0][RTW89_FCC][2] = 72,
[0][0][2][0][RTW89_ETSI][2] = 66,
[0][0][2][0][RTW89_MKK][2] = 68,
- [0][0][2][0][RTW89_IC][2] = 66,
- [0][0][2][0][RTW89_ACMA][2] = 62,
- [0][0][2][0][RTW89_FCC][4] = 70,
+ [0][0][2][0][RTW89_IC][2] = 60,
+ [0][0][2][0][RTW89_KCC][2] = 54,
+ [0][0][2][0][RTW89_ACMA][2] = 66,
+ [0][0][2][0][RTW89_CN][2] = 52,
+ [0][0][2][0][RTW89_UK][2] = 66,
+ [0][0][2][0][RTW89_FCC][4] = 72,
[0][0][2][0][RTW89_ETSI][4] = 66,
[0][0][2][0][RTW89_MKK][4] = 68,
- [0][0][2][0][RTW89_IC][4] = 66,
- [0][0][2][0][RTW89_ACMA][4] = 62,
- [0][0][2][0][RTW89_FCC][6] = 70,
+ [0][0][2][0][RTW89_IC][4] = 60,
+ [0][0][2][0][RTW89_KCC][4] = 54,
+ [0][0][2][0][RTW89_ACMA][4] = 66,
+ [0][0][2][0][RTW89_CN][4] = 52,
+ [0][0][2][0][RTW89_UK][4] = 66,
+ [0][0][2][0][RTW89_FCC][6] = 72,
[0][0][2][0][RTW89_ETSI][6] = 66,
[0][0][2][0][RTW89_MKK][6] = 60,
- [0][0][2][0][RTW89_IC][6] = 66,
- [0][0][2][0][RTW89_ACMA][6] = 62,
- [0][0][2][0][RTW89_FCC][8] = 70,
+ [0][0][2][0][RTW89_IC][6] = 60,
+ [0][0][2][0][RTW89_KCC][6] = 68,
+ [0][0][2][0][RTW89_ACMA][6] = 66,
+ [0][0][2][0][RTW89_CN][6] = 52,
+ [0][0][2][0][RTW89_UK][6] = 66,
+ [0][0][2][0][RTW89_FCC][8] = 72,
[0][0][2][0][RTW89_ETSI][8] = 66,
[0][0][2][0][RTW89_MKK][8] = 58,
- [0][0][2][0][RTW89_IC][8] = 66,
- [0][0][2][0][RTW89_ACMA][8] = 62,
- [0][0][2][0][RTW89_FCC][10] = 70,
+ [0][0][2][0][RTW89_IC][8] = 64,
+ [0][0][2][0][RTW89_KCC][8] = 70,
+ [0][0][2][0][RTW89_ACMA][8] = 66,
+ [0][0][2][0][RTW89_CN][8] = 52,
+ [0][0][2][0][RTW89_UK][8] = 66,
+ [0][0][2][0][RTW89_FCC][10] = 72,
[0][0][2][0][RTW89_ETSI][10] = 66,
[0][0][2][0][RTW89_MKK][10] = 70,
- [0][0][2][0][RTW89_IC][10] = 66,
- [0][0][2][0][RTW89_ACMA][10] = 62,
- [0][0][2][0][RTW89_FCC][12] = 70,
+ [0][0][2][0][RTW89_IC][10] = 64,
+ [0][0][2][0][RTW89_KCC][10] = 70,
+ [0][0][2][0][RTW89_ACMA][10] = 66,
+ [0][0][2][0][RTW89_CN][10] = 52,
+ [0][0][2][0][RTW89_UK][10] = 66,
+ [0][0][2][0][RTW89_FCC][12] = 72,
[0][0][2][0][RTW89_ETSI][12] = 66,
[0][0][2][0][RTW89_MKK][12] = 70,
- [0][0][2][0][RTW89_IC][12] = 66,
- [0][0][2][0][RTW89_ACMA][12] = 62,
- [0][0][2][0][RTW89_FCC][14] = 70,
+ [0][0][2][0][RTW89_IC][12] = 64,
+ [0][0][2][0][RTW89_KCC][12] = 66,
+ [0][0][2][0][RTW89_ACMA][12] = 66,
+ [0][0][2][0][RTW89_CN][12] = 52,
+ [0][0][2][0][RTW89_UK][12] = 66,
+ [0][0][2][0][RTW89_FCC][14] = 68,
[0][0][2][0][RTW89_ETSI][14] = 66,
[0][0][2][0][RTW89_MKK][14] = 70,
- [0][0][2][0][RTW89_IC][14] = 66,
- [0][0][2][0][RTW89_ACMA][14] = 62,
- [0][0][2][0][RTW89_FCC][15] = 66,
+ [0][0][2][0][RTW89_IC][14] = 64,
+ [0][0][2][0][RTW89_KCC][14] = 66,
+ [0][0][2][0][RTW89_ACMA][14] = 66,
+ [0][0][2][0][RTW89_CN][14] = 52,
+ [0][0][2][0][RTW89_UK][14] = 66,
+ [0][0][2][0][RTW89_FCC][15] = 70,
[0][0][2][0][RTW89_ETSI][15] = 66,
[0][0][2][0][RTW89_MKK][15] = 70,
[0][0][2][0][RTW89_IC][15] = 70,
- [0][0][2][0][RTW89_ACMA][15] = 62,
- [0][0][2][0][RTW89_FCC][17] = 70,
+ [0][0][2][0][RTW89_KCC][15] = 70,
+ [0][0][2][0][RTW89_ACMA][15] = 66,
+ [0][0][2][0][RTW89_CN][15] = 127,
+ [0][0][2][0][RTW89_UK][15] = 66,
+ [0][0][2][0][RTW89_FCC][17] = 72,
[0][0][2][0][RTW89_ETSI][17] = 66,
[0][0][2][0][RTW89_MKK][17] = 70,
- [0][0][2][0][RTW89_IC][17] = 70,
- [0][0][2][0][RTW89_ACMA][17] = 62,
- [0][0][2][0][RTW89_FCC][19] = 70,
+ [0][0][2][0][RTW89_IC][17] = 72,
+ [0][0][2][0][RTW89_KCC][17] = 70,
+ [0][0][2][0][RTW89_ACMA][17] = 66,
+ [0][0][2][0][RTW89_CN][17] = 127,
+ [0][0][2][0][RTW89_UK][17] = 66,
+ [0][0][2][0][RTW89_FCC][19] = 72,
[0][0][2][0][RTW89_ETSI][19] = 66,
[0][0][2][0][RTW89_MKK][19] = 70,
- [0][0][2][0][RTW89_IC][19] = 70,
- [0][0][2][0][RTW89_ACMA][19] = 62,
- [0][0][2][0][RTW89_FCC][21] = 70,
+ [0][0][2][0][RTW89_IC][19] = 72,
+ [0][0][2][0][RTW89_KCC][19] = 70,
+ [0][0][2][0][RTW89_ACMA][19] = 66,
+ [0][0][2][0][RTW89_CN][19] = 127,
+ [0][0][2][0][RTW89_UK][19] = 66,
+ [0][0][2][0][RTW89_FCC][21] = 72,
[0][0][2][0][RTW89_ETSI][21] = 66,
[0][0][2][0][RTW89_MKK][21] = 70,
- [0][0][2][0][RTW89_IC][21] = 70,
- [0][0][2][0][RTW89_ACMA][21] = 62,
- [0][0][2][0][RTW89_FCC][23] = 70,
+ [0][0][2][0][RTW89_IC][21] = 72,
+ [0][0][2][0][RTW89_KCC][21] = 70,
+ [0][0][2][0][RTW89_ACMA][21] = 66,
+ [0][0][2][0][RTW89_CN][21] = 127,
+ [0][0][2][0][RTW89_UK][21] = 66,
+ [0][0][2][0][RTW89_FCC][23] = 72,
[0][0][2][0][RTW89_ETSI][23] = 66,
[0][0][2][0][RTW89_MKK][23] = 70,
- [0][0][2][0][RTW89_IC][23] = 70,
- [0][0][2][0][RTW89_ACMA][23] = 62,
- [0][0][2][0][RTW89_FCC][25] = 70,
+ [0][0][2][0][RTW89_IC][23] = 72,
+ [0][0][2][0][RTW89_KCC][23] = 70,
+ [0][0][2][0][RTW89_ACMA][23] = 66,
+ [0][0][2][0][RTW89_CN][23] = 127,
+ [0][0][2][0][RTW89_UK][23] = 66,
+ [0][0][2][0][RTW89_FCC][25] = 72,
[0][0][2][0][RTW89_ETSI][25] = 66,
[0][0][2][0][RTW89_MKK][25] = 70,
[0][0][2][0][RTW89_IC][25] = 127,
+ [0][0][2][0][RTW89_KCC][25] = 70,
[0][0][2][0][RTW89_ACMA][25] = 127,
- [0][0][2][0][RTW89_FCC][27] = 70,
+ [0][0][2][0][RTW89_CN][25] = 127,
+ [0][0][2][0][RTW89_UK][25] = 66,
+ [0][0][2][0][RTW89_FCC][27] = 72,
[0][0][2][0][RTW89_ETSI][27] = 66,
[0][0][2][0][RTW89_MKK][27] = 70,
[0][0][2][0][RTW89_IC][27] = 127,
+ [0][0][2][0][RTW89_KCC][27] = 70,
[0][0][2][0][RTW89_ACMA][27] = 127,
- [0][0][2][0][RTW89_FCC][29] = 70,
+ [0][0][2][0][RTW89_CN][27] = 127,
+ [0][0][2][0][RTW89_UK][27] = 66,
+ [0][0][2][0][RTW89_FCC][29] = 72,
[0][0][2][0][RTW89_ETSI][29] = 66,
[0][0][2][0][RTW89_MKK][29] = 70,
[0][0][2][0][RTW89_IC][29] = 127,
+ [0][0][2][0][RTW89_KCC][29] = 70,
[0][0][2][0][RTW89_ACMA][29] = 127,
- [0][0][2][0][RTW89_FCC][31] = 70,
+ [0][0][2][0][RTW89_CN][29] = 127,
+ [0][0][2][0][RTW89_UK][29] = 66,
+ [0][0][2][0][RTW89_FCC][31] = 72,
[0][0][2][0][RTW89_ETSI][31] = 66,
[0][0][2][0][RTW89_MKK][31] = 70,
- [0][0][2][0][RTW89_IC][31] = 70,
- [0][0][2][0][RTW89_ACMA][31] = 62,
- [0][0][2][0][RTW89_FCC][33] = 70,
+ [0][0][2][0][RTW89_IC][31] = 72,
+ [0][0][2][0][RTW89_KCC][31] = 70,
+ [0][0][2][0][RTW89_ACMA][31] = 66,
+ [0][0][2][0][RTW89_CN][31] = 127,
+ [0][0][2][0][RTW89_UK][31] = 66,
+ [0][0][2][0][RTW89_FCC][33] = 72,
[0][0][2][0][RTW89_ETSI][33] = 66,
[0][0][2][0][RTW89_MKK][33] = 70,
- [0][0][2][0][RTW89_IC][33] = 70,
- [0][0][2][0][RTW89_ACMA][33] = 62,
- [0][0][2][0][RTW89_FCC][35] = 62,
+ [0][0][2][0][RTW89_IC][33] = 72,
+ [0][0][2][0][RTW89_KCC][33] = 70,
+ [0][0][2][0][RTW89_ACMA][33] = 66,
+ [0][0][2][0][RTW89_CN][33] = 127,
+ [0][0][2][0][RTW89_UK][33] = 66,
+ [0][0][2][0][RTW89_FCC][35] = 56,
[0][0][2][0][RTW89_ETSI][35] = 66,
[0][0][2][0][RTW89_MKK][35] = 70,
- [0][0][2][0][RTW89_IC][35] = 70,
- [0][0][2][0][RTW89_ACMA][35] = 62,
- [0][0][2][0][RTW89_FCC][37] = 70,
+ [0][0][2][0][RTW89_IC][35] = 56,
+ [0][0][2][0][RTW89_KCC][35] = 70,
+ [0][0][2][0][RTW89_ACMA][35] = 66,
+ [0][0][2][0][RTW89_CN][35] = 127,
+ [0][0][2][0][RTW89_UK][35] = 66,
+ [0][0][2][0][RTW89_FCC][37] = 72,
[0][0][2][0][RTW89_ETSI][37] = 127,
[0][0][2][0][RTW89_MKK][37] = 70,
- [0][0][2][0][RTW89_IC][37] = 70,
+ [0][0][2][0][RTW89_IC][37] = 72,
+ [0][0][2][0][RTW89_KCC][37] = 70,
[0][0][2][0][RTW89_ACMA][37] = 70,
- [0][0][2][0][RTW89_FCC][38] = 70,
+ [0][0][2][0][RTW89_CN][37] = 127,
+ [0][0][2][0][RTW89_UK][37] = 64,
+ [0][0][2][0][RTW89_FCC][38] = 72,
[0][0][2][0][RTW89_ETSI][38] = 30,
[0][0][2][0][RTW89_MKK][38] = 127,
- [0][0][2][0][RTW89_IC][38] = 70,
+ [0][0][2][0][RTW89_IC][38] = 72,
+ [0][0][2][0][RTW89_KCC][38] = 58,
[0][0][2][0][RTW89_ACMA][38] = 70,
- [0][0][2][0][RTW89_FCC][40] = 70,
+ [0][0][2][0][RTW89_CN][38] = 68,
+ [0][0][2][0][RTW89_UK][38] = 64,
+ [0][0][2][0][RTW89_FCC][40] = 72,
[0][0][2][0][RTW89_ETSI][40] = 30,
[0][0][2][0][RTW89_MKK][40] = 127,
- [0][0][2][0][RTW89_IC][40] = 70,
+ [0][0][2][0][RTW89_IC][40] = 72,
+ [0][0][2][0][RTW89_KCC][40] = 58,
[0][0][2][0][RTW89_ACMA][40] = 70,
- [0][0][2][0][RTW89_FCC][42] = 70,
+ [0][0][2][0][RTW89_CN][40] = 68,
+ [0][0][2][0][RTW89_UK][40] = 64,
+ [0][0][2][0][RTW89_FCC][42] = 72,
[0][0][2][0][RTW89_ETSI][42] = 30,
[0][0][2][0][RTW89_MKK][42] = 127,
- [0][0][2][0][RTW89_IC][42] = 70,
+ [0][0][2][0][RTW89_IC][42] = 72,
+ [0][0][2][0][RTW89_KCC][42] = 58,
[0][0][2][0][RTW89_ACMA][42] = 70,
- [0][0][2][0][RTW89_FCC][44] = 70,
+ [0][0][2][0][RTW89_CN][42] = 68,
+ [0][0][2][0][RTW89_UK][42] = 64,
+ [0][0][2][0][RTW89_FCC][44] = 72,
[0][0][2][0][RTW89_ETSI][44] = 30,
[0][0][2][0][RTW89_MKK][44] = 127,
- [0][0][2][0][RTW89_IC][44] = 70,
+ [0][0][2][0][RTW89_IC][44] = 72,
+ [0][0][2][0][RTW89_KCC][44] = 58,
[0][0][2][0][RTW89_ACMA][44] = 70,
- [0][0][2][0][RTW89_FCC][46] = 70,
+ [0][0][2][0][RTW89_CN][44] = 68,
+ [0][0][2][0][RTW89_UK][44] = 64,
+ [0][0][2][0][RTW89_FCC][46] = 72,
[0][0][2][0][RTW89_ETSI][46] = 30,
[0][0][2][0][RTW89_MKK][46] = 127,
- [0][0][2][0][RTW89_IC][46] = 70,
+ [0][0][2][0][RTW89_IC][46] = 72,
+ [0][0][2][0][RTW89_KCC][46] = 58,
[0][0][2][0][RTW89_ACMA][46] = 70,
- [0][0][2][0][RTW89_FCC][48] = 70,
+ [0][0][2][0][RTW89_CN][46] = 68,
+ [0][0][2][0][RTW89_UK][46] = 64,
+ [0][0][2][0][RTW89_FCC][48] = 72,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
[0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
- [0][0][2][0][RTW89_FCC][50] = 70,
+ [0][0][2][0][RTW89_CN][48] = 127,
+ [0][0][2][0][RTW89_UK][48] = 127,
+ [0][0][2][0][RTW89_FCC][50] = 72,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
[0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
- [0][0][2][0][RTW89_FCC][52] = 70,
+ [0][0][2][0][RTW89_CN][50] = 127,
+ [0][0][2][0][RTW89_UK][50] = 127,
+ [0][0][2][0][RTW89_FCC][52] = 72,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
[0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
- [0][1][2][0][RTW89_FCC][0] = 62,
+ [0][0][2][0][RTW89_CN][52] = 127,
+ [0][0][2][0][RTW89_UK][52] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 60,
[0][1][2][0][RTW89_ETSI][0] = 54,
[0][1][2][0][RTW89_MKK][0] = 54,
- [0][1][2][0][RTW89_IC][0] = 44,
- [0][1][2][0][RTW89_ACMA][0] = 50,
+ [0][1][2][0][RTW89_IC][0] = 36,
+ [0][1][2][0][RTW89_KCC][0] = 40,
+ [0][1][2][0][RTW89_ACMA][0] = 54,
+ [0][1][2][0][RTW89_CN][0] = 40,
+ [0][1][2][0][RTW89_UK][0] = 54,
[0][1][2][0][RTW89_FCC][2] = 62,
[0][1][2][0][RTW89_ETSI][2] = 54,
[0][1][2][0][RTW89_MKK][2] = 54,
- [0][1][2][0][RTW89_IC][2] = 44,
- [0][1][2][0][RTW89_ACMA][2] = 50,
+ [0][1][2][0][RTW89_IC][2] = 36,
+ [0][1][2][0][RTW89_KCC][2] = 40,
+ [0][1][2][0][RTW89_ACMA][2] = 54,
+ [0][1][2][0][RTW89_CN][2] = 40,
+ [0][1][2][0][RTW89_UK][2] = 54,
[0][1][2][0][RTW89_FCC][4] = 62,
[0][1][2][0][RTW89_ETSI][4] = 54,
[0][1][2][0][RTW89_MKK][4] = 54,
- [0][1][2][0][RTW89_IC][4] = 44,
- [0][1][2][0][RTW89_ACMA][4] = 50,
+ [0][1][2][0][RTW89_IC][4] = 36,
+ [0][1][2][0][RTW89_KCC][4] = 40,
+ [0][1][2][0][RTW89_ACMA][4] = 54,
+ [0][1][2][0][RTW89_CN][4] = 40,
+ [0][1][2][0][RTW89_UK][4] = 54,
[0][1][2][0][RTW89_FCC][6] = 62,
[0][1][2][0][RTW89_ETSI][6] = 54,
[0][1][2][0][RTW89_MKK][6] = 50,
- [0][1][2][0][RTW89_IC][6] = 44,
- [0][1][2][0][RTW89_ACMA][6] = 50,
+ [0][1][2][0][RTW89_IC][6] = 38,
+ [0][1][2][0][RTW89_KCC][6] = 64,
+ [0][1][2][0][RTW89_ACMA][6] = 54,
+ [0][1][2][0][RTW89_CN][6] = 40,
+ [0][1][2][0][RTW89_UK][6] = 54,
[0][1][2][0][RTW89_FCC][8] = 62,
[0][1][2][0][RTW89_ETSI][8] = 54,
[0][1][2][0][RTW89_MKK][8] = 42,
- [0][1][2][0][RTW89_IC][8] = 54,
- [0][1][2][0][RTW89_ACMA][8] = 50,
+ [0][1][2][0][RTW89_IC][8] = 52,
+ [0][1][2][0][RTW89_KCC][8] = 62,
+ [0][1][2][0][RTW89_ACMA][8] = 54,
+ [0][1][2][0][RTW89_CN][8] = 40,
+ [0][1][2][0][RTW89_UK][8] = 54,
[0][1][2][0][RTW89_FCC][10] = 62,
[0][1][2][0][RTW89_ETSI][10] = 54,
[0][1][2][0][RTW89_MKK][10] = 54,
- [0][1][2][0][RTW89_IC][10] = 54,
- [0][1][2][0][RTW89_ACMA][10] = 50,
+ [0][1][2][0][RTW89_IC][10] = 52,
+ [0][1][2][0][RTW89_KCC][10] = 62,
+ [0][1][2][0][RTW89_ACMA][10] = 54,
+ [0][1][2][0][RTW89_CN][10] = 40,
+ [0][1][2][0][RTW89_UK][10] = 54,
[0][1][2][0][RTW89_FCC][12] = 62,
[0][1][2][0][RTW89_ETSI][12] = 54,
[0][1][2][0][RTW89_MKK][12] = 54,
- [0][1][2][0][RTW89_IC][12] = 54,
- [0][1][2][0][RTW89_ACMA][12] = 50,
+ [0][1][2][0][RTW89_IC][12] = 52,
+ [0][1][2][0][RTW89_KCC][12] = 62,
+ [0][1][2][0][RTW89_ACMA][12] = 54,
+ [0][1][2][0][RTW89_CN][12] = 40,
+ [0][1][2][0][RTW89_UK][12] = 54,
[0][1][2][0][RTW89_FCC][14] = 62,
[0][1][2][0][RTW89_ETSI][14] = 54,
[0][1][2][0][RTW89_MKK][14] = 54,
- [0][1][2][0][RTW89_IC][14] = 54,
- [0][1][2][0][RTW89_ACMA][14] = 50,
+ [0][1][2][0][RTW89_IC][14] = 52,
+ [0][1][2][0][RTW89_KCC][14] = 62,
+ [0][1][2][0][RTW89_ACMA][14] = 54,
+ [0][1][2][0][RTW89_CN][14] = 40,
+ [0][1][2][0][RTW89_UK][14] = 54,
[0][1][2][0][RTW89_FCC][15] = 60,
[0][1][2][0][RTW89_ETSI][15] = 54,
[0][1][2][0][RTW89_MKK][15] = 68,
- [0][1][2][0][RTW89_IC][15] = 70,
- [0][1][2][0][RTW89_ACMA][15] = 50,
+ [0][1][2][0][RTW89_IC][15] = 60,
+ [0][1][2][0][RTW89_KCC][15] = 64,
+ [0][1][2][0][RTW89_ACMA][15] = 54,
+ [0][1][2][0][RTW89_CN][15] = 127,
+ [0][1][2][0][RTW89_UK][15] = 54,
[0][1][2][0][RTW89_FCC][17] = 62,
[0][1][2][0][RTW89_ETSI][17] = 54,
[0][1][2][0][RTW89_MKK][17] = 68,
- [0][1][2][0][RTW89_IC][17] = 70,
- [0][1][2][0][RTW89_ACMA][17] = 50,
+ [0][1][2][0][RTW89_IC][17] = 62,
+ [0][1][2][0][RTW89_KCC][17] = 64,
+ [0][1][2][0][RTW89_ACMA][17] = 54,
+ [0][1][2][0][RTW89_CN][17] = 127,
+ [0][1][2][0][RTW89_UK][17] = 54,
[0][1][2][0][RTW89_FCC][19] = 62,
[0][1][2][0][RTW89_ETSI][19] = 54,
[0][1][2][0][RTW89_MKK][19] = 68,
- [0][1][2][0][RTW89_IC][19] = 70,
- [0][1][2][0][RTW89_ACMA][19] = 50,
+ [0][1][2][0][RTW89_IC][19] = 62,
+ [0][1][2][0][RTW89_KCC][19] = 64,
+ [0][1][2][0][RTW89_ACMA][19] = 54,
+ [0][1][2][0][RTW89_CN][19] = 127,
+ [0][1][2][0][RTW89_UK][19] = 54,
[0][1][2][0][RTW89_FCC][21] = 62,
[0][1][2][0][RTW89_ETSI][21] = 54,
[0][1][2][0][RTW89_MKK][21] = 68,
- [0][1][2][0][RTW89_IC][21] = 70,
- [0][1][2][0][RTW89_ACMA][21] = 50,
+ [0][1][2][0][RTW89_IC][21] = 62,
+ [0][1][2][0][RTW89_KCC][21] = 64,
+ [0][1][2][0][RTW89_ACMA][21] = 54,
+ [0][1][2][0][RTW89_CN][21] = 127,
+ [0][1][2][0][RTW89_UK][21] = 54,
[0][1][2][0][RTW89_FCC][23] = 62,
[0][1][2][0][RTW89_ETSI][23] = 54,
[0][1][2][0][RTW89_MKK][23] = 68,
- [0][1][2][0][RTW89_IC][23] = 70,
- [0][1][2][0][RTW89_ACMA][23] = 50,
+ [0][1][2][0][RTW89_IC][23] = 62,
+ [0][1][2][0][RTW89_KCC][23] = 64,
+ [0][1][2][0][RTW89_ACMA][23] = 54,
+ [0][1][2][0][RTW89_CN][23] = 127,
+ [0][1][2][0][RTW89_UK][23] = 54,
[0][1][2][0][RTW89_FCC][25] = 62,
[0][1][2][0][RTW89_ETSI][25] = 54,
[0][1][2][0][RTW89_MKK][25] = 68,
[0][1][2][0][RTW89_IC][25] = 127,
+ [0][1][2][0][RTW89_KCC][25] = 64,
[0][1][2][0][RTW89_ACMA][25] = 127,
+ [0][1][2][0][RTW89_CN][25] = 127,
+ [0][1][2][0][RTW89_UK][25] = 54,
[0][1][2][0][RTW89_FCC][27] = 62,
[0][1][2][0][RTW89_ETSI][27] = 54,
[0][1][2][0][RTW89_MKK][27] = 68,
[0][1][2][0][RTW89_IC][27] = 127,
+ [0][1][2][0][RTW89_KCC][27] = 64,
[0][1][2][0][RTW89_ACMA][27] = 127,
+ [0][1][2][0][RTW89_CN][27] = 127,
+ [0][1][2][0][RTW89_UK][27] = 54,
[0][1][2][0][RTW89_FCC][29] = 62,
[0][1][2][0][RTW89_ETSI][29] = 54,
[0][1][2][0][RTW89_MKK][29] = 68,
[0][1][2][0][RTW89_IC][29] = 127,
+ [0][1][2][0][RTW89_KCC][29] = 64,
[0][1][2][0][RTW89_ACMA][29] = 127,
+ [0][1][2][0][RTW89_CN][29] = 127,
+ [0][1][2][0][RTW89_UK][29] = 54,
[0][1][2][0][RTW89_FCC][31] = 62,
[0][1][2][0][RTW89_ETSI][31] = 54,
[0][1][2][0][RTW89_MKK][31] = 68,
- [0][1][2][0][RTW89_IC][31] = 70,
- [0][1][2][0][RTW89_ACMA][31] = 50,
+ [0][1][2][0][RTW89_IC][31] = 62,
+ [0][1][2][0][RTW89_KCC][31] = 62,
+ [0][1][2][0][RTW89_ACMA][31] = 54,
+ [0][1][2][0][RTW89_CN][31] = 127,
+ [0][1][2][0][RTW89_UK][31] = 54,
[0][1][2][0][RTW89_FCC][33] = 62,
[0][1][2][0][RTW89_ETSI][33] = 54,
[0][1][2][0][RTW89_MKK][33] = 68,
- [0][1][2][0][RTW89_IC][33] = 70,
- [0][1][2][0][RTW89_ACMA][33] = 50,
- [0][1][2][0][RTW89_FCC][35] = 58,
+ [0][1][2][0][RTW89_IC][33] = 62,
+ [0][1][2][0][RTW89_KCC][33] = 62,
+ [0][1][2][0][RTW89_ACMA][33] = 54,
+ [0][1][2][0][RTW89_CN][33] = 127,
+ [0][1][2][0][RTW89_UK][33] = 54,
+ [0][1][2][0][RTW89_FCC][35] = 46,
[0][1][2][0][RTW89_ETSI][35] = 54,
[0][1][2][0][RTW89_MKK][35] = 68,
- [0][1][2][0][RTW89_IC][35] = 68,
- [0][1][2][0][RTW89_ACMA][35] = 50,
- [0][1][2][0][RTW89_FCC][37] = 62,
+ [0][1][2][0][RTW89_IC][35] = 46,
+ [0][1][2][0][RTW89_KCC][35] = 62,
+ [0][1][2][0][RTW89_ACMA][35] = 54,
+ [0][1][2][0][RTW89_CN][35] = 127,
+ [0][1][2][0][RTW89_UK][35] = 54,
+ [0][1][2][0][RTW89_FCC][37] = 64,
[0][1][2][0][RTW89_ETSI][37] = 127,
[0][1][2][0][RTW89_MKK][37] = 68,
- [0][1][2][0][RTW89_IC][37] = 70,
- [0][1][2][0][RTW89_ACMA][37] = 70,
- [0][1][2][0][RTW89_FCC][38] = 70,
+ [0][1][2][0][RTW89_IC][37] = 64,
+ [0][1][2][0][RTW89_KCC][37] = 62,
+ [0][1][2][0][RTW89_ACMA][37] = 64,
+ [0][1][2][0][RTW89_CN][37] = 127,
+ [0][1][2][0][RTW89_UK][37] = 52,
+ [0][1][2][0][RTW89_FCC][38] = 72,
[0][1][2][0][RTW89_ETSI][38] = 18,
[0][1][2][0][RTW89_MKK][38] = 127,
- [0][1][2][0][RTW89_IC][38] = 70,
+ [0][1][2][0][RTW89_IC][38] = 72,
+ [0][1][2][0][RTW89_KCC][38] = 56,
[0][1][2][0][RTW89_ACMA][38] = 70,
- [0][1][2][0][RTW89_FCC][40] = 70,
+ [0][1][2][0][RTW89_CN][38] = 68,
+ [0][1][2][0][RTW89_UK][38] = 52,
+ [0][1][2][0][RTW89_FCC][40] = 72,
[0][1][2][0][RTW89_ETSI][40] = 18,
[0][1][2][0][RTW89_MKK][40] = 127,
- [0][1][2][0][RTW89_IC][40] = 70,
+ [0][1][2][0][RTW89_IC][40] = 72,
+ [0][1][2][0][RTW89_KCC][40] = 56,
[0][1][2][0][RTW89_ACMA][40] = 70,
- [0][1][2][0][RTW89_FCC][42] = 70,
+ [0][1][2][0][RTW89_CN][40] = 68,
+ [0][1][2][0][RTW89_UK][40] = 52,
+ [0][1][2][0][RTW89_FCC][42] = 72,
[0][1][2][0][RTW89_ETSI][42] = 18,
[0][1][2][0][RTW89_MKK][42] = 127,
- [0][1][2][0][RTW89_IC][42] = 70,
+ [0][1][2][0][RTW89_IC][42] = 72,
+ [0][1][2][0][RTW89_KCC][42] = 56,
[0][1][2][0][RTW89_ACMA][42] = 70,
- [0][1][2][0][RTW89_FCC][44] = 70,
+ [0][1][2][0][RTW89_CN][42] = 68,
+ [0][1][2][0][RTW89_UK][42] = 52,
+ [0][1][2][0][RTW89_FCC][44] = 72,
[0][1][2][0][RTW89_ETSI][44] = 18,
[0][1][2][0][RTW89_MKK][44] = 127,
- [0][1][2][0][RTW89_IC][44] = 70,
+ [0][1][2][0][RTW89_IC][44] = 72,
+ [0][1][2][0][RTW89_KCC][44] = 56,
[0][1][2][0][RTW89_ACMA][44] = 70,
- [0][1][2][0][RTW89_FCC][46] = 70,
+ [0][1][2][0][RTW89_CN][44] = 68,
+ [0][1][2][0][RTW89_UK][44] = 52,
+ [0][1][2][0][RTW89_FCC][46] = 72,
[0][1][2][0][RTW89_ETSI][46] = 18,
[0][1][2][0][RTW89_MKK][46] = 127,
- [0][1][2][0][RTW89_IC][46] = 70,
+ [0][1][2][0][RTW89_IC][46] = 72,
+ [0][1][2][0][RTW89_KCC][46] = 56,
[0][1][2][0][RTW89_ACMA][46] = 70,
- [0][1][2][0][RTW89_FCC][48] = 50,
+ [0][1][2][0][RTW89_CN][46] = 68,
+ [0][1][2][0][RTW89_UK][46] = 52,
+ [0][1][2][0][RTW89_FCC][48] = 48,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
[0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
+ [0][1][2][0][RTW89_CN][48] = 127,
+ [0][1][2][0][RTW89_UK][48] = 127,
[0][1][2][0][RTW89_FCC][50] = 50,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
[0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
- [0][1][2][0][RTW89_FCC][52] = 50,
+ [0][1][2][0][RTW89_CN][50] = 127,
+ [0][1][2][0][RTW89_UK][50] = 127,
+ [0][1][2][0][RTW89_FCC][52] = 48,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
[0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
+ [0][1][2][0][RTW89_CN][52] = 127,
+ [0][1][2][0][RTW89_UK][52] = 127,
[0][1][2][1][RTW89_FCC][0] = 60,
[0][1][2][1][RTW89_ETSI][0] = 40,
[0][1][2][1][RTW89_MKK][0] = 54,
- [0][1][2][1][RTW89_IC][0] = 42,
- [0][1][2][1][RTW89_ACMA][0] = 38,
- [0][1][2][1][RTW89_FCC][2] = 60,
+ [0][1][2][1][RTW89_IC][0] = 40,
+ [0][1][2][1][RTW89_KCC][0] = 40,
+ [0][1][2][1][RTW89_ACMA][0] = 40,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 40,
+ [0][1][2][1][RTW89_FCC][2] = 62,
[0][1][2][1][RTW89_ETSI][2] = 40,
[0][1][2][1][RTW89_MKK][2] = 54,
- [0][1][2][1][RTW89_IC][2] = 42,
- [0][1][2][1][RTW89_ACMA][2] = 38,
- [0][1][2][1][RTW89_FCC][4] = 60,
+ [0][1][2][1][RTW89_IC][2] = 40,
+ [0][1][2][1][RTW89_KCC][2] = 40,
+ [0][1][2][1][RTW89_ACMA][2] = 40,
+ [0][1][2][1][RTW89_CN][2] = 36,
+ [0][1][2][1][RTW89_UK][2] = 40,
+ [0][1][2][1][RTW89_FCC][4] = 62,
[0][1][2][1][RTW89_ETSI][4] = 40,
[0][1][2][1][RTW89_MKK][4] = 54,
- [0][1][2][1][RTW89_IC][4] = 42,
- [0][1][2][1][RTW89_ACMA][4] = 38,
- [0][1][2][1][RTW89_FCC][6] = 60,
+ [0][1][2][1][RTW89_IC][4] = 40,
+ [0][1][2][1][RTW89_KCC][4] = 40,
+ [0][1][2][1][RTW89_ACMA][4] = 40,
+ [0][1][2][1][RTW89_CN][4] = 36,
+ [0][1][2][1][RTW89_UK][4] = 40,
+ [0][1][2][1][RTW89_FCC][6] = 62,
[0][1][2][1][RTW89_ETSI][6] = 40,
[0][1][2][1][RTW89_MKK][6] = 50,
- [0][1][2][1][RTW89_IC][6] = 42,
- [0][1][2][1][RTW89_ACMA][6] = 38,
- [0][1][2][1][RTW89_FCC][8] = 60,
+ [0][1][2][1][RTW89_IC][6] = 40,
+ [0][1][2][1][RTW89_KCC][6] = 64,
+ [0][1][2][1][RTW89_ACMA][6] = 40,
+ [0][1][2][1][RTW89_CN][6] = 36,
+ [0][1][2][1][RTW89_UK][6] = 40,
+ [0][1][2][1][RTW89_FCC][8] = 62,
[0][1][2][1][RTW89_ETSI][8] = 40,
[0][1][2][1][RTW89_MKK][8] = 42,
- [0][1][2][1][RTW89_IC][8] = 42,
- [0][1][2][1][RTW89_ACMA][8] = 38,
- [0][1][2][1][RTW89_FCC][10] = 60,
+ [0][1][2][1][RTW89_IC][8] = 40,
+ [0][1][2][1][RTW89_KCC][8] = 62,
+ [0][1][2][1][RTW89_ACMA][8] = 40,
+ [0][1][2][1][RTW89_CN][8] = 36,
+ [0][1][2][1][RTW89_UK][8] = 40,
+ [0][1][2][1][RTW89_FCC][10] = 62,
[0][1][2][1][RTW89_ETSI][10] = 40,
- [0][1][2][1][RTW89_MKK][10] = 66,
- [0][1][2][1][RTW89_IC][10] = 42,
- [0][1][2][1][RTW89_ACMA][10] = 38,
- [0][1][2][1][RTW89_FCC][12] = 60,
+ [0][1][2][1][RTW89_MKK][10] = 54,
+ [0][1][2][1][RTW89_IC][10] = 40,
+ [0][1][2][1][RTW89_KCC][10] = 62,
+ [0][1][2][1][RTW89_ACMA][10] = 40,
+ [0][1][2][1][RTW89_CN][10] = 36,
+ [0][1][2][1][RTW89_UK][10] = 40,
+ [0][1][2][1][RTW89_FCC][12] = 62,
[0][1][2][1][RTW89_ETSI][12] = 40,
- [0][1][2][1][RTW89_MKK][12] = 66,
- [0][1][2][1][RTW89_IC][12] = 42,
- [0][1][2][1][RTW89_ACMA][12] = 38,
- [0][1][2][1][RTW89_FCC][14] = 60,
+ [0][1][2][1][RTW89_MKK][12] = 54,
+ [0][1][2][1][RTW89_IC][12] = 40,
+ [0][1][2][1][RTW89_KCC][12] = 62,
+ [0][1][2][1][RTW89_ACMA][12] = 40,
+ [0][1][2][1][RTW89_CN][12] = 36,
+ [0][1][2][1][RTW89_UK][12] = 40,
+ [0][1][2][1][RTW89_FCC][14] = 62,
[0][1][2][1][RTW89_ETSI][14] = 40,
- [0][1][2][1][RTW89_MKK][14] = 66,
- [0][1][2][1][RTW89_IC][14] = 42,
- [0][1][2][1][RTW89_ACMA][14] = 38,
+ [0][1][2][1][RTW89_MKK][14] = 54,
+ [0][1][2][1][RTW89_IC][14] = 40,
+ [0][1][2][1][RTW89_KCC][14] = 62,
+ [0][1][2][1][RTW89_ACMA][14] = 40,
+ [0][1][2][1][RTW89_CN][14] = 36,
+ [0][1][2][1][RTW89_UK][14] = 40,
[0][1][2][1][RTW89_FCC][15] = 60,
[0][1][2][1][RTW89_ETSI][15] = 40,
[0][1][2][1][RTW89_MKK][15] = 68,
- [0][1][2][1][RTW89_IC][15] = 70,
- [0][1][2][1][RTW89_ACMA][15] = 38,
- [0][1][2][1][RTW89_FCC][17] = 60,
+ [0][1][2][1][RTW89_IC][15] = 60,
+ [0][1][2][1][RTW89_KCC][15] = 64,
+ [0][1][2][1][RTW89_ACMA][15] = 40,
+ [0][1][2][1][RTW89_CN][15] = 127,
+ [0][1][2][1][RTW89_UK][15] = 40,
+ [0][1][2][1][RTW89_FCC][17] = 62,
[0][1][2][1][RTW89_ETSI][17] = 40,
[0][1][2][1][RTW89_MKK][17] = 68,
- [0][1][2][1][RTW89_IC][17] = 70,
- [0][1][2][1][RTW89_ACMA][17] = 38,
- [0][1][2][1][RTW89_FCC][19] = 60,
+ [0][1][2][1][RTW89_IC][17] = 62,
+ [0][1][2][1][RTW89_KCC][17] = 64,
+ [0][1][2][1][RTW89_ACMA][17] = 40,
+ [0][1][2][1][RTW89_CN][17] = 127,
+ [0][1][2][1][RTW89_UK][17] = 40,
+ [0][1][2][1][RTW89_FCC][19] = 62,
[0][1][2][1][RTW89_ETSI][19] = 40,
[0][1][2][1][RTW89_MKK][19] = 68,
- [0][1][2][1][RTW89_IC][19] = 70,
- [0][1][2][1][RTW89_ACMA][19] = 38,
- [0][1][2][1][RTW89_FCC][21] = 60,
+ [0][1][2][1][RTW89_IC][19] = 62,
+ [0][1][2][1][RTW89_KCC][19] = 64,
+ [0][1][2][1][RTW89_ACMA][19] = 40,
+ [0][1][2][1][RTW89_CN][19] = 127,
+ [0][1][2][1][RTW89_UK][19] = 40,
+ [0][1][2][1][RTW89_FCC][21] = 62,
[0][1][2][1][RTW89_ETSI][21] = 40,
[0][1][2][1][RTW89_MKK][21] = 68,
- [0][1][2][1][RTW89_IC][21] = 70,
- [0][1][2][1][RTW89_ACMA][21] = 38,
- [0][1][2][1][RTW89_FCC][23] = 60,
+ [0][1][2][1][RTW89_IC][21] = 62,
+ [0][1][2][1][RTW89_KCC][21] = 64,
+ [0][1][2][1][RTW89_ACMA][21] = 40,
+ [0][1][2][1][RTW89_CN][21] = 127,
+ [0][1][2][1][RTW89_UK][21] = 40,
+ [0][1][2][1][RTW89_FCC][23] = 62,
[0][1][2][1][RTW89_ETSI][23] = 40,
[0][1][2][1][RTW89_MKK][23] = 68,
- [0][1][2][1][RTW89_IC][23] = 70,
- [0][1][2][1][RTW89_ACMA][23] = 38,
- [0][1][2][1][RTW89_FCC][25] = 58,
+ [0][1][2][1][RTW89_IC][23] = 62,
+ [0][1][2][1][RTW89_KCC][23] = 64,
+ [0][1][2][1][RTW89_ACMA][23] = 40,
+ [0][1][2][1][RTW89_CN][23] = 127,
+ [0][1][2][1][RTW89_UK][23] = 40,
+ [0][1][2][1][RTW89_FCC][25] = 46,
[0][1][2][1][RTW89_ETSI][25] = 40,
[0][1][2][1][RTW89_MKK][25] = 68,
[0][1][2][1][RTW89_IC][25] = 127,
+ [0][1][2][1][RTW89_KCC][25] = 64,
[0][1][2][1][RTW89_ACMA][25] = 127,
- [0][1][2][1][RTW89_FCC][27] = 58,
+ [0][1][2][1][RTW89_CN][25] = 127,
+ [0][1][2][1][RTW89_UK][25] = 40,
+ [0][1][2][1][RTW89_FCC][27] = 46,
[0][1][2][1][RTW89_ETSI][27] = 40,
[0][1][2][1][RTW89_MKK][27] = 68,
[0][1][2][1][RTW89_IC][27] = 127,
+ [0][1][2][1][RTW89_KCC][27] = 64,
[0][1][2][1][RTW89_ACMA][27] = 127,
- [0][1][2][1][RTW89_FCC][29] = 58,
+ [0][1][2][1][RTW89_CN][27] = 127,
+ [0][1][2][1][RTW89_UK][27] = 40,
+ [0][1][2][1][RTW89_FCC][29] = 46,
[0][1][2][1][RTW89_ETSI][29] = 40,
[0][1][2][1][RTW89_MKK][29] = 68,
[0][1][2][1][RTW89_IC][29] = 127,
+ [0][1][2][1][RTW89_KCC][29] = 64,
[0][1][2][1][RTW89_ACMA][29] = 127,
- [0][1][2][1][RTW89_FCC][31] = 58,
+ [0][1][2][1][RTW89_CN][29] = 127,
+ [0][1][2][1][RTW89_UK][29] = 40,
+ [0][1][2][1][RTW89_FCC][31] = 46,
[0][1][2][1][RTW89_ETSI][31] = 40,
[0][1][2][1][RTW89_MKK][31] = 68,
- [0][1][2][1][RTW89_IC][31] = 68,
- [0][1][2][1][RTW89_ACMA][31] = 38,
- [0][1][2][1][RTW89_FCC][33] = 58,
+ [0][1][2][1][RTW89_IC][31] = 46,
+ [0][1][2][1][RTW89_KCC][31] = 62,
+ [0][1][2][1][RTW89_ACMA][31] = 40,
+ [0][1][2][1][RTW89_CN][31] = 127,
+ [0][1][2][1][RTW89_UK][31] = 40,
+ [0][1][2][1][RTW89_FCC][33] = 46,
[0][1][2][1][RTW89_ETSI][33] = 40,
[0][1][2][1][RTW89_MKK][33] = 68,
- [0][1][2][1][RTW89_IC][33] = 68,
- [0][1][2][1][RTW89_ACMA][33] = 38,
- [0][1][2][1][RTW89_FCC][35] = 58,
+ [0][1][2][1][RTW89_IC][33] = 46,
+ [0][1][2][1][RTW89_KCC][33] = 62,
+ [0][1][2][1][RTW89_ACMA][33] = 40,
+ [0][1][2][1][RTW89_CN][33] = 127,
+ [0][1][2][1][RTW89_UK][33] = 40,
+ [0][1][2][1][RTW89_FCC][35] = 46,
[0][1][2][1][RTW89_ETSI][35] = 40,
[0][1][2][1][RTW89_MKK][35] = 68,
- [0][1][2][1][RTW89_IC][35] = 68,
- [0][1][2][1][RTW89_ACMA][35] = 38,
- [0][1][2][1][RTW89_FCC][37] = 60,
+ [0][1][2][1][RTW89_IC][35] = 46,
+ [0][1][2][1][RTW89_KCC][35] = 62,
+ [0][1][2][1][RTW89_ACMA][35] = 40,
+ [0][1][2][1][RTW89_CN][35] = 127,
+ [0][1][2][1][RTW89_UK][35] = 40,
+ [0][1][2][1][RTW89_FCC][37] = 64,
[0][1][2][1][RTW89_ETSI][37] = 127,
[0][1][2][1][RTW89_MKK][37] = 68,
- [0][1][2][1][RTW89_IC][37] = 70,
- [0][1][2][1][RTW89_ACMA][37] = 70,
- [0][1][2][1][RTW89_FCC][38] = 70,
+ [0][1][2][1][RTW89_IC][37] = 64,
+ [0][1][2][1][RTW89_KCC][37] = 62,
+ [0][1][2][1][RTW89_ACMA][37] = 64,
+ [0][1][2][1][RTW89_CN][37] = 127,
+ [0][1][2][1][RTW89_UK][37] = 40,
+ [0][1][2][1][RTW89_FCC][38] = 72,
[0][1][2][1][RTW89_ETSI][38] = 6,
[0][1][2][1][RTW89_MKK][38] = 127,
- [0][1][2][1][RTW89_IC][38] = 70,
+ [0][1][2][1][RTW89_IC][38] = 72,
+ [0][1][2][1][RTW89_KCC][38] = 56,
[0][1][2][1][RTW89_ACMA][38] = 70,
- [0][1][2][1][RTW89_FCC][40] = 70,
+ [0][1][2][1][RTW89_CN][38] = 60,
+ [0][1][2][1][RTW89_UK][38] = 40,
+ [0][1][2][1][RTW89_FCC][40] = 72,
[0][1][2][1][RTW89_ETSI][40] = 6,
[0][1][2][1][RTW89_MKK][40] = 127,
- [0][1][2][1][RTW89_IC][40] = 70,
+ [0][1][2][1][RTW89_IC][40] = 72,
+ [0][1][2][1][RTW89_KCC][40] = 56,
[0][1][2][1][RTW89_ACMA][40] = 70,
- [0][1][2][1][RTW89_FCC][42] = 70,
+ [0][1][2][1][RTW89_CN][40] = 60,
+ [0][1][2][1][RTW89_UK][40] = 40,
+ [0][1][2][1][RTW89_FCC][42] = 72,
[0][1][2][1][RTW89_ETSI][42] = 6,
[0][1][2][1][RTW89_MKK][42] = 127,
- [0][1][2][1][RTW89_IC][42] = 70,
+ [0][1][2][1][RTW89_IC][42] = 72,
+ [0][1][2][1][RTW89_KCC][42] = 56,
[0][1][2][1][RTW89_ACMA][42] = 70,
- [0][1][2][1][RTW89_FCC][44] = 70,
+ [0][1][2][1][RTW89_CN][42] = 60,
+ [0][1][2][1][RTW89_UK][42] = 40,
+ [0][1][2][1][RTW89_FCC][44] = 72,
[0][1][2][1][RTW89_ETSI][44] = 6,
[0][1][2][1][RTW89_MKK][44] = 127,
- [0][1][2][1][RTW89_IC][44] = 70,
+ [0][1][2][1][RTW89_IC][44] = 72,
+ [0][1][2][1][RTW89_KCC][44] = 56,
[0][1][2][1][RTW89_ACMA][44] = 70,
- [0][1][2][1][RTW89_FCC][46] = 70,
+ [0][1][2][1][RTW89_CN][44] = 54,
+ [0][1][2][1][RTW89_UK][44] = 40,
+ [0][1][2][1][RTW89_FCC][46] = 72,
[0][1][2][1][RTW89_ETSI][46] = 6,
[0][1][2][1][RTW89_MKK][46] = 127,
- [0][1][2][1][RTW89_IC][46] = 70,
+ [0][1][2][1][RTW89_IC][46] = 72,
+ [0][1][2][1][RTW89_KCC][46] = 56,
[0][1][2][1][RTW89_ACMA][46] = 70,
- [0][1][2][1][RTW89_FCC][48] = 50,
+ [0][1][2][1][RTW89_CN][46] = 54,
+ [0][1][2][1][RTW89_UK][46] = 40,
+ [0][1][2][1][RTW89_FCC][48] = 48,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
[0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
+ [0][1][2][1][RTW89_CN][48] = 127,
+ [0][1][2][1][RTW89_UK][48] = 127,
[0][1][2][1][RTW89_FCC][50] = 50,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
[0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
- [0][1][2][1][RTW89_FCC][52] = 50,
+ [0][1][2][1][RTW89_CN][50] = 127,
+ [0][1][2][1][RTW89_UK][50] = 127,
+ [0][1][2][1][RTW89_FCC][52] = 48,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
[0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
- [1][0][2][0][RTW89_FCC][1] = 58,
+ [0][1][2][1][RTW89_CN][52] = 127,
+ [0][1][2][1][RTW89_UK][52] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 64,
[1][0][2][0][RTW89_ETSI][1] = 66,
[1][0][2][0][RTW89_MKK][1] = 66,
- [1][0][2][0][RTW89_IC][1] = 66,
+ [1][0][2][0][RTW89_IC][1] = 62,
+ [1][0][2][0][RTW89_KCC][1] = 66,
[1][0][2][0][RTW89_ACMA][1] = 66,
+ [1][0][2][0][RTW89_CN][1] = 54,
+ [1][0][2][0][RTW89_UK][1] = 66,
[1][0][2][0][RTW89_FCC][5] = 68,
[1][0][2][0][RTW89_ETSI][5] = 66,
[1][0][2][0][RTW89_MKK][5] = 66,
- [1][0][2][0][RTW89_IC][5] = 66,
+ [1][0][2][0][RTW89_IC][5] = 64,
+ [1][0][2][0][RTW89_KCC][5] = 54,
[1][0][2][0][RTW89_ACMA][5] = 66,
+ [1][0][2][0][RTW89_CN][5] = 54,
+ [1][0][2][0][RTW89_UK][5] = 66,
[1][0][2][0][RTW89_FCC][9] = 68,
[1][0][2][0][RTW89_ETSI][9] = 66,
[1][0][2][0][RTW89_MKK][9] = 66,
- [1][0][2][0][RTW89_IC][9] = 66,
+ [1][0][2][0][RTW89_IC][9] = 64,
+ [1][0][2][0][RTW89_KCC][9] = 66,
[1][0][2][0][RTW89_ACMA][9] = 66,
- [1][0][2][0][RTW89_FCC][13] = 58,
+ [1][0][2][0][RTW89_CN][9] = 54,
+ [1][0][2][0][RTW89_UK][9] = 66,
+ [1][0][2][0][RTW89_FCC][13] = 60,
[1][0][2][0][RTW89_ETSI][13] = 66,
[1][0][2][0][RTW89_MKK][13] = 66,
- [1][0][2][0][RTW89_IC][13] = 66,
+ [1][0][2][0][RTW89_IC][13] = 60,
+ [1][0][2][0][RTW89_KCC][13] = 52,
[1][0][2][0][RTW89_ACMA][13] = 66,
- [1][0][2][0][RTW89_FCC][16] = 56,
+ [1][0][2][0][RTW89_CN][13] = 54,
+ [1][0][2][0][RTW89_UK][13] = 66,
+ [1][0][2][0][RTW89_FCC][16] = 64,
[1][0][2][0][RTW89_ETSI][16] = 66,
[1][0][2][0][RTW89_MKK][16] = 66,
- [1][0][2][0][RTW89_IC][16] = 66,
+ [1][0][2][0][RTW89_IC][16] = 64,
+ [1][0][2][0][RTW89_KCC][16] = 56,
[1][0][2][0][RTW89_ACMA][16] = 66,
+ [1][0][2][0][RTW89_CN][16] = 127,
+ [1][0][2][0][RTW89_UK][16] = 66,
[1][0][2][0][RTW89_FCC][20] = 68,
[1][0][2][0][RTW89_ETSI][20] = 66,
[1][0][2][0][RTW89_MKK][20] = 66,
- [1][0][2][0][RTW89_IC][20] = 66,
+ [1][0][2][0][RTW89_IC][20] = 68,
+ [1][0][2][0][RTW89_KCC][20] = 56,
[1][0][2][0][RTW89_ACMA][20] = 66,
+ [1][0][2][0][RTW89_CN][20] = 127,
+ [1][0][2][0][RTW89_UK][20] = 66,
[1][0][2][0][RTW89_FCC][24] = 68,
[1][0][2][0][RTW89_ETSI][24] = 66,
[1][0][2][0][RTW89_MKK][24] = 66,
[1][0][2][0][RTW89_IC][24] = 127,
+ [1][0][2][0][RTW89_KCC][24] = 56,
[1][0][2][0][RTW89_ACMA][24] = 127,
+ [1][0][2][0][RTW89_CN][24] = 127,
+ [1][0][2][0][RTW89_UK][24] = 66,
[1][0][2][0][RTW89_FCC][28] = 68,
[1][0][2][0][RTW89_ETSI][28] = 66,
[1][0][2][0][RTW89_MKK][28] = 66,
[1][0][2][0][RTW89_IC][28] = 127,
+ [1][0][2][0][RTW89_KCC][28] = 66,
[1][0][2][0][RTW89_ACMA][28] = 127,
- [1][0][2][0][RTW89_FCC][32] = 68,
+ [1][0][2][0][RTW89_CN][28] = 127,
+ [1][0][2][0][RTW89_UK][28] = 66,
+ [1][0][2][0][RTW89_FCC][32] = 62,
[1][0][2][0][RTW89_ETSI][32] = 66,
[1][0][2][0][RTW89_MKK][32] = 66,
- [1][0][2][0][RTW89_IC][32] = 66,
+ [1][0][2][0][RTW89_IC][32] = 62,
+ [1][0][2][0][RTW89_KCC][32] = 66,
[1][0][2][0][RTW89_ACMA][32] = 66,
+ [1][0][2][0][RTW89_CN][32] = 127,
+ [1][0][2][0][RTW89_UK][32] = 66,
[1][0][2][0][RTW89_FCC][36] = 68,
[1][0][2][0][RTW89_ETSI][36] = 127,
[1][0][2][0][RTW89_MKK][36] = 66,
- [1][0][2][0][RTW89_IC][36] = 66,
+ [1][0][2][0][RTW89_IC][36] = 68,
+ [1][0][2][0][RTW89_KCC][36] = 66,
[1][0][2][0][RTW89_ACMA][36] = 66,
+ [1][0][2][0][RTW89_CN][36] = 127,
+ [1][0][2][0][RTW89_UK][36] = 64,
[1][0][2][0][RTW89_FCC][39] = 68,
[1][0][2][0][RTW89_ETSI][39] = 30,
[1][0][2][0][RTW89_MKK][39] = 127,
- [1][0][2][0][RTW89_IC][39] = 66,
+ [1][0][2][0][RTW89_IC][39] = 68,
+ [1][0][2][0][RTW89_KCC][39] = 66,
[1][0][2][0][RTW89_ACMA][39] = 66,
+ [1][0][2][0][RTW89_CN][39] = 62,
+ [1][0][2][0][RTW89_UK][39] = 64,
[1][0][2][0][RTW89_FCC][43] = 68,
[1][0][2][0][RTW89_ETSI][43] = 30,
[1][0][2][0][RTW89_MKK][43] = 127,
- [1][0][2][0][RTW89_IC][43] = 66,
+ [1][0][2][0][RTW89_IC][43] = 68,
+ [1][0][2][0][RTW89_KCC][43] = 66,
[1][0][2][0][RTW89_ACMA][43] = 66,
+ [1][0][2][0][RTW89_CN][43] = 66,
+ [1][0][2][0][RTW89_UK][43] = 64,
[1][0][2][0][RTW89_FCC][47] = 68,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
[1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
+ [1][0][2][0][RTW89_CN][47] = 127,
+ [1][0][2][0][RTW89_UK][47] = 127,
[1][0][2][0][RTW89_FCC][51] = 68,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
[1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
+ [1][0][2][0][RTW89_CN][51] = 127,
+ [1][0][2][0][RTW89_UK][51] = 127,
[1][1][2][0][RTW89_FCC][1] = 54,
[1][1][2][0][RTW89_ETSI][1] = 54,
[1][1][2][0][RTW89_MKK][1] = 48,
- [1][1][2][0][RTW89_IC][1] = 60,
- [1][1][2][0][RTW89_ACMA][1] = 60,
+ [1][1][2][0][RTW89_IC][1] = 48,
+ [1][1][2][0][RTW89_KCC][1] = 54,
+ [1][1][2][0][RTW89_ACMA][1] = 54,
+ [1][1][2][0][RTW89_CN][1] = 42,
+ [1][1][2][0][RTW89_UK][1] = 54,
[1][1][2][0][RTW89_FCC][5] = 68,
[1][1][2][0][RTW89_ETSI][5] = 54,
[1][1][2][0][RTW89_MKK][5] = 52,
- [1][1][2][0][RTW89_IC][5] = 60,
- [1][1][2][0][RTW89_ACMA][5] = 60,
+ [1][1][2][0][RTW89_IC][5] = 48,
+ [1][1][2][0][RTW89_KCC][5] = 54,
+ [1][1][2][0][RTW89_ACMA][5] = 54,
+ [1][1][2][0][RTW89_CN][5] = 42,
+ [1][1][2][0][RTW89_UK][5] = 54,
[1][1][2][0][RTW89_FCC][9] = 68,
[1][1][2][0][RTW89_ETSI][9] = 54,
[1][1][2][0][RTW89_MKK][9] = 52,
- [1][1][2][0][RTW89_IC][9] = 60,
- [1][1][2][0][RTW89_ACMA][9] = 60,
+ [1][1][2][0][RTW89_IC][9] = 52,
+ [1][1][2][0][RTW89_KCC][9] = 64,
+ [1][1][2][0][RTW89_ACMA][9] = 54,
+ [1][1][2][0][RTW89_CN][9] = 42,
+ [1][1][2][0][RTW89_UK][9] = 54,
[1][1][2][0][RTW89_FCC][13] = 54,
[1][1][2][0][RTW89_ETSI][13] = 54,
[1][1][2][0][RTW89_MKK][13] = 52,
- [1][1][2][0][RTW89_IC][13] = 60,
- [1][1][2][0][RTW89_ACMA][13] = 60,
- [1][1][2][0][RTW89_FCC][16] = 48,
+ [1][1][2][0][RTW89_IC][13] = 52,
+ [1][1][2][0][RTW89_KCC][13] = 52,
+ [1][1][2][0][RTW89_ACMA][13] = 54,
+ [1][1][2][0][RTW89_CN][13] = 42,
+ [1][1][2][0][RTW89_UK][13] = 54,
+ [1][1][2][0][RTW89_FCC][16] = 56,
[1][1][2][0][RTW89_ETSI][16] = 54,
[1][1][2][0][RTW89_MKK][16] = 66,
- [1][1][2][0][RTW89_IC][16] = 58,
- [1][1][2][0][RTW89_ACMA][16] = 60,
+ [1][1][2][0][RTW89_IC][16] = 56,
+ [1][1][2][0][RTW89_KCC][16] = 54,
+ [1][1][2][0][RTW89_ACMA][16] = 54,
+ [1][1][2][0][RTW89_CN][16] = 127,
+ [1][1][2][0][RTW89_UK][16] = 54,
[1][1][2][0][RTW89_FCC][20] = 68,
[1][1][2][0][RTW89_ETSI][20] = 54,
[1][1][2][0][RTW89_MKK][20] = 66,
- [1][1][2][0][RTW89_IC][20] = 66,
- [1][1][2][0][RTW89_ACMA][20] = 60,
+ [1][1][2][0][RTW89_IC][20] = 68,
+ [1][1][2][0][RTW89_KCC][20] = 54,
+ [1][1][2][0][RTW89_ACMA][20] = 54,
+ [1][1][2][0][RTW89_CN][20] = 127,
+ [1][1][2][0][RTW89_UK][20] = 54,
[1][1][2][0][RTW89_FCC][24] = 68,
[1][1][2][0][RTW89_ETSI][24] = 54,
[1][1][2][0][RTW89_MKK][24] = 66,
[1][1][2][0][RTW89_IC][24] = 127,
+ [1][1][2][0][RTW89_KCC][24] = 54,
[1][1][2][0][RTW89_ACMA][24] = 127,
+ [1][1][2][0][RTW89_CN][24] = 127,
+ [1][1][2][0][RTW89_UK][24] = 54,
[1][1][2][0][RTW89_FCC][28] = 68,
[1][1][2][0][RTW89_ETSI][28] = 54,
[1][1][2][0][RTW89_MKK][28] = 66,
[1][1][2][0][RTW89_IC][28] = 127,
+ [1][1][2][0][RTW89_KCC][28] = 66,
[1][1][2][0][RTW89_ACMA][28] = 127,
- [1][1][2][0][RTW89_FCC][32] = 60,
+ [1][1][2][0][RTW89_CN][28] = 127,
+ [1][1][2][0][RTW89_UK][28] = 54,
+ [1][1][2][0][RTW89_FCC][32] = 56,
[1][1][2][0][RTW89_ETSI][32] = 54,
[1][1][2][0][RTW89_MKK][32] = 66,
- [1][1][2][0][RTW89_IC][32] = 66,
+ [1][1][2][0][RTW89_IC][32] = 56,
+ [1][1][2][0][RTW89_KCC][32] = 66,
[1][1][2][0][RTW89_ACMA][32] = 54,
+ [1][1][2][0][RTW89_CN][32] = 127,
+ [1][1][2][0][RTW89_UK][32] = 54,
[1][1][2][0][RTW89_FCC][36] = 68,
[1][1][2][0][RTW89_ETSI][36] = 127,
[1][1][2][0][RTW89_MKK][36] = 66,
- [1][1][2][0][RTW89_IC][36] = 66,
+ [1][1][2][0][RTW89_IC][36] = 68,
+ [1][1][2][0][RTW89_KCC][36] = 66,
[1][1][2][0][RTW89_ACMA][36] = 66,
+ [1][1][2][0][RTW89_CN][36] = 127,
+ [1][1][2][0][RTW89_UK][36] = 52,
[1][1][2][0][RTW89_FCC][39] = 68,
[1][1][2][0][RTW89_ETSI][39] = 18,
[1][1][2][0][RTW89_MKK][39] = 127,
- [1][1][2][0][RTW89_IC][39] = 66,
+ [1][1][2][0][RTW89_IC][39] = 68,
+ [1][1][2][0][RTW89_KCC][39] = 56,
[1][1][2][0][RTW89_ACMA][39] = 66,
+ [1][1][2][0][RTW89_CN][39] = 62,
+ [1][1][2][0][RTW89_UK][39] = 52,
[1][1][2][0][RTW89_FCC][43] = 68,
[1][1][2][0][RTW89_ETSI][43] = 18,
[1][1][2][0][RTW89_MKK][43] = 127,
- [1][1][2][0][RTW89_IC][43] = 66,
+ [1][1][2][0][RTW89_IC][43] = 68,
+ [1][1][2][0][RTW89_KCC][43] = 56,
[1][1][2][0][RTW89_ACMA][43] = 66,
- [1][1][2][0][RTW89_FCC][47] = 60,
+ [1][1][2][0][RTW89_CN][43] = 66,
+ [1][1][2][0][RTW89_UK][43] = 52,
+ [1][1][2][0][RTW89_FCC][47] = 62,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
[1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
- [1][1][2][0][RTW89_FCC][51] = 58,
+ [1][1][2][0][RTW89_CN][47] = 127,
+ [1][1][2][0][RTW89_UK][47] = 127,
+ [1][1][2][0][RTW89_FCC][51] = 60,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
[1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
+ [1][1][2][0][RTW89_CN][51] = 127,
+ [1][1][2][0][RTW89_UK][51] = 127,
[1][1][2][1][RTW89_FCC][1] = 54,
[1][1][2][1][RTW89_ETSI][1] = 40,
[1][1][2][1][RTW89_MKK][1] = 48,
- [1][1][2][1][RTW89_IC][1] = 48,
- [1][1][2][1][RTW89_ACMA][1] = 48,
- [1][1][2][1][RTW89_FCC][5] = 60,
+ [1][1][2][1][RTW89_IC][1] = 40,
+ [1][1][2][1][RTW89_KCC][1] = 54,
+ [1][1][2][1][RTW89_ACMA][1] = 40,
+ [1][1][2][1][RTW89_CN][1] = 42,
+ [1][1][2][1][RTW89_UK][1] = 40,
+ [1][1][2][1][RTW89_FCC][5] = 68,
[1][1][2][1][RTW89_ETSI][5] = 40,
[1][1][2][1][RTW89_MKK][5] = 52,
- [1][1][2][1][RTW89_IC][5] = 48,
- [1][1][2][1][RTW89_ACMA][5] = 48,
- [1][1][2][1][RTW89_FCC][9] = 60,
+ [1][1][2][1][RTW89_IC][5] = 40,
+ [1][1][2][1][RTW89_KCC][5] = 54,
+ [1][1][2][1][RTW89_ACMA][5] = 40,
+ [1][1][2][1][RTW89_CN][5] = 42,
+ [1][1][2][1][RTW89_UK][5] = 40,
+ [1][1][2][1][RTW89_FCC][9] = 68,
[1][1][2][1][RTW89_ETSI][9] = 40,
[1][1][2][1][RTW89_MKK][9] = 52,
- [1][1][2][1][RTW89_IC][9] = 48,
- [1][1][2][1][RTW89_ACMA][9] = 48,
+ [1][1][2][1][RTW89_IC][9] = 40,
+ [1][1][2][1][RTW89_KCC][9] = 64,
+ [1][1][2][1][RTW89_ACMA][9] = 40,
+ [1][1][2][1][RTW89_CN][9] = 42,
+ [1][1][2][1][RTW89_UK][9] = 40,
[1][1][2][1][RTW89_FCC][13] = 54,
[1][1][2][1][RTW89_ETSI][13] = 40,
[1][1][2][1][RTW89_MKK][13] = 52,
- [1][1][2][1][RTW89_IC][13] = 48,
- [1][1][2][1][RTW89_ACMA][13] = 48,
- [1][1][2][1][RTW89_FCC][16] = 48,
+ [1][1][2][1][RTW89_IC][13] = 40,
+ [1][1][2][1][RTW89_KCC][13] = 52,
+ [1][1][2][1][RTW89_ACMA][13] = 40,
+ [1][1][2][1][RTW89_CN][13] = 42,
+ [1][1][2][1][RTW89_UK][13] = 40,
+ [1][1][2][1][RTW89_FCC][16] = 56,
[1][1][2][1][RTW89_ETSI][16] = 40,
[1][1][2][1][RTW89_MKK][16] = 66,
- [1][1][2][1][RTW89_IC][16] = 58,
- [1][1][2][1][RTW89_ACMA][16] = 48,
- [1][1][2][1][RTW89_FCC][20] = 60,
+ [1][1][2][1][RTW89_IC][16] = 56,
+ [1][1][2][1][RTW89_KCC][16] = 54,
+ [1][1][2][1][RTW89_ACMA][16] = 40,
+ [1][1][2][1][RTW89_CN][16] = 127,
+ [1][1][2][1][RTW89_UK][16] = 40,
+ [1][1][2][1][RTW89_FCC][20] = 68,
[1][1][2][1][RTW89_ETSI][20] = 40,
[1][1][2][1][RTW89_MKK][20] = 66,
- [1][1][2][1][RTW89_IC][20] = 66,
- [1][1][2][1][RTW89_ACMA][20] = 48,
- [1][1][2][1][RTW89_FCC][24] = 60,
+ [1][1][2][1][RTW89_IC][20] = 68,
+ [1][1][2][1][RTW89_KCC][20] = 54,
+ [1][1][2][1][RTW89_ACMA][20] = 40,
+ [1][1][2][1][RTW89_CN][20] = 127,
+ [1][1][2][1][RTW89_UK][20] = 40,
+ [1][1][2][1][RTW89_FCC][24] = 68,
[1][1][2][1][RTW89_ETSI][24] = 40,
[1][1][2][1][RTW89_MKK][24] = 66,
[1][1][2][1][RTW89_IC][24] = 127,
+ [1][1][2][1][RTW89_KCC][24] = 54,
[1][1][2][1][RTW89_ACMA][24] = 127,
- [1][1][2][1][RTW89_FCC][28] = 60,
+ [1][1][2][1][RTW89_CN][24] = 127,
+ [1][1][2][1][RTW89_UK][24] = 40,
+ [1][1][2][1][RTW89_FCC][28] = 68,
[1][1][2][1][RTW89_ETSI][28] = 40,
[1][1][2][1][RTW89_MKK][28] = 66,
[1][1][2][1][RTW89_IC][28] = 127,
+ [1][1][2][1][RTW89_KCC][28] = 66,
[1][1][2][1][RTW89_ACMA][28] = 127,
- [1][1][2][1][RTW89_FCC][32] = 60,
+ [1][1][2][1][RTW89_CN][28] = 127,
+ [1][1][2][1][RTW89_UK][28] = 40,
+ [1][1][2][1][RTW89_FCC][32] = 56,
[1][1][2][1][RTW89_ETSI][32] = 40,
[1][1][2][1][RTW89_MKK][32] = 66,
- [1][1][2][1][RTW89_IC][32] = 66,
- [1][1][2][1][RTW89_ACMA][32] = 42,
- [1][1][2][1][RTW89_FCC][36] = 60,
+ [1][1][2][1][RTW89_IC][32] = 56,
+ [1][1][2][1][RTW89_KCC][32] = 66,
+ [1][1][2][1][RTW89_ACMA][32] = 40,
+ [1][1][2][1][RTW89_CN][32] = 127,
+ [1][1][2][1][RTW89_UK][32] = 40,
+ [1][1][2][1][RTW89_FCC][36] = 68,
[1][1][2][1][RTW89_ETSI][36] = 127,
[1][1][2][1][RTW89_MKK][36] = 66,
- [1][1][2][1][RTW89_IC][36] = 66,
+ [1][1][2][1][RTW89_IC][36] = 68,
+ [1][1][2][1][RTW89_KCC][36] = 66,
[1][1][2][1][RTW89_ACMA][36] = 66,
+ [1][1][2][1][RTW89_CN][36] = 127,
+ [1][1][2][1][RTW89_UK][36] = 40,
[1][1][2][1][RTW89_FCC][39] = 68,
[1][1][2][1][RTW89_ETSI][39] = 6,
[1][1][2][1][RTW89_MKK][39] = 127,
- [1][1][2][1][RTW89_IC][39] = 66,
+ [1][1][2][1][RTW89_IC][39] = 68,
+ [1][1][2][1][RTW89_KCC][39] = 56,
[1][1][2][1][RTW89_ACMA][39] = 66,
+ [1][1][2][1][RTW89_CN][39] = 60,
+ [1][1][2][1][RTW89_UK][39] = 40,
[1][1][2][1][RTW89_FCC][43] = 68,
[1][1][2][1][RTW89_ETSI][43] = 6,
[1][1][2][1][RTW89_MKK][43] = 127,
- [1][1][2][1][RTW89_IC][43] = 66,
+ [1][1][2][1][RTW89_IC][43] = 68,
+ [1][1][2][1][RTW89_KCC][43] = 56,
[1][1][2][1][RTW89_ACMA][43] = 66,
- [1][1][2][1][RTW89_FCC][47] = 60,
+ [1][1][2][1][RTW89_CN][43] = 52,
+ [1][1][2][1][RTW89_UK][43] = 40,
+ [1][1][2][1][RTW89_FCC][47] = 62,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
[1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
- [1][1][2][1][RTW89_FCC][51] = 58,
+ [1][1][2][1][RTW89_CN][47] = 127,
+ [1][1][2][1][RTW89_UK][47] = 127,
+ [1][1][2][1][RTW89_FCC][51] = 60,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
[1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
- [2][0][2][0][RTW89_FCC][3] = 56,
+ [1][1][2][1][RTW89_CN][51] = 127,
+ [1][1][2][1][RTW89_UK][51] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 58,
[2][0][2][0][RTW89_ETSI][3] = 60,
[2][0][2][0][RTW89_MKK][3] = 60,
- [2][0][2][0][RTW89_IC][3] = 60,
+ [2][0][2][0][RTW89_IC][3] = 56,
+ [2][0][2][0][RTW89_KCC][3] = 60,
[2][0][2][0][RTW89_ACMA][3] = 60,
- [2][0][2][0][RTW89_FCC][11] = 58,
+ [2][0][2][0][RTW89_CN][3] = 54,
+ [2][0][2][0][RTW89_UK][3] = 60,
+ [2][0][2][0][RTW89_FCC][11] = 50,
[2][0][2][0][RTW89_ETSI][11] = 60,
[2][0][2][0][RTW89_MKK][11] = 60,
- [2][0][2][0][RTW89_IC][11] = 60,
+ [2][0][2][0][RTW89_IC][11] = 50,
+ [2][0][2][0][RTW89_KCC][11] = 58,
[2][0][2][0][RTW89_ACMA][11] = 60,
- [2][0][2][0][RTW89_FCC][18] = 54,
+ [2][0][2][0][RTW89_CN][11] = 54,
+ [2][0][2][0][RTW89_UK][11] = 60,
+ [2][0][2][0][RTW89_FCC][18] = 60,
[2][0][2][0][RTW89_ETSI][18] = 60,
[2][0][2][0][RTW89_MKK][18] = 60,
[2][0][2][0][RTW89_IC][18] = 60,
+ [2][0][2][0][RTW89_KCC][18] = 56,
[2][0][2][0][RTW89_ACMA][18] = 60,
+ [2][0][2][0][RTW89_CN][18] = 127,
+ [2][0][2][0][RTW89_UK][18] = 60,
[2][0][2][0][RTW89_FCC][26] = 62,
[2][0][2][0][RTW89_ETSI][26] = 60,
[2][0][2][0][RTW89_MKK][26] = 60,
[2][0][2][0][RTW89_IC][26] = 127,
+ [2][0][2][0][RTW89_KCC][26] = 60,
[2][0][2][0][RTW89_ACMA][26] = 127,
+ [2][0][2][0][RTW89_CN][26] = 127,
+ [2][0][2][0][RTW89_UK][26] = 60,
[2][0][2][0][RTW89_FCC][34] = 62,
[2][0][2][0][RTW89_ETSI][34] = 127,
[2][0][2][0][RTW89_MKK][34] = 60,
- [2][0][2][0][RTW89_IC][34] = 60,
+ [2][0][2][0][RTW89_IC][34] = 62,
+ [2][0][2][0][RTW89_KCC][34] = 60,
[2][0][2][0][RTW89_ACMA][34] = 60,
+ [2][0][2][0][RTW89_CN][34] = 127,
+ [2][0][2][0][RTW89_UK][34] = 60,
[2][0][2][0][RTW89_FCC][41] = 62,
[2][0][2][0][RTW89_ETSI][41] = 30,
[2][0][2][0][RTW89_MKK][41] = 127,
- [2][0][2][0][RTW89_IC][41] = 60,
+ [2][0][2][0][RTW89_IC][41] = 62,
+ [2][0][2][0][RTW89_KCC][41] = 58,
[2][0][2][0][RTW89_ACMA][41] = 60,
- [2][0][2][0][RTW89_FCC][49] = 56,
+ [2][0][2][0][RTW89_CN][41] = 62,
+ [2][0][2][0][RTW89_UK][41] = 60,
+ [2][0][2][0][RTW89_FCC][49] = 62,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
[2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
+ [2][0][2][0][RTW89_CN][49] = 127,
+ [2][0][2][0][RTW89_UK][49] = 127,
[2][1][2][0][RTW89_FCC][3] = 48,
[2][1][2][0][RTW89_ETSI][3] = 54,
[2][1][2][0][RTW89_MKK][3] = 56,
- [2][1][2][0][RTW89_IC][3] = 52,
- [2][1][2][0][RTW89_ACMA][3] = 52,
- [2][1][2][0][RTW89_FCC][11] = 54,
+ [2][1][2][0][RTW89_IC][3] = 46,
+ [2][1][2][0][RTW89_KCC][3] = 56,
+ [2][1][2][0][RTW89_ACMA][3] = 54,
+ [2][1][2][0][RTW89_CN][3] = 52,
+ [2][1][2][0][RTW89_UK][3] = 54,
+ [2][1][2][0][RTW89_FCC][11] = 38,
[2][1][2][0][RTW89_ETSI][11] = 54,
[2][1][2][0][RTW89_MKK][11] = 54,
- [2][1][2][0][RTW89_IC][11] = 52,
- [2][1][2][0][RTW89_ACMA][11] = 52,
- [2][1][2][0][RTW89_FCC][18] = 48,
+ [2][1][2][0][RTW89_IC][11] = 38,
+ [2][1][2][0][RTW89_KCC][11] = 52,
+ [2][1][2][0][RTW89_ACMA][11] = 54,
+ [2][1][2][0][RTW89_CN][11] = 52,
+ [2][1][2][0][RTW89_UK][11] = 54,
+ [2][1][2][0][RTW89_FCC][18] = 50,
[2][1][2][0][RTW89_ETSI][18] = 54,
[2][1][2][0][RTW89_MKK][18] = 60,
- [2][1][2][0][RTW89_IC][18] = 58,
- [2][1][2][0][RTW89_ACMA][18] = 52,
- [2][1][2][0][RTW89_FCC][26] = 62,
+ [2][1][2][0][RTW89_IC][18] = 50,
+ [2][1][2][0][RTW89_KCC][18] = 54,
+ [2][1][2][0][RTW89_ACMA][18] = 54,
+ [2][1][2][0][RTW89_CN][18] = 127,
+ [2][1][2][0][RTW89_UK][18] = 54,
+ [2][1][2][0][RTW89_FCC][26] = 52,
[2][1][2][0][RTW89_ETSI][26] = 54,
[2][1][2][0][RTW89_MKK][26] = 56,
[2][1][2][0][RTW89_IC][26] = 127,
+ [2][1][2][0][RTW89_KCC][26] = 60,
[2][1][2][0][RTW89_ACMA][26] = 127,
+ [2][1][2][0][RTW89_CN][26] = 127,
+ [2][1][2][0][RTW89_UK][26] = 54,
[2][1][2][0][RTW89_FCC][34] = 62,
[2][1][2][0][RTW89_ETSI][34] = 127,
[2][1][2][0][RTW89_MKK][34] = 60,
- [2][1][2][0][RTW89_IC][34] = 60,
+ [2][1][2][0][RTW89_IC][34] = 62,
+ [2][1][2][0][RTW89_KCC][34] = 60,
[2][1][2][0][RTW89_ACMA][34] = 60,
- [2][1][2][0][RTW89_FCC][41] = 62,
+ [2][1][2][0][RTW89_CN][34] = 127,
+ [2][1][2][0][RTW89_UK][34] = 52,
+ [2][1][2][0][RTW89_FCC][41] = 60,
[2][1][2][0][RTW89_ETSI][41] = 18,
[2][1][2][0][RTW89_MKK][41] = 127,
[2][1][2][0][RTW89_IC][41] = 60,
- [2][1][2][0][RTW89_ACMA][41] = 60,
- [2][1][2][0][RTW89_FCC][49] = 50,
+ [2][1][2][0][RTW89_KCC][41] = 50,
+ [2][1][2][0][RTW89_ACMA][41] = 58,
+ [2][1][2][0][RTW89_CN][41] = 62,
+ [2][1][2][0][RTW89_UK][41] = 52,
+ [2][1][2][0][RTW89_FCC][49] = 62,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
[2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
+ [2][1][2][0][RTW89_CN][49] = 127,
+ [2][1][2][0][RTW89_UK][49] = 127,
[2][1][2][1][RTW89_FCC][3] = 48,
[2][1][2][1][RTW89_ETSI][3] = 40,
[2][1][2][1][RTW89_MKK][3] = 56,
[2][1][2][1][RTW89_IC][3] = 40,
+ [2][1][2][1][RTW89_KCC][3] = 56,
[2][1][2][1][RTW89_ACMA][3] = 40,
- [2][1][2][1][RTW89_FCC][11] = 54,
+ [2][1][2][1][RTW89_CN][3] = 42,
+ [2][1][2][1][RTW89_UK][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 38,
[2][1][2][1][RTW89_ETSI][11] = 40,
[2][1][2][1][RTW89_MKK][11] = 54,
- [2][1][2][1][RTW89_IC][11] = 40,
+ [2][1][2][1][RTW89_IC][11] = 38,
+ [2][1][2][1][RTW89_KCC][11] = 52,
[2][1][2][1][RTW89_ACMA][11] = 40,
- [2][1][2][1][RTW89_FCC][18] = 48,
+ [2][1][2][1][RTW89_CN][11] = 42,
+ [2][1][2][1][RTW89_UK][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 50,
[2][1][2][1][RTW89_ETSI][18] = 40,
[2][1][2][1][RTW89_MKK][18] = 60,
- [2][1][2][1][RTW89_IC][18] = 58,
+ [2][1][2][1][RTW89_IC][18] = 50,
+ [2][1][2][1][RTW89_KCC][18] = 54,
[2][1][2][1][RTW89_ACMA][18] = 40,
- [2][1][2][1][RTW89_FCC][26] = 60,
+ [2][1][2][1][RTW89_CN][18] = 127,
+ [2][1][2][1][RTW89_UK][18] = 40,
+ [2][1][2][1][RTW89_FCC][26] = 52,
[2][1][2][1][RTW89_ETSI][26] = 42,
[2][1][2][1][RTW89_MKK][26] = 56,
[2][1][2][1][RTW89_IC][26] = 127,
+ [2][1][2][1][RTW89_KCC][26] = 60,
[2][1][2][1][RTW89_ACMA][26] = 127,
- [2][1][2][1][RTW89_FCC][34] = 60,
+ [2][1][2][1][RTW89_CN][26] = 127,
+ [2][1][2][1][RTW89_UK][26] = 42,
+ [2][1][2][1][RTW89_FCC][34] = 62,
[2][1][2][1][RTW89_ETSI][34] = 127,
[2][1][2][1][RTW89_MKK][34] = 60,
- [2][1][2][1][RTW89_IC][34] = 60,
+ [2][1][2][1][RTW89_IC][34] = 62,
+ [2][1][2][1][RTW89_KCC][34] = 60,
[2][1][2][1][RTW89_ACMA][34] = 60,
- [2][1][2][1][RTW89_FCC][41] = 62,
+ [2][1][2][1][RTW89_CN][34] = 127,
+ [2][1][2][1][RTW89_UK][34] = 40,
+ [2][1][2][1][RTW89_FCC][41] = 60,
[2][1][2][1][RTW89_ETSI][41] = 6,
[2][1][2][1][RTW89_MKK][41] = 127,
[2][1][2][1][RTW89_IC][41] = 60,
- [2][1][2][1][RTW89_ACMA][41] = 60,
- [2][1][2][1][RTW89_FCC][49] = 50,
+ [2][1][2][1][RTW89_KCC][41] = 50,
+ [2][1][2][1][RTW89_ACMA][41] = 58,
+ [2][1][2][1][RTW89_CN][41] = 40,
+ [2][1][2][1][RTW89_UK][41] = 40,
+ [2][1][2][1][RTW89_FCC][49] = 62,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
[2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
- [3][0][2][0][RTW89_FCC][7] = 38,
+ [2][1][2][1][RTW89_CN][49] = 127,
+ [2][1][2][1][RTW89_UK][49] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 40,
[3][0][2][0][RTW89_ETSI][7] = 50,
[3][0][2][0][RTW89_MKK][7] = 50,
- [3][0][2][0][RTW89_IC][7] = 50,
- [3][0][2][0][RTW89_ACMA][7] = 50,
- [3][0][2][0][RTW89_FCC][22] = 52,
+ [3][0][2][0][RTW89_IC][7] = 40,
+ [3][0][2][0][RTW89_KCC][7] = 44,
+ [3][0][2][0][RTW89_ACMA][7] = 127,
+ [3][0][2][0][RTW89_CN][7] = 66,
+ [3][0][2][0][RTW89_UK][7] = 127,
+ [3][0][2][0][RTW89_FCC][22] = 42,
[3][0][2][0][RTW89_ETSI][22] = 50,
[3][0][2][0][RTW89_MKK][22] = 50,
- [3][0][2][0][RTW89_IC][22] = 50,
- [3][0][2][0][RTW89_ACMA][22] = 50,
- [3][0][2][0][RTW89_FCC][45] = 127,
+ [3][0][2][0][RTW89_IC][22] = 127,
+ [3][0][2][0][RTW89_KCC][22] = 50,
+ [3][0][2][0][RTW89_ACMA][22] = 127,
+ [3][0][2][0][RTW89_CN][22] = 66,
+ [3][0][2][0][RTW89_UK][22] = 127,
+ [3][0][2][0][RTW89_FCC][45] = 52,
[3][0][2][0][RTW89_ETSI][45] = 127,
[3][0][2][0][RTW89_MKK][45] = 127,
[3][0][2][0][RTW89_IC][45] = 127,
+ [3][0][2][0][RTW89_KCC][45] = 127,
[3][0][2][0][RTW89_ACMA][45] = 127,
- [3][1][2][0][RTW89_FCC][7] = 26,
+ [3][0][2][0][RTW89_CN][45] = 127,
+ [3][0][2][0][RTW89_UK][45] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 32,
[3][1][2][0][RTW89_ETSI][7] = 50,
[3][1][2][0][RTW89_MKK][7] = 36,
[3][1][2][0][RTW89_IC][7] = 44,
- [3][1][2][0][RTW89_ACMA][7] = 44,
- [3][1][2][0][RTW89_FCC][22] = 42,
+ [3][1][2][0][RTW89_KCC][7] = 50,
+ [3][1][2][0][RTW89_ACMA][7] = 127,
+ [3][1][2][0][RTW89_CN][7] = 54,
+ [3][1][2][0][RTW89_UK][7] = 127,
+ [3][1][2][0][RTW89_FCC][22] = 36,
[3][1][2][0][RTW89_ETSI][22] = 50,
[3][1][2][0][RTW89_MKK][22] = 48,
- [3][1][2][0][RTW89_IC][22] = 44,
- [3][1][2][0][RTW89_ACMA][22] = 44,
- [3][1][2][0][RTW89_FCC][45] = 127,
+ [3][1][2][0][RTW89_IC][22] = 127,
+ [3][1][2][0][RTW89_KCC][22] = 50,
+ [3][1][2][0][RTW89_ACMA][22] = 127,
+ [3][1][2][0][RTW89_CN][22] = 54,
+ [3][1][2][0][RTW89_UK][22] = 127,
+ [3][1][2][0][RTW89_FCC][45] = 46,
[3][1][2][0][RTW89_ETSI][45] = 127,
[3][1][2][0][RTW89_MKK][45] = 127,
[3][1][2][0][RTW89_IC][45] = 127,
+ [3][1][2][0][RTW89_KCC][45] = 127,
[3][1][2][0][RTW89_ACMA][45] = 127,
- [3][1][2][1][RTW89_FCC][7] = 14,
+ [3][1][2][0][RTW89_CN][45] = 127,
+ [3][1][2][0][RTW89_UK][45] = 127,
+ [3][1][2][1][RTW89_FCC][7] = 32,
[3][1][2][1][RTW89_ETSI][7] = 42,
[3][1][2][1][RTW89_MKK][7] = 36,
- [3][1][2][1][RTW89_IC][7] = 32,
- [3][1][2][1][RTW89_ACMA][7] = 32,
- [3][1][2][1][RTW89_FCC][22] = 30,
+ [3][1][2][1][RTW89_IC][7] = 44,
+ [3][1][2][1][RTW89_KCC][7] = 50,
+ [3][1][2][1][RTW89_ACMA][7] = 127,
+ [3][1][2][1][RTW89_CN][7] = 42,
+ [3][1][2][1][RTW89_UK][7] = 127,
+ [3][1][2][1][RTW89_FCC][22] = 36,
[3][1][2][1][RTW89_ETSI][22] = 42,
[3][1][2][1][RTW89_MKK][22] = 48,
- [3][1][2][1][RTW89_IC][22] = 32,
- [3][1][2][1][RTW89_ACMA][22] = 32,
- [3][1][2][1][RTW89_FCC][45] = 127,
+ [3][1][2][1][RTW89_IC][22] = 127,
+ [3][1][2][1][RTW89_KCC][22] = 50,
+ [3][1][2][1][RTW89_ACMA][22] = 127,
+ [3][1][2][1][RTW89_CN][22] = 42,
+ [3][1][2][1][RTW89_UK][22] = 127,
+ [3][1][2][1][RTW89_FCC][45] = 46,
[3][1][2][1][RTW89_ETSI][45] = 127,
[3][1][2][1][RTW89_MKK][45] = 127,
[3][1][2][1][RTW89_IC][45] = 127,
+ [3][1][2][1][RTW89_KCC][45] = 127,
[3][1][2][1][RTW89_ACMA][45] = 127,
+ [3][1][2][1][RTW89_CN][45] = 127,
+ [3][1][2][1][RTW89_UK][45] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
- [0][0][1][0][RTW89_WW][0] = 72,
- [0][0][1][0][RTW89_WW][2] = 72,
- [0][0][1][0][RTW89_WW][4] = 72,
- [0][0][1][0][RTW89_WW][6] = 72,
- [0][0][1][0][RTW89_WW][8] = 72,
- [0][0][1][0][RTW89_WW][10] = 72,
- [0][0][1][0][RTW89_WW][12] = 72,
- [0][0][1][0][RTW89_WW][14] = 72,
- [0][0][1][0][RTW89_WW][15] = 72,
- [0][0][1][0][RTW89_WW][17] = 72,
- [0][0][1][0][RTW89_WW][19] = 72,
- [0][0][1][0][RTW89_WW][21] = 72,
- [0][0][1][0][RTW89_WW][23] = 72,
- [0][0][1][0][RTW89_WW][25] = 72,
- [0][0][1][0][RTW89_WW][27] = 72,
- [0][0][1][0][RTW89_WW][29] = 72,
- [0][0][1][0][RTW89_WW][30] = 72,
- [0][0][1][0][RTW89_WW][32] = 72,
- [0][0][1][0][RTW89_WW][34] = 72,
- [0][0][1][0][RTW89_WW][36] = 72,
- [0][0][1][0][RTW89_WW][38] = 72,
- [0][0][1][0][RTW89_WW][40] = 72,
- [0][0][1][0][RTW89_WW][42] = 72,
- [0][0][1][0][RTW89_WW][44] = 72,
- [0][0][1][0][RTW89_WW][45] = 72,
- [0][0][1][0][RTW89_WW][47] = 72,
- [0][0][1][0][RTW89_WW][49] = 72,
- [0][0][1][0][RTW89_WW][51] = 72,
- [0][0][1][0][RTW89_WW][53] = 72,
- [0][0][1][0][RTW89_WW][55] = 72,
- [0][0][1][0][RTW89_WW][57] = 72,
- [0][0][1][0][RTW89_WW][59] = 72,
- [0][0][1][0][RTW89_WW][60] = 72,
- [0][0][1][0][RTW89_WW][62] = 72,
- [0][0][1][0][RTW89_WW][64] = 72,
- [0][0][1][0][RTW89_WW][66] = 72,
- [0][0][1][0][RTW89_WW][68] = 72,
- [0][0][1][0][RTW89_WW][70] = 72,
- [0][0][1][0][RTW89_WW][72] = 72,
- [0][0][1][0][RTW89_WW][74] = 72,
- [0][0][1][0][RTW89_WW][75] = 72,
- [0][0][1][0][RTW89_WW][77] = 72,
- [0][0][1][0][RTW89_WW][79] = 72,
- [0][0][1][0][RTW89_WW][81] = 72,
- [0][0][1][0][RTW89_WW][83] = 72,
- [0][0][1][0][RTW89_WW][85] = 72,
- [0][0][1][0][RTW89_WW][87] = 72,
- [0][0][1][0][RTW89_WW][89] = 72,
- [0][0][1][0][RTW89_WW][90] = 72,
- [0][0][1][0][RTW89_WW][92] = 72,
- [0][0][1][0][RTW89_WW][94] = 72,
- [0][0][1][0][RTW89_WW][96] = 72,
- [0][0][1][0][RTW89_WW][98] = 72,
- [0][0][1][0][RTW89_WW][100] = 72,
- [0][0][1][0][RTW89_WW][102] = 72,
- [0][0][1][0][RTW89_WW][104] = 72,
- [0][0][1][0][RTW89_WW][105] = 72,
- [0][0][1][0][RTW89_WW][107] = 72,
- [0][0][1][0][RTW89_WW][109] = 72,
+ [0][0][1][0][RTW89_WW][0] = 24,
+ [0][0][1][0][RTW89_WW][2] = 22,
+ [0][0][1][0][RTW89_WW][4] = 22,
+ [0][0][1][0][RTW89_WW][6] = 22,
+ [0][0][1][0][RTW89_WW][8] = 22,
+ [0][0][1][0][RTW89_WW][10] = 22,
+ [0][0][1][0][RTW89_WW][12] = 22,
+ [0][0][1][0][RTW89_WW][14] = 22,
+ [0][0][1][0][RTW89_WW][15] = 22,
+ [0][0][1][0][RTW89_WW][17] = 22,
+ [0][0][1][0][RTW89_WW][19] = 22,
+ [0][0][1][0][RTW89_WW][21] = 22,
+ [0][0][1][0][RTW89_WW][23] = 22,
+ [0][0][1][0][RTW89_WW][25] = 22,
+ [0][0][1][0][RTW89_WW][27] = 22,
+ [0][0][1][0][RTW89_WW][29] = 22,
+ [0][0][1][0][RTW89_WW][30] = 22,
+ [0][0][1][0][RTW89_WW][32] = 22,
+ [0][0][1][0][RTW89_WW][34] = 22,
+ [0][0][1][0][RTW89_WW][36] = 22,
+ [0][0][1][0][RTW89_WW][38] = 22,
+ [0][0][1][0][RTW89_WW][40] = 22,
+ [0][0][1][0][RTW89_WW][42] = 22,
+ [0][0][1][0][RTW89_WW][44] = 22,
+ [0][0][1][0][RTW89_WW][45] = 22,
+ [0][0][1][0][RTW89_WW][47] = 22,
+ [0][0][1][0][RTW89_WW][49] = 24,
+ [0][0][1][0][RTW89_WW][51] = 22,
+ [0][0][1][0][RTW89_WW][53] = 22,
+ [0][0][1][0][RTW89_WW][55] = 22,
+ [0][0][1][0][RTW89_WW][57] = 22,
+ [0][0][1][0][RTW89_WW][59] = 22,
+ [0][0][1][0][RTW89_WW][60] = 22,
+ [0][0][1][0][RTW89_WW][62] = 22,
+ [0][0][1][0][RTW89_WW][64] = 22,
+ [0][0][1][0][RTW89_WW][66] = 22,
+ [0][0][1][0][RTW89_WW][68] = 22,
+ [0][0][1][0][RTW89_WW][70] = 24,
+ [0][0][1][0][RTW89_WW][72] = 22,
+ [0][0][1][0][RTW89_WW][74] = 22,
+ [0][0][1][0][RTW89_WW][75] = 22,
+ [0][0][1][0][RTW89_WW][77] = 22,
+ [0][0][1][0][RTW89_WW][79] = 22,
+ [0][0][1][0][RTW89_WW][81] = 22,
+ [0][0][1][0][RTW89_WW][83] = 22,
+ [0][0][1][0][RTW89_WW][85] = 22,
+ [0][0][1][0][RTW89_WW][87] = 22,
+ [0][0][1][0][RTW89_WW][89] = 22,
+ [0][0][1][0][RTW89_WW][90] = 22,
+ [0][0][1][0][RTW89_WW][92] = 22,
+ [0][0][1][0][RTW89_WW][94] = 22,
+ [0][0][1][0][RTW89_WW][96] = 22,
+ [0][0][1][0][RTW89_WW][98] = 22,
+ [0][0][1][0][RTW89_WW][100] = 22,
+ [0][0][1][0][RTW89_WW][102] = 22,
+ [0][0][1][0][RTW89_WW][104] = 22,
+ [0][0][1][0][RTW89_WW][105] = 22,
+ [0][0][1][0][RTW89_WW][107] = 24,
+ [0][0][1][0][RTW89_WW][109] = 24,
[0][0][1][0][RTW89_WW][111] = 0,
[0][0][1][0][RTW89_WW][113] = 0,
[0][0][1][0][RTW89_WW][115] = 0,
[0][0][1][0][RTW89_WW][117] = 0,
[0][0][1][0][RTW89_WW][119] = 0,
- [0][1][1][0][RTW89_WW][0] = 60,
- [0][1][1][0][RTW89_WW][2] = 60,
- [0][1][1][0][RTW89_WW][4] = 60,
- [0][1][1][0][RTW89_WW][6] = 60,
- [0][1][1][0][RTW89_WW][8] = 60,
- [0][1][1][0][RTW89_WW][10] = 60,
- [0][1][1][0][RTW89_WW][12] = 60,
- [0][1][1][0][RTW89_WW][14] = 60,
- [0][1][1][0][RTW89_WW][15] = 60,
- [0][1][1][0][RTW89_WW][17] = 60,
- [0][1][1][0][RTW89_WW][19] = 60,
- [0][1][1][0][RTW89_WW][21] = 60,
- [0][1][1][0][RTW89_WW][23] = 60,
- [0][1][1][0][RTW89_WW][25] = 60,
- [0][1][1][0][RTW89_WW][27] = 60,
- [0][1][1][0][RTW89_WW][29] = 60,
- [0][1][1][0][RTW89_WW][30] = 60,
- [0][1][1][0][RTW89_WW][32] = 60,
- [0][1][1][0][RTW89_WW][34] = 60,
- [0][1][1][0][RTW89_WW][36] = 60,
- [0][1][1][0][RTW89_WW][38] = 60,
- [0][1][1][0][RTW89_WW][40] = 60,
- [0][1][1][0][RTW89_WW][42] = 60,
- [0][1][1][0][RTW89_WW][44] = 60,
- [0][1][1][0][RTW89_WW][45] = 60,
- [0][1][1][0][RTW89_WW][47] = 60,
- [0][1][1][0][RTW89_WW][49] = 60,
- [0][1][1][0][RTW89_WW][51] = 60,
- [0][1][1][0][RTW89_WW][53] = 60,
- [0][1][1][0][RTW89_WW][55] = 60,
- [0][1][1][0][RTW89_WW][57] = 60,
- [0][1][1][0][RTW89_WW][59] = 60,
- [0][1][1][0][RTW89_WW][60] = 60,
- [0][1][1][0][RTW89_WW][62] = 60,
- [0][1][1][0][RTW89_WW][64] = 60,
- [0][1][1][0][RTW89_WW][66] = 60,
- [0][1][1][0][RTW89_WW][68] = 60,
- [0][1][1][0][RTW89_WW][70] = 60,
- [0][1][1][0][RTW89_WW][72] = 60,
- [0][1][1][0][RTW89_WW][74] = 60,
- [0][1][1][0][RTW89_WW][75] = 60,
- [0][1][1][0][RTW89_WW][77] = 60,
- [0][1][1][0][RTW89_WW][79] = 60,
- [0][1][1][0][RTW89_WW][81] = 60,
- [0][1][1][0][RTW89_WW][83] = 60,
- [0][1][1][0][RTW89_WW][85] = 60,
- [0][1][1][0][RTW89_WW][87] = 60,
- [0][1][1][0][RTW89_WW][89] = 60,
- [0][1][1][0][RTW89_WW][90] = 60,
- [0][1][1][0][RTW89_WW][92] = 60,
- [0][1][1][0][RTW89_WW][94] = 60,
- [0][1][1][0][RTW89_WW][96] = 60,
- [0][1][1][0][RTW89_WW][98] = 60,
- [0][1][1][0][RTW89_WW][100] = 60,
- [0][1][1][0][RTW89_WW][102] = 60,
- [0][1][1][0][RTW89_WW][104] = 60,
- [0][1][1][0][RTW89_WW][105] = 60,
- [0][1][1][0][RTW89_WW][107] = 60,
- [0][1][1][0][RTW89_WW][109] = 60,
+ [0][1][1][0][RTW89_WW][0] = -2,
+ [0][1][1][0][RTW89_WW][2] = -4,
+ [0][1][1][0][RTW89_WW][4] = -4,
+ [0][1][1][0][RTW89_WW][6] = -4,
+ [0][1][1][0][RTW89_WW][8] = -4,
+ [0][1][1][0][RTW89_WW][10] = -4,
+ [0][1][1][0][RTW89_WW][12] = -4,
+ [0][1][1][0][RTW89_WW][14] = -4,
+ [0][1][1][0][RTW89_WW][15] = -4,
+ [0][1][1][0][RTW89_WW][17] = -4,
+ [0][1][1][0][RTW89_WW][19] = -4,
+ [0][1][1][0][RTW89_WW][21] = -4,
+ [0][1][1][0][RTW89_WW][23] = -4,
+ [0][1][1][0][RTW89_WW][25] = -4,
+ [0][1][1][0][RTW89_WW][27] = -4,
+ [0][1][1][0][RTW89_WW][29] = -4,
+ [0][1][1][0][RTW89_WW][30] = -4,
+ [0][1][1][0][RTW89_WW][32] = -4,
+ [0][1][1][0][RTW89_WW][34] = -4,
+ [0][1][1][0][RTW89_WW][36] = -4,
+ [0][1][1][0][RTW89_WW][38] = -4,
+ [0][1][1][0][RTW89_WW][40] = -4,
+ [0][1][1][0][RTW89_WW][42] = -4,
+ [0][1][1][0][RTW89_WW][44] = -2,
+ [0][1][1][0][RTW89_WW][45] = -2,
+ [0][1][1][0][RTW89_WW][47] = -2,
+ [0][1][1][0][RTW89_WW][49] = -2,
+ [0][1][1][0][RTW89_WW][51] = -2,
+ [0][1][1][0][RTW89_WW][53] = -2,
+ [0][1][1][0][RTW89_WW][55] = -2,
+ [0][1][1][0][RTW89_WW][57] = -2,
+ [0][1][1][0][RTW89_WW][59] = -2,
+ [0][1][1][0][RTW89_WW][60] = -2,
+ [0][1][1][0][RTW89_WW][62] = -2,
+ [0][1][1][0][RTW89_WW][64] = -2,
+ [0][1][1][0][RTW89_WW][66] = -2,
+ [0][1][1][0][RTW89_WW][68] = -2,
+ [0][1][1][0][RTW89_WW][70] = -2,
+ [0][1][1][0][RTW89_WW][72] = -2,
+ [0][1][1][0][RTW89_WW][74] = -2,
+ [0][1][1][0][RTW89_WW][75] = -2,
+ [0][1][1][0][RTW89_WW][77] = -2,
+ [0][1][1][0][RTW89_WW][79] = -2,
+ [0][1][1][0][RTW89_WW][81] = -2,
+ [0][1][1][0][RTW89_WW][83] = -2,
+ [0][1][1][0][RTW89_WW][85] = -2,
+ [0][1][1][0][RTW89_WW][87] = -2,
+ [0][1][1][0][RTW89_WW][89] = -2,
+ [0][1][1][0][RTW89_WW][90] = -2,
+ [0][1][1][0][RTW89_WW][92] = -2,
+ [0][1][1][0][RTW89_WW][94] = -2,
+ [0][1][1][0][RTW89_WW][96] = -2,
+ [0][1][1][0][RTW89_WW][98] = -2,
+ [0][1][1][0][RTW89_WW][100] = -2,
+ [0][1][1][0][RTW89_WW][102] = -2,
+ [0][1][1][0][RTW89_WW][104] = -2,
+ [0][1][1][0][RTW89_WW][105] = -2,
+ [0][1][1][0][RTW89_WW][107] = 1,
+ [0][1][1][0][RTW89_WW][109] = 1,
[0][1][1][0][RTW89_WW][111] = 0,
[0][1][1][0][RTW89_WW][113] = 0,
[0][1][1][0][RTW89_WW][115] = 0,
[0][1][1][0][RTW89_WW][117] = 0,
[0][1][1][0][RTW89_WW][119] = 0,
- [0][0][2][0][RTW89_WW][0] = 72,
- [0][0][2][0][RTW89_WW][2] = 72,
- [0][0][2][0][RTW89_WW][4] = 72,
- [0][0][2][0][RTW89_WW][6] = 72,
- [0][0][2][0][RTW89_WW][8] = 72,
- [0][0][2][0][RTW89_WW][10] = 72,
- [0][0][2][0][RTW89_WW][12] = 72,
- [0][0][2][0][RTW89_WW][14] = 72,
- [0][0][2][0][RTW89_WW][15] = 72,
- [0][0][2][0][RTW89_WW][17] = 72,
- [0][0][2][0][RTW89_WW][19] = 72,
- [0][0][2][0][RTW89_WW][21] = 72,
- [0][0][2][0][RTW89_WW][23] = 72,
- [0][0][2][0][RTW89_WW][25] = 72,
- [0][0][2][0][RTW89_WW][27] = 72,
- [0][0][2][0][RTW89_WW][29] = 72,
- [0][0][2][0][RTW89_WW][30] = 72,
- [0][0][2][0][RTW89_WW][32] = 72,
- [0][0][2][0][RTW89_WW][34] = 72,
- [0][0][2][0][RTW89_WW][36] = 72,
- [0][0][2][0][RTW89_WW][38] = 72,
- [0][0][2][0][RTW89_WW][40] = 72,
- [0][0][2][0][RTW89_WW][42] = 72,
- [0][0][2][0][RTW89_WW][44] = 72,
- [0][0][2][0][RTW89_WW][45] = 72,
- [0][0][2][0][RTW89_WW][47] = 72,
- [0][0][2][0][RTW89_WW][49] = 72,
- [0][0][2][0][RTW89_WW][51] = 72,
- [0][0][2][0][RTW89_WW][53] = 72,
- [0][0][2][0][RTW89_WW][55] = 72,
- [0][0][2][0][RTW89_WW][57] = 72,
- [0][0][2][0][RTW89_WW][59] = 72,
- [0][0][2][0][RTW89_WW][60] = 72,
- [0][0][2][0][RTW89_WW][62] = 72,
- [0][0][2][0][RTW89_WW][64] = 72,
- [0][0][2][0][RTW89_WW][66] = 72,
- [0][0][2][0][RTW89_WW][68] = 72,
- [0][0][2][0][RTW89_WW][70] = 72,
- [0][0][2][0][RTW89_WW][72] = 72,
- [0][0][2][0][RTW89_WW][74] = 72,
- [0][0][2][0][RTW89_WW][75] = 72,
- [0][0][2][0][RTW89_WW][77] = 72,
- [0][0][2][0][RTW89_WW][79] = 72,
- [0][0][2][0][RTW89_WW][81] = 72,
- [0][0][2][0][RTW89_WW][83] = 72,
- [0][0][2][0][RTW89_WW][85] = 72,
- [0][0][2][0][RTW89_WW][87] = 72,
- [0][0][2][0][RTW89_WW][89] = 72,
- [0][0][2][0][RTW89_WW][90] = 72,
- [0][0][2][0][RTW89_WW][92] = 72,
- [0][0][2][0][RTW89_WW][94] = 72,
- [0][0][2][0][RTW89_WW][96] = 72,
- [0][0][2][0][RTW89_WW][98] = 72,
- [0][0][2][0][RTW89_WW][100] = 72,
- [0][0][2][0][RTW89_WW][102] = 72,
- [0][0][2][0][RTW89_WW][104] = 72,
- [0][0][2][0][RTW89_WW][105] = 72,
- [0][0][2][0][RTW89_WW][107] = 72,
- [0][0][2][0][RTW89_WW][109] = 72,
+ [0][0][2][0][RTW89_WW][0] = 24,
+ [0][0][2][0][RTW89_WW][2] = 22,
+ [0][0][2][0][RTW89_WW][4] = 22,
+ [0][0][2][0][RTW89_WW][6] = 22,
+ [0][0][2][0][RTW89_WW][8] = 22,
+ [0][0][2][0][RTW89_WW][10] = 22,
+ [0][0][2][0][RTW89_WW][12] = 22,
+ [0][0][2][0][RTW89_WW][14] = 22,
+ [0][0][2][0][RTW89_WW][15] = 22,
+ [0][0][2][0][RTW89_WW][17] = 22,
+ [0][0][2][0][RTW89_WW][19] = 22,
+ [0][0][2][0][RTW89_WW][21] = 22,
+ [0][0][2][0][RTW89_WW][23] = 22,
+ [0][0][2][0][RTW89_WW][25] = 22,
+ [0][0][2][0][RTW89_WW][27] = 22,
+ [0][0][2][0][RTW89_WW][29] = 22,
+ [0][0][2][0][RTW89_WW][30] = 22,
+ [0][0][2][0][RTW89_WW][32] = 22,
+ [0][0][2][0][RTW89_WW][34] = 22,
+ [0][0][2][0][RTW89_WW][36] = 22,
+ [0][0][2][0][RTW89_WW][38] = 22,
+ [0][0][2][0][RTW89_WW][40] = 22,
+ [0][0][2][0][RTW89_WW][42] = 22,
+ [0][0][2][0][RTW89_WW][44] = 22,
+ [0][0][2][0][RTW89_WW][45] = 22,
+ [0][0][2][0][RTW89_WW][47] = 22,
+ [0][0][2][0][RTW89_WW][49] = 24,
+ [0][0][2][0][RTW89_WW][51] = 22,
+ [0][0][2][0][RTW89_WW][53] = 22,
+ [0][0][2][0][RTW89_WW][55] = 22,
+ [0][0][2][0][RTW89_WW][57] = 22,
+ [0][0][2][0][RTW89_WW][59] = 22,
+ [0][0][2][0][RTW89_WW][60] = 22,
+ [0][0][2][0][RTW89_WW][62] = 22,
+ [0][0][2][0][RTW89_WW][64] = 22,
+ [0][0][2][0][RTW89_WW][66] = 22,
+ [0][0][2][0][RTW89_WW][68] = 22,
+ [0][0][2][0][RTW89_WW][70] = 24,
+ [0][0][2][0][RTW89_WW][72] = 22,
+ [0][0][2][0][RTW89_WW][74] = 22,
+ [0][0][2][0][RTW89_WW][75] = 22,
+ [0][0][2][0][RTW89_WW][77] = 22,
+ [0][0][2][0][RTW89_WW][79] = 22,
+ [0][0][2][0][RTW89_WW][81] = 22,
+ [0][0][2][0][RTW89_WW][83] = 22,
+ [0][0][2][0][RTW89_WW][85] = 22,
+ [0][0][2][0][RTW89_WW][87] = 22,
+ [0][0][2][0][RTW89_WW][89] = 22,
+ [0][0][2][0][RTW89_WW][90] = 22,
+ [0][0][2][0][RTW89_WW][92] = 22,
+ [0][0][2][0][RTW89_WW][94] = 22,
+ [0][0][2][0][RTW89_WW][96] = 22,
+ [0][0][2][0][RTW89_WW][98] = 22,
+ [0][0][2][0][RTW89_WW][100] = 22,
+ [0][0][2][0][RTW89_WW][102] = 22,
+ [0][0][2][0][RTW89_WW][104] = 22,
+ [0][0][2][0][RTW89_WW][105] = 22,
+ [0][0][2][0][RTW89_WW][107] = 24,
+ [0][0][2][0][RTW89_WW][109] = 24,
[0][0][2][0][RTW89_WW][111] = 0,
[0][0][2][0][RTW89_WW][113] = 0,
[0][0][2][0][RTW89_WW][115] = 0,
[0][0][2][0][RTW89_WW][117] = 0,
[0][0][2][0][RTW89_WW][119] = 0,
- [0][1][2][0][RTW89_WW][0] = 60,
- [0][1][2][0][RTW89_WW][2] = 60,
- [0][1][2][0][RTW89_WW][4] = 60,
- [0][1][2][0][RTW89_WW][6] = 60,
- [0][1][2][0][RTW89_WW][8] = 60,
- [0][1][2][0][RTW89_WW][10] = 60,
- [0][1][2][0][RTW89_WW][12] = 60,
- [0][1][2][0][RTW89_WW][14] = 60,
- [0][1][2][0][RTW89_WW][15] = 60,
- [0][1][2][0][RTW89_WW][17] = 60,
- [0][1][2][0][RTW89_WW][19] = 60,
- [0][1][2][0][RTW89_WW][21] = 60,
- [0][1][2][0][RTW89_WW][23] = 60,
- [0][1][2][0][RTW89_WW][25] = 60,
- [0][1][2][0][RTW89_WW][27] = 60,
- [0][1][2][0][RTW89_WW][29] = 60,
- [0][1][2][0][RTW89_WW][30] = 60,
- [0][1][2][0][RTW89_WW][32] = 60,
- [0][1][2][0][RTW89_WW][34] = 60,
- [0][1][2][0][RTW89_WW][36] = 60,
- [0][1][2][0][RTW89_WW][38] = 60,
- [0][1][2][0][RTW89_WW][40] = 60,
- [0][1][2][0][RTW89_WW][42] = 60,
- [0][1][2][0][RTW89_WW][44] = 60,
- [0][1][2][0][RTW89_WW][45] = 60,
- [0][1][2][0][RTW89_WW][47] = 60,
- [0][1][2][0][RTW89_WW][49] = 60,
- [0][1][2][0][RTW89_WW][51] = 60,
- [0][1][2][0][RTW89_WW][53] = 60,
- [0][1][2][0][RTW89_WW][55] = 60,
- [0][1][2][0][RTW89_WW][57] = 60,
- [0][1][2][0][RTW89_WW][59] = 60,
- [0][1][2][0][RTW89_WW][60] = 60,
- [0][1][2][0][RTW89_WW][62] = 60,
- [0][1][2][0][RTW89_WW][64] = 60,
- [0][1][2][0][RTW89_WW][66] = 60,
- [0][1][2][0][RTW89_WW][68] = 60,
- [0][1][2][0][RTW89_WW][70] = 60,
- [0][1][2][0][RTW89_WW][72] = 60,
- [0][1][2][0][RTW89_WW][74] = 60,
- [0][1][2][0][RTW89_WW][75] = 60,
- [0][1][2][0][RTW89_WW][77] = 60,
- [0][1][2][0][RTW89_WW][79] = 60,
- [0][1][2][0][RTW89_WW][81] = 60,
- [0][1][2][0][RTW89_WW][83] = 60,
- [0][1][2][0][RTW89_WW][85] = 60,
- [0][1][2][0][RTW89_WW][87] = 60,
- [0][1][2][0][RTW89_WW][89] = 60,
- [0][1][2][0][RTW89_WW][90] = 60,
- [0][1][2][0][RTW89_WW][92] = 60,
- [0][1][2][0][RTW89_WW][94] = 60,
- [0][1][2][0][RTW89_WW][96] = 60,
- [0][1][2][0][RTW89_WW][98] = 60,
- [0][1][2][0][RTW89_WW][100] = 60,
- [0][1][2][0][RTW89_WW][102] = 60,
- [0][1][2][0][RTW89_WW][104] = 60,
- [0][1][2][0][RTW89_WW][105] = 60,
- [0][1][2][0][RTW89_WW][107] = 60,
- [0][1][2][0][RTW89_WW][109] = 60,
+ [0][1][2][0][RTW89_WW][0] = -2,
+ [0][1][2][0][RTW89_WW][2] = -4,
+ [0][1][2][0][RTW89_WW][4] = -4,
+ [0][1][2][0][RTW89_WW][6] = -4,
+ [0][1][2][0][RTW89_WW][8] = -4,
+ [0][1][2][0][RTW89_WW][10] = -4,
+ [0][1][2][0][RTW89_WW][12] = -4,
+ [0][1][2][0][RTW89_WW][14] = -4,
+ [0][1][2][0][RTW89_WW][15] = -4,
+ [0][1][2][0][RTW89_WW][17] = -4,
+ [0][1][2][0][RTW89_WW][19] = -4,
+ [0][1][2][0][RTW89_WW][21] = -4,
+ [0][1][2][0][RTW89_WW][23] = -4,
+ [0][1][2][0][RTW89_WW][25] = -4,
+ [0][1][2][0][RTW89_WW][27] = -4,
+ [0][1][2][0][RTW89_WW][29] = -4,
+ [0][1][2][0][RTW89_WW][30] = -4,
+ [0][1][2][0][RTW89_WW][32] = -4,
+ [0][1][2][0][RTW89_WW][34] = -4,
+ [0][1][2][0][RTW89_WW][36] = -4,
+ [0][1][2][0][RTW89_WW][38] = -4,
+ [0][1][2][0][RTW89_WW][40] = -4,
+ [0][1][2][0][RTW89_WW][42] = -4,
+ [0][1][2][0][RTW89_WW][44] = -2,
+ [0][1][2][0][RTW89_WW][45] = -2,
+ [0][1][2][0][RTW89_WW][47] = -2,
+ [0][1][2][0][RTW89_WW][49] = -2,
+ [0][1][2][0][RTW89_WW][51] = -2,
+ [0][1][2][0][RTW89_WW][53] = -2,
+ [0][1][2][0][RTW89_WW][55] = -2,
+ [0][1][2][0][RTW89_WW][57] = -2,
+ [0][1][2][0][RTW89_WW][59] = -2,
+ [0][1][2][0][RTW89_WW][60] = -2,
+ [0][1][2][0][RTW89_WW][62] = -2,
+ [0][1][2][0][RTW89_WW][64] = -2,
+ [0][1][2][0][RTW89_WW][66] = -2,
+ [0][1][2][0][RTW89_WW][68] = -2,
+ [0][1][2][0][RTW89_WW][70] = -2,
+ [0][1][2][0][RTW89_WW][72] = -2,
+ [0][1][2][0][RTW89_WW][74] = -2,
+ [0][1][2][0][RTW89_WW][75] = -2,
+ [0][1][2][0][RTW89_WW][77] = -2,
+ [0][1][2][0][RTW89_WW][79] = -2,
+ [0][1][2][0][RTW89_WW][81] = -2,
+ [0][1][2][0][RTW89_WW][83] = -2,
+ [0][1][2][0][RTW89_WW][85] = -2,
+ [0][1][2][0][RTW89_WW][87] = -2,
+ [0][1][2][0][RTW89_WW][89] = -2,
+ [0][1][2][0][RTW89_WW][90] = -2,
+ [0][1][2][0][RTW89_WW][92] = -2,
+ [0][1][2][0][RTW89_WW][94] = -2,
+ [0][1][2][0][RTW89_WW][96] = -2,
+ [0][1][2][0][RTW89_WW][98] = -2,
+ [0][1][2][0][RTW89_WW][100] = -2,
+ [0][1][2][0][RTW89_WW][102] = -2,
+ [0][1][2][0][RTW89_WW][104] = -2,
+ [0][1][2][0][RTW89_WW][105] = -2,
+ [0][1][2][0][RTW89_WW][107] = 1,
+ [0][1][2][0][RTW89_WW][109] = 1,
[0][1][2][0][RTW89_WW][111] = 0,
[0][1][2][0][RTW89_WW][113] = 0,
[0][1][2][0][RTW89_WW][115] = 0,
[0][1][2][0][RTW89_WW][117] = 0,
[0][1][2][0][RTW89_WW][119] = 0,
- [0][1][2][1][RTW89_WW][0] = 48,
- [0][1][2][1][RTW89_WW][2] = 48,
- [0][1][2][1][RTW89_WW][4] = 48,
- [0][1][2][1][RTW89_WW][6] = 48,
- [0][1][2][1][RTW89_WW][8] = 48,
- [0][1][2][1][RTW89_WW][10] = 48,
- [0][1][2][1][RTW89_WW][12] = 48,
- [0][1][2][1][RTW89_WW][14] = 48,
- [0][1][2][1][RTW89_WW][15] = 48,
- [0][1][2][1][RTW89_WW][17] = 48,
- [0][1][2][1][RTW89_WW][19] = 48,
- [0][1][2][1][RTW89_WW][21] = 48,
- [0][1][2][1][RTW89_WW][23] = 48,
- [0][1][2][1][RTW89_WW][25] = 48,
- [0][1][2][1][RTW89_WW][27] = 48,
- [0][1][2][1][RTW89_WW][29] = 48,
- [0][1][2][1][RTW89_WW][30] = 48,
- [0][1][2][1][RTW89_WW][32] = 48,
- [0][1][2][1][RTW89_WW][34] = 48,
- [0][1][2][1][RTW89_WW][36] = 48,
- [0][1][2][1][RTW89_WW][38] = 48,
- [0][1][2][1][RTW89_WW][40] = 48,
- [0][1][2][1][RTW89_WW][42] = 48,
- [0][1][2][1][RTW89_WW][44] = 48,
- [0][1][2][1][RTW89_WW][45] = 48,
- [0][1][2][1][RTW89_WW][47] = 48,
- [0][1][2][1][RTW89_WW][49] = 48,
- [0][1][2][1][RTW89_WW][51] = 48,
- [0][1][2][1][RTW89_WW][53] = 48,
- [0][1][2][1][RTW89_WW][55] = 48,
- [0][1][2][1][RTW89_WW][57] = 48,
- [0][1][2][1][RTW89_WW][59] = 48,
- [0][1][2][1][RTW89_WW][60] = 48,
- [0][1][2][1][RTW89_WW][62] = 48,
- [0][1][2][1][RTW89_WW][64] = 48,
- [0][1][2][1][RTW89_WW][66] = 48,
- [0][1][2][1][RTW89_WW][68] = 48,
- [0][1][2][1][RTW89_WW][70] = 48,
- [0][1][2][1][RTW89_WW][72] = 48,
- [0][1][2][1][RTW89_WW][74] = 48,
- [0][1][2][1][RTW89_WW][75] = 48,
- [0][1][2][1][RTW89_WW][77] = 48,
- [0][1][2][1][RTW89_WW][79] = 48,
- [0][1][2][1][RTW89_WW][81] = 48,
- [0][1][2][1][RTW89_WW][83] = 48,
- [0][1][2][1][RTW89_WW][85] = 48,
- [0][1][2][1][RTW89_WW][87] = 48,
- [0][1][2][1][RTW89_WW][89] = 48,
- [0][1][2][1][RTW89_WW][90] = 48,
- [0][1][2][1][RTW89_WW][92] = 48,
- [0][1][2][1][RTW89_WW][94] = 48,
- [0][1][2][1][RTW89_WW][96] = 48,
- [0][1][2][1][RTW89_WW][98] = 48,
- [0][1][2][1][RTW89_WW][100] = 48,
- [0][1][2][1][RTW89_WW][102] = 48,
- [0][1][2][1][RTW89_WW][104] = 48,
- [0][1][2][1][RTW89_WW][105] = 48,
- [0][1][2][1][RTW89_WW][107] = 48,
- [0][1][2][1][RTW89_WW][109] = 48,
+ [0][1][2][1][RTW89_WW][0] = -2,
+ [0][1][2][1][RTW89_WW][2] = -4,
+ [0][1][2][1][RTW89_WW][4] = -4,
+ [0][1][2][1][RTW89_WW][6] = -4,
+ [0][1][2][1][RTW89_WW][8] = -4,
+ [0][1][2][1][RTW89_WW][10] = -4,
+ [0][1][2][1][RTW89_WW][12] = -4,
+ [0][1][2][1][RTW89_WW][14] = -4,
+ [0][1][2][1][RTW89_WW][15] = -4,
+ [0][1][2][1][RTW89_WW][17] = -4,
+ [0][1][2][1][RTW89_WW][19] = -4,
+ [0][1][2][1][RTW89_WW][21] = -4,
+ [0][1][2][1][RTW89_WW][23] = -4,
+ [0][1][2][1][RTW89_WW][25] = -4,
+ [0][1][2][1][RTW89_WW][27] = -4,
+ [0][1][2][1][RTW89_WW][29] = -4,
+ [0][1][2][1][RTW89_WW][30] = -4,
+ [0][1][2][1][RTW89_WW][32] = -4,
+ [0][1][2][1][RTW89_WW][34] = -4,
+ [0][1][2][1][RTW89_WW][36] = -4,
+ [0][1][2][1][RTW89_WW][38] = -4,
+ [0][1][2][1][RTW89_WW][40] = -4,
+ [0][1][2][1][RTW89_WW][42] = -4,
+ [0][1][2][1][RTW89_WW][44] = -2,
+ [0][1][2][1][RTW89_WW][45] = -2,
+ [0][1][2][1][RTW89_WW][47] = -2,
+ [0][1][2][1][RTW89_WW][49] = -2,
+ [0][1][2][1][RTW89_WW][51] = -2,
+ [0][1][2][1][RTW89_WW][53] = -2,
+ [0][1][2][1][RTW89_WW][55] = -2,
+ [0][1][2][1][RTW89_WW][57] = -2,
+ [0][1][2][1][RTW89_WW][59] = -2,
+ [0][1][2][1][RTW89_WW][60] = -2,
+ [0][1][2][1][RTW89_WW][62] = -2,
+ [0][1][2][1][RTW89_WW][64] = -2,
+ [0][1][2][1][RTW89_WW][66] = -2,
+ [0][1][2][1][RTW89_WW][68] = -2,
+ [0][1][2][1][RTW89_WW][70] = -2,
+ [0][1][2][1][RTW89_WW][72] = -2,
+ [0][1][2][1][RTW89_WW][74] = -2,
+ [0][1][2][1][RTW89_WW][75] = -2,
+ [0][1][2][1][RTW89_WW][77] = -2,
+ [0][1][2][1][RTW89_WW][79] = -2,
+ [0][1][2][1][RTW89_WW][81] = -2,
+ [0][1][2][1][RTW89_WW][83] = -2,
+ [0][1][2][1][RTW89_WW][85] = -2,
+ [0][1][2][1][RTW89_WW][87] = -2,
+ [0][1][2][1][RTW89_WW][89] = -2,
+ [0][1][2][1][RTW89_WW][90] = -2,
+ [0][1][2][1][RTW89_WW][92] = -2,
+ [0][1][2][1][RTW89_WW][94] = -2,
+ [0][1][2][1][RTW89_WW][96] = -2,
+ [0][1][2][1][RTW89_WW][98] = -2,
+ [0][1][2][1][RTW89_WW][100] = -2,
+ [0][1][2][1][RTW89_WW][102] = -2,
+ [0][1][2][1][RTW89_WW][104] = -2,
+ [0][1][2][1][RTW89_WW][105] = -2,
+ [0][1][2][1][RTW89_WW][107] = 1,
+ [0][1][2][1][RTW89_WW][109] = 1,
[0][1][2][1][RTW89_WW][111] = 0,
[0][1][2][1][RTW89_WW][113] = 0,
[0][1][2][1][RTW89_WW][115] = 0,
[0][1][2][1][RTW89_WW][117] = 0,
[0][1][2][1][RTW89_WW][119] = 0,
- [1][0][2][0][RTW89_WW][1] = 72,
- [1][0][2][0][RTW89_WW][5] = 72,
- [1][0][2][0][RTW89_WW][9] = 72,
- [1][0][2][0][RTW89_WW][13] = 72,
- [1][0][2][0][RTW89_WW][16] = 72,
- [1][0][2][0][RTW89_WW][20] = 72,
- [1][0][2][0][RTW89_WW][24] = 72,
- [1][0][2][0][RTW89_WW][28] = 72,
- [1][0][2][0][RTW89_WW][31] = 72,
- [1][0][2][0][RTW89_WW][35] = 72,
- [1][0][2][0][RTW89_WW][39] = 72,
- [1][0][2][0][RTW89_WW][43] = 72,
- [1][0][2][0][RTW89_WW][46] = 72,
- [1][0][2][0][RTW89_WW][50] = 72,
- [1][0][2][0][RTW89_WW][54] = 72,
- [1][0][2][0][RTW89_WW][58] = 72,
- [1][0][2][0][RTW89_WW][61] = 72,
- [1][0][2][0][RTW89_WW][65] = 72,
- [1][0][2][0][RTW89_WW][69] = 72,
- [1][0][2][0][RTW89_WW][73] = 72,
- [1][0][2][0][RTW89_WW][76] = 72,
- [1][0][2][0][RTW89_WW][80] = 72,
- [1][0][2][0][RTW89_WW][84] = 72,
- [1][0][2][0][RTW89_WW][88] = 72,
- [1][0][2][0][RTW89_WW][91] = 72,
- [1][0][2][0][RTW89_WW][95] = 72,
- [1][0][2][0][RTW89_WW][99] = 72,
- [1][0][2][0][RTW89_WW][103] = 72,
- [1][0][2][0][RTW89_WW][106] = 72,
+ [1][0][2][0][RTW89_WW][1] = 34,
+ [1][0][2][0][RTW89_WW][5] = 34,
+ [1][0][2][0][RTW89_WW][9] = 34,
+ [1][0][2][0][RTW89_WW][13] = 34,
+ [1][0][2][0][RTW89_WW][16] = 34,
+ [1][0][2][0][RTW89_WW][20] = 34,
+ [1][0][2][0][RTW89_WW][24] = 36,
+ [1][0][2][0][RTW89_WW][28] = 34,
+ [1][0][2][0][RTW89_WW][31] = 34,
+ [1][0][2][0][RTW89_WW][35] = 34,
+ [1][0][2][0][RTW89_WW][39] = 34,
+ [1][0][2][0][RTW89_WW][43] = 34,
+ [1][0][2][0][RTW89_WW][46] = 34,
+ [1][0][2][0][RTW89_WW][50] = 34,
+ [1][0][2][0][RTW89_WW][54] = 36,
+ [1][0][2][0][RTW89_WW][58] = 36,
+ [1][0][2][0][RTW89_WW][61] = 34,
+ [1][0][2][0][RTW89_WW][65] = 34,
+ [1][0][2][0][RTW89_WW][69] = 34,
+ [1][0][2][0][RTW89_WW][73] = 34,
+ [1][0][2][0][RTW89_WW][76] = 34,
+ [1][0][2][0][RTW89_WW][80] = 34,
+ [1][0][2][0][RTW89_WW][84] = 34,
+ [1][0][2][0][RTW89_WW][88] = 34,
+ [1][0][2][0][RTW89_WW][91] = 36,
+ [1][0][2][0][RTW89_WW][95] = 34,
+ [1][0][2][0][RTW89_WW][99] = 34,
+ [1][0][2][0][RTW89_WW][103] = 34,
+ [1][0][2][0][RTW89_WW][106] = 36,
[1][0][2][0][RTW89_WW][110] = 0,
[1][0][2][0][RTW89_WW][114] = 0,
[1][0][2][0][RTW89_WW][118] = 0,
- [1][1][2][0][RTW89_WW][1] = 60,
- [1][1][2][0][RTW89_WW][5] = 60,
- [1][1][2][0][RTW89_WW][9] = 60,
- [1][1][2][0][RTW89_WW][13] = 60,
- [1][1][2][0][RTW89_WW][16] = 60,
- [1][1][2][0][RTW89_WW][20] = 60,
- [1][1][2][0][RTW89_WW][24] = 60,
- [1][1][2][0][RTW89_WW][28] = 60,
- [1][1][2][0][RTW89_WW][31] = 60,
- [1][1][2][0][RTW89_WW][35] = 60,
- [1][1][2][0][RTW89_WW][39] = 60,
- [1][1][2][0][RTW89_WW][43] = 60,
- [1][1][2][0][RTW89_WW][46] = 60,
- [1][1][2][0][RTW89_WW][50] = 60,
- [1][1][2][0][RTW89_WW][54] = 60,
- [1][1][2][0][RTW89_WW][58] = 60,
- [1][1][2][0][RTW89_WW][61] = 60,
- [1][1][2][0][RTW89_WW][65] = 60,
- [1][1][2][0][RTW89_WW][69] = 60,
- [1][1][2][0][RTW89_WW][73] = 60,
- [1][1][2][0][RTW89_WW][76] = 60,
- [1][1][2][0][RTW89_WW][80] = 60,
- [1][1][2][0][RTW89_WW][84] = 60,
- [1][1][2][0][RTW89_WW][88] = 60,
- [1][1][2][0][RTW89_WW][91] = 60,
- [1][1][2][0][RTW89_WW][95] = 60,
- [1][1][2][0][RTW89_WW][99] = 60,
- [1][1][2][0][RTW89_WW][103] = 60,
- [1][1][2][0][RTW89_WW][106] = 60,
+ [1][1][2][0][RTW89_WW][1] = 10,
+ [1][1][2][0][RTW89_WW][5] = 10,
+ [1][1][2][0][RTW89_WW][9] = 10,
+ [1][1][2][0][RTW89_WW][13] = 10,
+ [1][1][2][0][RTW89_WW][16] = 10,
+ [1][1][2][0][RTW89_WW][20] = 10,
+ [1][1][2][0][RTW89_WW][24] = 10,
+ [1][1][2][0][RTW89_WW][28] = 10,
+ [1][1][2][0][RTW89_WW][31] = 10,
+ [1][1][2][0][RTW89_WW][35] = 10,
+ [1][1][2][0][RTW89_WW][39] = 10,
+ [1][1][2][0][RTW89_WW][43] = 10,
+ [1][1][2][0][RTW89_WW][46] = 12,
+ [1][1][2][0][RTW89_WW][50] = 12,
+ [1][1][2][0][RTW89_WW][54] = 10,
+ [1][1][2][0][RTW89_WW][58] = 10,
+ [1][1][2][0][RTW89_WW][61] = 10,
+ [1][1][2][0][RTW89_WW][65] = 10,
+ [1][1][2][0][RTW89_WW][69] = 10,
+ [1][1][2][0][RTW89_WW][73] = 10,
+ [1][1][2][0][RTW89_WW][76] = 10,
+ [1][1][2][0][RTW89_WW][80] = 10,
+ [1][1][2][0][RTW89_WW][84] = 10,
+ [1][1][2][0][RTW89_WW][88] = 10,
+ [1][1][2][0][RTW89_WW][91] = 12,
+ [1][1][2][0][RTW89_WW][95] = 10,
+ [1][1][2][0][RTW89_WW][99] = 10,
+ [1][1][2][0][RTW89_WW][103] = 10,
+ [1][1][2][0][RTW89_WW][106] = 12,
[1][1][2][0][RTW89_WW][110] = 0,
[1][1][2][0][RTW89_WW][114] = 0,
[1][1][2][0][RTW89_WW][118] = 0,
- [1][1][2][1][RTW89_WW][1] = 48,
- [1][1][2][1][RTW89_WW][5] = 48,
- [1][1][2][1][RTW89_WW][9] = 48,
- [1][1][2][1][RTW89_WW][13] = 48,
- [1][1][2][1][RTW89_WW][16] = 48,
- [1][1][2][1][RTW89_WW][20] = 48,
- [1][1][2][1][RTW89_WW][24] = 48,
- [1][1][2][1][RTW89_WW][28] = 48,
- [1][1][2][1][RTW89_WW][31] = 48,
- [1][1][2][1][RTW89_WW][35] = 48,
- [1][1][2][1][RTW89_WW][39] = 48,
- [1][1][2][1][RTW89_WW][43] = 48,
- [1][1][2][1][RTW89_WW][46] = 48,
- [1][1][2][1][RTW89_WW][50] = 48,
- [1][1][2][1][RTW89_WW][54] = 48,
- [1][1][2][1][RTW89_WW][58] = 48,
- [1][1][2][1][RTW89_WW][61] = 48,
- [1][1][2][1][RTW89_WW][65] = 48,
- [1][1][2][1][RTW89_WW][69] = 48,
- [1][1][2][1][RTW89_WW][73] = 48,
- [1][1][2][1][RTW89_WW][76] = 48,
- [1][1][2][1][RTW89_WW][80] = 48,
- [1][1][2][1][RTW89_WW][84] = 48,
- [1][1][2][1][RTW89_WW][88] = 48,
- [1][1][2][1][RTW89_WW][91] = 48,
- [1][1][2][1][RTW89_WW][95] = 48,
- [1][1][2][1][RTW89_WW][99] = 48,
- [1][1][2][1][RTW89_WW][103] = 48,
- [1][1][2][1][RTW89_WW][106] = 48,
+ [1][1][2][1][RTW89_WW][1] = 10,
+ [1][1][2][1][RTW89_WW][5] = 10,
+ [1][1][2][1][RTW89_WW][9] = 10,
+ [1][1][2][1][RTW89_WW][13] = 10,
+ [1][1][2][1][RTW89_WW][16] = 10,
+ [1][1][2][1][RTW89_WW][20] = 10,
+ [1][1][2][1][RTW89_WW][24] = 10,
+ [1][1][2][1][RTW89_WW][28] = 10,
+ [1][1][2][1][RTW89_WW][31] = 10,
+ [1][1][2][1][RTW89_WW][35] = 10,
+ [1][1][2][1][RTW89_WW][39] = 10,
+ [1][1][2][1][RTW89_WW][43] = 10,
+ [1][1][2][1][RTW89_WW][46] = 12,
+ [1][1][2][1][RTW89_WW][50] = 12,
+ [1][1][2][1][RTW89_WW][54] = 10,
+ [1][1][2][1][RTW89_WW][58] = 10,
+ [1][1][2][1][RTW89_WW][61] = 10,
+ [1][1][2][1][RTW89_WW][65] = 10,
+ [1][1][2][1][RTW89_WW][69] = 10,
+ [1][1][2][1][RTW89_WW][73] = 10,
+ [1][1][2][1][RTW89_WW][76] = 10,
+ [1][1][2][1][RTW89_WW][80] = 10,
+ [1][1][2][1][RTW89_WW][84] = 10,
+ [1][1][2][1][RTW89_WW][88] = 10,
+ [1][1][2][1][RTW89_WW][91] = 12,
+ [1][1][2][1][RTW89_WW][95] = 10,
+ [1][1][2][1][RTW89_WW][99] = 10,
+ [1][1][2][1][RTW89_WW][103] = 10,
+ [1][1][2][1][RTW89_WW][106] = 12,
[1][1][2][1][RTW89_WW][110] = 0,
[1][1][2][1][RTW89_WW][114] = 0,
[1][1][2][1][RTW89_WW][118] = 0,
- [2][0][2][0][RTW89_WW][3] = 64,
- [2][0][2][0][RTW89_WW][11] = 64,
- [2][0][2][0][RTW89_WW][18] = 64,
- [2][0][2][0][RTW89_WW][26] = 64,
- [2][0][2][0][RTW89_WW][33] = 64,
- [2][0][2][0][RTW89_WW][41] = 64,
- [2][0][2][0][RTW89_WW][48] = 64,
- [2][0][2][0][RTW89_WW][56] = 64,
- [2][0][2][0][RTW89_WW][63] = 64,
- [2][0][2][0][RTW89_WW][71] = 64,
- [2][0][2][0][RTW89_WW][78] = 64,
- [2][0][2][0][RTW89_WW][86] = 64,
- [2][0][2][0][RTW89_WW][93] = 64,
- [2][0][2][0][RTW89_WW][101] = 64,
+ [2][0][2][0][RTW89_WW][3] = 46,
+ [2][0][2][0][RTW89_WW][11] = 46,
+ [2][0][2][0][RTW89_WW][18] = 46,
+ [2][0][2][0][RTW89_WW][26] = 46,
+ [2][0][2][0][RTW89_WW][33] = 46,
+ [2][0][2][0][RTW89_WW][41] = 46,
+ [2][0][2][0][RTW89_WW][48] = 46,
+ [2][0][2][0][RTW89_WW][56] = 46,
+ [2][0][2][0][RTW89_WW][63] = 46,
+ [2][0][2][0][RTW89_WW][71] = 46,
+ [2][0][2][0][RTW89_WW][78] = 46,
+ [2][0][2][0][RTW89_WW][86] = 46,
+ [2][0][2][0][RTW89_WW][93] = 46,
+ [2][0][2][0][RTW89_WW][101] = 44,
[2][0][2][0][RTW89_WW][108] = 0,
[2][0][2][0][RTW89_WW][116] = 0,
- [2][1][2][0][RTW89_WW][3] = 52,
- [2][1][2][0][RTW89_WW][11] = 52,
- [2][1][2][0][RTW89_WW][18] = 52,
- [2][1][2][0][RTW89_WW][26] = 52,
- [2][1][2][0][RTW89_WW][33] = 52,
- [2][1][2][0][RTW89_WW][41] = 52,
- [2][1][2][0][RTW89_WW][48] = 52,
- [2][1][2][0][RTW89_WW][56] = 52,
- [2][1][2][0][RTW89_WW][63] = 52,
- [2][1][2][0][RTW89_WW][71] = 52,
- [2][1][2][0][RTW89_WW][78] = 52,
- [2][1][2][0][RTW89_WW][86] = 52,
- [2][1][2][0][RTW89_WW][93] = 52,
- [2][1][2][0][RTW89_WW][101] = 52,
+ [2][1][2][0][RTW89_WW][3] = 22,
+ [2][1][2][0][RTW89_WW][11] = 20,
+ [2][1][2][0][RTW89_WW][18] = 20,
+ [2][1][2][0][RTW89_WW][26] = 20,
+ [2][1][2][0][RTW89_WW][33] = 20,
+ [2][1][2][0][RTW89_WW][41] = 22,
+ [2][1][2][0][RTW89_WW][48] = 22,
+ [2][1][2][0][RTW89_WW][56] = 20,
+ [2][1][2][0][RTW89_WW][63] = 22,
+ [2][1][2][0][RTW89_WW][71] = 20,
+ [2][1][2][0][RTW89_WW][78] = 20,
+ [2][1][2][0][RTW89_WW][86] = 20,
+ [2][1][2][0][RTW89_WW][93] = 22,
+ [2][1][2][0][RTW89_WW][101] = 22,
[2][1][2][0][RTW89_WW][108] = 0,
[2][1][2][0][RTW89_WW][116] = 0,
- [2][1][2][1][RTW89_WW][3] = 40,
- [2][1][2][1][RTW89_WW][11] = 40,
- [2][1][2][1][RTW89_WW][18] = 40,
- [2][1][2][1][RTW89_WW][26] = 40,
- [2][1][2][1][RTW89_WW][33] = 40,
- [2][1][2][1][RTW89_WW][41] = 40,
- [2][1][2][1][RTW89_WW][48] = 40,
- [2][1][2][1][RTW89_WW][56] = 40,
- [2][1][2][1][RTW89_WW][63] = 40,
- [2][1][2][1][RTW89_WW][71] = 40,
- [2][1][2][1][RTW89_WW][78] = 40,
- [2][1][2][1][RTW89_WW][86] = 40,
- [2][1][2][1][RTW89_WW][93] = 40,
- [2][1][2][1][RTW89_WW][101] = 40,
+ [2][1][2][1][RTW89_WW][3] = 22,
+ [2][1][2][1][RTW89_WW][11] = 20,
+ [2][1][2][1][RTW89_WW][18] = 20,
+ [2][1][2][1][RTW89_WW][26] = 20,
+ [2][1][2][1][RTW89_WW][33] = 20,
+ [2][1][2][1][RTW89_WW][41] = 22,
+ [2][1][2][1][RTW89_WW][48] = 22,
+ [2][1][2][1][RTW89_WW][56] = 20,
+ [2][1][2][1][RTW89_WW][63] = 22,
+ [2][1][2][1][RTW89_WW][71] = 20,
+ [2][1][2][1][RTW89_WW][78] = 20,
+ [2][1][2][1][RTW89_WW][86] = 20,
+ [2][1][2][1][RTW89_WW][93] = 22,
+ [2][1][2][1][RTW89_WW][101] = 22,
[2][1][2][1][RTW89_WW][108] = 0,
[2][1][2][1][RTW89_WW][116] = 0,
- [3][0][2][0][RTW89_WW][7] = 56,
- [3][0][2][0][RTW89_WW][22] = 56,
- [3][0][2][0][RTW89_WW][37] = 56,
- [3][0][2][0][RTW89_WW][52] = 56,
- [3][0][2][0][RTW89_WW][67] = 56,
- [3][0][2][0][RTW89_WW][82] = 56,
- [3][0][2][0][RTW89_WW][97] = 56,
+ [3][0][2][0][RTW89_WW][7] = 38,
+ [3][0][2][0][RTW89_WW][22] = 38,
+ [3][0][2][0][RTW89_WW][37] = 38,
+ [3][0][2][0][RTW89_WW][52] = 54,
+ [3][0][2][0][RTW89_WW][67] = 54,
+ [3][0][2][0][RTW89_WW][82] = 26,
+ [3][0][2][0][RTW89_WW][97] = 26,
[3][0][2][0][RTW89_WW][112] = 0,
- [3][1][2][0][RTW89_WW][7] = 44,
- [3][1][2][0][RTW89_WW][22] = 44,
- [3][1][2][0][RTW89_WW][37] = 44,
- [3][1][2][0][RTW89_WW][52] = 44,
- [3][1][2][0][RTW89_WW][67] = 44,
- [3][1][2][0][RTW89_WW][82] = 44,
- [3][1][2][0][RTW89_WW][97] = 44,
+ [3][1][2][0][RTW89_WW][7] = 32,
+ [3][1][2][0][RTW89_WW][22] = 30,
+ [3][1][2][0][RTW89_WW][37] = 30,
+ [3][1][2][0][RTW89_WW][52] = 30,
+ [3][1][2][0][RTW89_WW][67] = 32,
+ [3][1][2][0][RTW89_WW][82] = 24,
+ [3][1][2][0][RTW89_WW][97] = 14,
[3][1][2][0][RTW89_WW][112] = 0,
[3][1][2][1][RTW89_WW][7] = 32,
- [3][1][2][1][RTW89_WW][22] = 32,
- [3][1][2][1][RTW89_WW][37] = 32,
- [3][1][2][1][RTW89_WW][52] = 32,
+ [3][1][2][1][RTW89_WW][22] = 30,
+ [3][1][2][1][RTW89_WW][37] = 30,
+ [3][1][2][1][RTW89_WW][52] = 30,
[3][1][2][1][RTW89_WW][67] = 32,
- [3][1][2][1][RTW89_WW][82] = 32,
- [3][1][2][1][RTW89_WW][97] = 32,
+ [3][1][2][1][RTW89_WW][82] = 24,
+ [3][1][2][1][RTW89_WW][97] = 14,
[3][1][2][1][RTW89_WW][112] = 0,
- [0][0][1][0][RTW89_FCC][0] = 72,
- [0][0][1][0][RTW89_FCC][2] = 72,
- [0][0][1][0][RTW89_FCC][4] = 72,
- [0][0][1][0][RTW89_FCC][6] = 72,
- [0][0][1][0][RTW89_FCC][8] = 72,
- [0][0][1][0][RTW89_FCC][10] = 72,
- [0][0][1][0][RTW89_FCC][12] = 72,
- [0][0][1][0][RTW89_FCC][14] = 72,
- [0][0][1][0][RTW89_FCC][15] = 72,
- [0][0][1][0][RTW89_FCC][17] = 72,
- [0][0][1][0][RTW89_FCC][19] = 72,
- [0][0][1][0][RTW89_FCC][21] = 72,
- [0][0][1][0][RTW89_FCC][23] = 72,
- [0][0][1][0][RTW89_FCC][25] = 72,
- [0][0][1][0][RTW89_FCC][27] = 72,
- [0][0][1][0][RTW89_FCC][29] = 72,
- [0][0][1][0][RTW89_FCC][30] = 72,
- [0][0][1][0][RTW89_FCC][32] = 72,
- [0][0][1][0][RTW89_FCC][34] = 72,
- [0][0][1][0][RTW89_FCC][36] = 72,
- [0][0][1][0][RTW89_FCC][38] = 72,
- [0][0][1][0][RTW89_FCC][40] = 72,
- [0][0][1][0][RTW89_FCC][42] = 72,
- [0][0][1][0][RTW89_FCC][44] = 72,
- [0][0][1][0][RTW89_FCC][45] = 72,
- [0][0][1][0][RTW89_FCC][47] = 72,
- [0][0][1][0][RTW89_FCC][49] = 72,
- [0][0][1][0][RTW89_FCC][51] = 72,
- [0][0][1][0][RTW89_FCC][53] = 72,
- [0][0][1][0][RTW89_FCC][55] = 72,
- [0][0][1][0][RTW89_FCC][57] = 72,
- [0][0][1][0][RTW89_FCC][59] = 72,
- [0][0][1][0][RTW89_FCC][60] = 72,
- [0][0][1][0][RTW89_FCC][62] = 72,
- [0][0][1][0][RTW89_FCC][64] = 72,
- [0][0][1][0][RTW89_FCC][66] = 72,
- [0][0][1][0][RTW89_FCC][68] = 72,
- [0][0][1][0][RTW89_FCC][70] = 72,
- [0][0][1][0][RTW89_FCC][72] = 72,
- [0][0][1][0][RTW89_FCC][74] = 72,
- [0][0][1][0][RTW89_FCC][75] = 72,
- [0][0][1][0][RTW89_FCC][77] = 72,
- [0][0][1][0][RTW89_FCC][79] = 72,
- [0][0][1][0][RTW89_FCC][81] = 72,
- [0][0][1][0][RTW89_FCC][83] = 72,
- [0][0][1][0][RTW89_FCC][85] = 72,
- [0][0][1][0][RTW89_FCC][87] = 72,
- [0][0][1][0][RTW89_FCC][89] = 72,
- [0][0][1][0][RTW89_FCC][90] = 72,
- [0][0][1][0][RTW89_FCC][92] = 72,
- [0][0][1][0][RTW89_FCC][94] = 72,
- [0][0][1][0][RTW89_FCC][96] = 72,
- [0][0][1][0][RTW89_FCC][98] = 72,
- [0][0][1][0][RTW89_FCC][100] = 72,
- [0][0][1][0][RTW89_FCC][102] = 72,
- [0][0][1][0][RTW89_FCC][104] = 72,
- [0][0][1][0][RTW89_FCC][105] = 72,
- [0][0][1][0][RTW89_FCC][107] = 72,
- [0][0][1][0][RTW89_FCC][109] = 72,
+ [0][0][1][0][RTW89_FCC][0] = 24,
+ [0][0][1][0][RTW89_ETSI][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 24,
+ [0][0][1][0][RTW89_FCC][2] = 22,
+ [0][0][1][0][RTW89_ETSI][2] = 66,
+ [0][0][1][0][RTW89_KCC][2] = 24,
+ [0][0][1][0][RTW89_FCC][4] = 22,
+ [0][0][1][0][RTW89_ETSI][4] = 66,
+ [0][0][1][0][RTW89_KCC][4] = 24,
+ [0][0][1][0][RTW89_FCC][6] = 22,
+ [0][0][1][0][RTW89_ETSI][6] = 66,
+ [0][0][1][0][RTW89_KCC][6] = 24,
+ [0][0][1][0][RTW89_FCC][8] = 22,
+ [0][0][1][0][RTW89_ETSI][8] = 66,
+ [0][0][1][0][RTW89_KCC][8] = 24,
+ [0][0][1][0][RTW89_FCC][10] = 22,
+ [0][0][1][0][RTW89_ETSI][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 24,
+ [0][0][1][0][RTW89_FCC][12] = 22,
+ [0][0][1][0][RTW89_ETSI][12] = 66,
+ [0][0][1][0][RTW89_KCC][12] = 24,
+ [0][0][1][0][RTW89_FCC][14] = 22,
+ [0][0][1][0][RTW89_ETSI][14] = 66,
+ [0][0][1][0][RTW89_KCC][14] = 24,
+ [0][0][1][0][RTW89_FCC][15] = 22,
+ [0][0][1][0][RTW89_ETSI][15] = 66,
+ [0][0][1][0][RTW89_KCC][15] = 24,
+ [0][0][1][0][RTW89_FCC][17] = 22,
+ [0][0][1][0][RTW89_ETSI][17] = 66,
+ [0][0][1][0][RTW89_KCC][17] = 24,
+ [0][0][1][0][RTW89_FCC][19] = 22,
+ [0][0][1][0][RTW89_ETSI][19] = 66,
+ [0][0][1][0][RTW89_KCC][19] = 24,
+ [0][0][1][0][RTW89_FCC][21] = 22,
+ [0][0][1][0][RTW89_ETSI][21] = 66,
+ [0][0][1][0][RTW89_KCC][21] = 24,
+ [0][0][1][0][RTW89_FCC][23] = 22,
+ [0][0][1][0][RTW89_ETSI][23] = 66,
+ [0][0][1][0][RTW89_KCC][23] = 24,
+ [0][0][1][0][RTW89_FCC][25] = 22,
+ [0][0][1][0][RTW89_ETSI][25] = 66,
+ [0][0][1][0][RTW89_KCC][25] = 24,
+ [0][0][1][0][RTW89_FCC][27] = 22,
+ [0][0][1][0][RTW89_ETSI][27] = 66,
+ [0][0][1][0][RTW89_KCC][27] = 24,
+ [0][0][1][0][RTW89_FCC][29] = 22,
+ [0][0][1][0][RTW89_ETSI][29] = 66,
+ [0][0][1][0][RTW89_KCC][29] = 24,
+ [0][0][1][0][RTW89_FCC][30] = 22,
+ [0][0][1][0][RTW89_ETSI][30] = 66,
+ [0][0][1][0][RTW89_KCC][30] = 24,
+ [0][0][1][0][RTW89_FCC][32] = 22,
+ [0][0][1][0][RTW89_ETSI][32] = 66,
+ [0][0][1][0][RTW89_KCC][32] = 24,
+ [0][0][1][0][RTW89_FCC][34] = 22,
+ [0][0][1][0][RTW89_ETSI][34] = 66,
+ [0][0][1][0][RTW89_KCC][34] = 24,
+ [0][0][1][0][RTW89_FCC][36] = 22,
+ [0][0][1][0][RTW89_ETSI][36] = 66,
+ [0][0][1][0][RTW89_KCC][36] = 24,
+ [0][0][1][0][RTW89_FCC][38] = 22,
+ [0][0][1][0][RTW89_ETSI][38] = 66,
+ [0][0][1][0][RTW89_KCC][38] = 24,
+ [0][0][1][0][RTW89_FCC][40] = 22,
+ [0][0][1][0][RTW89_ETSI][40] = 66,
+ [0][0][1][0][RTW89_KCC][40] = 24,
+ [0][0][1][0][RTW89_FCC][42] = 22,
+ [0][0][1][0][RTW89_ETSI][42] = 66,
+ [0][0][1][0][RTW89_KCC][42] = 24,
+ [0][0][1][0][RTW89_FCC][44] = 22,
+ [0][0][1][0][RTW89_ETSI][44] = 66,
+ [0][0][1][0][RTW89_KCC][44] = 24,
+ [0][0][1][0][RTW89_FCC][45] = 22,
+ [0][0][1][0][RTW89_ETSI][45] = 127,
+ [0][0][1][0][RTW89_KCC][45] = 24,
+ [0][0][1][0][RTW89_FCC][47] = 22,
+ [0][0][1][0][RTW89_ETSI][47] = 127,
+ [0][0][1][0][RTW89_KCC][47] = 24,
+ [0][0][1][0][RTW89_FCC][49] = 24,
+ [0][0][1][0][RTW89_ETSI][49] = 127,
+ [0][0][1][0][RTW89_KCC][49] = 24,
+ [0][0][1][0][RTW89_FCC][51] = 22,
+ [0][0][1][0][RTW89_ETSI][51] = 127,
+ [0][0][1][0][RTW89_KCC][51] = 24,
+ [0][0][1][0][RTW89_FCC][53] = 22,
+ [0][0][1][0][RTW89_ETSI][53] = 127,
+ [0][0][1][0][RTW89_KCC][53] = 24,
+ [0][0][1][0][RTW89_FCC][55] = 22,
+ [0][0][1][0][RTW89_ETSI][55] = 127,
+ [0][0][1][0][RTW89_KCC][55] = 26,
+ [0][0][1][0][RTW89_FCC][57] = 22,
+ [0][0][1][0][RTW89_ETSI][57] = 127,
+ [0][0][1][0][RTW89_KCC][57] = 26,
+ [0][0][1][0][RTW89_FCC][59] = 22,
+ [0][0][1][0][RTW89_ETSI][59] = 127,
+ [0][0][1][0][RTW89_KCC][59] = 26,
+ [0][0][1][0][RTW89_FCC][60] = 22,
+ [0][0][1][0][RTW89_ETSI][60] = 127,
+ [0][0][1][0][RTW89_KCC][60] = 26,
+ [0][0][1][0][RTW89_FCC][62] = 22,
+ [0][0][1][0][RTW89_ETSI][62] = 127,
+ [0][0][1][0][RTW89_KCC][62] = 26,
+ [0][0][1][0][RTW89_FCC][64] = 22,
+ [0][0][1][0][RTW89_ETSI][64] = 127,
+ [0][0][1][0][RTW89_KCC][64] = 26,
+ [0][0][1][0][RTW89_FCC][66] = 22,
+ [0][0][1][0][RTW89_ETSI][66] = 127,
+ [0][0][1][0][RTW89_KCC][66] = 26,
+ [0][0][1][0][RTW89_FCC][68] = 22,
+ [0][0][1][0][RTW89_ETSI][68] = 127,
+ [0][0][1][0][RTW89_KCC][68] = 26,
+ [0][0][1][0][RTW89_FCC][70] = 24,
+ [0][0][1][0][RTW89_ETSI][70] = 127,
+ [0][0][1][0][RTW89_KCC][70] = 26,
+ [0][0][1][0][RTW89_FCC][72] = 22,
+ [0][0][1][0][RTW89_ETSI][72] = 127,
+ [0][0][1][0][RTW89_KCC][72] = 26,
+ [0][0][1][0][RTW89_FCC][74] = 22,
+ [0][0][1][0][RTW89_ETSI][74] = 127,
+ [0][0][1][0][RTW89_KCC][74] = 26,
+ [0][0][1][0][RTW89_FCC][75] = 22,
+ [0][0][1][0][RTW89_ETSI][75] = 127,
+ [0][0][1][0][RTW89_KCC][75] = 26,
+ [0][0][1][0][RTW89_FCC][77] = 22,
+ [0][0][1][0][RTW89_ETSI][77] = 127,
+ [0][0][1][0][RTW89_KCC][77] = 26,
+ [0][0][1][0][RTW89_FCC][79] = 22,
+ [0][0][1][0][RTW89_ETSI][79] = 127,
+ [0][0][1][0][RTW89_KCC][79] = 26,
+ [0][0][1][0][RTW89_FCC][81] = 22,
+ [0][0][1][0][RTW89_ETSI][81] = 127,
+ [0][0][1][0][RTW89_KCC][81] = 26,
+ [0][0][1][0][RTW89_FCC][83] = 22,
+ [0][0][1][0][RTW89_ETSI][83] = 127,
+ [0][0][1][0][RTW89_KCC][83] = 32,
+ [0][0][1][0][RTW89_FCC][85] = 22,
+ [0][0][1][0][RTW89_ETSI][85] = 127,
+ [0][0][1][0][RTW89_KCC][85] = 32,
+ [0][0][1][0][RTW89_FCC][87] = 22,
+ [0][0][1][0][RTW89_ETSI][87] = 127,
+ [0][0][1][0][RTW89_KCC][87] = 32,
+ [0][0][1][0][RTW89_FCC][89] = 22,
+ [0][0][1][0][RTW89_ETSI][89] = 127,
+ [0][0][1][0][RTW89_KCC][89] = 32,
+ [0][0][1][0][RTW89_FCC][90] = 22,
+ [0][0][1][0][RTW89_ETSI][90] = 127,
+ [0][0][1][0][RTW89_KCC][90] = 32,
+ [0][0][1][0][RTW89_FCC][92] = 22,
+ [0][0][1][0][RTW89_ETSI][92] = 127,
+ [0][0][1][0][RTW89_KCC][92] = 32,
+ [0][0][1][0][RTW89_FCC][94] = 22,
+ [0][0][1][0][RTW89_ETSI][94] = 127,
+ [0][0][1][0][RTW89_KCC][94] = 32,
+ [0][0][1][0][RTW89_FCC][96] = 22,
+ [0][0][1][0][RTW89_ETSI][96] = 127,
+ [0][0][1][0][RTW89_KCC][96] = 32,
+ [0][0][1][0][RTW89_FCC][98] = 22,
+ [0][0][1][0][RTW89_ETSI][98] = 127,
+ [0][0][1][0][RTW89_KCC][98] = 32,
+ [0][0][1][0][RTW89_FCC][100] = 22,
+ [0][0][1][0][RTW89_ETSI][100] = 127,
+ [0][0][1][0][RTW89_KCC][100] = 32,
+ [0][0][1][0][RTW89_FCC][102] = 22,
+ [0][0][1][0][RTW89_ETSI][102] = 127,
+ [0][0][1][0][RTW89_KCC][102] = 32,
+ [0][0][1][0][RTW89_FCC][104] = 22,
+ [0][0][1][0][RTW89_ETSI][104] = 127,
+ [0][0][1][0][RTW89_KCC][104] = 32,
+ [0][0][1][0][RTW89_FCC][105] = 22,
+ [0][0][1][0][RTW89_ETSI][105] = 127,
+ [0][0][1][0][RTW89_KCC][105] = 32,
+ [0][0][1][0][RTW89_FCC][107] = 24,
+ [0][0][1][0][RTW89_ETSI][107] = 127,
+ [0][0][1][0][RTW89_KCC][107] = 32,
+ [0][0][1][0][RTW89_FCC][109] = 24,
+ [0][0][1][0][RTW89_ETSI][109] = 127,
+ [0][0][1][0][RTW89_KCC][109] = 32,
[0][0][1][0][RTW89_FCC][111] = 127,
+ [0][0][1][0][RTW89_ETSI][111] = 127,
+ [0][0][1][0][RTW89_KCC][111] = 127,
[0][0][1][0][RTW89_FCC][113] = 127,
+ [0][0][1][0][RTW89_ETSI][113] = 127,
+ [0][0][1][0][RTW89_KCC][113] = 127,
[0][0][1][0][RTW89_FCC][115] = 127,
+ [0][0][1][0][RTW89_ETSI][115] = 127,
+ [0][0][1][0][RTW89_KCC][115] = 127,
[0][0][1][0][RTW89_FCC][117] = 127,
+ [0][0][1][0][RTW89_ETSI][117] = 127,
+ [0][0][1][0][RTW89_KCC][117] = 127,
[0][0][1][0][RTW89_FCC][119] = 127,
- [0][1][1][0][RTW89_FCC][0] = 60,
- [0][1][1][0][RTW89_FCC][2] = 60,
- [0][1][1][0][RTW89_FCC][4] = 60,
- [0][1][1][0][RTW89_FCC][6] = 60,
- [0][1][1][0][RTW89_FCC][8] = 60,
- [0][1][1][0][RTW89_FCC][10] = 60,
- [0][1][1][0][RTW89_FCC][12] = 60,
- [0][1][1][0][RTW89_FCC][14] = 60,
- [0][1][1][0][RTW89_FCC][15] = 60,
- [0][1][1][0][RTW89_FCC][17] = 60,
- [0][1][1][0][RTW89_FCC][19] = 60,
- [0][1][1][0][RTW89_FCC][21] = 60,
- [0][1][1][0][RTW89_FCC][23] = 60,
- [0][1][1][0][RTW89_FCC][25] = 60,
- [0][1][1][0][RTW89_FCC][27] = 60,
- [0][1][1][0][RTW89_FCC][29] = 60,
- [0][1][1][0][RTW89_FCC][30] = 60,
- [0][1][1][0][RTW89_FCC][32] = 60,
- [0][1][1][0][RTW89_FCC][34] = 60,
- [0][1][1][0][RTW89_FCC][36] = 60,
- [0][1][1][0][RTW89_FCC][38] = 60,
- [0][1][1][0][RTW89_FCC][40] = 60,
- [0][1][1][0][RTW89_FCC][42] = 60,
- [0][1][1][0][RTW89_FCC][44] = 60,
- [0][1][1][0][RTW89_FCC][45] = 60,
- [0][1][1][0][RTW89_FCC][47] = 60,
- [0][1][1][0][RTW89_FCC][49] = 60,
- [0][1][1][0][RTW89_FCC][51] = 60,
- [0][1][1][0][RTW89_FCC][53] = 60,
- [0][1][1][0][RTW89_FCC][55] = 60,
- [0][1][1][0][RTW89_FCC][57] = 60,
- [0][1][1][0][RTW89_FCC][59] = 60,
- [0][1][1][0][RTW89_FCC][60] = 60,
- [0][1][1][0][RTW89_FCC][62] = 60,
- [0][1][1][0][RTW89_FCC][64] = 60,
- [0][1][1][0][RTW89_FCC][66] = 60,
- [0][1][1][0][RTW89_FCC][68] = 60,
- [0][1][1][0][RTW89_FCC][70] = 60,
- [0][1][1][0][RTW89_FCC][72] = 60,
- [0][1][1][0][RTW89_FCC][74] = 60,
- [0][1][1][0][RTW89_FCC][75] = 60,
- [0][1][1][0][RTW89_FCC][77] = 60,
- [0][1][1][0][RTW89_FCC][79] = 60,
- [0][1][1][0][RTW89_FCC][81] = 60,
- [0][1][1][0][RTW89_FCC][83] = 60,
- [0][1][1][0][RTW89_FCC][85] = 60,
- [0][1][1][0][RTW89_FCC][87] = 60,
- [0][1][1][0][RTW89_FCC][89] = 60,
- [0][1][1][0][RTW89_FCC][90] = 60,
- [0][1][1][0][RTW89_FCC][92] = 60,
- [0][1][1][0][RTW89_FCC][94] = 60,
- [0][1][1][0][RTW89_FCC][96] = 60,
- [0][1][1][0][RTW89_FCC][98] = 60,
- [0][1][1][0][RTW89_FCC][100] = 60,
- [0][1][1][0][RTW89_FCC][102] = 60,
- [0][1][1][0][RTW89_FCC][104] = 60,
- [0][1][1][0][RTW89_FCC][105] = 60,
- [0][1][1][0][RTW89_FCC][107] = 60,
- [0][1][1][0][RTW89_FCC][109] = 60,
+ [0][0][1][0][RTW89_ETSI][119] = 127,
+ [0][0][1][0][RTW89_KCC][119] = 127,
+ [0][1][1][0][RTW89_FCC][0] = -2,
+ [0][1][1][0][RTW89_ETSI][0] = 54,
+ [0][1][1][0][RTW89_KCC][0] = 12,
+ [0][1][1][0][RTW89_FCC][2] = -4,
+ [0][1][1][0][RTW89_ETSI][2] = 54,
+ [0][1][1][0][RTW89_KCC][2] = 12,
+ [0][1][1][0][RTW89_FCC][4] = -4,
+ [0][1][1][0][RTW89_ETSI][4] = 54,
+ [0][1][1][0][RTW89_KCC][4] = 12,
+ [0][1][1][0][RTW89_FCC][6] = -4,
+ [0][1][1][0][RTW89_ETSI][6] = 54,
+ [0][1][1][0][RTW89_KCC][6] = 12,
+ [0][1][1][0][RTW89_FCC][8] = -4,
+ [0][1][1][0][RTW89_ETSI][8] = 54,
+ [0][1][1][0][RTW89_KCC][8] = 12,
+ [0][1][1][0][RTW89_FCC][10] = -4,
+ [0][1][1][0][RTW89_ETSI][10] = 54,
+ [0][1][1][0][RTW89_KCC][10] = 12,
+ [0][1][1][0][RTW89_FCC][12] = -4,
+ [0][1][1][0][RTW89_ETSI][12] = 54,
+ [0][1][1][0][RTW89_KCC][12] = 12,
+ [0][1][1][0][RTW89_FCC][14] = -4,
+ [0][1][1][0][RTW89_ETSI][14] = 54,
+ [0][1][1][0][RTW89_KCC][14] = 12,
+ [0][1][1][0][RTW89_FCC][15] = -4,
+ [0][1][1][0][RTW89_ETSI][15] = 54,
+ [0][1][1][0][RTW89_KCC][15] = 12,
+ [0][1][1][0][RTW89_FCC][17] = -4,
+ [0][1][1][0][RTW89_ETSI][17] = 54,
+ [0][1][1][0][RTW89_KCC][17] = 12,
+ [0][1][1][0][RTW89_FCC][19] = -4,
+ [0][1][1][0][RTW89_ETSI][19] = 54,
+ [0][1][1][0][RTW89_KCC][19] = 12,
+ [0][1][1][0][RTW89_FCC][21] = -4,
+ [0][1][1][0][RTW89_ETSI][21] = 54,
+ [0][1][1][0][RTW89_KCC][21] = 12,
+ [0][1][1][0][RTW89_FCC][23] = -4,
+ [0][1][1][0][RTW89_ETSI][23] = 54,
+ [0][1][1][0][RTW89_KCC][23] = 12,
+ [0][1][1][0][RTW89_FCC][25] = -4,
+ [0][1][1][0][RTW89_ETSI][25] = 54,
+ [0][1][1][0][RTW89_KCC][25] = 12,
+ [0][1][1][0][RTW89_FCC][27] = -4,
+ [0][1][1][0][RTW89_ETSI][27] = 54,
+ [0][1][1][0][RTW89_KCC][27] = 12,
+ [0][1][1][0][RTW89_FCC][29] = -4,
+ [0][1][1][0][RTW89_ETSI][29] = 54,
+ [0][1][1][0][RTW89_KCC][29] = 12,
+ [0][1][1][0][RTW89_FCC][30] = -4,
+ [0][1][1][0][RTW89_ETSI][30] = 54,
+ [0][1][1][0][RTW89_KCC][30] = 12,
+ [0][1][1][0][RTW89_FCC][32] = -4,
+ [0][1][1][0][RTW89_ETSI][32] = 54,
+ [0][1][1][0][RTW89_KCC][32] = 12,
+ [0][1][1][0][RTW89_FCC][34] = -4,
+ [0][1][1][0][RTW89_ETSI][34] = 54,
+ [0][1][1][0][RTW89_KCC][34] = 12,
+ [0][1][1][0][RTW89_FCC][36] = -4,
+ [0][1][1][0][RTW89_ETSI][36] = 54,
+ [0][1][1][0][RTW89_KCC][36] = 12,
+ [0][1][1][0][RTW89_FCC][38] = -4,
+ [0][1][1][0][RTW89_ETSI][38] = 54,
+ [0][1][1][0][RTW89_KCC][38] = 12,
+ [0][1][1][0][RTW89_FCC][40] = -4,
+ [0][1][1][0][RTW89_ETSI][40] = 54,
+ [0][1][1][0][RTW89_KCC][40] = 12,
+ [0][1][1][0][RTW89_FCC][42] = -4,
+ [0][1][1][0][RTW89_ETSI][42] = 54,
+ [0][1][1][0][RTW89_KCC][42] = 12,
+ [0][1][1][0][RTW89_FCC][44] = -2,
+ [0][1][1][0][RTW89_ETSI][44] = 54,
+ [0][1][1][0][RTW89_KCC][44] = 12,
+ [0][1][1][0][RTW89_FCC][45] = -2,
+ [0][1][1][0][RTW89_ETSI][45] = 127,
+ [0][1][1][0][RTW89_KCC][45] = 12,
+ [0][1][1][0][RTW89_FCC][47] = -2,
+ [0][1][1][0][RTW89_ETSI][47] = 127,
+ [0][1][1][0][RTW89_KCC][47] = 12,
+ [0][1][1][0][RTW89_FCC][49] = -2,
+ [0][1][1][0][RTW89_ETSI][49] = 127,
+ [0][1][1][0][RTW89_KCC][49] = 12,
+ [0][1][1][0][RTW89_FCC][51] = -2,
+ [0][1][1][0][RTW89_ETSI][51] = 127,
+ [0][1][1][0][RTW89_KCC][51] = 12,
+ [0][1][1][0][RTW89_FCC][53] = -2,
+ [0][1][1][0][RTW89_ETSI][53] = 127,
+ [0][1][1][0][RTW89_KCC][53] = 12,
+ [0][1][1][0][RTW89_FCC][55] = -2,
+ [0][1][1][0][RTW89_ETSI][55] = 127,
+ [0][1][1][0][RTW89_KCC][55] = 12,
+ [0][1][1][0][RTW89_FCC][57] = -2,
+ [0][1][1][0][RTW89_ETSI][57] = 127,
+ [0][1][1][0][RTW89_KCC][57] = 12,
+ [0][1][1][0][RTW89_FCC][59] = -2,
+ [0][1][1][0][RTW89_ETSI][59] = 127,
+ [0][1][1][0][RTW89_KCC][59] = 12,
+ [0][1][1][0][RTW89_FCC][60] = -2,
+ [0][1][1][0][RTW89_ETSI][60] = 127,
+ [0][1][1][0][RTW89_KCC][60] = 12,
+ [0][1][1][0][RTW89_FCC][62] = -2,
+ [0][1][1][0][RTW89_ETSI][62] = 127,
+ [0][1][1][0][RTW89_KCC][62] = 12,
+ [0][1][1][0][RTW89_FCC][64] = -2,
+ [0][1][1][0][RTW89_ETSI][64] = 127,
+ [0][1][1][0][RTW89_KCC][64] = 12,
+ [0][1][1][0][RTW89_FCC][66] = -2,
+ [0][1][1][0][RTW89_ETSI][66] = 127,
+ [0][1][1][0][RTW89_KCC][66] = 12,
+ [0][1][1][0][RTW89_FCC][68] = -2,
+ [0][1][1][0][RTW89_ETSI][68] = 127,
+ [0][1][1][0][RTW89_KCC][68] = 12,
+ [0][1][1][0][RTW89_FCC][70] = -2,
+ [0][1][1][0][RTW89_ETSI][70] = 127,
+ [0][1][1][0][RTW89_KCC][70] = 12,
+ [0][1][1][0][RTW89_FCC][72] = -2,
+ [0][1][1][0][RTW89_ETSI][72] = 127,
+ [0][1][1][0][RTW89_KCC][72] = 12,
+ [0][1][1][0][RTW89_FCC][74] = -2,
+ [0][1][1][0][RTW89_ETSI][74] = 127,
+ [0][1][1][0][RTW89_KCC][74] = 12,
+ [0][1][1][0][RTW89_FCC][75] = -2,
+ [0][1][1][0][RTW89_ETSI][75] = 127,
+ [0][1][1][0][RTW89_KCC][75] = 12,
+ [0][1][1][0][RTW89_FCC][77] = -2,
+ [0][1][1][0][RTW89_ETSI][77] = 127,
+ [0][1][1][0][RTW89_KCC][77] = 12,
+ [0][1][1][0][RTW89_FCC][79] = -2,
+ [0][1][1][0][RTW89_ETSI][79] = 127,
+ [0][1][1][0][RTW89_KCC][79] = 12,
+ [0][1][1][0][RTW89_FCC][81] = -2,
+ [0][1][1][0][RTW89_ETSI][81] = 127,
+ [0][1][1][0][RTW89_KCC][81] = 12,
+ [0][1][1][0][RTW89_FCC][83] = -2,
+ [0][1][1][0][RTW89_ETSI][83] = 127,
+ [0][1][1][0][RTW89_KCC][83] = 20,
+ [0][1][1][0][RTW89_FCC][85] = -2,
+ [0][1][1][0][RTW89_ETSI][85] = 127,
+ [0][1][1][0][RTW89_KCC][85] = 20,
+ [0][1][1][0][RTW89_FCC][87] = -2,
+ [0][1][1][0][RTW89_ETSI][87] = 127,
+ [0][1][1][0][RTW89_KCC][87] = 20,
+ [0][1][1][0][RTW89_FCC][89] = -2,
+ [0][1][1][0][RTW89_ETSI][89] = 127,
+ [0][1][1][0][RTW89_KCC][89] = 20,
+ [0][1][1][0][RTW89_FCC][90] = -2,
+ [0][1][1][0][RTW89_ETSI][90] = 127,
+ [0][1][1][0][RTW89_KCC][90] = 20,
+ [0][1][1][0][RTW89_FCC][92] = -2,
+ [0][1][1][0][RTW89_ETSI][92] = 127,
+ [0][1][1][0][RTW89_KCC][92] = 20,
+ [0][1][1][0][RTW89_FCC][94] = -2,
+ [0][1][1][0][RTW89_ETSI][94] = 127,
+ [0][1][1][0][RTW89_KCC][94] = 20,
+ [0][1][1][0][RTW89_FCC][96] = -2,
+ [0][1][1][0][RTW89_ETSI][96] = 127,
+ [0][1][1][0][RTW89_KCC][96] = 20,
+ [0][1][1][0][RTW89_FCC][98] = -2,
+ [0][1][1][0][RTW89_ETSI][98] = 127,
+ [0][1][1][0][RTW89_KCC][98] = 20,
+ [0][1][1][0][RTW89_FCC][100] = -2,
+ [0][1][1][0][RTW89_ETSI][100] = 127,
+ [0][1][1][0][RTW89_KCC][100] = 20,
+ [0][1][1][0][RTW89_FCC][102] = -2,
+ [0][1][1][0][RTW89_ETSI][102] = 127,
+ [0][1][1][0][RTW89_KCC][102] = 20,
+ [0][1][1][0][RTW89_FCC][104] = -2,
+ [0][1][1][0][RTW89_ETSI][104] = 127,
+ [0][1][1][0][RTW89_KCC][104] = 20,
+ [0][1][1][0][RTW89_FCC][105] = -2,
+ [0][1][1][0][RTW89_ETSI][105] = 127,
+ [0][1][1][0][RTW89_KCC][105] = 20,
+ [0][1][1][0][RTW89_FCC][107] = 0,
+ [0][1][1][0][RTW89_ETSI][107] = 127,
+ [0][1][1][0][RTW89_KCC][107] = 20,
+ [0][1][1][0][RTW89_FCC][109] = 0,
+ [0][1][1][0][RTW89_ETSI][109] = 127,
+ [0][1][1][0][RTW89_KCC][109] = 20,
[0][1][1][0][RTW89_FCC][111] = 127,
+ [0][1][1][0][RTW89_ETSI][111] = 127,
+ [0][1][1][0][RTW89_KCC][111] = 127,
[0][1][1][0][RTW89_FCC][113] = 127,
+ [0][1][1][0][RTW89_ETSI][113] = 127,
+ [0][1][1][0][RTW89_KCC][113] = 127,
[0][1][1][0][RTW89_FCC][115] = 127,
+ [0][1][1][0][RTW89_ETSI][115] = 127,
+ [0][1][1][0][RTW89_KCC][115] = 127,
[0][1][1][0][RTW89_FCC][117] = 127,
+ [0][1][1][0][RTW89_ETSI][117] = 127,
+ [0][1][1][0][RTW89_KCC][117] = 127,
[0][1][1][0][RTW89_FCC][119] = 127,
- [0][0][2][0][RTW89_FCC][0] = 72,
- [0][0][2][0][RTW89_FCC][2] = 72,
- [0][0][2][0][RTW89_FCC][4] = 72,
- [0][0][2][0][RTW89_FCC][6] = 72,
- [0][0][2][0][RTW89_FCC][8] = 72,
- [0][0][2][0][RTW89_FCC][10] = 72,
- [0][0][2][0][RTW89_FCC][12] = 72,
- [0][0][2][0][RTW89_FCC][14] = 72,
- [0][0][2][0][RTW89_FCC][15] = 72,
- [0][0][2][0][RTW89_FCC][17] = 72,
- [0][0][2][0][RTW89_FCC][19] = 72,
- [0][0][2][0][RTW89_FCC][21] = 72,
- [0][0][2][0][RTW89_FCC][23] = 72,
- [0][0][2][0][RTW89_FCC][25] = 72,
- [0][0][2][0][RTW89_FCC][27] = 72,
- [0][0][2][0][RTW89_FCC][29] = 72,
- [0][0][2][0][RTW89_FCC][30] = 72,
- [0][0][2][0][RTW89_FCC][32] = 72,
- [0][0][2][0][RTW89_FCC][34] = 72,
- [0][0][2][0][RTW89_FCC][36] = 72,
- [0][0][2][0][RTW89_FCC][38] = 72,
- [0][0][2][0][RTW89_FCC][40] = 72,
- [0][0][2][0][RTW89_FCC][42] = 72,
- [0][0][2][0][RTW89_FCC][44] = 72,
- [0][0][2][0][RTW89_FCC][45] = 72,
- [0][0][2][0][RTW89_FCC][47] = 72,
- [0][0][2][0][RTW89_FCC][49] = 72,
- [0][0][2][0][RTW89_FCC][51] = 72,
- [0][0][2][0][RTW89_FCC][53] = 72,
- [0][0][2][0][RTW89_FCC][55] = 72,
- [0][0][2][0][RTW89_FCC][57] = 72,
- [0][0][2][0][RTW89_FCC][59] = 72,
- [0][0][2][0][RTW89_FCC][60] = 72,
- [0][0][2][0][RTW89_FCC][62] = 72,
- [0][0][2][0][RTW89_FCC][64] = 72,
- [0][0][2][0][RTW89_FCC][66] = 72,
- [0][0][2][0][RTW89_FCC][68] = 72,
- [0][0][2][0][RTW89_FCC][70] = 72,
- [0][0][2][0][RTW89_FCC][72] = 72,
- [0][0][2][0][RTW89_FCC][74] = 72,
- [0][0][2][0][RTW89_FCC][75] = 72,
- [0][0][2][0][RTW89_FCC][77] = 72,
- [0][0][2][0][RTW89_FCC][79] = 72,
- [0][0][2][0][RTW89_FCC][81] = 72,
- [0][0][2][0][RTW89_FCC][83] = 72,
- [0][0][2][0][RTW89_FCC][85] = 72,
- [0][0][2][0][RTW89_FCC][87] = 72,
- [0][0][2][0][RTW89_FCC][89] = 72,
- [0][0][2][0][RTW89_FCC][90] = 72,
- [0][0][2][0][RTW89_FCC][92] = 72,
- [0][0][2][0][RTW89_FCC][94] = 72,
- [0][0][2][0][RTW89_FCC][96] = 72,
- [0][0][2][0][RTW89_FCC][98] = 72,
- [0][0][2][0][RTW89_FCC][100] = 72,
- [0][0][2][0][RTW89_FCC][102] = 72,
- [0][0][2][0][RTW89_FCC][104] = 72,
- [0][0][2][0][RTW89_FCC][105] = 72,
- [0][0][2][0][RTW89_FCC][107] = 72,
- [0][0][2][0][RTW89_FCC][109] = 72,
+ [0][1][1][0][RTW89_ETSI][119] = 127,
+ [0][1][1][0][RTW89_KCC][119] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 24,
+ [0][0][2][0][RTW89_ETSI][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 24,
+ [0][0][2][0][RTW89_FCC][2] = 22,
+ [0][0][2][0][RTW89_ETSI][2] = 66,
+ [0][0][2][0][RTW89_KCC][2] = 24,
+ [0][0][2][0][RTW89_FCC][4] = 22,
+ [0][0][2][0][RTW89_ETSI][4] = 66,
+ [0][0][2][0][RTW89_KCC][4] = 24,
+ [0][0][2][0][RTW89_FCC][6] = 22,
+ [0][0][2][0][RTW89_ETSI][6] = 66,
+ [0][0][2][0][RTW89_KCC][6] = 24,
+ [0][0][2][0][RTW89_FCC][8] = 22,
+ [0][0][2][0][RTW89_ETSI][8] = 66,
+ [0][0][2][0][RTW89_KCC][8] = 24,
+ [0][0][2][0][RTW89_FCC][10] = 22,
+ [0][0][2][0][RTW89_ETSI][10] = 66,
+ [0][0][2][0][RTW89_KCC][10] = 24,
+ [0][0][2][0][RTW89_FCC][12] = 22,
+ [0][0][2][0][RTW89_ETSI][12] = 66,
+ [0][0][2][0][RTW89_KCC][12] = 24,
+ [0][0][2][0][RTW89_FCC][14] = 22,
+ [0][0][2][0][RTW89_ETSI][14] = 66,
+ [0][0][2][0][RTW89_KCC][14] = 24,
+ [0][0][2][0][RTW89_FCC][15] = 22,
+ [0][0][2][0][RTW89_ETSI][15] = 66,
+ [0][0][2][0][RTW89_KCC][15] = 24,
+ [0][0][2][0][RTW89_FCC][17] = 22,
+ [0][0][2][0][RTW89_ETSI][17] = 66,
+ [0][0][2][0][RTW89_KCC][17] = 24,
+ [0][0][2][0][RTW89_FCC][19] = 22,
+ [0][0][2][0][RTW89_ETSI][19] = 66,
+ [0][0][2][0][RTW89_KCC][19] = 24,
+ [0][0][2][0][RTW89_FCC][21] = 22,
+ [0][0][2][0][RTW89_ETSI][21] = 66,
+ [0][0][2][0][RTW89_KCC][21] = 24,
+ [0][0][2][0][RTW89_FCC][23] = 22,
+ [0][0][2][0][RTW89_ETSI][23] = 66,
+ [0][0][2][0][RTW89_KCC][23] = 24,
+ [0][0][2][0][RTW89_FCC][25] = 22,
+ [0][0][2][0][RTW89_ETSI][25] = 66,
+ [0][0][2][0][RTW89_KCC][25] = 24,
+ [0][0][2][0][RTW89_FCC][27] = 22,
+ [0][0][2][0][RTW89_ETSI][27] = 66,
+ [0][0][2][0][RTW89_KCC][27] = 24,
+ [0][0][2][0][RTW89_FCC][29] = 22,
+ [0][0][2][0][RTW89_ETSI][29] = 66,
+ [0][0][2][0][RTW89_KCC][29] = 24,
+ [0][0][2][0][RTW89_FCC][30] = 22,
+ [0][0][2][0][RTW89_ETSI][30] = 66,
+ [0][0][2][0][RTW89_KCC][30] = 24,
+ [0][0][2][0][RTW89_FCC][32] = 22,
+ [0][0][2][0][RTW89_ETSI][32] = 66,
+ [0][0][2][0][RTW89_KCC][32] = 24,
+ [0][0][2][0][RTW89_FCC][34] = 22,
+ [0][0][2][0][RTW89_ETSI][34] = 66,
+ [0][0][2][0][RTW89_KCC][34] = 24,
+ [0][0][2][0][RTW89_FCC][36] = 22,
+ [0][0][2][0][RTW89_ETSI][36] = 66,
+ [0][0][2][0][RTW89_KCC][36] = 24,
+ [0][0][2][0][RTW89_FCC][38] = 22,
+ [0][0][2][0][RTW89_ETSI][38] = 66,
+ [0][0][2][0][RTW89_KCC][38] = 24,
+ [0][0][2][0][RTW89_FCC][40] = 22,
+ [0][0][2][0][RTW89_ETSI][40] = 66,
+ [0][0][2][0][RTW89_KCC][40] = 24,
+ [0][0][2][0][RTW89_FCC][42] = 22,
+ [0][0][2][0][RTW89_ETSI][42] = 66,
+ [0][0][2][0][RTW89_KCC][42] = 24,
+ [0][0][2][0][RTW89_FCC][44] = 22,
+ [0][0][2][0][RTW89_ETSI][44] = 66,
+ [0][0][2][0][RTW89_KCC][44] = 24,
+ [0][0][2][0][RTW89_FCC][45] = 22,
+ [0][0][2][0][RTW89_ETSI][45] = 127,
+ [0][0][2][0][RTW89_KCC][45] = 24,
+ [0][0][2][0][RTW89_FCC][47] = 22,
+ [0][0][2][0][RTW89_ETSI][47] = 127,
+ [0][0][2][0][RTW89_KCC][47] = 24,
+ [0][0][2][0][RTW89_FCC][49] = 24,
+ [0][0][2][0][RTW89_ETSI][49] = 127,
+ [0][0][2][0][RTW89_KCC][49] = 24,
+ [0][0][2][0][RTW89_FCC][51] = 22,
+ [0][0][2][0][RTW89_ETSI][51] = 127,
+ [0][0][2][0][RTW89_KCC][51] = 24,
+ [0][0][2][0][RTW89_FCC][53] = 22,
+ [0][0][2][0][RTW89_ETSI][53] = 127,
+ [0][0][2][0][RTW89_KCC][53] = 24,
+ [0][0][2][0][RTW89_FCC][55] = 22,
+ [0][0][2][0][RTW89_ETSI][55] = 127,
+ [0][0][2][0][RTW89_KCC][55] = 26,
+ [0][0][2][0][RTW89_FCC][57] = 22,
+ [0][0][2][0][RTW89_ETSI][57] = 127,
+ [0][0][2][0][RTW89_KCC][57] = 26,
+ [0][0][2][0][RTW89_FCC][59] = 22,
+ [0][0][2][0][RTW89_ETSI][59] = 127,
+ [0][0][2][0][RTW89_KCC][59] = 26,
+ [0][0][2][0][RTW89_FCC][60] = 22,
+ [0][0][2][0][RTW89_ETSI][60] = 127,
+ [0][0][2][0][RTW89_KCC][60] = 26,
+ [0][0][2][0][RTW89_FCC][62] = 22,
+ [0][0][2][0][RTW89_ETSI][62] = 127,
+ [0][0][2][0][RTW89_KCC][62] = 26,
+ [0][0][2][0][RTW89_FCC][64] = 22,
+ [0][0][2][0][RTW89_ETSI][64] = 127,
+ [0][0][2][0][RTW89_KCC][64] = 26,
+ [0][0][2][0][RTW89_FCC][66] = 22,
+ [0][0][2][0][RTW89_ETSI][66] = 127,
+ [0][0][2][0][RTW89_KCC][66] = 26,
+ [0][0][2][0][RTW89_FCC][68] = 22,
+ [0][0][2][0][RTW89_ETSI][68] = 127,
+ [0][0][2][0][RTW89_KCC][68] = 26,
+ [0][0][2][0][RTW89_FCC][70] = 24,
+ [0][0][2][0][RTW89_ETSI][70] = 127,
+ [0][0][2][0][RTW89_KCC][70] = 26,
+ [0][0][2][0][RTW89_FCC][72] = 22,
+ [0][0][2][0][RTW89_ETSI][72] = 127,
+ [0][0][2][0][RTW89_KCC][72] = 26,
+ [0][0][2][0][RTW89_FCC][74] = 22,
+ [0][0][2][0][RTW89_ETSI][74] = 127,
+ [0][0][2][0][RTW89_KCC][74] = 26,
+ [0][0][2][0][RTW89_FCC][75] = 22,
+ [0][0][2][0][RTW89_ETSI][75] = 127,
+ [0][0][2][0][RTW89_KCC][75] = 26,
+ [0][0][2][0][RTW89_FCC][77] = 22,
+ [0][0][2][0][RTW89_ETSI][77] = 127,
+ [0][0][2][0][RTW89_KCC][77] = 26,
+ [0][0][2][0][RTW89_FCC][79] = 22,
+ [0][0][2][0][RTW89_ETSI][79] = 127,
+ [0][0][2][0][RTW89_KCC][79] = 26,
+ [0][0][2][0][RTW89_FCC][81] = 22,
+ [0][0][2][0][RTW89_ETSI][81] = 127,
+ [0][0][2][0][RTW89_KCC][81] = 26,
+ [0][0][2][0][RTW89_FCC][83] = 22,
+ [0][0][2][0][RTW89_ETSI][83] = 127,
+ [0][0][2][0][RTW89_KCC][83] = 32,
+ [0][0][2][0][RTW89_FCC][85] = 22,
+ [0][0][2][0][RTW89_ETSI][85] = 127,
+ [0][0][2][0][RTW89_KCC][85] = 32,
+ [0][0][2][0][RTW89_FCC][87] = 22,
+ [0][0][2][0][RTW89_ETSI][87] = 127,
+ [0][0][2][0][RTW89_KCC][87] = 32,
+ [0][0][2][0][RTW89_FCC][89] = 22,
+ [0][0][2][0][RTW89_ETSI][89] = 127,
+ [0][0][2][0][RTW89_KCC][89] = 32,
+ [0][0][2][0][RTW89_FCC][90] = 22,
+ [0][0][2][0][RTW89_ETSI][90] = 127,
+ [0][0][2][0][RTW89_KCC][90] = 32,
+ [0][0][2][0][RTW89_FCC][92] = 22,
+ [0][0][2][0][RTW89_ETSI][92] = 127,
+ [0][0][2][0][RTW89_KCC][92] = 32,
+ [0][0][2][0][RTW89_FCC][94] = 22,
+ [0][0][2][0][RTW89_ETSI][94] = 127,
+ [0][0][2][0][RTW89_KCC][94] = 32,
+ [0][0][2][0][RTW89_FCC][96] = 22,
+ [0][0][2][0][RTW89_ETSI][96] = 127,
+ [0][0][2][0][RTW89_KCC][96] = 32,
+ [0][0][2][0][RTW89_FCC][98] = 22,
+ [0][0][2][0][RTW89_ETSI][98] = 127,
+ [0][0][2][0][RTW89_KCC][98] = 32,
+ [0][0][2][0][RTW89_FCC][100] = 22,
+ [0][0][2][0][RTW89_ETSI][100] = 127,
+ [0][0][2][0][RTW89_KCC][100] = 32,
+ [0][0][2][0][RTW89_FCC][102] = 22,
+ [0][0][2][0][RTW89_ETSI][102] = 127,
+ [0][0][2][0][RTW89_KCC][102] = 32,
+ [0][0][2][0][RTW89_FCC][104] = 22,
+ [0][0][2][0][RTW89_ETSI][104] = 127,
+ [0][0][2][0][RTW89_KCC][104] = 32,
+ [0][0][2][0][RTW89_FCC][105] = 22,
+ [0][0][2][0][RTW89_ETSI][105] = 127,
+ [0][0][2][0][RTW89_KCC][105] = 32,
+ [0][0][2][0][RTW89_FCC][107] = 24,
+ [0][0][2][0][RTW89_ETSI][107] = 127,
+ [0][0][2][0][RTW89_KCC][107] = 32,
+ [0][0][2][0][RTW89_FCC][109] = 24,
+ [0][0][2][0][RTW89_ETSI][109] = 127,
+ [0][0][2][0][RTW89_KCC][109] = 32,
[0][0][2][0][RTW89_FCC][111] = 127,
+ [0][0][2][0][RTW89_ETSI][111] = 127,
+ [0][0][2][0][RTW89_KCC][111] = 127,
[0][0][2][0][RTW89_FCC][113] = 127,
+ [0][0][2][0][RTW89_ETSI][113] = 127,
+ [0][0][2][0][RTW89_KCC][113] = 127,
[0][0][2][0][RTW89_FCC][115] = 127,
+ [0][0][2][0][RTW89_ETSI][115] = 127,
+ [0][0][2][0][RTW89_KCC][115] = 127,
[0][0][2][0][RTW89_FCC][117] = 127,
+ [0][0][2][0][RTW89_ETSI][117] = 127,
+ [0][0][2][0][RTW89_KCC][117] = 127,
[0][0][2][0][RTW89_FCC][119] = 127,
- [0][1][2][0][RTW89_FCC][0] = 60,
- [0][1][2][0][RTW89_FCC][2] = 60,
- [0][1][2][0][RTW89_FCC][4] = 60,
- [0][1][2][0][RTW89_FCC][6] = 60,
- [0][1][2][0][RTW89_FCC][8] = 60,
- [0][1][2][0][RTW89_FCC][10] = 60,
- [0][1][2][0][RTW89_FCC][12] = 60,
- [0][1][2][0][RTW89_FCC][14] = 60,
- [0][1][2][0][RTW89_FCC][15] = 60,
- [0][1][2][0][RTW89_FCC][17] = 60,
- [0][1][2][0][RTW89_FCC][19] = 60,
- [0][1][2][0][RTW89_FCC][21] = 60,
- [0][1][2][0][RTW89_FCC][23] = 60,
- [0][1][2][0][RTW89_FCC][25] = 60,
- [0][1][2][0][RTW89_FCC][27] = 60,
- [0][1][2][0][RTW89_FCC][29] = 60,
- [0][1][2][0][RTW89_FCC][30] = 60,
- [0][1][2][0][RTW89_FCC][32] = 60,
- [0][1][2][0][RTW89_FCC][34] = 60,
- [0][1][2][0][RTW89_FCC][36] = 60,
- [0][1][2][0][RTW89_FCC][38] = 60,
- [0][1][2][0][RTW89_FCC][40] = 60,
- [0][1][2][0][RTW89_FCC][42] = 60,
- [0][1][2][0][RTW89_FCC][44] = 60,
- [0][1][2][0][RTW89_FCC][45] = 60,
- [0][1][2][0][RTW89_FCC][47] = 60,
- [0][1][2][0][RTW89_FCC][49] = 60,
- [0][1][2][0][RTW89_FCC][51] = 60,
- [0][1][2][0][RTW89_FCC][53] = 60,
- [0][1][2][0][RTW89_FCC][55] = 60,
- [0][1][2][0][RTW89_FCC][57] = 60,
- [0][1][2][0][RTW89_FCC][59] = 60,
- [0][1][2][0][RTW89_FCC][60] = 60,
- [0][1][2][0][RTW89_FCC][62] = 60,
- [0][1][2][0][RTW89_FCC][64] = 60,
- [0][1][2][0][RTW89_FCC][66] = 60,
- [0][1][2][0][RTW89_FCC][68] = 60,
- [0][1][2][0][RTW89_FCC][70] = 60,
- [0][1][2][0][RTW89_FCC][72] = 60,
- [0][1][2][0][RTW89_FCC][74] = 60,
- [0][1][2][0][RTW89_FCC][75] = 60,
- [0][1][2][0][RTW89_FCC][77] = 60,
- [0][1][2][0][RTW89_FCC][79] = 60,
- [0][1][2][0][RTW89_FCC][81] = 60,
- [0][1][2][0][RTW89_FCC][83] = 60,
- [0][1][2][0][RTW89_FCC][85] = 60,
- [0][1][2][0][RTW89_FCC][87] = 60,
- [0][1][2][0][RTW89_FCC][89] = 60,
- [0][1][2][0][RTW89_FCC][90] = 60,
- [0][1][2][0][RTW89_FCC][92] = 60,
- [0][1][2][0][RTW89_FCC][94] = 60,
- [0][1][2][0][RTW89_FCC][96] = 60,
- [0][1][2][0][RTW89_FCC][98] = 60,
- [0][1][2][0][RTW89_FCC][100] = 60,
- [0][1][2][0][RTW89_FCC][102] = 60,
- [0][1][2][0][RTW89_FCC][104] = 60,
- [0][1][2][0][RTW89_FCC][105] = 60,
- [0][1][2][0][RTW89_FCC][107] = 60,
- [0][1][2][0][RTW89_FCC][109] = 60,
+ [0][0][2][0][RTW89_ETSI][119] = 127,
+ [0][0][2][0][RTW89_KCC][119] = 127,
+ [0][1][2][0][RTW89_FCC][0] = -2,
+ [0][1][2][0][RTW89_ETSI][0] = 54,
+ [0][1][2][0][RTW89_KCC][0] = 12,
+ [0][1][2][0][RTW89_FCC][2] = -4,
+ [0][1][2][0][RTW89_ETSI][2] = 54,
+ [0][1][2][0][RTW89_KCC][2] = 12,
+ [0][1][2][0][RTW89_FCC][4] = -4,
+ [0][1][2][0][RTW89_ETSI][4] = 54,
+ [0][1][2][0][RTW89_KCC][4] = 12,
+ [0][1][2][0][RTW89_FCC][6] = -4,
+ [0][1][2][0][RTW89_ETSI][6] = 54,
+ [0][1][2][0][RTW89_KCC][6] = 12,
+ [0][1][2][0][RTW89_FCC][8] = -4,
+ [0][1][2][0][RTW89_ETSI][8] = 54,
+ [0][1][2][0][RTW89_KCC][8] = 12,
+ [0][1][2][0][RTW89_FCC][10] = -4,
+ [0][1][2][0][RTW89_ETSI][10] = 54,
+ [0][1][2][0][RTW89_KCC][10] = 12,
+ [0][1][2][0][RTW89_FCC][12] = -4,
+ [0][1][2][0][RTW89_ETSI][12] = 54,
+ [0][1][2][0][RTW89_KCC][12] = 12,
+ [0][1][2][0][RTW89_FCC][14] = -4,
+ [0][1][2][0][RTW89_ETSI][14] = 54,
+ [0][1][2][0][RTW89_KCC][14] = 12,
+ [0][1][2][0][RTW89_FCC][15] = -4,
+ [0][1][2][0][RTW89_ETSI][15] = 54,
+ [0][1][2][0][RTW89_KCC][15] = 12,
+ [0][1][2][0][RTW89_FCC][17] = -4,
+ [0][1][2][0][RTW89_ETSI][17] = 54,
+ [0][1][2][0][RTW89_KCC][17] = 12,
+ [0][1][2][0][RTW89_FCC][19] = -4,
+ [0][1][2][0][RTW89_ETSI][19] = 54,
+ [0][1][2][0][RTW89_KCC][19] = 12,
+ [0][1][2][0][RTW89_FCC][21] = -4,
+ [0][1][2][0][RTW89_ETSI][21] = 54,
+ [0][1][2][0][RTW89_KCC][21] = 12,
+ [0][1][2][0][RTW89_FCC][23] = -4,
+ [0][1][2][0][RTW89_ETSI][23] = 54,
+ [0][1][2][0][RTW89_KCC][23] = 12,
+ [0][1][2][0][RTW89_FCC][25] = -4,
+ [0][1][2][0][RTW89_ETSI][25] = 54,
+ [0][1][2][0][RTW89_KCC][25] = 12,
+ [0][1][2][0][RTW89_FCC][27] = -4,
+ [0][1][2][0][RTW89_ETSI][27] = 54,
+ [0][1][2][0][RTW89_KCC][27] = 12,
+ [0][1][2][0][RTW89_FCC][29] = -4,
+ [0][1][2][0][RTW89_ETSI][29] = 54,
+ [0][1][2][0][RTW89_KCC][29] = 12,
+ [0][1][2][0][RTW89_FCC][30] = -4,
+ [0][1][2][0][RTW89_ETSI][30] = 54,
+ [0][1][2][0][RTW89_KCC][30] = 12,
+ [0][1][2][0][RTW89_FCC][32] = -4,
+ [0][1][2][0][RTW89_ETSI][32] = 54,
+ [0][1][2][0][RTW89_KCC][32] = 12,
+ [0][1][2][0][RTW89_FCC][34] = -4,
+ [0][1][2][0][RTW89_ETSI][34] = 54,
+ [0][1][2][0][RTW89_KCC][34] = 12,
+ [0][1][2][0][RTW89_FCC][36] = -4,
+ [0][1][2][0][RTW89_ETSI][36] = 54,
+ [0][1][2][0][RTW89_KCC][36] = 12,
+ [0][1][2][0][RTW89_FCC][38] = -4,
+ [0][1][2][0][RTW89_ETSI][38] = 54,
+ [0][1][2][0][RTW89_KCC][38] = 12,
+ [0][1][2][0][RTW89_FCC][40] = -4,
+ [0][1][2][0][RTW89_ETSI][40] = 54,
+ [0][1][2][0][RTW89_KCC][40] = 12,
+ [0][1][2][0][RTW89_FCC][42] = -4,
+ [0][1][2][0][RTW89_ETSI][42] = 54,
+ [0][1][2][0][RTW89_KCC][42] = 12,
+ [0][1][2][0][RTW89_FCC][44] = -2,
+ [0][1][2][0][RTW89_ETSI][44] = 54,
+ [0][1][2][0][RTW89_KCC][44] = 12,
+ [0][1][2][0][RTW89_FCC][45] = -2,
+ [0][1][2][0][RTW89_ETSI][45] = 127,
+ [0][1][2][0][RTW89_KCC][45] = 12,
+ [0][1][2][0][RTW89_FCC][47] = -2,
+ [0][1][2][0][RTW89_ETSI][47] = 127,
+ [0][1][2][0][RTW89_KCC][47] = 12,
+ [0][1][2][0][RTW89_FCC][49] = -2,
+ [0][1][2][0][RTW89_ETSI][49] = 127,
+ [0][1][2][0][RTW89_KCC][49] = 12,
+ [0][1][2][0][RTW89_FCC][51] = -2,
+ [0][1][2][0][RTW89_ETSI][51] = 127,
+ [0][1][2][0][RTW89_KCC][51] = 12,
+ [0][1][2][0][RTW89_FCC][53] = -2,
+ [0][1][2][0][RTW89_ETSI][53] = 127,
+ [0][1][2][0][RTW89_KCC][53] = 12,
+ [0][1][2][0][RTW89_FCC][55] = -2,
+ [0][1][2][0][RTW89_ETSI][55] = 127,
+ [0][1][2][0][RTW89_KCC][55] = 12,
+ [0][1][2][0][RTW89_FCC][57] = -2,
+ [0][1][2][0][RTW89_ETSI][57] = 127,
+ [0][1][2][0][RTW89_KCC][57] = 12,
+ [0][1][2][0][RTW89_FCC][59] = -2,
+ [0][1][2][0][RTW89_ETSI][59] = 127,
+ [0][1][2][0][RTW89_KCC][59] = 12,
+ [0][1][2][0][RTW89_FCC][60] = -2,
+ [0][1][2][0][RTW89_ETSI][60] = 127,
+ [0][1][2][0][RTW89_KCC][60] = 12,
+ [0][1][2][0][RTW89_FCC][62] = -2,
+ [0][1][2][0][RTW89_ETSI][62] = 127,
+ [0][1][2][0][RTW89_KCC][62] = 12,
+ [0][1][2][0][RTW89_FCC][64] = -2,
+ [0][1][2][0][RTW89_ETSI][64] = 127,
+ [0][1][2][0][RTW89_KCC][64] = 12,
+ [0][1][2][0][RTW89_FCC][66] = -2,
+ [0][1][2][0][RTW89_ETSI][66] = 127,
+ [0][1][2][0][RTW89_KCC][66] = 12,
+ [0][1][2][0][RTW89_FCC][68] = -2,
+ [0][1][2][0][RTW89_ETSI][68] = 127,
+ [0][1][2][0][RTW89_KCC][68] = 12,
+ [0][1][2][0][RTW89_FCC][70] = -2,
+ [0][1][2][0][RTW89_ETSI][70] = 127,
+ [0][1][2][0][RTW89_KCC][70] = 12,
+ [0][1][2][0][RTW89_FCC][72] = -2,
+ [0][1][2][0][RTW89_ETSI][72] = 127,
+ [0][1][2][0][RTW89_KCC][72] = 12,
+ [0][1][2][0][RTW89_FCC][74] = -2,
+ [0][1][2][0][RTW89_ETSI][74] = 127,
+ [0][1][2][0][RTW89_KCC][74] = 12,
+ [0][1][2][0][RTW89_FCC][75] = -2,
+ [0][1][2][0][RTW89_ETSI][75] = 127,
+ [0][1][2][0][RTW89_KCC][75] = 12,
+ [0][1][2][0][RTW89_FCC][77] = -2,
+ [0][1][2][0][RTW89_ETSI][77] = 127,
+ [0][1][2][0][RTW89_KCC][77] = 12,
+ [0][1][2][0][RTW89_FCC][79] = -2,
+ [0][1][2][0][RTW89_ETSI][79] = 127,
+ [0][1][2][0][RTW89_KCC][79] = 12,
+ [0][1][2][0][RTW89_FCC][81] = -2,
+ [0][1][2][0][RTW89_ETSI][81] = 127,
+ [0][1][2][0][RTW89_KCC][81] = 12,
+ [0][1][2][0][RTW89_FCC][83] = -2,
+ [0][1][2][0][RTW89_ETSI][83] = 127,
+ [0][1][2][0][RTW89_KCC][83] = 20,
+ [0][1][2][0][RTW89_FCC][85] = -2,
+ [0][1][2][0][RTW89_ETSI][85] = 127,
+ [0][1][2][0][RTW89_KCC][85] = 20,
+ [0][1][2][0][RTW89_FCC][87] = -2,
+ [0][1][2][0][RTW89_ETSI][87] = 127,
+ [0][1][2][0][RTW89_KCC][87] = 20,
+ [0][1][2][0][RTW89_FCC][89] = -2,
+ [0][1][2][0][RTW89_ETSI][89] = 127,
+ [0][1][2][0][RTW89_KCC][89] = 20,
+ [0][1][2][0][RTW89_FCC][90] = -2,
+ [0][1][2][0][RTW89_ETSI][90] = 127,
+ [0][1][2][0][RTW89_KCC][90] = 20,
+ [0][1][2][0][RTW89_FCC][92] = -2,
+ [0][1][2][0][RTW89_ETSI][92] = 127,
+ [0][1][2][0][RTW89_KCC][92] = 20,
+ [0][1][2][0][RTW89_FCC][94] = -2,
+ [0][1][2][0][RTW89_ETSI][94] = 127,
+ [0][1][2][0][RTW89_KCC][94] = 20,
+ [0][1][2][0][RTW89_FCC][96] = -2,
+ [0][1][2][0][RTW89_ETSI][96] = 127,
+ [0][1][2][0][RTW89_KCC][96] = 20,
+ [0][1][2][0][RTW89_FCC][98] = -2,
+ [0][1][2][0][RTW89_ETSI][98] = 127,
+ [0][1][2][0][RTW89_KCC][98] = 20,
+ [0][1][2][0][RTW89_FCC][100] = -2,
+ [0][1][2][0][RTW89_ETSI][100] = 127,
+ [0][1][2][0][RTW89_KCC][100] = 20,
+ [0][1][2][0][RTW89_FCC][102] = -2,
+ [0][1][2][0][RTW89_ETSI][102] = 127,
+ [0][1][2][0][RTW89_KCC][102] = 20,
+ [0][1][2][0][RTW89_FCC][104] = -2,
+ [0][1][2][0][RTW89_ETSI][104] = 127,
+ [0][1][2][0][RTW89_KCC][104] = 20,
+ [0][1][2][0][RTW89_FCC][105] = -2,
+ [0][1][2][0][RTW89_ETSI][105] = 127,
+ [0][1][2][0][RTW89_KCC][105] = 20,
+ [0][1][2][0][RTW89_FCC][107] = 0,
+ [0][1][2][0][RTW89_ETSI][107] = 127,
+ [0][1][2][0][RTW89_KCC][107] = 20,
+ [0][1][2][0][RTW89_FCC][109] = 0,
+ [0][1][2][0][RTW89_ETSI][109] = 127,
+ [0][1][2][0][RTW89_KCC][109] = 20,
[0][1][2][0][RTW89_FCC][111] = 127,
+ [0][1][2][0][RTW89_ETSI][111] = 127,
+ [0][1][2][0][RTW89_KCC][111] = 127,
[0][1][2][0][RTW89_FCC][113] = 127,
+ [0][1][2][0][RTW89_ETSI][113] = 127,
+ [0][1][2][0][RTW89_KCC][113] = 127,
[0][1][2][0][RTW89_FCC][115] = 127,
+ [0][1][2][0][RTW89_ETSI][115] = 127,
+ [0][1][2][0][RTW89_KCC][115] = 127,
[0][1][2][0][RTW89_FCC][117] = 127,
+ [0][1][2][0][RTW89_ETSI][117] = 127,
+ [0][1][2][0][RTW89_KCC][117] = 127,
[0][1][2][0][RTW89_FCC][119] = 127,
- [0][1][2][1][RTW89_FCC][0] = 48,
- [0][1][2][1][RTW89_FCC][2] = 48,
- [0][1][2][1][RTW89_FCC][4] = 48,
- [0][1][2][1][RTW89_FCC][6] = 48,
- [0][1][2][1][RTW89_FCC][8] = 48,
- [0][1][2][1][RTW89_FCC][10] = 48,
- [0][1][2][1][RTW89_FCC][12] = 48,
- [0][1][2][1][RTW89_FCC][14] = 48,
- [0][1][2][1][RTW89_FCC][15] = 48,
- [0][1][2][1][RTW89_FCC][17] = 48,
- [0][1][2][1][RTW89_FCC][19] = 48,
- [0][1][2][1][RTW89_FCC][21] = 48,
- [0][1][2][1][RTW89_FCC][23] = 48,
- [0][1][2][1][RTW89_FCC][25] = 48,
- [0][1][2][1][RTW89_FCC][27] = 48,
- [0][1][2][1][RTW89_FCC][29] = 48,
- [0][1][2][1][RTW89_FCC][30] = 48,
- [0][1][2][1][RTW89_FCC][32] = 48,
- [0][1][2][1][RTW89_FCC][34] = 48,
- [0][1][2][1][RTW89_FCC][36] = 48,
- [0][1][2][1][RTW89_FCC][38] = 48,
- [0][1][2][1][RTW89_FCC][40] = 48,
- [0][1][2][1][RTW89_FCC][42] = 48,
- [0][1][2][1][RTW89_FCC][44] = 48,
- [0][1][2][1][RTW89_FCC][45] = 48,
- [0][1][2][1][RTW89_FCC][47] = 48,
- [0][1][2][1][RTW89_FCC][49] = 48,
- [0][1][2][1][RTW89_FCC][51] = 48,
- [0][1][2][1][RTW89_FCC][53] = 48,
- [0][1][2][1][RTW89_FCC][55] = 48,
- [0][1][2][1][RTW89_FCC][57] = 48,
- [0][1][2][1][RTW89_FCC][59] = 48,
- [0][1][2][1][RTW89_FCC][60] = 48,
- [0][1][2][1][RTW89_FCC][62] = 48,
- [0][1][2][1][RTW89_FCC][64] = 48,
- [0][1][2][1][RTW89_FCC][66] = 48,
- [0][1][2][1][RTW89_FCC][68] = 48,
- [0][1][2][1][RTW89_FCC][70] = 48,
- [0][1][2][1][RTW89_FCC][72] = 48,
- [0][1][2][1][RTW89_FCC][74] = 48,
- [0][1][2][1][RTW89_FCC][75] = 48,
- [0][1][2][1][RTW89_FCC][77] = 48,
- [0][1][2][1][RTW89_FCC][79] = 48,
- [0][1][2][1][RTW89_FCC][81] = 48,
- [0][1][2][1][RTW89_FCC][83] = 48,
- [0][1][2][1][RTW89_FCC][85] = 48,
- [0][1][2][1][RTW89_FCC][87] = 48,
- [0][1][2][1][RTW89_FCC][89] = 48,
- [0][1][2][1][RTW89_FCC][90] = 48,
- [0][1][2][1][RTW89_FCC][92] = 48,
- [0][1][2][1][RTW89_FCC][94] = 48,
- [0][1][2][1][RTW89_FCC][96] = 48,
- [0][1][2][1][RTW89_FCC][98] = 48,
- [0][1][2][1][RTW89_FCC][100] = 48,
- [0][1][2][1][RTW89_FCC][102] = 48,
- [0][1][2][1][RTW89_FCC][104] = 48,
- [0][1][2][1][RTW89_FCC][105] = 48,
- [0][1][2][1][RTW89_FCC][107] = 48,
- [0][1][2][1][RTW89_FCC][109] = 48,
+ [0][1][2][0][RTW89_ETSI][119] = 127,
+ [0][1][2][0][RTW89_KCC][119] = 127,
+ [0][1][2][1][RTW89_FCC][0] = -2,
+ [0][1][2][1][RTW89_ETSI][0] = 42,
+ [0][1][2][1][RTW89_KCC][0] = 12,
+ [0][1][2][1][RTW89_FCC][2] = -4,
+ [0][1][2][1][RTW89_ETSI][2] = 42,
+ [0][1][2][1][RTW89_KCC][2] = 12,
+ [0][1][2][1][RTW89_FCC][4] = -4,
+ [0][1][2][1][RTW89_ETSI][4] = 42,
+ [0][1][2][1][RTW89_KCC][4] = 12,
+ [0][1][2][1][RTW89_FCC][6] = -4,
+ [0][1][2][1][RTW89_ETSI][6] = 42,
+ [0][1][2][1][RTW89_KCC][6] = 12,
+ [0][1][2][1][RTW89_FCC][8] = -4,
+ [0][1][2][1][RTW89_ETSI][8] = 42,
+ [0][1][2][1][RTW89_KCC][8] = 12,
+ [0][1][2][1][RTW89_FCC][10] = -4,
+ [0][1][2][1][RTW89_ETSI][10] = 42,
+ [0][1][2][1][RTW89_KCC][10] = 12,
+ [0][1][2][1][RTW89_FCC][12] = -4,
+ [0][1][2][1][RTW89_ETSI][12] = 42,
+ [0][1][2][1][RTW89_KCC][12] = 12,
+ [0][1][2][1][RTW89_FCC][14] = -4,
+ [0][1][2][1][RTW89_ETSI][14] = 42,
+ [0][1][2][1][RTW89_KCC][14] = 12,
+ [0][1][2][1][RTW89_FCC][15] = -4,
+ [0][1][2][1][RTW89_ETSI][15] = 42,
+ [0][1][2][1][RTW89_KCC][15] = 12,
+ [0][1][2][1][RTW89_FCC][17] = -4,
+ [0][1][2][1][RTW89_ETSI][17] = 42,
+ [0][1][2][1][RTW89_KCC][17] = 12,
+ [0][1][2][1][RTW89_FCC][19] = -4,
+ [0][1][2][1][RTW89_ETSI][19] = 42,
+ [0][1][2][1][RTW89_KCC][19] = 12,
+ [0][1][2][1][RTW89_FCC][21] = -4,
+ [0][1][2][1][RTW89_ETSI][21] = 42,
+ [0][1][2][1][RTW89_KCC][21] = 12,
+ [0][1][2][1][RTW89_FCC][23] = -4,
+ [0][1][2][1][RTW89_ETSI][23] = 42,
+ [0][1][2][1][RTW89_KCC][23] = 12,
+ [0][1][2][1][RTW89_FCC][25] = -4,
+ [0][1][2][1][RTW89_ETSI][25] = 42,
+ [0][1][2][1][RTW89_KCC][25] = 12,
+ [0][1][2][1][RTW89_FCC][27] = -4,
+ [0][1][2][1][RTW89_ETSI][27] = 42,
+ [0][1][2][1][RTW89_KCC][27] = 12,
+ [0][1][2][1][RTW89_FCC][29] = -4,
+ [0][1][2][1][RTW89_ETSI][29] = 42,
+ [0][1][2][1][RTW89_KCC][29] = 12,
+ [0][1][2][1][RTW89_FCC][30] = -4,
+ [0][1][2][1][RTW89_ETSI][30] = 42,
+ [0][1][2][1][RTW89_KCC][30] = 12,
+ [0][1][2][1][RTW89_FCC][32] = -4,
+ [0][1][2][1][RTW89_ETSI][32] = 42,
+ [0][1][2][1][RTW89_KCC][32] = 12,
+ [0][1][2][1][RTW89_FCC][34] = -4,
+ [0][1][2][1][RTW89_ETSI][34] = 42,
+ [0][1][2][1][RTW89_KCC][34] = 12,
+ [0][1][2][1][RTW89_FCC][36] = -4,
+ [0][1][2][1][RTW89_ETSI][36] = 42,
+ [0][1][2][1][RTW89_KCC][36] = 12,
+ [0][1][2][1][RTW89_FCC][38] = -4,
+ [0][1][2][1][RTW89_ETSI][38] = 42,
+ [0][1][2][1][RTW89_KCC][38] = 12,
+ [0][1][2][1][RTW89_FCC][40] = -4,
+ [0][1][2][1][RTW89_ETSI][40] = 42,
+ [0][1][2][1][RTW89_KCC][40] = 12,
+ [0][1][2][1][RTW89_FCC][42] = -4,
+ [0][1][2][1][RTW89_ETSI][42] = 42,
+ [0][1][2][1][RTW89_KCC][42] = 12,
+ [0][1][2][1][RTW89_FCC][44] = -2,
+ [0][1][2][1][RTW89_ETSI][44] = 42,
+ [0][1][2][1][RTW89_KCC][44] = 12,
+ [0][1][2][1][RTW89_FCC][45] = -2,
+ [0][1][2][1][RTW89_ETSI][45] = 127,
+ [0][1][2][1][RTW89_KCC][45] = 12,
+ [0][1][2][1][RTW89_FCC][47] = -2,
+ [0][1][2][1][RTW89_ETSI][47] = 127,
+ [0][1][2][1][RTW89_KCC][47] = 12,
+ [0][1][2][1][RTW89_FCC][49] = -2,
+ [0][1][2][1][RTW89_ETSI][49] = 127,
+ [0][1][2][1][RTW89_KCC][49] = 12,
+ [0][1][2][1][RTW89_FCC][51] = -2,
+ [0][1][2][1][RTW89_ETSI][51] = 127,
+ [0][1][2][1][RTW89_KCC][51] = 12,
+ [0][1][2][1][RTW89_FCC][53] = -2,
+ [0][1][2][1][RTW89_ETSI][53] = 127,
+ [0][1][2][1][RTW89_KCC][53] = 12,
+ [0][1][2][1][RTW89_FCC][55] = -2,
+ [0][1][2][1][RTW89_ETSI][55] = 127,
+ [0][1][2][1][RTW89_KCC][55] = 12,
+ [0][1][2][1][RTW89_FCC][57] = -2,
+ [0][1][2][1][RTW89_ETSI][57] = 127,
+ [0][1][2][1][RTW89_KCC][57] = 12,
+ [0][1][2][1][RTW89_FCC][59] = -2,
+ [0][1][2][1][RTW89_ETSI][59] = 127,
+ [0][1][2][1][RTW89_KCC][59] = 12,
+ [0][1][2][1][RTW89_FCC][60] = -2,
+ [0][1][2][1][RTW89_ETSI][60] = 127,
+ [0][1][2][1][RTW89_KCC][60] = 12,
+ [0][1][2][1][RTW89_FCC][62] = -2,
+ [0][1][2][1][RTW89_ETSI][62] = 127,
+ [0][1][2][1][RTW89_KCC][62] = 12,
+ [0][1][2][1][RTW89_FCC][64] = -2,
+ [0][1][2][1][RTW89_ETSI][64] = 127,
+ [0][1][2][1][RTW89_KCC][64] = 12,
+ [0][1][2][1][RTW89_FCC][66] = -2,
+ [0][1][2][1][RTW89_ETSI][66] = 127,
+ [0][1][2][1][RTW89_KCC][66] = 12,
+ [0][1][2][1][RTW89_FCC][68] = -2,
+ [0][1][2][1][RTW89_ETSI][68] = 127,
+ [0][1][2][1][RTW89_KCC][68] = 12,
+ [0][1][2][1][RTW89_FCC][70] = -2,
+ [0][1][2][1][RTW89_ETSI][70] = 127,
+ [0][1][2][1][RTW89_KCC][70] = 12,
+ [0][1][2][1][RTW89_FCC][72] = -2,
+ [0][1][2][1][RTW89_ETSI][72] = 127,
+ [0][1][2][1][RTW89_KCC][72] = 12,
+ [0][1][2][1][RTW89_FCC][74] = -2,
+ [0][1][2][1][RTW89_ETSI][74] = 127,
+ [0][1][2][1][RTW89_KCC][74] = 12,
+ [0][1][2][1][RTW89_FCC][75] = -2,
+ [0][1][2][1][RTW89_ETSI][75] = 127,
+ [0][1][2][1][RTW89_KCC][75] = 12,
+ [0][1][2][1][RTW89_FCC][77] = -2,
+ [0][1][2][1][RTW89_ETSI][77] = 127,
+ [0][1][2][1][RTW89_KCC][77] = 12,
+ [0][1][2][1][RTW89_FCC][79] = -2,
+ [0][1][2][1][RTW89_ETSI][79] = 127,
+ [0][1][2][1][RTW89_KCC][79] = 12,
+ [0][1][2][1][RTW89_FCC][81] = -2,
+ [0][1][2][1][RTW89_ETSI][81] = 127,
+ [0][1][2][1][RTW89_KCC][81] = 12,
+ [0][1][2][1][RTW89_FCC][83] = -2,
+ [0][1][2][1][RTW89_ETSI][83] = 127,
+ [0][1][2][1][RTW89_KCC][83] = 20,
+ [0][1][2][1][RTW89_FCC][85] = -2,
+ [0][1][2][1][RTW89_ETSI][85] = 127,
+ [0][1][2][1][RTW89_KCC][85] = 20,
+ [0][1][2][1][RTW89_FCC][87] = -2,
+ [0][1][2][1][RTW89_ETSI][87] = 127,
+ [0][1][2][1][RTW89_KCC][87] = 20,
+ [0][1][2][1][RTW89_FCC][89] = -2,
+ [0][1][2][1][RTW89_ETSI][89] = 127,
+ [0][1][2][1][RTW89_KCC][89] = 20,
+ [0][1][2][1][RTW89_FCC][90] = -2,
+ [0][1][2][1][RTW89_ETSI][90] = 127,
+ [0][1][2][1][RTW89_KCC][90] = 20,
+ [0][1][2][1][RTW89_FCC][92] = -2,
+ [0][1][2][1][RTW89_ETSI][92] = 127,
+ [0][1][2][1][RTW89_KCC][92] = 20,
+ [0][1][2][1][RTW89_FCC][94] = -2,
+ [0][1][2][1][RTW89_ETSI][94] = 127,
+ [0][1][2][1][RTW89_KCC][94] = 20,
+ [0][1][2][1][RTW89_FCC][96] = -2,
+ [0][1][2][1][RTW89_ETSI][96] = 127,
+ [0][1][2][1][RTW89_KCC][96] = 20,
+ [0][1][2][1][RTW89_FCC][98] = -2,
+ [0][1][2][1][RTW89_ETSI][98] = 127,
+ [0][1][2][1][RTW89_KCC][98] = 20,
+ [0][1][2][1][RTW89_FCC][100] = -2,
+ [0][1][2][1][RTW89_ETSI][100] = 127,
+ [0][1][2][1][RTW89_KCC][100] = 20,
+ [0][1][2][1][RTW89_FCC][102] = -2,
+ [0][1][2][1][RTW89_ETSI][102] = 127,
+ [0][1][2][1][RTW89_KCC][102] = 20,
+ [0][1][2][1][RTW89_FCC][104] = -2,
+ [0][1][2][1][RTW89_ETSI][104] = 127,
+ [0][1][2][1][RTW89_KCC][104] = 20,
+ [0][1][2][1][RTW89_FCC][105] = -2,
+ [0][1][2][1][RTW89_ETSI][105] = 127,
+ [0][1][2][1][RTW89_KCC][105] = 20,
+ [0][1][2][1][RTW89_FCC][107] = 0,
+ [0][1][2][1][RTW89_ETSI][107] = 127,
+ [0][1][2][1][RTW89_KCC][107] = 20,
+ [0][1][2][1][RTW89_FCC][109] = 0,
+ [0][1][2][1][RTW89_ETSI][109] = 127,
+ [0][1][2][1][RTW89_KCC][109] = 20,
[0][1][2][1][RTW89_FCC][111] = 127,
+ [0][1][2][1][RTW89_ETSI][111] = 127,
+ [0][1][2][1][RTW89_KCC][111] = 127,
[0][1][2][1][RTW89_FCC][113] = 127,
+ [0][1][2][1][RTW89_ETSI][113] = 127,
+ [0][1][2][1][RTW89_KCC][113] = 127,
[0][1][2][1][RTW89_FCC][115] = 127,
+ [0][1][2][1][RTW89_ETSI][115] = 127,
+ [0][1][2][1][RTW89_KCC][115] = 127,
[0][1][2][1][RTW89_FCC][117] = 127,
+ [0][1][2][1][RTW89_ETSI][117] = 127,
+ [0][1][2][1][RTW89_KCC][117] = 127,
[0][1][2][1][RTW89_FCC][119] = 127,
- [1][0][2][0][RTW89_FCC][1] = 72,
- [1][0][2][0][RTW89_FCC][5] = 72,
- [1][0][2][0][RTW89_FCC][9] = 72,
- [1][0][2][0][RTW89_FCC][13] = 72,
- [1][0][2][0][RTW89_FCC][16] = 72,
- [1][0][2][0][RTW89_FCC][20] = 72,
- [1][0][2][0][RTW89_FCC][24] = 72,
- [1][0][2][0][RTW89_FCC][28] = 72,
- [1][0][2][0][RTW89_FCC][31] = 72,
- [1][0][2][0][RTW89_FCC][35] = 72,
- [1][0][2][0][RTW89_FCC][39] = 72,
- [1][0][2][0][RTW89_FCC][43] = 72,
- [1][0][2][0][RTW89_FCC][46] = 72,
- [1][0][2][0][RTW89_FCC][50] = 72,
- [1][0][2][0][RTW89_FCC][54] = 72,
- [1][0][2][0][RTW89_FCC][58] = 72,
- [1][0][2][0][RTW89_FCC][61] = 72,
- [1][0][2][0][RTW89_FCC][65] = 72,
- [1][0][2][0][RTW89_FCC][69] = 72,
- [1][0][2][0][RTW89_FCC][73] = 72,
- [1][0][2][0][RTW89_FCC][76] = 72,
- [1][0][2][0][RTW89_FCC][80] = 72,
- [1][0][2][0][RTW89_FCC][84] = 72,
- [1][0][2][0][RTW89_FCC][88] = 72,
- [1][0][2][0][RTW89_FCC][91] = 72,
- [1][0][2][0][RTW89_FCC][95] = 72,
- [1][0][2][0][RTW89_FCC][99] = 72,
- [1][0][2][0][RTW89_FCC][103] = 72,
- [1][0][2][0][RTW89_FCC][106] = 72,
+ [0][1][2][1][RTW89_ETSI][119] = 127,
+ [0][1][2][1][RTW89_KCC][119] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 34,
+ [1][0][2][0][RTW89_ETSI][1] = 66,
+ [1][0][2][0][RTW89_KCC][1] = 40,
+ [1][0][2][0][RTW89_FCC][5] = 34,
+ [1][0][2][0][RTW89_ETSI][5] = 66,
+ [1][0][2][0][RTW89_KCC][5] = 40,
+ [1][0][2][0][RTW89_FCC][9] = 34,
+ [1][0][2][0][RTW89_ETSI][9] = 66,
+ [1][0][2][0][RTW89_KCC][9] = 40,
+ [1][0][2][0][RTW89_FCC][13] = 34,
+ [1][0][2][0][RTW89_ETSI][13] = 66,
+ [1][0][2][0][RTW89_KCC][13] = 40,
+ [1][0][2][0][RTW89_FCC][16] = 34,
+ [1][0][2][0][RTW89_ETSI][16] = 66,
+ [1][0][2][0][RTW89_KCC][16] = 40,
+ [1][0][2][0][RTW89_FCC][20] = 34,
+ [1][0][2][0][RTW89_ETSI][20] = 66,
+ [1][0][2][0][RTW89_KCC][20] = 40,
+ [1][0][2][0][RTW89_FCC][24] = 36,
+ [1][0][2][0][RTW89_ETSI][24] = 66,
+ [1][0][2][0][RTW89_KCC][24] = 40,
+ [1][0][2][0][RTW89_FCC][28] = 34,
+ [1][0][2][0][RTW89_ETSI][28] = 66,
+ [1][0][2][0][RTW89_KCC][28] = 40,
+ [1][0][2][0][RTW89_FCC][31] = 34,
+ [1][0][2][0][RTW89_ETSI][31] = 66,
+ [1][0][2][0][RTW89_KCC][31] = 40,
+ [1][0][2][0][RTW89_FCC][35] = 34,
+ [1][0][2][0][RTW89_ETSI][35] = 66,
+ [1][0][2][0][RTW89_KCC][35] = 40,
+ [1][0][2][0][RTW89_FCC][39] = 34,
+ [1][0][2][0][RTW89_ETSI][39] = 66,
+ [1][0][2][0][RTW89_KCC][39] = 40,
+ [1][0][2][0][RTW89_FCC][43] = 34,
+ [1][0][2][0][RTW89_ETSI][43] = 66,
+ [1][0][2][0][RTW89_KCC][43] = 40,
+ [1][0][2][0][RTW89_FCC][46] = 34,
+ [1][0][2][0][RTW89_ETSI][46] = 127,
+ [1][0][2][0][RTW89_KCC][46] = 40,
+ [1][0][2][0][RTW89_FCC][50] = 34,
+ [1][0][2][0][RTW89_ETSI][50] = 127,
+ [1][0][2][0][RTW89_KCC][50] = 40,
+ [1][0][2][0][RTW89_FCC][54] = 36,
+ [1][0][2][0][RTW89_ETSI][54] = 127,
+ [1][0][2][0][RTW89_KCC][54] = 40,
+ [1][0][2][0][RTW89_FCC][58] = 36,
+ [1][0][2][0][RTW89_ETSI][58] = 127,
+ [1][0][2][0][RTW89_KCC][58] = 40,
+ [1][0][2][0][RTW89_FCC][61] = 34,
+ [1][0][2][0][RTW89_ETSI][61] = 127,
+ [1][0][2][0][RTW89_KCC][61] = 40,
+ [1][0][2][0][RTW89_FCC][65] = 34,
+ [1][0][2][0][RTW89_ETSI][65] = 127,
+ [1][0][2][0][RTW89_KCC][65] = 40,
+ [1][0][2][0][RTW89_FCC][69] = 34,
+ [1][0][2][0][RTW89_ETSI][69] = 127,
+ [1][0][2][0][RTW89_KCC][69] = 40,
+ [1][0][2][0][RTW89_FCC][73] = 34,
+ [1][0][2][0][RTW89_ETSI][73] = 127,
+ [1][0][2][0][RTW89_KCC][73] = 40,
+ [1][0][2][0][RTW89_FCC][76] = 34,
+ [1][0][2][0][RTW89_ETSI][76] = 127,
+ [1][0][2][0][RTW89_KCC][76] = 40,
+ [1][0][2][0][RTW89_FCC][80] = 34,
+ [1][0][2][0][RTW89_ETSI][80] = 127,
+ [1][0][2][0][RTW89_KCC][80] = 42,
+ [1][0][2][0][RTW89_FCC][84] = 34,
+ [1][0][2][0][RTW89_ETSI][84] = 127,
+ [1][0][2][0][RTW89_KCC][84] = 42,
+ [1][0][2][0][RTW89_FCC][88] = 34,
+ [1][0][2][0][RTW89_ETSI][88] = 127,
+ [1][0][2][0][RTW89_KCC][88] = 42,
+ [1][0][2][0][RTW89_FCC][91] = 36,
+ [1][0][2][0][RTW89_ETSI][91] = 127,
+ [1][0][2][0][RTW89_KCC][91] = 42,
+ [1][0][2][0][RTW89_FCC][95] = 34,
+ [1][0][2][0][RTW89_ETSI][95] = 127,
+ [1][0][2][0][RTW89_KCC][95] = 42,
+ [1][0][2][0][RTW89_FCC][99] = 34,
+ [1][0][2][0][RTW89_ETSI][99] = 127,
+ [1][0][2][0][RTW89_KCC][99] = 42,
+ [1][0][2][0][RTW89_FCC][103] = 34,
+ [1][0][2][0][RTW89_ETSI][103] = 127,
+ [1][0][2][0][RTW89_KCC][103] = 42,
+ [1][0][2][0][RTW89_FCC][106] = 36,
+ [1][0][2][0][RTW89_ETSI][106] = 127,
+ [1][0][2][0][RTW89_KCC][106] = 42,
[1][0][2][0][RTW89_FCC][110] = 127,
+ [1][0][2][0][RTW89_ETSI][110] = 127,
+ [1][0][2][0][RTW89_KCC][110] = 127,
[1][0][2][0][RTW89_FCC][114] = 127,
+ [1][0][2][0][RTW89_ETSI][114] = 127,
+ [1][0][2][0][RTW89_KCC][114] = 127,
[1][0][2][0][RTW89_FCC][118] = 127,
- [1][1][2][0][RTW89_FCC][1] = 60,
- [1][1][2][0][RTW89_FCC][5] = 60,
- [1][1][2][0][RTW89_FCC][9] = 60,
- [1][1][2][0][RTW89_FCC][13] = 60,
- [1][1][2][0][RTW89_FCC][16] = 60,
- [1][1][2][0][RTW89_FCC][20] = 60,
- [1][1][2][0][RTW89_FCC][24] = 60,
- [1][1][2][0][RTW89_FCC][28] = 60,
- [1][1][2][0][RTW89_FCC][31] = 60,
- [1][1][2][0][RTW89_FCC][35] = 60,
- [1][1][2][0][RTW89_FCC][39] = 60,
- [1][1][2][0][RTW89_FCC][43] = 60,
- [1][1][2][0][RTW89_FCC][46] = 60,
- [1][1][2][0][RTW89_FCC][50] = 60,
- [1][1][2][0][RTW89_FCC][54] = 60,
- [1][1][2][0][RTW89_FCC][58] = 60,
- [1][1][2][0][RTW89_FCC][61] = 60,
- [1][1][2][0][RTW89_FCC][65] = 60,
- [1][1][2][0][RTW89_FCC][69] = 60,
- [1][1][2][0][RTW89_FCC][73] = 60,
- [1][1][2][0][RTW89_FCC][76] = 60,
- [1][1][2][0][RTW89_FCC][80] = 60,
- [1][1][2][0][RTW89_FCC][84] = 60,
- [1][1][2][0][RTW89_FCC][88] = 60,
- [1][1][2][0][RTW89_FCC][91] = 60,
- [1][1][2][0][RTW89_FCC][95] = 60,
- [1][1][2][0][RTW89_FCC][99] = 60,
- [1][1][2][0][RTW89_FCC][103] = 60,
- [1][1][2][0][RTW89_FCC][106] = 60,
+ [1][0][2][0][RTW89_ETSI][118] = 127,
+ [1][0][2][0][RTW89_KCC][118] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 10,
+ [1][1][2][0][RTW89_ETSI][1] = 54,
+ [1][1][2][0][RTW89_KCC][1] = 28,
+ [1][1][2][0][RTW89_FCC][5] = 10,
+ [1][1][2][0][RTW89_ETSI][5] = 54,
+ [1][1][2][0][RTW89_KCC][5] = 28,
+ [1][1][2][0][RTW89_FCC][9] = 10,
+ [1][1][2][0][RTW89_ETSI][9] = 54,
+ [1][1][2][0][RTW89_KCC][9] = 28,
+ [1][1][2][0][RTW89_FCC][13] = 10,
+ [1][1][2][0][RTW89_ETSI][13] = 54,
+ [1][1][2][0][RTW89_KCC][13] = 28,
+ [1][1][2][0][RTW89_FCC][16] = 10,
+ [1][1][2][0][RTW89_ETSI][16] = 54,
+ [1][1][2][0][RTW89_KCC][16] = 28,
+ [1][1][2][0][RTW89_FCC][20] = 10,
+ [1][1][2][0][RTW89_ETSI][20] = 54,
+ [1][1][2][0][RTW89_KCC][20] = 28,
+ [1][1][2][0][RTW89_FCC][24] = 10,
+ [1][1][2][0][RTW89_ETSI][24] = 54,
+ [1][1][2][0][RTW89_KCC][24] = 28,
+ [1][1][2][0][RTW89_FCC][28] = 10,
+ [1][1][2][0][RTW89_ETSI][28] = 54,
+ [1][1][2][0][RTW89_KCC][28] = 28,
+ [1][1][2][0][RTW89_FCC][31] = 10,
+ [1][1][2][0][RTW89_ETSI][31] = 54,
+ [1][1][2][0][RTW89_KCC][31] = 28,
+ [1][1][2][0][RTW89_FCC][35] = 10,
+ [1][1][2][0][RTW89_ETSI][35] = 54,
+ [1][1][2][0][RTW89_KCC][35] = 28,
+ [1][1][2][0][RTW89_FCC][39] = 10,
+ [1][1][2][0][RTW89_ETSI][39] = 54,
+ [1][1][2][0][RTW89_KCC][39] = 28,
+ [1][1][2][0][RTW89_FCC][43] = 10,
+ [1][1][2][0][RTW89_ETSI][43] = 54,
+ [1][1][2][0][RTW89_KCC][43] = 28,
+ [1][1][2][0][RTW89_FCC][46] = 12,
+ [1][1][2][0][RTW89_ETSI][46] = 127,
+ [1][1][2][0][RTW89_KCC][46] = 28,
+ [1][1][2][0][RTW89_FCC][50] = 12,
+ [1][1][2][0][RTW89_ETSI][50] = 127,
+ [1][1][2][0][RTW89_KCC][50] = 28,
+ [1][1][2][0][RTW89_FCC][54] = 10,
+ [1][1][2][0][RTW89_ETSI][54] = 127,
+ [1][1][2][0][RTW89_KCC][54] = 28,
+ [1][1][2][0][RTW89_FCC][58] = 10,
+ [1][1][2][0][RTW89_ETSI][58] = 127,
+ [1][1][2][0][RTW89_KCC][58] = 28,
+ [1][1][2][0][RTW89_FCC][61] = 10,
+ [1][1][2][0][RTW89_ETSI][61] = 127,
+ [1][1][2][0][RTW89_KCC][61] = 28,
+ [1][1][2][0][RTW89_FCC][65] = 10,
+ [1][1][2][0][RTW89_ETSI][65] = 127,
+ [1][1][2][0][RTW89_KCC][65] = 28,
+ [1][1][2][0][RTW89_FCC][69] = 10,
+ [1][1][2][0][RTW89_ETSI][69] = 127,
+ [1][1][2][0][RTW89_KCC][69] = 28,
+ [1][1][2][0][RTW89_FCC][73] = 10,
+ [1][1][2][0][RTW89_ETSI][73] = 127,
+ [1][1][2][0][RTW89_KCC][73] = 28,
+ [1][1][2][0][RTW89_FCC][76] = 10,
+ [1][1][2][0][RTW89_ETSI][76] = 127,
+ [1][1][2][0][RTW89_KCC][76] = 28,
+ [1][1][2][0][RTW89_FCC][80] = 10,
+ [1][1][2][0][RTW89_ETSI][80] = 127,
+ [1][1][2][0][RTW89_KCC][80] = 32,
+ [1][1][2][0][RTW89_FCC][84] = 10,
+ [1][1][2][0][RTW89_ETSI][84] = 127,
+ [1][1][2][0][RTW89_KCC][84] = 32,
+ [1][1][2][0][RTW89_FCC][88] = 10,
+ [1][1][2][0][RTW89_ETSI][88] = 127,
+ [1][1][2][0][RTW89_KCC][88] = 32,
+ [1][1][2][0][RTW89_FCC][91] = 12,
+ [1][1][2][0][RTW89_ETSI][91] = 127,
+ [1][1][2][0][RTW89_KCC][91] = 32,
+ [1][1][2][0][RTW89_FCC][95] = 10,
+ [1][1][2][0][RTW89_ETSI][95] = 127,
+ [1][1][2][0][RTW89_KCC][95] = 32,
+ [1][1][2][0][RTW89_FCC][99] = 10,
+ [1][1][2][0][RTW89_ETSI][99] = 127,
+ [1][1][2][0][RTW89_KCC][99] = 32,
+ [1][1][2][0][RTW89_FCC][103] = 10,
+ [1][1][2][0][RTW89_ETSI][103] = 127,
+ [1][1][2][0][RTW89_KCC][103] = 32,
+ [1][1][2][0][RTW89_FCC][106] = 12,
+ [1][1][2][0][RTW89_ETSI][106] = 127,
+ [1][1][2][0][RTW89_KCC][106] = 32,
[1][1][2][0][RTW89_FCC][110] = 127,
+ [1][1][2][0][RTW89_ETSI][110] = 127,
+ [1][1][2][0][RTW89_KCC][110] = 127,
[1][1][2][0][RTW89_FCC][114] = 127,
+ [1][1][2][0][RTW89_ETSI][114] = 127,
+ [1][1][2][0][RTW89_KCC][114] = 127,
[1][1][2][0][RTW89_FCC][118] = 127,
- [1][1][2][1][RTW89_FCC][1] = 48,
- [1][1][2][1][RTW89_FCC][5] = 48,
- [1][1][2][1][RTW89_FCC][9] = 48,
- [1][1][2][1][RTW89_FCC][13] = 48,
- [1][1][2][1][RTW89_FCC][16] = 48,
- [1][1][2][1][RTW89_FCC][20] = 48,
- [1][1][2][1][RTW89_FCC][24] = 48,
- [1][1][2][1][RTW89_FCC][28] = 48,
- [1][1][2][1][RTW89_FCC][31] = 48,
- [1][1][2][1][RTW89_FCC][35] = 48,
- [1][1][2][1][RTW89_FCC][39] = 48,
- [1][1][2][1][RTW89_FCC][43] = 48,
- [1][1][2][1][RTW89_FCC][46] = 48,
- [1][1][2][1][RTW89_FCC][50] = 48,
- [1][1][2][1][RTW89_FCC][54] = 48,
- [1][1][2][1][RTW89_FCC][58] = 48,
- [1][1][2][1][RTW89_FCC][61] = 48,
- [1][1][2][1][RTW89_FCC][65] = 48,
- [1][1][2][1][RTW89_FCC][69] = 48,
- [1][1][2][1][RTW89_FCC][73] = 48,
- [1][1][2][1][RTW89_FCC][76] = 48,
- [1][1][2][1][RTW89_FCC][80] = 48,
- [1][1][2][1][RTW89_FCC][84] = 48,
- [1][1][2][1][RTW89_FCC][88] = 48,
- [1][1][2][1][RTW89_FCC][91] = 48,
- [1][1][2][1][RTW89_FCC][95] = 48,
- [1][1][2][1][RTW89_FCC][99] = 48,
- [1][1][2][1][RTW89_FCC][103] = 48,
- [1][1][2][1][RTW89_FCC][106] = 48,
+ [1][1][2][0][RTW89_ETSI][118] = 127,
+ [1][1][2][0][RTW89_KCC][118] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 10,
+ [1][1][2][1][RTW89_ETSI][1] = 42,
+ [1][1][2][1][RTW89_KCC][1] = 28,
+ [1][1][2][1][RTW89_FCC][5] = 10,
+ [1][1][2][1][RTW89_ETSI][5] = 42,
+ [1][1][2][1][RTW89_KCC][5] = 28,
+ [1][1][2][1][RTW89_FCC][9] = 10,
+ [1][1][2][1][RTW89_ETSI][9] = 42,
+ [1][1][2][1][RTW89_KCC][9] = 28,
+ [1][1][2][1][RTW89_FCC][13] = 10,
+ [1][1][2][1][RTW89_ETSI][13] = 42,
+ [1][1][2][1][RTW89_KCC][13] = 28,
+ [1][1][2][1][RTW89_FCC][16] = 10,
+ [1][1][2][1][RTW89_ETSI][16] = 42,
+ [1][1][2][1][RTW89_KCC][16] = 28,
+ [1][1][2][1][RTW89_FCC][20] = 10,
+ [1][1][2][1][RTW89_ETSI][20] = 42,
+ [1][1][2][1][RTW89_KCC][20] = 28,
+ [1][1][2][1][RTW89_FCC][24] = 10,
+ [1][1][2][1][RTW89_ETSI][24] = 42,
+ [1][1][2][1][RTW89_KCC][24] = 28,
+ [1][1][2][1][RTW89_FCC][28] = 10,
+ [1][1][2][1][RTW89_ETSI][28] = 42,
+ [1][1][2][1][RTW89_KCC][28] = 28,
+ [1][1][2][1][RTW89_FCC][31] = 10,
+ [1][1][2][1][RTW89_ETSI][31] = 42,
+ [1][1][2][1][RTW89_KCC][31] = 28,
+ [1][1][2][1][RTW89_FCC][35] = 10,
+ [1][1][2][1][RTW89_ETSI][35] = 42,
+ [1][1][2][1][RTW89_KCC][35] = 28,
+ [1][1][2][1][RTW89_FCC][39] = 10,
+ [1][1][2][1][RTW89_ETSI][39] = 42,
+ [1][1][2][1][RTW89_KCC][39] = 28,
+ [1][1][2][1][RTW89_FCC][43] = 10,
+ [1][1][2][1][RTW89_ETSI][43] = 42,
+ [1][1][2][1][RTW89_KCC][43] = 28,
+ [1][1][2][1][RTW89_FCC][46] = 12,
+ [1][1][2][1][RTW89_ETSI][46] = 127,
+ [1][1][2][1][RTW89_KCC][46] = 28,
+ [1][1][2][1][RTW89_FCC][50] = 12,
+ [1][1][2][1][RTW89_ETSI][50] = 127,
+ [1][1][2][1][RTW89_KCC][50] = 28,
+ [1][1][2][1][RTW89_FCC][54] = 10,
+ [1][1][2][1][RTW89_ETSI][54] = 127,
+ [1][1][2][1][RTW89_KCC][54] = 28,
+ [1][1][2][1][RTW89_FCC][58] = 10,
+ [1][1][2][1][RTW89_ETSI][58] = 127,
+ [1][1][2][1][RTW89_KCC][58] = 28,
+ [1][1][2][1][RTW89_FCC][61] = 10,
+ [1][1][2][1][RTW89_ETSI][61] = 127,
+ [1][1][2][1][RTW89_KCC][61] = 28,
+ [1][1][2][1][RTW89_FCC][65] = 10,
+ [1][1][2][1][RTW89_ETSI][65] = 127,
+ [1][1][2][1][RTW89_KCC][65] = 28,
+ [1][1][2][1][RTW89_FCC][69] = 10,
+ [1][1][2][1][RTW89_ETSI][69] = 127,
+ [1][1][2][1][RTW89_KCC][69] = 28,
+ [1][1][2][1][RTW89_FCC][73] = 10,
+ [1][1][2][1][RTW89_ETSI][73] = 127,
+ [1][1][2][1][RTW89_KCC][73] = 28,
+ [1][1][2][1][RTW89_FCC][76] = 10,
+ [1][1][2][1][RTW89_ETSI][76] = 127,
+ [1][1][2][1][RTW89_KCC][76] = 28,
+ [1][1][2][1][RTW89_FCC][80] = 10,
+ [1][1][2][1][RTW89_ETSI][80] = 127,
+ [1][1][2][1][RTW89_KCC][80] = 32,
+ [1][1][2][1][RTW89_FCC][84] = 10,
+ [1][1][2][1][RTW89_ETSI][84] = 127,
+ [1][1][2][1][RTW89_KCC][84] = 32,
+ [1][1][2][1][RTW89_FCC][88] = 10,
+ [1][1][2][1][RTW89_ETSI][88] = 127,
+ [1][1][2][1][RTW89_KCC][88] = 32,
+ [1][1][2][1][RTW89_FCC][91] = 12,
+ [1][1][2][1][RTW89_ETSI][91] = 127,
+ [1][1][2][1][RTW89_KCC][91] = 32,
+ [1][1][2][1][RTW89_FCC][95] = 10,
+ [1][1][2][1][RTW89_ETSI][95] = 127,
+ [1][1][2][1][RTW89_KCC][95] = 32,
+ [1][1][2][1][RTW89_FCC][99] = 10,
+ [1][1][2][1][RTW89_ETSI][99] = 127,
+ [1][1][2][1][RTW89_KCC][99] = 32,
+ [1][1][2][1][RTW89_FCC][103] = 10,
+ [1][1][2][1][RTW89_ETSI][103] = 127,
+ [1][1][2][1][RTW89_KCC][103] = 32,
+ [1][1][2][1][RTW89_FCC][106] = 12,
+ [1][1][2][1][RTW89_ETSI][106] = 127,
+ [1][1][2][1][RTW89_KCC][106] = 32,
[1][1][2][1][RTW89_FCC][110] = 127,
+ [1][1][2][1][RTW89_ETSI][110] = 127,
+ [1][1][2][1][RTW89_KCC][110] = 127,
[1][1][2][1][RTW89_FCC][114] = 127,
+ [1][1][2][1][RTW89_ETSI][114] = 127,
+ [1][1][2][1][RTW89_KCC][114] = 127,
[1][1][2][1][RTW89_FCC][118] = 127,
- [2][0][2][0][RTW89_FCC][3] = 64,
- [2][0][2][0][RTW89_FCC][11] = 64,
- [2][0][2][0][RTW89_FCC][18] = 64,
- [2][0][2][0][RTW89_FCC][26] = 64,
- [2][0][2][0][RTW89_FCC][33] = 64,
- [2][0][2][0][RTW89_FCC][41] = 64,
- [2][0][2][0][RTW89_FCC][48] = 64,
- [2][0][2][0][RTW89_FCC][56] = 64,
- [2][0][2][0][RTW89_FCC][63] = 64,
- [2][0][2][0][RTW89_FCC][71] = 64,
- [2][0][2][0][RTW89_FCC][78] = 64,
- [2][0][2][0][RTW89_FCC][86] = 64,
- [2][0][2][0][RTW89_FCC][93] = 64,
- [2][0][2][0][RTW89_FCC][101] = 64,
+ [1][1][2][1][RTW89_ETSI][118] = 127,
+ [1][1][2][1][RTW89_KCC][118] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 46,
+ [2][0][2][0][RTW89_ETSI][3] = 48,
+ [2][0][2][0][RTW89_KCC][3] = 50,
+ [2][0][2][0][RTW89_FCC][11] = 46,
+ [2][0][2][0][RTW89_ETSI][11] = 48,
+ [2][0][2][0][RTW89_KCC][11] = 50,
+ [2][0][2][0][RTW89_FCC][18] = 46,
+ [2][0][2][0][RTW89_ETSI][18] = 48,
+ [2][0][2][0][RTW89_KCC][18] = 50,
+ [2][0][2][0][RTW89_FCC][26] = 46,
+ [2][0][2][0][RTW89_ETSI][26] = 48,
+ [2][0][2][0][RTW89_KCC][26] = 50,
+ [2][0][2][0][RTW89_FCC][33] = 46,
+ [2][0][2][0][RTW89_ETSI][33] = 48,
+ [2][0][2][0][RTW89_KCC][33] = 50,
+ [2][0][2][0][RTW89_FCC][41] = 46,
+ [2][0][2][0][RTW89_ETSI][41] = 48,
+ [2][0][2][0][RTW89_KCC][41] = 50,
+ [2][0][2][0][RTW89_FCC][48] = 46,
+ [2][0][2][0][RTW89_ETSI][48] = 127,
+ [2][0][2][0][RTW89_KCC][48] = 48,
+ [2][0][2][0][RTW89_FCC][56] = 46,
+ [2][0][2][0][RTW89_ETSI][56] = 127,
+ [2][0][2][0][RTW89_KCC][56] = 48,
+ [2][0][2][0][RTW89_FCC][63] = 46,
+ [2][0][2][0][RTW89_ETSI][63] = 127,
+ [2][0][2][0][RTW89_KCC][63] = 48,
+ [2][0][2][0][RTW89_FCC][71] = 46,
+ [2][0][2][0][RTW89_ETSI][71] = 127,
+ [2][0][2][0][RTW89_KCC][71] = 48,
+ [2][0][2][0][RTW89_FCC][78] = 46,
+ [2][0][2][0][RTW89_ETSI][78] = 127,
+ [2][0][2][0][RTW89_KCC][78] = 52,
+ [2][0][2][0][RTW89_FCC][86] = 46,
+ [2][0][2][0][RTW89_ETSI][86] = 127,
+ [2][0][2][0][RTW89_KCC][86] = 52,
+ [2][0][2][0][RTW89_FCC][93] = 46,
+ [2][0][2][0][RTW89_ETSI][93] = 127,
+ [2][0][2][0][RTW89_KCC][93] = 50,
+ [2][0][2][0][RTW89_FCC][101] = 44,
+ [2][0][2][0][RTW89_ETSI][101] = 127,
+ [2][0][2][0][RTW89_KCC][101] = 50,
[2][0][2][0][RTW89_FCC][108] = 127,
+ [2][0][2][0][RTW89_ETSI][108] = 127,
+ [2][0][2][0][RTW89_KCC][108] = 127,
[2][0][2][0][RTW89_FCC][116] = 127,
- [2][1][2][0][RTW89_FCC][3] = 52,
- [2][1][2][0][RTW89_FCC][11] = 52,
- [2][1][2][0][RTW89_FCC][18] = 52,
- [2][1][2][0][RTW89_FCC][26] = 52,
- [2][1][2][0][RTW89_FCC][33] = 52,
- [2][1][2][0][RTW89_FCC][41] = 52,
- [2][1][2][0][RTW89_FCC][48] = 52,
- [2][1][2][0][RTW89_FCC][56] = 52,
- [2][1][2][0][RTW89_FCC][63] = 52,
- [2][1][2][0][RTW89_FCC][71] = 52,
- [2][1][2][0][RTW89_FCC][78] = 52,
- [2][1][2][0][RTW89_FCC][86] = 52,
- [2][1][2][0][RTW89_FCC][93] = 52,
- [2][1][2][0][RTW89_FCC][101] = 52,
+ [2][0][2][0][RTW89_ETSI][116] = 127,
+ [2][0][2][0][RTW89_KCC][116] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 22,
+ [2][1][2][0][RTW89_ETSI][3] = 48,
+ [2][1][2][0][RTW89_KCC][3] = 38,
+ [2][1][2][0][RTW89_FCC][11] = 20,
+ [2][1][2][0][RTW89_ETSI][11] = 48,
+ [2][1][2][0][RTW89_KCC][11] = 38,
+ [2][1][2][0][RTW89_FCC][18] = 20,
+ [2][1][2][0][RTW89_ETSI][18] = 48,
+ [2][1][2][0][RTW89_KCC][18] = 38,
+ [2][1][2][0][RTW89_FCC][26] = 20,
+ [2][1][2][0][RTW89_ETSI][26] = 48,
+ [2][1][2][0][RTW89_KCC][26] = 38,
+ [2][1][2][0][RTW89_FCC][33] = 20,
+ [2][1][2][0][RTW89_ETSI][33] = 48,
+ [2][1][2][0][RTW89_KCC][33] = 38,
+ [2][1][2][0][RTW89_FCC][41] = 22,
+ [2][1][2][0][RTW89_ETSI][41] = 48,
+ [2][1][2][0][RTW89_KCC][41] = 38,
+ [2][1][2][0][RTW89_FCC][48] = 22,
+ [2][1][2][0][RTW89_ETSI][48] = 127,
+ [2][1][2][0][RTW89_KCC][48] = 38,
+ [2][1][2][0][RTW89_FCC][56] = 20,
+ [2][1][2][0][RTW89_ETSI][56] = 127,
+ [2][1][2][0][RTW89_KCC][56] = 38,
+ [2][1][2][0][RTW89_FCC][63] = 22,
+ [2][1][2][0][RTW89_ETSI][63] = 127,
+ [2][1][2][0][RTW89_KCC][63] = 38,
+ [2][1][2][0][RTW89_FCC][71] = 20,
+ [2][1][2][0][RTW89_ETSI][71] = 127,
+ [2][1][2][0][RTW89_KCC][71] = 38,
+ [2][1][2][0][RTW89_FCC][78] = 20,
+ [2][1][2][0][RTW89_ETSI][78] = 127,
+ [2][1][2][0][RTW89_KCC][78] = 38,
+ [2][1][2][0][RTW89_FCC][86] = 20,
+ [2][1][2][0][RTW89_ETSI][86] = 127,
+ [2][1][2][0][RTW89_KCC][86] = 38,
+ [2][1][2][0][RTW89_FCC][93] = 22,
+ [2][1][2][0][RTW89_ETSI][93] = 127,
+ [2][1][2][0][RTW89_KCC][93] = 38,
+ [2][1][2][0][RTW89_FCC][101] = 22,
+ [2][1][2][0][RTW89_ETSI][101] = 127,
+ [2][1][2][0][RTW89_KCC][101] = 38,
[2][1][2][0][RTW89_FCC][108] = 127,
+ [2][1][2][0][RTW89_ETSI][108] = 127,
+ [2][1][2][0][RTW89_KCC][108] = 127,
[2][1][2][0][RTW89_FCC][116] = 127,
- [2][1][2][1][RTW89_FCC][3] = 40,
- [2][1][2][1][RTW89_FCC][11] = 40,
- [2][1][2][1][RTW89_FCC][18] = 40,
- [2][1][2][1][RTW89_FCC][26] = 40,
- [2][1][2][1][RTW89_FCC][33] = 40,
- [2][1][2][1][RTW89_FCC][41] = 40,
- [2][1][2][1][RTW89_FCC][48] = 40,
- [2][1][2][1][RTW89_FCC][56] = 40,
- [2][1][2][1][RTW89_FCC][63] = 40,
- [2][1][2][1][RTW89_FCC][71] = 40,
- [2][1][2][1][RTW89_FCC][78] = 40,
- [2][1][2][1][RTW89_FCC][86] = 40,
- [2][1][2][1][RTW89_FCC][93] = 40,
- [2][1][2][1][RTW89_FCC][101] = 40,
+ [2][1][2][0][RTW89_ETSI][116] = 127,
+ [2][1][2][0][RTW89_KCC][116] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 22,
+ [2][1][2][1][RTW89_ETSI][3] = 42,
+ [2][1][2][1][RTW89_KCC][3] = 38,
+ [2][1][2][1][RTW89_FCC][11] = 20,
+ [2][1][2][1][RTW89_ETSI][11] = 42,
+ [2][1][2][1][RTW89_KCC][11] = 38,
+ [2][1][2][1][RTW89_FCC][18] = 20,
+ [2][1][2][1][RTW89_ETSI][18] = 42,
+ [2][1][2][1][RTW89_KCC][18] = 38,
+ [2][1][2][1][RTW89_FCC][26] = 20,
+ [2][1][2][1][RTW89_ETSI][26] = 42,
+ [2][1][2][1][RTW89_KCC][26] = 38,
+ [2][1][2][1][RTW89_FCC][33] = 20,
+ [2][1][2][1][RTW89_ETSI][33] = 42,
+ [2][1][2][1][RTW89_KCC][33] = 38,
+ [2][1][2][1][RTW89_FCC][41] = 22,
+ [2][1][2][1][RTW89_ETSI][41] = 42,
+ [2][1][2][1][RTW89_KCC][41] = 38,
+ [2][1][2][1][RTW89_FCC][48] = 22,
+ [2][1][2][1][RTW89_ETSI][48] = 127,
+ [2][1][2][1][RTW89_KCC][48] = 38,
+ [2][1][2][1][RTW89_FCC][56] = 20,
+ [2][1][2][1][RTW89_ETSI][56] = 127,
+ [2][1][2][1][RTW89_KCC][56] = 38,
+ [2][1][2][1][RTW89_FCC][63] = 22,
+ [2][1][2][1][RTW89_ETSI][63] = 127,
+ [2][1][2][1][RTW89_KCC][63] = 38,
+ [2][1][2][1][RTW89_FCC][71] = 20,
+ [2][1][2][1][RTW89_ETSI][71] = 127,
+ [2][1][2][1][RTW89_KCC][71] = 38,
+ [2][1][2][1][RTW89_FCC][78] = 20,
+ [2][1][2][1][RTW89_ETSI][78] = 127,
+ [2][1][2][1][RTW89_KCC][78] = 38,
+ [2][1][2][1][RTW89_FCC][86] = 20,
+ [2][1][2][1][RTW89_ETSI][86] = 127,
+ [2][1][2][1][RTW89_KCC][86] = 38,
+ [2][1][2][1][RTW89_FCC][93] = 22,
+ [2][1][2][1][RTW89_ETSI][93] = 127,
+ [2][1][2][1][RTW89_KCC][93] = 38,
+ [2][1][2][1][RTW89_FCC][101] = 22,
+ [2][1][2][1][RTW89_ETSI][101] = 127,
+ [2][1][2][1][RTW89_KCC][101] = 38,
[2][1][2][1][RTW89_FCC][108] = 127,
+ [2][1][2][1][RTW89_ETSI][108] = 127,
+ [2][1][2][1][RTW89_KCC][108] = 127,
[2][1][2][1][RTW89_FCC][116] = 127,
- [3][0][2][0][RTW89_FCC][7] = 56,
- [3][0][2][0][RTW89_FCC][22] = 56,
- [3][0][2][0][RTW89_FCC][37] = 56,
- [3][0][2][0][RTW89_FCC][52] = 56,
- [3][0][2][0][RTW89_FCC][67] = 56,
- [3][0][2][0][RTW89_FCC][82] = 56,
- [3][0][2][0][RTW89_FCC][97] = 56,
+ [2][1][2][1][RTW89_ETSI][116] = 127,
+ [2][1][2][1][RTW89_KCC][116] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 52,
+ [3][0][2][0][RTW89_ETSI][7] = 38,
+ [3][0][2][0][RTW89_KCC][7] = 42,
+ [3][0][2][0][RTW89_FCC][22] = 52,
+ [3][0][2][0][RTW89_ETSI][22] = 38,
+ [3][0][2][0][RTW89_KCC][22] = 42,
+ [3][0][2][0][RTW89_FCC][37] = 52,
+ [3][0][2][0][RTW89_ETSI][37] = 38,
+ [3][0][2][0][RTW89_KCC][37] = 42,
+ [3][0][2][0][RTW89_FCC][52] = 54,
+ [3][0][2][0][RTW89_ETSI][52] = 127,
+ [3][0][2][0][RTW89_KCC][52] = 56,
+ [3][0][2][0][RTW89_FCC][67] = 54,
+ [3][0][2][0][RTW89_ETSI][67] = 127,
+ [3][0][2][0][RTW89_KCC][67] = 54,
+ [3][0][2][0][RTW89_FCC][82] = 54,
+ [3][0][2][0][RTW89_ETSI][82] = 127,
+ [3][0][2][0][RTW89_KCC][82] = 26,
+ [3][0][2][0][RTW89_FCC][97] = 40,
+ [3][0][2][0][RTW89_ETSI][97] = 127,
+ [3][0][2][0][RTW89_KCC][97] = 26,
[3][0][2][0][RTW89_FCC][112] = 127,
- [3][1][2][0][RTW89_FCC][7] = 44,
- [3][1][2][0][RTW89_FCC][22] = 44,
- [3][1][2][0][RTW89_FCC][37] = 44,
- [3][1][2][0][RTW89_FCC][52] = 44,
- [3][1][2][0][RTW89_FCC][67] = 44,
- [3][1][2][0][RTW89_FCC][82] = 44,
- [3][1][2][0][RTW89_FCC][97] = 44,
+ [3][0][2][0][RTW89_ETSI][112] = 127,
+ [3][0][2][0][RTW89_KCC][112] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 32,
+ [3][1][2][0][RTW89_ETSI][7] = 38,
+ [3][1][2][0][RTW89_KCC][7] = 40,
+ [3][1][2][0][RTW89_FCC][22] = 30,
+ [3][1][2][0][RTW89_ETSI][22] = 38,
+ [3][1][2][0][RTW89_KCC][22] = 40,
+ [3][1][2][0][RTW89_FCC][37] = 30,
+ [3][1][2][0][RTW89_ETSI][37] = 38,
+ [3][1][2][0][RTW89_KCC][37] = 40,
+ [3][1][2][0][RTW89_FCC][52] = 30,
+ [3][1][2][0][RTW89_ETSI][52] = 127,
+ [3][1][2][0][RTW89_KCC][52] = 48,
+ [3][1][2][0][RTW89_FCC][67] = 32,
+ [3][1][2][0][RTW89_ETSI][67] = 127,
+ [3][1][2][0][RTW89_KCC][67] = 48,
+ [3][1][2][0][RTW89_FCC][82] = 32,
+ [3][1][2][0][RTW89_ETSI][82] = 127,
+ [3][1][2][0][RTW89_KCC][82] = 24,
+ [3][1][2][0][RTW89_FCC][97] = 14,
+ [3][1][2][0][RTW89_ETSI][97] = 127,
+ [3][1][2][0][RTW89_KCC][97] = 24,
[3][1][2][0][RTW89_FCC][112] = 127,
+ [3][1][2][0][RTW89_ETSI][112] = 127,
+ [3][1][2][0][RTW89_KCC][112] = 127,
[3][1][2][1][RTW89_FCC][7] = 32,
- [3][1][2][1][RTW89_FCC][22] = 32,
- [3][1][2][1][RTW89_FCC][37] = 32,
- [3][1][2][1][RTW89_FCC][52] = 32,
+ [3][1][2][1][RTW89_ETSI][7] = 38,
+ [3][1][2][1][RTW89_KCC][7] = 40,
+ [3][1][2][1][RTW89_FCC][22] = 30,
+ [3][1][2][1][RTW89_ETSI][22] = 38,
+ [3][1][2][1][RTW89_KCC][22] = 40,
+ [3][1][2][1][RTW89_FCC][37] = 30,
+ [3][1][2][1][RTW89_ETSI][37] = 38,
+ [3][1][2][1][RTW89_KCC][37] = 40,
+ [3][1][2][1][RTW89_FCC][52] = 30,
+ [3][1][2][1][RTW89_ETSI][52] = 127,
+ [3][1][2][1][RTW89_KCC][52] = 48,
[3][1][2][1][RTW89_FCC][67] = 32,
+ [3][1][2][1][RTW89_ETSI][67] = 127,
+ [3][1][2][1][RTW89_KCC][67] = 48,
[3][1][2][1][RTW89_FCC][82] = 32,
- [3][1][2][1][RTW89_FCC][97] = 32,
+ [3][1][2][1][RTW89_ETSI][82] = 127,
+ [3][1][2][1][RTW89_KCC][82] = 24,
+ [3][1][2][1][RTW89_FCC][97] = 14,
+ [3][1][2][1][RTW89_ETSI][97] = 127,
+ [3][1][2][1][RTW89_KCC][97] = 24,
[3][1][2][1][RTW89_FCC][112] = 127,
+ [3][1][2][1][RTW89_ETSI][112] = 127,
+ [3][1][2][1][RTW89_KCC][112] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
@@ -17126,8 +33220,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_WW][8] = 32,
[0][0][RTW89_WW][9] = 32,
[0][0][RTW89_WW][10] = 32,
- [0][0][RTW89_WW][11] = 32,
- [0][0][RTW89_WW][12] = 24,
+ [0][0][RTW89_WW][11] = 26,
+ [0][0][RTW89_WW][12] = -20,
[0][0][RTW89_WW][13] = 0,
[0][1][RTW89_WW][0] = 20,
[0][1][RTW89_WW][1] = 22,
@@ -17141,7 +33235,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][9] = 22,
[0][1][RTW89_WW][10] = 22,
[0][1][RTW89_WW][11] = 22,
- [0][1][RTW89_WW][12] = 20,
+ [0][1][RTW89_WW][12] = -30,
[0][1][RTW89_WW][13] = 0,
[1][0][RTW89_WW][0] = 42,
[1][0][RTW89_WW][1] = 44,
@@ -17154,8 +33248,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_WW][8] = 44,
[1][0][RTW89_WW][9] = 44,
[1][0][RTW89_WW][10] = 44,
- [1][0][RTW89_WW][11] = 42,
- [1][0][RTW89_WW][12] = 30,
+ [1][0][RTW89_WW][11] = 36,
+ [1][0][RTW89_WW][12] = 4,
[1][0][RTW89_WW][13] = 0,
[1][1][RTW89_WW][0] = 32,
[1][1][RTW89_WW][1] = 32,
@@ -17169,7 +33263,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][9] = 32,
[1][1][RTW89_WW][10] = 32,
[1][1][RTW89_WW][11] = 30,
- [1][1][RTW89_WW][12] = 24,
+ [1][1][RTW89_WW][12] = -6,
[1][1][RTW89_WW][13] = 0,
[2][0][RTW89_WW][0] = 56,
[2][0][RTW89_WW][1] = 56,
@@ -17182,8 +33276,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_WW][8] = 56,
[2][0][RTW89_WW][9] = 56,
[2][0][RTW89_WW][10] = 56,
- [2][0][RTW89_WW][11] = 42,
- [2][0][RTW89_WW][12] = 38,
+ [2][0][RTW89_WW][11] = 48,
+ [2][0][RTW89_WW][12] = 16,
[2][0][RTW89_WW][13] = 0,
[2][1][RTW89_WW][0] = 44,
[2][1][RTW89_WW][1] = 44,
@@ -17196,2213 +33290,3353 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][8] = 44,
[2][1][RTW89_WW][9] = 44,
[2][1][RTW89_WW][10] = 44,
- [2][1][RTW89_WW][11] = 30,
- [2][1][RTW89_WW][12] = 26,
+ [2][1][RTW89_WW][11] = 44,
+ [2][1][RTW89_WW][12] = 6,
[2][1][RTW89_WW][13] = 0,
[0][0][RTW89_FCC][0] = 60,
[0][0][RTW89_ETSI][0] = 34,
[0][0][RTW89_MKK][0] = 36,
- [0][0][RTW89_IC][0] = 68,
- [0][0][RTW89_ACMA][0] = 32,
+ [0][0][RTW89_IC][0] = 60,
+ [0][0][RTW89_KCC][0] = 42,
+ [0][0][RTW89_ACMA][0] = 34,
+ [0][0][RTW89_CN][0] = 32,
+ [0][0][RTW89_UK][0] = 34,
[0][0][RTW89_FCC][1] = 60,
[0][0][RTW89_ETSI][1] = 38,
[0][0][RTW89_MKK][1] = 40,
- [0][0][RTW89_IC][1] = 68,
- [0][0][RTW89_ACMA][1] = 32,
+ [0][0][RTW89_IC][1] = 60,
+ [0][0][RTW89_KCC][1] = 42,
+ [0][0][RTW89_ACMA][1] = 38,
+ [0][0][RTW89_CN][1] = 32,
+ [0][0][RTW89_UK][1] = 38,
[0][0][RTW89_FCC][2] = 64,
[0][0][RTW89_ETSI][2] = 38,
[0][0][RTW89_MKK][2] = 40,
- [0][0][RTW89_IC][2] = 72,
- [0][0][RTW89_ACMA][2] = 32,
+ [0][0][RTW89_IC][2] = 64,
+ [0][0][RTW89_KCC][2] = 42,
+ [0][0][RTW89_ACMA][2] = 38,
+ [0][0][RTW89_CN][2] = 32,
+ [0][0][RTW89_UK][2] = 38,
[0][0][RTW89_FCC][3] = 68,
[0][0][RTW89_ETSI][3] = 38,
[0][0][RTW89_MKK][3] = 40,
- [0][0][RTW89_IC][3] = 76,
- [0][0][RTW89_ACMA][3] = 32,
+ [0][0][RTW89_IC][3] = 68,
+ [0][0][RTW89_KCC][3] = 42,
+ [0][0][RTW89_ACMA][3] = 38,
+ [0][0][RTW89_CN][3] = 32,
+ [0][0][RTW89_UK][3] = 38,
[0][0][RTW89_FCC][4] = 68,
[0][0][RTW89_ETSI][4] = 38,
[0][0][RTW89_MKK][4] = 40,
- [0][0][RTW89_IC][4] = 76,
- [0][0][RTW89_ACMA][4] = 32,
- [0][0][RTW89_FCC][5] = 76,
+ [0][0][RTW89_IC][4] = 68,
+ [0][0][RTW89_KCC][4] = 42,
+ [0][0][RTW89_ACMA][4] = 38,
+ [0][0][RTW89_CN][4] = 32,
+ [0][0][RTW89_UK][4] = 38,
+ [0][0][RTW89_FCC][5] = 78,
[0][0][RTW89_ETSI][5] = 38,
[0][0][RTW89_MKK][5] = 40,
- [0][0][RTW89_IC][5] = 84,
- [0][0][RTW89_ACMA][5] = 32,
- [0][0][RTW89_FCC][6] = 66,
+ [0][0][RTW89_IC][5] = 78,
+ [0][0][RTW89_KCC][5] = 42,
+ [0][0][RTW89_ACMA][5] = 38,
+ [0][0][RTW89_CN][5] = 32,
+ [0][0][RTW89_UK][5] = 38,
+ [0][0][RTW89_FCC][6] = 54,
[0][0][RTW89_ETSI][6] = 38,
[0][0][RTW89_MKK][6] = 40,
- [0][0][RTW89_IC][6] = 74,
- [0][0][RTW89_ACMA][6] = 32,
- [0][0][RTW89_FCC][7] = 66,
+ [0][0][RTW89_IC][6] = 54,
+ [0][0][RTW89_KCC][6] = 42,
+ [0][0][RTW89_ACMA][6] = 38,
+ [0][0][RTW89_CN][6] = 32,
+ [0][0][RTW89_UK][6] = 38,
+ [0][0][RTW89_FCC][7] = 54,
[0][0][RTW89_ETSI][7] = 38,
[0][0][RTW89_MKK][7] = 40,
- [0][0][RTW89_IC][7] = 74,
- [0][0][RTW89_ACMA][7] = 32,
- [0][0][RTW89_FCC][8] = 62,
+ [0][0][RTW89_IC][7] = 54,
+ [0][0][RTW89_KCC][7] = 42,
+ [0][0][RTW89_ACMA][7] = 38,
+ [0][0][RTW89_CN][7] = 32,
+ [0][0][RTW89_UK][7] = 38,
+ [0][0][RTW89_FCC][8] = 50,
[0][0][RTW89_ETSI][8] = 38,
[0][0][RTW89_MKK][8] = 40,
- [0][0][RTW89_IC][8] = 70,
- [0][0][RTW89_ACMA][8] = 32,
- [0][0][RTW89_FCC][9] = 58,
+ [0][0][RTW89_IC][8] = 50,
+ [0][0][RTW89_KCC][8] = 42,
+ [0][0][RTW89_ACMA][8] = 38,
+ [0][0][RTW89_CN][8] = 32,
+ [0][0][RTW89_UK][8] = 38,
+ [0][0][RTW89_FCC][9] = 46,
[0][0][RTW89_ETSI][9] = 38,
[0][0][RTW89_MKK][9] = 40,
- [0][0][RTW89_IC][9] = 66,
- [0][0][RTW89_ACMA][9] = 32,
- [0][0][RTW89_FCC][10] = 58,
+ [0][0][RTW89_IC][9] = 46,
+ [0][0][RTW89_KCC][9] = 40,
+ [0][0][RTW89_ACMA][9] = 38,
+ [0][0][RTW89_CN][9] = 32,
+ [0][0][RTW89_UK][9] = 38,
+ [0][0][RTW89_FCC][10] = 46,
[0][0][RTW89_ETSI][10] = 38,
[0][0][RTW89_MKK][10] = 40,
- [0][0][RTW89_IC][10] = 66,
- [0][0][RTW89_ACMA][10] = 32,
- [0][0][RTW89_FCC][11] = 42,
+ [0][0][RTW89_IC][10] = 46,
+ [0][0][RTW89_KCC][10] = 40,
+ [0][0][RTW89_ACMA][10] = 38,
+ [0][0][RTW89_CN][10] = 32,
+ [0][0][RTW89_UK][10] = 38,
+ [0][0][RTW89_FCC][11] = 26,
[0][0][RTW89_ETSI][11] = 38,
[0][0][RTW89_MKK][11] = 40,
- [0][0][RTW89_IC][11] = 56,
- [0][0][RTW89_ACMA][11] = 32,
- [0][0][RTW89_FCC][12] = 24,
+ [0][0][RTW89_IC][11] = 26,
+ [0][0][RTW89_KCC][11] = 40,
+ [0][0][RTW89_ACMA][11] = 38,
+ [0][0][RTW89_CN][11] = 32,
+ [0][0][RTW89_UK][11] = 38,
+ [0][0][RTW89_FCC][12] = -20,
[0][0][RTW89_ETSI][12] = 34,
[0][0][RTW89_MKK][12] = 36,
- [0][0][RTW89_IC][12] = 32,
- [0][0][RTW89_ACMA][12] = 32,
+ [0][0][RTW89_IC][12] = -20,
+ [0][0][RTW89_KCC][12] = 40,
+ [0][0][RTW89_ACMA][12] = 34,
+ [0][0][RTW89_CN][12] = 32,
+ [0][0][RTW89_UK][12] = 34,
[0][0][RTW89_FCC][13] = 127,
[0][0][RTW89_ETSI][13] = 127,
[0][0][RTW89_MKK][13] = 127,
[0][0][RTW89_IC][13] = 127,
+ [0][0][RTW89_KCC][13] = 127,
[0][0][RTW89_ACMA][13] = 127,
- [0][1][RTW89_FCC][0] = 46,
+ [0][0][RTW89_CN][13] = 127,
+ [0][0][RTW89_UK][13] = 127,
+ [0][1][RTW89_FCC][0] = 56,
[0][1][RTW89_ETSI][0] = 22,
[0][1][RTW89_MKK][0] = 24,
- [0][1][RTW89_IC][0] = 62,
- [0][1][RTW89_ACMA][0] = 20,
- [0][1][RTW89_FCC][1] = 46,
+ [0][1][RTW89_IC][0] = 56,
+ [0][1][RTW89_KCC][0] = 30,
+ [0][1][RTW89_ACMA][0] = 22,
+ [0][1][RTW89_CN][0] = 20,
+ [0][1][RTW89_UK][0] = 22,
+ [0][1][RTW89_FCC][1] = 56,
[0][1][RTW89_ETSI][1] = 24,
[0][1][RTW89_MKK][1] = 30,
- [0][1][RTW89_IC][1] = 62,
- [0][1][RTW89_ACMA][1] = 22,
- [0][1][RTW89_FCC][2] = 50,
+ [0][1][RTW89_IC][1] = 56,
+ [0][1][RTW89_KCC][1] = 30,
+ [0][1][RTW89_ACMA][1] = 24,
+ [0][1][RTW89_CN][1] = 22,
+ [0][1][RTW89_UK][1] = 24,
+ [0][1][RTW89_FCC][2] = 60,
[0][1][RTW89_ETSI][2] = 24,
[0][1][RTW89_MKK][2] = 30,
- [0][1][RTW89_IC][2] = 66,
- [0][1][RTW89_ACMA][2] = 22,
- [0][1][RTW89_FCC][3] = 54,
+ [0][1][RTW89_IC][2] = 60,
+ [0][1][RTW89_KCC][2] = 30,
+ [0][1][RTW89_ACMA][2] = 24,
+ [0][1][RTW89_CN][2] = 22,
+ [0][1][RTW89_UK][2] = 24,
+ [0][1][RTW89_FCC][3] = 64,
[0][1][RTW89_ETSI][3] = 24,
[0][1][RTW89_MKK][3] = 30,
- [0][1][RTW89_IC][3] = 70,
- [0][1][RTW89_ACMA][3] = 22,
- [0][1][RTW89_FCC][4] = 58,
+ [0][1][RTW89_IC][3] = 64,
+ [0][1][RTW89_KCC][3] = 30,
+ [0][1][RTW89_ACMA][3] = 24,
+ [0][1][RTW89_CN][3] = 22,
+ [0][1][RTW89_UK][3] = 24,
+ [0][1][RTW89_FCC][4] = 68,
[0][1][RTW89_ETSI][4] = 24,
[0][1][RTW89_MKK][4] = 30,
- [0][1][RTW89_IC][4] = 74,
- [0][1][RTW89_ACMA][4] = 22,
- [0][1][RTW89_FCC][5] = 66,
+ [0][1][RTW89_IC][4] = 68,
+ [0][1][RTW89_KCC][4] = 28,
+ [0][1][RTW89_ACMA][4] = 24,
+ [0][1][RTW89_CN][4] = 22,
+ [0][1][RTW89_UK][4] = 24,
+ [0][1][RTW89_FCC][5] = 76,
[0][1][RTW89_ETSI][5] = 24,
[0][1][RTW89_MKK][5] = 30,
- [0][1][RTW89_IC][5] = 74,
- [0][1][RTW89_ACMA][5] = 22,
- [0][1][RTW89_FCC][6] = 58,
+ [0][1][RTW89_IC][5] = 76,
+ [0][1][RTW89_KCC][5] = 28,
+ [0][1][RTW89_ACMA][5] = 24,
+ [0][1][RTW89_CN][5] = 22,
+ [0][1][RTW89_UK][5] = 24,
+ [0][1][RTW89_FCC][6] = 54,
[0][1][RTW89_ETSI][6] = 24,
[0][1][RTW89_MKK][6] = 30,
- [0][1][RTW89_IC][6] = 72,
- [0][1][RTW89_ACMA][6] = 22,
- [0][1][RTW89_FCC][7] = 54,
+ [0][1][RTW89_IC][6] = 54,
+ [0][1][RTW89_KCC][6] = 28,
+ [0][1][RTW89_ACMA][6] = 24,
+ [0][1][RTW89_CN][6] = 22,
+ [0][1][RTW89_UK][6] = 24,
+ [0][1][RTW89_FCC][7] = 50,
[0][1][RTW89_ETSI][7] = 24,
[0][1][RTW89_MKK][7] = 30,
- [0][1][RTW89_IC][7] = 68,
- [0][1][RTW89_ACMA][7] = 22,
- [0][1][RTW89_FCC][8] = 50,
+ [0][1][RTW89_IC][7] = 50,
+ [0][1][RTW89_KCC][7] = 28,
+ [0][1][RTW89_ACMA][7] = 24,
+ [0][1][RTW89_CN][7] = 22,
+ [0][1][RTW89_UK][7] = 24,
+ [0][1][RTW89_FCC][8] = 46,
[0][1][RTW89_ETSI][8] = 24,
[0][1][RTW89_MKK][8] = 30,
- [0][1][RTW89_IC][8] = 64,
- [0][1][RTW89_ACMA][8] = 22,
- [0][1][RTW89_FCC][9] = 46,
+ [0][1][RTW89_IC][8] = 46,
+ [0][1][RTW89_KCC][8] = 28,
+ [0][1][RTW89_ACMA][8] = 24,
+ [0][1][RTW89_CN][8] = 22,
+ [0][1][RTW89_UK][8] = 24,
+ [0][1][RTW89_FCC][9] = 42,
[0][1][RTW89_ETSI][9] = 24,
[0][1][RTW89_MKK][9] = 30,
- [0][1][RTW89_IC][9] = 60,
- [0][1][RTW89_ACMA][9] = 22,
- [0][1][RTW89_FCC][10] = 46,
+ [0][1][RTW89_IC][9] = 42,
+ [0][1][RTW89_KCC][9] = 28,
+ [0][1][RTW89_ACMA][9] = 24,
+ [0][1][RTW89_CN][9] = 22,
+ [0][1][RTW89_UK][9] = 24,
+ [0][1][RTW89_FCC][10] = 42,
[0][1][RTW89_ETSI][10] = 24,
[0][1][RTW89_MKK][10] = 30,
- [0][1][RTW89_IC][10] = 60,
- [0][1][RTW89_ACMA][10] = 22,
- [0][1][RTW89_FCC][11] = 30,
+ [0][1][RTW89_IC][10] = 42,
+ [0][1][RTW89_KCC][10] = 28,
+ [0][1][RTW89_ACMA][10] = 24,
+ [0][1][RTW89_CN][10] = 22,
+ [0][1][RTW89_UK][10] = 24,
+ [0][1][RTW89_FCC][11] = 22,
[0][1][RTW89_ETSI][11] = 24,
[0][1][RTW89_MKK][11] = 30,
- [0][1][RTW89_IC][11] = 52,
- [0][1][RTW89_ACMA][11] = 22,
- [0][1][RTW89_FCC][12] = 22,
+ [0][1][RTW89_IC][11] = 22,
+ [0][1][RTW89_KCC][11] = 28,
+ [0][1][RTW89_ACMA][11] = 24,
+ [0][1][RTW89_CN][11] = 22,
+ [0][1][RTW89_UK][11] = 24,
+ [0][1][RTW89_FCC][12] = -30,
[0][1][RTW89_ETSI][12] = 20,
[0][1][RTW89_MKK][12] = 24,
- [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_IC][12] = -30,
+ [0][1][RTW89_KCC][12] = 28,
[0][1][RTW89_ACMA][12] = 20,
+ [0][1][RTW89_CN][12] = 20,
+ [0][1][RTW89_UK][12] = 20,
[0][1][RTW89_FCC][13] = 127,
[0][1][RTW89_ETSI][13] = 127,
[0][1][RTW89_MKK][13] = 127,
[0][1][RTW89_IC][13] = 127,
+ [0][1][RTW89_KCC][13] = 127,
[0][1][RTW89_ACMA][13] = 127,
- [1][0][RTW89_FCC][0] = 64,
+ [0][1][RTW89_CN][13] = 127,
+ [0][1][RTW89_UK][13] = 127,
+ [1][0][RTW89_FCC][0] = 66,
[1][0][RTW89_ETSI][0] = 46,
[1][0][RTW89_MKK][0] = 48,
- [1][0][RTW89_IC][0] = 78,
- [1][0][RTW89_ACMA][0] = 42,
- [1][0][RTW89_FCC][1] = 64,
+ [1][0][RTW89_IC][0] = 66,
+ [1][0][RTW89_KCC][0] = 50,
+ [1][0][RTW89_ACMA][0] = 46,
+ [1][0][RTW89_CN][0] = 42,
+ [1][0][RTW89_UK][0] = 46,
+ [1][0][RTW89_FCC][1] = 66,
[1][0][RTW89_ETSI][1] = 46,
[1][0][RTW89_MKK][1] = 48,
- [1][0][RTW89_IC][1] = 78,
- [1][0][RTW89_ACMA][1] = 44,
- [1][0][RTW89_FCC][2] = 68,
+ [1][0][RTW89_IC][1] = 66,
+ [1][0][RTW89_KCC][1] = 50,
+ [1][0][RTW89_ACMA][1] = 46,
+ [1][0][RTW89_CN][1] = 44,
+ [1][0][RTW89_UK][1] = 46,
+ [1][0][RTW89_FCC][2] = 70,
[1][0][RTW89_ETSI][2] = 46,
[1][0][RTW89_MKK][2] = 48,
- [1][0][RTW89_IC][2] = 82,
- [1][0][RTW89_ACMA][2] = 44,
- [1][0][RTW89_FCC][3] = 70,
+ [1][0][RTW89_IC][2] = 70,
+ [1][0][RTW89_KCC][2] = 50,
+ [1][0][RTW89_ACMA][2] = 46,
+ [1][0][RTW89_CN][2] = 44,
+ [1][0][RTW89_UK][2] = 46,
+ [1][0][RTW89_FCC][3] = 72,
[1][0][RTW89_ETSI][3] = 46,
[1][0][RTW89_MKK][3] = 48,
- [1][0][RTW89_IC][3] = 84,
- [1][0][RTW89_ACMA][3] = 44,
- [1][0][RTW89_FCC][4] = 70,
+ [1][0][RTW89_IC][3] = 72,
+ [1][0][RTW89_KCC][3] = 50,
+ [1][0][RTW89_ACMA][3] = 46,
+ [1][0][RTW89_CN][3] = 44,
+ [1][0][RTW89_UK][3] = 46,
+ [1][0][RTW89_FCC][4] = 72,
[1][0][RTW89_ETSI][4] = 46,
[1][0][RTW89_MKK][4] = 48,
- [1][0][RTW89_IC][4] = 84,
- [1][0][RTW89_ACMA][4] = 44,
- [1][0][RTW89_FCC][5] = 76,
+ [1][0][RTW89_IC][4] = 72,
+ [1][0][RTW89_KCC][4] = 50,
+ [1][0][RTW89_ACMA][4] = 46,
+ [1][0][RTW89_CN][4] = 44,
+ [1][0][RTW89_UK][4] = 46,
+ [1][0][RTW89_FCC][5] = 82,
[1][0][RTW89_ETSI][5] = 46,
[1][0][RTW89_MKK][5] = 48,
- [1][0][RTW89_IC][5] = 84,
- [1][0][RTW89_ACMA][5] = 44,
- [1][0][RTW89_FCC][6] = 64,
+ [1][0][RTW89_IC][5] = 82,
+ [1][0][RTW89_KCC][5] = 50,
+ [1][0][RTW89_ACMA][5] = 46,
+ [1][0][RTW89_CN][5] = 44,
+ [1][0][RTW89_UK][5] = 46,
+ [1][0][RTW89_FCC][6] = 58,
[1][0][RTW89_ETSI][6] = 44,
[1][0][RTW89_MKK][6] = 48,
- [1][0][RTW89_IC][6] = 78,
+ [1][0][RTW89_IC][6] = 58,
+ [1][0][RTW89_KCC][6] = 50,
[1][0][RTW89_ACMA][6] = 44,
- [1][0][RTW89_FCC][7] = 64,
+ [1][0][RTW89_CN][6] = 44,
+ [1][0][RTW89_UK][6] = 44,
+ [1][0][RTW89_FCC][7] = 58,
[1][0][RTW89_ETSI][7] = 46,
[1][0][RTW89_MKK][7] = 48,
- [1][0][RTW89_IC][7] = 78,
- [1][0][RTW89_ACMA][7] = 44,
- [1][0][RTW89_FCC][8] = 64,
+ [1][0][RTW89_IC][7] = 58,
+ [1][0][RTW89_KCC][7] = 50,
+ [1][0][RTW89_ACMA][7] = 46,
+ [1][0][RTW89_CN][7] = 44,
+ [1][0][RTW89_UK][7] = 46,
+ [1][0][RTW89_FCC][8] = 58,
[1][0][RTW89_ETSI][8] = 46,
[1][0][RTW89_MKK][8] = 48,
- [1][0][RTW89_IC][8] = 78,
- [1][0][RTW89_ACMA][8] = 44,
- [1][0][RTW89_FCC][9] = 60,
+ [1][0][RTW89_IC][8] = 58,
+ [1][0][RTW89_KCC][8] = 50,
+ [1][0][RTW89_ACMA][8] = 46,
+ [1][0][RTW89_CN][8] = 44,
+ [1][0][RTW89_UK][8] = 46,
+ [1][0][RTW89_FCC][9] = 54,
[1][0][RTW89_ETSI][9] = 46,
[1][0][RTW89_MKK][9] = 48,
- [1][0][RTW89_IC][9] = 74,
- [1][0][RTW89_ACMA][9] = 44,
- [1][0][RTW89_FCC][10] = 60,
+ [1][0][RTW89_IC][9] = 54,
+ [1][0][RTW89_KCC][9] = 50,
+ [1][0][RTW89_ACMA][9] = 46,
+ [1][0][RTW89_CN][9] = 44,
+ [1][0][RTW89_UK][9] = 46,
+ [1][0][RTW89_FCC][10] = 54,
[1][0][RTW89_ETSI][10] = 46,
[1][0][RTW89_MKK][10] = 48,
- [1][0][RTW89_IC][10] = 74,
- [1][0][RTW89_ACMA][10] = 44,
- [1][0][RTW89_FCC][11] = 42,
+ [1][0][RTW89_IC][10] = 54,
+ [1][0][RTW89_KCC][10] = 50,
+ [1][0][RTW89_ACMA][10] = 46,
+ [1][0][RTW89_CN][10] = 44,
+ [1][0][RTW89_UK][10] = 46,
+ [1][0][RTW89_FCC][11] = 36,
[1][0][RTW89_ETSI][11] = 46,
[1][0][RTW89_MKK][11] = 48,
- [1][0][RTW89_IC][11] = 72,
- [1][0][RTW89_ACMA][11] = 44,
- [1][0][RTW89_FCC][12] = 30,
+ [1][0][RTW89_IC][11] = 36,
+ [1][0][RTW89_KCC][11] = 50,
+ [1][0][RTW89_ACMA][11] = 46,
+ [1][0][RTW89_CN][11] = 44,
+ [1][0][RTW89_UK][11] = 46,
+ [1][0][RTW89_FCC][12] = 4,
[1][0][RTW89_ETSI][12] = 46,
[1][0][RTW89_MKK][12] = 46,
- [1][0][RTW89_IC][12] = 38,
- [1][0][RTW89_ACMA][12] = 42,
+ [1][0][RTW89_IC][12] = 4,
+ [1][0][RTW89_KCC][12] = 50,
+ [1][0][RTW89_ACMA][12] = 46,
+ [1][0][RTW89_CN][12] = 42,
+ [1][0][RTW89_UK][12] = 46,
[1][0][RTW89_FCC][13] = 127,
[1][0][RTW89_ETSI][13] = 127,
[1][0][RTW89_MKK][13] = 127,
[1][0][RTW89_IC][13] = 127,
+ [1][0][RTW89_KCC][13] = 127,
[1][0][RTW89_ACMA][13] = 127,
- [1][1][RTW89_FCC][0] = 46,
+ [1][0][RTW89_CN][13] = 127,
+ [1][0][RTW89_UK][13] = 127,
+ [1][1][RTW89_FCC][0] = 58,
[1][1][RTW89_ETSI][0] = 32,
[1][1][RTW89_MKK][0] = 34,
- [1][1][RTW89_IC][0] = 66,
+ [1][1][RTW89_IC][0] = 58,
+ [1][1][RTW89_KCC][0] = 38,
[1][1][RTW89_ACMA][0] = 32,
- [1][1][RTW89_FCC][1] = 46,
+ [1][1][RTW89_CN][0] = 32,
+ [1][1][RTW89_UK][0] = 32,
+ [1][1][RTW89_FCC][1] = 58,
[1][1][RTW89_ETSI][1] = 34,
[1][1][RTW89_MKK][1] = 34,
- [1][1][RTW89_IC][1] = 66,
- [1][1][RTW89_ACMA][1] = 32,
- [1][1][RTW89_FCC][2] = 50,
+ [1][1][RTW89_IC][1] = 58,
+ [1][1][RTW89_KCC][1] = 38,
+ [1][1][RTW89_ACMA][1] = 34,
+ [1][1][RTW89_CN][1] = 32,
+ [1][1][RTW89_UK][1] = 34,
+ [1][1][RTW89_FCC][2] = 62,
[1][1][RTW89_ETSI][2] = 34,
[1][1][RTW89_MKK][2] = 34,
- [1][1][RTW89_IC][2] = 70,
- [1][1][RTW89_ACMA][2] = 32,
- [1][1][RTW89_FCC][3] = 54,
+ [1][1][RTW89_IC][2] = 62,
+ [1][1][RTW89_KCC][2] = 38,
+ [1][1][RTW89_ACMA][2] = 34,
+ [1][1][RTW89_CN][2] = 32,
+ [1][1][RTW89_UK][2] = 34,
+ [1][1][RTW89_FCC][3] = 66,
[1][1][RTW89_ETSI][3] = 34,
[1][1][RTW89_MKK][3] = 34,
- [1][1][RTW89_IC][3] = 74,
- [1][1][RTW89_ACMA][3] = 32,
- [1][1][RTW89_FCC][4] = 58,
+ [1][1][RTW89_IC][3] = 66,
+ [1][1][RTW89_KCC][3] = 38,
+ [1][1][RTW89_ACMA][3] = 34,
+ [1][1][RTW89_CN][3] = 32,
+ [1][1][RTW89_UK][3] = 34,
+ [1][1][RTW89_FCC][4] = 70,
[1][1][RTW89_ETSI][4] = 34,
[1][1][RTW89_MKK][4] = 34,
- [1][1][RTW89_IC][4] = 74,
- [1][1][RTW89_ACMA][4] = 32,
- [1][1][RTW89_FCC][5] = 66,
+ [1][1][RTW89_IC][4] = 70,
+ [1][1][RTW89_KCC][4] = 38,
+ [1][1][RTW89_ACMA][4] = 34,
+ [1][1][RTW89_CN][4] = 32,
+ [1][1][RTW89_UK][4] = 34,
+ [1][1][RTW89_FCC][5] = 82,
[1][1][RTW89_ETSI][5] = 34,
[1][1][RTW89_MKK][5] = 34,
- [1][1][RTW89_IC][5] = 74,
- [1][1][RTW89_ACMA][5] = 32,
- [1][1][RTW89_FCC][6] = 58,
+ [1][1][RTW89_IC][5] = 82,
+ [1][1][RTW89_KCC][5] = 38,
+ [1][1][RTW89_ACMA][5] = 34,
+ [1][1][RTW89_CN][5] = 32,
+ [1][1][RTW89_UK][5] = 34,
+ [1][1][RTW89_FCC][6] = 60,
[1][1][RTW89_ETSI][6] = 34,
[1][1][RTW89_MKK][6] = 34,
- [1][1][RTW89_IC][6] = 74,
- [1][1][RTW89_ACMA][6] = 32,
- [1][1][RTW89_FCC][7] = 54,
+ [1][1][RTW89_IC][6] = 60,
+ [1][1][RTW89_KCC][6] = 38,
+ [1][1][RTW89_ACMA][6] = 34,
+ [1][1][RTW89_CN][6] = 32,
+ [1][1][RTW89_UK][6] = 34,
+ [1][1][RTW89_FCC][7] = 56,
[1][1][RTW89_ETSI][7] = 34,
[1][1][RTW89_MKK][7] = 34,
- [1][1][RTW89_IC][7] = 74,
- [1][1][RTW89_ACMA][7] = 32,
- [1][1][RTW89_FCC][8] = 50,
+ [1][1][RTW89_IC][7] = 56,
+ [1][1][RTW89_KCC][7] = 38,
+ [1][1][RTW89_ACMA][7] = 34,
+ [1][1][RTW89_CN][7] = 32,
+ [1][1][RTW89_UK][7] = 34,
+ [1][1][RTW89_FCC][8] = 52,
[1][1][RTW89_ETSI][8] = 34,
[1][1][RTW89_MKK][8] = 34,
- [1][1][RTW89_IC][8] = 70,
- [1][1][RTW89_ACMA][8] = 32,
- [1][1][RTW89_FCC][9] = 46,
+ [1][1][RTW89_IC][8] = 52,
+ [1][1][RTW89_KCC][8] = 38,
+ [1][1][RTW89_ACMA][8] = 34,
+ [1][1][RTW89_CN][8] = 32,
+ [1][1][RTW89_UK][8] = 34,
+ [1][1][RTW89_FCC][9] = 48,
[1][1][RTW89_ETSI][9] = 34,
[1][1][RTW89_MKK][9] = 34,
- [1][1][RTW89_IC][9] = 66,
- [1][1][RTW89_ACMA][9] = 32,
- [1][1][RTW89_FCC][10] = 46,
+ [1][1][RTW89_IC][9] = 48,
+ [1][1][RTW89_KCC][9] = 38,
+ [1][1][RTW89_ACMA][9] = 34,
+ [1][1][RTW89_CN][9] = 32,
+ [1][1][RTW89_UK][9] = 34,
+ [1][1][RTW89_FCC][10] = 48,
[1][1][RTW89_ETSI][10] = 34,
[1][1][RTW89_MKK][10] = 34,
- [1][1][RTW89_IC][10] = 66,
- [1][1][RTW89_ACMA][10] = 32,
+ [1][1][RTW89_IC][10] = 48,
+ [1][1][RTW89_KCC][10] = 38,
+ [1][1][RTW89_ACMA][10] = 34,
+ [1][1][RTW89_CN][10] = 32,
+ [1][1][RTW89_UK][10] = 34,
[1][1][RTW89_FCC][11] = 30,
[1][1][RTW89_ETSI][11] = 34,
[1][1][RTW89_MKK][11] = 34,
- [1][1][RTW89_IC][11] = 48,
- [1][1][RTW89_ACMA][11] = 32,
- [1][1][RTW89_FCC][12] = 24,
+ [1][1][RTW89_IC][11] = 30,
+ [1][1][RTW89_KCC][11] = 38,
+ [1][1][RTW89_ACMA][11] = 34,
+ [1][1][RTW89_CN][11] = 32,
+ [1][1][RTW89_UK][11] = 34,
+ [1][1][RTW89_FCC][12] = -6,
[1][1][RTW89_ETSI][12] = 34,
[1][1][RTW89_MKK][12] = 34,
- [1][1][RTW89_IC][12] = 32,
- [1][1][RTW89_ACMA][12] = 32,
+ [1][1][RTW89_IC][12] = -6,
+ [1][1][RTW89_KCC][12] = 38,
+ [1][1][RTW89_ACMA][12] = 34,
+ [1][1][RTW89_CN][12] = 32,
+ [1][1][RTW89_UK][12] = 34,
[1][1][RTW89_FCC][13] = 127,
[1][1][RTW89_ETSI][13] = 127,
[1][1][RTW89_MKK][13] = 127,
[1][1][RTW89_IC][13] = 127,
+ [1][1][RTW89_KCC][13] = 127,
[1][1][RTW89_ACMA][13] = 127,
- [2][0][RTW89_FCC][0] = 64,
+ [1][1][RTW89_CN][13] = 127,
+ [1][1][RTW89_UK][13] = 127,
+ [2][0][RTW89_FCC][0] = 70,
[2][0][RTW89_ETSI][0] = 58,
[2][0][RTW89_MKK][0] = 58,
- [2][0][RTW89_IC][0] = 78,
- [2][0][RTW89_ACMA][0] = 56,
- [2][0][RTW89_FCC][1] = 64,
+ [2][0][RTW89_IC][0] = 70,
+ [2][0][RTW89_KCC][0] = 64,
+ [2][0][RTW89_ACMA][0] = 58,
+ [2][0][RTW89_CN][0] = 56,
+ [2][0][RTW89_UK][0] = 58,
+ [2][0][RTW89_FCC][1] = 70,
[2][0][RTW89_ETSI][1] = 58,
[2][0][RTW89_MKK][1] = 58,
- [2][0][RTW89_IC][1] = 78,
- [2][0][RTW89_ACMA][1] = 56,
- [2][0][RTW89_FCC][2] = 66,
+ [2][0][RTW89_IC][1] = 70,
+ [2][0][RTW89_KCC][1] = 64,
+ [2][0][RTW89_ACMA][1] = 58,
+ [2][0][RTW89_CN][1] = 56,
+ [2][0][RTW89_UK][1] = 58,
+ [2][0][RTW89_FCC][2] = 72,
[2][0][RTW89_ETSI][2] = 58,
[2][0][RTW89_MKK][2] = 58,
- [2][0][RTW89_IC][2] = 80,
- [2][0][RTW89_ACMA][2] = 56,
- [2][0][RTW89_FCC][3] = 66,
+ [2][0][RTW89_IC][2] = 72,
+ [2][0][RTW89_KCC][2] = 64,
+ [2][0][RTW89_ACMA][2] = 58,
+ [2][0][RTW89_CN][2] = 56,
+ [2][0][RTW89_UK][2] = 58,
+ [2][0][RTW89_FCC][3] = 72,
[2][0][RTW89_ETSI][3] = 58,
[2][0][RTW89_MKK][3] = 58,
- [2][0][RTW89_IC][3] = 80,
- [2][0][RTW89_ACMA][3] = 56,
- [2][0][RTW89_FCC][4] = 66,
+ [2][0][RTW89_IC][3] = 72,
+ [2][0][RTW89_KCC][3] = 64,
+ [2][0][RTW89_ACMA][3] = 58,
+ [2][0][RTW89_CN][3] = 56,
+ [2][0][RTW89_UK][3] = 58,
+ [2][0][RTW89_FCC][4] = 72,
[2][0][RTW89_ETSI][4] = 58,
[2][0][RTW89_MKK][4] = 58,
- [2][0][RTW89_IC][4] = 80,
- [2][0][RTW89_ACMA][4] = 56,
- [2][0][RTW89_FCC][5] = 76,
+ [2][0][RTW89_IC][4] = 72,
+ [2][0][RTW89_KCC][4] = 64,
+ [2][0][RTW89_ACMA][4] = 58,
+ [2][0][RTW89_CN][4] = 56,
+ [2][0][RTW89_UK][4] = 58,
+ [2][0][RTW89_FCC][5] = 82,
[2][0][RTW89_ETSI][5] = 58,
[2][0][RTW89_MKK][5] = 58,
- [2][0][RTW89_IC][5] = 84,
- [2][0][RTW89_ACMA][5] = 56,
- [2][0][RTW89_FCC][6] = 62,
+ [2][0][RTW89_IC][5] = 82,
+ [2][0][RTW89_KCC][5] = 64,
+ [2][0][RTW89_ACMA][5] = 58,
+ [2][0][RTW89_CN][5] = 56,
+ [2][0][RTW89_UK][5] = 58,
+ [2][0][RTW89_FCC][6] = 66,
[2][0][RTW89_ETSI][6] = 56,
[2][0][RTW89_MKK][6] = 58,
- [2][0][RTW89_IC][6] = 76,
+ [2][0][RTW89_IC][6] = 66,
+ [2][0][RTW89_KCC][6] = 64,
[2][0][RTW89_ACMA][6] = 56,
- [2][0][RTW89_FCC][7] = 62,
+ [2][0][RTW89_CN][6] = 56,
+ [2][0][RTW89_UK][6] = 56,
+ [2][0][RTW89_FCC][7] = 66,
[2][0][RTW89_ETSI][7] = 58,
[2][0][RTW89_MKK][7] = 58,
- [2][0][RTW89_IC][7] = 76,
- [2][0][RTW89_ACMA][7] = 56,
- [2][0][RTW89_FCC][8] = 62,
+ [2][0][RTW89_IC][7] = 66,
+ [2][0][RTW89_KCC][7] = 64,
+ [2][0][RTW89_ACMA][7] = 58,
+ [2][0][RTW89_CN][7] = 56,
+ [2][0][RTW89_UK][7] = 58,
+ [2][0][RTW89_FCC][8] = 66,
[2][0][RTW89_ETSI][8] = 58,
[2][0][RTW89_MKK][8] = 58,
- [2][0][RTW89_IC][8] = 76,
- [2][0][RTW89_ACMA][8] = 56,
- [2][0][RTW89_FCC][9] = 60,
+ [2][0][RTW89_IC][8] = 66,
+ [2][0][RTW89_KCC][8] = 64,
+ [2][0][RTW89_ACMA][8] = 58,
+ [2][0][RTW89_CN][8] = 56,
+ [2][0][RTW89_UK][8] = 58,
+ [2][0][RTW89_FCC][9] = 64,
[2][0][RTW89_ETSI][9] = 58,
[2][0][RTW89_MKK][9] = 58,
- [2][0][RTW89_IC][9] = 74,
- [2][0][RTW89_ACMA][9] = 56,
- [2][0][RTW89_FCC][10] = 60,
+ [2][0][RTW89_IC][9] = 64,
+ [2][0][RTW89_KCC][9] = 64,
+ [2][0][RTW89_ACMA][9] = 58,
+ [2][0][RTW89_CN][9] = 56,
+ [2][0][RTW89_UK][9] = 58,
+ [2][0][RTW89_FCC][10] = 64,
[2][0][RTW89_ETSI][10] = 58,
[2][0][RTW89_MKK][10] = 58,
- [2][0][RTW89_IC][10] = 74,
- [2][0][RTW89_ACMA][10] = 56,
- [2][0][RTW89_FCC][11] = 42,
+ [2][0][RTW89_IC][10] = 64,
+ [2][0][RTW89_KCC][10] = 64,
+ [2][0][RTW89_ACMA][10] = 58,
+ [2][0][RTW89_CN][10] = 56,
+ [2][0][RTW89_UK][10] = 58,
+ [2][0][RTW89_FCC][11] = 48,
[2][0][RTW89_ETSI][11] = 58,
[2][0][RTW89_MKK][11] = 58,
- [2][0][RTW89_IC][11] = 66,
- [2][0][RTW89_ACMA][11] = 56,
- [2][0][RTW89_FCC][12] = 38,
+ [2][0][RTW89_IC][11] = 48,
+ [2][0][RTW89_KCC][11] = 64,
+ [2][0][RTW89_ACMA][11] = 58,
+ [2][0][RTW89_CN][11] = 56,
+ [2][0][RTW89_UK][11] = 58,
+ [2][0][RTW89_FCC][12] = 16,
[2][0][RTW89_ETSI][12] = 58,
[2][0][RTW89_MKK][12] = 58,
- [2][0][RTW89_IC][12] = 56,
- [2][0][RTW89_ACMA][12] = 56,
+ [2][0][RTW89_IC][12] = 16,
+ [2][0][RTW89_KCC][12] = 64,
+ [2][0][RTW89_ACMA][12] = 58,
+ [2][0][RTW89_CN][12] = 56,
+ [2][0][RTW89_UK][12] = 58,
[2][0][RTW89_FCC][13] = 127,
[2][0][RTW89_ETSI][13] = 127,
[2][0][RTW89_MKK][13] = 127,
[2][0][RTW89_IC][13] = 127,
+ [2][0][RTW89_KCC][13] = 127,
[2][0][RTW89_ACMA][13] = 127,
- [2][1][RTW89_FCC][0] = 46,
+ [2][0][RTW89_CN][13] = 127,
+ [2][0][RTW89_UK][13] = 127,
+ [2][1][RTW89_FCC][0] = 64,
[2][1][RTW89_ETSI][0] = 46,
[2][1][RTW89_MKK][0] = 46,
- [2][1][RTW89_IC][0] = 70,
- [2][1][RTW89_ACMA][0] = 44,
- [2][1][RTW89_FCC][1] = 46,
+ [2][1][RTW89_IC][0] = 64,
+ [2][1][RTW89_KCC][0] = 52,
+ [2][1][RTW89_ACMA][0] = 46,
+ [2][1][RTW89_CN][0] = 44,
+ [2][1][RTW89_UK][0] = 46,
+ [2][1][RTW89_FCC][1] = 64,
[2][1][RTW89_ETSI][1] = 46,
[2][1][RTW89_MKK][1] = 46,
- [2][1][RTW89_IC][1] = 70,
- [2][1][RTW89_ACMA][1] = 44,
- [2][1][RTW89_FCC][2] = 50,
+ [2][1][RTW89_IC][1] = 64,
+ [2][1][RTW89_KCC][1] = 52,
+ [2][1][RTW89_ACMA][1] = 46,
+ [2][1][RTW89_CN][1] = 44,
+ [2][1][RTW89_UK][1] = 46,
+ [2][1][RTW89_FCC][2] = 68,
[2][1][RTW89_ETSI][2] = 46,
[2][1][RTW89_MKK][2] = 46,
- [2][1][RTW89_IC][2] = 74,
- [2][1][RTW89_ACMA][2] = 44,
- [2][1][RTW89_FCC][3] = 54,
+ [2][1][RTW89_IC][2] = 68,
+ [2][1][RTW89_KCC][2] = 52,
+ [2][1][RTW89_ACMA][2] = 46,
+ [2][1][RTW89_CN][2] = 44,
+ [2][1][RTW89_UK][2] = 46,
+ [2][1][RTW89_FCC][3] = 72,
[2][1][RTW89_ETSI][3] = 46,
[2][1][RTW89_MKK][3] = 46,
- [2][1][RTW89_IC][3] = 78,
- [2][1][RTW89_ACMA][3] = 44,
- [2][1][RTW89_FCC][4] = 56,
+ [2][1][RTW89_IC][3] = 72,
+ [2][1][RTW89_KCC][3] = 52,
+ [2][1][RTW89_ACMA][3] = 46,
+ [2][1][RTW89_CN][3] = 44,
+ [2][1][RTW89_UK][3] = 46,
+ [2][1][RTW89_FCC][4] = 74,
[2][1][RTW89_ETSI][4] = 46,
[2][1][RTW89_MKK][4] = 46,
- [2][1][RTW89_IC][4] = 80,
- [2][1][RTW89_ACMA][4] = 44,
- [2][1][RTW89_FCC][5] = 72,
+ [2][1][RTW89_IC][4] = 74,
+ [2][1][RTW89_KCC][4] = 50,
+ [2][1][RTW89_ACMA][4] = 46,
+ [2][1][RTW89_CN][4] = 44,
+ [2][1][RTW89_UK][4] = 46,
+ [2][1][RTW89_FCC][5] = 82,
[2][1][RTW89_ETSI][5] = 46,
[2][1][RTW89_MKK][5] = 46,
- [2][1][RTW89_IC][5] = 80,
- [2][1][RTW89_ACMA][5] = 44,
- [2][1][RTW89_FCC][6] = 54,
+ [2][1][RTW89_IC][5] = 82,
+ [2][1][RTW89_KCC][5] = 50,
+ [2][1][RTW89_ACMA][5] = 46,
+ [2][1][RTW89_CN][5] = 44,
+ [2][1][RTW89_UK][5] = 46,
+ [2][1][RTW89_FCC][6] = 72,
[2][1][RTW89_ETSI][6] = 44,
[2][1][RTW89_MKK][6] = 46,
- [2][1][RTW89_IC][6] = 78,
+ [2][1][RTW89_IC][6] = 72,
+ [2][1][RTW89_KCC][6] = 50,
[2][1][RTW89_ACMA][6] = 44,
- [2][1][RTW89_FCC][7] = 54,
+ [2][1][RTW89_CN][6] = 44,
+ [2][1][RTW89_UK][6] = 44,
+ [2][1][RTW89_FCC][7] = 72,
[2][1][RTW89_ETSI][7] = 46,
[2][1][RTW89_MKK][7] = 46,
- [2][1][RTW89_IC][7] = 78,
- [2][1][RTW89_ACMA][7] = 44,
- [2][1][RTW89_FCC][8] = 50,
+ [2][1][RTW89_IC][7] = 72,
+ [2][1][RTW89_KCC][7] = 50,
+ [2][1][RTW89_ACMA][7] = 46,
+ [2][1][RTW89_CN][7] = 44,
+ [2][1][RTW89_UK][7] = 46,
+ [2][1][RTW89_FCC][8] = 68,
[2][1][RTW89_ETSI][8] = 46,
[2][1][RTW89_MKK][8] = 46,
- [2][1][RTW89_IC][8] = 74,
- [2][1][RTW89_ACMA][8] = 44,
- [2][1][RTW89_FCC][9] = 46,
+ [2][1][RTW89_IC][8] = 68,
+ [2][1][RTW89_KCC][8] = 50,
+ [2][1][RTW89_ACMA][8] = 46,
+ [2][1][RTW89_CN][8] = 44,
+ [2][1][RTW89_UK][8] = 46,
+ [2][1][RTW89_FCC][9] = 64,
[2][1][RTW89_ETSI][9] = 46,
[2][1][RTW89_MKK][9] = 46,
- [2][1][RTW89_IC][9] = 70,
- [2][1][RTW89_ACMA][9] = 44,
- [2][1][RTW89_FCC][10] = 46,
+ [2][1][RTW89_IC][9] = 64,
+ [2][1][RTW89_KCC][9] = 52,
+ [2][1][RTW89_ACMA][9] = 46,
+ [2][1][RTW89_CN][9] = 44,
+ [2][1][RTW89_UK][9] = 46,
+ [2][1][RTW89_FCC][10] = 64,
[2][1][RTW89_ETSI][10] = 46,
[2][1][RTW89_MKK][10] = 46,
- [2][1][RTW89_IC][10] = 70,
- [2][1][RTW89_ACMA][10] = 44,
- [2][1][RTW89_FCC][11] = 30,
+ [2][1][RTW89_IC][10] = 64,
+ [2][1][RTW89_KCC][10] = 52,
+ [2][1][RTW89_ACMA][10] = 46,
+ [2][1][RTW89_CN][10] = 44,
+ [2][1][RTW89_UK][10] = 46,
+ [2][1][RTW89_FCC][11] = 46,
[2][1][RTW89_ETSI][11] = 46,
[2][1][RTW89_MKK][11] = 46,
- [2][1][RTW89_IC][11] = 60,
- [2][1][RTW89_ACMA][11] = 44,
- [2][1][RTW89_FCC][12] = 26,
+ [2][1][RTW89_IC][11] = 46,
+ [2][1][RTW89_KCC][11] = 52,
+ [2][1][RTW89_ACMA][11] = 46,
+ [2][1][RTW89_CN][11] = 44,
+ [2][1][RTW89_UK][11] = 46,
+ [2][1][RTW89_FCC][12] = 6,
[2][1][RTW89_ETSI][12] = 44,
[2][1][RTW89_MKK][12] = 46,
- [2][1][RTW89_IC][12] = 44,
- [2][1][RTW89_ACMA][12] = 42,
+ [2][1][RTW89_IC][12] = 6,
+ [2][1][RTW89_KCC][12] = 52,
+ [2][1][RTW89_ACMA][12] = 44,
+ [2][1][RTW89_CN][12] = 42,
+ [2][1][RTW89_UK][12] = 44,
[2][1][RTW89_FCC][13] = 127,
[2][1][RTW89_ETSI][13] = 127,
[2][1][RTW89_MKK][13] = 127,
[2][1][RTW89_IC][13] = 127,
+ [2][1][RTW89_KCC][13] = 127,
[2][1][RTW89_ACMA][13] = 127,
+ [2][1][RTW89_CN][13] = 127,
+ [2][1][RTW89_UK][13] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][RTW89_WW][0] = 24,
- [0][0][RTW89_WW][2] = 24,
- [0][0][RTW89_WW][4] = 22,
- [0][0][RTW89_WW][6] = 22,
- [0][0][RTW89_WW][8] = 18,
- [0][0][RTW89_WW][10] = 18,
- [0][0][RTW89_WW][12] = 24,
- [0][0][RTW89_WW][14] = 24,
- [0][0][RTW89_WW][15] = 24,
- [0][0][RTW89_WW][17] = 24,
- [0][0][RTW89_WW][19] = 24,
- [0][0][RTW89_WW][21] = 24,
- [0][0][RTW89_WW][23] = 24,
+ [0][0][RTW89_WW][0] = 16,
+ [0][0][RTW89_WW][2] = 16,
+ [0][0][RTW89_WW][4] = 16,
+ [0][0][RTW89_WW][6] = 10,
+ [0][0][RTW89_WW][8] = 16,
+ [0][0][RTW89_WW][10] = 16,
+ [0][0][RTW89_WW][12] = 16,
+ [0][0][RTW89_WW][14] = 16,
+ [0][0][RTW89_WW][15] = 30,
+ [0][0][RTW89_WW][17] = 30,
+ [0][0][RTW89_WW][19] = 30,
+ [0][0][RTW89_WW][21] = 30,
+ [0][0][RTW89_WW][23] = 30,
[0][0][RTW89_WW][25] = 30,
[0][0][RTW89_WW][27] = 30,
[0][0][RTW89_WW][29] = 30,
- [0][0][RTW89_WW][31] = 24,
- [0][0][RTW89_WW][33] = 24,
- [0][0][RTW89_WW][35] = 24,
- [0][0][RTW89_WW][37] = 44,
+ [0][0][RTW89_WW][31] = 30,
+ [0][0][RTW89_WW][33] = 30,
+ [0][0][RTW89_WW][35] = 30,
+ [0][0][RTW89_WW][37] = 30,
[0][0][RTW89_WW][38] = 28,
[0][0][RTW89_WW][40] = 28,
[0][0][RTW89_WW][42] = 28,
[0][0][RTW89_WW][44] = 28,
[0][0][RTW89_WW][46] = 28,
- [0][0][RTW89_WW][48] = 24,
- [0][0][RTW89_WW][50] = 24,
- [0][0][RTW89_WW][52] = 24,
- [0][1][RTW89_WW][0] = 0,
+ [0][0][RTW89_WW][48] = 46,
+ [0][0][RTW89_WW][50] = 44,
+ [0][0][RTW89_WW][52] = 34,
+ [0][1][RTW89_WW][0] = 4,
[0][1][RTW89_WW][2] = 4,
- [0][1][RTW89_WW][4] = 0,
- [0][1][RTW89_WW][6] = 0,
- [0][1][RTW89_WW][8] = 12,
- [0][1][RTW89_WW][10] = 12,
- [0][1][RTW89_WW][12] = 12,
- [0][1][RTW89_WW][14] = 12,
- [0][1][RTW89_WW][15] = 12,
- [0][1][RTW89_WW][17] = 12,
- [0][1][RTW89_WW][19] = 12,
- [0][1][RTW89_WW][21] = 12,
- [0][1][RTW89_WW][23] = 12,
+ [0][1][RTW89_WW][4] = 4,
+ [0][1][RTW89_WW][6] = 1,
+ [0][1][RTW89_WW][8] = 4,
+ [0][1][RTW89_WW][10] = 4,
+ [0][1][RTW89_WW][12] = 4,
+ [0][1][RTW89_WW][14] = 4,
+ [0][1][RTW89_WW][15] = 18,
+ [0][1][RTW89_WW][17] = 18,
+ [0][1][RTW89_WW][19] = 18,
+ [0][1][RTW89_WW][21] = 18,
+ [0][1][RTW89_WW][23] = 18,
[0][1][RTW89_WW][25] = 18,
[0][1][RTW89_WW][27] = 16,
[0][1][RTW89_WW][29] = 16,
- [0][1][RTW89_WW][31] = 12,
- [0][1][RTW89_WW][33] = 12,
- [0][1][RTW89_WW][35] = 12,
- [0][1][RTW89_WW][37] = 30,
+ [0][1][RTW89_WW][31] = 16,
+ [0][1][RTW89_WW][33] = 16,
+ [0][1][RTW89_WW][35] = 16,
+ [0][1][RTW89_WW][37] = 18,
[0][1][RTW89_WW][38] = 16,
[0][1][RTW89_WW][40] = 16,
[0][1][RTW89_WW][42] = 16,
[0][1][RTW89_WW][44] = 16,
[0][1][RTW89_WW][46] = 16,
- [0][1][RTW89_WW][48] = 12,
- [0][1][RTW89_WW][50] = 12,
- [0][1][RTW89_WW][52] = 12,
- [1][0][RTW89_WW][0] = 34,
- [1][0][RTW89_WW][2] = 34,
- [1][0][RTW89_WW][4] = 34,
- [1][0][RTW89_WW][6] = 34,
- [1][0][RTW89_WW][8] = 34,
- [1][0][RTW89_WW][10] = 34,
- [1][0][RTW89_WW][12] = 34,
- [1][0][RTW89_WW][14] = 34,
- [1][0][RTW89_WW][15] = 34,
- [1][0][RTW89_WW][17] = 34,
- [1][0][RTW89_WW][19] = 34,
- [1][0][RTW89_WW][21] = 34,
- [1][0][RTW89_WW][23] = 34,
+ [0][1][RTW89_WW][48] = 20,
+ [0][1][RTW89_WW][50] = 20,
+ [0][1][RTW89_WW][52] = 8,
+ [1][0][RTW89_WW][0] = 26,
+ [1][0][RTW89_WW][2] = 26,
+ [1][0][RTW89_WW][4] = 26,
+ [1][0][RTW89_WW][6] = 24,
+ [1][0][RTW89_WW][8] = 26,
+ [1][0][RTW89_WW][10] = 26,
+ [1][0][RTW89_WW][12] = 26,
+ [1][0][RTW89_WW][14] = 26,
+ [1][0][RTW89_WW][15] = 40,
+ [1][0][RTW89_WW][17] = 40,
+ [1][0][RTW89_WW][19] = 40,
+ [1][0][RTW89_WW][21] = 40,
+ [1][0][RTW89_WW][23] = 40,
[1][0][RTW89_WW][25] = 40,
[1][0][RTW89_WW][27] = 42,
[1][0][RTW89_WW][29] = 42,
- [1][0][RTW89_WW][31] = 34,
- [1][0][RTW89_WW][33] = 34,
- [1][0][RTW89_WW][35] = 34,
- [1][0][RTW89_WW][37] = 56,
+ [1][0][RTW89_WW][31] = 42,
+ [1][0][RTW89_WW][33] = 42,
+ [1][0][RTW89_WW][35] = 42,
+ [1][0][RTW89_WW][37] = 42,
[1][0][RTW89_WW][38] = 28,
[1][0][RTW89_WW][40] = 28,
[1][0][RTW89_WW][42] = 28,
[1][0][RTW89_WW][44] = 28,
[1][0][RTW89_WW][46] = 28,
- [1][0][RTW89_WW][48] = 36,
- [1][0][RTW89_WW][50] = 36,
- [1][0][RTW89_WW][52] = 36,
- [1][1][RTW89_WW][0] = 10,
+ [1][0][RTW89_WW][48] = 56,
+ [1][0][RTW89_WW][50] = 58,
+ [1][0][RTW89_WW][52] = 56,
+ [1][1][RTW89_WW][0] = 14,
[1][1][RTW89_WW][2] = 14,
- [1][1][RTW89_WW][4] = 10,
- [1][1][RTW89_WW][6] = 10,
- [1][1][RTW89_WW][8] = 20,
- [1][1][RTW89_WW][10] = 20,
- [1][1][RTW89_WW][12] = 22,
- [1][1][RTW89_WW][14] = 22,
- [1][1][RTW89_WW][15] = 22,
- [1][1][RTW89_WW][17] = 22,
- [1][1][RTW89_WW][19] = 22,
- [1][1][RTW89_WW][21] = 22,
- [1][1][RTW89_WW][23] = 22,
+ [1][1][RTW89_WW][4] = 14,
+ [1][1][RTW89_WW][6] = 8,
+ [1][1][RTW89_WW][8] = 14,
+ [1][1][RTW89_WW][10] = 14,
+ [1][1][RTW89_WW][12] = 14,
+ [1][1][RTW89_WW][14] = 14,
+ [1][1][RTW89_WW][15] = 28,
+ [1][1][RTW89_WW][17] = 28,
+ [1][1][RTW89_WW][19] = 28,
+ [1][1][RTW89_WW][21] = 28,
+ [1][1][RTW89_WW][23] = 28,
[1][1][RTW89_WW][25] = 28,
[1][1][RTW89_WW][27] = 30,
[1][1][RTW89_WW][29] = 30,
- [1][1][RTW89_WW][31] = 22,
- [1][1][RTW89_WW][33] = 22,
- [1][1][RTW89_WW][35] = 22,
- [1][1][RTW89_WW][37] = 40,
+ [1][1][RTW89_WW][31] = 30,
+ [1][1][RTW89_WW][33] = 30,
+ [1][1][RTW89_WW][35] = 30,
+ [1][1][RTW89_WW][37] = 32,
[1][1][RTW89_WW][38] = 16,
[1][1][RTW89_WW][40] = 16,
[1][1][RTW89_WW][42] = 16,
[1][1][RTW89_WW][44] = 16,
[1][1][RTW89_WW][46] = 16,
- [1][1][RTW89_WW][48] = 24,
- [1][1][RTW89_WW][50] = 24,
- [1][1][RTW89_WW][52] = 24,
- [2][0][RTW89_WW][0] = 46,
- [2][0][RTW89_WW][2] = 46,
- [2][0][RTW89_WW][4] = 46,
- [2][0][RTW89_WW][6] = 46,
- [2][0][RTW89_WW][8] = 44,
- [2][0][RTW89_WW][10] = 44,
- [2][0][RTW89_WW][12] = 48,
- [2][0][RTW89_WW][14] = 48,
- [2][0][RTW89_WW][15] = 48,
- [2][0][RTW89_WW][17] = 48,
- [2][0][RTW89_WW][19] = 48,
- [2][0][RTW89_WW][21] = 48,
- [2][0][RTW89_WW][23] = 48,
+ [1][1][RTW89_WW][48] = 34,
+ [1][1][RTW89_WW][50] = 34,
+ [1][1][RTW89_WW][52] = 30,
+ [2][0][RTW89_WW][0] = 40,
+ [2][0][RTW89_WW][2] = 40,
+ [2][0][RTW89_WW][4] = 40,
+ [2][0][RTW89_WW][6] = 36,
+ [2][0][RTW89_WW][8] = 40,
+ [2][0][RTW89_WW][10] = 40,
+ [2][0][RTW89_WW][12] = 40,
+ [2][0][RTW89_WW][14] = 40,
+ [2][0][RTW89_WW][15] = 52,
+ [2][0][RTW89_WW][17] = 52,
+ [2][0][RTW89_WW][19] = 52,
+ [2][0][RTW89_WW][21] = 52,
+ [2][0][RTW89_WW][23] = 52,
[2][0][RTW89_WW][25] = 52,
[2][0][RTW89_WW][27] = 52,
[2][0][RTW89_WW][29] = 52,
- [2][0][RTW89_WW][31] = 48,
- [2][0][RTW89_WW][33] = 48,
- [2][0][RTW89_WW][35] = 48,
- [2][0][RTW89_WW][37] = 62,
+ [2][0][RTW89_WW][31] = 52,
+ [2][0][RTW89_WW][33] = 52,
+ [2][0][RTW89_WW][35] = 52,
+ [2][0][RTW89_WW][37] = 52,
[2][0][RTW89_WW][38] = 28,
[2][0][RTW89_WW][40] = 28,
[2][0][RTW89_WW][42] = 28,
[2][0][RTW89_WW][44] = 28,
[2][0][RTW89_WW][46] = 28,
- [2][0][RTW89_WW][48] = 48,
- [2][0][RTW89_WW][50] = 48,
- [2][0][RTW89_WW][52] = 48,
- [2][1][RTW89_WW][0] = 20,
- [2][1][RTW89_WW][2] = 18,
- [2][1][RTW89_WW][4] = 22,
- [2][1][RTW89_WW][6] = 22,
- [2][1][RTW89_WW][8] = 32,
- [2][1][RTW89_WW][10] = 32,
- [2][1][RTW89_WW][12] = 36,
- [2][1][RTW89_WW][14] = 36,
- [2][1][RTW89_WW][15] = 36,
- [2][1][RTW89_WW][17] = 36,
- [2][1][RTW89_WW][19] = 36,
- [2][1][RTW89_WW][21] = 36,
- [2][1][RTW89_WW][23] = 36,
+ [2][0][RTW89_WW][48] = 64,
+ [2][0][RTW89_WW][50] = 64,
+ [2][0][RTW89_WW][52] = 64,
+ [2][1][RTW89_WW][0] = 26,
+ [2][1][RTW89_WW][2] = 26,
+ [2][1][RTW89_WW][4] = 26,
+ [2][1][RTW89_WW][6] = 20,
+ [2][1][RTW89_WW][8] = 28,
+ [2][1][RTW89_WW][10] = 28,
+ [2][1][RTW89_WW][12] = 28,
+ [2][1][RTW89_WW][14] = 28,
+ [2][1][RTW89_WW][15] = 40,
+ [2][1][RTW89_WW][17] = 40,
+ [2][1][RTW89_WW][19] = 40,
+ [2][1][RTW89_WW][21] = 40,
+ [2][1][RTW89_WW][23] = 40,
[2][1][RTW89_WW][25] = 40,
[2][1][RTW89_WW][27] = 40,
[2][1][RTW89_WW][29] = 40,
- [2][1][RTW89_WW][31] = 36,
- [2][1][RTW89_WW][33] = 36,
- [2][1][RTW89_WW][35] = 36,
+ [2][1][RTW89_WW][31] = 40,
+ [2][1][RTW89_WW][33] = 40,
+ [2][1][RTW89_WW][35] = 40,
[2][1][RTW89_WW][37] = 42,
[2][1][RTW89_WW][38] = 16,
[2][1][RTW89_WW][40] = 16,
[2][1][RTW89_WW][42] = 16,
[2][1][RTW89_WW][44] = 16,
[2][1][RTW89_WW][46] = 16,
- [2][1][RTW89_WW][48] = 36,
- [2][1][RTW89_WW][50] = 36,
- [2][1][RTW89_WW][52] = 36,
- [0][0][RTW89_FCC][0] = 44,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
+ [0][0][RTW89_FCC][0] = 50,
[0][0][RTW89_ETSI][0] = 30,
[0][0][RTW89_MKK][0] = 36,
- [0][0][RTW89_IC][0] = 24,
- [0][0][RTW89_ACMA][0] = 24,
- [0][0][RTW89_FCC][2] = 44,
+ [0][0][RTW89_IC][0] = 32,
+ [0][0][RTW89_KCC][0] = 42,
+ [0][0][RTW89_ACMA][0] = 30,
+ [0][0][RTW89_CN][0] = 16,
+ [0][0][RTW89_UK][0] = 30,
+ [0][0][RTW89_FCC][2] = 50,
[0][0][RTW89_ETSI][2] = 30,
[0][0][RTW89_MKK][2] = 36,
- [0][0][RTW89_IC][2] = 24,
- [0][0][RTW89_ACMA][2] = 24,
- [0][0][RTW89_FCC][4] = 44,
+ [0][0][RTW89_IC][2] = 32,
+ [0][0][RTW89_KCC][2] = 42,
+ [0][0][RTW89_ACMA][2] = 30,
+ [0][0][RTW89_CN][2] = 16,
+ [0][0][RTW89_UK][2] = 30,
+ [0][0][RTW89_FCC][4] = 50,
[0][0][RTW89_ETSI][4] = 30,
[0][0][RTW89_MKK][4] = 22,
- [0][0][RTW89_IC][4] = 24,
- [0][0][RTW89_ACMA][4] = 24,
- [0][0][RTW89_FCC][6] = 44,
+ [0][0][RTW89_IC][4] = 32,
+ [0][0][RTW89_KCC][4] = 42,
+ [0][0][RTW89_ACMA][4] = 30,
+ [0][0][RTW89_CN][4] = 16,
+ [0][0][RTW89_UK][4] = 30,
+ [0][0][RTW89_FCC][6] = 50,
[0][0][RTW89_ETSI][6] = 30,
[0][0][RTW89_MKK][6] = 22,
- [0][0][RTW89_IC][6] = 24,
- [0][0][RTW89_ACMA][6] = 24,
- [0][0][RTW89_FCC][8] = 44,
+ [0][0][RTW89_IC][6] = 32,
+ [0][0][RTW89_KCC][6] = 10,
+ [0][0][RTW89_ACMA][6] = 30,
+ [0][0][RTW89_CN][6] = 16,
+ [0][0][RTW89_UK][6] = 30,
+ [0][0][RTW89_FCC][8] = 52,
[0][0][RTW89_ETSI][8] = 28,
[0][0][RTW89_MKK][8] = 18,
[0][0][RTW89_IC][8] = 52,
- [0][0][RTW89_ACMA][8] = 24,
- [0][0][RTW89_FCC][10] = 44,
+ [0][0][RTW89_KCC][8] = 44,
+ [0][0][RTW89_ACMA][8] = 28,
+ [0][0][RTW89_CN][8] = 16,
+ [0][0][RTW89_UK][8] = 28,
+ [0][0][RTW89_FCC][10] = 52,
[0][0][RTW89_ETSI][10] = 28,
[0][0][RTW89_MKK][10] = 18,
[0][0][RTW89_IC][10] = 52,
- [0][0][RTW89_ACMA][10] = 24,
- [0][0][RTW89_FCC][12] = 44,
+ [0][0][RTW89_KCC][10] = 44,
+ [0][0][RTW89_ACMA][10] = 28,
+ [0][0][RTW89_CN][10] = 16,
+ [0][0][RTW89_UK][10] = 28,
+ [0][0][RTW89_FCC][12] = 52,
[0][0][RTW89_ETSI][12] = 28,
[0][0][RTW89_MKK][12] = 34,
[0][0][RTW89_IC][12] = 52,
- [0][0][RTW89_ACMA][12] = 24,
- [0][0][RTW89_FCC][14] = 44,
+ [0][0][RTW89_KCC][12] = 40,
+ [0][0][RTW89_ACMA][12] = 28,
+ [0][0][RTW89_CN][12] = 16,
+ [0][0][RTW89_UK][12] = 28,
+ [0][0][RTW89_FCC][14] = 52,
[0][0][RTW89_ETSI][14] = 28,
[0][0][RTW89_MKK][14] = 34,
[0][0][RTW89_IC][14] = 52,
- [0][0][RTW89_ACMA][14] = 24,
- [0][0][RTW89_FCC][15] = 44,
+ [0][0][RTW89_KCC][14] = 40,
+ [0][0][RTW89_ACMA][14] = 28,
+ [0][0][RTW89_CN][14] = 16,
+ [0][0][RTW89_UK][14] = 28,
+ [0][0][RTW89_FCC][15] = 52,
[0][0][RTW89_ETSI][15] = 30,
[0][0][RTW89_MKK][15] = 56,
[0][0][RTW89_IC][15] = 52,
- [0][0][RTW89_ACMA][15] = 24,
- [0][0][RTW89_FCC][17] = 44,
+ [0][0][RTW89_KCC][15] = 42,
+ [0][0][RTW89_ACMA][15] = 30,
+ [0][0][RTW89_CN][15] = 127,
+ [0][0][RTW89_UK][15] = 30,
+ [0][0][RTW89_FCC][17] = 52,
[0][0][RTW89_ETSI][17] = 30,
[0][0][RTW89_MKK][17] = 58,
[0][0][RTW89_IC][17] = 52,
- [0][0][RTW89_ACMA][17] = 24,
- [0][0][RTW89_FCC][19] = 44,
+ [0][0][RTW89_KCC][17] = 42,
+ [0][0][RTW89_ACMA][17] = 30,
+ [0][0][RTW89_CN][17] = 127,
+ [0][0][RTW89_UK][17] = 30,
+ [0][0][RTW89_FCC][19] = 52,
[0][0][RTW89_ETSI][19] = 30,
[0][0][RTW89_MKK][19] = 58,
[0][0][RTW89_IC][19] = 52,
- [0][0][RTW89_ACMA][19] = 24,
- [0][0][RTW89_FCC][21] = 44,
+ [0][0][RTW89_KCC][19] = 42,
+ [0][0][RTW89_ACMA][19] = 30,
+ [0][0][RTW89_CN][19] = 127,
+ [0][0][RTW89_UK][19] = 30,
+ [0][0][RTW89_FCC][21] = 52,
[0][0][RTW89_ETSI][21] = 30,
[0][0][RTW89_MKK][21] = 58,
[0][0][RTW89_IC][21] = 52,
- [0][0][RTW89_ACMA][21] = 24,
- [0][0][RTW89_FCC][23] = 44,
+ [0][0][RTW89_KCC][21] = 42,
+ [0][0][RTW89_ACMA][21] = 30,
+ [0][0][RTW89_CN][21] = 127,
+ [0][0][RTW89_UK][21] = 30,
+ [0][0][RTW89_FCC][23] = 52,
[0][0][RTW89_ETSI][23] = 30,
[0][0][RTW89_MKK][23] = 58,
[0][0][RTW89_IC][23] = 52,
- [0][0][RTW89_ACMA][23] = 24,
- [0][0][RTW89_FCC][25] = 44,
+ [0][0][RTW89_KCC][23] = 42,
+ [0][0][RTW89_ACMA][23] = 30,
+ [0][0][RTW89_CN][23] = 127,
+ [0][0][RTW89_UK][23] = 30,
+ [0][0][RTW89_FCC][25] = 52,
[0][0][RTW89_ETSI][25] = 30,
[0][0][RTW89_MKK][25] = 58,
[0][0][RTW89_IC][25] = 127,
+ [0][0][RTW89_KCC][25] = 42,
[0][0][RTW89_ACMA][25] = 127,
- [0][0][RTW89_FCC][27] = 44,
+ [0][0][RTW89_CN][25] = 127,
+ [0][0][RTW89_UK][25] = 30,
+ [0][0][RTW89_FCC][27] = 52,
[0][0][RTW89_ETSI][27] = 30,
[0][0][RTW89_MKK][27] = 58,
[0][0][RTW89_IC][27] = 127,
+ [0][0][RTW89_KCC][27] = 42,
[0][0][RTW89_ACMA][27] = 127,
- [0][0][RTW89_FCC][29] = 44,
+ [0][0][RTW89_CN][27] = 127,
+ [0][0][RTW89_UK][27] = 30,
+ [0][0][RTW89_FCC][29] = 52,
[0][0][RTW89_ETSI][29] = 30,
[0][0][RTW89_MKK][29] = 58,
[0][0][RTW89_IC][29] = 127,
+ [0][0][RTW89_KCC][29] = 42,
[0][0][RTW89_ACMA][29] = 127,
- [0][0][RTW89_FCC][31] = 44,
+ [0][0][RTW89_CN][29] = 127,
+ [0][0][RTW89_UK][29] = 30,
+ [0][0][RTW89_FCC][31] = 52,
[0][0][RTW89_ETSI][31] = 30,
[0][0][RTW89_MKK][31] = 58,
- [0][0][RTW89_IC][31] = 52,
- [0][0][RTW89_ACMA][31] = 24,
+ [0][0][RTW89_IC][31] = 44,
+ [0][0][RTW89_KCC][31] = 42,
+ [0][0][RTW89_ACMA][31] = 30,
+ [0][0][RTW89_CN][31] = 127,
+ [0][0][RTW89_UK][31] = 30,
[0][0][RTW89_FCC][33] = 44,
[0][0][RTW89_ETSI][33] = 30,
[0][0][RTW89_MKK][33] = 58,
- [0][0][RTW89_IC][33] = 52,
- [0][0][RTW89_ACMA][33] = 24,
+ [0][0][RTW89_IC][33] = 44,
+ [0][0][RTW89_KCC][33] = 42,
+ [0][0][RTW89_ACMA][33] = 30,
+ [0][0][RTW89_CN][33] = 127,
+ [0][0][RTW89_UK][33] = 30,
[0][0][RTW89_FCC][35] = 44,
[0][0][RTW89_ETSI][35] = 30,
[0][0][RTW89_MKK][35] = 58,
- [0][0][RTW89_IC][35] = 52,
- [0][0][RTW89_ACMA][35] = 24,
- [0][0][RTW89_FCC][37] = 44,
+ [0][0][RTW89_IC][35] = 44,
+ [0][0][RTW89_KCC][35] = 42,
+ [0][0][RTW89_ACMA][35] = 30,
+ [0][0][RTW89_CN][35] = 127,
+ [0][0][RTW89_UK][35] = 30,
+ [0][0][RTW89_FCC][37] = 52,
[0][0][RTW89_ETSI][37] = 127,
[0][0][RTW89_MKK][37] = 58,
[0][0][RTW89_IC][37] = 52,
+ [0][0][RTW89_KCC][37] = 42,
[0][0][RTW89_ACMA][37] = 52,
- [0][0][RTW89_FCC][38] = 76,
+ [0][0][RTW89_CN][37] = 127,
+ [0][0][RTW89_UK][37] = 30,
+ [0][0][RTW89_FCC][38] = 64,
[0][0][RTW89_ETSI][38] = 28,
[0][0][RTW89_MKK][38] = 127,
- [0][0][RTW89_IC][38] = 84,
- [0][0][RTW89_ACMA][38] = 84,
- [0][0][RTW89_FCC][40] = 76,
+ [0][0][RTW89_IC][38] = 64,
+ [0][0][RTW89_KCC][38] = 42,
+ [0][0][RTW89_ACMA][38] = 64,
+ [0][0][RTW89_CN][38] = 54,
+ [0][0][RTW89_UK][38] = 30,
+ [0][0][RTW89_FCC][40] = 64,
[0][0][RTW89_ETSI][40] = 28,
[0][0][RTW89_MKK][40] = 127,
- [0][0][RTW89_IC][40] = 84,
- [0][0][RTW89_ACMA][40] = 84,
- [0][0][RTW89_FCC][42] = 76,
+ [0][0][RTW89_IC][40] = 64,
+ [0][0][RTW89_KCC][40] = 42,
+ [0][0][RTW89_ACMA][40] = 64,
+ [0][0][RTW89_CN][40] = 54,
+ [0][0][RTW89_UK][40] = 30,
+ [0][0][RTW89_FCC][42] = 60,
[0][0][RTW89_ETSI][42] = 28,
[0][0][RTW89_MKK][42] = 127,
- [0][0][RTW89_IC][42] = 84,
- [0][0][RTW89_ACMA][42] = 84,
- [0][0][RTW89_FCC][44] = 76,
+ [0][0][RTW89_IC][42] = 60,
+ [0][0][RTW89_KCC][42] = 42,
+ [0][0][RTW89_ACMA][42] = 60,
+ [0][0][RTW89_CN][42] = 54,
+ [0][0][RTW89_UK][42] = 30,
+ [0][0][RTW89_FCC][44] = 60,
[0][0][RTW89_ETSI][44] = 28,
[0][0][RTW89_MKK][44] = 127,
- [0][0][RTW89_IC][44] = 84,
- [0][0][RTW89_ACMA][44] = 84,
- [0][0][RTW89_FCC][46] = 76,
+ [0][0][RTW89_IC][44] = 60,
+ [0][0][RTW89_KCC][44] = 42,
+ [0][0][RTW89_ACMA][44] = 60,
+ [0][0][RTW89_CN][44] = 54,
+ [0][0][RTW89_UK][44] = 30,
+ [0][0][RTW89_FCC][46] = 60,
[0][0][RTW89_ETSI][46] = 28,
[0][0][RTW89_MKK][46] = 127,
- [0][0][RTW89_IC][46] = 84,
- [0][0][RTW89_ACMA][46] = 84,
- [0][0][RTW89_FCC][48] = 24,
+ [0][0][RTW89_IC][46] = 60,
+ [0][0][RTW89_KCC][46] = 42,
+ [0][0][RTW89_ACMA][46] = 60,
+ [0][0][RTW89_CN][46] = 54,
+ [0][0][RTW89_UK][46] = 30,
+ [0][0][RTW89_FCC][48] = 46,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
[0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
- [0][0][RTW89_FCC][50] = 24,
+ [0][0][RTW89_CN][48] = 127,
+ [0][0][RTW89_UK][48] = 127,
+ [0][0][RTW89_FCC][50] = 44,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
[0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
- [0][0][RTW89_FCC][52] = 24,
+ [0][0][RTW89_CN][50] = 127,
+ [0][0][RTW89_UK][50] = 127,
+ [0][0][RTW89_FCC][52] = 34,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
[0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
- [0][1][RTW89_FCC][0] = 26,
+ [0][0][RTW89_CN][52] = 127,
+ [0][0][RTW89_UK][52] = 127,
+ [0][1][RTW89_FCC][0] = 30,
[0][1][RTW89_ETSI][0] = 18,
[0][1][RTW89_MKK][0] = 20,
- [0][1][RTW89_IC][0] = 0,
- [0][1][RTW89_ACMA][0] = 12,
- [0][1][RTW89_FCC][2] = 30,
+ [0][1][RTW89_IC][0] = 8,
+ [0][1][RTW89_KCC][0] = 26,
+ [0][1][RTW89_ACMA][0] = 18,
+ [0][1][RTW89_CN][0] = 4,
+ [0][1][RTW89_UK][0] = 18,
+ [0][1][RTW89_FCC][2] = 32,
[0][1][RTW89_ETSI][2] = 18,
[0][1][RTW89_MKK][2] = 20,
- [0][1][RTW89_IC][2] = 4,
- [0][1][RTW89_ACMA][2] = 12,
- [0][1][RTW89_FCC][4] = 26,
+ [0][1][RTW89_IC][2] = 8,
+ [0][1][RTW89_KCC][2] = 26,
+ [0][1][RTW89_ACMA][2] = 18,
+ [0][1][RTW89_CN][2] = 4,
+ [0][1][RTW89_UK][2] = 18,
+ [0][1][RTW89_FCC][4] = 30,
[0][1][RTW89_ETSI][4] = 18,
[0][1][RTW89_MKK][4] = 8,
- [0][1][RTW89_IC][4] = 0,
- [0][1][RTW89_ACMA][4] = 12,
- [0][1][RTW89_FCC][6] = 26,
+ [0][1][RTW89_IC][4] = 8,
+ [0][1][RTW89_KCC][4] = 26,
+ [0][1][RTW89_ACMA][4] = 18,
+ [0][1][RTW89_CN][4] = 4,
+ [0][1][RTW89_UK][4] = 18,
+ [0][1][RTW89_FCC][6] = 30,
[0][1][RTW89_ETSI][6] = 18,
[0][1][RTW89_MKK][6] = 8,
- [0][1][RTW89_IC][6] = 0,
- [0][1][RTW89_ACMA][6] = 12,
- [0][1][RTW89_FCC][8] = 26,
+ [0][1][RTW89_IC][6] = 8,
+ [0][1][RTW89_KCC][6] = 0,
+ [0][1][RTW89_ACMA][6] = 18,
+ [0][1][RTW89_CN][6] = 4,
+ [0][1][RTW89_UK][6] = 18,
+ [0][1][RTW89_FCC][8] = 30,
[0][1][RTW89_ETSI][8] = 16,
[0][1][RTW89_MKK][8] = 20,
- [0][1][RTW89_IC][8] = 34,
- [0][1][RTW89_ACMA][8] = 12,
- [0][1][RTW89_FCC][10] = 26,
+ [0][1][RTW89_IC][8] = 30,
+ [0][1][RTW89_KCC][8] = 28,
+ [0][1][RTW89_ACMA][8] = 16,
+ [0][1][RTW89_CN][8] = 4,
+ [0][1][RTW89_UK][8] = 16,
+ [0][1][RTW89_FCC][10] = 30,
[0][1][RTW89_ETSI][10] = 16,
[0][1][RTW89_MKK][10] = 20,
- [0][1][RTW89_IC][10] = 34,
- [0][1][RTW89_ACMA][10] = 12,
+ [0][1][RTW89_IC][10] = 30,
+ [0][1][RTW89_KCC][10] = 28,
+ [0][1][RTW89_ACMA][10] = 16,
+ [0][1][RTW89_CN][10] = 4,
+ [0][1][RTW89_UK][10] = 16,
[0][1][RTW89_FCC][12] = 30,
[0][1][RTW89_ETSI][12] = 16,
[0][1][RTW89_MKK][12] = 34,
- [0][1][RTW89_IC][12] = 38,
- [0][1][RTW89_ACMA][12] = 12,
- [0][1][RTW89_FCC][14] = 26,
+ [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_KCC][12] = 28,
+ [0][1][RTW89_ACMA][12] = 16,
+ [0][1][RTW89_CN][12] = 4,
+ [0][1][RTW89_UK][12] = 16,
+ [0][1][RTW89_FCC][14] = 30,
[0][1][RTW89_ETSI][14] = 16,
[0][1][RTW89_MKK][14] = 34,
- [0][1][RTW89_IC][14] = 34,
- [0][1][RTW89_ACMA][14] = 12,
- [0][1][RTW89_FCC][15] = 26,
+ [0][1][RTW89_IC][14] = 30,
+ [0][1][RTW89_KCC][14] = 28,
+ [0][1][RTW89_ACMA][14] = 16,
+ [0][1][RTW89_CN][14] = 4,
+ [0][1][RTW89_UK][14] = 16,
+ [0][1][RTW89_FCC][15] = 32,
[0][1][RTW89_ETSI][15] = 18,
[0][1][RTW89_MKK][15] = 44,
- [0][1][RTW89_IC][15] = 34,
- [0][1][RTW89_ACMA][15] = 12,
- [0][1][RTW89_FCC][17] = 26,
+ [0][1][RTW89_IC][15] = 32,
+ [0][1][RTW89_KCC][15] = 28,
+ [0][1][RTW89_ACMA][15] = 18,
+ [0][1][RTW89_CN][15] = 127,
+ [0][1][RTW89_UK][15] = 18,
+ [0][1][RTW89_FCC][17] = 32,
[0][1][RTW89_ETSI][17] = 18,
[0][1][RTW89_MKK][17] = 44,
- [0][1][RTW89_IC][17] = 34,
- [0][1][RTW89_ACMA][17] = 12,
- [0][1][RTW89_FCC][19] = 30,
+ [0][1][RTW89_IC][17] = 32,
+ [0][1][RTW89_KCC][17] = 28,
+ [0][1][RTW89_ACMA][17] = 18,
+ [0][1][RTW89_CN][17] = 127,
+ [0][1][RTW89_UK][17] = 18,
+ [0][1][RTW89_FCC][19] = 32,
[0][1][RTW89_ETSI][19] = 18,
[0][1][RTW89_MKK][19] = 44,
- [0][1][RTW89_IC][19] = 38,
- [0][1][RTW89_ACMA][19] = 12,
- [0][1][RTW89_FCC][21] = 30,
+ [0][1][RTW89_IC][19] = 32,
+ [0][1][RTW89_KCC][19] = 28,
+ [0][1][RTW89_ACMA][19] = 18,
+ [0][1][RTW89_CN][19] = 127,
+ [0][1][RTW89_UK][19] = 18,
+ [0][1][RTW89_FCC][21] = 32,
[0][1][RTW89_ETSI][21] = 18,
[0][1][RTW89_MKK][21] = 44,
- [0][1][RTW89_IC][21] = 38,
- [0][1][RTW89_ACMA][21] = 12,
- [0][1][RTW89_FCC][23] = 30,
+ [0][1][RTW89_IC][21] = 32,
+ [0][1][RTW89_KCC][21] = 28,
+ [0][1][RTW89_ACMA][21] = 18,
+ [0][1][RTW89_CN][21] = 127,
+ [0][1][RTW89_UK][21] = 18,
+ [0][1][RTW89_FCC][23] = 32,
[0][1][RTW89_ETSI][23] = 18,
[0][1][RTW89_MKK][23] = 44,
- [0][1][RTW89_IC][23] = 38,
- [0][1][RTW89_ACMA][23] = 12,
- [0][1][RTW89_FCC][25] = 30,
+ [0][1][RTW89_IC][23] = 32,
+ [0][1][RTW89_KCC][23] = 28,
+ [0][1][RTW89_ACMA][23] = 18,
+ [0][1][RTW89_CN][23] = 127,
+ [0][1][RTW89_UK][23] = 18,
+ [0][1][RTW89_FCC][25] = 32,
[0][1][RTW89_ETSI][25] = 18,
[0][1][RTW89_MKK][25] = 44,
[0][1][RTW89_IC][25] = 127,
+ [0][1][RTW89_KCC][25] = 28,
[0][1][RTW89_ACMA][25] = 127,
- [0][1][RTW89_FCC][27] = 30,
+ [0][1][RTW89_CN][25] = 127,
+ [0][1][RTW89_UK][25] = 18,
+ [0][1][RTW89_FCC][27] = 32,
[0][1][RTW89_ETSI][27] = 16,
[0][1][RTW89_MKK][27] = 44,
[0][1][RTW89_IC][27] = 127,
+ [0][1][RTW89_KCC][27] = 28,
[0][1][RTW89_ACMA][27] = 127,
- [0][1][RTW89_FCC][29] = 30,
+ [0][1][RTW89_CN][27] = 127,
+ [0][1][RTW89_UK][27] = 16,
+ [0][1][RTW89_FCC][29] = 32,
[0][1][RTW89_ETSI][29] = 16,
[0][1][RTW89_MKK][29] = 44,
[0][1][RTW89_IC][29] = 127,
+ [0][1][RTW89_KCC][29] = 28,
[0][1][RTW89_ACMA][29] = 127,
- [0][1][RTW89_FCC][31] = 30,
+ [0][1][RTW89_CN][29] = 127,
+ [0][1][RTW89_UK][29] = 16,
+ [0][1][RTW89_FCC][31] = 32,
[0][1][RTW89_ETSI][31] = 16,
[0][1][RTW89_MKK][31] = 44,
- [0][1][RTW89_IC][31] = 34,
- [0][1][RTW89_ACMA][31] = 12,
- [0][1][RTW89_FCC][33] = 26,
+ [0][1][RTW89_IC][31] = 30,
+ [0][1][RTW89_KCC][31] = 28,
+ [0][1][RTW89_ACMA][31] = 16,
+ [0][1][RTW89_CN][31] = 127,
+ [0][1][RTW89_UK][31] = 16,
+ [0][1][RTW89_FCC][33] = 30,
[0][1][RTW89_ETSI][33] = 16,
[0][1][RTW89_MKK][33] = 44,
- [0][1][RTW89_IC][33] = 34,
- [0][1][RTW89_ACMA][33] = 12,
- [0][1][RTW89_FCC][35] = 26,
+ [0][1][RTW89_IC][33] = 30,
+ [0][1][RTW89_KCC][33] = 28,
+ [0][1][RTW89_ACMA][33] = 16,
+ [0][1][RTW89_CN][33] = 127,
+ [0][1][RTW89_UK][33] = 16,
+ [0][1][RTW89_FCC][35] = 30,
[0][1][RTW89_ETSI][35] = 16,
[0][1][RTW89_MKK][35] = 44,
- [0][1][RTW89_IC][35] = 34,
- [0][1][RTW89_ACMA][35] = 12,
- [0][1][RTW89_FCC][37] = 30,
+ [0][1][RTW89_IC][35] = 30,
+ [0][1][RTW89_KCC][35] = 28,
+ [0][1][RTW89_ACMA][35] = 16,
+ [0][1][RTW89_CN][35] = 127,
+ [0][1][RTW89_UK][35] = 16,
+ [0][1][RTW89_FCC][37] = 34,
[0][1][RTW89_ETSI][37] = 127,
[0][1][RTW89_MKK][37] = 44,
- [0][1][RTW89_IC][37] = 38,
- [0][1][RTW89_ACMA][37] = 38,
- [0][1][RTW89_FCC][38] = 74,
+ [0][1][RTW89_IC][37] = 34,
+ [0][1][RTW89_KCC][37] = 28,
+ [0][1][RTW89_ACMA][37] = 34,
+ [0][1][RTW89_CN][37] = 127,
+ [0][1][RTW89_UK][37] = 18,
+ [0][1][RTW89_FCC][38] = 62,
[0][1][RTW89_ETSI][38] = 16,
[0][1][RTW89_MKK][38] = 127,
- [0][1][RTW89_IC][38] = 82,
- [0][1][RTW89_ACMA][38] = 84,
- [0][1][RTW89_FCC][40] = 74,
+ [0][1][RTW89_IC][38] = 62,
+ [0][1][RTW89_KCC][38] = 28,
+ [0][1][RTW89_ACMA][38] = 62,
+ [0][1][RTW89_CN][38] = 42,
+ [0][1][RTW89_UK][38] = 18,
+ [0][1][RTW89_FCC][40] = 62,
[0][1][RTW89_ETSI][40] = 16,
[0][1][RTW89_MKK][40] = 127,
- [0][1][RTW89_IC][40] = 82,
- [0][1][RTW89_ACMA][40] = 84,
- [0][1][RTW89_FCC][42] = 74,
+ [0][1][RTW89_IC][40] = 62,
+ [0][1][RTW89_KCC][40] = 28,
+ [0][1][RTW89_ACMA][40] = 62,
+ [0][1][RTW89_CN][40] = 42,
+ [0][1][RTW89_UK][40] = 18,
+ [0][1][RTW89_FCC][42] = 58,
[0][1][RTW89_ETSI][42] = 16,
[0][1][RTW89_MKK][42] = 127,
- [0][1][RTW89_IC][42] = 82,
- [0][1][RTW89_ACMA][42] = 84,
- [0][1][RTW89_FCC][44] = 74,
+ [0][1][RTW89_IC][42] = 58,
+ [0][1][RTW89_KCC][42] = 28,
+ [0][1][RTW89_ACMA][42] = 58,
+ [0][1][RTW89_CN][42] = 42,
+ [0][1][RTW89_UK][42] = 18,
+ [0][1][RTW89_FCC][44] = 56,
[0][1][RTW89_ETSI][44] = 16,
[0][1][RTW89_MKK][44] = 127,
- [0][1][RTW89_IC][44] = 82,
- [0][1][RTW89_ACMA][44] = 84,
- [0][1][RTW89_FCC][46] = 74,
+ [0][1][RTW89_IC][44] = 56,
+ [0][1][RTW89_KCC][44] = 28,
+ [0][1][RTW89_ACMA][44] = 56,
+ [0][1][RTW89_CN][44] = 42,
+ [0][1][RTW89_UK][44] = 18,
+ [0][1][RTW89_FCC][46] = 56,
[0][1][RTW89_ETSI][46] = 16,
[0][1][RTW89_MKK][46] = 127,
- [0][1][RTW89_IC][46] = 82,
- [0][1][RTW89_ACMA][46] = 84,
- [0][1][RTW89_FCC][48] = 12,
+ [0][1][RTW89_IC][46] = 56,
+ [0][1][RTW89_KCC][46] = 28,
+ [0][1][RTW89_ACMA][46] = 56,
+ [0][1][RTW89_CN][46] = 42,
+ [0][1][RTW89_UK][46] = 18,
+ [0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
[0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
- [0][1][RTW89_FCC][50] = 12,
+ [0][1][RTW89_CN][48] = 127,
+ [0][1][RTW89_UK][48] = 127,
+ [0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
[0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
- [0][1][RTW89_FCC][52] = 12,
+ [0][1][RTW89_CN][50] = 127,
+ [0][1][RTW89_UK][50] = 127,
+ [0][1][RTW89_FCC][52] = 8,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
[0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
- [1][0][RTW89_FCC][0] = 54,
+ [0][1][RTW89_CN][52] = 127,
+ [0][1][RTW89_UK][52] = 127,
+ [1][0][RTW89_FCC][0] = 62,
[1][0][RTW89_ETSI][0] = 40,
[1][0][RTW89_MKK][0] = 48,
- [1][0][RTW89_IC][0] = 36,
- [1][0][RTW89_ACMA][0] = 34,
- [1][0][RTW89_FCC][2] = 54,
+ [1][0][RTW89_IC][0] = 42,
+ [1][0][RTW89_KCC][0] = 50,
+ [1][0][RTW89_ACMA][0] = 40,
+ [1][0][RTW89_CN][0] = 26,
+ [1][0][RTW89_UK][0] = 40,
+ [1][0][RTW89_FCC][2] = 62,
[1][0][RTW89_ETSI][2] = 40,
[1][0][RTW89_MKK][2] = 48,
- [1][0][RTW89_IC][2] = 36,
- [1][0][RTW89_ACMA][2] = 34,
- [1][0][RTW89_FCC][4] = 54,
+ [1][0][RTW89_IC][2] = 42,
+ [1][0][RTW89_KCC][2] = 50,
+ [1][0][RTW89_ACMA][2] = 40,
+ [1][0][RTW89_CN][2] = 26,
+ [1][0][RTW89_UK][2] = 40,
+ [1][0][RTW89_FCC][4] = 64,
[1][0][RTW89_ETSI][4] = 40,
[1][0][RTW89_MKK][4] = 40,
- [1][0][RTW89_IC][4] = 36,
- [1][0][RTW89_ACMA][4] = 34,
- [1][0][RTW89_FCC][6] = 54,
+ [1][0][RTW89_IC][4] = 42,
+ [1][0][RTW89_KCC][4] = 50,
+ [1][0][RTW89_ACMA][4] = 40,
+ [1][0][RTW89_CN][4] = 26,
+ [1][0][RTW89_UK][4] = 40,
+ [1][0][RTW89_FCC][6] = 64,
[1][0][RTW89_ETSI][6] = 40,
[1][0][RTW89_MKK][6] = 40,
- [1][0][RTW89_IC][6] = 36,
- [1][0][RTW89_ACMA][6] = 34,
- [1][0][RTW89_FCC][8] = 54,
+ [1][0][RTW89_IC][6] = 42,
+ [1][0][RTW89_KCC][6] = 24,
+ [1][0][RTW89_ACMA][6] = 40,
+ [1][0][RTW89_CN][6] = 26,
+ [1][0][RTW89_UK][6] = 40,
+ [1][0][RTW89_FCC][8] = 62,
[1][0][RTW89_ETSI][8] = 40,
[1][0][RTW89_MKK][8] = 34,
[1][0][RTW89_IC][8] = 62,
- [1][0][RTW89_ACMA][8] = 34,
- [1][0][RTW89_FCC][10] = 54,
+ [1][0][RTW89_KCC][8] = 52,
+ [1][0][RTW89_ACMA][8] = 40,
+ [1][0][RTW89_CN][8] = 26,
+ [1][0][RTW89_UK][8] = 40,
+ [1][0][RTW89_FCC][10] = 62,
[1][0][RTW89_ETSI][10] = 40,
[1][0][RTW89_MKK][10] = 34,
[1][0][RTW89_IC][10] = 62,
- [1][0][RTW89_ACMA][10] = 34,
- [1][0][RTW89_FCC][12] = 56,
+ [1][0][RTW89_KCC][10] = 52,
+ [1][0][RTW89_ACMA][10] = 40,
+ [1][0][RTW89_CN][10] = 26,
+ [1][0][RTW89_UK][10] = 40,
+ [1][0][RTW89_FCC][12] = 62,
[1][0][RTW89_ETSI][12] = 40,
[1][0][RTW89_MKK][12] = 46,
- [1][0][RTW89_IC][12] = 64,
- [1][0][RTW89_ACMA][12] = 34,
- [1][0][RTW89_FCC][14] = 54,
+ [1][0][RTW89_IC][12] = 62,
+ [1][0][RTW89_KCC][12] = 52,
+ [1][0][RTW89_ACMA][12] = 40,
+ [1][0][RTW89_CN][12] = 26,
+ [1][0][RTW89_UK][12] = 40,
+ [1][0][RTW89_FCC][14] = 62,
[1][0][RTW89_ETSI][14] = 40,
[1][0][RTW89_MKK][14] = 46,
[1][0][RTW89_IC][14] = 62,
- [1][0][RTW89_ACMA][14] = 34,
- [1][0][RTW89_FCC][15] = 54,
+ [1][0][RTW89_KCC][14] = 52,
+ [1][0][RTW89_ACMA][14] = 40,
+ [1][0][RTW89_CN][14] = 26,
+ [1][0][RTW89_UK][14] = 40,
+ [1][0][RTW89_FCC][15] = 62,
[1][0][RTW89_ETSI][15] = 40,
[1][0][RTW89_MKK][15] = 62,
[1][0][RTW89_IC][15] = 62,
- [1][0][RTW89_ACMA][15] = 34,
- [1][0][RTW89_FCC][17] = 54,
+ [1][0][RTW89_KCC][15] = 52,
+ [1][0][RTW89_ACMA][15] = 40,
+ [1][0][RTW89_CN][15] = 127,
+ [1][0][RTW89_UK][15] = 40,
+ [1][0][RTW89_FCC][17] = 62,
[1][0][RTW89_ETSI][17] = 40,
[1][0][RTW89_MKK][17] = 68,
[1][0][RTW89_IC][17] = 62,
- [1][0][RTW89_ACMA][17] = 34,
- [1][0][RTW89_FCC][19] = 54,
+ [1][0][RTW89_KCC][17] = 52,
+ [1][0][RTW89_ACMA][17] = 40,
+ [1][0][RTW89_CN][17] = 127,
+ [1][0][RTW89_UK][17] = 40,
+ [1][0][RTW89_FCC][19] = 64,
[1][0][RTW89_ETSI][19] = 40,
[1][0][RTW89_MKK][19] = 68,
- [1][0][RTW89_IC][19] = 62,
- [1][0][RTW89_ACMA][19] = 34,
- [1][0][RTW89_FCC][21] = 54,
+ [1][0][RTW89_IC][19] = 64,
+ [1][0][RTW89_KCC][19] = 52,
+ [1][0][RTW89_ACMA][19] = 40,
+ [1][0][RTW89_CN][19] = 127,
+ [1][0][RTW89_UK][19] = 40,
+ [1][0][RTW89_FCC][21] = 64,
[1][0][RTW89_ETSI][21] = 40,
[1][0][RTW89_MKK][21] = 68,
- [1][0][RTW89_IC][21] = 62,
- [1][0][RTW89_ACMA][21] = 34,
- [1][0][RTW89_FCC][23] = 54,
+ [1][0][RTW89_IC][21] = 64,
+ [1][0][RTW89_KCC][21] = 52,
+ [1][0][RTW89_ACMA][21] = 40,
+ [1][0][RTW89_CN][21] = 127,
+ [1][0][RTW89_UK][21] = 40,
+ [1][0][RTW89_FCC][23] = 64,
[1][0][RTW89_ETSI][23] = 40,
[1][0][RTW89_MKK][23] = 68,
- [1][0][RTW89_IC][23] = 62,
- [1][0][RTW89_ACMA][23] = 34,
- [1][0][RTW89_FCC][25] = 54,
+ [1][0][RTW89_IC][23] = 64,
+ [1][0][RTW89_KCC][23] = 52,
+ [1][0][RTW89_ACMA][23] = 40,
+ [1][0][RTW89_CN][23] = 127,
+ [1][0][RTW89_UK][23] = 40,
+ [1][0][RTW89_FCC][25] = 64,
[1][0][RTW89_ETSI][25] = 40,
[1][0][RTW89_MKK][25] = 68,
[1][0][RTW89_IC][25] = 127,
+ [1][0][RTW89_KCC][25] = 52,
[1][0][RTW89_ACMA][25] = 127,
- [1][0][RTW89_FCC][27] = 54,
+ [1][0][RTW89_CN][25] = 127,
+ [1][0][RTW89_UK][25] = 40,
+ [1][0][RTW89_FCC][27] = 64,
[1][0][RTW89_ETSI][27] = 42,
[1][0][RTW89_MKK][27] = 68,
[1][0][RTW89_IC][27] = 127,
+ [1][0][RTW89_KCC][27] = 52,
[1][0][RTW89_ACMA][27] = 127,
- [1][0][RTW89_FCC][29] = 54,
+ [1][0][RTW89_CN][27] = 127,
+ [1][0][RTW89_UK][27] = 42,
+ [1][0][RTW89_FCC][29] = 64,
[1][0][RTW89_ETSI][29] = 42,
[1][0][RTW89_MKK][29] = 68,
[1][0][RTW89_IC][29] = 127,
+ [1][0][RTW89_KCC][29] = 52,
[1][0][RTW89_ACMA][29] = 127,
- [1][0][RTW89_FCC][31] = 54,
+ [1][0][RTW89_CN][29] = 127,
+ [1][0][RTW89_UK][29] = 42,
+ [1][0][RTW89_FCC][31] = 64,
[1][0][RTW89_ETSI][31] = 42,
[1][0][RTW89_MKK][31] = 68,
- [1][0][RTW89_IC][31] = 62,
- [1][0][RTW89_ACMA][31] = 34,
- [1][0][RTW89_FCC][33] = 54,
+ [1][0][RTW89_IC][31] = 56,
+ [1][0][RTW89_KCC][31] = 52,
+ [1][0][RTW89_ACMA][31] = 42,
+ [1][0][RTW89_CN][31] = 127,
+ [1][0][RTW89_UK][31] = 42,
+ [1][0][RTW89_FCC][33] = 56,
[1][0][RTW89_ETSI][33] = 42,
[1][0][RTW89_MKK][33] = 68,
- [1][0][RTW89_IC][33] = 62,
- [1][0][RTW89_ACMA][33] = 34,
- [1][0][RTW89_FCC][35] = 54,
+ [1][0][RTW89_IC][33] = 56,
+ [1][0][RTW89_KCC][33] = 52,
+ [1][0][RTW89_ACMA][33] = 42,
+ [1][0][RTW89_CN][33] = 127,
+ [1][0][RTW89_UK][33] = 42,
+ [1][0][RTW89_FCC][35] = 56,
[1][0][RTW89_ETSI][35] = 42,
[1][0][RTW89_MKK][35] = 68,
- [1][0][RTW89_IC][35] = 62,
- [1][0][RTW89_ACMA][35] = 34,
- [1][0][RTW89_FCC][37] = 56,
+ [1][0][RTW89_IC][35] = 56,
+ [1][0][RTW89_KCC][35] = 52,
+ [1][0][RTW89_ACMA][35] = 42,
+ [1][0][RTW89_CN][35] = 127,
+ [1][0][RTW89_UK][35] = 42,
+ [1][0][RTW89_FCC][37] = 66,
[1][0][RTW89_ETSI][37] = 127,
[1][0][RTW89_MKK][37] = 68,
- [1][0][RTW89_IC][37] = 64,
- [1][0][RTW89_ACMA][37] = 64,
+ [1][0][RTW89_IC][37] = 66,
+ [1][0][RTW89_KCC][37] = 52,
+ [1][0][RTW89_ACMA][37] = 66,
+ [1][0][RTW89_CN][37] = 127,
+ [1][0][RTW89_UK][37] = 42,
[1][0][RTW89_FCC][38] = 76,
[1][0][RTW89_ETSI][38] = 28,
[1][0][RTW89_MKK][38] = 127,
- [1][0][RTW89_IC][38] = 84,
- [1][0][RTW89_ACMA][38] = 84,
+ [1][0][RTW89_IC][38] = 76,
+ [1][0][RTW89_KCC][38] = 54,
+ [1][0][RTW89_ACMA][38] = 76,
+ [1][0][RTW89_CN][38] = 66,
+ [1][0][RTW89_UK][38] = 44,
[1][0][RTW89_FCC][40] = 76,
[1][0][RTW89_ETSI][40] = 28,
[1][0][RTW89_MKK][40] = 127,
- [1][0][RTW89_IC][40] = 84,
- [1][0][RTW89_ACMA][40] = 84,
- [1][0][RTW89_FCC][42] = 76,
+ [1][0][RTW89_IC][40] = 76,
+ [1][0][RTW89_KCC][40] = 54,
+ [1][0][RTW89_ACMA][40] = 76,
+ [1][0][RTW89_CN][40] = 66,
+ [1][0][RTW89_UK][40] = 44,
+ [1][0][RTW89_FCC][42] = 68,
[1][0][RTW89_ETSI][42] = 28,
[1][0][RTW89_MKK][42] = 127,
- [1][0][RTW89_IC][42] = 84,
- [1][0][RTW89_ACMA][42] = 84,
- [1][0][RTW89_FCC][44] = 76,
+ [1][0][RTW89_IC][42] = 68,
+ [1][0][RTW89_KCC][42] = 54,
+ [1][0][RTW89_ACMA][42] = 68,
+ [1][0][RTW89_CN][42] = 66,
+ [1][0][RTW89_UK][42] = 44,
+ [1][0][RTW89_FCC][44] = 70,
[1][0][RTW89_ETSI][44] = 28,
[1][0][RTW89_MKK][44] = 127,
- [1][0][RTW89_IC][44] = 84,
- [1][0][RTW89_ACMA][44] = 84,
- [1][0][RTW89_FCC][46] = 76,
+ [1][0][RTW89_IC][44] = 70,
+ [1][0][RTW89_KCC][44] = 54,
+ [1][0][RTW89_ACMA][44] = 70,
+ [1][0][RTW89_CN][44] = 66,
+ [1][0][RTW89_UK][44] = 42,
+ [1][0][RTW89_FCC][46] = 70,
[1][0][RTW89_ETSI][46] = 28,
[1][0][RTW89_MKK][46] = 127,
- [1][0][RTW89_IC][46] = 84,
- [1][0][RTW89_ACMA][46] = 84,
- [1][0][RTW89_FCC][48] = 36,
+ [1][0][RTW89_IC][46] = 70,
+ [1][0][RTW89_KCC][46] = 54,
+ [1][0][RTW89_ACMA][46] = 70,
+ [1][0][RTW89_CN][46] = 66,
+ [1][0][RTW89_UK][46] = 42,
+ [1][0][RTW89_FCC][48] = 56,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
[1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
- [1][0][RTW89_FCC][50] = 36,
+ [1][0][RTW89_CN][48] = 127,
+ [1][0][RTW89_UK][48] = 127,
+ [1][0][RTW89_FCC][50] = 58,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
[1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
- [1][0][RTW89_FCC][52] = 36,
+ [1][0][RTW89_CN][50] = 127,
+ [1][0][RTW89_UK][50] = 127,
+ [1][0][RTW89_FCC][52] = 56,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
[1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
- [1][1][RTW89_FCC][0] = 34,
+ [1][0][RTW89_CN][52] = 127,
+ [1][0][RTW89_UK][52] = 127,
+ [1][1][RTW89_FCC][0] = 44,
[1][1][RTW89_ETSI][0] = 30,
[1][1][RTW89_MKK][0] = 34,
- [1][1][RTW89_IC][0] = 10,
- [1][1][RTW89_ACMA][0] = 22,
- [1][1][RTW89_FCC][2] = 36,
+ [1][1][RTW89_IC][0] = 20,
+ [1][1][RTW89_KCC][0] = 34,
+ [1][1][RTW89_ACMA][0] = 30,
+ [1][1][RTW89_CN][0] = 14,
+ [1][1][RTW89_UK][0] = 30,
+ [1][1][RTW89_FCC][2] = 44,
[1][1][RTW89_ETSI][2] = 30,
[1][1][RTW89_MKK][2] = 34,
- [1][1][RTW89_IC][2] = 14,
- [1][1][RTW89_ACMA][2] = 22,
- [1][1][RTW89_FCC][4] = 34,
+ [1][1][RTW89_IC][2] = 18,
+ [1][1][RTW89_KCC][2] = 34,
+ [1][1][RTW89_ACMA][2] = 30,
+ [1][1][RTW89_CN][2] = 14,
+ [1][1][RTW89_UK][2] = 30,
+ [1][1][RTW89_FCC][4] = 46,
[1][1][RTW89_ETSI][4] = 30,
[1][1][RTW89_MKK][4] = 26,
- [1][1][RTW89_IC][4] = 10,
- [1][1][RTW89_ACMA][4] = 22,
- [1][1][RTW89_FCC][6] = 34,
+ [1][1][RTW89_IC][4] = 20,
+ [1][1][RTW89_KCC][4] = 34,
+ [1][1][RTW89_ACMA][4] = 30,
+ [1][1][RTW89_CN][4] = 14,
+ [1][1][RTW89_UK][4] = 30,
+ [1][1][RTW89_FCC][6] = 46,
[1][1][RTW89_ETSI][6] = 30,
[1][1][RTW89_MKK][6] = 26,
- [1][1][RTW89_IC][6] = 10,
- [1][1][RTW89_ACMA][6] = 22,
- [1][1][RTW89_FCC][8] = 36,
+ [1][1][RTW89_IC][6] = 20,
+ [1][1][RTW89_KCC][6] = 8,
+ [1][1][RTW89_ACMA][6] = 30,
+ [1][1][RTW89_CN][6] = 14,
+ [1][1][RTW89_UK][6] = 30,
+ [1][1][RTW89_FCC][8] = 44,
[1][1][RTW89_ETSI][8] = 30,
[1][1][RTW89_MKK][8] = 20,
[1][1][RTW89_IC][8] = 44,
- [1][1][RTW89_ACMA][8] = 22,
- [1][1][RTW89_FCC][10] = 36,
+ [1][1][RTW89_KCC][8] = 34,
+ [1][1][RTW89_ACMA][8] = 30,
+ [1][1][RTW89_CN][8] = 14,
+ [1][1][RTW89_UK][8] = 30,
+ [1][1][RTW89_FCC][10] = 44,
[1][1][RTW89_ETSI][10] = 30,
[1][1][RTW89_MKK][10] = 20,
[1][1][RTW89_IC][10] = 44,
- [1][1][RTW89_ACMA][10] = 22,
- [1][1][RTW89_FCC][12] = 38,
+ [1][1][RTW89_KCC][10] = 34,
+ [1][1][RTW89_ACMA][10] = 30,
+ [1][1][RTW89_CN][10] = 14,
+ [1][1][RTW89_UK][10] = 30,
+ [1][1][RTW89_FCC][12] = 44,
[1][1][RTW89_ETSI][12] = 30,
[1][1][RTW89_MKK][12] = 34,
- [1][1][RTW89_IC][12] = 46,
- [1][1][RTW89_ACMA][12] = 22,
- [1][1][RTW89_FCC][14] = 34,
+ [1][1][RTW89_IC][12] = 44,
+ [1][1][RTW89_KCC][12] = 38,
+ [1][1][RTW89_ACMA][12] = 30,
+ [1][1][RTW89_CN][12] = 14,
+ [1][1][RTW89_UK][12] = 30,
+ [1][1][RTW89_FCC][14] = 44,
[1][1][RTW89_ETSI][14] = 30,
[1][1][RTW89_MKK][14] = 34,
- [1][1][RTW89_IC][14] = 40,
- [1][1][RTW89_ACMA][14] = 22,
- [1][1][RTW89_FCC][15] = 34,
+ [1][1][RTW89_IC][14] = 44,
+ [1][1][RTW89_KCC][14] = 38,
+ [1][1][RTW89_ACMA][14] = 30,
+ [1][1][RTW89_CN][14] = 14,
+ [1][1][RTW89_UK][14] = 30,
+ [1][1][RTW89_FCC][15] = 44,
[1][1][RTW89_ETSI][15] = 28,
[1][1][RTW89_MKK][15] = 56,
- [1][1][RTW89_IC][15] = 42,
- [1][1][RTW89_ACMA][15] = 22,
- [1][1][RTW89_FCC][17] = 34,
+ [1][1][RTW89_IC][15] = 44,
+ [1][1][RTW89_KCC][15] = 36,
+ [1][1][RTW89_ACMA][15] = 28,
+ [1][1][RTW89_CN][15] = 127,
+ [1][1][RTW89_UK][15] = 28,
+ [1][1][RTW89_FCC][17] = 44,
[1][1][RTW89_ETSI][17] = 28,
[1][1][RTW89_MKK][17] = 58,
- [1][1][RTW89_IC][17] = 42,
- [1][1][RTW89_ACMA][17] = 22,
- [1][1][RTW89_FCC][19] = 34,
+ [1][1][RTW89_IC][17] = 44,
+ [1][1][RTW89_KCC][17] = 36,
+ [1][1][RTW89_ACMA][17] = 28,
+ [1][1][RTW89_CN][17] = 127,
+ [1][1][RTW89_UK][17] = 28,
+ [1][1][RTW89_FCC][19] = 44,
[1][1][RTW89_ETSI][19] = 28,
[1][1][RTW89_MKK][19] = 58,
- [1][1][RTW89_IC][19] = 42,
- [1][1][RTW89_ACMA][19] = 22,
- [1][1][RTW89_FCC][21] = 34,
+ [1][1][RTW89_IC][19] = 44,
+ [1][1][RTW89_KCC][19] = 36,
+ [1][1][RTW89_ACMA][19] = 28,
+ [1][1][RTW89_CN][19] = 127,
+ [1][1][RTW89_UK][19] = 28,
+ [1][1][RTW89_FCC][21] = 44,
[1][1][RTW89_ETSI][21] = 28,
[1][1][RTW89_MKK][21] = 58,
- [1][1][RTW89_IC][21] = 42,
- [1][1][RTW89_ACMA][21] = 22,
- [1][1][RTW89_FCC][23] = 34,
+ [1][1][RTW89_IC][21] = 44,
+ [1][1][RTW89_KCC][21] = 36,
+ [1][1][RTW89_ACMA][21] = 28,
+ [1][1][RTW89_CN][21] = 127,
+ [1][1][RTW89_UK][21] = 28,
+ [1][1][RTW89_FCC][23] = 44,
[1][1][RTW89_ETSI][23] = 28,
[1][1][RTW89_MKK][23] = 58,
- [1][1][RTW89_IC][23] = 42,
- [1][1][RTW89_ACMA][23] = 22,
- [1][1][RTW89_FCC][25] = 34,
+ [1][1][RTW89_IC][23] = 44,
+ [1][1][RTW89_KCC][23] = 36,
+ [1][1][RTW89_ACMA][23] = 28,
+ [1][1][RTW89_CN][23] = 127,
+ [1][1][RTW89_UK][23] = 28,
+ [1][1][RTW89_FCC][25] = 44,
[1][1][RTW89_ETSI][25] = 28,
[1][1][RTW89_MKK][25] = 58,
[1][1][RTW89_IC][25] = 127,
+ [1][1][RTW89_KCC][25] = 36,
[1][1][RTW89_ACMA][25] = 127,
- [1][1][RTW89_FCC][27] = 34,
+ [1][1][RTW89_CN][25] = 127,
+ [1][1][RTW89_UK][25] = 28,
+ [1][1][RTW89_FCC][27] = 44,
[1][1][RTW89_ETSI][27] = 30,
[1][1][RTW89_MKK][27] = 58,
[1][1][RTW89_IC][27] = 127,
+ [1][1][RTW89_KCC][27] = 36,
[1][1][RTW89_ACMA][27] = 127,
- [1][1][RTW89_FCC][29] = 34,
+ [1][1][RTW89_CN][27] = 127,
+ [1][1][RTW89_UK][27] = 30,
+ [1][1][RTW89_FCC][29] = 44,
[1][1][RTW89_ETSI][29] = 30,
[1][1][RTW89_MKK][29] = 58,
[1][1][RTW89_IC][29] = 127,
+ [1][1][RTW89_KCC][29] = 36,
[1][1][RTW89_ACMA][29] = 127,
- [1][1][RTW89_FCC][31] = 34,
+ [1][1][RTW89_CN][29] = 127,
+ [1][1][RTW89_UK][29] = 30,
+ [1][1][RTW89_FCC][31] = 44,
[1][1][RTW89_ETSI][31] = 30,
[1][1][RTW89_MKK][31] = 58,
[1][1][RTW89_IC][31] = 38,
- [1][1][RTW89_ACMA][31] = 22,
- [1][1][RTW89_FCC][33] = 32,
+ [1][1][RTW89_KCC][31] = 36,
+ [1][1][RTW89_ACMA][31] = 30,
+ [1][1][RTW89_CN][31] = 127,
+ [1][1][RTW89_UK][31] = 30,
+ [1][1][RTW89_FCC][33] = 38,
[1][1][RTW89_ETSI][33] = 30,
[1][1][RTW89_MKK][33] = 58,
[1][1][RTW89_IC][33] = 38,
- [1][1][RTW89_ACMA][33] = 22,
- [1][1][RTW89_FCC][35] = 32,
+ [1][1][RTW89_KCC][33] = 36,
+ [1][1][RTW89_ACMA][33] = 30,
+ [1][1][RTW89_CN][33] = 127,
+ [1][1][RTW89_UK][33] = 30,
+ [1][1][RTW89_FCC][35] = 38,
[1][1][RTW89_ETSI][35] = 30,
[1][1][RTW89_MKK][35] = 58,
[1][1][RTW89_IC][35] = 38,
- [1][1][RTW89_ACMA][35] = 22,
- [1][1][RTW89_FCC][37] = 40,
+ [1][1][RTW89_KCC][35] = 36,
+ [1][1][RTW89_ACMA][35] = 30,
+ [1][1][RTW89_CN][35] = 127,
+ [1][1][RTW89_UK][35] = 30,
+ [1][1][RTW89_FCC][37] = 46,
[1][1][RTW89_ETSI][37] = 127,
[1][1][RTW89_MKK][37] = 58,
- [1][1][RTW89_IC][37] = 48,
- [1][1][RTW89_ACMA][37] = 48,
- [1][1][RTW89_FCC][38] = 76,
+ [1][1][RTW89_IC][37] = 46,
+ [1][1][RTW89_KCC][37] = 36,
+ [1][1][RTW89_ACMA][37] = 46,
+ [1][1][RTW89_CN][37] = 127,
+ [1][1][RTW89_UK][37] = 32,
+ [1][1][RTW89_FCC][38] = 74,
[1][1][RTW89_ETSI][38] = 16,
[1][1][RTW89_MKK][38] = 127,
- [1][1][RTW89_IC][38] = 84,
- [1][1][RTW89_ACMA][38] = 82,
- [1][1][RTW89_FCC][40] = 76,
+ [1][1][RTW89_IC][38] = 74,
+ [1][1][RTW89_KCC][38] = 36,
+ [1][1][RTW89_ACMA][38] = 74,
+ [1][1][RTW89_CN][38] = 54,
+ [1][1][RTW89_UK][38] = 30,
+ [1][1][RTW89_FCC][40] = 74,
[1][1][RTW89_ETSI][40] = 16,
[1][1][RTW89_MKK][40] = 127,
- [1][1][RTW89_IC][40] = 84,
- [1][1][RTW89_ACMA][40] = 82,
- [1][1][RTW89_FCC][42] = 76,
+ [1][1][RTW89_IC][40] = 74,
+ [1][1][RTW89_KCC][40] = 36,
+ [1][1][RTW89_ACMA][40] = 74,
+ [1][1][RTW89_CN][40] = 54,
+ [1][1][RTW89_UK][40] = 30,
+ [1][1][RTW89_FCC][42] = 74,
[1][1][RTW89_ETSI][42] = 16,
[1][1][RTW89_MKK][42] = 127,
- [1][1][RTW89_IC][42] = 84,
- [1][1][RTW89_ACMA][42] = 84,
- [1][1][RTW89_FCC][44] = 76,
+ [1][1][RTW89_IC][42] = 74,
+ [1][1][RTW89_KCC][42] = 36,
+ [1][1][RTW89_ACMA][42] = 74,
+ [1][1][RTW89_CN][42] = 54,
+ [1][1][RTW89_UK][42] = 30,
+ [1][1][RTW89_FCC][44] = 74,
[1][1][RTW89_ETSI][44] = 16,
[1][1][RTW89_MKK][44] = 127,
- [1][1][RTW89_IC][44] = 84,
- [1][1][RTW89_ACMA][44] = 84,
- [1][1][RTW89_FCC][46] = 76,
+ [1][1][RTW89_IC][44] = 74,
+ [1][1][RTW89_KCC][44] = 36,
+ [1][1][RTW89_ACMA][44] = 74,
+ [1][1][RTW89_CN][44] = 54,
+ [1][1][RTW89_UK][44] = 30,
+ [1][1][RTW89_FCC][46] = 74,
[1][1][RTW89_ETSI][46] = 16,
[1][1][RTW89_MKK][46] = 127,
- [1][1][RTW89_IC][46] = 84,
- [1][1][RTW89_ACMA][46] = 84,
- [1][1][RTW89_FCC][48] = 24,
+ [1][1][RTW89_IC][46] = 74,
+ [1][1][RTW89_KCC][46] = 36,
+ [1][1][RTW89_ACMA][46] = 74,
+ [1][1][RTW89_CN][46] = 54,
+ [1][1][RTW89_UK][46] = 30,
+ [1][1][RTW89_FCC][48] = 34,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
[1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
- [1][1][RTW89_FCC][50] = 24,
+ [1][1][RTW89_CN][48] = 127,
+ [1][1][RTW89_UK][48] = 127,
+ [1][1][RTW89_FCC][50] = 34,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
[1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
- [1][1][RTW89_FCC][52] = 24,
+ [1][1][RTW89_CN][50] = 127,
+ [1][1][RTW89_UK][50] = 127,
+ [1][1][RTW89_FCC][52] = 30,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
[1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
- [2][0][RTW89_FCC][0] = 62,
+ [1][1][RTW89_CN][52] = 127,
+ [1][1][RTW89_UK][52] = 127,
+ [2][0][RTW89_FCC][0] = 68,
[2][0][RTW89_ETSI][0] = 52,
[2][0][RTW89_MKK][0] = 60,
- [2][0][RTW89_IC][0] = 46,
- [2][0][RTW89_ACMA][0] = 48,
- [2][0][RTW89_FCC][2] = 62,
+ [2][0][RTW89_IC][0] = 52,
+ [2][0][RTW89_KCC][0] = 64,
+ [2][0][RTW89_ACMA][0] = 52,
+ [2][0][RTW89_CN][0] = 40,
+ [2][0][RTW89_UK][0] = 52,
+ [2][0][RTW89_FCC][2] = 64,
[2][0][RTW89_ETSI][2] = 52,
[2][0][RTW89_MKK][2] = 60,
- [2][0][RTW89_IC][2] = 46,
- [2][0][RTW89_ACMA][2] = 48,
- [2][0][RTW89_FCC][4] = 62,
+ [2][0][RTW89_IC][2] = 50,
+ [2][0][RTW89_KCC][2] = 64,
+ [2][0][RTW89_ACMA][2] = 52,
+ [2][0][RTW89_CN][2] = 40,
+ [2][0][RTW89_UK][2] = 52,
+ [2][0][RTW89_FCC][4] = 68,
[2][0][RTW89_ETSI][4] = 52,
[2][0][RTW89_MKK][4] = 50,
- [2][0][RTW89_IC][4] = 46,
- [2][0][RTW89_ACMA][4] = 48,
- [2][0][RTW89_FCC][6] = 62,
+ [2][0][RTW89_IC][4] = 50,
+ [2][0][RTW89_KCC][4] = 64,
+ [2][0][RTW89_ACMA][4] = 52,
+ [2][0][RTW89_CN][4] = 40,
+ [2][0][RTW89_UK][4] = 52,
+ [2][0][RTW89_FCC][6] = 68,
[2][0][RTW89_ETSI][6] = 52,
[2][0][RTW89_MKK][6] = 50,
- [2][0][RTW89_IC][6] = 46,
- [2][0][RTW89_ACMA][6] = 48,
- [2][0][RTW89_FCC][8] = 62,
+ [2][0][RTW89_IC][6] = 50,
+ [2][0][RTW89_KCC][6] = 36,
+ [2][0][RTW89_ACMA][6] = 52,
+ [2][0][RTW89_CN][6] = 40,
+ [2][0][RTW89_UK][6] = 52,
+ [2][0][RTW89_FCC][8] = 68,
[2][0][RTW89_ETSI][8] = 52,
[2][0][RTW89_MKK][8] = 44,
- [2][0][RTW89_IC][8] = 66,
- [2][0][RTW89_ACMA][8] = 48,
- [2][0][RTW89_FCC][10] = 62,
+ [2][0][RTW89_IC][8] = 64,
+ [2][0][RTW89_KCC][8] = 62,
+ [2][0][RTW89_ACMA][8] = 52,
+ [2][0][RTW89_CN][8] = 40,
+ [2][0][RTW89_UK][8] = 52,
+ [2][0][RTW89_FCC][10] = 68,
[2][0][RTW89_ETSI][10] = 52,
[2][0][RTW89_MKK][10] = 44,
- [2][0][RTW89_IC][10] = 66,
- [2][0][RTW89_ACMA][10] = 48,
- [2][0][RTW89_FCC][12] = 62,
+ [2][0][RTW89_IC][10] = 64,
+ [2][0][RTW89_KCC][10] = 62,
+ [2][0][RTW89_ACMA][10] = 52,
+ [2][0][RTW89_CN][10] = 40,
+ [2][0][RTW89_UK][10] = 52,
+ [2][0][RTW89_FCC][12] = 68,
[2][0][RTW89_ETSI][12] = 52,
[2][0][RTW89_MKK][12] = 58,
- [2][0][RTW89_IC][12] = 66,
- [2][0][RTW89_ACMA][12] = 48,
- [2][0][RTW89_FCC][14] = 62,
+ [2][0][RTW89_IC][12] = 64,
+ [2][0][RTW89_KCC][12] = 62,
+ [2][0][RTW89_ACMA][12] = 52,
+ [2][0][RTW89_CN][12] = 40,
+ [2][0][RTW89_UK][12] = 52,
+ [2][0][RTW89_FCC][14] = 68,
[2][0][RTW89_ETSI][14] = 52,
[2][0][RTW89_MKK][14] = 58,
- [2][0][RTW89_IC][14] = 66,
- [2][0][RTW89_ACMA][14] = 48,
- [2][0][RTW89_FCC][15] = 62,
+ [2][0][RTW89_IC][14] = 64,
+ [2][0][RTW89_KCC][14] = 62,
+ [2][0][RTW89_ACMA][14] = 52,
+ [2][0][RTW89_CN][14] = 40,
+ [2][0][RTW89_UK][14] = 52,
+ [2][0][RTW89_FCC][15] = 68,
[2][0][RTW89_ETSI][15] = 52,
[2][0][RTW89_MKK][15] = 68,
- [2][0][RTW89_IC][15] = 70,
- [2][0][RTW89_ACMA][15] = 48,
- [2][0][RTW89_FCC][17] = 62,
+ [2][0][RTW89_IC][15] = 68,
+ [2][0][RTW89_KCC][15] = 62,
+ [2][0][RTW89_ACMA][15] = 52,
+ [2][0][RTW89_CN][15] = 127,
+ [2][0][RTW89_UK][15] = 52,
+ [2][0][RTW89_FCC][17] = 68,
[2][0][RTW89_ETSI][17] = 52,
[2][0][RTW89_MKK][17] = 74,
- [2][0][RTW89_IC][17] = 70,
- [2][0][RTW89_ACMA][17] = 48,
- [2][0][RTW89_FCC][19] = 62,
+ [2][0][RTW89_IC][17] = 68,
+ [2][0][RTW89_KCC][17] = 62,
+ [2][0][RTW89_ACMA][17] = 52,
+ [2][0][RTW89_CN][17] = 127,
+ [2][0][RTW89_UK][17] = 52,
+ [2][0][RTW89_FCC][19] = 70,
[2][0][RTW89_ETSI][19] = 52,
[2][0][RTW89_MKK][19] = 74,
[2][0][RTW89_IC][19] = 70,
- [2][0][RTW89_ACMA][19] = 48,
- [2][0][RTW89_FCC][21] = 62,
+ [2][0][RTW89_KCC][19] = 62,
+ [2][0][RTW89_ACMA][19] = 52,
+ [2][0][RTW89_CN][19] = 127,
+ [2][0][RTW89_UK][19] = 52,
+ [2][0][RTW89_FCC][21] = 70,
[2][0][RTW89_ETSI][21] = 52,
[2][0][RTW89_MKK][21] = 74,
[2][0][RTW89_IC][21] = 70,
- [2][0][RTW89_ACMA][21] = 48,
- [2][0][RTW89_FCC][23] = 62,
+ [2][0][RTW89_KCC][21] = 62,
+ [2][0][RTW89_ACMA][21] = 52,
+ [2][0][RTW89_CN][21] = 127,
+ [2][0][RTW89_UK][21] = 52,
+ [2][0][RTW89_FCC][23] = 70,
[2][0][RTW89_ETSI][23] = 52,
[2][0][RTW89_MKK][23] = 74,
[2][0][RTW89_IC][23] = 70,
- [2][0][RTW89_ACMA][23] = 48,
- [2][0][RTW89_FCC][25] = 62,
+ [2][0][RTW89_KCC][23] = 62,
+ [2][0][RTW89_ACMA][23] = 52,
+ [2][0][RTW89_CN][23] = 127,
+ [2][0][RTW89_UK][23] = 52,
+ [2][0][RTW89_FCC][25] = 70,
[2][0][RTW89_ETSI][25] = 52,
[2][0][RTW89_MKK][25] = 74,
[2][0][RTW89_IC][25] = 127,
+ [2][0][RTW89_KCC][25] = 62,
[2][0][RTW89_ACMA][25] = 127,
- [2][0][RTW89_FCC][27] = 62,
+ [2][0][RTW89_CN][25] = 127,
+ [2][0][RTW89_UK][25] = 52,
+ [2][0][RTW89_FCC][27] = 70,
[2][0][RTW89_ETSI][27] = 52,
[2][0][RTW89_MKK][27] = 74,
[2][0][RTW89_IC][27] = 127,
+ [2][0][RTW89_KCC][27] = 62,
[2][0][RTW89_ACMA][27] = 127,
- [2][0][RTW89_FCC][29] = 62,
+ [2][0][RTW89_CN][27] = 127,
+ [2][0][RTW89_UK][27] = 52,
+ [2][0][RTW89_FCC][29] = 70,
[2][0][RTW89_ETSI][29] = 52,
[2][0][RTW89_MKK][29] = 74,
[2][0][RTW89_IC][29] = 127,
+ [2][0][RTW89_KCC][29] = 62,
[2][0][RTW89_ACMA][29] = 127,
- [2][0][RTW89_FCC][31] = 62,
+ [2][0][RTW89_CN][29] = 127,
+ [2][0][RTW89_UK][29] = 52,
+ [2][0][RTW89_FCC][31] = 70,
[2][0][RTW89_ETSI][31] = 52,
[2][0][RTW89_MKK][31] = 74,
- [2][0][RTW89_IC][31] = 72,
- [2][0][RTW89_ACMA][31] = 48,
- [2][0][RTW89_FCC][33] = 64,
+ [2][0][RTW89_IC][31] = 62,
+ [2][0][RTW89_KCC][31] = 62,
+ [2][0][RTW89_ACMA][31] = 52,
+ [2][0][RTW89_CN][31] = 127,
+ [2][0][RTW89_UK][31] = 52,
+ [2][0][RTW89_FCC][33] = 62,
[2][0][RTW89_ETSI][33] = 52,
[2][0][RTW89_MKK][33] = 74,
- [2][0][RTW89_IC][33] = 72,
- [2][0][RTW89_ACMA][33] = 48,
- [2][0][RTW89_FCC][35] = 64,
+ [2][0][RTW89_IC][33] = 62,
+ [2][0][RTW89_KCC][33] = 62,
+ [2][0][RTW89_ACMA][33] = 52,
+ [2][0][RTW89_CN][33] = 127,
+ [2][0][RTW89_UK][33] = 52,
+ [2][0][RTW89_FCC][35] = 62,
[2][0][RTW89_ETSI][35] = 52,
[2][0][RTW89_MKK][35] = 74,
- [2][0][RTW89_IC][35] = 72,
- [2][0][RTW89_ACMA][35] = 48,
- [2][0][RTW89_FCC][37] = 62,
+ [2][0][RTW89_IC][35] = 62,
+ [2][0][RTW89_KCC][35] = 62,
+ [2][0][RTW89_ACMA][35] = 52,
+ [2][0][RTW89_CN][35] = 127,
+ [2][0][RTW89_UK][35] = 52,
+ [2][0][RTW89_FCC][37] = 70,
[2][0][RTW89_ETSI][37] = 127,
[2][0][RTW89_MKK][37] = 74,
[2][0][RTW89_IC][37] = 70,
- [2][0][RTW89_ACMA][37] = 76,
- [2][0][RTW89_FCC][38] = 76,
+ [2][0][RTW89_KCC][37] = 62,
+ [2][0][RTW89_ACMA][37] = 70,
+ [2][0][RTW89_CN][37] = 127,
+ [2][0][RTW89_UK][37] = 52,
+ [2][0][RTW89_FCC][38] = 82,
[2][0][RTW89_ETSI][38] = 28,
[2][0][RTW89_MKK][38] = 127,
- [2][0][RTW89_IC][38] = 84,
- [2][0][RTW89_ACMA][38] = 84,
- [2][0][RTW89_FCC][40] = 76,
+ [2][0][RTW89_IC][38] = 82,
+ [2][0][RTW89_KCC][38] = 64,
+ [2][0][RTW89_ACMA][38] = 82,
+ [2][0][RTW89_CN][38] = 68,
+ [2][0][RTW89_UK][38] = 54,
+ [2][0][RTW89_FCC][40] = 82,
[2][0][RTW89_ETSI][40] = 28,
[2][0][RTW89_MKK][40] = 127,
- [2][0][RTW89_IC][40] = 84,
- [2][0][RTW89_ACMA][40] = 84,
+ [2][0][RTW89_IC][40] = 82,
+ [2][0][RTW89_KCC][40] = 64,
+ [2][0][RTW89_ACMA][40] = 82,
+ [2][0][RTW89_CN][40] = 68,
+ [2][0][RTW89_UK][40] = 54,
[2][0][RTW89_FCC][42] = 76,
[2][0][RTW89_ETSI][42] = 28,
[2][0][RTW89_MKK][42] = 127,
- [2][0][RTW89_IC][42] = 84,
- [2][0][RTW89_ACMA][42] = 84,
- [2][0][RTW89_FCC][44] = 76,
+ [2][0][RTW89_IC][42] = 76,
+ [2][0][RTW89_KCC][42] = 64,
+ [2][0][RTW89_ACMA][42] = 76,
+ [2][0][RTW89_CN][42] = 68,
+ [2][0][RTW89_UK][42] = 54,
+ [2][0][RTW89_FCC][44] = 80,
[2][0][RTW89_ETSI][44] = 28,
[2][0][RTW89_MKK][44] = 127,
- [2][0][RTW89_IC][44] = 84,
- [2][0][RTW89_ACMA][44] = 84,
- [2][0][RTW89_FCC][46] = 76,
+ [2][0][RTW89_IC][44] = 80,
+ [2][0][RTW89_KCC][44] = 64,
+ [2][0][RTW89_ACMA][44] = 80,
+ [2][0][RTW89_CN][44] = 68,
+ [2][0][RTW89_UK][44] = 54,
+ [2][0][RTW89_FCC][46] = 80,
[2][0][RTW89_ETSI][46] = 28,
[2][0][RTW89_MKK][46] = 127,
- [2][0][RTW89_IC][46] = 84,
- [2][0][RTW89_ACMA][46] = 84,
- [2][0][RTW89_FCC][48] = 48,
+ [2][0][RTW89_IC][46] = 80,
+ [2][0][RTW89_KCC][46] = 64,
+ [2][0][RTW89_ACMA][46] = 80,
+ [2][0][RTW89_CN][46] = 68,
+ [2][0][RTW89_UK][46] = 54,
+ [2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
- [2][0][RTW89_FCC][50] = 48,
+ [2][0][RTW89_CN][48] = 127,
+ [2][0][RTW89_UK][48] = 127,
+ [2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
- [2][0][RTW89_FCC][52] = 48,
+ [2][0][RTW89_CN][50] = 127,
+ [2][0][RTW89_UK][50] = 127,
+ [2][0][RTW89_FCC][52] = 64,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
- [2][1][RTW89_FCC][0] = 42,
+ [2][0][RTW89_CN][52] = 127,
+ [2][0][RTW89_UK][52] = 127,
+ [2][1][RTW89_FCC][0] = 50,
[2][1][RTW89_ETSI][0] = 40,
[2][1][RTW89_MKK][0] = 44,
- [2][1][RTW89_IC][0] = 20,
- [2][1][RTW89_ACMA][0] = 36,
- [2][1][RTW89_FCC][2] = 42,
+ [2][1][RTW89_IC][0] = 26,
+ [2][1][RTW89_KCC][0] = 44,
+ [2][1][RTW89_ACMA][0] = 40,
+ [2][1][RTW89_CN][0] = 28,
+ [2][1][RTW89_UK][0] = 40,
+ [2][1][RTW89_FCC][2] = 50,
[2][1][RTW89_ETSI][2] = 40,
[2][1][RTW89_MKK][2] = 44,
- [2][1][RTW89_IC][2] = 18,
- [2][1][RTW89_ACMA][2] = 36,
- [2][1][RTW89_FCC][4] = 42,
+ [2][1][RTW89_IC][2] = 26,
+ [2][1][RTW89_KCC][2] = 44,
+ [2][1][RTW89_ACMA][2] = 40,
+ [2][1][RTW89_CN][2] = 28,
+ [2][1][RTW89_UK][2] = 40,
+ [2][1][RTW89_FCC][4] = 50,
[2][1][RTW89_ETSI][4] = 40,
[2][1][RTW89_MKK][4] = 36,
- [2][1][RTW89_IC][4] = 22,
- [2][1][RTW89_ACMA][4] = 36,
- [2][1][RTW89_FCC][6] = 42,
+ [2][1][RTW89_IC][4] = 26,
+ [2][1][RTW89_KCC][4] = 44,
+ [2][1][RTW89_ACMA][4] = 40,
+ [2][1][RTW89_CN][4] = 28,
+ [2][1][RTW89_UK][4] = 40,
+ [2][1][RTW89_FCC][6] = 50,
[2][1][RTW89_ETSI][6] = 40,
[2][1][RTW89_MKK][6] = 36,
- [2][1][RTW89_IC][6] = 22,
- [2][1][RTW89_ACMA][6] = 36,
- [2][1][RTW89_FCC][8] = 42,
+ [2][1][RTW89_IC][6] = 26,
+ [2][1][RTW89_KCC][6] = 20,
+ [2][1][RTW89_ACMA][6] = 40,
+ [2][1][RTW89_CN][6] = 28,
+ [2][1][RTW89_UK][6] = 40,
+ [2][1][RTW89_FCC][8] = 50,
[2][1][RTW89_ETSI][8] = 40,
[2][1][RTW89_MKK][8] = 32,
[2][1][RTW89_IC][8] = 50,
- [2][1][RTW89_ACMA][8] = 36,
- [2][1][RTW89_FCC][10] = 42,
+ [2][1][RTW89_KCC][8] = 46,
+ [2][1][RTW89_ACMA][8] = 40,
+ [2][1][RTW89_CN][8] = 28,
+ [2][1][RTW89_UK][8] = 40,
+ [2][1][RTW89_FCC][10] = 50,
[2][1][RTW89_ETSI][10] = 40,
[2][1][RTW89_MKK][10] = 32,
[2][1][RTW89_IC][10] = 50,
- [2][1][RTW89_ACMA][10] = 36,
- [2][1][RTW89_FCC][12] = 44,
+ [2][1][RTW89_KCC][10] = 46,
+ [2][1][RTW89_ACMA][10] = 40,
+ [2][1][RTW89_CN][10] = 28,
+ [2][1][RTW89_UK][10] = 40,
+ [2][1][RTW89_FCC][12] = 48,
[2][1][RTW89_ETSI][12] = 40,
[2][1][RTW89_MKK][12] = 44,
- [2][1][RTW89_IC][12] = 52,
- [2][1][RTW89_ACMA][12] = 36,
- [2][1][RTW89_FCC][14] = 44,
+ [2][1][RTW89_IC][12] = 48,
+ [2][1][RTW89_KCC][12] = 46,
+ [2][1][RTW89_ACMA][12] = 40,
+ [2][1][RTW89_CN][12] = 28,
+ [2][1][RTW89_UK][12] = 40,
+ [2][1][RTW89_FCC][14] = 48,
[2][1][RTW89_ETSI][14] = 40,
[2][1][RTW89_MKK][14] = 44,
- [2][1][RTW89_IC][14] = 52,
- [2][1][RTW89_ACMA][14] = 36,
- [2][1][RTW89_FCC][15] = 42,
+ [2][1][RTW89_IC][14] = 48,
+ [2][1][RTW89_KCC][14] = 46,
+ [2][1][RTW89_ACMA][14] = 40,
+ [2][1][RTW89_CN][14] = 28,
+ [2][1][RTW89_UK][14] = 40,
+ [2][1][RTW89_FCC][15] = 50,
[2][1][RTW89_ETSI][15] = 40,
[2][1][RTW89_MKK][15] = 66,
[2][1][RTW89_IC][15] = 50,
- [2][1][RTW89_ACMA][15] = 36,
- [2][1][RTW89_FCC][17] = 42,
+ [2][1][RTW89_KCC][15] = 46,
+ [2][1][RTW89_ACMA][15] = 40,
+ [2][1][RTW89_CN][15] = 127,
+ [2][1][RTW89_UK][15] = 40,
+ [2][1][RTW89_FCC][17] = 50,
[2][1][RTW89_ETSI][17] = 40,
[2][1][RTW89_MKK][17] = 66,
[2][1][RTW89_IC][17] = 50,
- [2][1][RTW89_ACMA][17] = 36,
- [2][1][RTW89_FCC][19] = 42,
+ [2][1][RTW89_KCC][17] = 46,
+ [2][1][RTW89_ACMA][17] = 40,
+ [2][1][RTW89_CN][17] = 127,
+ [2][1][RTW89_UK][17] = 40,
+ [2][1][RTW89_FCC][19] = 50,
[2][1][RTW89_ETSI][19] = 40,
[2][1][RTW89_MKK][19] = 66,
[2][1][RTW89_IC][19] = 50,
- [2][1][RTW89_ACMA][19] = 36,
- [2][1][RTW89_FCC][21] = 42,
+ [2][1][RTW89_KCC][19] = 46,
+ [2][1][RTW89_ACMA][19] = 40,
+ [2][1][RTW89_CN][19] = 127,
+ [2][1][RTW89_UK][19] = 40,
+ [2][1][RTW89_FCC][21] = 50,
[2][1][RTW89_ETSI][21] = 40,
[2][1][RTW89_MKK][21] = 66,
[2][1][RTW89_IC][21] = 50,
- [2][1][RTW89_ACMA][21] = 36,
- [2][1][RTW89_FCC][23] = 42,
+ [2][1][RTW89_KCC][21] = 46,
+ [2][1][RTW89_ACMA][21] = 40,
+ [2][1][RTW89_CN][21] = 127,
+ [2][1][RTW89_UK][21] = 40,
+ [2][1][RTW89_FCC][23] = 50,
[2][1][RTW89_ETSI][23] = 40,
[2][1][RTW89_MKK][23] = 66,
[2][1][RTW89_IC][23] = 50,
- [2][1][RTW89_ACMA][23] = 36,
- [2][1][RTW89_FCC][25] = 42,
+ [2][1][RTW89_KCC][23] = 46,
+ [2][1][RTW89_ACMA][23] = 40,
+ [2][1][RTW89_CN][23] = 127,
+ [2][1][RTW89_UK][23] = 40,
+ [2][1][RTW89_FCC][25] = 50,
[2][1][RTW89_ETSI][25] = 40,
[2][1][RTW89_MKK][25] = 66,
[2][1][RTW89_IC][25] = 127,
+ [2][1][RTW89_KCC][25] = 46,
[2][1][RTW89_ACMA][25] = 127,
- [2][1][RTW89_FCC][27] = 42,
+ [2][1][RTW89_CN][25] = 127,
+ [2][1][RTW89_UK][25] = 40,
+ [2][1][RTW89_FCC][27] = 50,
[2][1][RTW89_ETSI][27] = 40,
[2][1][RTW89_MKK][27] = 66,
[2][1][RTW89_IC][27] = 127,
+ [2][1][RTW89_KCC][27] = 46,
[2][1][RTW89_ACMA][27] = 127,
- [2][1][RTW89_FCC][29] = 42,
+ [2][1][RTW89_CN][27] = 127,
+ [2][1][RTW89_UK][27] = 40,
+ [2][1][RTW89_FCC][29] = 50,
[2][1][RTW89_ETSI][29] = 40,
[2][1][RTW89_MKK][29] = 66,
[2][1][RTW89_IC][29] = 127,
+ [2][1][RTW89_KCC][29] = 46,
[2][1][RTW89_ACMA][29] = 127,
- [2][1][RTW89_FCC][31] = 42,
+ [2][1][RTW89_CN][29] = 127,
+ [2][1][RTW89_UK][29] = 40,
+ [2][1][RTW89_FCC][31] = 50,
[2][1][RTW89_ETSI][31] = 40,
[2][1][RTW89_MKK][31] = 66,
- [2][1][RTW89_IC][31] = 50,
- [2][1][RTW89_ACMA][31] = 36,
- [2][1][RTW89_FCC][33] = 42,
+ [2][1][RTW89_IC][31] = 48,
+ [2][1][RTW89_KCC][31] = 46,
+ [2][1][RTW89_ACMA][31] = 40,
+ [2][1][RTW89_CN][31] = 127,
+ [2][1][RTW89_UK][31] = 40,
+ [2][1][RTW89_FCC][33] = 48,
[2][1][RTW89_ETSI][33] = 40,
[2][1][RTW89_MKK][33] = 66,
- [2][1][RTW89_IC][33] = 50,
- [2][1][RTW89_ACMA][33] = 36,
- [2][1][RTW89_FCC][35] = 42,
+ [2][1][RTW89_IC][33] = 48,
+ [2][1][RTW89_KCC][33] = 46,
+ [2][1][RTW89_ACMA][33] = 40,
+ [2][1][RTW89_CN][33] = 127,
+ [2][1][RTW89_UK][33] = 40,
+ [2][1][RTW89_FCC][35] = 48,
[2][1][RTW89_ETSI][35] = 40,
[2][1][RTW89_MKK][35] = 66,
- [2][1][RTW89_IC][35] = 50,
- [2][1][RTW89_ACMA][35] = 36,
- [2][1][RTW89_FCC][37] = 42,
+ [2][1][RTW89_IC][35] = 48,
+ [2][1][RTW89_KCC][35] = 46,
+ [2][1][RTW89_ACMA][35] = 40,
+ [2][1][RTW89_CN][35] = 127,
+ [2][1][RTW89_UK][35] = 40,
+ [2][1][RTW89_FCC][37] = 52,
[2][1][RTW89_ETSI][37] = 127,
[2][1][RTW89_MKK][37] = 66,
- [2][1][RTW89_IC][37] = 50,
- [2][1][RTW89_ACMA][37] = 60,
- [2][1][RTW89_FCC][38] = 76,
+ [2][1][RTW89_IC][37] = 52,
+ [2][1][RTW89_KCC][37] = 46,
+ [2][1][RTW89_ACMA][37] = 52,
+ [2][1][RTW89_CN][37] = 127,
+ [2][1][RTW89_UK][37] = 42,
+ [2][1][RTW89_FCC][38] = 78,
[2][1][RTW89_ETSI][38] = 16,
[2][1][RTW89_MKK][38] = 127,
- [2][1][RTW89_IC][38] = 84,
- [2][1][RTW89_ACMA][38] = 84,
- [2][1][RTW89_FCC][40] = 76,
+ [2][1][RTW89_IC][38] = 78,
+ [2][1][RTW89_KCC][38] = 46,
+ [2][1][RTW89_ACMA][38] = 78,
+ [2][1][RTW89_CN][38] = 56,
+ [2][1][RTW89_UK][38] = 42,
+ [2][1][RTW89_FCC][40] = 78,
[2][1][RTW89_ETSI][40] = 16,
[2][1][RTW89_MKK][40] = 127,
- [2][1][RTW89_IC][40] = 84,
- [2][1][RTW89_ACMA][40] = 84,
- [2][1][RTW89_FCC][42] = 76,
+ [2][1][RTW89_IC][40] = 78,
+ [2][1][RTW89_KCC][40] = 46,
+ [2][1][RTW89_ACMA][40] = 78,
+ [2][1][RTW89_CN][40] = 56,
+ [2][1][RTW89_UK][40] = 42,
+ [2][1][RTW89_FCC][42] = 78,
[2][1][RTW89_ETSI][42] = 16,
[2][1][RTW89_MKK][42] = 127,
- [2][1][RTW89_IC][42] = 84,
- [2][1][RTW89_ACMA][42] = 84,
- [2][1][RTW89_FCC][44] = 76,
+ [2][1][RTW89_IC][42] = 78,
+ [2][1][RTW89_KCC][42] = 46,
+ [2][1][RTW89_ACMA][42] = 78,
+ [2][1][RTW89_CN][42] = 56,
+ [2][1][RTW89_UK][42] = 42,
+ [2][1][RTW89_FCC][44] = 74,
[2][1][RTW89_ETSI][44] = 16,
[2][1][RTW89_MKK][44] = 127,
- [2][1][RTW89_IC][44] = 84,
- [2][1][RTW89_ACMA][44] = 84,
- [2][1][RTW89_FCC][46] = 76,
+ [2][1][RTW89_IC][44] = 74,
+ [2][1][RTW89_KCC][44] = 46,
+ [2][1][RTW89_ACMA][44] = 74,
+ [2][1][RTW89_CN][44] = 56,
+ [2][1][RTW89_UK][44] = 42,
+ [2][1][RTW89_FCC][46] = 74,
[2][1][RTW89_ETSI][46] = 16,
[2][1][RTW89_MKK][46] = 127,
- [2][1][RTW89_IC][46] = 84,
- [2][1][RTW89_ACMA][46] = 84,
- [2][1][RTW89_FCC][48] = 36,
+ [2][1][RTW89_IC][46] = 74,
+ [2][1][RTW89_KCC][46] = 46,
+ [2][1][RTW89_ACMA][46] = 74,
+ [2][1][RTW89_CN][46] = 56,
+ [2][1][RTW89_UK][46] = 42,
+ [2][1][RTW89_FCC][48] = 40,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
[2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
- [2][1][RTW89_FCC][50] = 36,
+ [2][1][RTW89_CN][48] = 127,
+ [2][1][RTW89_UK][48] = 127,
+ [2][1][RTW89_FCC][50] = 40,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
[2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
- [2][1][RTW89_FCC][52] = 36,
+ [2][1][RTW89_CN][50] = 127,
+ [2][1][RTW89_UK][50] = 127,
+ [2][1][RTW89_FCC][52] = 40,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
[2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
+ [2][1][RTW89_CN][52] = 127,
+ [2][1][RTW89_UK][52] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
- [0][0][RTW89_WW][0] = 76,
- [0][0][RTW89_WW][2] = 76,
- [0][0][RTW89_WW][4] = 76,
- [0][0][RTW89_WW][6] = 76,
- [0][0][RTW89_WW][8] = 76,
- [0][0][RTW89_WW][10] = 76,
- [0][0][RTW89_WW][12] = 76,
- [0][0][RTW89_WW][14] = 76,
- [0][0][RTW89_WW][15] = 76,
- [0][0][RTW89_WW][17] = 76,
- [0][0][RTW89_WW][19] = 76,
- [0][0][RTW89_WW][21] = 76,
- [0][0][RTW89_WW][23] = 76,
- [0][0][RTW89_WW][25] = 76,
- [0][0][RTW89_WW][27] = 76,
- [0][0][RTW89_WW][29] = 76,
- [0][0][RTW89_WW][30] = 76,
- [0][0][RTW89_WW][32] = 76,
- [0][0][RTW89_WW][34] = 76,
- [0][0][RTW89_WW][36] = 76,
- [0][0][RTW89_WW][38] = 76,
- [0][0][RTW89_WW][40] = 76,
- [0][0][RTW89_WW][42] = 76,
- [0][0][RTW89_WW][44] = 76,
- [0][0][RTW89_WW][45] = 76,
- [0][0][RTW89_WW][47] = 76,
- [0][0][RTW89_WW][49] = 76,
- [0][0][RTW89_WW][51] = 76,
- [0][0][RTW89_WW][53] = 76,
- [0][0][RTW89_WW][55] = 76,
- [0][0][RTW89_WW][57] = 76,
- [0][0][RTW89_WW][59] = 76,
- [0][0][RTW89_WW][60] = 76,
- [0][0][RTW89_WW][62] = 76,
- [0][0][RTW89_WW][64] = 76,
- [0][0][RTW89_WW][66] = 76,
- [0][0][RTW89_WW][68] = 76,
- [0][0][RTW89_WW][70] = 76,
- [0][0][RTW89_WW][72] = 76,
- [0][0][RTW89_WW][74] = 76,
- [0][0][RTW89_WW][75] = 76,
- [0][0][RTW89_WW][77] = 76,
- [0][0][RTW89_WW][79] = 76,
- [0][0][RTW89_WW][81] = 76,
- [0][0][RTW89_WW][83] = 76,
- [0][0][RTW89_WW][85] = 76,
- [0][0][RTW89_WW][87] = 76,
- [0][0][RTW89_WW][89] = 76,
- [0][0][RTW89_WW][90] = 76,
- [0][0][RTW89_WW][92] = 76,
- [0][0][RTW89_WW][94] = 76,
- [0][0][RTW89_WW][96] = 76,
- [0][0][RTW89_WW][98] = 76,
- [0][0][RTW89_WW][100] = 76,
- [0][0][RTW89_WW][102] = 76,
- [0][0][RTW89_WW][104] = 76,
- [0][0][RTW89_WW][105] = 76,
- [0][0][RTW89_WW][107] = 76,
- [0][0][RTW89_WW][109] = 76,
+ [0][0][RTW89_WW][0] = -16,
+ [0][0][RTW89_WW][2] = -18,
+ [0][0][RTW89_WW][4] = -18,
+ [0][0][RTW89_WW][6] = -18,
+ [0][0][RTW89_WW][8] = -18,
+ [0][0][RTW89_WW][10] = -18,
+ [0][0][RTW89_WW][12] = -18,
+ [0][0][RTW89_WW][14] = -18,
+ [0][0][RTW89_WW][15] = -18,
+ [0][0][RTW89_WW][17] = -18,
+ [0][0][RTW89_WW][19] = -18,
+ [0][0][RTW89_WW][21] = -18,
+ [0][0][RTW89_WW][23] = -18,
+ [0][0][RTW89_WW][25] = -18,
+ [0][0][RTW89_WW][27] = -18,
+ [0][0][RTW89_WW][29] = -18,
+ [0][0][RTW89_WW][30] = -18,
+ [0][0][RTW89_WW][32] = -18,
+ [0][0][RTW89_WW][34] = -18,
+ [0][0][RTW89_WW][36] = -18,
+ [0][0][RTW89_WW][38] = -18,
+ [0][0][RTW89_WW][40] = -18,
+ [0][0][RTW89_WW][42] = -18,
+ [0][0][RTW89_WW][44] = -16,
+ [0][0][RTW89_WW][45] = -16,
+ [0][0][RTW89_WW][47] = -18,
+ [0][0][RTW89_WW][49] = -18,
+ [0][0][RTW89_WW][51] = -18,
+ [0][0][RTW89_WW][53] = -16,
+ [0][0][RTW89_WW][55] = -18,
+ [0][0][RTW89_WW][57] = -18,
+ [0][0][RTW89_WW][59] = -18,
+ [0][0][RTW89_WW][60] = -18,
+ [0][0][RTW89_WW][62] = -18,
+ [0][0][RTW89_WW][64] = -18,
+ [0][0][RTW89_WW][66] = -18,
+ [0][0][RTW89_WW][68] = -18,
+ [0][0][RTW89_WW][70] = -16,
+ [0][0][RTW89_WW][72] = -18,
+ [0][0][RTW89_WW][74] = -18,
+ [0][0][RTW89_WW][75] = -18,
+ [0][0][RTW89_WW][77] = -18,
+ [0][0][RTW89_WW][79] = -18,
+ [0][0][RTW89_WW][81] = -18,
+ [0][0][RTW89_WW][83] = -18,
+ [0][0][RTW89_WW][85] = -18,
+ [0][0][RTW89_WW][87] = -16,
+ [0][0][RTW89_WW][89] = -16,
+ [0][0][RTW89_WW][90] = -16,
+ [0][0][RTW89_WW][92] = -16,
+ [0][0][RTW89_WW][94] = -16,
+ [0][0][RTW89_WW][96] = -16,
+ [0][0][RTW89_WW][98] = -16,
+ [0][0][RTW89_WW][100] = -16,
+ [0][0][RTW89_WW][102] = -16,
+ [0][0][RTW89_WW][104] = -16,
+ [0][0][RTW89_WW][105] = -16,
+ [0][0][RTW89_WW][107] = -12,
+ [0][0][RTW89_WW][109] = -12,
[0][0][RTW89_WW][111] = 0,
[0][0][RTW89_WW][113] = 0,
[0][0][RTW89_WW][115] = 0,
[0][0][RTW89_WW][117] = 0,
[0][0][RTW89_WW][119] = 0,
- [0][1][RTW89_WW][0] = 76,
- [0][1][RTW89_WW][2] = 76,
- [0][1][RTW89_WW][4] = 76,
- [0][1][RTW89_WW][6] = 76,
- [0][1][RTW89_WW][8] = 76,
- [0][1][RTW89_WW][10] = 76,
- [0][1][RTW89_WW][12] = 76,
- [0][1][RTW89_WW][14] = 76,
- [0][1][RTW89_WW][15] = 76,
- [0][1][RTW89_WW][17] = 76,
- [0][1][RTW89_WW][19] = 76,
- [0][1][RTW89_WW][21] = 76,
- [0][1][RTW89_WW][23] = 76,
- [0][1][RTW89_WW][25] = 76,
- [0][1][RTW89_WW][27] = 76,
- [0][1][RTW89_WW][29] = 76,
- [0][1][RTW89_WW][30] = 76,
- [0][1][RTW89_WW][32] = 76,
- [0][1][RTW89_WW][34] = 76,
- [0][1][RTW89_WW][36] = 76,
- [0][1][RTW89_WW][38] = 76,
- [0][1][RTW89_WW][40] = 76,
- [0][1][RTW89_WW][42] = 76,
- [0][1][RTW89_WW][44] = 76,
- [0][1][RTW89_WW][45] = 76,
- [0][1][RTW89_WW][47] = 76,
- [0][1][RTW89_WW][49] = 76,
- [0][1][RTW89_WW][51] = 76,
- [0][1][RTW89_WW][53] = 76,
- [0][1][RTW89_WW][55] = 76,
- [0][1][RTW89_WW][57] = 76,
- [0][1][RTW89_WW][59] = 76,
- [0][1][RTW89_WW][60] = 76,
- [0][1][RTW89_WW][62] = 76,
- [0][1][RTW89_WW][64] = 76,
- [0][1][RTW89_WW][66] = 76,
- [0][1][RTW89_WW][68] = 76,
- [0][1][RTW89_WW][70] = 76,
- [0][1][RTW89_WW][72] = 76,
- [0][1][RTW89_WW][74] = 76,
- [0][1][RTW89_WW][75] = 76,
- [0][1][RTW89_WW][77] = 76,
- [0][1][RTW89_WW][79] = 76,
- [0][1][RTW89_WW][81] = 76,
- [0][1][RTW89_WW][83] = 76,
- [0][1][RTW89_WW][85] = 76,
- [0][1][RTW89_WW][87] = 76,
- [0][1][RTW89_WW][89] = 76,
- [0][1][RTW89_WW][90] = 76,
- [0][1][RTW89_WW][92] = 76,
- [0][1][RTW89_WW][94] = 76,
- [0][1][RTW89_WW][96] = 76,
- [0][1][RTW89_WW][98] = 76,
- [0][1][RTW89_WW][100] = 76,
- [0][1][RTW89_WW][102] = 76,
- [0][1][RTW89_WW][104] = 76,
- [0][1][RTW89_WW][105] = 76,
- [0][1][RTW89_WW][107] = 76,
- [0][1][RTW89_WW][109] = 76,
+ [0][1][RTW89_WW][0] = -40,
+ [0][1][RTW89_WW][2] = -40,
+ [0][1][RTW89_WW][4] = -40,
+ [0][1][RTW89_WW][6] = -40,
+ [0][1][RTW89_WW][8] = -40,
+ [0][1][RTW89_WW][10] = -40,
+ [0][1][RTW89_WW][12] = -40,
+ [0][1][RTW89_WW][14] = -40,
+ [0][1][RTW89_WW][15] = -40,
+ [0][1][RTW89_WW][17] = -40,
+ [0][1][RTW89_WW][19] = -40,
+ [0][1][RTW89_WW][21] = -40,
+ [0][1][RTW89_WW][23] = -40,
+ [0][1][RTW89_WW][25] = -40,
+ [0][1][RTW89_WW][27] = -40,
+ [0][1][RTW89_WW][29] = -40,
+ [0][1][RTW89_WW][30] = -40,
+ [0][1][RTW89_WW][32] = -40,
+ [0][1][RTW89_WW][34] = -40,
+ [0][1][RTW89_WW][36] = -40,
+ [0][1][RTW89_WW][38] = -40,
+ [0][1][RTW89_WW][40] = -40,
+ [0][1][RTW89_WW][42] = -40,
+ [0][1][RTW89_WW][44] = -40,
+ [0][1][RTW89_WW][45] = -40,
+ [0][1][RTW89_WW][47] = -40,
+ [0][1][RTW89_WW][49] = -40,
+ [0][1][RTW89_WW][51] = -40,
+ [0][1][RTW89_WW][53] = -40,
+ [0][1][RTW89_WW][55] = -40,
+ [0][1][RTW89_WW][57] = -40,
+ [0][1][RTW89_WW][59] = -40,
+ [0][1][RTW89_WW][60] = -40,
+ [0][1][RTW89_WW][62] = -40,
+ [0][1][RTW89_WW][64] = -40,
+ [0][1][RTW89_WW][66] = -40,
+ [0][1][RTW89_WW][68] = -40,
+ [0][1][RTW89_WW][70] = -38,
+ [0][1][RTW89_WW][72] = -38,
+ [0][1][RTW89_WW][74] = -38,
+ [0][1][RTW89_WW][75] = -38,
+ [0][1][RTW89_WW][77] = -38,
+ [0][1][RTW89_WW][79] = -38,
+ [0][1][RTW89_WW][81] = -38,
+ [0][1][RTW89_WW][83] = -38,
+ [0][1][RTW89_WW][85] = -38,
+ [0][1][RTW89_WW][87] = -40,
+ [0][1][RTW89_WW][89] = -38,
+ [0][1][RTW89_WW][90] = -38,
+ [0][1][RTW89_WW][92] = -38,
+ [0][1][RTW89_WW][94] = -38,
+ [0][1][RTW89_WW][96] = -38,
+ [0][1][RTW89_WW][98] = -38,
+ [0][1][RTW89_WW][100] = -38,
+ [0][1][RTW89_WW][102] = -38,
+ [0][1][RTW89_WW][104] = -38,
+ [0][1][RTW89_WW][105] = -38,
+ [0][1][RTW89_WW][107] = -34,
+ [0][1][RTW89_WW][109] = -34,
[0][1][RTW89_WW][111] = 0,
[0][1][RTW89_WW][113] = 0,
[0][1][RTW89_WW][115] = 0,
[0][1][RTW89_WW][117] = 0,
[0][1][RTW89_WW][119] = 0,
- [1][0][RTW89_WW][0] = 76,
- [1][0][RTW89_WW][2] = 76,
- [1][0][RTW89_WW][4] = 76,
- [1][0][RTW89_WW][6] = 76,
- [1][0][RTW89_WW][8] = 76,
- [1][0][RTW89_WW][10] = 76,
- [1][0][RTW89_WW][12] = 76,
- [1][0][RTW89_WW][14] = 76,
- [1][0][RTW89_WW][15] = 76,
- [1][0][RTW89_WW][17] = 76,
- [1][0][RTW89_WW][19] = 76,
- [1][0][RTW89_WW][21] = 76,
- [1][0][RTW89_WW][23] = 76,
- [1][0][RTW89_WW][25] = 76,
- [1][0][RTW89_WW][27] = 76,
- [1][0][RTW89_WW][29] = 76,
- [1][0][RTW89_WW][30] = 76,
- [1][0][RTW89_WW][32] = 76,
- [1][0][RTW89_WW][34] = 76,
- [1][0][RTW89_WW][36] = 76,
- [1][0][RTW89_WW][38] = 76,
- [1][0][RTW89_WW][40] = 76,
- [1][0][RTW89_WW][42] = 76,
- [1][0][RTW89_WW][44] = 76,
- [1][0][RTW89_WW][45] = 76,
- [1][0][RTW89_WW][47] = 76,
- [1][0][RTW89_WW][49] = 76,
- [1][0][RTW89_WW][51] = 76,
- [1][0][RTW89_WW][53] = 76,
- [1][0][RTW89_WW][55] = 76,
- [1][0][RTW89_WW][57] = 76,
- [1][0][RTW89_WW][59] = 76,
- [1][0][RTW89_WW][60] = 76,
- [1][0][RTW89_WW][62] = 76,
- [1][0][RTW89_WW][64] = 76,
- [1][0][RTW89_WW][66] = 76,
- [1][0][RTW89_WW][68] = 76,
- [1][0][RTW89_WW][70] = 76,
- [1][0][RTW89_WW][72] = 76,
- [1][0][RTW89_WW][74] = 76,
- [1][0][RTW89_WW][75] = 76,
- [1][0][RTW89_WW][77] = 76,
- [1][0][RTW89_WW][79] = 76,
- [1][0][RTW89_WW][81] = 76,
- [1][0][RTW89_WW][83] = 76,
- [1][0][RTW89_WW][85] = 76,
- [1][0][RTW89_WW][87] = 76,
- [1][0][RTW89_WW][89] = 76,
- [1][0][RTW89_WW][90] = 76,
- [1][0][RTW89_WW][92] = 76,
- [1][0][RTW89_WW][94] = 76,
- [1][0][RTW89_WW][96] = 76,
- [1][0][RTW89_WW][98] = 76,
- [1][0][RTW89_WW][100] = 76,
- [1][0][RTW89_WW][102] = 76,
- [1][0][RTW89_WW][104] = 76,
- [1][0][RTW89_WW][105] = 76,
- [1][0][RTW89_WW][107] = 76,
- [1][0][RTW89_WW][109] = 76,
+ [1][0][RTW89_WW][0] = -4,
+ [1][0][RTW89_WW][2] = -4,
+ [1][0][RTW89_WW][4] = -4,
+ [1][0][RTW89_WW][6] = -4,
+ [1][0][RTW89_WW][8] = -4,
+ [1][0][RTW89_WW][10] = -4,
+ [1][0][RTW89_WW][12] = -4,
+ [1][0][RTW89_WW][14] = -4,
+ [1][0][RTW89_WW][15] = -4,
+ [1][0][RTW89_WW][17] = -4,
+ [1][0][RTW89_WW][19] = -4,
+ [1][0][RTW89_WW][21] = -4,
+ [1][0][RTW89_WW][23] = -4,
+ [1][0][RTW89_WW][25] = -4,
+ [1][0][RTW89_WW][27] = -4,
+ [1][0][RTW89_WW][29] = -4,
+ [1][0][RTW89_WW][30] = -4,
+ [1][0][RTW89_WW][32] = -4,
+ [1][0][RTW89_WW][34] = -4,
+ [1][0][RTW89_WW][36] = -4,
+ [1][0][RTW89_WW][38] = -4,
+ [1][0][RTW89_WW][40] = -4,
+ [1][0][RTW89_WW][42] = -4,
+ [1][0][RTW89_WW][44] = -4,
+ [1][0][RTW89_WW][45] = -4,
+ [1][0][RTW89_WW][47] = -4,
+ [1][0][RTW89_WW][49] = -4,
+ [1][0][RTW89_WW][51] = -4,
+ [1][0][RTW89_WW][53] = -4,
+ [1][0][RTW89_WW][55] = -4,
+ [1][0][RTW89_WW][57] = -4,
+ [1][0][RTW89_WW][59] = -4,
+ [1][0][RTW89_WW][60] = -4,
+ [1][0][RTW89_WW][62] = -4,
+ [1][0][RTW89_WW][64] = -4,
+ [1][0][RTW89_WW][66] = -4,
+ [1][0][RTW89_WW][68] = -4,
+ [1][0][RTW89_WW][70] = -4,
+ [1][0][RTW89_WW][72] = -4,
+ [1][0][RTW89_WW][74] = -4,
+ [1][0][RTW89_WW][75] = -4,
+ [1][0][RTW89_WW][77] = -4,
+ [1][0][RTW89_WW][79] = -4,
+ [1][0][RTW89_WW][81] = -4,
+ [1][0][RTW89_WW][83] = -4,
+ [1][0][RTW89_WW][85] = -4,
+ [1][0][RTW89_WW][87] = -4,
+ [1][0][RTW89_WW][89] = -4,
+ [1][0][RTW89_WW][90] = -4,
+ [1][0][RTW89_WW][92] = -4,
+ [1][0][RTW89_WW][94] = -4,
+ [1][0][RTW89_WW][96] = -4,
+ [1][0][RTW89_WW][98] = -4,
+ [1][0][RTW89_WW][100] = -4,
+ [1][0][RTW89_WW][102] = -4,
+ [1][0][RTW89_WW][104] = -4,
+ [1][0][RTW89_WW][105] = -4,
+ [1][0][RTW89_WW][107] = 1,
+ [1][0][RTW89_WW][109] = 2,
[1][0][RTW89_WW][111] = 0,
[1][0][RTW89_WW][113] = 0,
[1][0][RTW89_WW][115] = 0,
[1][0][RTW89_WW][117] = 0,
[1][0][RTW89_WW][119] = 0,
- [1][1][RTW89_WW][0] = 76,
- [1][1][RTW89_WW][2] = 76,
- [1][1][RTW89_WW][4] = 76,
- [1][1][RTW89_WW][6] = 76,
- [1][1][RTW89_WW][8] = 76,
- [1][1][RTW89_WW][10] = 76,
- [1][1][RTW89_WW][12] = 76,
- [1][1][RTW89_WW][14] = 76,
- [1][1][RTW89_WW][15] = 76,
- [1][1][RTW89_WW][17] = 76,
- [1][1][RTW89_WW][19] = 76,
- [1][1][RTW89_WW][21] = 76,
- [1][1][RTW89_WW][23] = 76,
- [1][1][RTW89_WW][25] = 76,
- [1][1][RTW89_WW][27] = 76,
- [1][1][RTW89_WW][29] = 76,
- [1][1][RTW89_WW][30] = 76,
- [1][1][RTW89_WW][32] = 76,
- [1][1][RTW89_WW][34] = 76,
- [1][1][RTW89_WW][36] = 76,
- [1][1][RTW89_WW][38] = 76,
- [1][1][RTW89_WW][40] = 76,
- [1][1][RTW89_WW][42] = 76,
- [1][1][RTW89_WW][44] = 76,
- [1][1][RTW89_WW][45] = 76,
- [1][1][RTW89_WW][47] = 76,
- [1][1][RTW89_WW][49] = 76,
- [1][1][RTW89_WW][51] = 76,
- [1][1][RTW89_WW][53] = 76,
- [1][1][RTW89_WW][55] = 76,
- [1][1][RTW89_WW][57] = 76,
- [1][1][RTW89_WW][59] = 76,
- [1][1][RTW89_WW][60] = 76,
- [1][1][RTW89_WW][62] = 76,
- [1][1][RTW89_WW][64] = 76,
- [1][1][RTW89_WW][66] = 76,
- [1][1][RTW89_WW][68] = 76,
- [1][1][RTW89_WW][70] = 76,
- [1][1][RTW89_WW][72] = 76,
- [1][1][RTW89_WW][74] = 76,
- [1][1][RTW89_WW][75] = 76,
- [1][1][RTW89_WW][77] = 76,
- [1][1][RTW89_WW][79] = 76,
- [1][1][RTW89_WW][81] = 76,
- [1][1][RTW89_WW][83] = 76,
- [1][1][RTW89_WW][85] = 76,
- [1][1][RTW89_WW][87] = 76,
- [1][1][RTW89_WW][89] = 76,
- [1][1][RTW89_WW][90] = 76,
- [1][1][RTW89_WW][92] = 76,
- [1][1][RTW89_WW][94] = 76,
- [1][1][RTW89_WW][96] = 76,
- [1][1][RTW89_WW][98] = 76,
- [1][1][RTW89_WW][100] = 76,
- [1][1][RTW89_WW][102] = 76,
- [1][1][RTW89_WW][104] = 76,
- [1][1][RTW89_WW][105] = 76,
- [1][1][RTW89_WW][107] = 76,
- [1][1][RTW89_WW][109] = 76,
+ [1][1][RTW89_WW][0] = -26,
+ [1][1][RTW89_WW][2] = -28,
+ [1][1][RTW89_WW][4] = -28,
+ [1][1][RTW89_WW][6] = -28,
+ [1][1][RTW89_WW][8] = -28,
+ [1][1][RTW89_WW][10] = -28,
+ [1][1][RTW89_WW][12] = -28,
+ [1][1][RTW89_WW][14] = -28,
+ [1][1][RTW89_WW][15] = -28,
+ [1][1][RTW89_WW][17] = -28,
+ [1][1][RTW89_WW][19] = -28,
+ [1][1][RTW89_WW][21] = -28,
+ [1][1][RTW89_WW][23] = -28,
+ [1][1][RTW89_WW][25] = -28,
+ [1][1][RTW89_WW][27] = -28,
+ [1][1][RTW89_WW][29] = -28,
+ [1][1][RTW89_WW][30] = -28,
+ [1][1][RTW89_WW][32] = -28,
+ [1][1][RTW89_WW][34] = -28,
+ [1][1][RTW89_WW][36] = -28,
+ [1][1][RTW89_WW][38] = -28,
+ [1][1][RTW89_WW][40] = -28,
+ [1][1][RTW89_WW][42] = -28,
+ [1][1][RTW89_WW][44] = -28,
+ [1][1][RTW89_WW][45] = -26,
+ [1][1][RTW89_WW][47] = -28,
+ [1][1][RTW89_WW][49] = -28,
+ [1][1][RTW89_WW][51] = -28,
+ [1][1][RTW89_WW][53] = -26,
+ [1][1][RTW89_WW][55] = -28,
+ [1][1][RTW89_WW][57] = -28,
+ [1][1][RTW89_WW][59] = -28,
+ [1][1][RTW89_WW][60] = -28,
+ [1][1][RTW89_WW][62] = -28,
+ [1][1][RTW89_WW][64] = -28,
+ [1][1][RTW89_WW][66] = -28,
+ [1][1][RTW89_WW][68] = -28,
+ [1][1][RTW89_WW][70] = -26,
+ [1][1][RTW89_WW][72] = -28,
+ [1][1][RTW89_WW][74] = -28,
+ [1][1][RTW89_WW][75] = -28,
+ [1][1][RTW89_WW][77] = -28,
+ [1][1][RTW89_WW][79] = -28,
+ [1][1][RTW89_WW][81] = -28,
+ [1][1][RTW89_WW][83] = -28,
+ [1][1][RTW89_WW][85] = -28,
+ [1][1][RTW89_WW][87] = -28,
+ [1][1][RTW89_WW][89] = -26,
+ [1][1][RTW89_WW][90] = -26,
+ [1][1][RTW89_WW][92] = -26,
+ [1][1][RTW89_WW][94] = -26,
+ [1][1][RTW89_WW][96] = -26,
+ [1][1][RTW89_WW][98] = -26,
+ [1][1][RTW89_WW][100] = -26,
+ [1][1][RTW89_WW][102] = -26,
+ [1][1][RTW89_WW][104] = -26,
+ [1][1][RTW89_WW][105] = -26,
+ [1][1][RTW89_WW][107] = -22,
+ [1][1][RTW89_WW][109] = -22,
[1][1][RTW89_WW][111] = 0,
[1][1][RTW89_WW][113] = 0,
[1][1][RTW89_WW][115] = 0,
[1][1][RTW89_WW][117] = 0,
[1][1][RTW89_WW][119] = 0,
- [2][0][RTW89_WW][0] = 76,
- [2][0][RTW89_WW][2] = 76,
- [2][0][RTW89_WW][4] = 76,
- [2][0][RTW89_WW][6] = 76,
- [2][0][RTW89_WW][8] = 76,
- [2][0][RTW89_WW][10] = 76,
- [2][0][RTW89_WW][12] = 76,
- [2][0][RTW89_WW][14] = 76,
- [2][0][RTW89_WW][15] = 76,
- [2][0][RTW89_WW][17] = 76,
- [2][0][RTW89_WW][19] = 76,
- [2][0][RTW89_WW][21] = 76,
- [2][0][RTW89_WW][23] = 76,
- [2][0][RTW89_WW][25] = 76,
- [2][0][RTW89_WW][27] = 76,
- [2][0][RTW89_WW][29] = 76,
- [2][0][RTW89_WW][30] = 76,
- [2][0][RTW89_WW][32] = 76,
- [2][0][RTW89_WW][34] = 76,
- [2][0][RTW89_WW][36] = 76,
- [2][0][RTW89_WW][38] = 76,
- [2][0][RTW89_WW][40] = 76,
- [2][0][RTW89_WW][42] = 76,
- [2][0][RTW89_WW][44] = 76,
- [2][0][RTW89_WW][45] = 76,
- [2][0][RTW89_WW][47] = 76,
- [2][0][RTW89_WW][49] = 76,
- [2][0][RTW89_WW][51] = 76,
- [2][0][RTW89_WW][53] = 76,
- [2][0][RTW89_WW][55] = 76,
- [2][0][RTW89_WW][57] = 76,
- [2][0][RTW89_WW][59] = 76,
- [2][0][RTW89_WW][60] = 76,
- [2][0][RTW89_WW][62] = 76,
- [2][0][RTW89_WW][64] = 76,
- [2][0][RTW89_WW][66] = 76,
- [2][0][RTW89_WW][68] = 76,
- [2][0][RTW89_WW][70] = 76,
- [2][0][RTW89_WW][72] = 76,
- [2][0][RTW89_WW][74] = 76,
- [2][0][RTW89_WW][75] = 76,
- [2][0][RTW89_WW][77] = 76,
- [2][0][RTW89_WW][79] = 76,
- [2][0][RTW89_WW][81] = 76,
- [2][0][RTW89_WW][83] = 76,
- [2][0][RTW89_WW][85] = 76,
- [2][0][RTW89_WW][87] = 76,
- [2][0][RTW89_WW][89] = 76,
- [2][0][RTW89_WW][90] = 76,
- [2][0][RTW89_WW][92] = 76,
- [2][0][RTW89_WW][94] = 76,
- [2][0][RTW89_WW][96] = 76,
- [2][0][RTW89_WW][98] = 76,
- [2][0][RTW89_WW][100] = 76,
- [2][0][RTW89_WW][102] = 76,
- [2][0][RTW89_WW][104] = 76,
- [2][0][RTW89_WW][105] = 76,
- [2][0][RTW89_WW][107] = 76,
- [2][0][RTW89_WW][109] = 76,
+ [2][0][RTW89_WW][0] = 8,
+ [2][0][RTW89_WW][2] = 8,
+ [2][0][RTW89_WW][4] = 8,
+ [2][0][RTW89_WW][6] = 8,
+ [2][0][RTW89_WW][8] = 8,
+ [2][0][RTW89_WW][10] = 8,
+ [2][0][RTW89_WW][12] = 8,
+ [2][0][RTW89_WW][14] = 8,
+ [2][0][RTW89_WW][15] = 8,
+ [2][0][RTW89_WW][17] = 8,
+ [2][0][RTW89_WW][19] = 8,
+ [2][0][RTW89_WW][21] = 8,
+ [2][0][RTW89_WW][23] = 8,
+ [2][0][RTW89_WW][25] = 8,
+ [2][0][RTW89_WW][27] = 8,
+ [2][0][RTW89_WW][29] = 8,
+ [2][0][RTW89_WW][30] = 8,
+ [2][0][RTW89_WW][32] = 8,
+ [2][0][RTW89_WW][34] = 8,
+ [2][0][RTW89_WW][36] = 8,
+ [2][0][RTW89_WW][38] = 8,
+ [2][0][RTW89_WW][40] = 8,
+ [2][0][RTW89_WW][42] = 8,
+ [2][0][RTW89_WW][44] = 8,
+ [2][0][RTW89_WW][45] = 8,
+ [2][0][RTW89_WW][47] = 8,
+ [2][0][RTW89_WW][49] = 8,
+ [2][0][RTW89_WW][51] = 8,
+ [2][0][RTW89_WW][53] = 8,
+ [2][0][RTW89_WW][55] = 8,
+ [2][0][RTW89_WW][57] = 8,
+ [2][0][RTW89_WW][59] = 8,
+ [2][0][RTW89_WW][60] = 8,
+ [2][0][RTW89_WW][62] = 8,
+ [2][0][RTW89_WW][64] = 8,
+ [2][0][RTW89_WW][66] = 8,
+ [2][0][RTW89_WW][68] = 8,
+ [2][0][RTW89_WW][70] = 8,
+ [2][0][RTW89_WW][72] = 8,
+ [2][0][RTW89_WW][74] = 8,
+ [2][0][RTW89_WW][75] = 8,
+ [2][0][RTW89_WW][77] = 8,
+ [2][0][RTW89_WW][79] = 8,
+ [2][0][RTW89_WW][81] = 8,
+ [2][0][RTW89_WW][83] = 8,
+ [2][0][RTW89_WW][85] = 8,
+ [2][0][RTW89_WW][87] = 8,
+ [2][0][RTW89_WW][89] = 8,
+ [2][0][RTW89_WW][90] = 8,
+ [2][0][RTW89_WW][92] = 8,
+ [2][0][RTW89_WW][94] = 8,
+ [2][0][RTW89_WW][96] = 8,
+ [2][0][RTW89_WW][98] = 8,
+ [2][0][RTW89_WW][100] = 8,
+ [2][0][RTW89_WW][102] = 8,
+ [2][0][RTW89_WW][104] = 8,
+ [2][0][RTW89_WW][105] = 8,
+ [2][0][RTW89_WW][107] = 10,
+ [2][0][RTW89_WW][109] = 12,
[2][0][RTW89_WW][111] = 0,
[2][0][RTW89_WW][113] = 0,
[2][0][RTW89_WW][115] = 0,
[2][0][RTW89_WW][117] = 0,
[2][0][RTW89_WW][119] = 0,
- [2][1][RTW89_WW][0] = 76,
- [2][1][RTW89_WW][2] = 76,
- [2][1][RTW89_WW][4] = 76,
- [2][1][RTW89_WW][6] = 76,
- [2][1][RTW89_WW][8] = 76,
- [2][1][RTW89_WW][10] = 76,
- [2][1][RTW89_WW][12] = 76,
- [2][1][RTW89_WW][14] = 76,
- [2][1][RTW89_WW][15] = 76,
- [2][1][RTW89_WW][17] = 76,
- [2][1][RTW89_WW][19] = 76,
- [2][1][RTW89_WW][21] = 76,
- [2][1][RTW89_WW][23] = 76,
- [2][1][RTW89_WW][25] = 76,
- [2][1][RTW89_WW][27] = 76,
- [2][1][RTW89_WW][29] = 76,
- [2][1][RTW89_WW][30] = 76,
- [2][1][RTW89_WW][32] = 76,
- [2][1][RTW89_WW][34] = 76,
- [2][1][RTW89_WW][36] = 76,
- [2][1][RTW89_WW][38] = 76,
- [2][1][RTW89_WW][40] = 76,
- [2][1][RTW89_WW][42] = 76,
- [2][1][RTW89_WW][44] = 76,
- [2][1][RTW89_WW][45] = 76,
- [2][1][RTW89_WW][47] = 76,
- [2][1][RTW89_WW][49] = 76,
- [2][1][RTW89_WW][51] = 76,
- [2][1][RTW89_WW][53] = 76,
- [2][1][RTW89_WW][55] = 76,
- [2][1][RTW89_WW][57] = 76,
- [2][1][RTW89_WW][59] = 76,
- [2][1][RTW89_WW][60] = 76,
- [2][1][RTW89_WW][62] = 76,
- [2][1][RTW89_WW][64] = 76,
- [2][1][RTW89_WW][66] = 76,
- [2][1][RTW89_WW][68] = 76,
- [2][1][RTW89_WW][70] = 76,
- [2][1][RTW89_WW][72] = 76,
- [2][1][RTW89_WW][74] = 76,
- [2][1][RTW89_WW][75] = 76,
- [2][1][RTW89_WW][77] = 76,
- [2][1][RTW89_WW][79] = 76,
- [2][1][RTW89_WW][81] = 76,
- [2][1][RTW89_WW][83] = 76,
- [2][1][RTW89_WW][85] = 76,
- [2][1][RTW89_WW][87] = 76,
- [2][1][RTW89_WW][89] = 76,
- [2][1][RTW89_WW][90] = 76,
- [2][1][RTW89_WW][92] = 76,
- [2][1][RTW89_WW][94] = 76,
- [2][1][RTW89_WW][96] = 76,
- [2][1][RTW89_WW][98] = 76,
- [2][1][RTW89_WW][100] = 76,
- [2][1][RTW89_WW][102] = 76,
- [2][1][RTW89_WW][104] = 76,
- [2][1][RTW89_WW][105] = 76,
- [2][1][RTW89_WW][107] = 76,
- [2][1][RTW89_WW][109] = 76,
+ [2][1][RTW89_WW][0] = -16,
+ [2][1][RTW89_WW][2] = -16,
+ [2][1][RTW89_WW][4] = -16,
+ [2][1][RTW89_WW][6] = -16,
+ [2][1][RTW89_WW][8] = -16,
+ [2][1][RTW89_WW][10] = -16,
+ [2][1][RTW89_WW][12] = -16,
+ [2][1][RTW89_WW][14] = -16,
+ [2][1][RTW89_WW][15] = -16,
+ [2][1][RTW89_WW][17] = -16,
+ [2][1][RTW89_WW][19] = -16,
+ [2][1][RTW89_WW][21] = -16,
+ [2][1][RTW89_WW][23] = -16,
+ [2][1][RTW89_WW][25] = -16,
+ [2][1][RTW89_WW][27] = -16,
+ [2][1][RTW89_WW][29] = -16,
+ [2][1][RTW89_WW][30] = -16,
+ [2][1][RTW89_WW][32] = -16,
+ [2][1][RTW89_WW][34] = -16,
+ [2][1][RTW89_WW][36] = -16,
+ [2][1][RTW89_WW][38] = -16,
+ [2][1][RTW89_WW][40] = -16,
+ [2][1][RTW89_WW][42] = -16,
+ [2][1][RTW89_WW][44] = -16,
+ [2][1][RTW89_WW][45] = -16,
+ [2][1][RTW89_WW][47] = -16,
+ [2][1][RTW89_WW][49] = -16,
+ [2][1][RTW89_WW][51] = -16,
+ [2][1][RTW89_WW][53] = -16,
+ [2][1][RTW89_WW][55] = -16,
+ [2][1][RTW89_WW][57] = -16,
+ [2][1][RTW89_WW][59] = -16,
+ [2][1][RTW89_WW][60] = -16,
+ [2][1][RTW89_WW][62] = -16,
+ [2][1][RTW89_WW][64] = -16,
+ [2][1][RTW89_WW][66] = -16,
+ [2][1][RTW89_WW][68] = -16,
+ [2][1][RTW89_WW][70] = -16,
+ [2][1][RTW89_WW][72] = -16,
+ [2][1][RTW89_WW][74] = -16,
+ [2][1][RTW89_WW][75] = -16,
+ [2][1][RTW89_WW][77] = -16,
+ [2][1][RTW89_WW][79] = -16,
+ [2][1][RTW89_WW][81] = -16,
+ [2][1][RTW89_WW][83] = -16,
+ [2][1][RTW89_WW][85] = -18,
+ [2][1][RTW89_WW][87] = -16,
+ [2][1][RTW89_WW][89] = -16,
+ [2][1][RTW89_WW][90] = -16,
+ [2][1][RTW89_WW][92] = -16,
+ [2][1][RTW89_WW][94] = -16,
+ [2][1][RTW89_WW][96] = -16,
+ [2][1][RTW89_WW][98] = -16,
+ [2][1][RTW89_WW][100] = -16,
+ [2][1][RTW89_WW][102] = -16,
+ [2][1][RTW89_WW][104] = -16,
+ [2][1][RTW89_WW][105] = -16,
+ [2][1][RTW89_WW][107] = -12,
+ [2][1][RTW89_WW][109] = -10,
[2][1][RTW89_WW][111] = 0,
[2][1][RTW89_WW][113] = 0,
[2][1][RTW89_WW][115] = 0,
[2][1][RTW89_WW][117] = 0,
[2][1][RTW89_WW][119] = 0,
- [0][0][RTW89_FCC][0] = 76,
- [0][0][RTW89_FCC][2] = 76,
- [0][0][RTW89_FCC][4] = 76,
- [0][0][RTW89_FCC][6] = 76,
- [0][0][RTW89_FCC][8] = 76,
- [0][0][RTW89_FCC][10] = 76,
- [0][0][RTW89_FCC][12] = 76,
- [0][0][RTW89_FCC][14] = 76,
- [0][0][RTW89_FCC][15] = 76,
- [0][0][RTW89_FCC][17] = 76,
- [0][0][RTW89_FCC][19] = 76,
- [0][0][RTW89_FCC][21] = 76,
- [0][0][RTW89_FCC][23] = 76,
- [0][0][RTW89_FCC][25] = 76,
- [0][0][RTW89_FCC][27] = 76,
- [0][0][RTW89_FCC][29] = 76,
- [0][0][RTW89_FCC][30] = 76,
- [0][0][RTW89_FCC][32] = 76,
- [0][0][RTW89_FCC][34] = 76,
- [0][0][RTW89_FCC][36] = 76,
- [0][0][RTW89_FCC][38] = 76,
- [0][0][RTW89_FCC][40] = 76,
- [0][0][RTW89_FCC][42] = 76,
- [0][0][RTW89_FCC][44] = 76,
- [0][0][RTW89_FCC][45] = 76,
- [0][0][RTW89_FCC][47] = 76,
- [0][0][RTW89_FCC][49] = 76,
- [0][0][RTW89_FCC][51] = 76,
- [0][0][RTW89_FCC][53] = 76,
- [0][0][RTW89_FCC][55] = 76,
- [0][0][RTW89_FCC][57] = 76,
- [0][0][RTW89_FCC][59] = 76,
- [0][0][RTW89_FCC][60] = 76,
- [0][0][RTW89_FCC][62] = 76,
- [0][0][RTW89_FCC][64] = 76,
- [0][0][RTW89_FCC][66] = 76,
- [0][0][RTW89_FCC][68] = 76,
- [0][0][RTW89_FCC][70] = 76,
- [0][0][RTW89_FCC][72] = 76,
- [0][0][RTW89_FCC][74] = 76,
- [0][0][RTW89_FCC][75] = 76,
- [0][0][RTW89_FCC][77] = 76,
- [0][0][RTW89_FCC][79] = 76,
- [0][0][RTW89_FCC][81] = 76,
- [0][0][RTW89_FCC][83] = 76,
- [0][0][RTW89_FCC][85] = 76,
- [0][0][RTW89_FCC][87] = 76,
- [0][0][RTW89_FCC][89] = 76,
- [0][0][RTW89_FCC][90] = 76,
- [0][0][RTW89_FCC][92] = 76,
- [0][0][RTW89_FCC][94] = 76,
- [0][0][RTW89_FCC][96] = 76,
- [0][0][RTW89_FCC][98] = 76,
- [0][0][RTW89_FCC][100] = 76,
- [0][0][RTW89_FCC][102] = 76,
- [0][0][RTW89_FCC][104] = 76,
- [0][0][RTW89_FCC][105] = 76,
- [0][0][RTW89_FCC][107] = 76,
- [0][0][RTW89_FCC][109] = 76,
+ [0][0][RTW89_FCC][0] = -16,
+ [0][0][RTW89_ETSI][0] = 32,
+ [0][0][RTW89_FCC][2] = -18,
+ [0][0][RTW89_ETSI][2] = 32,
+ [0][0][RTW89_FCC][4] = -18,
+ [0][0][RTW89_ETSI][4] = 32,
+ [0][0][RTW89_FCC][6] = -18,
+ [0][0][RTW89_ETSI][6] = 32,
+ [0][0][RTW89_FCC][8] = -18,
+ [0][0][RTW89_ETSI][8] = 32,
+ [0][0][RTW89_FCC][10] = -18,
+ [0][0][RTW89_ETSI][10] = 32,
+ [0][0][RTW89_FCC][12] = -18,
+ [0][0][RTW89_ETSI][12] = 32,
+ [0][0][RTW89_FCC][14] = -18,
+ [0][0][RTW89_ETSI][14] = 32,
+ [0][0][RTW89_FCC][15] = -18,
+ [0][0][RTW89_ETSI][15] = 32,
+ [0][0][RTW89_FCC][17] = -18,
+ [0][0][RTW89_ETSI][17] = 32,
+ [0][0][RTW89_FCC][19] = -18,
+ [0][0][RTW89_ETSI][19] = 32,
+ [0][0][RTW89_FCC][21] = -18,
+ [0][0][RTW89_ETSI][21] = 32,
+ [0][0][RTW89_FCC][23] = -18,
+ [0][0][RTW89_ETSI][23] = 32,
+ [0][0][RTW89_FCC][25] = -18,
+ [0][0][RTW89_ETSI][25] = 32,
+ [0][0][RTW89_FCC][27] = -18,
+ [0][0][RTW89_ETSI][27] = 32,
+ [0][0][RTW89_FCC][29] = -18,
+ [0][0][RTW89_ETSI][29] = 32,
+ [0][0][RTW89_FCC][30] = -18,
+ [0][0][RTW89_ETSI][30] = 32,
+ [0][0][RTW89_FCC][32] = -18,
+ [0][0][RTW89_ETSI][32] = 32,
+ [0][0][RTW89_FCC][34] = -18,
+ [0][0][RTW89_ETSI][34] = 32,
+ [0][0][RTW89_FCC][36] = -18,
+ [0][0][RTW89_ETSI][36] = 32,
+ [0][0][RTW89_FCC][38] = -18,
+ [0][0][RTW89_ETSI][38] = 32,
+ [0][0][RTW89_FCC][40] = -18,
+ [0][0][RTW89_ETSI][40] = 32,
+ [0][0][RTW89_FCC][42] = -18,
+ [0][0][RTW89_ETSI][42] = 32,
+ [0][0][RTW89_FCC][44] = -16,
+ [0][0][RTW89_ETSI][44] = 32,
+ [0][0][RTW89_FCC][45] = -16,
+ [0][0][RTW89_ETSI][45] = 127,
+ [0][0][RTW89_FCC][47] = -18,
+ [0][0][RTW89_ETSI][47] = 127,
+ [0][0][RTW89_FCC][49] = -18,
+ [0][0][RTW89_ETSI][49] = 127,
+ [0][0][RTW89_FCC][51] = -18,
+ [0][0][RTW89_ETSI][51] = 127,
+ [0][0][RTW89_FCC][53] = -16,
+ [0][0][RTW89_ETSI][53] = 127,
+ [0][0][RTW89_FCC][55] = -18,
+ [0][0][RTW89_ETSI][55] = 127,
+ [0][0][RTW89_FCC][57] = -18,
+ [0][0][RTW89_ETSI][57] = 127,
+ [0][0][RTW89_FCC][59] = -18,
+ [0][0][RTW89_ETSI][59] = 127,
+ [0][0][RTW89_FCC][60] = -18,
+ [0][0][RTW89_ETSI][60] = 127,
+ [0][0][RTW89_FCC][62] = -18,
+ [0][0][RTW89_ETSI][62] = 127,
+ [0][0][RTW89_FCC][64] = -18,
+ [0][0][RTW89_ETSI][64] = 127,
+ [0][0][RTW89_FCC][66] = -18,
+ [0][0][RTW89_ETSI][66] = 127,
+ [0][0][RTW89_FCC][68] = -18,
+ [0][0][RTW89_ETSI][68] = 127,
+ [0][0][RTW89_FCC][70] = -16,
+ [0][0][RTW89_ETSI][70] = 127,
+ [0][0][RTW89_FCC][72] = -18,
+ [0][0][RTW89_ETSI][72] = 127,
+ [0][0][RTW89_FCC][74] = -18,
+ [0][0][RTW89_ETSI][74] = 127,
+ [0][0][RTW89_FCC][75] = -18,
+ [0][0][RTW89_ETSI][75] = 127,
+ [0][0][RTW89_FCC][77] = -18,
+ [0][0][RTW89_ETSI][77] = 127,
+ [0][0][RTW89_FCC][79] = -18,
+ [0][0][RTW89_ETSI][79] = 127,
+ [0][0][RTW89_FCC][81] = -18,
+ [0][0][RTW89_ETSI][81] = 127,
+ [0][0][RTW89_FCC][83] = -18,
+ [0][0][RTW89_ETSI][83] = 127,
+ [0][0][RTW89_FCC][85] = -18,
+ [0][0][RTW89_ETSI][85] = 127,
+ [0][0][RTW89_FCC][87] = -16,
+ [0][0][RTW89_ETSI][87] = 127,
+ [0][0][RTW89_FCC][89] = -16,
+ [0][0][RTW89_ETSI][89] = 127,
+ [0][0][RTW89_FCC][90] = -16,
+ [0][0][RTW89_ETSI][90] = 127,
+ [0][0][RTW89_FCC][92] = -16,
+ [0][0][RTW89_ETSI][92] = 127,
+ [0][0][RTW89_FCC][94] = -16,
+ [0][0][RTW89_ETSI][94] = 127,
+ [0][0][RTW89_FCC][96] = -16,
+ [0][0][RTW89_ETSI][96] = 127,
+ [0][0][RTW89_FCC][98] = -16,
+ [0][0][RTW89_ETSI][98] = 127,
+ [0][0][RTW89_FCC][100] = -16,
+ [0][0][RTW89_ETSI][100] = 127,
+ [0][0][RTW89_FCC][102] = -16,
+ [0][0][RTW89_ETSI][102] = 127,
+ [0][0][RTW89_FCC][104] = -16,
+ [0][0][RTW89_ETSI][104] = 127,
+ [0][0][RTW89_FCC][105] = -16,
+ [0][0][RTW89_ETSI][105] = 127,
+ [0][0][RTW89_FCC][107] = -12,
+ [0][0][RTW89_ETSI][107] = 127,
+ [0][0][RTW89_FCC][109] = -12,
+ [0][0][RTW89_ETSI][109] = 127,
[0][0][RTW89_FCC][111] = 127,
+ [0][0][RTW89_ETSI][111] = 127,
[0][0][RTW89_FCC][113] = 127,
+ [0][0][RTW89_ETSI][113] = 127,
[0][0][RTW89_FCC][115] = 127,
+ [0][0][RTW89_ETSI][115] = 127,
[0][0][RTW89_FCC][117] = 127,
+ [0][0][RTW89_ETSI][117] = 127,
[0][0][RTW89_FCC][119] = 127,
- [0][1][RTW89_FCC][0] = 76,
- [0][1][RTW89_FCC][2] = 76,
- [0][1][RTW89_FCC][4] = 76,
- [0][1][RTW89_FCC][6] = 76,
- [0][1][RTW89_FCC][8] = 76,
- [0][1][RTW89_FCC][10] = 76,
- [0][1][RTW89_FCC][12] = 76,
- [0][1][RTW89_FCC][14] = 76,
- [0][1][RTW89_FCC][15] = 76,
- [0][1][RTW89_FCC][17] = 76,
- [0][1][RTW89_FCC][19] = 76,
- [0][1][RTW89_FCC][21] = 76,
- [0][1][RTW89_FCC][23] = 76,
- [0][1][RTW89_FCC][25] = 76,
- [0][1][RTW89_FCC][27] = 76,
- [0][1][RTW89_FCC][29] = 76,
- [0][1][RTW89_FCC][30] = 76,
- [0][1][RTW89_FCC][32] = 76,
- [0][1][RTW89_FCC][34] = 76,
- [0][1][RTW89_FCC][36] = 76,
- [0][1][RTW89_FCC][38] = 76,
- [0][1][RTW89_FCC][40] = 76,
- [0][1][RTW89_FCC][42] = 76,
- [0][1][RTW89_FCC][44] = 76,
- [0][1][RTW89_FCC][45] = 76,
- [0][1][RTW89_FCC][47] = 76,
- [0][1][RTW89_FCC][49] = 76,
- [0][1][RTW89_FCC][51] = 76,
- [0][1][RTW89_FCC][53] = 76,
- [0][1][RTW89_FCC][55] = 76,
- [0][1][RTW89_FCC][57] = 76,
- [0][1][RTW89_FCC][59] = 76,
- [0][1][RTW89_FCC][60] = 76,
- [0][1][RTW89_FCC][62] = 76,
- [0][1][RTW89_FCC][64] = 76,
- [0][1][RTW89_FCC][66] = 76,
- [0][1][RTW89_FCC][68] = 76,
- [0][1][RTW89_FCC][70] = 76,
- [0][1][RTW89_FCC][72] = 76,
- [0][1][RTW89_FCC][74] = 76,
- [0][1][RTW89_FCC][75] = 76,
- [0][1][RTW89_FCC][77] = 76,
- [0][1][RTW89_FCC][79] = 76,
- [0][1][RTW89_FCC][81] = 76,
- [0][1][RTW89_FCC][83] = 76,
- [0][1][RTW89_FCC][85] = 76,
- [0][1][RTW89_FCC][87] = 76,
- [0][1][RTW89_FCC][89] = 76,
- [0][1][RTW89_FCC][90] = 76,
- [0][1][RTW89_FCC][92] = 76,
- [0][1][RTW89_FCC][94] = 76,
- [0][1][RTW89_FCC][96] = 76,
- [0][1][RTW89_FCC][98] = 76,
- [0][1][RTW89_FCC][100] = 76,
- [0][1][RTW89_FCC][102] = 76,
- [0][1][RTW89_FCC][104] = 76,
- [0][1][RTW89_FCC][105] = 76,
- [0][1][RTW89_FCC][107] = 76,
- [0][1][RTW89_FCC][109] = 76,
+ [0][0][RTW89_ETSI][119] = 127,
+ [0][1][RTW89_FCC][0] = -40,
+ [0][1][RTW89_ETSI][0] = 20,
+ [0][1][RTW89_FCC][2] = -40,
+ [0][1][RTW89_ETSI][2] = 20,
+ [0][1][RTW89_FCC][4] = -40,
+ [0][1][RTW89_ETSI][4] = 20,
+ [0][1][RTW89_FCC][6] = -40,
+ [0][1][RTW89_ETSI][6] = 20,
+ [0][1][RTW89_FCC][8] = -40,
+ [0][1][RTW89_ETSI][8] = 20,
+ [0][1][RTW89_FCC][10] = -40,
+ [0][1][RTW89_ETSI][10] = 20,
+ [0][1][RTW89_FCC][12] = -40,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_FCC][14] = -40,
+ [0][1][RTW89_ETSI][14] = 20,
+ [0][1][RTW89_FCC][15] = -40,
+ [0][1][RTW89_ETSI][15] = 20,
+ [0][1][RTW89_FCC][17] = -40,
+ [0][1][RTW89_ETSI][17] = 20,
+ [0][1][RTW89_FCC][19] = -40,
+ [0][1][RTW89_ETSI][19] = 20,
+ [0][1][RTW89_FCC][21] = -40,
+ [0][1][RTW89_ETSI][21] = 20,
+ [0][1][RTW89_FCC][23] = -40,
+ [0][1][RTW89_ETSI][23] = 20,
+ [0][1][RTW89_FCC][25] = -40,
+ [0][1][RTW89_ETSI][25] = 20,
+ [0][1][RTW89_FCC][27] = -40,
+ [0][1][RTW89_ETSI][27] = 20,
+ [0][1][RTW89_FCC][29] = -40,
+ [0][1][RTW89_ETSI][29] = 20,
+ [0][1][RTW89_FCC][30] = -40,
+ [0][1][RTW89_ETSI][30] = 20,
+ [0][1][RTW89_FCC][32] = -40,
+ [0][1][RTW89_ETSI][32] = 20,
+ [0][1][RTW89_FCC][34] = -40,
+ [0][1][RTW89_ETSI][34] = 20,
+ [0][1][RTW89_FCC][36] = -40,
+ [0][1][RTW89_ETSI][36] = 20,
+ [0][1][RTW89_FCC][38] = -40,
+ [0][1][RTW89_ETSI][38] = 20,
+ [0][1][RTW89_FCC][40] = -40,
+ [0][1][RTW89_ETSI][40] = 20,
+ [0][1][RTW89_FCC][42] = -40,
+ [0][1][RTW89_ETSI][42] = 20,
+ [0][1][RTW89_FCC][44] = -40,
+ [0][1][RTW89_ETSI][44] = 20,
+ [0][1][RTW89_FCC][45] = -40,
+ [0][1][RTW89_ETSI][45] = 127,
+ [0][1][RTW89_FCC][47] = -40,
+ [0][1][RTW89_ETSI][47] = 127,
+ [0][1][RTW89_FCC][49] = -40,
+ [0][1][RTW89_ETSI][49] = 127,
+ [0][1][RTW89_FCC][51] = -40,
+ [0][1][RTW89_ETSI][51] = 127,
+ [0][1][RTW89_FCC][53] = -40,
+ [0][1][RTW89_ETSI][53] = 127,
+ [0][1][RTW89_FCC][55] = -40,
+ [0][1][RTW89_ETSI][55] = 127,
+ [0][1][RTW89_FCC][57] = -40,
+ [0][1][RTW89_ETSI][57] = 127,
+ [0][1][RTW89_FCC][59] = -40,
+ [0][1][RTW89_ETSI][59] = 127,
+ [0][1][RTW89_FCC][60] = -40,
+ [0][1][RTW89_ETSI][60] = 127,
+ [0][1][RTW89_FCC][62] = -40,
+ [0][1][RTW89_ETSI][62] = 127,
+ [0][1][RTW89_FCC][64] = -40,
+ [0][1][RTW89_ETSI][64] = 127,
+ [0][1][RTW89_FCC][66] = -40,
+ [0][1][RTW89_ETSI][66] = 127,
+ [0][1][RTW89_FCC][68] = -40,
+ [0][1][RTW89_ETSI][68] = 127,
+ [0][1][RTW89_FCC][70] = -38,
+ [0][1][RTW89_ETSI][70] = 127,
+ [0][1][RTW89_FCC][72] = -38,
+ [0][1][RTW89_ETSI][72] = 127,
+ [0][1][RTW89_FCC][74] = -38,
+ [0][1][RTW89_ETSI][74] = 127,
+ [0][1][RTW89_FCC][75] = -38,
+ [0][1][RTW89_ETSI][75] = 127,
+ [0][1][RTW89_FCC][77] = -38,
+ [0][1][RTW89_ETSI][77] = 127,
+ [0][1][RTW89_FCC][79] = -38,
+ [0][1][RTW89_ETSI][79] = 127,
+ [0][1][RTW89_FCC][81] = -38,
+ [0][1][RTW89_ETSI][81] = 127,
+ [0][1][RTW89_FCC][83] = -38,
+ [0][1][RTW89_ETSI][83] = 127,
+ [0][1][RTW89_FCC][85] = -38,
+ [0][1][RTW89_ETSI][85] = 127,
+ [0][1][RTW89_FCC][87] = -40,
+ [0][1][RTW89_ETSI][87] = 127,
+ [0][1][RTW89_FCC][89] = -38,
+ [0][1][RTW89_ETSI][89] = 127,
+ [0][1][RTW89_FCC][90] = -38,
+ [0][1][RTW89_ETSI][90] = 127,
+ [0][1][RTW89_FCC][92] = -38,
+ [0][1][RTW89_ETSI][92] = 127,
+ [0][1][RTW89_FCC][94] = -38,
+ [0][1][RTW89_ETSI][94] = 127,
+ [0][1][RTW89_FCC][96] = -38,
+ [0][1][RTW89_ETSI][96] = 127,
+ [0][1][RTW89_FCC][98] = -38,
+ [0][1][RTW89_ETSI][98] = 127,
+ [0][1][RTW89_FCC][100] = -38,
+ [0][1][RTW89_ETSI][100] = 127,
+ [0][1][RTW89_FCC][102] = -38,
+ [0][1][RTW89_ETSI][102] = 127,
+ [0][1][RTW89_FCC][104] = -38,
+ [0][1][RTW89_ETSI][104] = 127,
+ [0][1][RTW89_FCC][105] = -38,
+ [0][1][RTW89_ETSI][105] = 127,
+ [0][1][RTW89_FCC][107] = -34,
+ [0][1][RTW89_ETSI][107] = 127,
+ [0][1][RTW89_FCC][109] = -34,
+ [0][1][RTW89_ETSI][109] = 127,
[0][1][RTW89_FCC][111] = 127,
+ [0][1][RTW89_ETSI][111] = 127,
[0][1][RTW89_FCC][113] = 127,
+ [0][1][RTW89_ETSI][113] = 127,
[0][1][RTW89_FCC][115] = 127,
+ [0][1][RTW89_ETSI][115] = 127,
[0][1][RTW89_FCC][117] = 127,
+ [0][1][RTW89_ETSI][117] = 127,
[0][1][RTW89_FCC][119] = 127,
- [1][0][RTW89_FCC][0] = 76,
- [1][0][RTW89_FCC][2] = 76,
- [1][0][RTW89_FCC][4] = 76,
- [1][0][RTW89_FCC][6] = 76,
- [1][0][RTW89_FCC][8] = 76,
- [1][0][RTW89_FCC][10] = 76,
- [1][0][RTW89_FCC][12] = 76,
- [1][0][RTW89_FCC][14] = 76,
- [1][0][RTW89_FCC][15] = 76,
- [1][0][RTW89_FCC][17] = 76,
- [1][0][RTW89_FCC][19] = 76,
- [1][0][RTW89_FCC][21] = 76,
- [1][0][RTW89_FCC][23] = 76,
- [1][0][RTW89_FCC][25] = 76,
- [1][0][RTW89_FCC][27] = 76,
- [1][0][RTW89_FCC][29] = 76,
- [1][0][RTW89_FCC][30] = 76,
- [1][0][RTW89_FCC][32] = 76,
- [1][0][RTW89_FCC][34] = 76,
- [1][0][RTW89_FCC][36] = 76,
- [1][0][RTW89_FCC][38] = 76,
- [1][0][RTW89_FCC][40] = 76,
- [1][0][RTW89_FCC][42] = 76,
- [1][0][RTW89_FCC][44] = 76,
- [1][0][RTW89_FCC][45] = 76,
- [1][0][RTW89_FCC][47] = 76,
- [1][0][RTW89_FCC][49] = 76,
- [1][0][RTW89_FCC][51] = 76,
- [1][0][RTW89_FCC][53] = 76,
- [1][0][RTW89_FCC][55] = 76,
- [1][0][RTW89_FCC][57] = 76,
- [1][0][RTW89_FCC][59] = 76,
- [1][0][RTW89_FCC][60] = 76,
- [1][0][RTW89_FCC][62] = 76,
- [1][0][RTW89_FCC][64] = 76,
- [1][0][RTW89_FCC][66] = 76,
- [1][0][RTW89_FCC][68] = 76,
- [1][0][RTW89_FCC][70] = 76,
- [1][0][RTW89_FCC][72] = 76,
- [1][0][RTW89_FCC][74] = 76,
- [1][0][RTW89_FCC][75] = 76,
- [1][0][RTW89_FCC][77] = 76,
- [1][0][RTW89_FCC][79] = 76,
- [1][0][RTW89_FCC][81] = 76,
- [1][0][RTW89_FCC][83] = 76,
- [1][0][RTW89_FCC][85] = 76,
- [1][0][RTW89_FCC][87] = 76,
- [1][0][RTW89_FCC][89] = 76,
- [1][0][RTW89_FCC][90] = 76,
- [1][0][RTW89_FCC][92] = 76,
- [1][0][RTW89_FCC][94] = 76,
- [1][0][RTW89_FCC][96] = 76,
- [1][0][RTW89_FCC][98] = 76,
- [1][0][RTW89_FCC][100] = 76,
- [1][0][RTW89_FCC][102] = 76,
- [1][0][RTW89_FCC][104] = 76,
- [1][0][RTW89_FCC][105] = 76,
- [1][0][RTW89_FCC][107] = 76,
- [1][0][RTW89_FCC][109] = 76,
+ [0][1][RTW89_ETSI][119] = 127,
+ [1][0][RTW89_FCC][0] = -4,
+ [1][0][RTW89_ETSI][0] = 46,
+ [1][0][RTW89_FCC][2] = -4,
+ [1][0][RTW89_ETSI][2] = 46,
+ [1][0][RTW89_FCC][4] = -4,
+ [1][0][RTW89_ETSI][4] = 46,
+ [1][0][RTW89_FCC][6] = -4,
+ [1][0][RTW89_ETSI][6] = 46,
+ [1][0][RTW89_FCC][8] = -4,
+ [1][0][RTW89_ETSI][8] = 46,
+ [1][0][RTW89_FCC][10] = -4,
+ [1][0][RTW89_ETSI][10] = 46,
+ [1][0][RTW89_FCC][12] = -4,
+ [1][0][RTW89_ETSI][12] = 46,
+ [1][0][RTW89_FCC][14] = -4,
+ [1][0][RTW89_ETSI][14] = 46,
+ [1][0][RTW89_FCC][15] = -4,
+ [1][0][RTW89_ETSI][15] = 46,
+ [1][0][RTW89_FCC][17] = -4,
+ [1][0][RTW89_ETSI][17] = 46,
+ [1][0][RTW89_FCC][19] = -4,
+ [1][0][RTW89_ETSI][19] = 46,
+ [1][0][RTW89_FCC][21] = -4,
+ [1][0][RTW89_ETSI][21] = 46,
+ [1][0][RTW89_FCC][23] = -4,
+ [1][0][RTW89_ETSI][23] = 46,
+ [1][0][RTW89_FCC][25] = -4,
+ [1][0][RTW89_ETSI][25] = 46,
+ [1][0][RTW89_FCC][27] = -4,
+ [1][0][RTW89_ETSI][27] = 46,
+ [1][0][RTW89_FCC][29] = -4,
+ [1][0][RTW89_ETSI][29] = 46,
+ [1][0][RTW89_FCC][30] = -4,
+ [1][0][RTW89_ETSI][30] = 46,
+ [1][0][RTW89_FCC][32] = -4,
+ [1][0][RTW89_ETSI][32] = 46,
+ [1][0][RTW89_FCC][34] = -4,
+ [1][0][RTW89_ETSI][34] = 46,
+ [1][0][RTW89_FCC][36] = -4,
+ [1][0][RTW89_ETSI][36] = 46,
+ [1][0][RTW89_FCC][38] = -4,
+ [1][0][RTW89_ETSI][38] = 46,
+ [1][0][RTW89_FCC][40] = -4,
+ [1][0][RTW89_ETSI][40] = 46,
+ [1][0][RTW89_FCC][42] = -4,
+ [1][0][RTW89_ETSI][42] = 46,
+ [1][0][RTW89_FCC][44] = -4,
+ [1][0][RTW89_ETSI][44] = 46,
+ [1][0][RTW89_FCC][45] = -4,
+ [1][0][RTW89_ETSI][45] = 127,
+ [1][0][RTW89_FCC][47] = -4,
+ [1][0][RTW89_ETSI][47] = 127,
+ [1][0][RTW89_FCC][49] = -4,
+ [1][0][RTW89_ETSI][49] = 127,
+ [1][0][RTW89_FCC][51] = -4,
+ [1][0][RTW89_ETSI][51] = 127,
+ [1][0][RTW89_FCC][53] = -4,
+ [1][0][RTW89_ETSI][53] = 127,
+ [1][0][RTW89_FCC][55] = -4,
+ [1][0][RTW89_ETSI][55] = 127,
+ [1][0][RTW89_FCC][57] = -4,
+ [1][0][RTW89_ETSI][57] = 127,
+ [1][0][RTW89_FCC][59] = -4,
+ [1][0][RTW89_ETSI][59] = 127,
+ [1][0][RTW89_FCC][60] = -4,
+ [1][0][RTW89_ETSI][60] = 127,
+ [1][0][RTW89_FCC][62] = -4,
+ [1][0][RTW89_ETSI][62] = 127,
+ [1][0][RTW89_FCC][64] = -4,
+ [1][0][RTW89_ETSI][64] = 127,
+ [1][0][RTW89_FCC][66] = -4,
+ [1][0][RTW89_ETSI][66] = 127,
+ [1][0][RTW89_FCC][68] = -4,
+ [1][0][RTW89_ETSI][68] = 127,
+ [1][0][RTW89_FCC][70] = -4,
+ [1][0][RTW89_ETSI][70] = 127,
+ [1][0][RTW89_FCC][72] = -4,
+ [1][0][RTW89_ETSI][72] = 127,
+ [1][0][RTW89_FCC][74] = -4,
+ [1][0][RTW89_ETSI][74] = 127,
+ [1][0][RTW89_FCC][75] = -4,
+ [1][0][RTW89_ETSI][75] = 127,
+ [1][0][RTW89_FCC][77] = -4,
+ [1][0][RTW89_ETSI][77] = 127,
+ [1][0][RTW89_FCC][79] = -4,
+ [1][0][RTW89_ETSI][79] = 127,
+ [1][0][RTW89_FCC][81] = -4,
+ [1][0][RTW89_ETSI][81] = 127,
+ [1][0][RTW89_FCC][83] = -4,
+ [1][0][RTW89_ETSI][83] = 127,
+ [1][0][RTW89_FCC][85] = -4,
+ [1][0][RTW89_ETSI][85] = 127,
+ [1][0][RTW89_FCC][87] = -4,
+ [1][0][RTW89_ETSI][87] = 127,
+ [1][0][RTW89_FCC][89] = -4,
+ [1][0][RTW89_ETSI][89] = 127,
+ [1][0][RTW89_FCC][90] = -4,
+ [1][0][RTW89_ETSI][90] = 127,
+ [1][0][RTW89_FCC][92] = -4,
+ [1][0][RTW89_ETSI][92] = 127,
+ [1][0][RTW89_FCC][94] = -4,
+ [1][0][RTW89_ETSI][94] = 127,
+ [1][0][RTW89_FCC][96] = -4,
+ [1][0][RTW89_ETSI][96] = 127,
+ [1][0][RTW89_FCC][98] = -4,
+ [1][0][RTW89_ETSI][98] = 127,
+ [1][0][RTW89_FCC][100] = -4,
+ [1][0][RTW89_ETSI][100] = 127,
+ [1][0][RTW89_FCC][102] = -4,
+ [1][0][RTW89_ETSI][102] = 127,
+ [1][0][RTW89_FCC][104] = -4,
+ [1][0][RTW89_ETSI][104] = 127,
+ [1][0][RTW89_FCC][105] = -4,
+ [1][0][RTW89_ETSI][105] = 127,
+ [1][0][RTW89_FCC][107] = 0,
+ [1][0][RTW89_ETSI][107] = 127,
+ [1][0][RTW89_FCC][109] = 2,
+ [1][0][RTW89_ETSI][109] = 127,
[1][0][RTW89_FCC][111] = 127,
+ [1][0][RTW89_ETSI][111] = 127,
[1][0][RTW89_FCC][113] = 127,
+ [1][0][RTW89_ETSI][113] = 127,
[1][0][RTW89_FCC][115] = 127,
+ [1][0][RTW89_ETSI][115] = 127,
[1][0][RTW89_FCC][117] = 127,
+ [1][0][RTW89_ETSI][117] = 127,
[1][0][RTW89_FCC][119] = 127,
- [1][1][RTW89_FCC][0] = 76,
- [1][1][RTW89_FCC][2] = 76,
- [1][1][RTW89_FCC][4] = 76,
- [1][1][RTW89_FCC][6] = 76,
- [1][1][RTW89_FCC][8] = 76,
- [1][1][RTW89_FCC][10] = 76,
- [1][1][RTW89_FCC][12] = 76,
- [1][1][RTW89_FCC][14] = 76,
- [1][1][RTW89_FCC][15] = 76,
- [1][1][RTW89_FCC][17] = 76,
- [1][1][RTW89_FCC][19] = 76,
- [1][1][RTW89_FCC][21] = 76,
- [1][1][RTW89_FCC][23] = 76,
- [1][1][RTW89_FCC][25] = 76,
- [1][1][RTW89_FCC][27] = 76,
- [1][1][RTW89_FCC][29] = 76,
- [1][1][RTW89_FCC][30] = 76,
- [1][1][RTW89_FCC][32] = 76,
- [1][1][RTW89_FCC][34] = 76,
- [1][1][RTW89_FCC][36] = 76,
- [1][1][RTW89_FCC][38] = 76,
- [1][1][RTW89_FCC][40] = 76,
- [1][1][RTW89_FCC][42] = 76,
- [1][1][RTW89_FCC][44] = 76,
- [1][1][RTW89_FCC][45] = 76,
- [1][1][RTW89_FCC][47] = 76,
- [1][1][RTW89_FCC][49] = 76,
- [1][1][RTW89_FCC][51] = 76,
- [1][1][RTW89_FCC][53] = 76,
- [1][1][RTW89_FCC][55] = 76,
- [1][1][RTW89_FCC][57] = 76,
- [1][1][RTW89_FCC][59] = 76,
- [1][1][RTW89_FCC][60] = 76,
- [1][1][RTW89_FCC][62] = 76,
- [1][1][RTW89_FCC][64] = 76,
- [1][1][RTW89_FCC][66] = 76,
- [1][1][RTW89_FCC][68] = 76,
- [1][1][RTW89_FCC][70] = 76,
- [1][1][RTW89_FCC][72] = 76,
- [1][1][RTW89_FCC][74] = 76,
- [1][1][RTW89_FCC][75] = 76,
- [1][1][RTW89_FCC][77] = 76,
- [1][1][RTW89_FCC][79] = 76,
- [1][1][RTW89_FCC][81] = 76,
- [1][1][RTW89_FCC][83] = 76,
- [1][1][RTW89_FCC][85] = 76,
- [1][1][RTW89_FCC][87] = 76,
- [1][1][RTW89_FCC][89] = 76,
- [1][1][RTW89_FCC][90] = 76,
- [1][1][RTW89_FCC][92] = 76,
- [1][1][RTW89_FCC][94] = 76,
- [1][1][RTW89_FCC][96] = 76,
- [1][1][RTW89_FCC][98] = 76,
- [1][1][RTW89_FCC][100] = 76,
- [1][1][RTW89_FCC][102] = 76,
- [1][1][RTW89_FCC][104] = 76,
- [1][1][RTW89_FCC][105] = 76,
- [1][1][RTW89_FCC][107] = 76,
- [1][1][RTW89_FCC][109] = 76,
+ [1][0][RTW89_ETSI][119] = 127,
+ [1][1][RTW89_FCC][0] = -26,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_FCC][2] = -28,
+ [1][1][RTW89_ETSI][2] = 32,
+ [1][1][RTW89_FCC][4] = -28,
+ [1][1][RTW89_ETSI][4] = 32,
+ [1][1][RTW89_FCC][6] = -28,
+ [1][1][RTW89_ETSI][6] = 32,
+ [1][1][RTW89_FCC][8] = -28,
+ [1][1][RTW89_ETSI][8] = 32,
+ [1][1][RTW89_FCC][10] = -28,
+ [1][1][RTW89_ETSI][10] = 32,
+ [1][1][RTW89_FCC][12] = -28,
+ [1][1][RTW89_ETSI][12] = 32,
+ [1][1][RTW89_FCC][14] = -28,
+ [1][1][RTW89_ETSI][14] = 32,
+ [1][1][RTW89_FCC][15] = -28,
+ [1][1][RTW89_ETSI][15] = 32,
+ [1][1][RTW89_FCC][17] = -28,
+ [1][1][RTW89_ETSI][17] = 32,
+ [1][1][RTW89_FCC][19] = -28,
+ [1][1][RTW89_ETSI][19] = 32,
+ [1][1][RTW89_FCC][21] = -28,
+ [1][1][RTW89_ETSI][21] = 32,
+ [1][1][RTW89_FCC][23] = -28,
+ [1][1][RTW89_ETSI][23] = 32,
+ [1][1][RTW89_FCC][25] = -28,
+ [1][1][RTW89_ETSI][25] = 32,
+ [1][1][RTW89_FCC][27] = -28,
+ [1][1][RTW89_ETSI][27] = 32,
+ [1][1][RTW89_FCC][29] = -28,
+ [1][1][RTW89_ETSI][29] = 32,
+ [1][1][RTW89_FCC][30] = -28,
+ [1][1][RTW89_ETSI][30] = 32,
+ [1][1][RTW89_FCC][32] = -28,
+ [1][1][RTW89_ETSI][32] = 32,
+ [1][1][RTW89_FCC][34] = -28,
+ [1][1][RTW89_ETSI][34] = 32,
+ [1][1][RTW89_FCC][36] = -28,
+ [1][1][RTW89_ETSI][36] = 32,
+ [1][1][RTW89_FCC][38] = -28,
+ [1][1][RTW89_ETSI][38] = 32,
+ [1][1][RTW89_FCC][40] = -28,
+ [1][1][RTW89_ETSI][40] = 32,
+ [1][1][RTW89_FCC][42] = -28,
+ [1][1][RTW89_ETSI][42] = 32,
+ [1][1][RTW89_FCC][44] = -28,
+ [1][1][RTW89_ETSI][44] = 34,
+ [1][1][RTW89_FCC][45] = -26,
+ [1][1][RTW89_ETSI][45] = 127,
+ [1][1][RTW89_FCC][47] = -28,
+ [1][1][RTW89_ETSI][47] = 127,
+ [1][1][RTW89_FCC][49] = -28,
+ [1][1][RTW89_ETSI][49] = 127,
+ [1][1][RTW89_FCC][51] = -28,
+ [1][1][RTW89_ETSI][51] = 127,
+ [1][1][RTW89_FCC][53] = -26,
+ [1][1][RTW89_ETSI][53] = 127,
+ [1][1][RTW89_FCC][55] = -28,
+ [1][1][RTW89_ETSI][55] = 127,
+ [1][1][RTW89_FCC][57] = -28,
+ [1][1][RTW89_ETSI][57] = 127,
+ [1][1][RTW89_FCC][59] = -28,
+ [1][1][RTW89_ETSI][59] = 127,
+ [1][1][RTW89_FCC][60] = -28,
+ [1][1][RTW89_ETSI][60] = 127,
+ [1][1][RTW89_FCC][62] = -28,
+ [1][1][RTW89_ETSI][62] = 127,
+ [1][1][RTW89_FCC][64] = -28,
+ [1][1][RTW89_ETSI][64] = 127,
+ [1][1][RTW89_FCC][66] = -28,
+ [1][1][RTW89_ETSI][66] = 127,
+ [1][1][RTW89_FCC][68] = -28,
+ [1][1][RTW89_ETSI][68] = 127,
+ [1][1][RTW89_FCC][70] = -26,
+ [1][1][RTW89_ETSI][70] = 127,
+ [1][1][RTW89_FCC][72] = -28,
+ [1][1][RTW89_ETSI][72] = 127,
+ [1][1][RTW89_FCC][74] = -28,
+ [1][1][RTW89_ETSI][74] = 127,
+ [1][1][RTW89_FCC][75] = -28,
+ [1][1][RTW89_ETSI][75] = 127,
+ [1][1][RTW89_FCC][77] = -28,
+ [1][1][RTW89_ETSI][77] = 127,
+ [1][1][RTW89_FCC][79] = -28,
+ [1][1][RTW89_ETSI][79] = 127,
+ [1][1][RTW89_FCC][81] = -28,
+ [1][1][RTW89_ETSI][81] = 127,
+ [1][1][RTW89_FCC][83] = -28,
+ [1][1][RTW89_ETSI][83] = 127,
+ [1][1][RTW89_FCC][85] = -28,
+ [1][1][RTW89_ETSI][85] = 127,
+ [1][1][RTW89_FCC][87] = -28,
+ [1][1][RTW89_ETSI][87] = 127,
+ [1][1][RTW89_FCC][89] = -26,
+ [1][1][RTW89_ETSI][89] = 127,
+ [1][1][RTW89_FCC][90] = -26,
+ [1][1][RTW89_ETSI][90] = 127,
+ [1][1][RTW89_FCC][92] = -26,
+ [1][1][RTW89_ETSI][92] = 127,
+ [1][1][RTW89_FCC][94] = -26,
+ [1][1][RTW89_ETSI][94] = 127,
+ [1][1][RTW89_FCC][96] = -26,
+ [1][1][RTW89_ETSI][96] = 127,
+ [1][1][RTW89_FCC][98] = -26,
+ [1][1][RTW89_ETSI][98] = 127,
+ [1][1][RTW89_FCC][100] = -26,
+ [1][1][RTW89_ETSI][100] = 127,
+ [1][1][RTW89_FCC][102] = -26,
+ [1][1][RTW89_ETSI][102] = 127,
+ [1][1][RTW89_FCC][104] = -26,
+ [1][1][RTW89_ETSI][104] = 127,
+ [1][1][RTW89_FCC][105] = -26,
+ [1][1][RTW89_ETSI][105] = 127,
+ [1][1][RTW89_FCC][107] = -22,
+ [1][1][RTW89_ETSI][107] = 127,
+ [1][1][RTW89_FCC][109] = -22,
+ [1][1][RTW89_ETSI][109] = 127,
[1][1][RTW89_FCC][111] = 127,
+ [1][1][RTW89_ETSI][111] = 127,
[1][1][RTW89_FCC][113] = 127,
+ [1][1][RTW89_ETSI][113] = 127,
[1][1][RTW89_FCC][115] = 127,
+ [1][1][RTW89_ETSI][115] = 127,
[1][1][RTW89_FCC][117] = 127,
+ [1][1][RTW89_ETSI][117] = 127,
[1][1][RTW89_FCC][119] = 127,
- [2][0][RTW89_FCC][0] = 76,
- [2][0][RTW89_FCC][2] = 76,
- [2][0][RTW89_FCC][4] = 76,
- [2][0][RTW89_FCC][6] = 76,
- [2][0][RTW89_FCC][8] = 76,
- [2][0][RTW89_FCC][10] = 76,
- [2][0][RTW89_FCC][12] = 76,
- [2][0][RTW89_FCC][14] = 76,
- [2][0][RTW89_FCC][15] = 76,
- [2][0][RTW89_FCC][17] = 76,
- [2][0][RTW89_FCC][19] = 76,
- [2][0][RTW89_FCC][21] = 76,
- [2][0][RTW89_FCC][23] = 76,
- [2][0][RTW89_FCC][25] = 76,
- [2][0][RTW89_FCC][27] = 76,
- [2][0][RTW89_FCC][29] = 76,
- [2][0][RTW89_FCC][30] = 76,
- [2][0][RTW89_FCC][32] = 76,
- [2][0][RTW89_FCC][34] = 76,
- [2][0][RTW89_FCC][36] = 76,
- [2][0][RTW89_FCC][38] = 76,
- [2][0][RTW89_FCC][40] = 76,
- [2][0][RTW89_FCC][42] = 76,
- [2][0][RTW89_FCC][44] = 76,
- [2][0][RTW89_FCC][45] = 76,
- [2][0][RTW89_FCC][47] = 76,
- [2][0][RTW89_FCC][49] = 76,
- [2][0][RTW89_FCC][51] = 76,
- [2][0][RTW89_FCC][53] = 76,
- [2][0][RTW89_FCC][55] = 76,
- [2][0][RTW89_FCC][57] = 76,
- [2][0][RTW89_FCC][59] = 76,
- [2][0][RTW89_FCC][60] = 76,
- [2][0][RTW89_FCC][62] = 76,
- [2][0][RTW89_FCC][64] = 76,
- [2][0][RTW89_FCC][66] = 76,
- [2][0][RTW89_FCC][68] = 76,
- [2][0][RTW89_FCC][70] = 76,
- [2][0][RTW89_FCC][72] = 76,
- [2][0][RTW89_FCC][74] = 76,
- [2][0][RTW89_FCC][75] = 76,
- [2][0][RTW89_FCC][77] = 76,
- [2][0][RTW89_FCC][79] = 76,
- [2][0][RTW89_FCC][81] = 76,
- [2][0][RTW89_FCC][83] = 76,
- [2][0][RTW89_FCC][85] = 76,
- [2][0][RTW89_FCC][87] = 76,
- [2][0][RTW89_FCC][89] = 76,
- [2][0][RTW89_FCC][90] = 76,
- [2][0][RTW89_FCC][92] = 76,
- [2][0][RTW89_FCC][94] = 76,
- [2][0][RTW89_FCC][96] = 76,
- [2][0][RTW89_FCC][98] = 76,
- [2][0][RTW89_FCC][100] = 76,
- [2][0][RTW89_FCC][102] = 76,
- [2][0][RTW89_FCC][104] = 76,
- [2][0][RTW89_FCC][105] = 76,
- [2][0][RTW89_FCC][107] = 76,
- [2][0][RTW89_FCC][109] = 76,
+ [1][1][RTW89_ETSI][119] = 127,
+ [2][0][RTW89_FCC][0] = 8,
+ [2][0][RTW89_ETSI][0] = 56,
+ [2][0][RTW89_FCC][2] = 8,
+ [2][0][RTW89_ETSI][2] = 56,
+ [2][0][RTW89_FCC][4] = 8,
+ [2][0][RTW89_ETSI][4] = 56,
+ [2][0][RTW89_FCC][6] = 8,
+ [2][0][RTW89_ETSI][6] = 56,
+ [2][0][RTW89_FCC][8] = 8,
+ [2][0][RTW89_ETSI][8] = 56,
+ [2][0][RTW89_FCC][10] = 8,
+ [2][0][RTW89_ETSI][10] = 56,
+ [2][0][RTW89_FCC][12] = 8,
+ [2][0][RTW89_ETSI][12] = 56,
+ [2][0][RTW89_FCC][14] = 8,
+ [2][0][RTW89_ETSI][14] = 56,
+ [2][0][RTW89_FCC][15] = 8,
+ [2][0][RTW89_ETSI][15] = 56,
+ [2][0][RTW89_FCC][17] = 8,
+ [2][0][RTW89_ETSI][17] = 56,
+ [2][0][RTW89_FCC][19] = 8,
+ [2][0][RTW89_ETSI][19] = 56,
+ [2][0][RTW89_FCC][21] = 8,
+ [2][0][RTW89_ETSI][21] = 56,
+ [2][0][RTW89_FCC][23] = 8,
+ [2][0][RTW89_ETSI][23] = 56,
+ [2][0][RTW89_FCC][25] = 8,
+ [2][0][RTW89_ETSI][25] = 56,
+ [2][0][RTW89_FCC][27] = 8,
+ [2][0][RTW89_ETSI][27] = 56,
+ [2][0][RTW89_FCC][29] = 8,
+ [2][0][RTW89_ETSI][29] = 56,
+ [2][0][RTW89_FCC][30] = 8,
+ [2][0][RTW89_ETSI][30] = 56,
+ [2][0][RTW89_FCC][32] = 8,
+ [2][0][RTW89_ETSI][32] = 56,
+ [2][0][RTW89_FCC][34] = 8,
+ [2][0][RTW89_ETSI][34] = 56,
+ [2][0][RTW89_FCC][36] = 8,
+ [2][0][RTW89_ETSI][36] = 56,
+ [2][0][RTW89_FCC][38] = 8,
+ [2][0][RTW89_ETSI][38] = 56,
+ [2][0][RTW89_FCC][40] = 8,
+ [2][0][RTW89_ETSI][40] = 56,
+ [2][0][RTW89_FCC][42] = 8,
+ [2][0][RTW89_ETSI][42] = 56,
+ [2][0][RTW89_FCC][44] = 8,
+ [2][0][RTW89_ETSI][44] = 56,
+ [2][0][RTW89_FCC][45] = 8,
+ [2][0][RTW89_ETSI][45] = 127,
+ [2][0][RTW89_FCC][47] = 8,
+ [2][0][RTW89_ETSI][47] = 127,
+ [2][0][RTW89_FCC][49] = 8,
+ [2][0][RTW89_ETSI][49] = 127,
+ [2][0][RTW89_FCC][51] = 8,
+ [2][0][RTW89_ETSI][51] = 127,
+ [2][0][RTW89_FCC][53] = 8,
+ [2][0][RTW89_ETSI][53] = 127,
+ [2][0][RTW89_FCC][55] = 8,
+ [2][0][RTW89_ETSI][55] = 127,
+ [2][0][RTW89_FCC][57] = 8,
+ [2][0][RTW89_ETSI][57] = 127,
+ [2][0][RTW89_FCC][59] = 8,
+ [2][0][RTW89_ETSI][59] = 127,
+ [2][0][RTW89_FCC][60] = 8,
+ [2][0][RTW89_ETSI][60] = 127,
+ [2][0][RTW89_FCC][62] = 8,
+ [2][0][RTW89_ETSI][62] = 127,
+ [2][0][RTW89_FCC][64] = 8,
+ [2][0][RTW89_ETSI][64] = 127,
+ [2][0][RTW89_FCC][66] = 8,
+ [2][0][RTW89_ETSI][66] = 127,
+ [2][0][RTW89_FCC][68] = 8,
+ [2][0][RTW89_ETSI][68] = 127,
+ [2][0][RTW89_FCC][70] = 8,
+ [2][0][RTW89_ETSI][70] = 127,
+ [2][0][RTW89_FCC][72] = 8,
+ [2][0][RTW89_ETSI][72] = 127,
+ [2][0][RTW89_FCC][74] = 8,
+ [2][0][RTW89_ETSI][74] = 127,
+ [2][0][RTW89_FCC][75] = 8,
+ [2][0][RTW89_ETSI][75] = 127,
+ [2][0][RTW89_FCC][77] = 8,
+ [2][0][RTW89_ETSI][77] = 127,
+ [2][0][RTW89_FCC][79] = 8,
+ [2][0][RTW89_ETSI][79] = 127,
+ [2][0][RTW89_FCC][81] = 8,
+ [2][0][RTW89_ETSI][81] = 127,
+ [2][0][RTW89_FCC][83] = 8,
+ [2][0][RTW89_ETSI][83] = 127,
+ [2][0][RTW89_FCC][85] = 8,
+ [2][0][RTW89_ETSI][85] = 127,
+ [2][0][RTW89_FCC][87] = 8,
+ [2][0][RTW89_ETSI][87] = 127,
+ [2][0][RTW89_FCC][89] = 8,
+ [2][0][RTW89_ETSI][89] = 127,
+ [2][0][RTW89_FCC][90] = 8,
+ [2][0][RTW89_ETSI][90] = 127,
+ [2][0][RTW89_FCC][92] = 8,
+ [2][0][RTW89_ETSI][92] = 127,
+ [2][0][RTW89_FCC][94] = 8,
+ [2][0][RTW89_ETSI][94] = 127,
+ [2][0][RTW89_FCC][96] = 8,
+ [2][0][RTW89_ETSI][96] = 127,
+ [2][0][RTW89_FCC][98] = 8,
+ [2][0][RTW89_ETSI][98] = 127,
+ [2][0][RTW89_FCC][100] = 8,
+ [2][0][RTW89_ETSI][100] = 127,
+ [2][0][RTW89_FCC][102] = 8,
+ [2][0][RTW89_ETSI][102] = 127,
+ [2][0][RTW89_FCC][104] = 8,
+ [2][0][RTW89_ETSI][104] = 127,
+ [2][0][RTW89_FCC][105] = 8,
+ [2][0][RTW89_ETSI][105] = 127,
+ [2][0][RTW89_FCC][107] = 10,
+ [2][0][RTW89_ETSI][107] = 127,
+ [2][0][RTW89_FCC][109] = 12,
+ [2][0][RTW89_ETSI][109] = 127,
[2][0][RTW89_FCC][111] = 127,
+ [2][0][RTW89_ETSI][111] = 127,
[2][0][RTW89_FCC][113] = 127,
+ [2][0][RTW89_ETSI][113] = 127,
[2][0][RTW89_FCC][115] = 127,
+ [2][0][RTW89_ETSI][115] = 127,
[2][0][RTW89_FCC][117] = 127,
+ [2][0][RTW89_ETSI][117] = 127,
[2][0][RTW89_FCC][119] = 127,
- [2][1][RTW89_FCC][0] = 76,
- [2][1][RTW89_FCC][2] = 76,
- [2][1][RTW89_FCC][4] = 76,
- [2][1][RTW89_FCC][6] = 76,
- [2][1][RTW89_FCC][8] = 76,
- [2][1][RTW89_FCC][10] = 76,
- [2][1][RTW89_FCC][12] = 76,
- [2][1][RTW89_FCC][14] = 76,
- [2][1][RTW89_FCC][15] = 76,
- [2][1][RTW89_FCC][17] = 76,
- [2][1][RTW89_FCC][19] = 76,
- [2][1][RTW89_FCC][21] = 76,
- [2][1][RTW89_FCC][23] = 76,
- [2][1][RTW89_FCC][25] = 76,
- [2][1][RTW89_FCC][27] = 76,
- [2][1][RTW89_FCC][29] = 76,
- [2][1][RTW89_FCC][30] = 76,
- [2][1][RTW89_FCC][32] = 76,
- [2][1][RTW89_FCC][34] = 76,
- [2][1][RTW89_FCC][36] = 76,
- [2][1][RTW89_FCC][38] = 76,
- [2][1][RTW89_FCC][40] = 76,
- [2][1][RTW89_FCC][42] = 76,
- [2][1][RTW89_FCC][44] = 76,
- [2][1][RTW89_FCC][45] = 76,
- [2][1][RTW89_FCC][47] = 76,
- [2][1][RTW89_FCC][49] = 76,
- [2][1][RTW89_FCC][51] = 76,
- [2][1][RTW89_FCC][53] = 76,
- [2][1][RTW89_FCC][55] = 76,
- [2][1][RTW89_FCC][57] = 76,
- [2][1][RTW89_FCC][59] = 76,
- [2][1][RTW89_FCC][60] = 76,
- [2][1][RTW89_FCC][62] = 76,
- [2][1][RTW89_FCC][64] = 76,
- [2][1][RTW89_FCC][66] = 76,
- [2][1][RTW89_FCC][68] = 76,
- [2][1][RTW89_FCC][70] = 76,
- [2][1][RTW89_FCC][72] = 76,
- [2][1][RTW89_FCC][74] = 76,
- [2][1][RTW89_FCC][75] = 76,
- [2][1][RTW89_FCC][77] = 76,
- [2][1][RTW89_FCC][79] = 76,
- [2][1][RTW89_FCC][81] = 76,
- [2][1][RTW89_FCC][83] = 76,
- [2][1][RTW89_FCC][85] = 76,
- [2][1][RTW89_FCC][87] = 76,
- [2][1][RTW89_FCC][89] = 76,
- [2][1][RTW89_FCC][90] = 76,
- [2][1][RTW89_FCC][92] = 76,
- [2][1][RTW89_FCC][94] = 76,
- [2][1][RTW89_FCC][96] = 76,
- [2][1][RTW89_FCC][98] = 76,
- [2][1][RTW89_FCC][100] = 76,
- [2][1][RTW89_FCC][102] = 76,
- [2][1][RTW89_FCC][104] = 76,
- [2][1][RTW89_FCC][105] = 76,
- [2][1][RTW89_FCC][107] = 76,
- [2][1][RTW89_FCC][109] = 76,
+ [2][0][RTW89_ETSI][119] = 127,
+ [2][1][RTW89_FCC][0] = -16,
+ [2][1][RTW89_ETSI][0] = 44,
+ [2][1][RTW89_FCC][2] = -16,
+ [2][1][RTW89_ETSI][2] = 44,
+ [2][1][RTW89_FCC][4] = -16,
+ [2][1][RTW89_ETSI][4] = 44,
+ [2][1][RTW89_FCC][6] = -16,
+ [2][1][RTW89_ETSI][6] = 44,
+ [2][1][RTW89_FCC][8] = -16,
+ [2][1][RTW89_ETSI][8] = 44,
+ [2][1][RTW89_FCC][10] = -16,
+ [2][1][RTW89_ETSI][10] = 44,
+ [2][1][RTW89_FCC][12] = -16,
+ [2][1][RTW89_ETSI][12] = 44,
+ [2][1][RTW89_FCC][14] = -16,
+ [2][1][RTW89_ETSI][14] = 44,
+ [2][1][RTW89_FCC][15] = -16,
+ [2][1][RTW89_ETSI][15] = 44,
+ [2][1][RTW89_FCC][17] = -16,
+ [2][1][RTW89_ETSI][17] = 44,
+ [2][1][RTW89_FCC][19] = -16,
+ [2][1][RTW89_ETSI][19] = 44,
+ [2][1][RTW89_FCC][21] = -16,
+ [2][1][RTW89_ETSI][21] = 44,
+ [2][1][RTW89_FCC][23] = -16,
+ [2][1][RTW89_ETSI][23] = 44,
+ [2][1][RTW89_FCC][25] = -16,
+ [2][1][RTW89_ETSI][25] = 44,
+ [2][1][RTW89_FCC][27] = -16,
+ [2][1][RTW89_ETSI][27] = 44,
+ [2][1][RTW89_FCC][29] = -16,
+ [2][1][RTW89_ETSI][29] = 44,
+ [2][1][RTW89_FCC][30] = -16,
+ [2][1][RTW89_ETSI][30] = 44,
+ [2][1][RTW89_FCC][32] = -16,
+ [2][1][RTW89_ETSI][32] = 44,
+ [2][1][RTW89_FCC][34] = -16,
+ [2][1][RTW89_ETSI][34] = 44,
+ [2][1][RTW89_FCC][36] = -16,
+ [2][1][RTW89_ETSI][36] = 44,
+ [2][1][RTW89_FCC][38] = -16,
+ [2][1][RTW89_ETSI][38] = 44,
+ [2][1][RTW89_FCC][40] = -16,
+ [2][1][RTW89_ETSI][40] = 44,
+ [2][1][RTW89_FCC][42] = -16,
+ [2][1][RTW89_ETSI][42] = 44,
+ [2][1][RTW89_FCC][44] = -16,
+ [2][1][RTW89_ETSI][44] = 44,
+ [2][1][RTW89_FCC][45] = -16,
+ [2][1][RTW89_ETSI][45] = 127,
+ [2][1][RTW89_FCC][47] = -16,
+ [2][1][RTW89_ETSI][47] = 127,
+ [2][1][RTW89_FCC][49] = -16,
+ [2][1][RTW89_ETSI][49] = 127,
+ [2][1][RTW89_FCC][51] = -16,
+ [2][1][RTW89_ETSI][51] = 127,
+ [2][1][RTW89_FCC][53] = -16,
+ [2][1][RTW89_ETSI][53] = 127,
+ [2][1][RTW89_FCC][55] = -16,
+ [2][1][RTW89_ETSI][55] = 127,
+ [2][1][RTW89_FCC][57] = -16,
+ [2][1][RTW89_ETSI][57] = 127,
+ [2][1][RTW89_FCC][59] = -16,
+ [2][1][RTW89_ETSI][59] = 127,
+ [2][1][RTW89_FCC][60] = -16,
+ [2][1][RTW89_ETSI][60] = 127,
+ [2][1][RTW89_FCC][62] = -16,
+ [2][1][RTW89_ETSI][62] = 127,
+ [2][1][RTW89_FCC][64] = -16,
+ [2][1][RTW89_ETSI][64] = 127,
+ [2][1][RTW89_FCC][66] = -16,
+ [2][1][RTW89_ETSI][66] = 127,
+ [2][1][RTW89_FCC][68] = -16,
+ [2][1][RTW89_ETSI][68] = 127,
+ [2][1][RTW89_FCC][70] = -16,
+ [2][1][RTW89_ETSI][70] = 127,
+ [2][1][RTW89_FCC][72] = -16,
+ [2][1][RTW89_ETSI][72] = 127,
+ [2][1][RTW89_FCC][74] = -16,
+ [2][1][RTW89_ETSI][74] = 127,
+ [2][1][RTW89_FCC][75] = -16,
+ [2][1][RTW89_ETSI][75] = 127,
+ [2][1][RTW89_FCC][77] = -16,
+ [2][1][RTW89_ETSI][77] = 127,
+ [2][1][RTW89_FCC][79] = -16,
+ [2][1][RTW89_ETSI][79] = 127,
+ [2][1][RTW89_FCC][81] = -16,
+ [2][1][RTW89_ETSI][81] = 127,
+ [2][1][RTW89_FCC][83] = -16,
+ [2][1][RTW89_ETSI][83] = 127,
+ [2][1][RTW89_FCC][85] = -18,
+ [2][1][RTW89_ETSI][85] = 127,
+ [2][1][RTW89_FCC][87] = -16,
+ [2][1][RTW89_ETSI][87] = 127,
+ [2][1][RTW89_FCC][89] = -16,
+ [2][1][RTW89_ETSI][89] = 127,
+ [2][1][RTW89_FCC][90] = -16,
+ [2][1][RTW89_ETSI][90] = 127,
+ [2][1][RTW89_FCC][92] = -16,
+ [2][1][RTW89_ETSI][92] = 127,
+ [2][1][RTW89_FCC][94] = -16,
+ [2][1][RTW89_ETSI][94] = 127,
+ [2][1][RTW89_FCC][96] = -16,
+ [2][1][RTW89_ETSI][96] = 127,
+ [2][1][RTW89_FCC][98] = -16,
+ [2][1][RTW89_ETSI][98] = 127,
+ [2][1][RTW89_FCC][100] = -16,
+ [2][1][RTW89_ETSI][100] = 127,
+ [2][1][RTW89_FCC][102] = -16,
+ [2][1][RTW89_ETSI][102] = 127,
+ [2][1][RTW89_FCC][104] = -16,
+ [2][1][RTW89_ETSI][104] = 127,
+ [2][1][RTW89_FCC][105] = -16,
+ [2][1][RTW89_ETSI][105] = 127,
+ [2][1][RTW89_FCC][107] = -12,
+ [2][1][RTW89_ETSI][107] = 127,
+ [2][1][RTW89_FCC][109] = -10,
+ [2][1][RTW89_ETSI][109] = 127,
[2][1][RTW89_FCC][111] = 127,
+ [2][1][RTW89_ETSI][111] = 127,
[2][1][RTW89_FCC][113] = 127,
+ [2][1][RTW89_ETSI][113] = 127,
[2][1][RTW89_FCC][115] = 127,
+ [2][1][RTW89_ETSI][115] = 127,
[2][1][RTW89_FCC][117] = 127,
+ [2][1][RTW89_ETSI][117] = 127,
[2][1][RTW89_FCC][119] = 127,
+ [2][1][RTW89_ETSI][119] = 127,
};
const struct rtw89_phy_table rtw89_8852c_phy_bb_table = {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index fc0394494013..35901f64d17d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -42,14 +42,15 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM_V1_MASK,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR_V1,
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1,
- .dma_stop1_reg = R_AX_HAXI_DMA_STOP1,
- .dma_stop2_reg = R_AX_HAXI_DMA_STOP2,
- .dma_busy1_reg = R_AX_HAXI_DMA_BUSY1,
+ .dma_stop1 = {R_AX_HAXI_DMA_STOP1, B_AX_TX_STOP1_MASK},
+ .dma_stop2 = {R_AX_HAXI_DMA_STOP2, B_AX_TX_STOP2_ALL},
+ .dma_busy1 = {R_AX_HAXI_DMA_BUSY1, DMA_BUSY1_CHECK},
.dma_busy2_reg = R_AX_HAXI_DMA_BUSY2,
.dma_busy3_reg = R_AX_HAXI_DMA_BUSY3,
.rpwm_addr = R_AX_PCIE_HRPWM_V1,
.cpwm_addr = R_AX_PCIE_CRPWM,
+ .tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index eb2d3ec28775..dfccae81c380 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -81,9 +81,9 @@ static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_band band = hal->current_band_type;
- u32 center_freq = hal->current_freq;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u32 center_freq = chan->freq;
const struct rtw89_sar_span *span = NULL;
enum rtw89_sar_subband subband_l, subband_h;
int idx;
@@ -228,7 +228,7 @@ static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
}
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
exit:
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 726223f25dc6..c1a4bc1c64d1 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -5,6 +5,7 @@
#include <linux/devcoredump.h>
#include "cam.h"
+#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
@@ -152,7 +153,10 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
ser_st_name(ser), ser_ev_name(ser, evt));
+ mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
ser->st_tbl[ser->state].st_func(ser, evt);
}
@@ -298,7 +302,7 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtwvif->trigger = false;
}
-static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
+static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
@@ -308,15 +312,19 @@ static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
if (sta->tdls)
rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
+
+ INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}
static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
ieee80211_iterate_stations_atomic(rtwdev->hw,
- ser_sta_deinit_addr_cam_iter,
+ ser_sta_deinit_cam_iter,
rtwvif);
rtw89_cam_deinit(rtwdev, rtwvif);
+
+ bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}
static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
@@ -388,6 +396,7 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
rtw89_hci_recovery_complete(rtwdev);
+ clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
break;
case SER_EV_L1_RESET:
ser_state_goto(ser, SER_RESET_TRX_ST);
@@ -531,7 +540,7 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
const struct __fw_backtrace_entry *ent)
{
struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
- u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
+ u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
u32 fwbt_size = ent->size;
u32 fwbt_key = ent->key;
u32 i;
@@ -601,6 +610,7 @@ bottom:
ser_reset_mac_binding(rtwdev);
rtw89_core_stop(rtwdev);
+ rtw89_entity_init(rtwdev);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
@@ -623,7 +633,6 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
fallthrough;
case SER_EV_L2_RECFG_DONE:
ser_state_goto(ser, SER_IDLE_ST);
- clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
break;
case SER_EV_STATE_OUT:
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 05524291d60c..82a7458e01ae 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -251,7 +251,7 @@ struct ndis_80211_bssid_ex {
struct ndis_80211_bssid_list_ex {
__le32 num_items;
- struct ndis_80211_bssid_ex bssid[];
+ u8 bssid_data[];
} __packed;
struct ndis_80211_fixed_ies {
@@ -489,14 +489,16 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast);
+ int link_id, u8 key_index, bool unicast,
+ bool multicast);
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo);
@@ -2082,7 +2084,8 @@ resize_buf:
netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len);
bssid_len = 0;
- bssid = next_bssid_list_item(bssid_list->bssid, &bssid_len, buf, len);
+ bssid = next_bssid_list_item((void *)bssid_list->bssid_data,
+ &bssid_len, buf, len);
/* Device returns incorrect 'num_items'. Workaround by ignoring the
* received 'num_items' and walking through full bssid buffer instead.
@@ -2377,8 +2380,8 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
}
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2413,7 +2416,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2424,7 +2428,8 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index bf39c4bda26f..2fbec51c8f94 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -889,6 +889,7 @@ static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
* for a hardware TX queue.
* @hw: Pointer to the ieee80211_hw structure
* @vif: Pointer to the ieee80211_vif structure.
+ * @link_id: the link ID if MLO is used, otherwise 0
* @queue: Queue number.
* @params: Pointer to ieee80211_tx_queue_params structure.
*
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index e015bfb8d221..84d82ddded56 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -181,7 +181,7 @@ int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
while (len > 0) {
chunk_type = get_unaligned_le16(buf + 0);
chunk_len = get_unaligned_le16(buf + 2);
- if (chunk_len > len) {
+ if (chunk_len < 4 || chunk_len > len) {
dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num);
return -EINVAL;
}
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index e06da4b3b0d4..805a3c1bf8fe 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -91,23 +91,25 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
bool unlock)
{
struct cw1200_queue_stats *stats = queue->stats;
- struct cw1200_queue_item *item = NULL, *tmp;
+ struct cw1200_queue_item *item = NULL, *iter, *tmp;
bool wakeup_stats = false;
- list_for_each_entry_safe(item, tmp, &queue->queue, head) {
- if (time_is_after_jiffies(item->queue_timestamp + queue->ttl))
+ list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
+ if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
+ item = iter;
break;
+ }
--queue->num_queued;
- --queue->link_map_cache[item->txpriv.link_id];
+ --queue->link_map_cache[iter->txpriv.link_id];
spin_lock_bh(&stats->lock);
--stats->num_queued;
- if (!--stats->link_map_cache[item->txpriv.link_id])
+ if (!--stats->link_map_cache[iter->txpriv.link_id])
wakeup_stats = true;
spin_unlock_bh(&stats->lock);
cw1200_debug_tx_ttl(stats->priv);
- cw1200_queue_register_post_gc(head, item);
- item->skb = NULL;
- list_move_tail(&item->head, &queue->free_pool);
+ cw1200_queue_register_post_gc(head, iter);
+ iter->skb = NULL;
+ list_move_tail(&iter->head, &queue->free_pool);
}
if (wakeup_stats)
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 26d3614519b1..8ef1d06b9bbd 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -195,7 +195,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
priv->bss_loss_state++;
- skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false);
WARN_ON(!skb);
if (skb)
cw1200_tx(priv->hw, NULL, skb);
@@ -2263,7 +2263,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
.rate = 0xFF,
};
- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false);
if (!frame.skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index fde21fca6c5e..6894b919ff94 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -762,8 +762,7 @@ void cw1200_tx(struct ieee80211_hw *dev,
if (ret)
goto drop;
- rcu_read_lock();
- sta = rcu_dereference(t.sta);
+ sta = t.sta;
spin_lock_bh(&priv->ps_state_lock);
{
@@ -776,8 +775,6 @@ void cw1200_tx(struct ieee80211_hw *dev,
if (tid_update && sta)
ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);
- rcu_read_unlock();
-
cw1200_bh_wakeup(priv);
return;
@@ -1145,8 +1142,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
/* Remove TSF from the end of frame */
if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
- memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
- hdr->mactime = le64_to_cpu(hdr->mactime);
+ hdr->mactime = get_unaligned_le64(skb->data + skb->len - 8);
if (skb->len >= 8)
skb_trim(skb, skb->len - 8);
} else {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9144ef5538a8..289371689a8d 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -546,7 +546,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, -1, false);
if (!skb)
goto out;
size = skb->len;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 13d78ada4bb6..34d95f458e1a 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -131,10 +131,10 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
if (vector & TIME_SYNC_EVENT_ID)
wlcore_event_time_sync(wl,
- mbox->time_sync_tsf_high_msb,
- mbox->time_sync_tsf_high_lsb,
- mbox->time_sync_tsf_low_msb,
- mbox->time_sync_tsf_low_lsb);
+ le16_to_cpu(mbox->time_sync_tsf_high_msb),
+ le16_to_cpu(mbox->time_sync_tsf_high_lsb),
+ le16_to_cpu(mbox->time_sync_tsf_low_msb),
+ le16_to_cpu(mbox->time_sync_tsf_low_lsb));
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 138edd28b0de..a939fd89a7f5 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1065,7 +1065,7 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
} else {
skb = ieee80211_nullfunc_get(wl->hw,
wl12xx_wlvif_to_vif(wlvif),
- false);
+ -1, false);
if (!skb)
goto out;
size = skb->len;
@@ -1092,7 +1092,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, vif, false);
+ skb = ieee80211_nullfunc_get(wl->hw, vif, -1, false);
if (!skb)
goto out;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index dad38fc04243..1b532e00a56f 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1441,7 +1441,7 @@ static void wl3501_detach(struct pcmcia_device *link)
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- strlcpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
+ strscpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
return 0;
}
@@ -1652,7 +1652,7 @@ static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info,
if (wrqu->data.length > sizeof(this->nick))
return -E2BIG;
- strlcpy(this->nick, extra, wrqu->data.length);
+ strscpy(this->nick, extra, wrqu->data.length);
return 0;
}
@@ -1661,7 +1661,7 @@ static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info,
{
struct wl3501_card *this = netdev_priv(dev);
- strlcpy(extra, this->nick, 32);
+ strscpy(extra, this->nick, 32);
wrqu->data.length = strlen(extra);
return 0;
}
@@ -1965,7 +1965,7 @@ static int wl3501_config(struct pcmcia_device *link)
this->firmware_date[0] = '\0';
this->rssi = 255;
this->chan = iw_default_channel(this->reg_domain);
- strlcpy(this->nick, "Planet WL3501", sizeof(this->nick));
+ strscpy(this->nick, "Planet WL3501", sizeof(this->nick));
spin_lock_init(&this->lock);
init_waitqueue_head(&this->wait);
netif_start_queue(dev);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 57304a5adf68..b7f9237dedf7 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -590,7 +590,7 @@ int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
goto out;
}
- memcpy(skb_put(skb, count), buf, count);
+ skb_put_data(skb, buf, count);
IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index 27151148c782..2f1f8b5d5b59 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -103,8 +103,8 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
}
/* Transmit a packet */
-static int ipc_wwan_link_transmit(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
@@ -323,15 +323,16 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
ipc_wwan->dev = dev;
ipc_wwan->ipc_imem = ipc_imem;
+ mutex_init(&ipc_wwan->if_mutex);
+
/* WWAN core will create a netdev for the default IP MUX channel */
if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
IP_MUX_SESSION_DEFAULT)) {
+ mutex_destroy(&ipc_wwan->if_mutex);
kfree(ipc_wwan);
return NULL;
}
- mutex_init(&ipc_wwan->if_mutex);
-
return ipc_wwan;
}
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
index e4d0f696687f..f7ca52353f40 100644
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -258,6 +258,7 @@ static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
{ .chan = "DUN", .driver_data = WWAN_PORT_AT },
+ { .chan = "DUN2", .driver_data = WWAN_PORT_AT },
{ .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
{ .chan = "QMI", .driver_data = WWAN_PORT_QMI },
{ .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index c6b6547f2c6f..f71d3bc3b237 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -74,7 +74,7 @@ static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
return 0;
}
-static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
int skb_len = skb->len;
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index fad642f9ffd8..ff09a8cedf93 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -157,8 +157,8 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
if ((i + 1) < in->len && in->data[i + 1] == '\n')
i++;
n = i - s + 1;
- memcpy(skb_put(out, n), &in->data[s], n);/* Echo */
- memcpy(skb_put(out, 6), "\r\nOK\r\n", 6);
+ skb_put_data(out, &in->data[s], n);/* Echo */
+ skb_put_data(out, "\r\nOK\r\n", 6);
s = i + 1;
port->pstate = AT_PARSER_WAIT_A;
} else if (port->pstate == AT_PARSER_SKIP_LINE) {
@@ -171,7 +171,7 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
if (i > s) {
/* Echo the processed portion of a not yet completed command */
n = i - s;
- memcpy(skb_put(out, n), &in->data[s], n);
+ skb_put_data(out, &in->data[s], n);
}
consume_skb(in);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8174d7b2966c..1545cbee77a4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -62,7 +62,7 @@ struct pending_tx_info {
* ubuf_to_vif is a helper which finds the struct xenvif from a pointer
* to this field.
*/
- struct ubuf_info callback_struct;
+ struct ubuf_info_msgzc callback_struct;
};
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fb32ae82d9b0..650fa180220f 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -591,8 +591,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
}
for (i = 0; i < MAX_PENDING_REQS; i++) {
- queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
- { .callback = xenvif_zerocopy_callback,
+ queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
+ { { .callback = xenvif_zerocopy_callback },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
@@ -723,8 +723,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);
- netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);
queue->stalled = true;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a256695fc89e..3d2081bbbc86 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -133,7 +133,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
+static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
@@ -1228,11 +1228,12 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct xenvif_queue *queue = ubuf_to_queue(ubuf);
/* This is the only place where we grab this lock, to protect callbacks
@@ -1241,7 +1242,7 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
- ubuf = (struct ubuf_info *) ubuf->ctx;
+ ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
index = pending_index(queue->dealloc_prod);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 990360d75cb6..c1ba4294f364 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -866,13 +865,12 @@ static int connect_data_rings(struct backend_info *be,
* queue-N.
*/
if (num_queues == 1) {
- xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
+ xspath = kstrdup(dev->otherend, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM,
"reading ring references");
return -ENOMEM;
}
- strcpy(xspath, dev->otherend);
} else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kzalloc(xspathsize, GFP_KERNEL);
@@ -984,6 +982,7 @@ static int netback_remove(struct xenbus_device *dev)
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 27a11cc08c61..9af2b027c19c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -673,7 +673,7 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
return nxmit;
}
-struct sk_buff *bounce_skb(const struct sk_buff *skb)
+static struct sk_buff *bounce_skb(const struct sk_buff *skb)
{
unsigned int headerlen = skb_headroom(skb);
/* Align size to allocate full pages and avoid contiguous data leaks */
@@ -2224,8 +2224,7 @@ static int xennet_create_queues(struct netfront_info *info,
return ret;
}
- netif_napi_add(queue->info->netdev, &queue->napi,
- xennet_poll, 64);
+ netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
if (netif_running(info->netdev))
napi_enable(&queue->napi);
}
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 28a9e1eb9bcf..2d53e0f88d2f 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -336,14 +336,12 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
return 0;
}
-static int fdp_nci_i2c_remove(struct i2c_client *client)
+static void fdp_nci_i2c_remove(struct i2c_client *client)
{
struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
fdp_nci_remove(phy->ndev);
fdp_nci_i2c_disable(phy);
-
- return 0;
}
static const struct acpi_device_id fdp_nci_i2c_acpi_match[] = {
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 067295124eb9..5eaa18f81355 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -268,15 +268,13 @@ err_irq:
return r;
}
-static int microread_i2c_remove(struct i2c_client *client)
+static void microread_i2c_remove(struct i2c_client *client)
{
struct microread_i2c_phy *phy = i2c_get_clientdata(client);
microread_remove(phy->hdev);
free_irq(client->irq, phy);
-
- return 0;
}
static const struct i2c_device_id microread_i2c_id[] = {
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 01329b91d59d..acef0cfd76af 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -231,13 +231,11 @@ static int nfcmrvl_i2c_probe(struct i2c_client *client,
return 0;
}
-static int nfcmrvl_i2c_remove(struct i2c_client *client)
+static void nfcmrvl_i2c_remove(struct i2c_client *client)
{
struct nfcmrvl_i2c_drv_data *drv_data = i2c_get_clientdata(client);
nfcmrvl_nci_unregister_dev(drv_data->priv);
-
- return 0;
}
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index ae2ba08d8ac3..ec6446511984 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -314,14 +314,12 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
return r;
}
-static int nxp_nci_i2c_remove(struct i2c_client *client)
+static void nxp_nci_i2c_remove(struct i2c_client *client)
{
struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
nxp_nci_remove(phy->ndev);
free_irq(client->irq, phy);
-
- return 0;
}
static const struct i2c_device_id nxp_nci_i2c_id_table[] = {
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 673eb5e9b887..ddf3db286bad 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -227,7 +227,7 @@ nfc_alloc_err:
return r;
}
-static int pn533_i2c_remove(struct i2c_client *client)
+static void pn533_i2c_remove(struct i2c_client *client)
{
struct pn533_i2c_phy *phy = i2c_get_clientdata(client);
@@ -235,8 +235,6 @@ static int pn533_i2c_remove(struct i2c_client *client)
pn53x_unregister_nfc(phy->priv);
pn53x_common_clean(phy->priv);
-
- return 0;
}
static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = {
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2caf997f9bc9..07596bf5f7d6 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
pn53x_unregister_nfc(pn532->priv);
serdev_device_close(serdev);
pn53x_common_clean(pn532->priv);
+ del_timer_sync(&pn532->cmd_timeout);
kfree_skb(pn532->recv_skb);
kfree(pn532);
}
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 62a0f1a010cb..9e754abcfa2a 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -928,7 +928,7 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
return 0;
}
-static int pn544_hci_i2c_remove(struct i2c_client *client)
+static void pn544_hci_i2c_remove(struct i2c_client *client)
{
struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
@@ -940,8 +940,6 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
pn544_hci_i2c_disable(phy);
-
- return 0;
}
static const struct of_device_id of_pn544_i2c_match[] __maybe_unused = {
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 4d1cf1bb55b0..f824dc7099ce 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -246,14 +246,12 @@ disable_clk:
return ret;
}
-static int s3fwrn5_i2c_remove(struct i2c_client *client)
+static void s3fwrn5_i2c_remove(struct i2c_client *client)
{
struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
s3fwrn5_remove(phy->common.ndev);
clk_disable_unprepare(phy->clk);
-
- return 0;
}
static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index cbd968f013c7..89fa24d71bef 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -250,13 +250,11 @@ static int st_nci_i2c_probe(struct i2c_client *client,
return r;
}
-static int st_nci_i2c_remove(struct i2c_client *client)
+static void st_nci_i2c_remove(struct i2c_client *client)
{
struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
ndlc_remove(phy->ndlc);
-
- return 0;
}
static const struct i2c_device_id st_nci_i2c_id_table[] = {
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 42dc0e5eb161..76b55986bcf8 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -562,7 +562,7 @@ out_free:
return r;
}
-static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+static void st21nfca_hci_i2c_remove(struct i2c_client *client)
{
struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
@@ -571,8 +571,6 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
st21nfca_hci_i2c_disable(phy);
kfree_skb(phy->pending_skb);
-
- return 0;
}
static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
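The string of NFC conversions above all follow the same I2C core change: the
remove() callback of struct i2c_driver now returns void, so drivers only tear
down their state and can no longer propagate an error code. A hedged sketch of
the resulting pattern (driver and helper names are illustrative):

    static void example_i2c_remove(struct i2c_client *client)
    {
            struct example_phy *phy = i2c_get_clientdata(client);

            example_unregister(phy->ndev);   /* detach from the NFC core */
            free_irq(client->irq, phy);      /* release the interrupt */
            /* nothing to return; failures can only be logged */
    }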
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index bf4f5c09d9b1..bbe5099c836d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1712,8 +1712,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
res->flags = IORESOURCE_MEM;
for (i = 0; i < nd_region->ndr_mappings; i++) {
- uuid_t uuid;
-
nsl_get_uuid(ndd, nd_label, &uuid);
if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
continue;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7e88cd242380..96e6e9a5f235 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
+static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
return pmem->phys_addr + offset;
}
@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
- phys_addr_t phys = to_phys(pmem, offset);
+ phys_addr_t phys = pmem_to_phys(pmem, offset);
unsigned long pfn_start, pfn_end, pfn;
/* only pmem in the linear map supports HWPoison */
@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
static long __pmem_clear_poison(struct pmem_device *pmem,
phys_addr_t offset, unsigned int len)
{
- phys_addr_t phys = to_phys(pmem, offset);
+ phys_addr_t phys = pmem_to_phys(pmem, offset);
long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
if (cleared > 0) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index af367b22871b..059737c1a2c1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1111,8 +1111,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return effects;
}
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
- struct nvme_command *cmd, int status)
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status)
{
if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
nvme_unfreeze(ctrl);
@@ -1148,21 +1148,16 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
break;
}
}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
-int nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
{
struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
- u32 effects;
- int ret;
- effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- ret = nvme_execute_rq(rq, false);
- if (effects) /* nothing to be done for zero cmd effects */
- nvme_passthru_end(ctrl, effects, cmd, ret);
-
- return ret;
+ *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ return nvme_execute_rq(rq, false);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1177,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
@@ -1189,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return;
+ return RQ_END_IO_NONE;
}
ctrl->comp_seen = false;
@@ -1200,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
nvme_queue_keep_alive_work(ctrl);
+ return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@@ -2162,14 +2159,14 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
@@ -2696,7 +2693,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -2704,7 +2701,11 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
}
- /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
+ /*
+ * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
+ * Base Specification 2.0. It is slightly different from the format
+ * specified there due to historic reasons, and we can't change it now.
+ */
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
"nqn.2014.08.org.nvmexpress:%04x%04x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
@@ -2894,7 +2895,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
nvme_init_subnqn(subsys, ctrl, id);
memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
memcpy(subsys->model, id->mn, sizeof(subsys->model));
- memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
@@ -3113,6 +3113,8 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->quirks |= core_quirks[i].quirks;
}
}
+ memcpy(ctrl->subsys->firmware_rev, id->fr,
+ sizeof(ctrl->subsys->firmware_rev));
if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -3976,6 +3978,7 @@ static const struct file_operations nvme_ns_chr_fops = {
.unlocked_ioctl = nvme_ns_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_cdev(struct nvme_ns *ns)
@@ -4703,6 +4706,8 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_start_queues(ctrl);
/* read FW slot information to clear the AER */
nvme_get_fw_slot_info(ctrl);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
static u32 nvme_aer_type(u32 result)
@@ -4715,9 +4720,10 @@ static u32 nvme_aer_subtype(u32 result)
return (result & 0xff00) >> 8;
}
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
u32 aer_notice_type = nvme_aer_subtype(result);
+ bool requeue = true;
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4734,6 +4740,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
nvme_auth_stop(ctrl);
+ requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
}
break;
@@ -4750,6 +4757,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
+ return requeue;
}
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@ -4765,13 +4773,14 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
u32 result = le32_to_cpu(res->u32);
u32 aer_type = nvme_aer_type(result);
u32 aer_subtype = nvme_aer_subtype(result);
+ bool requeue = true;
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
switch (aer_type) {
case NVME_AER_NOTICE:
- nvme_handle_aen_notice(ctrl, result);
+ requeue = nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
/*
@@ -4792,10 +4801,114 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
break;
}
- queue_work(nvme_wq, &ctrl->async_event_work);
+
+ if (requeue)
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = flags;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ ctrl->admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+ goto out_free_tagset;
+ }
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->fabrics_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->fabrics_q)) {
+ ret = PTR_ERR(ctrl->fabrics_q);
+ goto out_cleanup_admin_q;
+ }
+ }
+
+ ctrl->admin_tagset = set;
+ return 0;
+
+out_cleanup_admin_q:
+ blk_mq_destroy_queue(ctrl->fabrics_q);
+out_free_tagset:
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+ blk_mq_destroy_queue(ctrl->admin_q);
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_mq_destroy_queue(ctrl->fabrics_q);
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = ctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = flags;
+ set->cmd_size = cmd_size,
+ set->driver_data = ctrl;
+ set->nr_hw_queues = ctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ if (ops->map_queues)
+ set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->connect_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
+ }
+ }
+
+ ctrl->tagset = set;
+ return 0;
+
+out_free_tag_set:
+ blk_mq_free_tag_set(set);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_mq_destroy_queue(ctrl->connect_q);
+ blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
@@ -4815,6 +4928,16 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_enable_aen(ctrl);
+ /*
+ * Persistent discovery controllers need to send an indication to userspace
+ * to re-read the discovery log page to learn about possible changes
+ * that were missed. We identify persistent discovery controllers by
+ * checking that they have started once before, hence they are reconnecting.
+ */
+ if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+ nvme_discovery_ctrl(ctrl))
+ nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
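The new nvme_alloc_admin_tag_set()/nvme_alloc_io_tag_set() helpers added above
centralize the tag-set and queue allocation that each fabrics transport used to
open-code; the FC conversion later in this patch is one user. A rough usage
sketch with illustrative ops and cmd_size values:

    ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
                                   &example_admin_mq_ops, BLK_MQ_F_NO_SCHED,
                                   sizeof(struct example_request));
    if (ret)
            return ret;                      /* admin_q (and fabrics_q) set up */
    ...
    nvme_remove_admin_tag_set(&ctrl->ctrl);  /* tears both down again */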
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 10cc4a814602..ce27276f552d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -49,7 +49,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
goto out_unlock;
kref_init(&host->ref);
- strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
@@ -971,13 +971,17 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
return false;
/*
- * Checking the local address is rough. In most cases, none is specified
- * and the host port is selected by the stack.
+ * Checking the local address or host interface is rough.
+ *
+ * In most cases, none is specified and the host port or
+ * host interface is selected by the stack.
*
* Assume no match if:
- * - local address is specified and address is not the same
- * - local address is not specified but remote is, or vice versa
- * (admin using specific host_traddr when it matters).
+ * - local address or host interface is specified and address
+ * or host interface is not the same
+ * - local address or host interface is not specified but
+ * remote is, or vice versa (admin using specific
+ * host_traddr/host_iface when it matters).
*/
if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
(ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
@@ -988,6 +992,15 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
return false;
}
+ if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ if (strcmp(opts->host_iface, ctrl->opts->host_iface))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ return false;
+ }
+
return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 127abaf9ba5d..5d57a042dbca 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1829,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- return __nvme_fc_exit_request(set->driver_data, op);
+ return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
}
static int
@@ -2135,7 +2135,7 @@ static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
- struct nvme_fc_ctrl *ctrl = set->driver_data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -2206,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
}
}
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
- unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
struct nvme_fc_queue *queue = &ctrl->queues[qidx];
hctx->driver_data = queue;
queue->hctx = hctx;
+ return 0;
}
static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
{
- struct nvme_fc_ctrl *ctrl = data;
-
- __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
- return 0;
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
}
static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_fc_ctrl *ctrl = data;
-
- __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
- return 0;
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx);
}
static void
@@ -2391,10 +2383,8 @@ nvme_fc_ctrl_free(struct kref *ref)
container_of(ref, struct nvme_fc_ctrl, ref);
unsigned long flags;
- if (ctrl->ctrl.tagset) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (ctrl->ctrl.tagset)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
/* remove from rport list */
spin_lock_irqsave(&ctrl->rport->lock, flags);
@@ -2402,9 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
nvme_start_admin_queue(&ctrl->ctrl);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
kfree(ctrl->queues);
@@ -2860,9 +2848,9 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
-static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_fc_ctrl *ctrl = set->driver_data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
int i;
for (i = 0; i < set->nr_maps; i++) {
@@ -2880,7 +2868,6 @@ static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
else
blk_mq_map_queues(map);
}
- return 0;
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
@@ -2915,32 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
nvme_fc_init_io_queues(ctrl);
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_fc_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size =
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
return ret;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tag_set;
-
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_tagset;
ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
@@ -2952,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
out_delete_hw_queues:
nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
nvme_fc_free_io_queues(ctrl);
/* force put free routine to ignore io queues */
@@ -3166,15 +3135,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
"to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
- }
-
- if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- /* warn if sqsize is lower than queue_size */
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, reducing "
- "to sqsize\n",
- opts->queue_size, ctrl->ctrl.sqsize + 1);
- opts->queue_size = ctrl->ctrl.sqsize + 1;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
}
ret = nvme_fc_init_aen_ops(ctrl);
@@ -3547,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
nvme_fc_init_queue(ctrl, 0);
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size =
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
- ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
goto out_free_queues;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- ret = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_admin_tag_set;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- ret = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/*
* Would have been nice to init io queues tag set as well.
@@ -3586,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
- goto out_cleanup_admin_q;
+ goto out_cleanup_tagset;
/* at this point, teardown path changes to ref counting on nvme ctrl */
@@ -3641,12 +3579,8 @@ fail_ctrl:
return ERR_PTR(-EIO);
-out_cleanup_admin_q:
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_queues:
kfree(ctrl->queues);
out_free_ida:
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 27614bee7380..81f5550b670d 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -20,19 +20,20 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
- unsigned len, u32 seed, bool write)
+static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ unsigned len, u32 seed)
{
struct bio_integrity_payload *bip;
int ret = -ENOMEM;
void *buf;
+ struct bio *bio = req->bio;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
goto out;
ret = -EFAULT;
- if (write && copy_from_user(buf, ubuf, len))
+ if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
goto out_free_meta;
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
@@ -45,9 +46,13 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
bip->bip_iter.bi_sector = seed;
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf));
- if (ret == len)
- return buf;
- ret = -ENOMEM;
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto out_free_meta;
+ }
+
+ req->cmd_flags |= REQ_INTEGRITY;
+ return buf;
out_free_meta:
kfree(buf);
out:
@@ -65,90 +70,102 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
}
static struct request *nvme_alloc_user_request(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
- unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, unsigned timeout, bool vec,
- blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
+ struct nvme_command *cmd, blk_opf_t rq_flags,
+ blk_mq_req_flags_t blk_flags)
{
- bool write = nvme_is_write(cmd);
- struct nvme_ns *ns = q->queuedata;
- struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
- struct bio *bio = NULL;
- void *meta = NULL;
- int ret;
req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
if (IS_ERR(req))
return req;
nvme_init_request(req, cmd);
-
- if (timeout)
- req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
- if (ubuffer && bufflen) {
- if (!vec)
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
- GFP_KERNEL);
- else {
- struct iovec fast_iov[UIO_FASTIOV];
- struct iovec *iov = fast_iov;
- struct iov_iter iter;
-
- ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
- UIO_FASTIOV, &iov, &iter);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter,
- GFP_KERNEL);
- kfree(iov);
- }
- if (ret)
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+ bool vec)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ struct iov_iter iter;
+
+ /* fixedbufs is only for non-vectored io */
+ if (WARN_ON_ONCE(vec))
+ return -EINVAL;
+ ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
goto out;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
- meta_seed, write);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
- }
- req->cmd_flags |= REQ_INTEGRITY;
- *metap = meta;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ } else {
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+ bufflen, GFP_KERNEL, vec, 0, 0,
+ rq_data_dir(req));
+ }
+
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
+ meta_seed);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
}
+ *metap = meta;
}
- return req;
+ return ret;
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
- return ERR_PTR(ret);
+ return ret;
}
static int nvme_submit_user_cmd(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
+ struct nvme_command *cmd, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
+ struct nvme_ctrl *ctrl;
struct request *req;
void *meta = NULL;
struct bio *bio;
+ u32 effects;
int ret;
- req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+ req = nvme_alloc_user_request(q, cmd, 0, 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->timeout = timeout;
+ if (ubuffer && bufflen) {
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, NULL, vec);
+ if (ret)
+ return ret;
+ }
+
bio = req->bio;
+ ctrl = nvme_req(req)->ctrl;
- ret = nvme_execute_passthru_rq(req);
+ ret = nvme_execute_passthru_rq(req, &effects);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -158,6 +175,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (bio)
blk_rq_unmap_user(bio);
blk_mq_free_request(req);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, cmd, ret);
+
return ret;
}
@@ -220,7 +241,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
- nvme_to_user_ptr(io.addr), length,
+ io.addr, length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
false);
}
@@ -274,7 +295,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout, false);
@@ -320,7 +341,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout, vec);
@@ -349,9 +370,15 @@ struct nvme_uring_cmd_pdu {
struct bio *bio;
struct request *req;
};
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
u32 meta_len;
+ u32 nvme_status;
+ union {
+ struct {
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ };
+ u64 result;
+ } u;
};
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -360,11 +387,10 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
- struct bio *bio = req->bio;
int status;
u64 result;
@@ -375,27 +401,72 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
result = le64_to_cpu(nvme_req(req)->result.u64);
- if (pdu->meta)
- status = nvme_finish_user_metadata(req, pdu->meta_buffer,
- pdu->meta, pdu->meta_len, status);
- if (bio)
- blk_rq_unmap_user(bio);
+ if (pdu->meta_len)
+ status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+ pdu->u.meta, pdu->meta_len, status);
+ if (req->bio)
+ blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
io_uring_cmd_done(ioucmd, status, result);
}
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ blk_status_t err)
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- /* extract bio before reusing the same field for request */
- struct bio *bio = pdu->bio;
+ void *cookie = READ_ONCE(ioucmd->cookie);
+ req->bio = pdu->bio;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ pdu->nvme_status = -EINTR;
+ else
+ pdu->nvme_status = nvme_req(req)->status;
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ return RQ_END_IO_FREE;
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
pdu->req = req;
- req->bio = bio;
- /* this takes care of moving rest of completion-work to task context */
- io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_meta_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+
+ return RQ_END_IO_NONE;
}
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -410,6 +481,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -445,23 +517,45 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
rq_flags = REQ_NOWAIT;
blk_flags = BLK_MQ_REQ_NOWAIT;
}
+ if (issue_flags & IO_URING_F_IOPOLL)
+ rq_flags |= REQ_POLLED;
- req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, d.timeout_ms ?
- msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
- blk_flags);
+retry:
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
- req->end_io = nvme_uring_cmd_end_io;
- req->end_io_data = ioucmd;
+ req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
+
+ if (d.addr && d.data_len) {
+ ret = nvme_map_user_request(req, d.addr,
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, ioucmd, vec);
+ if (ret)
+ return ret;
+ }
+ if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+ if (unlikely(!req->bio)) {
+ /* we can't poll this, so alloc regular req instead */
+ blk_mq_free_request(req);
+ rq_flags &= ~REQ_POLLED;
+ goto retry;
+ } else {
+ WRITE_ONCE(ioucmd->cookie, req->bio);
+ req->bio->bi_opf |= REQ_POLLED;
+ }
+ }
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta = meta;
- pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
pdu->meta_len = d.metadata_len;
-
+ req->end_io_data = ioucmd;
+ if (pdu->meta_len) {
+ pdu->u.meta = meta;
+ pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
+ req->end_io = nvme_uring_cmd_end_io_meta;
+ } else {
+ req->end_io = nvme_uring_cmd_end_io;
+ }
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
@@ -559,9 +653,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
- /* IOPOLL not supported yet */
- if (issue_flags & IO_URING_F_IOPOLL)
- return -EOPNOTSUPP;
/* NVMe passthrough requires big SQE/CQE support */
if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -604,6 +695,25 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct bio *bio;
+ int ret = 0;
+ struct nvme_ns *ns;
+ struct request_queue *q;
+
+ rcu_read_lock();
+ bio = READ_ONCE(ioucmd->cookie);
+ ns = container_of(file_inode(ioucmd->file)->i_cdev,
+ struct nvme_ns, cdev);
+ q = ns->queue;
+ if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+ ret = bio_poll(bio, iob, poll_flags);
+ rcu_read_unlock();
+ return ret;
+}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -685,6 +795,31 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ struct bio *bio;
+ int ret = 0;
+ struct request_queue *q;
+
+ if (ns) {
+ rcu_read_lock();
+ bio = READ_ONCE(ioucmd->cookie);
+ q = ns->queue;
+ if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+ && bio->bi_bdev)
+ ret = bio_poll(bio, iob, poll_flags);
+ rcu_read_unlock();
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
@@ -692,6 +827,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
struct nvme_ctrl *ctrl = ioucmd->file->private_data;
int ret;
+ /* IOPOLL not supported yet */
+ if (issue_flags & IO_URING_F_IOPOLL)
+ return -EOPNOTSUPP;
+
ret = nvme_uring_cmd_checks(issue_flags);
if (ret)
return ret;
@@ -757,11 +896,17 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
dev_warn(ctrl->device, "resetting controller\n");
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
nvme_queue_scan(ctrl);
return 0;
default:
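The ioctl changes above wire NVMe passthrough into io_uring IOPOLL: at submit
time the request's bio is stashed in ioucmd->cookie, and the new
->uring_cmd_iopoll file operations read it back and drive bio_poll(). A
trimmed sketch of the polling side (function name illustrative, and the
rcu/srcu locking of the real handlers is omitted):

    int example_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
                                 struct io_comp_batch *iob,
                                 unsigned int poll_flags)
    {
            struct bio *bio = READ_ONCE(ioucmd->cookie);  /* set at submit */

            if (bio && bio->bi_bdev)
                    return bio_poll(bio, iob, poll_flags);
            return 0;
    }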
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6ef497c75a16..00f2f81e20fa 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
.unlocked_ioctl = nvme_ns_head_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_head_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1bdf714dcd9e..a29877217ee6 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -233,6 +233,12 @@ struct nvme_fault_inject {
#endif
};
+enum nvme_ctrl_flags {
+ NVME_CTRL_FAILFAST_EXPIRED = 0,
+ NVME_CTRL_ADMIN_Q_STOPPED = 1,
+ NVME_CTRL_STARTED_ONCE = 2,
+};
+
struct nvme_ctrl {
bool comp_seen;
enum nvme_ctrl_state state;
@@ -354,8 +360,6 @@ struct nvme_ctrl {
u16 maxcmd;
int nr_reconnects;
unsigned long flags;
-#define NVME_CTRL_FAILFAST_EXPIRED 0
-#define NVME_CTRL_ADMIN_Q_STOPPED 1
struct nvmf_ctrl_options *opts;
struct page *discard_page;
@@ -602,11 +606,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
static inline void nvme_should_fail(struct request *req) {}
#endif
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
+ int ret;
+
if (!ctrl->subsystem)
return -ENOTTY;
- return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
}
/*
@@ -712,7 +728,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -722,6 +737,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
@@ -802,7 +825,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
@@ -821,6 +843,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
@@ -968,14 +994,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
}
#endif
-static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
-{
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q))
- return PTR_ERR(ctrl->connect_q);
- return 0;
-}
-
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
@@ -1023,7 +1041,9 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3a1c37f32f30..5b796efa325b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -226,12 +226,12 @@ struct nvme_queue {
struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
- struct nvme_queue *nvmeq;
bool use_sgl;
- int aborted;
- int npages; /* In the PRP list. 0 means small pool in use */
- dma_addr_t first_dma;
+ bool aborted;
+ s8 nr_allocations; /* PRP list pool allocations. 0 means small
+ pool in use */
unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t first_dma;
dma_addr_t meta_dma;
struct sg_table sgt;
};
@@ -430,11 +430,6 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = &dev->queues[queue_idx];
-
- BUG_ON(!nvmeq);
- iod->nvmeq = nvmeq;
nvme_req(req)->ctrl = &dev->ctrl;
nvme_req(req)->cmd = &iod->cmd;
@@ -450,7 +445,7 @@ static int queue_irq_offset(struct nvme_dev *dev)
return 0;
}
-static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_dev *dev = set->driver_data;
int i, qoff, offset;
@@ -477,8 +472,6 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
qoff += map->nr_queues;
offset += map->nr_queues;
}
-
- return 0;
}
/*
@@ -528,7 +521,7 @@ static void **nvme_pci_iod_list(struct request *req)
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
@@ -536,7 +529,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
if (!nvme_ctrl_sgl_supported(&dev->ctrl))
return false;
- if (!iod->nvmeq->qid)
+ if (!nvmeq->qid)
return false;
if (!sgl_threshold || avg_seg_size < sgl_threshold)
return false;
@@ -550,7 +543,7 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
dma_addr_t dma_addr = iod->first_dma;
int i;
- for (i = 0; i < iod->npages; i++) {
+ for (i = 0; i < iod->nr_allocations; i++) {
__le64 *prp_list = nvme_pci_iod_list(req)[i];
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
@@ -566,7 +559,7 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
dma_addr_t dma_addr = iod->first_dma;
int i;
- for (i = 0; i < iod->npages; i++) {
+ for (i = 0; i < iod->nr_allocations; i++) {
struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
@@ -589,7 +582,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- if (iod->npages == 0)
+ if (iod->nr_allocations == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
iod->first_dma);
else if (iod->use_sgl)
@@ -651,15 +644,15 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
- iod->npages = 0;
+ iod->nr_allocations = 0;
} else {
pool = dev->prp_page_pool;
- iod->npages = 1;
+ iod->nr_allocations = 1;
}
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->npages = -1;
+ iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
list[0] = prp_list;
@@ -671,7 +664,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
goto free_prps;
- list[iod->npages++] = prp_list;
+ list[iod->nr_allocations++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
@@ -746,15 +739,15 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
pool = dev->prp_small_pool;
- iod->npages = 0;
+ iod->nr_allocations = 0;
} else {
pool = dev->prp_page_pool;
- iod->npages = 1;
+ iod->nr_allocations = 1;
}
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list) {
- iod->npages = -1;
+ iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
@@ -773,7 +766,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
goto free_sgls;
i = 0;
- nvme_pci_iod_list(req)[iod->npages++] = sg_list;
+ nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;
sg_list[i++] = *link;
nvme_pci_sgl_set_seg(link, sgl_dma, entries);
}
@@ -833,6 +826,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -840,7 +834,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
- if (iod->nvmeq->qid && sgl_threshold &&
+ if (nvmeq->qid && sgl_threshold &&
nvme_ctrl_sgl_supported(&dev->ctrl))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
@@ -898,8 +892,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
- iod->aborted = 0;
- iod->npages = -1;
+ iod->aborted = false;
+ iod->nr_allocations = -1;
iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
@@ -1019,12 +1013,16 @@ static void nvme_queue_rqs(struct request **rqlist)
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_dev *dev = iod->nvmeq->dev;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+
+ if (blk_integrity_rq(req)) {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+ }
+
if (blk_rq_nr_phys_segments(req))
nvme_unmap_data(dev, req);
}
@@ -1270,15 +1268,15 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = iod->nvmeq;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
dev_warn(nvmeq->dev->ctrl.device,
"Abort status: 0x%x", nvme_req(req)->status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1335,7 +1333,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = iod->nvmeq;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
@@ -1416,7 +1414,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
- iod->aborted = 1;
+ iod->aborted = true;
cmd.abort.opcode = nvme_admin_abort_cmd;
cmd.abort.cid = nvme_cid(req);
@@ -2450,22 +2448,25 @@ out_unlock:
return result;
}
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
blk_mq_free_request(req);
complete(&nvmeq->delete_done);
+ return RQ_END_IO_NONE;
}
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
if (error)
set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
- nvme_del_queue_end(req, error);
+ return nvme_del_queue_end(req, error);
}
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
@@ -2529,9 +2530,11 @@ static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
set->ops = &nvme_mq_ops;
set->nr_hw_queues = dev->online_queues - 1;
- set->nr_maps = 2; /* default + read */
+ set->nr_maps = 1;
+ if (dev->io_queues[HCTX_TYPE_READ])
+ set->nr_maps = 2;
if (dev->io_queues[HCTX_TYPE_POLL])
- set->nr_maps++;
+ set->nr_maps = 3;
set->timeout = NVME_IO_TIMEOUT;
set->numa_node = dev->ctrl.numa_node;
set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
@@ -2834,6 +2837,8 @@ static void nvme_reset_work(struct work_struct *work)
nvme_start_admin_queue(&dev->ctrl);
}
+ dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
+
/*
* Limit the max command size to prevent iod->sg allocations going
* over a single page.
@@ -2846,7 +2851,6 @@ static void nvme_reset_work(struct work_struct *work)
* Don't limit the IOMMU merged segment size.
*/
dma_set_max_seg_size(dev->dev, 0xffffffff);
- dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
mutex_unlock(&dev->shutdown_lock);
@@ -3470,6 +3474,10 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5019), /* Phison E19 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
@@ -3517,6 +3525,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@@ -3563,6 +3573,8 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+ BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
+ S8_MAX);
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3100643be299..5ad0ab2853a4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -295,7 +295,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
@@ -320,7 +320,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -332,7 +332,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
struct nvme_rdma_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -696,11 +696,12 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
return ret;
}
-static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
+static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
+ int first, int last)
{
int i, ret = 0;
- for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_rdma_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -709,7 +710,7 @@ static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_rdma_stop_queue(&ctrl->queues[i]);
return ret;
}
@@ -787,64 +788,21 @@ out_free_queues:
return ret;
}
-static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
- int ret;
+ unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- ctrl->ctrl.admin_tagset = set;
- return ret;
-}
-
-static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->tag_set;
- int ret;
+ if (ctrl->max_integrity_segments)
+ cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- if (nctrl->max_integrity_segments)
- set->cmd_size += sizeof(struct nvme_rdma_sgl) +
- NVME_RDMA_METADATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- ctrl->ctrl.tagset = set;
- return ret;
+ return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+ &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
}
-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
- bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
- if (remove) {
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
- }
if (ctrl->async_event_sqe.data) {
cancel_work_sync(&ctrl->ctrl.async_event_work);
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -886,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+ &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+ BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE);
if (error)
goto out_free_async_qe;
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
}
error = nvme_rdma_start_queue(ctrl, 0);
if (error)
- goto out_cleanup_queue;
+ goto out_remove_admin_tag_set;
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
@@ -932,15 +883,9 @@ out_quiesce_queue:
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_cleanup_queue:
- if (new)
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
+out_remove_admin_tag_set:
if (new)
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- if (new)
- blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_async_qe:
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -952,19 +897,9 @@ out_free_queue:
return error;
}
-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
- bool remove)
-{
- if (remove) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(ctrl->ctrl.tagset);
- }
- nvme_rdma_free_io_queues(ctrl);
-}
-
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_rdma_alloc_io_queues(ctrl);
if (ret)
@@ -974,15 +909,17 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
if (ret)
goto out_free_io_queues;
-
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tag_set;
}
- ret = nvme_rdma_start_io_queues(ctrl);
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * number of queues might have changed.
+ */
+ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
+ ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
if (!new) {
nvme_start_queues(&ctrl->ctrl);
@@ -1000,19 +937,25 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
nvme_unfreeze(&ctrl->ctrl);
}
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
+ ctrl->tag_set.nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
return 0;
out_wait_freeze_timed_out:
nvme_stop_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
-out_cleanup_connect_q:
+out_cleanup_tagset:
nvme_cancel_tagset(&ctrl->ctrl);
if (new)
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
- if (new)
- blk_mq_free_tag_set(ctrl->ctrl.tagset);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
@@ -1025,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
- if (remove)
+ if (remove) {
nvme_start_admin_queue(&ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl, remove);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_destroy_admin_queue(ctrl);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1039,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
- if (remove)
+ if (remove) {
nvme_start_queues(&ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, remove);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_free_io_queues(ctrl);
}
}
@@ -1163,14 +1110,18 @@ destroy_io:
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, new);
+ if (new)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
}
destroy_admin:
nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl, new);
+ if (new)
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
return ret;
}
@@ -2188,9 +2139,9 @@ static void nvme_rdma_complete_rq(struct request *rq)
nvme_complete_rq(rq);
}
-static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
@@ -2231,8 +2182,6 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT],
ctrl->io_queues[HCTX_TYPE_READ],
ctrl->io_queues[HCTX_TYPE_POLL]);
-
- return 0;
}
static const struct blk_mq_ops nvme_rdma_mq_ops = {
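
The rdma.c changes above (and the matching tcp.c ones below) give nvme_rdma_start_io_queues() a [first, last) range, so a reconnect that reports more queues than the current tagset covers can start the already-mapped queues first and the newly added ones only after the freeze is lifted. A minimal userspace sketch of that range-start-with-rollback pattern; start_queue()/stop_queue() are stubs, not the driver functions.

#include <stdio.h>

static int start_queue(int i)  { printf("start %d\n", i); return i == 5 ? -1 : 0; }
static void stop_queue(int i)  { printf("stop  %d\n", i); }

/* Start queues [first, last); on failure stop the ones already started,
 * but never below 'first' -- mirroring the first/last parameters added
 * by the patch. */
static int start_io_queues(int first, int last)
{
        int i, ret = 0;

        for (i = first; i < last; i++) {
                ret = start_queue(i);
                if (ret)
                        goto out_stop;
        }
        return 0;

out_stop:
        for (i--; i >= first; i--)
                stop_queue(i);
        return ret;
}

int main(void)
{
        /* Phase 1: queues covered by the current tagset. */
        if (start_io_queues(1, 4))
                return 1;
        /* Phase 2: queues that only exist after a larger reconnect. */
        return start_io_queues(4, 6) ? 1 : 0;
}
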
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 044da18c06f5..93e2e313fa70 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
- bool more_requests;
/* recv state */
void *pdu;
@@ -134,7 +133,6 @@ struct nvme_tcp_queue {
/* send state */
struct nvme_tcp_request *request;
- int queue_size;
u32 maxh2cdata;
size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl;
@@ -320,7 +318,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
+ !llist_empty(&queue->req_list);
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -339,9 +337,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
*/
if (queue->io_cpu == raw_smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
- queue->more_requests = !last;
nvme_tcp_send_all(queue);
- queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
}
@@ -466,7 +462,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_tcp_cmd_pdu *pdu;
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
@@ -490,7 +486,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
hctx->driver_data = queue;
@@ -500,7 +496,7 @@ static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
struct nvme_tcp_queue *queue = &ctrl->queues[0];
hctx->driver_data = queue;
@@ -1229,7 +1225,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
else if (unlikely(result < 0))
return;
- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
@@ -1479,8 +1475,7 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
-static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
- int qid, size_t queue_size)
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1492,7 +1487,6 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
INIT_LIST_HEAD(&queue->send_list);
mutex_init(&queue->send_mutex);
INIT_WORK(&queue->io_work, nvme_tcp_io_work);
- queue->queue_size = queue_size;
if (qid > 0)
queue->cmnd_capsule_len = nctrl->ioccsz * 16;
@@ -1690,51 +1684,6 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
- int ret;
-
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- nctrl->admin_tagset = set;
- return ret;
-}
-
-static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->tag_set;
- int ret;
-
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- nctrl->tagset = set;
- return ret;
-}
-
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
if (to_tcp_ctrl(ctrl)->async_req.pdu) {
@@ -1762,11 +1711,12 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
nvme_tcp_stop_queue(ctrl, i);
}
-static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+ int first, int last)
{
int i, ret;
- for (i = 1; i < ctrl->queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_tcp_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -1775,7 +1725,7 @@ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_tcp_stop_queue(ctrl, i);
return ret;
}
@@ -1784,7 +1734,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
int ret;
- ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+ ret = nvme_tcp_alloc_queue(ctrl, 0);
if (ret)
return ret;
@@ -1804,7 +1754,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
+ ret = nvme_tcp_alloc_queue(ctrl, i);
if (ret)
goto out_free_queues;
}
@@ -1892,32 +1842,35 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
- if (remove) {
- blk_mq_destroy_queue(ctrl->connect_q);
- blk_mq_free_tag_set(ctrl->tagset);
- }
+ if (remove)
+ nvme_remove_io_tag_set(ctrl);
nvme_tcp_free_io_queues(ctrl);
}
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_tcp_alloc_io_queues(ctrl);
if (ret)
return ret;
if (new) {
- ret = nvme_tcp_alloc_tag_set(ctrl);
+ ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ &nvme_tcp_mq_ops,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+ sizeof(struct nvme_tcp_request));
if (ret)
goto out_free_io_queues;
-
- ret = nvme_ctrl_init_connect_q(ctrl);
- if (ret)
- goto out_free_tag_set;
}
- ret = nvme_tcp_start_io_queues(ctrl);
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * number of queues might have changed.
+ */
+ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
+ ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
if (ret)
goto out_cleanup_connect_q;
@@ -1937,6 +1890,15 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
nvme_unfreeze(ctrl);
}
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
+ ctrl->tagset->nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
return 0;
out_wait_freeze_timed_out:
@@ -1946,10 +1908,7 @@ out_wait_freeze_timed_out:
out_cleanup_connect_q:
nvme_cancel_tagset(ctrl);
if (new)
- blk_mq_destroy_queue(ctrl->connect_q);
-out_free_tag_set:
- if (new)
- blk_mq_free_tag_set(ctrl->tagset);
+ nvme_remove_io_tag_set(ctrl);
out_free_io_queues:
nvme_tcp_free_io_queues(ctrl);
return ret;
@@ -1958,11 +1917,8 @@ out_free_io_queues:
static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_queue(ctrl, 0);
- if (remove) {
- blk_mq_destroy_queue(ctrl->admin_q);
- blk_mq_destroy_queue(ctrl->fabrics_q);
- blk_mq_free_tag_set(ctrl->admin_tagset);
- }
+ if (remove)
+ nvme_remove_admin_tag_set(ctrl);
nvme_tcp_free_admin_queue(ctrl);
}
@@ -1975,26 +1931,17 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- error = nvme_tcp_alloc_admin_tag_set(ctrl);
+ error = nvme_alloc_admin_tag_set(ctrl,
+ &to_tcp_ctrl(ctrl)->admin_tag_set,
+ &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+ sizeof(struct nvme_tcp_request));
if (error)
goto out_free_queue;
-
- ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
- if (IS_ERR(ctrl->fabrics_q)) {
- error = PTR_ERR(ctrl->fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
- if (IS_ERR(ctrl->admin_q)) {
- error = PTR_ERR(ctrl->admin_q);
- goto out_cleanup_fabrics_q;
- }
}
error = nvme_tcp_start_queue(ctrl, 0);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
error = nvme_enable_ctrl(ctrl);
if (error)
@@ -2014,15 +1961,9 @@ out_quiesce_queue:
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
-out_cleanup_queue:
- if (new)
- blk_mq_destroy_queue(ctrl->admin_q);
-out_cleanup_fabrics_q:
+out_cleanup_tagset:
if (new)
- blk_mq_destroy_queue(ctrl->fabrics_q);
-out_free_tagset:
- if (new)
- blk_mq_free_tag_set(ctrl->admin_tagset);
+ nvme_remove_admin_tag_set(ctrl);
out_free_queue:
nvme_tcp_free_admin_queue(ctrl);
return error;
@@ -2471,9 +2412,9 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
@@ -2512,8 +2453,6 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT],
ctrl->io_queues[HCTX_TYPE_READ],
ctrl->io_queues[HCTX_TYPE_POLL]);
-
- return 0;
}
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
@@ -2532,6 +2471,25 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return queue->nr_cqe;
}
+static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
+ struct sockaddr_storage src_addr;
+ int ret, len;
+
+ len = nvmf_get_address(ctrl, buf, size);
+
+ ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+ if (ret > 0) {
+ if (len > 0)
+ len--; /* strip trailing newline */
+ len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+ (len) ? "," : "", &src_addr);
+ }
+
+ return len;
+}
+
static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.commit_rqs = nvme_tcp_commit_rqs,
@@ -2563,7 +2521,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.free_ctrl = nvme_tcp_free_ctrl,
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
- .get_address = nvmf_get_address,
+ .get_address = nvme_tcp_get_address,
.stop_ctrl = nvme_tcp_stop_ctrl,
};
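
nvme_tcp_get_address() above extends the generic fabrics address string with the connection's source address: it drops the trailing newline left by nvmf_get_address() and appends ",src_addr=...". A userspace sketch of that string handling; plain snprintf stands in for scnprintf (which clamps its return value), so the buffer here is sized generously and the address is a literal rather than a kernel_getsockname() result.

#include <stdio.h>

/* Append ",src_addr=<addr>\n" to an existing "traddr=...,trsvcid=...\n"
 * style buffer, re-using the position of its trailing newline. */
static int append_src_addr(char *buf, int size, int len, const char *src)
{
        if (len > 0)
                len--;                  /* strip trailing newline */
        len += snprintf(buf + len, size - len, "%ssrc_addr=%s\n",
                        len ? "," : "", src);
        return len;
}

int main(void)
{
        char buf[128];
        int len = snprintf(buf, sizeof(buf), "traddr=192.168.0.10,trsvcid=4420\n");

        len = append_src_addr(buf, sizeof(buf), len, "192.168.0.2");
        fputs(buf, stdout);     /* traddr=...,trsvcid=...,src_addr=192.168.0.2 */
        printf("len=%d\n", len);
        return 0;
}
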
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index fc8a957fad0a..c8a061ce3ee5 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -449,7 +449,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/*
* Max command capsule size is sqe + in-capsule data size.
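
The strlcpy() to strscpy() conversions here and in discovery.c keep the truncation behaviour but change the return contract: strscpy() reports the number of bytes copied, or -E2BIG on truncation, rather than the full source length that strlcpy() has to walk the whole source to compute. A tiny userspace approximation of that contract, purely illustrative; the real implementation lives in lib/string.c.

#include <stdio.h>
#include <errno.h>

/* Illustrative only: copy at most size-1 bytes, always NUL-terminate,
 * return bytes copied or -E2BIG if the source did not fit. */
static long my_strscpy(char *dst, const char *src, unsigned long size)
{
        unsigned long i;

        if (!size)
                return -E2BIG;
        for (i = 0; i < size - 1 && src[i]; i++)
                dst[i] = src[i];
        dst[i] = '\0';
        return src[i] ? -E2BIG : (long)i;
}

int main(void)
{
        char subnqn[16];

        printf("%ld\n", my_strscpy(subnqn, "nqn.2014-08.org.nvmexpress.discovery",
                                   sizeof(subnqn)));   /* -E2BIG: truncated */
        printf("%s\n", subnqn);                         /* still NUL-terminated */
        return 0;
}
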
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index cf690df34775..c4113b43dbfe 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -196,6 +196,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
if (IS_ERR(ctrl->ctrl_key)) {
ret = PTR_ERR(ctrl->ctrl_key);
ctrl->ctrl_key = NULL;
+ goto out_free_hash;
}
pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
ctrl->ctrl_key->hash > 0 ?
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2bcd60758919..e34a2896fedb 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1281,6 +1281,34 @@ static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
+static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
+}
+
+static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 qid_max;
+
+ if (sscanf(page, "%hu\n", &qid_max) != 1)
+ return -EINVAL;
+
+ if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->max_qid = qid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
@@ -1288,6 +1316,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_qid_max,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
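
The new attr_qid_max attribute limits a subsystem's maximum queue ID to the 1..NVMET_NR_QUEUES range. From userspace it would presumably be driven through configfs, e.g. echo 8 > /sys/kernel/config/nvmet/subsystems/<nqn>/attr_qid_max (path assumed). The parse-and-range-check half as a standalone sketch, with an illustrative NVMET_NR_QUEUES value:

#include <stdio.h>

#define NVMET_NR_QUEUES 128     /* illustrative; the real value comes from nvmet.h */

/* Parse a decimal queue count and accept only 1..NVMET_NR_QUEUES,
 * mirroring the sscanf + range check in the store handler. */
static int parse_qid_max(const char *page, unsigned short *qid_max)
{
        if (sscanf(page, "%hu\n", qid_max) != 1)
                return -1;
        if (*qid_max < 1 || *qid_max > NVMET_NR_QUEUES)
                return -1;
        return 0;
}

int main(void)
{
        unsigned short q;

        printf("%d\n", parse_qid_max("8\n", &q));       /* 0, q == 8 */
        printf("%d\n", parse_qid_max("0\n", &q));       /* -1, out of range */
        printf("%d\n", parse_qid_max("junk", &q));      /* -1, not a number */
        return 0;
}
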
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a1345790005f..14677145bbba 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
@@ -830,6 +832,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
init_completion(&sq->free_done);
init_completion(&sq->confirm_done);
+ nvmet_auth_sq_init(sq);
return 0;
}
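
The __nvmet_req_complete() hunk caches req->ns in a local and drops the namespace reference only after the response has been queued, since the request may be gone once queue_response() runs. A minimal sketch of that capture-then-release ordering with stand-in types; nothing here is the nvmet API.

#include <stdio.h>
#include <stdlib.h>

struct ns { int refs; };
struct req { struct ns *ns; };

static void put_ns(struct ns *ns)
{
        if (--ns->refs == 0)
                printf("namespace released\n");
}

/* Pretend the transport completion path frees the request immediately. */
static void queue_response(struct req *req)
{
        free(req);
}

static void req_complete(struct req *req)
{
        struct ns *ns = req->ns;        /* capture before req can go away */

        queue_response(req);            /* req must not be touched after this */
        if (ns)
                put_ns(ns);             /* safe: uses the local copy */
}

int main(void)
{
        struct ns ns = { .refs = 1 };
        struct req *req = malloc(sizeof(*req));

        req->ns = &ns;
        req_complete(req);
        return 0;
}
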
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c2162eef8ce1..668d257fa986 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -292,7 +292,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index ebdf9aa81041..7970a7640e58 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -23,17 +23,12 @@ static void nvmet_auth_expired_work(struct work_struct *work)
sq->dhchap_tid = -1;
}
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
- u32 result = le32_to_cpu(req->cqe->result.u32);
-
/* Initialize in-band authentication */
- INIT_DELAYED_WORK(&req->sq->auth_expired_work,
- nvmet_auth_expired_work);
- req->sq->authenticated = false;
- req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
- result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
- req->cqe->result.u32 = cpu_to_le32(result);
+ INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+ sq->authenticated = false;
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}
static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
@@ -177,7 +172,7 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
return 0;
}
-static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+static u16 nvmet_auth_failure2(void *d)
{
struct nvmf_auth_dhchap_failure_data *data = d;
@@ -229,10 +224,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
}
status = nvmet_copy_from_sgl(req, 0, d, tl);
- if (status) {
- kfree(d);
- goto done;
- }
+ if (status)
+ goto done_kfree;
data = d;
pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
@@ -310,7 +303,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
goto done_kfree;
break;
case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
- status = nvmet_auth_failure2(req, d);
+ status = nvmet_auth_failure2(d);
if (status) {
pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
ctrl->cntlid, req->sq->qid, status);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f91a56180d3d..43b5bd8bb6a5 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -198,6 +198,12 @@ err:
return ret;
}
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+{
+ return (u32)ctrl->cntlid |
+ (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+}
+
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
struct nvmf_connect_command *c = &req->cmd->connect;
@@ -269,10 +275,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "",
nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
- if (nvmet_has_auth(ctrl))
- nvmet_init_auth(ctrl, req);
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
@@ -328,14 +331,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out_ctrl_put;
- /* pass back cntlid for successful completion */
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
- if (nvmet_has_auth(ctrl))
- nvmet_init_auth(ctrl, req);
-
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
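
nvmet_connect_result() folds the controller ID and the authentication-required flag into the single 32-bit connect result word shared by the admin and I/O connect handlers. A sketch of that packing; the AUTHREQ bit position is assumed here (the real NVME_CONNECT_AUTHREQ_ATR definition is in include/linux/nvme.h), the only point being that it sits above the 16-bit cntlid field.

#include <stdio.h>
#include <stdint.h>

#define AUTHREQ_ATR     (1u << 17)      /* assumed flag bit above the cntlid field */

/* Low 16 bits carry the controller ID, upper bits carry AUTHREQ flags. */
static uint32_t connect_result(uint16_t cntlid, int has_auth)
{
        return (uint32_t)cntlid | (has_auth ? AUTHREQ_ATR : 0);
}

int main(void)
{
        printf("0x%08x\n", connect_result(0x0003, 0));  /* 0x00000003 */
        printf("0x%08x\n", connect_result(0x0003, 1));  /* 0x00020003 */
        return 0;
}
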
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 2dc1c1035626..c2d6cea0236b 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -12,11 +12,9 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
- const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of logical blocks per physical block. */
- const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */
- const __le16 lpp0b = to0based(lpp);
+ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+ bdev_logical_block_size(bdev));
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -42,11 +40,12 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
- id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+ id->npdg = to0based(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
/* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
- id->nows = to0based(ql->io_opt / ql->logical_block_size);
+ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -334,6 +333,11 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!bdev_write_cache(req->ns->bdev)) {
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+ return;
+ }
+
if (!nvmet_check_transfer_len(req, 0))
return;
@@ -347,6 +351,9 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
+ if (!bdev_write_cache(req->ns->bdev))
+ return 0;
+
if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9750a7fca268..b45fe3adf015 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -204,7 +204,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_loop_ctrl *ctrl = set->driver_data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
nvme_req(req)->ctrl = &ctrl->ctrl;
@@ -218,7 +218,7 @@ static struct lock_class_key loop_hctx_fq_lock_key;
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -238,7 +238,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list);
mutex_unlock(&nvme_loop_ctrl_mutex);
- if (nctrl->tagset) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (nctrl->tagset)
+ nvme_remove_io_tag_set(nctrl);
kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
if (error)
return error;
ctrl->ctrl.queue_count = 1;
- error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error)
goto out_free_sq;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/* reset stopped state for the fresh admin queue */
clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_queue:
+out_cleanup_tagset:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
return error;
@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
return ret;
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_loop_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)
goto out_destroy_queues;
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tagset;
-
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_connect_q:
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
nvme_loop_destroy_io_queues(ctrl);
return ret;
@@ -601,7 +556,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = -ENOMEM;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
@@ -621,6 +575,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
if (opts->nr_io_queues) {
ret = nvme_loop_create_io_queues(ctrl);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ffeeb0a1c49..dfe3894205aa 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -704,7 +704,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
@@ -726,8 +726,9 @@ static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
return 0;
}
-static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
- struct nvmet_req *req) {};
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 6f39a29828b1..79af5140af8b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -215,9 +215,11 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ u32 effects;
int status;
- status = nvme_execute_passthru_rq(rq);
+ status = nvme_execute_passthru_rq(rq, &effects);
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
@@ -238,16 +240,20 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
blk_mq_free_request(rq);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, req->cmd, status);
}
-static void nvmet_passthru_req_done(struct request *rq,
- blk_status_t blk_status)
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
{
struct nvmet_req *req = rq->end_io_data;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
blk_mq_free_request(rq);
+ return RQ_END_IO_NONE;
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
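
The end_io conversions in this series (nvme_del_cq_end at the top of the section, nvmet_passthru_req_done here) change the callback from void to an enum so the block layer can be told whether it still needs to free the request. A standalone sketch of that calling convention; RQ_END_IO_NONE/RQ_END_IO_FREE mirror the kernel enum names, everything else is illustrative.

#include <stdio.h>
#include <stdlib.h>

enum rq_end_io_ret { RQ_END_IO_NONE, RQ_END_IO_FREE };

struct request { void *end_io_data; };

/* This callback frees the request itself, so it tells the caller not to. */
static enum rq_end_io_ret done(struct request *rq, int error)
{
        printf("completed with error=%d\n", error);
        free(rq);
        return RQ_END_IO_NONE;
}

/* The dispatcher only frees the request when asked to. */
static void complete_request(struct request *rq, int error,
                             enum rq_end_io_ret (*end_io)(struct request *, int))
{
        if (end_io(rq, error) == RQ_END_IO_FREE)
                free(rq);
}

int main(void)
{
        struct request *rq = malloc(sizeof(*rq));

        complete_request(rq, 0, done);
        return 0;
}
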
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc3b4dc8fe08..6c1476e086ef 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -77,9 +77,8 @@ struct nvmet_tcp_cmd {
u32 pdu_len;
u32 pdu_recv;
int sg_idx;
- int nr_mapped;
struct msghdr recv_msg;
- struct kvec *iov;
+ struct bio_vec *iov;
u32 flags;
struct list_head entry;
@@ -165,9 +164,7 @@ static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -301,35 +298,21 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
- WARN_ON(unlikely(cmd->nr_mapped > 0));
-
kfree(cmd->iov);
sgl_free(cmd->req.sg);
cmd->iov = NULL;
cmd->req.sg = NULL;
}
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
-{
- struct scatterlist *sg;
- int i;
-
- sg = &cmd->req.sg[cmd->sg_idx];
-
- for (i = 0; i < cmd->nr_mapped; i++)
- kunmap(sg_page(&sg[i]));
-
- cmd->nr_mapped = 0;
-}
-
-static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
- struct kvec *iov = cmd->iov;
+ struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
u32 length, offset, sg_offset;
+ int nr_pages;
length = cmd->pdu_len;
- cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
+ nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
@@ -338,8 +321,9 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
- iov->iov_len = iov_len;
+ iov->bv_page = sg_page(sg);
+ iov->bv_len = sg->length;
+ iov->bv_offset = sg->offset + sg_offset;
length -= iov_len;
sg = sg_next(sg);
@@ -347,8 +331,8 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
sg_offset = 0;
}
- iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
- cmd->nr_mapped, cmd->pdu_len);
+ iov_iter_bvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
+ nr_pages, cmd->pdu_len);
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
@@ -926,7 +910,7 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
}
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
@@ -935,10 +919,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- if (likely(queue->nr_cmds))
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd = &queue->cmds[data->ttag];
- else
+ } else {
cmd = &queue->connect;
+ }
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -952,7 +943,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len = le32_to_cpu(data->data_length);
cmd->pdu_recv = 0;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
@@ -976,6 +967,13 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
return nvmet_tcp_handle_icreq(queue);
}
+ if (unlikely(hdr->type == nvme_tcp_icreq)) {
+ pr_err("queue %d: received icreq pdu in state %d\n",
+ queue->idx, queue->state);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
if (hdr->type == nvme_tcp_h2c_data) {
ret = nvmet_tcp_handle_h2c_data_pdu(queue);
if (unlikely(ret))
@@ -1021,7 +1019,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (nvmet_tcp_need_data_in(queue->cmd)) {
if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(queue->cmd);
+ nvmet_tcp_build_pdu_iovec(queue->cmd);
return 0;
}
/* send back R2T */
@@ -1141,7 +1139,6 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
- nvmet_tcp_unmap_pdu_iovec(cmd);
if (queue->data_digest) {
nvmet_tcp_prep_recv_ddgst(cmd);
return 0;
@@ -1179,7 +1176,8 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
queue->idx, cmd->req.cmd->common.command_id,
queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
le32_to_cpu(cmd->exp_ddgst));
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
+ nvmet_tcp_free_cmd_buffers(cmd);
nvmet_tcp_fatal_error(queue);
ret = -EPROTO;
goto out;
@@ -1408,13 +1406,6 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
write_unlock_bh(&sock->sk->sk_callback_lock);
}
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
-{
- nvmet_req_uninit(&cmd->req);
- nvmet_tcp_unmap_pdu_iovec(cmd);
- nvmet_tcp_free_cmd_buffers(cmd);
-}
-
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
struct nvmet_tcp_cmd *cmd = queue->cmds;
@@ -1423,17 +1414,28 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
for (i = 0; i < queue->nr_cmds; i++, cmd++) {
if (nvmet_tcp_need_data_in(cmd))
nvmet_req_uninit(&cmd->req);
-
- nvmet_tcp_unmap_pdu_iovec(cmd);
- nvmet_tcp_free_cmd_buffers(cmd);
}
if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
/* failed in connect */
- nvmet_tcp_finish_cmd(&queue->connect);
+ nvmet_req_uninit(&queue->connect.req);
}
}
+static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_tcp_free_cmd_buffers(cmd);
+ }
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
+}
+
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
struct page *page;
@@ -1452,6 +1454,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
+ nvmet_tcp_free_cmd_data_in_buffers(queue);
sock_release(queue->sock);
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
@@ -1506,6 +1509,9 @@ static void nvmet_tcp_state_change(struct sock *sk)
goto done;
switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
case TCP_FIN_WAIT1:
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
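
The nvmet-tcp hunks replace the kmap()-based kvec array with bio_vec entries built straight from the command's scatterlist, so the receive path can use iov_iter_bvec() without per-page mapping and unmapping. A simplified userspace sketch of walking a scatterlist-like array and filling page/len/offset triplets; the struct layouts are stand-ins, not the kernel's, and the real code derives its starting entry from rbytes_done.

#include <stdio.h>

struct sg_entry { void *page; unsigned int length, offset; };
struct bvec     { void *bv_page; unsigned int bv_len, bv_offset; };

/* Build one bvec per scatterlist entry covering 'length' bytes starting
 * 'skip' bytes into the list; returns the number of vectors used. */
static int build_bvecs(const struct sg_entry *sg, int nents,
                       unsigned int skip, unsigned int length,
                       struct bvec *iov)
{
        int i, n = 0;

        for (i = 0; i < nents && length; i++) {
                if (skip >= sg[i].length) {
                        skip -= sg[i].length;
                        continue;
                }
                iov[n].bv_page = sg[i].page;
                iov[n].bv_offset = sg[i].offset + skip;
                iov[n].bv_len = sg[i].length - skip;
                if (iov[n].bv_len > length)
                        iov[n].bv_len = length;
                length -= iov[n].bv_len;
                skip = 0;
                n++;
        }
        return n;
}

int main(void)
{
        char a[4096], b[4096];
        struct sg_entry sg[2] = { { a, 4096, 0 }, { b, 4096, 0 } };
        struct bvec iov[2];
        int n = build_bvecs(sg, 2, 512, 6000, iov);

        for (int i = 0; i < n; i++)
                printf("vec %d: len=%u off=%u\n", i, iov[i].bv_len, iov[i].bv_offset);
        return 0;
}
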
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index c7ef69f29fe4..1254cf57e008 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
struct nvme_id_ns_zns *id_zns;
u64 zsze;
u16 status;
+ u32 mar, mor;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+ mor = bdev_max_open_zones(req->ns->bdev);
+ if (!mor)
+ mor = U32_MAX;
+ else
+ mor--;
+ id_zns->mor = cpu_to_le32(mor);
+
+ mar = bdev_max_active_zones(req->ns->bdev);
+ if (!mar)
+ mar = U32_MAX;
+ else
+ mar--;
+ id_zns->mar = cpu_to_le32(mar);
done:
status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
@@ -387,7 +400,6 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
struct block_device *bdev = req->ns->bdev;
unsigned int nr_zones = bdev_nr_zones(bdev);
- struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
sector_t sector = 0;
int ret;
@@ -396,7 +408,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
};
d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
- GFP_NOIO, q->node);
+ GFP_NOIO, bdev->bd_disk->node_id);
if (!d.zbitmap) {
ret = -ENOMEM;
goto out;
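
The ZNS identify hunk converts the block layer's convention (zero meaning no limit on open/active zones) into the NVMe one, where MOR/MAR are 0's based and all-ones means unlimited. A tiny sketch of that conversion:

#include <stdio.h>
#include <stdint.h>

/* 0 from the block layer means "unlimited" -> report 0xffffffff;
 * otherwise report the limit minus one (0's-based NVMe field). */
static uint32_t to_nvme_zone_limit(uint32_t bdev_limit)
{
        return bdev_limit ? bdev_limit - 1 : UINT32_MAX;
}

int main(void)
{
        printf("0x%08x\n", to_nvme_zone_limit(0));      /* unlimited */
        printf("%u\n", to_nvme_zone_limit(14));         /* 13, i.e. 14 zones */
        return 0;
}
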
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7fa960bd3df1..42da760e0f45 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -578,6 +578,7 @@ int of_device_compatible_match(struct device_node *device,
return score;
}
+EXPORT_SYMBOL_GPL(of_device_compatible_match);
/**
* of_machine_is_compatible - Test root of device tree for a given compatible value
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7bc92923104c..1c573e7a60bc 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index eafa8ffefbd0..f9614552db82 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2524,13 +2524,12 @@ static int unittest_i2c_dev_probe(struct i2c_client *client,
return 0;
};
-static int unittest_i2c_dev_remove(struct i2c_client *client)
+static void unittest_i2c_dev_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
- return 0;
}
static const struct i2c_device_id unittest_i2c_dev_id[] = {
@@ -2601,7 +2600,7 @@ static int unittest_i2c_mux_probe(struct i2c_client *client,
return 0;
};
-static int unittest_i2c_mux_remove(struct i2c_client *client)
+static void unittest_i2c_mux_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
@@ -2609,7 +2608,6 @@ static int unittest_i2c_mux_remove(struct i2c_client *client)
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
i2c_mux_del_adapters(muxc);
- return 0;
}
static const struct i2c_device_id unittest_i2c_mux_id[] = {
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 77d1ba3a4154..e87567dbe99f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -873,7 +873,7 @@ int dev_pm_opp_config_clks_simple(struct device *dev,
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
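
The dev_pm_opp_config_clks_simple() change returns 0 explicitly once every clk_set_rate() call has succeeded, presumably to avoid handing back a stale or never-assigned 'ret' when the loop body does not run. A generic sketch of that pitfall with a stub in place of clk_set_rate():

#include <stdio.h>

/* If 'count' is zero the loop never assigns 'ret'; returning it would be
 * undefined. Returning 0 after a fully successful loop avoids that. */
static int set_all(const int *vals, int count)
{
        int ret;

        for (int i = 0; i < count; i++) {
                ret = vals[i];          /* stand-in for clk_set_rate() */
                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        int ok[2] = { 0, 0 };

        printf("%d\n", set_all(ok, 2)); /* 0 */
        printf("%d\n", set_all(ok, 0)); /* still 0, even with an empty list */
        return 0;
}
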
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9be007c9420f..a66386043aa6 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -268,7 +268,7 @@ static int ioc_count;
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
-* This was was copied from sba_iommu.c. Don't try to unify
+* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,11 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ iounmap(ioc->ioc_regs);
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 3a8c98615634..bdef7a8d6ab8 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -221,16 +221,7 @@ static size_t irt_num_entry;
static struct irt_entry *iosapic_alloc_irt(int num_entries)
{
- unsigned long a;
-
- /* The IRT needs to be 8-byte aligned for the PDC call.
- * Normally kmalloc would guarantee larger alignment, but
- * if CONFIG_DEBUG_SLAB is enabled, then we can get only
- * 4-byte alignment on 32-bit kernels
- */
- a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
- a = (a + 7UL) & ~7UL;
- return (struct irt_entry *)a;
+ return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);
}
/**
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 1e4a5663d011..d4be9d2ee74d 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -646,7 +646,7 @@ int lcd_print( const char *str )
cancel_delayed_work_sync(&led_task);
/* copy display string to buffer for procfs */
- strlcpy(lcd_text, str, sizeof(lcd_text));
+ strscpy(lcd_text, str, sizeof(lcd_text));
/* Set LCD Cursor to 1st character */
gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 374b9199878d..ecd870087a3d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -28,6 +28,12 @@
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
+/*
+ * The semantics of 64-bit register access on 32-bit systems can't be guaranteed
+ * by the C standard; we hope the _lo_hi() macros defining readq and writeq
+ * here will behave as expected.
+ */
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/byteorder.h>
#include <asm/io.h>
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index d1c5fcf00a8a..bfd9bac37e24 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -274,7 +274,7 @@ config VMD
config PCIE_BRCMSTB
tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || \
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index bf495bf0f48a..1525023e49b6 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -240,10 +240,6 @@ config PCMCIA_PROBE
bool
default y if ISA && !ARCH_SA1100 && !PARISC
-config PCMCIA_VRC4171
- tristate "NEC VRC4171 Card Controllers support"
- depends on CPU_VR41XX && ISA && PCMCIA
-
config OMAP_CF
tristate "OMAP CompactFlash Controller"
depends on PCMCIA
@@ -252,15 +248,6 @@ config OMAP_CF
Say Y here to support the CompactFlash controller on OMAP.
Note that this doesn't support "True IDE" mode.
-config AT91_CF
- tristate "AT91 CompactFlash Controller"
- depends on PCI
- depends on OF
- depends on PCMCIA && ARCH_AT91
- help
- Say Y here to support the CompactFlash controller on AT91 chips.
- Or choose M to compile the driver as a module named "at91_cf".
-
config ELECTRA_CF
tristate "Electra CompactFlash Controller"
depends on PCMCIA && PPC_PASEMI
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index c59ddde42007..b3a2accf47af 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -29,9 +29,7 @@ obj-$(CONFIG_PCMCIA_SA11XX_BASE) += sa11xx_base.o
obj-$(CONFIG_PCMCIA_SA1100) += sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa1111_cs.o
obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
-obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
obj-$(CONFIG_OMAP_CF) += omap_cf.o
-obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
obj-$(CONFIG_PCMCIA_MAX1600) += max1600.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
deleted file mode 100644
index 92df2c2c5d07..000000000000
--- a/drivers/pcmcia/at91_cf.c
+++ /dev/null
@@ -1,407 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * at91_cf.c -- AT91 CompactFlash controller driver
- *
- * Copyright (C) 2005 David Brownell
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/sizes.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/atmel-mc.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/pci.h>
-#include <linux/regmap.h>
-
-#include <pcmcia/ss.h>
-
-/*
- * A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW;
- * some other bit in {A24,A22..A11} is nREG to flag memory access
- * (vs attributes). So more than 2KB/region would just be waste.
- * Note: These are offsets from the physical base address.
- */
-#define CF_ATTR_PHYS (0)
-#define CF_IO_PHYS (1 << 23)
-#define CF_MEM_PHYS (0x017ff800)
-
-struct at91_cf_data {
- int irq_pin; /* I/O IRQ */
- int det_pin; /* Card detect */
- int vcc_pin; /* power switching */
- int rst_pin; /* card reset */
- u8 chipselect; /* EBI Chip Select number */
- u8 flags;
-#define AT91_CF_TRUE_IDE 0x01
-#define AT91_IDE_SWAP_A0_A2 0x02
-};
-
-struct regmap *mc;
-
-/*--------------------------------------------------------------------------*/
-
-struct at91_cf_socket {
- struct pcmcia_socket socket;
-
- unsigned present:1;
-
- struct platform_device *pdev;
- struct at91_cf_data *board;
-
- unsigned long phys_baseaddr;
-};
-
-static inline int at91_cf_present(struct at91_cf_socket *cf)
-{
- return !gpio_get_value(cf->board->det_pin);
-}
-
-/*--------------------------------------------------------------------------*/
-
-static int at91_cf_ss_init(struct pcmcia_socket *s)
-{
- return 0;
-}
-
-static irqreturn_t at91_cf_irq(int irq, void *_cf)
-{
- struct at91_cf_socket *cf = _cf;
-
- if (irq == gpio_to_irq(cf->board->det_pin)) {
- unsigned present = at91_cf_present(cf);
-
- /* kick pccard as needed */
- if (present != cf->present) {
- cf->present = present;
- dev_dbg(&cf->pdev->dev, "card %s\n",
- present ? "present" : "gone");
- pcmcia_parse_events(&cf->socket, SS_DETECT);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
-{
- struct at91_cf_socket *cf;
-
- if (!sp)
- return -EINVAL;
-
- cf = container_of(s, struct at91_cf_socket, socket);
-
- /* NOTE: CF is always 3VCARD */
- if (at91_cf_present(cf)) {
- int rdy = gpio_is_valid(cf->board->irq_pin); /* RDY/nIRQ */
- int vcc = gpio_is_valid(cf->board->vcc_pin);
-
- *sp = SS_DETECT | SS_3VCARD;
- if (!rdy || gpio_get_value(cf->board->irq_pin))
- *sp |= SS_READY;
- if (!vcc || gpio_get_value(cf->board->vcc_pin))
- *sp |= SS_POWERON;
- } else
- *sp = 0;
-
- return 0;
-}
-
-static int
-at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
-{
- struct at91_cf_socket *cf;
-
- cf = container_of(sock, struct at91_cf_socket, socket);
-
- /* switch Vcc if needed and possible */
- if (gpio_is_valid(cf->board->vcc_pin)) {
- switch (s->Vcc) {
- case 0:
- gpio_set_value(cf->board->vcc_pin, 0);
- break;
- case 33:
- gpio_set_value(cf->board->vcc_pin, 1);
- break;
- default:
- return -EINVAL;
- }
- }
-
- /* toggle reset if needed */
- gpio_set_value(cf->board->rst_pin, s->flags & SS_RESET);
-
- dev_dbg(&cf->pdev->dev, "Vcc %d, io_irq %d, flags %04x csc %04x\n",
- s->Vcc, s->io_irq, s->flags, s->csc_mask);
-
- return 0;
-}
-
-static int at91_cf_ss_suspend(struct pcmcia_socket *s)
-{
- return at91_cf_set_socket(s, &dead_socket);
-}
-
-/* we already mapped the I/O region */
-static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
-{
- struct at91_cf_socket *cf;
- u32 csr;
-
- cf = container_of(s, struct at91_cf_socket, socket);
- io->flags &= (MAP_ACTIVE | MAP_16BIT | MAP_AUTOSZ);
-
- /*
- * Use 16 bit accesses unless/until we need 8-bit i/o space.
- *
- * NOTE: this CF controller ignores IOIS16, so we can't really do
- * MAP_AUTOSZ. The 16bit mode allows single byte access on either
- * D0-D7 (even addr) or D8-D15 (odd), so it's close enough for many
- * purposes (and handles ide-cs).
- *
- * The 8bit mode is needed for odd byte access on D0-D7. It seems
- * some cards only like that way to get at the odd byte, despite
- * CF 3.0 spec table 35 also giving the D8-D15 option.
- */
- if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) {
- csr = AT91_MC_SMC_DBW_8;
- dev_dbg(&cf->pdev->dev, "8bit i/o bus\n");
- } else {
- csr = AT91_MC_SMC_DBW_16;
- dev_dbg(&cf->pdev->dev, "16bit i/o bus\n");
- }
- regmap_update_bits(mc, AT91_MC_SMC_CSR(cf->board->chipselect),
- AT91_MC_SMC_DBW, csr);
-
- io->start = cf->socket.io_offset;
- io->stop = io->start + SZ_2K - 1;
-
- return 0;
-}
-
-/* pcmcia layer maps/unmaps mem regions */
-static int
-at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
-{
- struct at91_cf_socket *cf;
-
- if (map->card_start)
- return -EINVAL;
-
- cf = container_of(s, struct at91_cf_socket, socket);
-
- map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT);
- if (map->flags & MAP_ATTRIB)
- map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS;
- else
- map->static_start = cf->phys_baseaddr + CF_MEM_PHYS;
-
- return 0;
-}
-
-static struct pccard_operations at91_cf_ops = {
- .init = at91_cf_ss_init,
- .suspend = at91_cf_ss_suspend,
- .get_status = at91_cf_get_status,
- .set_socket = at91_cf_set_socket,
- .set_io_map = at91_cf_set_io_map,
- .set_mem_map = at91_cf_set_mem_map,
-};
-
-/*--------------------------------------------------------------------------*/
-
-static const struct of_device_id at91_cf_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-cf" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91_cf_dt_ids);
-
-static int at91_cf_probe(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf;
- struct at91_cf_data *board;
- struct resource *io;
- struct resource realio;
- int status;
-
- board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
- if (!board)
- return -ENOMEM;
-
- board->irq_pin = of_get_gpio(pdev->dev.of_node, 0);
- board->det_pin = of_get_gpio(pdev->dev.of_node, 1);
- board->vcc_pin = of_get_gpio(pdev->dev.of_node, 2);
- board->rst_pin = of_get_gpio(pdev->dev.of_node, 3);
-
- mc = syscon_regmap_lookup_by_compatible("atmel,at91rm9200-sdramc");
- if (IS_ERR(mc))
- return PTR_ERR(mc);
-
- if (!gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
- return -ENODEV;
-
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -ENODEV;
-
- cf = devm_kzalloc(&pdev->dev, sizeof(*cf), GFP_KERNEL);
- if (!cf)
- return -ENOMEM;
-
- cf->board = board;
- cf->pdev = pdev;
- cf->phys_baseaddr = io->start;
- platform_set_drvdata(pdev, cf);
-
- /* must be a GPIO; ergo must trigger on both edges */
- status = devm_gpio_request(&pdev->dev, board->det_pin, "cf_det");
- if (status < 0)
- return status;
-
- status = devm_request_irq(&pdev->dev, gpio_to_irq(board->det_pin),
- at91_cf_irq, 0, "at91_cf detect", cf);
- if (status < 0)
- return status;
-
- device_init_wakeup(&pdev->dev, 1);
-
- status = devm_gpio_request(&pdev->dev, board->rst_pin, "cf_rst");
- if (status < 0)
- goto fail0a;
-
- if (gpio_is_valid(board->vcc_pin)) {
- status = devm_gpio_request(&pdev->dev, board->vcc_pin, "cf_vcc");
- if (status < 0)
- goto fail0a;
- }
-
- /*
- * The card driver will request this irq later as needed.
- * but it causes lots of "irqNN: nobody cared" messages
- * unless we report that we handle everything (sigh).
- * (Note: DK board doesn't wire the IRQ pin...)
- */
- if (gpio_is_valid(board->irq_pin)) {
- status = devm_gpio_request(&pdev->dev, board->irq_pin, "cf_irq");
- if (status < 0)
- goto fail0a;
-
- status = devm_request_irq(&pdev->dev, gpio_to_irq(board->irq_pin),
- at91_cf_irq, IRQF_SHARED, "at91_cf", cf);
- if (status < 0)
- goto fail0a;
- cf->socket.pci_irq = gpio_to_irq(board->irq_pin);
- } else
- cf->socket.pci_irq = nr_irqs + 1;
-
- /*
- * pcmcia layer only remaps "real" memory not iospace
- * io_offset is set to 0x10000 to avoid the check in static_find_io().
- * */
- cf->socket.io_offset = 0x10000;
- realio.start = cf->socket.io_offset;
- realio.end = realio.start + SZ_64K - 1;
- status = pci_remap_iospace(&realio, cf->phys_baseaddr + CF_IO_PHYS);
- if (status)
- goto fail0a;
-
- /* reserve chip-select regions */
- if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), "at91_cf")) {
- status = -ENXIO;
- goto fail0a;
- }
-
- dev_info(&pdev->dev, "irqs det #%d, io #%d\n",
- gpio_to_irq(board->det_pin), gpio_to_irq(board->irq_pin));
-
- cf->socket.owner = THIS_MODULE;
- cf->socket.dev.parent = &pdev->dev;
- cf->socket.ops = &at91_cf_ops;
- cf->socket.resource_ops = &pccard_static_ops;
- cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
- | SS_CAP_MEM_ALIGN;
- cf->socket.map_size = SZ_2K;
- cf->socket.io[0].res = io;
-
- status = pcmcia_register_socket(&cf->socket);
- if (status < 0)
- goto fail0a;
-
- return 0;
-
-fail0a:
- device_init_wakeup(&pdev->dev, 0);
- return status;
-}
-
-static int at91_cf_remove(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
-
- pcmcia_unregister_socket(&cf->socket);
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
- struct at91_cf_data *board = cf->board;
-
- if (device_may_wakeup(&pdev->dev)) {
- enable_irq_wake(gpio_to_irq(board->det_pin));
- if (gpio_is_valid(board->irq_pin))
- enable_irq_wake(gpio_to_irq(board->irq_pin));
- }
- return 0;
-}
-
-static int at91_cf_resume(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
- struct at91_cf_data *board = cf->board;
-
- if (device_may_wakeup(&pdev->dev)) {
- disable_irq_wake(gpio_to_irq(board->det_pin));
- if (gpio_is_valid(board->irq_pin))
- disable_irq_wake(gpio_to_irq(board->irq_pin));
- }
-
- return 0;
-}
-
-#else
-#define at91_cf_suspend NULL
-#define at91_cf_resume NULL
-#endif
-
-static struct platform_driver at91_cf_driver = {
- .driver = {
- .name = "at91_cf",
- .of_match_table = at91_cf_dt_ids,
- },
- .probe = at91_cf_probe,
- .remove = at91_cf_remove,
- .suspend = at91_cf_suspend,
- .resume = at91_cf_resume,
-};
-
-module_platform_driver(at91_cf_driver);
-
-MODULE_DESCRIPTION("AT91 Compact Flash Driver");
-MODULE_AUTHOR("David Brownell");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:at91_cf");
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 192c9049d654..a335748bdef5 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -661,12 +661,12 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket,
return 0;
}
-static int i82092aa_module_init(void)
+static int __init i82092aa_module_init(void)
{
return pci_register_driver(&i82092aa_pci_driver);
}
-static void i82092aa_module_exit(void)
+static void __exit i82092aa_module_exit(void)
{
pci_unregister_driver(&i82092aa_pci_driver);
if (sockets[0].io_base > 0)
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 1972a8f6fa8e..d3f827d4224a 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -124,8 +124,6 @@ static int omap_cf_get_status(struct pcmcia_socket *s, u_int *sp)
static int
omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
{
- u16 control;
-
/* REVISIT some non-OSK boards may support power switching */
switch (s->Vcc) {
case 0:
@@ -135,7 +133,7 @@ omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
return -EINVAL;
}
- control = omap_readw(CF_CONTROL);
+ omap_readw(CF_CONTROL);
if (s->flags & SS_RESET)
omap_writew(CF_CONTROL_RESET, CF_CONTROL);
else
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 47b060c57418..c2b6e828c2c6 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -125,7 +125,7 @@ static int sa11x0_drv_pcmcia_legacy_probe(struct platform_device *dev)
return ret;
}
-static int sa11x0_drv_pcmcia_legacy_remove(struct platform_device *dev)
+static void sa11x0_drv_pcmcia_legacy_remove(struct platform_device *dev)
{
struct skt_dev_info *sinfo = platform_get_drvdata(dev);
int i;
@@ -134,8 +134,6 @@ static int sa11x0_drv_pcmcia_legacy_remove(struct platform_device *dev)
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
-
- return 0;
}
static int sa11x0_drv_pcmcia_probe(struct platform_device *pdev)
@@ -167,8 +165,10 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
{
struct soc_pcmcia_socket *skt;
- if (dev->id == -1)
- return sa11x0_drv_pcmcia_legacy_remove(dev);
+ if (dev->id == -1) {
+ sa11x0_drv_pcmcia_legacy_remove(dev);
+ return 0;
+ }
skt = platform_get_drvdata(dev);
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
deleted file mode 100644
index 177d77892144..000000000000
--- a/drivers/pcmcia/vrc4171_card.c
+++ /dev/null
@@ -1,745 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services.
- *
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-
-#include <pcmcia/ss.h>
-
-#include "i82365.h"
-
-MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_LICENSE("GPL");
-
-#define CARD_MAX_SLOTS 2
-#define CARD_SLOTA 0
-#define CARD_SLOTB 1
-#define CARD_SLOTB_OFFSET 0x40
-
-#define CARD_MEM_START 0x10000000
-#define CARD_MEM_END 0x13ffffff
-#define CARD_MAX_MEM_OFFSET 0x3ffffff
-#define CARD_MAX_MEM_SPEED 1000
-
-#define CARD_CONTROLLER_INDEX 0x03e0
-#define CARD_CONTROLLER_DATA 0x03e1
- /* Power register */
- #define VPP_GET_VCC 0x01
- #define POWER_ENABLE 0x10
- #define CARD_VOLTAGE_SENSE 0x1f
- #define VCC_3VORXV_CAPABLE 0x00
- #define VCC_XV_ONLY 0x01
- #define VCC_3V_CAPABLE 0x02
- #define VCC_5V_ONLY 0x03
- #define CARD_VOLTAGE_SELECT 0x2f
- #define VCC_3V 0x01
- #define VCC_5V 0x00
- #define VCC_XV 0x02
- #define VCC_STATUS_3V 0x02
- #define VCC_STATUS_5V 0x01
- #define VCC_STATUS_XV 0x03
- #define GLOBAL_CONTROL 0x1e
- #define EXWRBK 0x04
- #define IRQPM_EN 0x08
- #define CLRPMIRQ 0x10
-
-#define INTERRUPT_STATUS 0x05fa
- #define IRQ_A 0x02
- #define IRQ_B 0x04
-
-#define CONFIGURATION1 0x05fe
- #define SLOTB_CONFIG 0xc000
- #define SLOTB_NONE 0x0000
- #define SLOTB_PCCARD 0x4000
- #define SLOTB_CF 0x8000
- #define SLOTB_FLASHROM 0xc000
-
-#define CARD_CONTROLLER_START CARD_CONTROLLER_INDEX
-#define CARD_CONTROLLER_END CARD_CONTROLLER_DATA
-
-#define IO_MAX_MAPS 2
-#define MEM_MAX_MAPS 5
-
-enum vrc4171_slot {
- SLOT_PROBE = 0,
- SLOT_NOPROBE_IO,
- SLOT_NOPROBE_MEM,
- SLOT_NOPROBE_ALL,
- SLOT_INITIALIZED,
-};
-
-enum vrc4171_slotb {
- SLOTB_IS_NONE,
- SLOTB_IS_PCCARD,
- SLOTB_IS_CF,
- SLOTB_IS_FLASHROM,
-};
-
-struct vrc4171_socket {
- enum vrc4171_slot slot;
- struct pcmcia_socket pcmcia_socket;
- char name[24];
- int csc_irq;
- int io_irq;
- spinlock_t lock;
-};
-
-static struct vrc4171_socket vrc4171_sockets[CARD_MAX_SLOTS];
-static enum vrc4171_slotb vrc4171_slotb = SLOTB_IS_NONE;
-static char vrc4171_card_name[] = "NEC VRC4171 Card Controller";
-static unsigned int vrc4171_irq;
-static uint16_t vrc4171_irq_mask = 0xdeb8;
-
-static struct resource vrc4171_card_resource[3] = {
- { .name = vrc4171_card_name,
- .start = CARD_CONTROLLER_START,
- .end = CARD_CONTROLLER_END,
- .flags = IORESOURCE_IO, },
- { .name = vrc4171_card_name,
- .start = INTERRUPT_STATUS,
- .end = INTERRUPT_STATUS,
- .flags = IORESOURCE_IO, },
- { .name = vrc4171_card_name,
- .start = CONFIGURATION1,
- .end = CONFIGURATION1,
- .flags = IORESOURCE_IO, },
-};
-
-static struct platform_device vrc4171_card_device = {
- .name = vrc4171_card_name,
- .id = 0,
- .num_resources = 3,
- .resource = vrc4171_card_resource,
-};
-
-static inline uint16_t vrc4171_get_irq_status(void)
-{
- return inw(INTERRUPT_STATUS);
-}
-
-static inline void vrc4171_set_multifunction_pin(enum vrc4171_slotb config)
-{
- uint16_t config1;
-
- config1 = inw(CONFIGURATION1);
- config1 &= ~SLOTB_CONFIG;
-
- switch (config) {
- case SLOTB_IS_NONE:
- config1 |= SLOTB_NONE;
- break;
- case SLOTB_IS_PCCARD:
- config1 |= SLOTB_PCCARD;
- break;
- case SLOTB_IS_CF:
- config1 |= SLOTB_CF;
- break;
- case SLOTB_IS_FLASHROM:
- config1 |= SLOTB_FLASHROM;
- break;
- default:
- break;
- }
-
- outw(config1, CONFIGURATION1);
-}
-
-static inline uint8_t exca_read_byte(int slot, uint8_t index)
-{
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index, CARD_CONTROLLER_INDEX);
- return inb(CARD_CONTROLLER_DATA);
-}
-
-static inline uint16_t exca_read_word(int slot, uint8_t index)
-{
- uint16_t data;
-
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index++, CARD_CONTROLLER_INDEX);
- data = inb(CARD_CONTROLLER_DATA);
-
- outb(index, CARD_CONTROLLER_INDEX);
- data |= ((uint16_t)inb(CARD_CONTROLLER_DATA)) << 8;
-
- return data;
-}
-
-static inline uint8_t exca_write_byte(int slot, uint8_t index, uint8_t data)
-{
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index, CARD_CONTROLLER_INDEX);
- outb(data, CARD_CONTROLLER_DATA);
-
- return data;
-}
-
-static inline uint16_t exca_write_word(int slot, uint8_t index, uint16_t data)
-{
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index++, CARD_CONTROLLER_INDEX);
- outb(data, CARD_CONTROLLER_DATA);
-
- outb(index, CARD_CONTROLLER_INDEX);
- outb((uint8_t)(data >> 8), CARD_CONTROLLER_DATA);
-
- return data;
-}
-
-static inline int search_nonuse_irq(void)
-{
- int i;
-
- for (i = 0; i < 16; i++) {
- if (vrc4171_irq_mask & (1 << i)) {
- vrc4171_irq_mask &= ~(1 << i);
- return i;
- }
- }
-
- return -1;
-}
-
-static int pccard_init(struct pcmcia_socket *sock)
-{
- struct vrc4171_socket *socket;
- unsigned int slot;
-
- sock->features |= SS_CAP_PCCARD | SS_CAP_PAGE_REGS;
- sock->irq_mask = 0;
- sock->map_size = 0x1000;
- sock->pci_irq = vrc4171_irq;
-
- slot = sock->sock;
- socket = &vrc4171_sockets[slot];
- socket->csc_irq = search_nonuse_irq();
- socket->io_irq = search_nonuse_irq();
- spin_lock_init(&socket->lock);
-
- return 0;
-}
-
-static int pccard_get_status(struct pcmcia_socket *sock, u_int *value)
-{
- unsigned int slot;
- uint8_t status, sense;
- u_int val = 0;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS || value == NULL)
- return -EINVAL;
-
- slot = sock->sock;
-
- status = exca_read_byte(slot, I365_STATUS);
- if (exca_read_byte(slot, I365_INTCTL) & I365_PC_IOCARD) {
- if (status & I365_CS_STSCHG)
- val |= SS_STSCHG;
- } else {
- if (!(status & I365_CS_BVD1))
- val |= SS_BATDEAD;
- else if ((status & (I365_CS_BVD1 | I365_CS_BVD2)) == I365_CS_BVD1)
- val |= SS_BATWARN;
- }
- if ((status & I365_CS_DETECT) == I365_CS_DETECT)
- val |= SS_DETECT;
- if (status & I365_CS_WRPROT)
- val |= SS_WRPROT;
- if (status & I365_CS_READY)
- val |= SS_READY;
- if (status & I365_CS_POWERON)
- val |= SS_POWERON;
-
- sense = exca_read_byte(slot, CARD_VOLTAGE_SENSE);
- switch (sense) {
- case VCC_3VORXV_CAPABLE:
- val |= SS_3VCARD | SS_XVCARD;
- break;
- case VCC_XV_ONLY:
- val |= SS_XVCARD;
- break;
- case VCC_3V_CAPABLE:
- val |= SS_3VCARD;
- break;
- default:
- /* 5V only */
- break;
- }
-
- *value = val;
-
- return 0;
-}
-
-static inline uint8_t set_Vcc_value(u_char Vcc)
-{
- switch (Vcc) {
- case 33:
- return VCC_3V;
- case 50:
- return VCC_5V;
- }
-
- /* Small voltage is chosen for safety. */
- return VCC_3V;
-}
-
-static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
-{
- struct vrc4171_socket *socket;
- unsigned int slot;
- uint8_t voltage, power, control, cscint;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS ||
- (state->Vpp != state->Vcc && state->Vpp != 0) ||
- (state->Vcc != 50 && state->Vcc != 33 && state->Vcc != 0))
- return -EINVAL;
-
- slot = sock->sock;
- socket = &vrc4171_sockets[slot];
-
- spin_lock_irq(&socket->lock);
-
- voltage = set_Vcc_value(state->Vcc);
- exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage);
-
- power = POWER_ENABLE;
- if (state->Vpp == state->Vcc)
- power |= VPP_GET_VCC;
- if (state->flags & SS_OUTPUT_ENA)
- power |= I365_PWR_OUT;
- exca_write_byte(slot, I365_POWER, power);
-
- control = 0;
- if (state->io_irq != 0)
- control |= socket->io_irq;
- if (state->flags & SS_IOCARD)
- control |= I365_PC_IOCARD;
- if (state->flags & SS_RESET)
- control &= ~I365_PC_RESET;
- else
- control |= I365_PC_RESET;
- exca_write_byte(slot, I365_INTCTL, control);
-
- cscint = 0;
- exca_write_byte(slot, I365_CSCINT, cscint);
- exca_read_byte(slot, I365_CSC); /* clear CardStatus change */
- if (state->csc_mask != 0)
- cscint |= socket->csc_irq << 8;
- if (state->flags & SS_IOCARD) {
- if (state->csc_mask & SS_STSCHG)
- cscint |= I365_CSC_STSCHG;
- } else {
- if (state->csc_mask & SS_BATDEAD)
- cscint |= I365_CSC_BVD1;
- if (state->csc_mask & SS_BATWARN)
- cscint |= I365_CSC_BVD2;
- }
- if (state->csc_mask & SS_READY)
- cscint |= I365_CSC_READY;
- if (state->csc_mask & SS_DETECT)
- cscint |= I365_CSC_DETECT;
- exca_write_byte(slot, I365_CSCINT, cscint);
-
- spin_unlock_irq(&socket->lock);
-
- return 0;
-}
-
-static int pccard_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
-{
- unsigned int slot;
- uint8_t ioctl, addrwin;
- u_char map;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS ||
- io == NULL || io->map >= IO_MAX_MAPS ||
- io->start > 0xffff || io->stop > 0xffff || io->start > io->stop)
- return -EINVAL;
-
- slot = sock->sock;
- map = io->map;
-
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- if (addrwin & I365_ENA_IO(map)) {
- addrwin &= ~I365_ENA_IO(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- exca_write_word(slot, I365_IO(map)+I365_W_START, io->start);
- exca_write_word(slot, I365_IO(map)+I365_W_STOP, io->stop);
-
- ioctl = 0;
- if (io->speed > 0)
- ioctl |= I365_IOCTL_WAIT(map);
- if (io->flags & MAP_16BIT)
- ioctl |= I365_IOCTL_16BIT(map);
- if (io->flags & MAP_AUTOSZ)
- ioctl |= I365_IOCTL_IOCS16(map);
- if (io->flags & MAP_0WS)
- ioctl |= I365_IOCTL_0WS(map);
- exca_write_byte(slot, I365_IOCTL, ioctl);
-
- if (io->flags & MAP_ACTIVE) {
- addrwin |= I365_ENA_IO(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- return 0;
-}
-
-static int pccard_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
-{
- unsigned int slot;
- uint16_t start, stop, offset;
- uint8_t addrwin;
- u_char map;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS ||
- mem == NULL || mem->map >= MEM_MAX_MAPS ||
- mem->res->start < CARD_MEM_START || mem->res->start > CARD_MEM_END ||
- mem->res->end < CARD_MEM_START || mem->res->end > CARD_MEM_END ||
- mem->res->start > mem->res->end ||
- mem->card_start > CARD_MAX_MEM_OFFSET ||
- mem->speed > CARD_MAX_MEM_SPEED)
- return -EINVAL;
-
- slot = sock->sock;
- map = mem->map;
-
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- if (addrwin & I365_ENA_MEM(map)) {
- addrwin &= ~I365_ENA_MEM(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- start = (mem->res->start >> 12) & 0x3fff;
- if (mem->flags & MAP_16BIT)
- start |= I365_MEM_16BIT;
- exca_write_word(slot, I365_MEM(map)+I365_W_START, start);
-
- stop = (mem->res->end >> 12) & 0x3fff;
- switch (mem->speed) {
- case 0:
- break;
- case 1:
- stop |= I365_MEM_WS0;
- break;
- case 2:
- stop |= I365_MEM_WS1;
- break;
- default:
- stop |= I365_MEM_WS0 | I365_MEM_WS1;
- break;
- }
- exca_write_word(slot, I365_MEM(map)+I365_W_STOP, stop);
-
- offset = (mem->card_start >> 12) & 0x3fff;
- if (mem->flags & MAP_ATTRIB)
- offset |= I365_MEM_REG;
- if (mem->flags & MAP_WRPROT)
- offset |= I365_MEM_WRPROT;
- exca_write_word(slot, I365_MEM(map)+I365_W_OFF, offset);
-
- if (mem->flags & MAP_ACTIVE) {
- addrwin |= I365_ENA_MEM(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- return 0;
-}
-
-static struct pccard_operations vrc4171_pccard_operations = {
- .init = pccard_init,
- .get_status = pccard_get_status,
- .set_socket = pccard_set_socket,
- .set_io_map = pccard_set_io_map,
- .set_mem_map = pccard_set_mem_map,
-};
-
-static inline unsigned int get_events(int slot)
-{
- unsigned int events = 0;
- uint8_t status, csc;
-
- status = exca_read_byte(slot, I365_STATUS);
- csc = exca_read_byte(slot, I365_CSC);
-
- if (exca_read_byte(slot, I365_INTCTL) & I365_PC_IOCARD) {
- if ((csc & I365_CSC_STSCHG) && (status & I365_CS_STSCHG))
- events |= SS_STSCHG;
- } else {
- if (csc & (I365_CSC_BVD1 | I365_CSC_BVD2)) {
- if (!(status & I365_CS_BVD1))
- events |= SS_BATDEAD;
- else if ((status & (I365_CS_BVD1 | I365_CS_BVD2)) == I365_CS_BVD1)
- events |= SS_BATWARN;
- }
- }
- if ((csc & I365_CSC_READY) && (status & I365_CS_READY))
- events |= SS_READY;
- if ((csc & I365_CSC_DETECT) && ((status & I365_CS_DETECT) == I365_CS_DETECT))
- events |= SS_DETECT;
-
- return events;
-}
-
-static irqreturn_t pccard_interrupt(int irq, void *dev_id)
-{
- struct vrc4171_socket *socket;
- unsigned int events;
- irqreturn_t retval = IRQ_NONE;
- uint16_t status;
-
- status = vrc4171_get_irq_status();
- if (status & IRQ_A) {
- socket = &vrc4171_sockets[CARD_SLOTA];
- if (socket->slot == SLOT_INITIALIZED) {
- if (status & (1 << socket->csc_irq)) {
- events = get_events(CARD_SLOTA);
- if (events != 0) {
- pcmcia_parse_events(&socket->pcmcia_socket, events);
- retval = IRQ_HANDLED;
- }
- }
- }
- }
-
- if (status & IRQ_B) {
- socket = &vrc4171_sockets[CARD_SLOTB];
- if (socket->slot == SLOT_INITIALIZED) {
- if (status & (1 << socket->csc_irq)) {
- events = get_events(CARD_SLOTB);
- if (events != 0) {
- pcmcia_parse_events(&socket->pcmcia_socket, events);
- retval = IRQ_HANDLED;
- }
- }
- }
- }
-
- return retval;
-}
-
-static inline void reserve_using_irq(int slot)
-{
- unsigned int irq;
-
- irq = exca_read_byte(slot, I365_INTCTL);
- irq &= 0x0f;
- vrc4171_irq_mask &= ~(1 << irq);
-
- irq = exca_read_byte(slot, I365_CSCINT);
- irq = (irq & 0xf0) >> 4;
- vrc4171_irq_mask &= ~(1 << irq);
-}
-
-static int vrc4171_add_sockets(void)
-{
- struct vrc4171_socket *socket;
- int slot, retval;
-
- for (slot = 0; slot < CARD_MAX_SLOTS; slot++) {
- if (slot == CARD_SLOTB && vrc4171_slotb == SLOTB_IS_NONE)
- continue;
-
- socket = &vrc4171_sockets[slot];
- if (socket->slot != SLOT_PROBE) {
- uint8_t addrwin;
-
- switch (socket->slot) {
- case SLOT_NOPROBE_MEM:
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- addrwin &= 0x1f;
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- break;
- case SLOT_NOPROBE_IO:
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- addrwin &= 0xc0;
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- break;
- default:
- break;
- }
-
- reserve_using_irq(slot);
- continue;
- }
-
- sprintf(socket->name, "NEC VRC4171 Card Slot %1c", 'A' + slot);
- socket->pcmcia_socket.dev.parent = &vrc4171_card_device.dev;
- socket->pcmcia_socket.ops = &vrc4171_pccard_operations;
- socket->pcmcia_socket.owner = THIS_MODULE;
-
- retval = pcmcia_register_socket(&socket->pcmcia_socket);
- if (retval < 0)
- return retval;
-
- exca_write_byte(slot, I365_ADDRWIN, 0);
- exca_write_byte(slot, GLOBAL_CONTROL, 0);
-
- socket->slot = SLOT_INITIALIZED;
- }
-
- return 0;
-}
-
-static void vrc4171_remove_sockets(void)
-{
- struct vrc4171_socket *socket;
- int slot;
-
- for (slot = 0; slot < CARD_MAX_SLOTS; slot++) {
- if (slot == CARD_SLOTB && vrc4171_slotb == SLOTB_IS_NONE)
- continue;
-
- socket = &vrc4171_sockets[slot];
- if (socket->slot == SLOT_INITIALIZED)
- pcmcia_unregister_socket(&socket->pcmcia_socket);
-
- socket->slot = SLOT_PROBE;
- }
-}
-
-static int vrc4171_card_setup(char *options)
-{
- if (options == NULL || *options == '\0')
- return 1;
-
- if (strncmp(options, "irq:", 4) == 0) {
- int irq;
- options += 4;
- irq = simple_strtoul(options, &options, 0);
- if (irq >= 0 && irq < nr_irqs)
- vrc4171_irq = irq;
-
- if (*options != ',')
- return 1;
- options++;
- }
-
- if (strncmp(options, "slota:", 6) == 0) {
- options += 6;
- if (*options != '\0') {
- if (strncmp(options, "memnoprobe", 10) == 0) {
- vrc4171_sockets[CARD_SLOTA].slot = SLOT_NOPROBE_MEM;
- options += 10;
- } else if (strncmp(options, "ionoprobe", 9) == 0) {
- vrc4171_sockets[CARD_SLOTA].slot = SLOT_NOPROBE_IO;
- options += 9;
- } else if ( strncmp(options, "noprobe", 7) == 0) {
- vrc4171_sockets[CARD_SLOTA].slot = SLOT_NOPROBE_ALL;
- options += 7;
- }
-
- if (*options != ',')
- return 1;
- options++;
- } else
- return 1;
-
- }
-
- if (strncmp(options, "slotb:", 6) == 0) {
- options += 6;
- if (*options != '\0') {
- if (strncmp(options, "pccard", 6) == 0) {
- vrc4171_slotb = SLOTB_IS_PCCARD;
- options += 6;
- } else if (strncmp(options, "cf", 2) == 0) {
- vrc4171_slotb = SLOTB_IS_CF;
- options += 2;
- } else if (strncmp(options, "flashrom", 8) == 0) {
- vrc4171_slotb = SLOTB_IS_FLASHROM;
- options += 8;
- } else if (strncmp(options, "none", 4) == 0) {
- vrc4171_slotb = SLOTB_IS_NONE;
- options += 4;
- }
-
- if (*options != ',')
- return 1;
- options++;
-
- if (strncmp(options, "memnoprobe", 10) == 0)
- vrc4171_sockets[CARD_SLOTB].slot = SLOT_NOPROBE_MEM;
- if (strncmp(options, "ionoprobe", 9) == 0)
- vrc4171_sockets[CARD_SLOTB].slot = SLOT_NOPROBE_IO;
- if (strncmp(options, "noprobe", 7) == 0)
- vrc4171_sockets[CARD_SLOTB].slot = SLOT_NOPROBE_ALL;
- }
- }
-
- return 1;
-}
-
-__setup("vrc4171_card=", vrc4171_card_setup);
-
-static struct platform_driver vrc4171_card_driver = {
- .driver = {
- .name = vrc4171_card_name,
- },
-};
-
-static int vrc4171_card_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&vrc4171_card_driver);
- if (retval < 0)
- return retval;
-
- retval = platform_device_register(&vrc4171_card_device);
- if (retval < 0) {
- platform_driver_unregister(&vrc4171_card_driver);
- return retval;
- }
-
- vrc4171_set_multifunction_pin(vrc4171_slotb);
-
- retval = vrc4171_add_sockets();
- if (retval == 0)
- retval = request_irq(vrc4171_irq, pccard_interrupt, IRQF_SHARED,
- vrc4171_card_name, vrc4171_sockets);
-
- if (retval < 0) {
- vrc4171_remove_sockets();
- platform_device_unregister(&vrc4171_card_device);
- platform_driver_unregister(&vrc4171_card_driver);
- return retval;
- }
-
- printk(KERN_INFO "%s, connected to IRQ %d\n",
- vrc4171_card_driver.driver.name, vrc4171_irq);
-
- return 0;
-}
-
-static void vrc4171_card_exit(void)
-{
- free_irq(vrc4171_irq, vrc4171_sockets);
- vrc4171_remove_sockets();
- platform_device_unregister(&vrc4171_card_device);
- platform_driver_unregister(&vrc4171_card_driver);
-}
-
-module_init(vrc4171_card_init);
-module_exit(vrc4171_card_exit);
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
index 1925ddc13f00..731c5d8f75c6 100644
--- a/drivers/peci/controller/peci-aspeed.c
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -523,7 +523,7 @@ static int aspeed_peci_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (!priv->irq)
+ if (priv->irq < 0)
return priv->irq;
ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
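Side note on the peci-aspeed fix above: platform_get_irq() returns either a positive IRQ number or a negative errno, never 0, so the failure check has to test for a negative value. A minimal, hypothetical probe sketch (identifiers are illustrative, not from this patch):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates e.g. -EPROBE_DEFER; 0 is never returned */

	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
				dev_name(&pdev->dev), NULL);
}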
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
index 68eb61c65d34..de4a7b3e5966 100644
--- a/drivers/peci/cpu.c
+++ b/drivers/peci/cpu.c
@@ -188,8 +188,6 @@ static void adev_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
- auxiliary_device_uninit(adev);
-
kfree(adev->name);
kfree(adev);
}
@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev)
struct auxiliary_device *adev = _adev;
auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
static int devm_adev_add(struct device *dev, int idx)
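As context for the peci cpu.c change above, a hedged sketch of the auxiliary-device teardown order it restores (names are illustrative, not from this patch): auxiliary_device_uninit() is paired with auxiliary_device_delete() at unregister time rather than being called from the release callback, so the last reference drop frees the device exactly once.

#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

static void example_adev_release(struct device *dev)
{
	struct auxiliary_device *adev = to_auxiliary_dev(dev);

	kfree(adev);			/* no auxiliary_device_uninit() here */
}

static void example_unregister_adev(void *_adev)
{
	struct auxiliary_device *adev = _adev;

	auxiliary_device_delete(adev);	/* take it off the auxiliary bus */
	auxiliary_device_uninit(adev);	/* drop the reference taken at init */
}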
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 1e2d69453771..44c07ea487f4 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -183,6 +183,13 @@ config APPLE_M1_CPU_PMU
Provides support for the non-architectural CPU PMUs present on
the Apple M1 SoCs and derivatives.
+config ALIBABA_UNCORE_DRW_PMU
+ tristate "Alibaba T-Head Yitian 710 DDR Sub-system Driveway PMU driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for Driveway PMU event monitoring on the Yitian 710 DDR
+ Sub-system.
+
source "drivers/perf/hisilicon/Kconfig"
config MARVELL_CN10K_DDR_PMU
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 57a279c61df5..050d04ee19dd 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
+obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
new file mode 100644
index 000000000000..82729b874f09
--- /dev/null
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alibaba DDR Sub-System Driveway PMU driver
+ *
+ * Copyright (C) 2022 Alibaba Inc
+ */
+
+#define ALI_DRW_PMUNAME "ali_drw"
+#define ALI_DRW_DRVNAME ALI_DRW_PMUNAME "_pmu"
+#define pr_fmt(fmt) ALI_DRW_DRVNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/refcount.h>
+
+
+#define ALI_DRW_PMU_COMMON_MAX_COUNTERS 16
+#define ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE 19
+
+#define ALI_DRW_PMU_PA_SHIFT 12
+#define ALI_DRW_PMU_CNT_INIT 0x00000000
+#define ALI_DRW_CNT_MAX_PERIOD 0xffffffff
+#define ALI_DRW_PMU_CYCLE_EVT_ID 0x80
+
+#define ALI_DRW_PMU_CNT_CTRL 0xC00
+#define ALI_DRW_PMU_CNT_RST BIT(2)
+#define ALI_DRW_PMU_CNT_STOP BIT(1)
+#define ALI_DRW_PMU_CNT_START BIT(0)
+
+#define ALI_DRW_PMU_CNT_STATE 0xC04
+#define ALI_DRW_PMU_TEST_CTRL 0xC08
+#define ALI_DRW_PMU_CNT_PRELOAD 0xC0C
+
+#define ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK GENMASK(23, 0)
+#define ALI_DRW_PMU_CYCLE_CNT_LOW_MASK GENMASK(31, 0)
+#define ALI_DRW_PMU_CYCLE_CNT_HIGH 0xC10
+#define ALI_DRW_PMU_CYCLE_CNT_LOW 0xC14
+
+/* PMU EVENT SEL 0-3 are paired in 32-bit registers on a 4-byte stride */
+#define ALI_DRW_PMU_EVENT_SEL0 0xC68
+/* counters 0-3 use sel0, counters 4-7 use sel1, ... */
+#define ALI_DRW_PMU_EVENT_SELn(n) \
+ (ALI_DRW_PMU_EVENT_SEL0 + (n / 4) * 0x4)
+#define ALI_DRW_PMCOM_CNT_EN BIT(7)
+#define ALI_DRW_PMCOM_CNT_EVENT_MASK GENMASK(5, 0)
+#define ALI_DRW_PMCOM_CNT_EVENT_OFFSET(n) \
+ (8 * (n % 4))
+
+/* PMU COMMON COUNTER 0-15, are paired in 32-bit registers on a 4-byte stride */
+#define ALI_DRW_PMU_COMMON_COUNTER0 0xC78
+#define ALI_DRW_PMU_COMMON_COUNTERn(n) \
+ (ALI_DRW_PMU_COMMON_COUNTER0 + 0x4 * (n))
+
+#define ALI_DRW_PMU_OV_INTR_ENABLE_CTL 0xCB8
+#define ALI_DRW_PMU_OV_INTR_DISABLE_CTL 0xCBC
+#define ALI_DRW_PMU_OV_INTR_ENABLE_STATUS 0xCC0
+#define ALI_DRW_PMU_OV_INTR_CLR 0xCC4
+#define ALI_DRW_PMU_OV_INTR_STATUS 0xCC8
+#define ALI_DRW_PMCOM_CNT_OV_INTR_MASK GENMASK(23, 8)
+#define ALI_DRW_PMBW_CNT_OV_INTR_MASK GENMASK(7, 0)
+#define ALI_DRW_PMU_OV_INTR_MASK GENMASK_ULL(63, 0)
+
+static int ali_drw_cpuhp_state_num;
+
+static LIST_HEAD(ali_drw_pmu_irqs);
+static DEFINE_MUTEX(ali_drw_pmu_irqs_lock);
+
+struct ali_drw_pmu_irq {
+ struct hlist_node node;
+ struct list_head irqs_node;
+ struct list_head pmus_node;
+ int irq_num;
+ int cpu;
+ refcount_t refcount;
+};
+
+struct ali_drw_pmu {
+ void __iomem *cfg_base;
+ struct device *dev;
+
+ struct list_head pmus_node;
+ struct ali_drw_pmu_irq *irq;
+ int irq_num;
+ int cpu;
+ DECLARE_BITMAP(used_mask, ALI_DRW_PMU_COMMON_MAX_COUNTERS);
+ struct perf_event *events[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
+ int evtids[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
+
+ struct pmu pmu;
+};
+
+#define to_ali_drw_pmu(p) (container_of(p, struct ali_drw_pmu, pmu))
+
+#define DRW_CONFIG_EVENTID GENMASK(7, 0)
+#define GET_DRW_EVENTID(event) FIELD_GET(DRW_CONFIG_EVENTID, (event)->attr.config)
+
+static ssize_t ali_drw_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+/*
+ * PMU event attributes
+ */
+static ssize_t ali_drw_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+#define ALI_DRW_PMU_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
+ })[0].attr.attr)
+
+#define ALI_DRW_PMU_FORMAT_ATTR(_name, _config) \
+ ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_format_show, (void *)_config)
+#define ALI_DRW_PMU_EVENT_ATTR(_name, _config) \
+ ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_event_show, (unsigned long)_config)
+
+static struct attribute *ali_drw_pmu_events_attrs[] = {
+ ALI_DRW_PMU_EVENT_ATTR(hif_rd_or_wr, 0x0),
+ ALI_DRW_PMU_EVENT_ATTR(hif_wr, 0x1),
+ ALI_DRW_PMU_EVENT_ATTR(hif_rd, 0x2),
+ ALI_DRW_PMU_EVENT_ATTR(hif_rmw, 0x3),
+ ALI_DRW_PMU_EVENT_ATTR(hif_hi_pri_rd, 0x4),
+ ALI_DRW_PMU_EVENT_ATTR(dfi_wr_data_cycles, 0x7),
+ ALI_DRW_PMU_EVENT_ATTR(dfi_rd_data_cycles, 0x8),
+ ALI_DRW_PMU_EVENT_ATTR(hpr_xact_when_critical, 0x9),
+ ALI_DRW_PMU_EVENT_ATTR(lpr_xact_when_critical, 0xA),
+ ALI_DRW_PMU_EVENT_ATTR(wr_xact_when_critical, 0xB),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_activate, 0xC),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_rd_or_wr, 0xD),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_rd_activate, 0xE),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_rd, 0xF),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_wr, 0x10),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_mwr, 0x11),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_precharge, 0x12),
+ ALI_DRW_PMU_EVENT_ATTR(precharge_for_rdwr, 0x13),
+ ALI_DRW_PMU_EVENT_ATTR(precharge_for_other, 0x14),
+ ALI_DRW_PMU_EVENT_ATTR(rdwr_transitions, 0x15),
+ ALI_DRW_PMU_EVENT_ATTR(write_combine, 0x16),
+ ALI_DRW_PMU_EVENT_ATTR(war_hazard, 0x17),
+ ALI_DRW_PMU_EVENT_ATTR(raw_hazard, 0x18),
+ ALI_DRW_PMU_EVENT_ATTR(waw_hazard, 0x19),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk0, 0x1A),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk1, 0x1B),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk2, 0x1C),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk3, 0x1D),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk0, 0x1E),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk1, 0x1F),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk2, 0x20),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk3, 0x21),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk0, 0x26),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk1, 0x27),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk2, 0x28),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk3, 0x29),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_refresh, 0x2A),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_crit_ref, 0x2B),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_load_mode, 0x2D),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_zqcl, 0x2E),
+ ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_rd, 0x30),
+ ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_wr, 0x31),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mpc, 0x34),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mrr, 0x35),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_tcr_mrr, 0x36),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_zqstart, 0x37),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_zqlatch, 0x38),
+ ALI_DRW_PMU_EVENT_ATTR(chi_txreq, 0x39),
+ ALI_DRW_PMU_EVENT_ATTR(chi_txdat, 0x3A),
+ ALI_DRW_PMU_EVENT_ATTR(chi_rxdat, 0x3B),
+ ALI_DRW_PMU_EVENT_ATTR(chi_rxrsp, 0x3C),
+ ALI_DRW_PMU_EVENT_ATTR(tsz_vio, 0x3D),
+ ALI_DRW_PMU_EVENT_ATTR(cycle, 0x80),
+ NULL,
+};
+
+static struct attribute_group ali_drw_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ali_drw_pmu_events_attrs,
+};
+
+static struct attribute *ali_drw_pmu_format_attr[] = {
+ ALI_DRW_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL,
+};
+
+static const struct attribute_group ali_drw_pmu_format_group = {
+ .name = "format",
+ .attrs = ali_drw_pmu_format_attr,
+};
+
+static ssize_t ali_drw_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(drw_pmu->cpu));
+}
+
+static struct device_attribute ali_drw_pmu_cpumask_attr =
+ __ATTR(cpumask, 0444, ali_drw_pmu_cpumask_show, NULL);
+
+static struct attribute *ali_drw_pmu_cpumask_attrs[] = {
+ &ali_drw_pmu_cpumask_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ali_drw_pmu_cpumask_attr_group = {
+ .attrs = ali_drw_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *ali_drw_pmu_attr_groups[] = {
+ &ali_drw_pmu_events_attr_group,
+ &ali_drw_pmu_cpumask_attr_group,
+ &ali_drw_pmu_format_group,
+ NULL,
+};
+
+/* find a counter for the event; in the add callback, hw.idx is set to that counter */
+static int ali_drw_get_counter_idx(struct perf_event *event)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ int idx;
+
+ for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; ++idx) {
+ if (!test_and_set_bit(idx, drw_pmu->used_mask))
+ return idx;
+ }
+
+ /* The counters are all in use. */
+ return -EBUSY;
+}
+
+static u64 ali_drw_pmu_read_counter(struct perf_event *event)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ u64 cycle_high, cycle_low;
+
+ if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
+ cycle_high = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_HIGH);
+ cycle_high &= ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK;
+ cycle_low = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_LOW);
+ cycle_low &= ALI_DRW_PMU_CYCLE_CNT_LOW_MASK;
+ return (cycle_high << 32 | cycle_low);
+ }
+
+ return readl(drw_pmu->cfg_base +
+ ALI_DRW_PMU_COMMON_COUNTERn(event->hw.idx));
+}
+
+static void ali_drw_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev, now;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+ now = ali_drw_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ /* handle overflow. */
+ delta = now - prev;
+ if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID)
+ delta &= ALI_DRW_PMU_OV_INTR_MASK;
+ else
+ delta &= ALI_DRW_CNT_MAX_PERIOD;
+ local64_add(delta, &event->count);
+}
+
+static void ali_drw_pmu_event_set_period(struct perf_event *event)
+{
+ u64 pre_val;
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+ /* set a preload counter for test purpose */
+ writel(ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE + event->hw.idx,
+ drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
+
+ /* set counter initial value */
+ pre_val = ALI_DRW_PMU_CNT_INIT;
+ writel(pre_val, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
+ local64_set(&event->hw.prev_count, pre_val);
+
+ /* set sel mode to zero to start test */
+ writel(0x0, drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
+}
+
+static void ali_drw_pmu_enable_counter(struct perf_event *event)
+{
+ u32 val, subval, reg, shift;
+ int counter = event->hw.idx;
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+ reg = ALI_DRW_PMU_EVENT_SELn(counter);
+ val = readl(drw_pmu->cfg_base + reg);
+ subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 1) |
+ FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, drw_pmu->evtids[counter]);
+
+ shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
+ val &= ~(GENMASK(7, 0) << shift);
+ val |= subval << shift;
+
+ writel(val, drw_pmu->cfg_base + reg);
+}
+
+static void ali_drw_pmu_disable_counter(struct perf_event *event)
+{
+ u32 val, reg, subval, shift;
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ int counter = event->hw.idx;
+
+ reg = ALI_DRW_PMU_EVENT_SELn(counter);
+ val = readl(drw_pmu->cfg_base + reg);
+ subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 0) |
+ FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, 0);
+
+ shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
+ val &= ~(GENMASK(7, 0) << shift);
+ val |= subval << shift;
+
+ writel(val, drw_pmu->cfg_base + reg);
+}
+
+static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
+{
+ struct ali_drw_pmu_irq *irq = data;
+ struct ali_drw_pmu *drw_pmu;
+ irqreturn_t ret = IRQ_NONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(drw_pmu, &irq->pmus_node, pmus_node) {
+ unsigned long status, clr_status;
+ struct perf_event *event;
+ unsigned int idx;
+
+ for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
+ event = drw_pmu->events[idx];
+ if (!event)
+ continue;
+ ali_drw_pmu_disable_counter(event);
+ }
+
+ /* common counter intr status */
+ status = readl(drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_STATUS);
+ status = FIELD_GET(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
+ if (status) {
+ for_each_set_bit(idx, &status,
+ ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
+ event = drw_pmu->events[idx];
+ if (WARN_ON_ONCE(!event))
+ continue;
+ ali_drw_pmu_event_update(event);
+ ali_drw_pmu_event_set_period(event);
+ }
+
+ /* clear common counter intr status */
+ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
+ writel(clr_status,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
+ }
+
+ for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
+ event = drw_pmu->events[idx];
+ if (!event)
+ continue;
+ if (!(event->hw.state & PERF_HES_STOPPED))
+ ali_drw_pmu_enable_counter(event);
+ }
+ if (status)
+ ret = IRQ_HANDLED;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct ali_drw_pmu_irq *__ali_drw_pmu_init_irq(struct platform_device
+ *pdev, int irq_num)
+{
+ int ret;
+ struct ali_drw_pmu_irq *irq;
+
+ list_for_each_entry(irq, &ali_drw_pmu_irqs, irqs_node) {
+ if (irq->irq_num == irq_num
+ && refcount_inc_not_zero(&irq->refcount))
+ return irq;
+ }
+
+ irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&irq->pmus_node);
+
+ /* Pick one CPU to be the preferred one to use */
+ irq->cpu = smp_processor_id();
+ refcount_set(&irq->refcount, 1);
+
+ /*
+ * FIXME: one of the DDRSS Driveway PMU overflow interrupts shares its
+ * irq number with the MPAM ERR_IRQ. To register both the DDRSS PMU and
+ * MPAM drivers successfully, add the IRQF_SHARED flag. However, a PMU
+ * interrupt should not be shared with another component.
+ */
+ ret = devm_request_irq(&pdev->dev, irq_num, ali_drw_pmu_isr,
+ IRQF_SHARED, dev_name(&pdev->dev), irq);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Failed to request IRQ:%d ret:%d\n", irq_num, ret);
+ goto out_free;
+ }
+
+ ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
+ if (ret)
+ goto out_free;
+
+ ret = cpuhp_state_add_instance_nocalls(ali_drw_cpuhp_state_num,
+ &irq->node);
+ if (ret)
+ goto out_free;
+
+ irq->irq_num = irq_num;
+ list_add(&irq->irqs_node, &ali_drw_pmu_irqs);
+
+ return irq;
+
+out_free:
+ kfree(irq);
+ return ERR_PTR(ret);
+}
+
+static int ali_drw_pmu_init_irq(struct ali_drw_pmu *drw_pmu,
+ struct platform_device *pdev)
+{
+ int irq_num;
+ struct ali_drw_pmu_irq *irq;
+
+ /* Read and init IRQ */
+ irq_num = platform_get_irq(pdev, 0);
+ if (irq_num < 0)
+ return irq_num;
+
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ irq = __ali_drw_pmu_init_irq(pdev, irq_num);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ drw_pmu->irq = irq;
+
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ list_add_rcu(&drw_pmu->pmus_node, &irq->pmus_node);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ return 0;
+}
+
+static void ali_drw_pmu_uninit_irq(struct ali_drw_pmu *drw_pmu)
+{
+ struct ali_drw_pmu_irq *irq = drw_pmu->irq;
+
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ list_del_rcu(&drw_pmu->pmus_node);
+
+ if (!refcount_dec_and_test(&irq->refcount)) {
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+ return;
+ }
+
+ list_del(&irq->irqs_node);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
+ cpuhp_state_remove_instance_nocalls(ali_drw_cpuhp_state_num,
+ &irq->node);
+ kfree(irq);
+}
+
+static int ali_drw_pmu_event_init(struct perf_event *event)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+ struct device *dev = drw_pmu->pmu.dev;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event)) {
+ dev_err(dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->attach_state & PERF_ATTACH_TASK) {
+ dev_err(dev, "Per-task counter cannot allocate!\n");
+ return -EOPNOTSUPP;
+ }
+
+ event->cpu = drw_pmu->cpu;
+ if (event->cpu < 0) {
+ dev_err(dev, "Per-task mode not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->group_leader != event &&
+ !is_software_event(event->group_leader)) {
+ dev_err(dev, "driveway only allows one event!\n");
+ return -EINVAL;
+ }
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling != event && !is_software_event(sibling)) {
+ dev_err(dev, "driveway event not allowed!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* reset all the pmu counters */
+ writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+
+ hwc->idx = -1;
+
+ return 0;
+}
+
+static void ali_drw_pmu_start(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+ event->hw.state = 0;
+
+ if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
+ writel(ALI_DRW_PMU_CNT_START,
+ drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+ return;
+ }
+
+ ali_drw_pmu_event_set_period(event);
+ if (flags & PERF_EF_RELOAD) {
+ unsigned long prev_raw_count =
+ local64_read(&event->hw.prev_count);
+ writel(prev_raw_count,
+ drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
+ }
+
+ ali_drw_pmu_enable_counter(event);
+
+ writel(ALI_DRW_PMU_CNT_START, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+}
+
+static void ali_drw_pmu_stop(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ if (GET_DRW_EVENTID(event) != ALI_DRW_PMU_CYCLE_EVT_ID)
+ ali_drw_pmu_disable_counter(event);
+
+ writel(ALI_DRW_PMU_CNT_STOP, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+
+ ali_drw_pmu_event_update(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int ali_drw_pmu_add(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = -1;
+ int evtid;
+
+ evtid = GET_DRW_EVENTID(event);
+
+ if (evtid != ALI_DRW_PMU_CYCLE_EVT_ID) {
+ idx = ali_drw_get_counter_idx(event);
+ if (idx < 0)
+ return idx;
+ drw_pmu->events[idx] = event;
+ drw_pmu->evtids[idx] = evtid;
+ }
+ hwc->idx = idx;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ ali_drw_pmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void ali_drw_pmu_del(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ ali_drw_pmu_stop(event, PERF_EF_UPDATE);
+
+ if (idx >= 0 && idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
+ drw_pmu->events[idx] = NULL;
+ drw_pmu->evtids[idx] = 0;
+ clear_bit(idx, drw_pmu->used_mask);
+ }
+
+ perf_event_update_userpage(event);
+}
+
+static void ali_drw_pmu_read(struct perf_event *event)
+{
+ ali_drw_pmu_event_update(event);
+}
+
+static int ali_drw_pmu_probe(struct platform_device *pdev)
+{
+ struct ali_drw_pmu *drw_pmu;
+ struct resource *res;
+ char *name;
+ int ret;
+
+ drw_pmu = devm_kzalloc(&pdev->dev, sizeof(*drw_pmu), GFP_KERNEL);
+ if (!drw_pmu)
+ return -ENOMEM;
+
+ drw_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drw_pmu);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!drw_pmu->cfg_base)
+ return -ENOMEM;
+
+ name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx",
+ (u64) (res->start >> ALI_DRW_PMU_PA_SHIFT));
+ if (!name)
+ return -ENOMEM;
+
+ writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+
+ /* enable the generation of interrupt by all common counters */
+ writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_ENABLE_CTL);
+
+ /* clearing interrupt status */
+ writel(0xffffff, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
+
+ drw_pmu->cpu = smp_processor_id();
+
+ ret = ali_drw_pmu_init_irq(drw_pmu, pdev);
+ if (ret)
+ return ret;
+
+ drw_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = ali_drw_pmu_event_init,
+ .add = ali_drw_pmu_add,
+ .del = ali_drw_pmu_del,
+ .start = ali_drw_pmu_start,
+ .stop = ali_drw_pmu_stop,
+ .read = ali_drw_pmu_read,
+ .attr_groups = ali_drw_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ ret = perf_pmu_register(&drw_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(drw_pmu->dev, "DRW Driveway PMU register failed!\n");
+ ali_drw_pmu_uninit_irq(drw_pmu);
+ }
+
+ return ret;
+}
+
+static int ali_drw_pmu_remove(struct platform_device *pdev)
+{
+ struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
+
+ /* disable the generation of interrupt by all common counters */
+ writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_DISABLE_CTL);
+
+ ali_drw_pmu_uninit_irq(drw_pmu);
+ perf_pmu_unregister(&drw_pmu->pmu);
+
+ return 0;
+}
+
+static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct ali_drw_pmu_irq *irq;
+ struct ali_drw_pmu *drw_pmu;
+ unsigned int target;
+ int ret;
+ cpumask_t node_online_cpus;
+
+ irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
+ if (cpu != irq->cpu)
+ return 0;
+
+ ret = cpumask_and(&node_online_cpus,
+ cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
+ if (ret)
+ target = cpumask_any_but(&node_online_cpus, cpu);
+ else
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ /* We're only reading, but this isn't the place to be involving RCU */
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ list_for_each_entry(drw_pmu, &irq->pmus_node, pmus_node)
+ perf_pmu_migrate_context(&drw_pmu->pmu, irq->cpu, target);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
+ irq->cpu = target;
+
+ return 0;
+}
+
+/*
+ * For historical reasons, the HID used in the production environment is
+ * ARMHD700, so we keep ARMHD700 as the Compatible ID.
+ */
+static const struct acpi_device_id ali_drw_acpi_match[] = {
+ {"BABA5000", 0},
+ {"ARMHD700", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ali_drw_acpi_match);
+
+static struct platform_driver ali_drw_pmu_driver = {
+ .driver = {
+ .name = "ali_drw_pmu",
+ .acpi_match_table = ali_drw_acpi_match,
+ },
+ .probe = ali_drw_pmu_probe,
+ .remove = ali_drw_pmu_remove,
+};
+
+static int __init ali_drw_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "ali_drw_pmu:online",
+ NULL, ali_drw_pmu_offline_cpu);
+
+ if (ret < 0) {
+ pr_err("DRW Driveway PMU: setup hotplug failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+ ali_drw_cpuhp_state_num = ret;
+
+ ret = platform_driver_register(&ali_drw_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
+
+ return ret;
+}
+
+static void __exit ali_drw_pmu_exit(void)
+{
+ platform_driver_unregister(&ali_drw_pmu_driver);
+ cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
+}
+
+module_init(ali_drw_pmu_init);
+module_exit(ali_drw_pmu_exit);
+
+MODULE_AUTHOR("Hongbo Yao <yaohongbo@linux.alibaba.com>");
+MODULE_AUTHOR("Neng Chen <nengchen@linux.alibaba.com>");
+MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba DDR Sub-System Driveway PMU driver");
+MODULE_LICENSE("GPL v2");
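For orientation only (not part of this patch), a hedged userspace sketch of reading one counter from a PMU like the one added above via perf_event_open(2). The PMU type value used here is an assumption and would have to be read from /sys/bus/event_source/devices/ali_drw_<addr>/type on a real system; config 0x0 corresponds to the hif_rd_or_wr event listed in the attribute table.

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 42;		/* placeholder: read the real value from sysfs */
	attr.config = 0x0;	/* hif_rd_or_wr */

	/* uncore PMU: must be counted system-wide on one CPU (pid == -1, cpu >= 0) */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("hif_rd_or_wr: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}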
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 80d8309652a4..b80a9b74662b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -36,7 +36,7 @@
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
+#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
#define CMN_MAX_DIMENSION 12
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index a36698a90d2f..4a15c86f45ef 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -639,6 +639,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
{
#ifdef CONFIG_ACPI
+ struct acpi_device *parent_adev = acpi_dev_parent(ACPI_COMPANION(dev));
int cpu;
/*
@@ -653,8 +654,7 @@ static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
continue;
acpi_dev = ACPI_COMPANION(cpu_dev);
- if (acpi_dev &&
- acpi_dev->parent == ACPI_COMPANION(dev)->parent)
+ if (acpi_dev && acpi_dev_parent(acpi_dev) == parent_adev)
cpumask_set_cpu(cpu, mask);
}
#endif
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 59d3980b8ca2..3f07df5a7e95 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -894,7 +894,7 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
* pmu::filter_match callback and pmu::event_init group
* validation).
*/
- .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+ .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
};
pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 513de1f54e2d..933b96e243b8 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu_devid(irq))
+ if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index b65a7d9640e1..6ce05ef4844d 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -674,9 +674,9 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
- case ID_AA64DFR0_PMSVER_8_2:
+ case ID_AA64DFR0_EL1_PMSVer_IMP:
return SYS_PMSEVFR_EL1_RES0_8_2;
- case ID_AA64DFR0_PMSVER_8_3:
+ case ID_AA64DFR0_EL1_PMSVer_V1P1:
 /* Return the highest version we support by default */
default:
return SYS_PMSEVFR_EL1_RES0_8_3;
@@ -958,7 +958,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
struct device *dev = &spe_pmu->pdev->dev;
fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
- ID_AA64DFR0_PMSVER_SHIFT);
+ ID_AA64DFR0_EL1_PMSVer_SHIFT);
if (!fld) {
dev_err(dev,
"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 30234c261b05..aaca6db7d8f6 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -840,16 +840,16 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev->parent);
struct platform_device *sdev = to_platform_device(dev);
- struct acpi_device *adev = ACPI_COMPANION(dev);
struct l2cache_pmu *l2cache_pmu = data;
struct cluster_pmu *cluster;
- unsigned long fw_cluster_id;
+ u64 fw_cluster_id;
int err;
int irq;
- if (!adev || kstrtoul(adev->pnp.unique_id, 10, &fw_cluster_id) < 0) {
+ err = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &fw_cluster_id);
+ if (err) {
dev_err(&pdev->dev, "unable to read ACPI uid\n");
- return -ENODEV;
+ return err;
}
cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
@@ -879,7 +879,7 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
}
dev_info(&pdev->dev,
- "Registered L2 cache PMU cluster %ld\n", fw_cluster_id);
+ "Registered L2 cache PMU cluster %lld\n", fw_cluster_id);
spin_lock_init(&cluster->pmu_lock);
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 1ff2ff6582bf..346311a05460 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -742,7 +742,8 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
- acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id);
+ acpi_dev_parent(acpi_dev)->pnp.unique_id,
+ acpi_dev->pnp.unique_id);
if (!l3pmu || !name)
return -ENOMEM;
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..2c20b0de8cb0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
-/**
+/*
* This is just a simple implementation to allow legacy implementations
* compatible with new RISC-V PMU driver framework.
* This driver only allows reading two counters i.e CYCLE & INSTRET.
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 6f6681bbfd36..8de4ca2fef21 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -473,7 +473,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (!pmu_ctr_list)
return -ENOMEM;
- for (i = 0; i <= nctr; i++) {
+ for (i = 0; i < nctr; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 93a6a8ee4716..1d89a2fd9b79 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -93,11 +93,11 @@ config PHY_BRCM_SATA
config PHY_BRCM_USB
tristate "Broadcom STB USB PHY driver"
- depends on ARCH_BCM4908 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB if ARCH_BRCMSTB
- default ARCH_BCM4908 || ARCH_BRCMSTB
+ default ARCH_BCMBCA || ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
This driver is required by the USB XHCI, EHCI and OHCI
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index a4d7d9bd100d..67712c77d806 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane {
int submode;
bool invert_tx;
bool invert_rx;
- bool needs_reset;
};
struct gbe_phy_init_data_fix {
@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane)
0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
}
-static int mvebu_a3700_comphy_reset(struct phy *phy)
+static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane)
{
- struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- u16 mask, data;
-
- dev_dbg(lane->dev, "resetting lane %d\n", lane->id);
-
- /* COMPHY reset for internal logic */
- comphy_lane_reg_set(lane, COMPHY_SFT_RESET,
- SFT_RST_NO_REG, SFT_RST_NO_REG);
-
- /* COMPHY register reset (cleared automatically) */
- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
-
- /* PIPE soft and register reset */
- data = PIPE_SOFT_RESET | PIPE_REG_RESET;
- mask = data;
- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask);
-
- /* Release PIPE register reset */
- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL,
- 0x0, PIPE_REG_RESET);
-
- /* Reset SB configuration register (only for lanes 0 and 1) */
- if (lane->id == 0 || lane->id == 1) {
- u32 mask, data;
-
- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT |
- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT;
- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT;
- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask);
- }
-
- return 0;
+ /*
+ * The USB3 MAC sets the USB3 PHY to low state, so we do not
+ * need to power off USB3 PHY again.
+ */
}
static bool mvebu_a3700_comphy_check_mode(int lane,
@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
(lane->mode != mode || lane->submode != submode))
return -EBUSY;
- /* If changing mode, ensure reset is called */
- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode)
- lane->needs_reset = true;
-
/* Just remember the mode, ->power_on() will do the real setup */
lane->mode = mode;
lane->submode = submode;
@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
static int mvebu_a3700_comphy_power_on(struct phy *phy)
{
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- int ret;
if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode,
lane->submode)) {
@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
return -EINVAL;
}
- if (lane->needs_reset) {
- ret = mvebu_a3700_comphy_reset(phy);
- if (ret)
- return ret;
-
- lane->needs_reset = false;
- }
-
switch (lane->mode) {
case PHY_MODE_USB_HOST_SS:
dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy)
{
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- switch (lane->mode) {
- case PHY_MODE_USB_HOST_SS:
- /*
- * The USB3 MAC sets the USB3 PHY to low state, so we do not
- * need to power off USB3 PHY again.
- */
- break;
-
- case PHY_MODE_SATA:
- mvebu_a3700_comphy_sata_power_off(lane);
- break;
-
- case PHY_MODE_ETHERNET:
+ switch (lane->id) {
+ case 0:
+ mvebu_a3700_comphy_usb3_power_off(lane);
mvebu_a3700_comphy_ethernet_power_off(lane);
- break;
-
- case PHY_MODE_PCIE:
+ return 0;
+ case 1:
mvebu_a3700_comphy_pcie_power_off(lane);
- break;
-
+ mvebu_a3700_comphy_ethernet_power_off(lane);
+ return 0;
+ case 2:
+ mvebu_a3700_comphy_usb3_power_off(lane);
+ mvebu_a3700_comphy_sata_power_off(lane);
+ return 0;
default:
dev_err(lane->dev, "invalid COMPHY mode\n");
return -EINVAL;
}
-
- return 0;
}
static const struct phy_ops mvebu_a3700_comphy_ops = {
.power_on = mvebu_a3700_comphy_power_on,
.power_off = mvebu_a3700_comphy_power_off,
- .reset = mvebu_a3700_comphy_reset,
.set_mode = mvebu_a3700_comphy_set_mode,
.owner = THIS_MODULE,
};
@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
* To avoid relying on the bootloader/firmware configuration,
* power off all comphys.
*/
- mvebu_a3700_comphy_reset(phy);
- lane->needs_reset = false;
+ mvebu_a3700_comphy_power_off(phy);
}
provider = devm_of_phy_provider_register(&pdev->dev,
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index e86a879b92b5..d1a50fa81130 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -401,6 +401,9 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
submode == PHY_INTERFACE_MODE_2500BASEX)
submode = PHY_INTERFACE_MODE_SGMII;
+ if (submode == PHY_INTERFACE_MODE_QUSGMII)
+ submode = PHY_INTERFACE_MODE_QSGMII;
+
for (i = 0; i < ARRAY_SIZE(lan966x_serdes_muxes); i++) {
if (macro->idx != lan966x_serdes_muxes[i].idx ||
mode != lan966x_serdes_muxes[i].mode ||
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1cf74b0c42e5..d768dcf75cf1 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -292,7 +292,7 @@ config PINCTRL_MCP23S08
corresponding interrupt-controller.
config PINCTRL_MICROCHIP_SGPIO
- bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
+ tristate "Pinctrl driver for Microsemi/Microchip Serial GPIO"
depends on OF
depends on HAS_IOMEM
select GPIOLIB
@@ -310,6 +310,9 @@ config PINCTRL_MICROCHIP_SGPIO
connect control signals from SFP modules and to act as an
LED controller.
+ If compiled as a module, the module name will be
+ pinctrl-microchip-sgpio.
+
config PINCTRL_OCELOT
tristate "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 8f4d89806fcb..35b51ce4298e 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -31,13 +31,13 @@ config PINCTRL_BCM2835
config PINCTRL_BCM4908
tristate "Broadcom BCM4908 pinmux driver"
- depends on OF && (ARCH_BCM4908 || COMPILE_TEST)
+ depends on OF && (ARCH_BCMBCA || COMPILE_TEST)
select PINMUX
select PINCONF
select GENERIC_PINCONF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
- default ARCH_BCM4908
+ default ARCH_BCMBCA
help
Driver for BCM4908 family SoCs with integrated pin controller.
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 6f55bf7d5e05..2b4167a09b3b 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/mfd/ocelot.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pinctrl/pinmux.h>
@@ -904,7 +905,6 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
struct reset_control *reset;
struct sgpio_priv *priv;
struct clk *clk;
- u32 __iomem *regs;
u32 val;
struct regmap_config regmap_config = {
.reg_bits = 32,
@@ -937,11 +937,7 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
-
- priv->regs = devm_regmap_init_mmio(dev, regs, &regmap_config);
+ priv->regs = ocelot_regmap_from_resource(pdev, 0, &regmap_config);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
@@ -999,6 +995,7 @@ static const struct of_device_id microchip_sgpio_gpio_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, microchip_sgpio_gpio_of_match);
static struct platform_driver microchip_sgpio_pinctrl_driver = {
.driver = {
@@ -1008,4 +1005,7 @@ static struct platform_driver microchip_sgpio_pinctrl_driver = {
},
.probe = microchip_sgpio_probe,
};
-builtin_platform_driver(microchip_sgpio_pinctrl_driver);
+module_platform_driver(microchip_sgpio_pinctrl_driver);
+
+MODULE_DESCRIPTION("Microchip SGPIO Pinctrl Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index c5fd154990c8..83464e0bf4e6 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -10,6 +10,7 @@
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mfd/ocelot.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -331,6 +332,7 @@ struct ocelot_pinctrl {
const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
+ struct workqueue_struct *wq;
};
struct ocelot_match_data {
@@ -338,6 +340,11 @@ struct ocelot_match_data {
struct ocelot_pincfg_data pincfg_data;
};
+struct ocelot_irq_work {
+ struct work_struct irq_work;
+ struct irq_desc *irq_desc;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1813,6 +1820,75 @@ static void ocelot_irq_mask(struct irq_data *data)
gpiochip_disable_irq(chip, gpio);
}
+static void ocelot_irq_work(struct work_struct *work)
+{
+ struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
+ struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc);
+ struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc);
+ struct irq_data *data = irq_desc_get_irq_data(w->irq_desc);
+ unsigned int gpio = irqd_to_hwirq(data);
+
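+ /*
+ * Replay the GPIO interrupt from the workqueue. Interrupts are
+ * disabled around the chained handler so the flow handler runs as it
+ * would in hard interrupt context.
+ */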
+ local_irq_disable();
+ chained_irq_enter(parent_chip, w->irq_desc);
+ generic_handle_domain_irq(chip->irq.domain, gpio);
+ chained_irq_exit(parent_chip, w->irq_desc);
+ local_irq_enable();
+
+ kfree(w);
+}
+
+static void ocelot_irq_unmask_level(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ struct irq_desc *desc = irq_data_to_desc(data);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bit = BIT(gpio % 32);
+ bool ack = false, active = false;
+ u8 trigger_level;
+ int val;
+
+ trigger_level = irqd_get_trigger_type(data);
+
+ /* Check if the interrupt line is still active. */
+ regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+ if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+ (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+ active = true;
+
+ /*
+ * Check if the interrupt controller has seen any changes in the
+ * interrupt line.
+ */
+ regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
+ if (val & bit)
+ ack = true;
+
+ /* Enable the interrupt now */
+ gpiochip_enable_irq(chip, gpio);
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
+ bit, bit);
+
+ /*
+ * If the interrupt line is still active and the interrupt controller
+ * has not seen any change on the line, then another interrupt arrived
+ * while the line was already active. That interrupt was missed, so
+ * kick the handler again.
+ */
+ if (active && !ack) {
+ struct ocelot_irq_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->irq_desc = desc;
+ INIT_WORK(&work->irq_work, ocelot_irq_work);
+ queue_work(info->wq, &work->irq_work);
+ }
+}
+
static void ocelot_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
@@ -1836,13 +1912,12 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_eoi_irqchip = {
+static struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
- .irq_eoi = ocelot_irq_ack,
- .irq_unmask = ocelot_irq_unmask,
- .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
- IRQCHIP_IMMUTABLE,
+ .irq_ack = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask_level,
+ .flags = IRQCHIP_IMMUTABLE,
.irq_set_type = ocelot_irq_set_type,
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
@@ -1859,14 +1934,9 @@ static struct irq_chip ocelot_irqchip = {
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
{
- type &= IRQ_TYPE_SENSE_MASK;
-
- if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
- if (type & IRQ_TYPE_LEVEL_HIGH)
- irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
- handle_fasteoi_irq, NULL);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip,
+ handle_level_irq, NULL);
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
handle_edge_irq, NULL);
@@ -1975,7 +2045,6 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
struct ocelot_pinctrl *info;
struct reset_control *reset;
struct regmap *pincfg;
- void __iomem *base;
int ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
@@ -1996,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info->desc)
return -ENOMEM;
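+ /* Ordered workqueue used to replay missed level-triggered interrupts. */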
+ info->wq = alloc_ordered_workqueue("ocelot_ordered", 0);
+ if (!info->wq)
+ return -ENOMEM;
+
info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
@@ -2004,21 +2077,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
"Failed to get reset\n");
reset_control_reset(reset);
- base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
- if (IS_ERR(base))
- return PTR_ERR(base);
-
info->stride = 1 + (info->desc->npins - 1) / 32;
regmap_config.max_register = OCELOT_GPIO_SD_MAP * info->stride + 15 * 4;
- info->map = devm_regmap_init_mmio(dev, base, &regmap_config);
- if (IS_ERR(info->map)) {
- dev_err(dev, "Failed to create regmap\n");
- return PTR_ERR(info->map);
- }
- dev_set_drvdata(dev, info->map);
+ info->map = ocelot_regmap_from_resource(pdev, 0, &regmap_config);
+ if (IS_ERR(info->map))
+ return dev_err_probe(dev, PTR_ERR(info->map),
+ "Failed to create regmap\n");
+ dev_set_drvdata(dev, info);
info->dev = dev;
/* Pinconf registers */
@@ -2043,6 +2110,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static int ocelot_pinctrl_remove(struct platform_device *pdev)
+{
+ struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+
+ destroy_workqueue(info->wq);
+
+ return 0;
+}
+
static struct platform_driver ocelot_pinctrl_driver = {
.driver = {
.name = "pinctrl-ocelot",
@@ -2050,6 +2126,7 @@ static struct platform_driver ocelot_pinctrl_driver = {
.suppress_bind_attrs = true,
},
.probe = ocelot_pinctrl_probe,
+ .remove = ocelot_pinctrl_remove,
};
module_platform_driver(ocelot_pinctrl_driver);
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 6bec7f143134..704a99d2f93c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
DECLARE_MSM_GPIO_PINS(188);
DECLARE_MSM_GPIO_PINS(189);
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
enum sc8180x_functions {
msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
- { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+ { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
{ 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
{ 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
{ 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index afc1f5df7545..b82ad135bf2a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
static struct platform_driver a100_r_pinctrl_driver = {
.probe = a100_r_pinctrl_probe,
.driver = {
- .name = "sun50iw10p1-r-pinctrl",
+ .name = "sun50i-a100-r-pinctrl",
.of_match_table = a100_r_pinctrl_match,
},
};
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index c45fb376d653..6b954c5acadb 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -265,6 +265,17 @@ config CHROMEOS_PRIVACY_SCREEN
this should probably always be built into the kernel to avoid or
minimize drm probe deferral.
+config CROS_TYPEC_SWITCH
+ tristate "ChromeOS EC Type-C Switch Control"
+ depends on MFD_CROS_EC_DEV && TYPEC && ACPI
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for configuring the ChromeOS EC Type-C
+ muxes and retimers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cros_typec_switch.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
# Kunit test cases
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index f7e74a845afc..2950610101f1 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
obj-$(CONFIG_CROS_EC) += cros_ec.o
obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o
+obj-$(CONFIG_CROS_TYPEC_SWITCH) += cros_typec_switch.o
obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 4e14b4d6635d..a2cdbfbaeae6 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -740,6 +740,7 @@ static int __init
chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
const struct chromeos_laptop *src)
{
+ struct i2c_peripheral *i2c_peripherals;
struct i2c_peripheral *i2c_dev;
struct i2c_board_info *info;
int i;
@@ -748,17 +749,15 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
if (!src->num_i2c_peripherals)
return 0;
- cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
- src->num_i2c_peripherals *
- sizeof(*src->i2c_peripherals),
- GFP_KERNEL);
- if (!cros_laptop->i2c_peripherals)
+ i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!i2c_peripherals)
return -ENOMEM;
- cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
-
- for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ for (i = 0; i < src->num_i2c_peripherals; i++) {
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
error = chromeos_laptop_setup_irq(i2c_dev);
@@ -775,16 +774,19 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
}
}
+ cros_laptop->i2c_peripherals = i2c_peripherals;
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
return 0;
err_out:
while (--i >= 0) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
if (!IS_ERR_OR_NULL(info->fwnode))
fwnode_remove_software_node(info->fwnode);
}
- kfree(cros_laptop->i2c_peripherals);
+ kfree(i2c_peripherals);
return error;
}
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 8aace50d446d..ec733f683f34 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -115,7 +115,7 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
if (ec_dev->host_sleep_v1) {
buf.u.req1.sleep_event = sleep_event;
buf.u.req1.suspend_params.sleep_timeout_ms =
- EC_HOST_SLEEP_TIMEOUT_DEFAULT;
+ ec_dev->suspend_timeout_ms;
buf.msg.outsize = sizeof(buf.u.req1);
if ((sleep_event == HOST_SLEEP_EVENT_S3_RESUME) ||
@@ -188,6 +188,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
ec_dev->max_passthru = 0;
ec_dev->ec = NULL;
ec_dev->pd = NULL;
+ ec_dev->suspend_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;
ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
if (!ec_dev->din)
@@ -349,10 +350,16 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
+ bool wake_event;
+
while (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
+ cros_ec_get_next_event(ec_dev, &wake_event, NULL) > 0) {
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
+
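+ /* Report a wakeup to the PM core if this event is flagged as a wake event. */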
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+ }
}
/**
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index fd33de546aee..0de7c255254e 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -327,6 +327,9 @@ static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
return -EFAULT;
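+ /* Reject reads larger than the destination buffer. */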
+ if (s_mem.bytes > sizeof(s_mem.buffer))
+ return -EINVAL;
+
num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
s_mem.buffer);
if (num <= 0)
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 0dbceee87a4b..4e63adf083ea 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -470,6 +470,9 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
&ec->ec_dev->last_resume_result);
+ debugfs_create_u16("suspend_timeout_ms", 0664, debug_info->dir,
+ &ec->ec_dev->suspend_timeout_ms);
+
ec->debug_info = debug_info;
dev_set_drvdata(&pd->dev, ec);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 9f5b95763173..b6823c654c3f 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -317,13 +317,11 @@ static int cros_ec_i2c_probe(struct i2c_client *client,
return 0;
}
-static int cros_ec_i2c_remove(struct i2c_client *client)
+static void cros_ec_i2c_remove(struct i2c_client *client)
{
struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
cros_ec_unregister(ec_dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 05d2e8765a66..475a6dd72db6 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -773,6 +773,7 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
u8 event_type;
u32 host_event;
int ret;
+ u32 ver_mask;
/*
* Default value for wake_event.
@@ -794,6 +795,37 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
return get_keyboard_state_event(ec_dev);
ret = get_next_event(ec_dev);
+ /*
+ * -ENOPROTOOPT is returned when the EC returns EC_RES_INVALID_VERSION.
+ * This can occur when an EC-based device (e.g. a Fingerprint MCU) jumps
+ * to the RO image, which doesn't support the newer version of the
+ * command. In this case we attempt to update the maximum supported
+ * version of EC_CMD_GET_NEXT_EVENT.
+ */
+ if (ret == -ENOPROTOOPT) {
+ dev_dbg(ec_dev->dev,
+ "GET_NEXT_EVENT returned invalid version error.\n");
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
+ if (ret < 0 || ver_mask == 0)
+ /*
+ * Do not change the MKBP supported version if we can't
+ * obtain supported version correctly. Please note that
+ * calling EC_CMD_GET_NEXT_EVENT returned
+ * EC_RES_INVALID_VERSION which means that the command
+ * is present.
+ */
+ return -ENOPROTOOPT;
+
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+ dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n",
+ ec_dev->mkbp_event_supported - 1);
+
+ /* Try to get next event with new MKBP support version set. */
+ ret = get_next_event(ec_dev);
+ }
+
if (ret <= 0)
return ret;
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index de6ee0f926a6..2a7ff14dc37e 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -20,12 +20,14 @@
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
#include <linux/usb/typec_tbt.h>
#include <linux/usb/role.h>
#define DRV_NAME "cros-ec-typec"
-#define DP_PORT_VDO (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) | DP_CAP_DFP_D)
+#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
+ DP_CAP_DFP_D)
/* Supported alt modes. */
enum {
@@ -55,6 +57,7 @@ struct cros_typec_port {
struct usb_pd_identity c_identity;
struct typec_switch *ori_sw;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
struct usb_role_switch *role_sw;
/* Variables keeping track of switch state. */
@@ -70,6 +73,11 @@ struct cros_typec_port {
struct ec_response_typec_discovery *disc_data;
struct list_head partner_mode_list;
struct list_head plug_mode_list;
+
+ /* PDO-related structs */
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_src_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -143,6 +151,12 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
goto mux_err;
}
+ port->retimer = fwnode_typec_retimer_get(fwnode);
+ if (IS_ERR(port->retimer)) {
+ dev_dbg(dev, "Retimer handle not found.\n");
+ goto retimer_sw_err;
+ }
+
port->ori_sw = fwnode_typec_switch_get(fwnode);
if (IS_ERR(port->ori_sw)) {
dev_dbg(dev, "Orientation switch handle not found.\n");
@@ -158,12 +172,12 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
return 0;
role_sw_err:
- usb_role_switch_put(port->role_sw);
-ori_sw_err:
typec_switch_put(port->ori_sw);
-mux_err:
+ori_sw_err:
+ typec_retimer_put(port->retimer);
+retimer_sw_err:
typec_mux_put(port->mux);
-
+mux_err:
return -ENODEV;
}
@@ -206,6 +220,21 @@ static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int po
}
}
+/*
+ * Map the Type-C Mux state to retimer state and call the retimer set function. We need this
+ * because we re-use the Type-C mux state for retimers.
+ */
+static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_mux_state state)
+{
+ struct typec_retimer_state rstate = {
+ .alt = state.alt,
+ .mode = state.mode,
+ .data = state.data,
+ };
+
+ return typec_retimer_set(retimer, &rstate);
+}
+
static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
{
port->state.alt = NULL;
@@ -214,6 +243,7 @@ static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
+ cros_typec_retimer_set(port->retimer, port->state);
return typec_mux_set(port->mux, &port->state);
}
@@ -228,6 +258,14 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
cros_typec_unregister_altmodes(typec, port_num, true);
+ typec_partner_set_usb_power_delivery(port->partner, NULL);
+ usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
+ port->partner_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(port->partner_src_caps);
+ port->partner_src_caps = NULL;
+ usb_power_delivery_unregister(port->partner_pd);
+ port->partner_pd = NULL;
+
cros_typec_usb_disconnect_state(port);
port->mux_flags = USB_PD_MUX_NONE;
@@ -411,9 +449,14 @@ unregister_ports:
static int cros_typec_usb_safe_state(struct cros_typec_port *port)
{
+ int ret;
port->state.mode = TYPEC_STATE_SAFE;
- return typec_mux_set(port->mux, &port->state);
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+
+ return ret;
}
/*
@@ -510,7 +553,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
port->state.data = &dp_data;
port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
- return typec_mux_set(port->mux, &port->state);
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+
+ return ret;
}
static int cros_typec_enable_usb4(struct cros_typec_data *typec,
@@ -599,7 +646,10 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
} else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
- ret = typec_mux_set(port->mux, &port->state);
+
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
} else {
dev_dbg(typec->dev,
"Unrecognized mode requested, mux flags: %x\n",
@@ -697,7 +747,7 @@ static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_
for (j = 0; j < sop_disc->svids[i].mode_count; j++) {
memset(&desc, 0, sizeof(desc));
desc.svid = sop_disc->svids[i].svid;
- desc.mode = j;
+ desc.mode = j + 1;
desc.vdo = sop_disc->svids[i].mode_vdo[j];
if (is_partner)
@@ -902,6 +952,46 @@ static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_n
sizeof(req), NULL, 0);
}
+static void cros_typec_register_partner_pdos(struct cros_typec_data *typec,
+ struct ec_response_typec_status *resp, int port_num)
+{
+ struct usb_power_delivery_capabilities_desc caps_desc = {};
+ struct usb_power_delivery_desc desc = {
+ .revision = (le16_to_cpu(resp->sop_revision) & 0xff00) >> 4,
+ };
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ if (!port->partner || port->partner_pd)
+ return;
+
+ /* If no caps are available, don't bother creating a device. */
+ if (!resp->source_cap_count && !resp->sink_cap_count)
+ return;
+
+ port->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(port->partner_pd)) {
+ dev_warn(typec->dev, "Failed to register partner PD device, port: %d\n", port_num);
+ return;
+ }
+
+ typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
+
+ memcpy(caps_desc.pdo, resp->source_cap_pdos, sizeof(u32) * resp->source_cap_count);
+ caps_desc.role = TYPEC_SOURCE;
+ port->partner_src_caps = usb_power_delivery_register_capabilities(port->partner_pd,
+ &caps_desc);
+ if (IS_ERR(port->partner_src_caps))
+ dev_warn(typec->dev, "Failed to register source caps, port: %d\n", port_num);
+
+ memset(&caps_desc, 0, sizeof(caps_desc));
+ memcpy(caps_desc.pdo, resp->sink_cap_pdos, sizeof(u32) * resp->sink_cap_count);
+ caps_desc.role = TYPEC_SINK;
+ port->partner_sink_caps = usb_power_delivery_register_capabilities(port->partner_pd,
+ &caps_desc);
+ if (IS_ERR(port->partner_sink_caps))
+ dev_warn(typec->dev, "Failed to register sink caps, port: %d\n", port_num);
+}
+
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
{
struct ec_response_typec_status resp;
@@ -949,6 +1039,8 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
}
if (resp.sop_connected)
typec_set_pwr_opmode(typec->ports[port_num]->port, TYPEC_PWR_MODE_PD);
+
+ cros_typec_register_partner_pdos(typec, &resp, port_num);
}
if (resp.events & PD_STATUS_EVENT_SOP_PRIME_DISC_DONE &&
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
new file mode 100644
index 000000000000..a26219e97c93
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC
+ *
+ * This driver provides the ability to configure Type-C muxes and retimers which are controlled by
+ * the ChromeOS EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+/* Handles and other relevant data required for each port's switches. */
+struct cros_typec_port {
+ int port_num;
+ struct typec_mux_dev *mode_switch;
+ struct typec_retimer *retimer;
+ struct cros_typec_switch_data *sdata;
+};
+
+/* Driver-specific data. */
+struct cros_typec_switch_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+};
+
+static int cros_typec_cmd_mux_set(struct cros_typec_switch_data *sdata, int port_num, u8 index,
+ u8 state)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+ .mux_params = {
+ .mux_index = index,
+ .mux_flags = state,
+ },
+ };
+
+ return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
+}
+
+static int cros_typec_get_mux_state(unsigned long mode, struct typec_altmode *alt)
+{
+ int ret = -EOPNOTSUPP;
+
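+ /* Translate the Type-C subsystem mux state into the EC's USB_PD_MUX_* flag. */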
+ if (mode == TYPEC_STATE_SAFE)
+ ret = USB_PD_MUX_SAFE_MODE;
+ else if (mode == TYPEC_STATE_USB)
+ ret = USB_PD_MUX_USB_ENABLED;
+ else if (alt && alt->svid == USB_TYPEC_DP_SID)
+ ret = USB_PD_MUX_DP_ENABLED;
+
+ return ret;
+}
+
+static int cros_typec_send_clear_event(struct cros_typec_switch_data *sdata, int port_num,
+ u32 events_mask)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ .clear_events_mask = events_mask,
+ };
+
+ return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
+}
+
+static bool cros_typec_check_event(struct cros_typec_switch_data *sdata, int port_num, u32 mask)
+{
+ struct ec_response_typec_status resp;
+ struct ec_params_typec_status req = {
+ .port = port_num,
+ };
+ int ret;
+
+ ret = cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(sdata->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
+ return false;
+ }
+
+ if (resp.events & mask)
+ return true;
+
+ return false;
+}
+
+/*
+ * The ChromeOS EC treats both mode-switches and retimers as "muxes" for the purposes of the
+ * host command API. This common function configures and verifies the retimer/mode-switch
+ * according to the provided setting.
+ */
+static int cros_typec_configure_mux(struct cros_typec_switch_data *sdata, int port_num, int index,
+ unsigned long mode, struct typec_altmode *alt)
+{
+ unsigned long end;
+ u32 event_mask;
+ u8 mux_state;
+ int ret;
+
+ ret = cros_typec_get_mux_state(mode, alt);
+ if (ret < 0)
+ return ret;
+ mux_state = (u8)ret;
+
+ /* Clear any old mux set done event. */
+ if (index == 0)
+ event_mask = PD_STATUS_EVENT_MUX_0_SET_DONE;
+ else
+ event_mask = PD_STATUS_EVENT_MUX_1_SET_DONE;
+
+ ret = cros_typec_send_clear_event(sdata, port_num, event_mask);
+ if (ret < 0)
+ return ret;
+
+ /* Send the set command. */
+ ret = cros_typec_cmd_mux_set(sdata, port_num, index, mux_state);
+ if (ret < 0)
+ return ret;
+
+ /* Check for the mux set done event. */
+ end = jiffies + msecs_to_jiffies(1000);
+ do {
+ if (cros_typec_check_event(sdata, port_num, event_mask))
+ return 0;
+
+ usleep_range(500, 1000);
+ } while (time_before(jiffies, end));
+
+ dev_err(sdata->dev, "Timed out waiting for mux set done on index: %d, state: %d\n",
+ index, mux_state);
+
+ return -ETIMEDOUT;
+}
+
+static int cros_typec_mode_switch_set(struct typec_mux_dev *mode_switch,
+ struct typec_mux_state *state)
+{
+ struct cros_typec_port *port = typec_mux_get_drvdata(mode_switch);
+
+ /* Mode switches have index 0. */
+ return cros_typec_configure_mux(port->sdata, port->port_num, 0, state->mode, state->alt);
+}
+
+static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
+{
+ struct cros_typec_port *port = typec_retimer_get_drvdata(retimer);
+
+ /* Retimers have index 1. */
+ return cros_typec_configure_mux(port->sdata, port->port_num, 1, state->mode, state->alt);
+}
+
+static void cros_typec_unregister_switches(struct cros_typec_switch_data *sdata)
+{
+ int i;
+
+ for (i = 0; i < EC_USB_PD_MAX_PORTS; i++) {
+ if (!sdata->ports[i])
+ continue;
+ typec_retimer_unregister(sdata->ports[i]->retimer);
+ typec_mux_unregister(sdata->ports[i]->mode_switch);
+ }
+}
+
+static int cros_typec_register_mode_switch(struct cros_typec_port *port,
+ struct fwnode_handle *fwnode)
+{
+ struct typec_mux_desc mode_switch_desc = {
+ .fwnode = fwnode,
+ .drvdata = port,
+ .name = fwnode_get_name(fwnode),
+ .set = cros_typec_mode_switch_set,
+ };
+
+ port->mode_switch = typec_mux_register(port->sdata->dev, &mode_switch_desc);
+
+ return PTR_ERR_OR_ZERO(port->mode_switch);
+}
+
+static int cros_typec_register_retimer(struct cros_typec_port *port, struct fwnode_handle *fwnode)
+{
+ struct typec_retimer_desc retimer_desc = {
+ .fwnode = fwnode,
+ .drvdata = port,
+ .name = fwnode_get_name(fwnode),
+ .set = cros_typec_retimer_set,
+ };
+
+ port->retimer = typec_retimer_register(port->sdata->dev, &retimer_desc);
+
+ return PTR_ERR_OR_ZERO(port->retimer);
+}
+
+static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
+{
+ struct cros_typec_port *port;
+ struct device *dev = sdata->dev;
+ struct fwnode_handle *fwnode;
+ struct acpi_device *adev;
+ unsigned long long index;
+ int nports, ret;
+
+ nports = device_get_child_node_count(dev);
+ if (nports == 0) {
+ dev_err(dev, "No switch devices found.\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(dev, fwnode) {
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ ret = -ENOMEM;
+ goto err_switch;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (!adev) {
+ dev_err(fwnode->dev, "Couldn't get ACPI device handle\n");
+ ret = -ENODEV;
+ goto err_switch;
+ }
+
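+ /* The child node's ACPI _ADR gives the Type-C port index for this switch. */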
+ ret = acpi_evaluate_integer(adev->handle, "_ADR", NULL, &index);
+ if (ACPI_FAILURE(ret)) {
+ dev_err(fwnode->dev, "_ADR wasn't evaluated\n");
+ ret = -ENODATA;
+ goto err_switch;
+ }
+
+ if (index >= EC_USB_PD_MAX_PORTS) {
+ dev_err(fwnode->dev, "Invalid port index number: %llu\n", index);
+ ret = -EINVAL;
+ goto err_switch;
+ }
+ port->sdata = sdata;
+ port->port_num = index;
+ sdata->ports[index] = port;
+
+ ret = cros_typec_register_retimer(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Retimer switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Retimer switch registered for index %llu\n", index);
+
+ if (!device_property_present(fwnode->dev, "mode-switch"))
+ continue;
+
+ ret = cros_typec_register_mode_switch(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Mode switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Mode switch registered for index %llu\n", index);
+ }
+
+ return 0;
+err_switch:
+ cros_typec_unregister_switches(sdata);
+ return ret;
+}
+
+static int cros_typec_switch_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_typec_switch_data *sdata;
+
+ sdata = devm_kzalloc(dev, sizeof(*sdata), GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ sdata->dev = dev;
+ sdata->ec = dev_get_drvdata(pdev->dev.parent);
+
+ platform_set_drvdata(pdev, sdata);
+
+ return cros_typec_register_switches(sdata);
+}
+
+static int cros_typec_switch_remove(struct platform_device *pdev)
+{
+ struct cros_typec_switch_data *sdata = platform_get_drvdata(pdev);
+
+ cros_typec_unregister_switches(sdata);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_typec_switch_acpi_id[] = {
+ { "GOOG001A", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cros_typec_switch_acpi_id);
+#endif
+
+static struct platform_driver cros_typec_switch_driver = {
+ .driver = {
+ .name = "cros-typec-switch",
+ .acpi_match_table = ACPI_PTR(cros_typec_switch_acpi_id),
+ },
+ .probe = cros_typec_switch_probe,
+ .remove = cros_typec_switch_remove,
+};
+
+module_platform_driver(cros_typec_switch_driver);
+
+MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC Type-C Switch control");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 8be13d416f48..1ae3c56b66b0 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -928,7 +928,6 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index 55834ccb4ac7..8d833836a6d3 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -460,8 +460,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
u32 regval;
int err;
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
if (err)
goto regmap_read_fail;
@@ -474,7 +472,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -491,8 +488,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
 * line card which has already been enabled. Disabling does not affect the disabled line
* card.
*/
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
if (err)
goto regmap_read_fail;
@@ -505,7 +500,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -538,6 +532,15 @@ mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
static void
mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
{
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+}
+
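+/* Same as mlxreg_lc_state_update(), but takes the line card lock itself. */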
+static void
+mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
mutex_lock(&mlxreg_lc->lock);
if (action)
@@ -560,8 +563,9 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
+ mutex_lock(&mlxreg_lc->lock);
if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
- return 0;
+ goto mlxreg_lc_non_initialized_exit;
switch (kind) {
case MLXREG_HOTPLUG_LC_SYNCED:
@@ -574,7 +578,7 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
- return err;
+ goto mlxreg_lc_power_on_off_fail;
}
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
@@ -588,12 +592,13 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
- return err;
+
+ goto mlxreg_lc_enable_disable_exit;
}
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
if (err)
- return err;
+ goto mlxreg_lc_create_static_devices_fail;
/* In case line card is already in ready state - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
@@ -620,6 +625,12 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
break;
}
+mlxreg_lc_enable_disable_exit:
+mlxreg_lc_power_on_off_fail:
+mlxreg_lc_create_static_devices_fail:
+mlxreg_lc_non_initialized_exit:
+ mutex_unlock(&mlxreg_lc->lock);
+
return err;
}
@@ -665,7 +676,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
if (err)
goto mlxreg_lc_create_static_devices_failed;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
}
/* Verify if line card is synchronized. */
@@ -676,7 +687,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
/* Power on line card if necessary. */
if (regval & mlxreg_lc->data->mask) {
mlxreg_lc->state |= MLXREG_LC_SYNCED;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
@@ -684,7 +695,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
}
}
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
return 0;
@@ -814,10 +825,9 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
mutex_init(&mlxreg_lc->lock);
/* Set event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = mlxreg_lc_event_handler;
- data->notifier->handle = mlxreg_lc;
- }
+ data->notifier->user_handler = mlxreg_lc_event_handler;
+ data->notifier->handle = mlxreg_lc;
+
data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
@@ -863,7 +873,6 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
- err = PTR_ERR(regmap);
goto regcache_sync_fail;
}
@@ -878,16 +887,14 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err)
goto mlxreg_lc_config_init_fail;
- return err;
+ return 0;
mlxreg_lc_config_init_fail:
regcache_sync_fail:
regmap_write_fail:
devm_regmap_init_i2c_fail:
- if (data->hpdev.client) {
- i2c_unregister_device(data->hpdev.client);
- data->hpdev.client = NULL;
- }
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
@@ -905,6 +912,8 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
+
/*
 * Probing and removing are invoked by hotplug events raised upon line card insertion and
 * removal. If the probing procedure fails, all data is cleared. However, the hotplug event still
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index 444ec81ba02d..73961a24c849 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -519,7 +519,7 @@ static int mshw0011_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
+ strscpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
bat0 = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(bat0))
@@ -554,7 +554,7 @@ out_err:
return error;
}
-static int mshw0011_remove(struct i2c_client *client)
+static void mshw0011_remove(struct i2c_client *client)
{
struct mshw0011_data *cdata = i2c_get_clientdata(client);
@@ -564,8 +564,6 @@ static int mshw0011_remove(struct i2c_client *client)
kthread_stop(cdata->poll_task);
i2c_unregister_device(cdata->bat0);
-
- return 0;
}
static const struct acpi_device_id mshw0011_acpi_match[] = {
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 44e317970557..50500e562963 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -355,7 +355,8 @@ static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
work->dev = d->dev;
- memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
+ work->event = *event;
+ memcpy(work->event.data, event->data, event->length);
queue_delayed_work(san_wq, &work->work, delay);
return SSAM_NOTIF_HANDLED;
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d5655f6a4a41..585911020cea 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -86,38 +86,38 @@ static const struct software_node ssam_node_bas_dtx = {
.parent = &ssam_node_root,
};
-/* HID keyboard (TID1). */
-static const struct software_node ssam_node_hid_tid1_keyboard = {
+/* HID keyboard (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_keyboard = {
.name = "ssam:01:15:01:01:00",
.parent = &ssam_node_root,
};
-/* HID pen stash (TID1; pen taken / stashed away evens). */
-static const struct software_node ssam_node_hid_tid1_penstash = {
+/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_sam_penstash = {
.name = "ssam:01:15:01:02:00",
.parent = &ssam_node_root,
};
-/* HID touchpad (TID1). */
-static const struct software_node ssam_node_hid_tid1_touchpad = {
+/* HID touchpad (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_touchpad = {
.name = "ssam:01:15:01:03:00",
.parent = &ssam_node_root,
};
-/* HID device instance 6 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid6 = {
+/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
+static const struct software_node ssam_node_hid_sam_sensors = {
.name = "ssam:01:15:01:06:00",
.parent = &ssam_node_root,
};
-/* HID device instance 7 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid7 = {
+/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
+static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
.name = "ssam:01:15:01:07:00",
.parent = &ssam_node_root,
};
-/* HID system controls (TID1). */
-static const struct software_node ssam_node_hid_tid1_sysctrl = {
+/* HID system controls (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_sysctrl = {
.name = "ssam:01:15:01:08:00",
.parent = &ssam_node_root,
};
@@ -182,8 +182,8 @@ static const struct software_node ssam_node_hid_kip_touchpad = {
.parent = &ssam_node_hub_kip,
};
-/* HID device instance 5 (KIP hub, unknown HID device). */
-static const struct software_node ssam_node_hid_kip_iid5 = {
+/* HID device instance 5 (KIP hub, type-cover firmware update). */
+static const struct software_node ssam_node_hid_kip_fwupd = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_hub_kip,
};
@@ -241,12 +241,12 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_pos_tablet_switch,
- &ssam_node_hid_tid1_keyboard,
- &ssam_node_hid_tid1_penstash,
- &ssam_node_hid_tid1_touchpad,
- &ssam_node_hid_tid1_iid6,
- &ssam_node_hid_tid1_iid7,
- &ssam_node_hid_tid1_sysctrl,
+ &ssam_node_hid_sam_keyboard,
+ &ssam_node_hid_sam_penstash,
+ &ssam_node_hid_sam_touchpad,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ &ssam_node_hid_sam_sysctrl,
NULL,
};
@@ -278,7 +278,9 @@ static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
&ssam_node_hid_kip_touchpad,
- &ssam_node_hid_kip_iid5,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
NULL,
};
@@ -325,6 +327,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
/* Surface Laptop Studio */
{ "MSHW0123", (unsigned long)ssam_node_group_sls },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f2f98e942cf2..f5312f51de19 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -93,6 +93,7 @@ config PEAQ_WMI
config NVIDIA_WMI_EC_BACKLIGHT
tristate "EC Backlight Driver for Hybrid Graphics Notebook Systems"
+ depends on ACPI_VIDEO
depends on ACPI_WMI
depends on BACKLIGHT_CLASS_DEVICE
help
@@ -790,6 +791,7 @@ config SAMSUNG_Q10
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
+ depends on ACPI_BATTERY
depends on ACPI_WMI
select LEDS_CLASS
select NEW_LEDS
@@ -797,6 +799,7 @@ config ACPI_TOSHIBA
depends on INPUT
depends on SERIO_I8042 || SERIO_I8042 = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on HWMON || HWMON = n
depends on RFKILL || RFKILL = n
depends on IIO
select INPUT_SPARSEKMAP
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e0230ea0cb7e..18224f9a5bc0 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
{KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
{KE_IGNORE, 0x41, {KEY_MUTE} },
{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ /*
+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+ * with the "Video Bus" input device events. But sometimes it is not
+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
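+	 * (For example, a hwdb rule setting KEYBOARD_KEY_61 for this input
+	 * device would remap the scancode from userspace.)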
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
@@ -643,69 +650,6 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = {
{}
};
-static int __init
-video_set_backlight_video_vendor(const struct dmi_system_id *d)
-{
- interface->capability &= ~ACER_CAP_BRIGHTNESS;
- pr_info("Brightness must be controlled by generic video driver\n");
- return 0;
-}
-
-static const struct dmi_system_id video_vendor_dmi_table[] __initconst = {
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer TravelMate 4750",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer Extensa 5235",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer TravelMate 5760",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer Aspire 5750",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer Aspire 5741",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
- },
- },
- {
- /*
- * Note no video_set_backlight_video_vendor, we must use the
- * acer interface, as there is no native backlight interface.
- */
- .ident = "Acer KAV80",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
- },
- },
- {}
-};
-
/* Find which quirks are needed for a particular vendor/ model pair */
static void __init find_quirks(void)
{
@@ -2477,9 +2421,6 @@ static int __init acer_wmi_init(void)
set_quirks();
- if (dmi_check_system(video_vendor_dmi_table))
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
interface->capability &= ~ACER_CAP_BRIGHTNESS;
@@ -2522,7 +2463,7 @@ static int __init acer_wmi_init(void)
goto error_platform_register;
}
- acer_platform_device = platform_device_alloc("acer-wmi", -1);
+ acer_platform_device = platform_device_alloc("acer-wmi", PLATFORM_DEVID_NONE);
if (!acer_platform_device) {
err = -ENOMEM;
goto error_device_alloc;
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 3463629f8764..d2c0fc38c201 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -676,7 +676,7 @@ static int __init acerhdf_register_platform(void)
if (err)
return err;
- acerhdf_dev = platform_device_alloc("acerhdf", -1);
+ acerhdf_dev = platform_device_alloc("acerhdf", PLATFORM_DEVID_NONE);
if (!acerhdf_dev) {
err = -ENOMEM;
goto err_device_alloc;
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index c0d0a3c5170c..a825af8126c8 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -3,6 +3,8 @@
# AMD x86 Platform Specific Drivers
#
+source "drivers/platform/x86/amd/pmf/Kconfig"
+
config AMD_PMC
tristate "AMD SoC PMC driver"
depends on ACPI && PCI && RTC_CLASS
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index a03fbb08e808..2c229198e24c 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -8,3 +8,4 @@ amd-pmc-y := pmc.o
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
amd_hsmp-y := hsmp.o
obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index a0c54b838c11..521c6a229362 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -392,7 +392,7 @@ static int __init hsmp_plt_init(void)
if (ret)
return ret;
- amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, -1);
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
if (!amd_hsmp_platdev) {
ret = -ENOMEM;
goto drv_unregister;
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index 700eb19e8450..ce859b300712 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -39,7 +39,9 @@
#define AMD_PMC_STB_INDEX_ADDRESS 0xF8
#define AMD_PMC_STB_INDEX_DATA 0xFC
#define AMD_PMC_STB_PMI_0 0x03E30600
-#define AMD_PMC_STB_PREDEF 0xC6000001
+#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
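+/* Phase markers written to the STB (Smart Trace Buffer) during s2idle for post-mortem debug */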
/* STB S2D(Spill to DRAM) has different message port offset */
#define STB_SPILL_TO_DRAM 0xBE
@@ -151,9 +153,7 @@ struct amd_pmc_dev {
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
-#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
};
static bool enable_stb;
@@ -369,7 +369,64 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
}
#endif
-#ifdef CONFIG_DEBUG_FS
+static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+{
+ int rc;
+ u32 val;
+
+ rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
+ if (rc)
+ return rc;
+
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
+ dev->minor = (val >> 8) & GENMASK(7, 0);
+ dev->rev = (val >> 0) & GENMASK(7, 0);
+
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
+
+ return 0;
+}
+
+static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
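+	/* The SMU version is queried once on first access and then cached */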
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
+}
+
+static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u\n", dev->smu_program);
+}
+
+static DEVICE_ATTR_RO(smu_fw_version);
+static DEVICE_ATTR_RO(smu_program);
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_smu_fw_version.attr,
+ &dev_attr_smu_program.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(pmc);
+
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
@@ -435,26 +492,6 @@ static int s0ix_stats_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
-static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
-{
- int rc;
- u32 val;
-
- rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
- if (rc)
- return rc;
-
- dev->smu_program = (val >> 24) & GENMASK(7, 0);
- dev->major = (val >> 16) & GENMASK(7, 0);
- dev->minor = (val >> 8) & GENMASK(7, 0);
- dev->rev = (val >> 0) & GENMASK(7, 0);
-
- dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
- dev->smu_program, dev->major, dev->minor, dev->rev);
-
- return 0;
-}
-
static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
@@ -504,15 +541,6 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&amd_pmc_stb_debugfs_fops);
}
}
-#else
-static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
-{
-}
-
-static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
@@ -691,8 +719,6 @@ static void amd_pmc_s2idle_prepare(void)
}
}
- /* Dump the IdleMask before we send hint to SMU */
- amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
if (rc) {
@@ -700,11 +726,22 @@ static void amd_pmc_s2idle_prepare(void)
return;
}
- if (enable_stb) {
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
- if (rc)
- dev_err(pdev->dev, "error writing to STB: %d\n", rc);
- }
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+}
+
+static void amd_pmc_s2idle_check(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ int rc;
+
+ /* Dump the IdleMask before we add to the STB */
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
static void amd_pmc_s2idle_restore(void)
@@ -721,15 +758,9 @@ static void amd_pmc_s2idle_restore(void)
/* Let SMU know that we are looking for stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
- /* Dump the IdleMask to see the blockers */
- amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
-
- /* Write data incremented by 1 to distinguish in stb_read */
- if (enable_stb) {
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
- if (rc)
- dev_err(pdev->dev, "error writing to STB: %d\n", rc);
- }
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
/* Notify on failed entry */
amd_pmc_validate_deepest(pdev);
@@ -737,6 +768,7 @@ static void amd_pmc_s2idle_restore(void)
static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
.prepare = amd_pmc_s2idle_prepare,
+ .check = amd_pmc_s2idle_check,
.restore = amd_pmc_s2idle_restore,
};
#endif
@@ -935,6 +967,7 @@ static struct platform_driver amd_pmc_driver = {
.driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
+ .dev_groups = pmc_groups,
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
new file mode 100644
index 000000000000..c375498c4071
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMF Driver
+#
+
+config AMD_PMF
+ tristate "AMD Platform Management Framework"
+ depends on ACPI && PCI
+ select ACPI_PLATFORM_PROFILE
+ help
+ This driver provides support for the AMD Platform Management Framework.
+	  The goal is to enhance the end user experience by making AMD PCs
+	  smarter, quieter, and more power efficient by adapting to user
+	  behavior and environment.
+
+ To compile this driver as a module, choose M here: the module will
+ be called amd_pmf.
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
new file mode 100644
index 000000000000..fdededf54392
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmf
+# AMD Platform Management Framework
+#
+
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-objs := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
new file mode 100644
index 000000000000..081e84e116e7
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include "pmf.h"
+
+#define APMF_CQL_NOTIFICATION 2
+#define APMF_AMT_NOTIFICATION 3
+
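+/*
+ * Every APMF call passes the function number as the first argument and an
+ * optional input buffer as the second; when no buffer is needed a plain
+ * integer 0 is passed instead. The returned ACPI object must be freed by
+ * the caller with kfree().
+ */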
+static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apmf_if_arg_list;
+ union acpi_object apmf_if_args[2];
+ acpi_status status;
+
+ apmf_if_arg_list.count = 2;
+ apmf_if_arg_list.pointer = &apmf_if_args[0];
+
+ apmf_if_args[0].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[0].integer.value = fn;
+
+ if (param) {
+ apmf_if_args[1].type = ACPI_TYPE_BUFFER;
+ apmf_if_args[1].buffer.length = param->length;
+ apmf_if_args[1].buffer.pointer = param->pointer;
+ } else {
+ apmf_if_args[1].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APMF method:%d call failed\n", fn);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apmf_if_call(pdev, fn, NULL);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (info->buffer.length < 2) {
+ dev_err(pdev->dev, "buffer too small\n");
+ err = -EINVAL;
+ goto out;
+ }
+
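+	/* The first u16 of the returned buffer is the length the BIOS claims to have filled */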
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+		dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dest, info->buffer.pointer, out_sz);
+
+out:
+ kfree(info);
+ return err;
+}
+
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
+{
+ /* If bit-n is set, that indicates function n+1 is supported */
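+	/* e.g. supported_func = 0x0113 -> functions 1, 2, 5 and 9 are supported */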
+ return !!(pdev->supported_func & BIT(index - 1));
+}
+
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
+static void apmf_sbios_heartbeat_notify(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
+ union acpi_object *info;
+
+ dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
+ if (!info)
+ goto out;
+
+ schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+
+out:
+ kfree(info);
+}
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
+{
+ union acpi_object *info;
+ struct apmf_fan_idx args;
+ struct acpi_buffer params;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.fan_ctl_mode = manual;
+ args.fan_ctl_idx = idx;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(info);
+ return err;
+}
+
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
+}
+
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
+ req, sizeof(*req));
+}
+
+static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ struct apmf_sbios_req req;
+ int ret;
+
+ mutex_lock(&pmf_dev->update_mutex);
+ ret = apmf_get_sbios_requests(pmf_dev, &req);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
+ goto out;
+ }
+
+ if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n",
+ req.amt_event ? "Enabled" : "Disabled");
+ pmf_dev->amt_enabled = !!req.amt_event;
+
+ if (pmf_dev->amt_enabled)
+ amd_pmf_handle_amt(pmf_dev);
+ else
+ amd_pmf_reset_amt(pmf_dev);
+ }
+
+ if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n",
+ req.cql_event ? "Enabled" : "Disabled");
+
+ /* update the target mode information */
+ if (pmf_dev->amt_enabled)
+ amd_pmf_update_2_cql(pmf_dev, req.cql_event);
+ }
+out:
+ mutex_unlock(&pmf_dev->update_mutex);
+}
+
+static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
+{
+ struct apmf_verify_interface output;
+ int err;
+
+ err = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output, sizeof(output));
+ if (err)
+ return err;
+
+ pdev->supported_func = output.supported_functions;
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
+ output.supported_functions, output.notification_mask);
+
+ return 0;
+}
+
+static int apmf_get_system_params(struct amd_pmf_dev *dev)
+{
+ struct apmf_system_params params;
+ int err;
+
+ if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS))
+ return -EINVAL;
+
+ err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, &params, sizeof(params));
+ if (err)
+ return err;
+
+ dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n",
+ params.valid_mask,
+ params.flags,
+ params.command_code,
+ params.heartbeat_int);
+ params.flags = params.flags & params.valid_mask;
+ dev->hb_interval = params.heartbeat_int;
+
+ return 0;
+}
+
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data));
+}
+
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));
+}
+
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+ acpi_status status;
+
+ /* Install the APMF Notify handler */
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler\n");
+ return -ENODEV;
+ }
+
+		/* Call the handler once manually to catch up on possibly missed notifications. */
+ apmf_event_handler(ahandle, 0, pmf_dev);
+ }
+
+ return 0;
+}
+
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+
+ if (pmf_dev->hb_interval)
+ cancel_delayed_work_sync(&pmf_dev->heart_beat);
+
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+}
+
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
+{
+ int ret;
+
+ ret = apmf_if_verify_interface(pmf_dev);
+ if (ret) {
+ dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret);
+ goto out;
+ }
+
+ ret = apmf_get_system_params(pmf_dev);
+ if (ret) {
+ dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ goto out;
+ }
+
+ if (pmf_dev->hb_interval) {
+ /* send heartbeats only if the interval is not zero */
+ INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
+ schedule_delayed_work(&pmf_dev->heart_beat, 0);
+ }
+
+out:
+ return ret;
+}
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
new file mode 100644
index 000000000000..644af42e07cf
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct auto_mode_mode_config config_store;
+static const char *state_as_str(unsigned int state);
+
+static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
+ struct auto_mode_mode_config *table)
+{
+ struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
+ config_store.mode_set[idx].fan_control.fan_id);
+}
+
+static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power)
+{
+ int i, total = 0;
+
+ if (pdev->socket_power_history_idx == -1) {
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ pdev->socket_power_history[i] = socket_power;
+ }
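+	/*
+	 * On the very first sample the whole history is seeded with the current
+	 * reading, so the average starts at the live value instead of zero;
+	 * afterwards the history behaves as a circular buffer.
+	 */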
+
+ pdev->socket_power_history_idx = (pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE;
+ pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power;
+
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ total += pdev->socket_power_history[i];
+
+ return total / AVG_SAMPLE_SIZE;
+}
+
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms)
+{
+ int avg_power = 0;
+ bool update = false;
+ int i, j;
+
+	/* Get the moving average of the socket power for the auto mode algorithm */
+ avg_power = amd_pmf_get_moving_avg(dev, socket_power);
+
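+	/*
+	 * A transition is applied only after its power condition has held for
+	 * at least time_constant ms; the timer resets whenever the condition
+	 * breaks, which filters out short power spikes.
+	 */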
+ for (i = 0; i < AUTO_TRANSITION_MAX; i++) {
+ if ((config_store.transition[i].shifting_up && avg_power >=
+ config_store.transition[i].power_threshold) ||
+ (!config_store.transition[i].shifting_up && avg_power <=
+ config_store.transition[i].power_threshold)) {
+ if (config_store.transition[i].timer <
+ config_store.transition[i].time_constant)
+ config_store.transition[i].timer += time_elapsed_ms;
+ } else {
+ config_store.transition[i].timer = 0;
+ }
+
+ if (config_store.transition[i].timer >=
+ config_store.transition[i].time_constant &&
+ !config_store.transition[i].applied) {
+ config_store.transition[i].applied = true;
+ update = true;
+ } else if (config_store.transition[i].timer <=
+ config_store.transition[i].time_constant &&
+ config_store.transition[i].applied) {
+ config_store.transition[i].applied = false;
+ update = true;
+ }
+ }
+
+ dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
+ state_as_str(config_store.current_mode));
+
+ if (update) {
+ for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
+			/* Apply the mode with the highest priority identified */
+ if (config_store.transition[j].applied) {
+ if (config_store.current_mode !=
+ config_store.transition[j].target_mode) {
+ config_store.current_mode =
+ config_store.transition[j].target_mode;
+ dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ }
+}
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event)
+{
+ int mode = config_store.current_mode;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ is_cql_event ? AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE;
+
+ if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) &&
+ mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) {
+ mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode;
+ amd_pmf_set_automode(dev, mode, NULL);
+ }
+ dev_dbg(dev->dev, "updated CQL thermals\n");
+}
+
+static void amd_pmf_get_power_threshold(void)
+{
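+	/*
+	 * Each threshold is the power floor of the mode being left minus a
+	 * BIOS-provided delta for that transition.
+	 */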
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_QUIET].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case AUTO_QUIET:
+ return "QUIET";
+ case AUTO_BALANCE:
+ return "BALANCED";
+ case AUTO_PERFORMANCE_ON_LAP:
+ return "ON_LAP";
+ case AUTO_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown Auto Mode State";
+ }
+}
+
+static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev)
+{
+ struct apmf_auto_mode output;
+ struct power_table_control *pwr_ctrl;
+ int i;
+
+ apmf_get_auto_mode_def(dev, &output);
+ /* time constant */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant =
+ output.balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant =
+ output.balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant =
+ output.quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant =
+ output.perf_to_balanced;
+
+ /* power floor */
+ config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet;
+ config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf;
+
+ /* Power delta for mode change */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta =
+ output.pd_balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta =
+ output.pd_balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta =
+ output.pd_quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta =
+ output.pd_perf_to_balanced;
+
+ /* Power threshold */
+ amd_pmf_get_power_threshold();
+
+ /* skin temperature limits */
+ pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control;
+ pwr_ctrl->spl = output.spl_quiet;
+ pwr_ctrl->sppt = output.sppt_quiet;
+ pwr_ctrl->fppt = output.fppt_quiet;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet;
+ pwr_ctrl->stt_min = output.stt_min_limit_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control;
+ pwr_ctrl->spl = output.spl_balanced;
+ pwr_ctrl->sppt = output.sppt_balanced;
+ pwr_ctrl->fppt = output.fppt_balanced;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced;
+ pwr_ctrl->stt_min = output.stt_min_limit_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control;
+ pwr_ctrl->spl = output.spl_perf;
+ pwr_ctrl->sppt = output.sppt_perf;
+ pwr_ctrl->fppt = output.fppt_perf;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control;
+ pwr_ctrl->spl = output.spl_perf_on_lap;
+ pwr_ctrl->sppt = output.sppt_perf_on_lap;
+ pwr_ctrl->fppt = output.fppt_perf_on_lap;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap;
+
+ /* Fan ID */
+ config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet;
+ config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id =
+ output.fan_id_perf;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ AUTO_PERFORMANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up =
+ false;
+
+	for (i = 0; i < AUTO_MODE_MAX; i++) {
+ if (config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i].fan_control.manual = false;
+ else
+ config_store.mode_set[i].fan_control.manual = true;
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = AUTO_BALANCE;
+ dev->socket_power_history_idx = -1;
+}
+
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
+{
+ /*
+	 * The OEM BIOS implementation guide says that if auto mode is enabled,
+	 * the platform_profile registration shall be done by the OEM driver.
+	 * There could be cases where both the static slider and the auto mode
+	 * BIOS functions are enabled; in that case, apply static slider updates
+	 * only if it is advertised as supported.
+ */
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ int mode = amd_pmf_get_pprof_modes(dev);
+
+ if (mode < 0)
+ return mode;
+
+ dev_dbg(dev->dev, "resetting AMT thermals\n");
+ amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
+ }
+ return 0;
+}
+
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev)
+{
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+}
+
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
+{
+ amd_pmf_load_defaults_auto_mode(dev);
+ /* update the thermal limits for Automode */
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ amd_pmf_init_metrics_table(dev);
+}
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
new file mode 100644
index 000000000000..668c7c0fea83
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct cnqf_config config_store;
+
+static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
+ struct cnqf_config *table)
+{
+ struct power_table_control *pc;
+
+ pc = &config_store.mode_set[src][idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
+ NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
+ NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev,
+ config_store.mode_set[src][idx].fan_control.manual,
+ config_store.mode_set[src][idx].fan_control.fan_id);
+
+ return 0;
+}
+
+static void amd_pmf_update_power_threshold(int src)
+{
+ struct cnqf_mode_settings *ts;
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_QUIET];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_TURBO];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_QUIET];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_TURBO];
+ tp->power_threshold = ts->power_floor;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case CNQF_MODE_QUIET:
+ return "QUIET";
+ case CNQF_MODE_BALANCE:
+ return "BALANCED";
+ case CNQF_MODE_TURBO:
+ return "TURBO";
+ case CNQF_MODE_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown CnQF mode";
+ }
+}
+
+static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) &&
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return amd_pmf_get_power_source();
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return POWER_SOURCE_DC;
+ else
+ return POWER_SOURCE_AC;
+}
+
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms)
+{
+ struct cnqf_tran_params *tp;
+ int src, i, j;
+ u32 avg_power = 0;
+
+ src = amd_pmf_cnqf_get_power_source(dev);
+
+ if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ } else {
+ /*
+ * Return from here if the platform_profile is not balanced
+ * so that preference is given to user mode selection, rather
+ * than enforcing CnQF to run all the time (if enabled)
+ */
+ return -EINVAL;
+ }
+
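+	/*
+	 * Power samples are accumulated per transition; once time_constant ms
+	 * have elapsed, the average is compared against the power threshold to
+	 * decide whether that transition becomes the preferred candidate.
+	 */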
+ for (i = 0; i < CNQF_TRANSITION_MAX; i++) {
+ config_store.trans_param[src][i].timer += time_lapsed_ms;
+ config_store.trans_param[src][i].total_power += socket_power;
+ config_store.trans_param[src][i].count++;
+
+ tp = &config_store.trans_param[src][i];
+ if (tp->timer >= tp->time_constant && tp->count) {
+ avg_power = tp->total_power / tp->count;
+
+ /* Reset the indices */
+ tp->timer = 0;
+ tp->total_power = 0;
+ tp->count = 0;
+
+ if ((tp->shifting_up && avg_power >= tp->power_threshold) ||
+ (!tp->shifting_up && avg_power <= tp->power_threshold)) {
+ tp->priority = true;
+ } else {
+ tp->priority = false;
+ }
+ }
+ }
+
+ dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n",
+ avg_power, socket_power, state_as_str(config_store.current_mode));
+
+ for (j = 0; j < CNQF_TRANSITION_MAX; j++) {
+ /* apply the highest priority */
+ if (config_store.trans_param[src][j].priority) {
+ if (config_store.current_mode !=
+ config_store.trans_param[src][j].target_mode) {
+ config_store.current_mode =
+ config_store.trans_param[src][j].target_mode;
+ dev_dbg(dev->dev, "Moving to Mode :%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_cnqf(dev, src,
+ config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output out)
+{
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET];
+ tp->time_constant = out.t_balanced_to_quiet;
+ tp->target_mode = CNQF_MODE_QUIET;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ tp->time_constant = out.t_balanced_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ tp->time_constant = out.t_quiet_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ tp->time_constant = out.t_perf_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ tp->time_constant = out.t_turbo_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO];
+ tp->time_constant = out.t_perf_to_turbo;
+ tp->target_mode = CNQF_MODE_TURBO;
+ tp->shifting_up = true;
+}
+
+static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output out)
+{
+ struct cnqf_mode_settings *ms;
+
+ /* Quiet Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_QUIET];
+ ms->power_floor = out.ps[APMF_CNQF_QUIET].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_QUIET].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_QUIET].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_QUIET].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_QUIET].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_QUIET].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_QUIET].fan_id;
+
+ /* Balance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE];
+ ms->power_floor = out.ps[APMF_CNQF_BALANCE].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_BALANCE].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_BALANCE].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_BALANCE].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_BALANCE].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_BALANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_BALANCE].fan_id;
+
+ /* Performance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE];
+ ms->power_floor = out.ps[APMF_CNQF_PERFORMANCE].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_PERFORMANCE].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_PERFORMANCE].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_PERFORMANCE].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_PERFORMANCE].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_PERFORMANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_PERFORMANCE].fan_id;
+
+ /* Turbo Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_TURBO];
+ ms->power_floor = out.ps[APMF_CNQF_TURBO].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_TURBO].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_TURBO].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_TURBO].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_TURBO].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_TURBO].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_TURBO].fan_id;
+}
+
+static int amd_pmf_check_flags(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out = {};
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC))
+ apmf_get_dyn_slider_def_ac(dev, &out);
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ apmf_get_dyn_slider_def_dc(dev, &out);
+
+ return out.flags;
+}
+
+static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out;
+ int i, j, ret;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i))
+ continue;
+
+ if (i == POWER_SOURCE_AC)
+ ret = apmf_get_dyn_slider_def_ac(dev, &out);
+ else
+ ret = apmf_get_dyn_slider_def_dc(dev, &out);
+ if (ret) {
+			dev_err(dev->dev, "APMF get dynamic slider defaults failed :%d\n", ret);
+ return ret;
+ }
+
+ amd_pmf_update_mode_set(i, out);
+ amd_pmf_update_trans_data(i, out);
+ amd_pmf_update_power_threshold(i);
+
+ for (j = 0; j < CNQF_MODE_MAX; j++) {
+ if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i][j].fan_control.manual = false;
+ else
+ config_store.mode_set[i][j].fan_control.manual = true;
+ }
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = CNQF_MODE_BALANCE;
+
+ return 0;
+}
+
+static ssize_t cnqf_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+ int mode, result, src;
+ bool input;
+
+ mode = amd_pmf_get_pprof_modes(pdev);
+ if (mode < 0)
+ return mode;
+
+ result = kstrtobool(buf, &input);
+ if (result)
+ return result;
+
+ src = amd_pmf_cnqf_get_power_source(pdev);
+ pdev->cnqf_enabled = input;
+
+ if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
+ } else {
+ if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
+ }
+
+ dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
+ return count;
+}
+
+static ssize_t cnqf_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", pdev->cnqf_enabled ? "on" : "off");
+}
+
+static DEVICE_ATTR_RW(cnqf_enable);
+
+static umode_t cnqf_feature_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return pdev->cnqf_supported ? attr->mode : 0;
+}
+
+static struct attribute *cnqf_feature_attrs[] = {
+ &dev_attr_cnqf_enable.attr,
+ NULL
+};
+
+const struct attribute_group cnqf_feature_attribute_group = {
+ .is_visible = cnqf_feature_is_visible,
+ .attrs = cnqf_feature_attrs,
+};
+
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
+{
+ int ret, src;
+
+ /*
+	 * Note the caller of this function has already checked that at least
+	 * one of APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC is
+	 * supported.
+ */
+
+ ret = amd_pmf_load_defaults_cnqf(dev);
+ if (ret < 0)
+ return ret;
+
+ amd_pmf_init_metrics_table(dev);
+
+ dev->cnqf_supported = true;
+ dev->cnqf_enabled = amd_pmf_check_flags(dev);
+
+	/* update the thermal limits for CnQF */
+ if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ src = amd_pmf_cnqf_get_power_source(dev);
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ }
+
+ return 0;
+}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
new file mode 100644
index 000000000000..a5f5a4bcff6d
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include "pmf.h"
+
+/* PMF-SMU communication registers */
+#define AMD_PMF_REGISTER_MESSAGE 0xA18
+#define AMD_PMF_REGISTER_RESPONSE 0xA78
+#define AMD_PMF_REGISTER_ARGUMENT 0xA58
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMF_SMU_INDEX_ADDRESS 0xB8
+#define AMD_PMF_SMU_INDEX_DATA 0xBC
+#define AMD_PMF_MAPPING_SIZE 0x01000
+#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMF_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMF_RESULT_OK 0x01
+#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMF_RESULT_FAILED 0xFF
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RMB 0x14b5
+#define AMD_CPU_ID_PS 0x14e8
+
+#define PMF_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+
+/* override the Metrics Table sampling interval (in ms) */
+static int metrics_table_loop_ms = 1000;
+module_param(metrics_table_loop_ms, int, 0644);
+MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sampling interval (default = 1000ms)");
+
+/* Force load on supported older platforms */
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+
+static int current_power_limits_show(struct seq_file *seq, void *unused)
+{
+ struct amd_pmf_dev *dev = seq->private;
+ struct amd_pmf_static_slider_granular table;
+ int mode, src = 0;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+ amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
+ seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
+ table.prop[src][mode].spl,
+ table.prop[src][mode].fppt,
+ table.prop[src][mode].sppt,
+ table.prop[src][mode].sppt_apu_only,
+ table.prop[src][mode].stt_min,
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(current_power_limits);
+
+static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
+{
+ debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
+{
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
+}
+
+int amd_pmf_get_power_source(void)
+{
+ if (power_supply_is_system_supplied() > 0)
+ return POWER_SOURCE_AC;
+ else
+ return POWER_SOURCE_DC;
+}
+
+static void amd_pmf_get_metrics(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
+ ktime_t time_elapsed_ms;
+ int socket_power;
+
+ mutex_lock(&dev->update_mutex);
+ /* Transfer table contents */
+ memset(dev->buf, 0, sizeof(dev->m_table));
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
+
+ time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
+	/* Calculate the total SoC power consumption (APU + dGPU) */
+ socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+
+ if (dev->amt_enabled) {
+ /* Apply the Auto Mode transition */
+ amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
+ }
+
+ if (dev->cnqf_enabled) {
+ /* Apply the CnQF transition */
+ amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
+ }
+
+ dev->start_time = ktime_to_ms(ktime_get());
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
+ mutex_unlock(&dev->update_mutex);
+}
+
+static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
+{
+ return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
+{
+ iowrite32(val, dev->regbase + reg_offset);
+}
+
+static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
+{
+ u32 value;
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
+}
+
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
+{
+ int rc;
+ u32 val;
+
+ mutex_lock(&dev->lock);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+ goto out_unlock;
+ }
+
+ /* Write zero to response register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);
+
+ /* Write argument into argument register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);
+
+ /* Write message ID to message ID register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMF_RESULT_OK:
+ if (get) {
+			/* PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ }
+ break;
+ case AMD_PMF_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMF_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmf_dump_registers(dev);
+ return rc;
+}
+
+static const struct pci_device_id pmf_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { }
+};
+
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
+ u64 phys_addr;
+ u32 hi, low;
+
+ INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+
+ /* Get Metrics Table Address */
+ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ if (!dev->buf)
+ return -ENOMEM;
+
+ phys_addr = virt_to_phys(dev->buf);
+ hi = phys_addr >> 32;
+ low = phys_addr & GENMASK(31, 0);
+
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+
+ /*
+	 * Start collecting the metrics data after a small delay, or else we
+	 * might end up reading stale values from PMFW.
+ */
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));
+
+ return 0;
+}
+
+static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+{
+ int ret;
+
+ /* Enable Static Slider */
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ amd_pmf_init_sps(dev);
+ dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
+ }
+
+ /* Enable Auto Mode */
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_init_auto_mode(dev);
+ dev_dbg(dev->dev, "Auto Mode Init done\n");
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ /* Enable Cool n Quiet Framework (CnQF) */
+ ret = amd_pmf_init_cnqf(dev);
+ if (ret)
+ dev_warn(dev->dev, "CnQF Init failed\n");
+ }
+}
+
+static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_deinit_sps(dev);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_deinit_auto_mode(dev);
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ amd_pmf_deinit_cnqf(dev);
+ }
+}
+
+static const struct acpi_device_id amd_pmf_acpi_ids[] = {
+ {"AMDI0100", 0x100},
+ {"AMDI0102", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+
+static int amd_pmf_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *id;
+ struct amd_pmf_dev *dev;
+ struct pci_dev *rdev;
+ u32 base_addr_lo;
+ u32 base_addr_hi;
+ u64 base_addr;
+ u32 val;
+ int err;
+
+ id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ if (id->driver_data == 0x100 && !force_load)
+ return -ENODEV;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->dev = &pdev->dev;
+
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
+ pci_dev_put(rdev);
+ return -ENODEV;
+ }
+
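+	/*
+	 * The SMU mailbox base address is read indirectly through the root
+	 * complex: write the register offset to SMU_INDEX_ADDRESS, then read
+	 * the value back from SMU_INDEX_DATA.
+	 */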
+ dev->cpu_id = rdev->device;
+ err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
+ if (err) {
+ dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ if (err) {
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
+
+ err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
+ if (err) {
+ dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ if (err) {
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
+ AMD_PMF_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+	/* Initialize the locks before anything that can send SMU commands or handle ACPI notifies */
+	mutex_init(&dev->lock);
+	mutex_init(&dev->update_mutex);
+
+	apmf_acpi_init(dev);
+	platform_set_drvdata(pdev, dev);
+	amd_pmf_init_features(dev);
+	apmf_install_handler(dev);
+	amd_pmf_dbgfs_register(dev);
+ dev_info(dev->dev, "registered PMF device successfully\n");
+
+ return 0;
+}
+
+static int amd_pmf_remove(struct platform_device *pdev)
+{
+ struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
+
+	amd_pmf_deinit_features(dev);
+	apmf_acpi_deinit(dev);
+	amd_pmf_dbgfs_unregister(dev);
+	mutex_destroy(&dev->lock);
+	mutex_destroy(&dev->update_mutex);
+ kfree(dev->buf);
+ return 0;
+}
+
+static const struct attribute_group *amd_pmf_driver_groups[] = {
+ &cnqf_feature_attribute_group,
+ NULL,
+};
+
+static struct platform_driver amd_pmf_driver = {
+ .driver = {
+ .name = "amd-pmf",
+ .acpi_match_table = amd_pmf_acpi_ids,
+ .dev_groups = amd_pmf_driver_groups,
+ },
+ .probe = amd_pmf_probe,
+ .remove = amd_pmf_remove,
+};
+module_platform_driver(amd_pmf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
new file mode 100644
index 000000000000..84bbe2c6ea61
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#ifndef PMF_H
+#define PMF_H
+
+#include <linux/acpi.h>
+#include <linux/platform_profile.h>
+
+/* APMF Functions */
+#define APMF_FUNC_VERIFY_INTERFACE 0
+#define APMF_FUNC_GET_SYS_PARAMS 1
+#define APMF_FUNC_SBIOS_REQUESTS 2
+#define APMF_FUNC_SBIOS_HEARTBEAT 4
+#define APMF_FUNC_AUTO_MODE 5
+#define APMF_FUNC_SET_FAN_IDX 7
+#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
+#define APMF_FUNC_DYN_SLIDER_AC 11
+#define APMF_FUNC_DYN_SLIDER_DC 12
+
+/* Message Definitions */
+#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
+#define SET_SPPT 0x05 /* SPPT: Slow Package Power Tracking */
+#define SET_FPPT 0x07 /* FPPT: Fast Package Power Tracking */
+#define GET_SPL 0x0B
+#define GET_SPPT 0x0D
+#define GET_FPPT 0x0F
+#define SET_DRAM_ADDR_HIGH 0x14
+#define SET_DRAM_ADDR_LOW 0x15
+#define SET_TRANSFER_TABLE 0x16
+#define SET_STT_MIN_LIMIT 0x18 /* STT: Skin Temperature Tracking */
+#define SET_STT_LIMIT_APU 0x19
+#define SET_STT_LIMIT_HS2 0x1A
+#define SET_SPPT_APU_ONLY 0x1D
+#define GET_SPPT_APU_ONLY 0x1E
+#define GET_STT_MIN_LIMIT 0x1F
+#define GET_STT_LIMIT_APU 0x20
+#define GET_STT_LIMIT_HS2 0x21
+
+/* Fan Index for Auto Mode */
+#define FAN_INDEX_AUTO 0xFFFFFFFF
+
+#define ARG_NONE 0
+#define AVG_SAMPLE_SIZE 3
+
+/* AMD PMF BIOS interfaces */
+struct apmf_verify_interface {
+ u16 size;
+ u16 version;
+ u32 notification_mask;
+ u32 supported_functions;
+} __packed;
+
+struct apmf_system_params {
+ u16 size;
+ u32 valid_mask;
+ u32 flags;
+ u8 command_code;
+ u32 heartbeat_int;
+} __packed;
+
+struct apmf_sbios_req {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u8 cql_event;
+ u8 amt_event;
+ u32 fppt;
+ u32 sppt;
+ u32 fppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+} __packed;
+
+struct apmf_fan_idx {
+ u16 size;
+ u8 fan_ctl_mode;
+ u32 fan_ctl_idx;
+} __packed;
+
+struct smu_pmf_metrics {
+ u16 gfxclk_freq; /* in MHz */
+ u16 socclk_freq; /* in MHz */
+ u16 vclk_freq; /* in MHz */
+ u16 dclk_freq; /* in MHz */
+ u16 memclk_freq; /* in MHz */
+ u16 spare;
+ u16 gfx_activity; /* in Centi */
+ u16 uvd_activity; /* in Centi */
+ u16 voltage[2]; /* in mV */
+ u16 currents[2]; /* in mA */
+ u16 power[2];/* in mW */
+ u16 core_freq[8]; /* in MHz */
+ u16 core_power[8]; /* in mW */
+ u16 core_temp[8]; /* in centi-Celsius */
+ u16 l3_freq; /* in MHz */
+ u16 l3_temp; /* in centi-Celsius */
+ u16 gfx_temp; /* in centi-Celsius */
+ u16 soc_temp; /* in centi-Celsius */
+ u16 throttler_status;
+ u16 current_socketpower; /* in mW */
+ u16 stapm_orig_limit; /* in W */
+ u16 stapm_cur_limit; /* in W */
+ u32 apu_power; /* in mW */
+ u32 dgpu_power; /* in mW */
+ u16 vdd_tdc_val; /* in mA */
+ u16 soc_tdc_val; /* in mA */
+ u16 vdd_edc_val; /* in mA */
+ u16 soc_edcv_al; /* in mA */
+ u16 infra_cpu_maxfreq; /* in MHz */
+ u16 infra_gfx_maxfreq; /* in MHz */
+ u16 skin_temp; /* in centi-Celsius */
+ u16 device_state;
+} __packed;
+
+enum amd_stt_skin_temp {
+ STT_TEMP_APU,
+ STT_TEMP_HS2,
+ STT_TEMP_COUNT,
+};
+
+enum amd_slider_op {
+ SLIDER_OP_GET,
+ SLIDER_OP_SET,
+};
+
+enum power_source {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_MAX,
+};
+
+enum power_modes {
+ POWER_MODE_PERFORMANCE,
+ POWER_MODE_BALANCED_POWER,
+ POWER_MODE_POWER_SAVER,
+ POWER_MODE_MAX,
+};
+
+struct amd_pmf_dev {
+ void __iomem *regbase;
+ void __iomem *smu_virt_addr;
+ void *buf;
+ u32 base_addr;
+ u32 cpu_id;
+ struct device *dev;
+ struct mutex lock; /* protects the PMF interface */
+ u32 supported_func;
+ enum platform_profile_option current_profile;
+ struct platform_profile_handler pprof;
+ struct dentry *dbgfs_dir;
+ int hb_interval; /* SBIOS heartbeat interval */
+ struct delayed_work heart_beat;
+ struct smu_pmf_metrics m_table;
+ struct delayed_work work_buffer;
+ ktime_t start_time;
+ int socket_power_history[AVG_SAMPLE_SIZE];
+ int socket_power_history_idx;
+ bool amt_enabled;
+ struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
+ bool cnqf_enabled;
+ bool cnqf_supported;
+};
+
+struct apmf_sps_prop_granular {
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min;
+ u8 stt_skin_temp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+/* Static Slider */
+struct apmf_static_slider_granular_output {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX * POWER_MODE_MAX];
+} __packed;
+
+struct amd_pmf_static_slider_granular {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
+};
+
+struct fan_table_control {
+ bool manual;
+ unsigned long fan_id;
+};
+
+struct power_table_control {
+ u32 spl;
+ u32 sppt;
+ u32 fppt;
+ u32 sppt_apu_only;
+ u32 stt_min;
+ u32 stt_skin_temp[STT_TEMP_COUNT];
+ u32 reserved[16];
+};
+
+/* Auto Mode Layer */
+enum auto_mode_transition_priority {
+ AUTO_TRANSITION_TO_PERFORMANCE, /* Any other mode to Performance Mode */
+ AUTO_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ AUTO_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance Mode to Balance Mode */
+ AUTO_TRANSITION_MAX,
+};
+
+enum auto_mode_mode {
+ AUTO_QUIET,
+ AUTO_BALANCE,
+ AUTO_PERFORMANCE_ON_LAP,
+ AUTO_PERFORMANCE,
+ AUTO_MODE_MAX,
+};
+
+struct auto_mode_trans_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_delta; /* delta power to shift mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time. if timer > TimeThreshold, it will move to next mode */
+ u32 applied;
+ enum auto_mode_mode target_mode;
+ u32 shifting_up;
+};
+
+struct auto_mode_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct auto_mode_mode_config {
+ struct auto_mode_trans_params transition[AUTO_TRANSITION_MAX];
+ struct auto_mode_mode_settings mode_set[AUTO_MODE_MAX];
+ enum auto_mode_mode current_mode;
+};
+
+struct apmf_auto_mode {
+ u16 size;
+ /* time constant */
+ u32 balanced_to_perf;
+ u32 perf_to_balanced;
+ u32 quiet_to_balanced;
+ u32 balanced_to_quiet;
+ /* power floor */
+ u32 pfloor_perf;
+ u32 pfloor_balanced;
+ u32 pfloor_quiet;
+ /* Power delta for mode change */
+ u32 pd_balanced_to_perf;
+ u32 pd_perf_to_balanced;
+ u32 pd_quiet_to_balanced;
+ u32 pd_balanced_to_quiet;
+ /* skin temperature limits */
+ u8 stt_apu_perf_on_lap; /* CQL ON */
+ u8 stt_hs2_perf_on_lap; /* CQL ON */
+ u8 stt_apu_perf;
+ u8 stt_hs2_perf;
+ u8 stt_apu_balanced;
+ u8 stt_hs2_balanced;
+ u8 stt_apu_quiet;
+ u8 stt_hs2_quiet;
+ u32 stt_min_limit_perf_on_lap; /* CQL ON */
+ u32 stt_min_limit_perf;
+ u32 stt_min_limit_balanced;
+ u32 stt_min_limit_quiet;
+ /* SPL based */
+ u32 fppt_perf_on_lap; /* CQL ON */
+ u32 sppt_perf_on_lap; /* CQL ON */
+ u32 spl_perf_on_lap; /* CQL ON */
+ u32 sppt_apu_only_perf_on_lap; /* CQL ON */
+ u32 fppt_perf;
+ u32 sppt_perf;
+ u32 spl_perf;
+ u32 sppt_apu_only_perf;
+ u32 fppt_balanced;
+ u32 sppt_balanced;
+ u32 spl_balanced;
+ u32 sppt_apu_only_balanced;
+ u32 fppt_quiet;
+ u32 sppt_quiet;
+ u32 spl_quiet;
+ u32 sppt_apu_only_quiet;
+ /* Fan ID */
+ u32 fan_id_perf;
+ u32 fan_id_balanced;
+ u32 fan_id_quiet;
+} __packed;
+
+/* CnQF Layer */
+enum cnqf_trans_priority {
+ CNQF_TRANSITION_TO_TURBO, /* Any other mode to Turbo Mode */
+ CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE, /* quiet/balance to Performance Mode */
+ CNQF_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ CNQF_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance/Turbo to Balance Mode */
+ CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE, /* Turbo mode to Performance Mode */
+ CNQF_TRANSITION_MAX,
+};
+
+enum cnqf_mode {
+ CNQF_MODE_QUIET,
+ CNQF_MODE_BALANCE,
+ CNQF_MODE_PERFORMANCE,
+ CNQF_MODE_TURBO,
+ CNQF_MODE_MAX,
+};
+
+enum apmf_cnqf_pos {
+ APMF_CNQF_TURBO,
+ APMF_CNQF_PERFORMANCE,
+ APMF_CNQF_BALANCE,
+ APMF_CNQF_QUIET,
+ APMF_CNQF_MAX,
+};
+
+struct cnqf_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct cnqf_tran_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time. if timer > timethreshold, it will move to next mode */
+ u32 total_power;
+ u32 count;
+ bool priority;
+ bool shifting_up;
+ enum cnqf_mode target_mode;
+};
+
+struct cnqf_config {
+ struct cnqf_tran_params trans_param[POWER_SOURCE_MAX][CNQF_TRANSITION_MAX];
+ struct cnqf_mode_settings mode_set[POWER_SOURCE_MAX][CNQF_MODE_MAX];
+ struct power_table_control defaults;
+ enum cnqf_mode current_mode;
+ u32 power_src;
+ u32 avg_power;
+};
+
+struct apmf_cnqf_power_set {
+ u32 pfloor;
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 stt_skintemp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+struct apmf_dyn_slider_output {
+ u16 size;
+ u16 flags;
+ u32 t_perf_to_turbo;
+ u32 t_balanced_to_perf;
+ u32 t_quiet_to_balanced;
+ u32 t_balanced_to_quiet;
+ u32 t_perf_to_balanced;
+ u32 t_turbo_to_perf;
+ struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
+} __packed;
+
+/* Core Layer */
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index);
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data);
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+int amd_pmf_get_power_source(void);
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+
+/* SPS Layer */
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table);
+int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *output);
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+
+/* Auto Mode Layer */
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev);
+
+/* CnQF Layer */
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
+extern const struct attribute_group cnqf_feature_attribute_group;
+
+#endif /* PMF_H */
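
For orientation, here is a minimal sketch (not part of the patch) of how a feature layer consumes the core-layer helpers declared above: it gates itself on an APMF function advertised by the SBIOS and then issues a GET message through the SMU mailbox. The helper name is hypothetical; only the prototypes and message IDs from pmf.h are assumed.

/* Hypothetical helper, for illustration only: read the current Sustained
 * Power Limit (SPL) via the core layer. Uses only pmf.h declarations.
 */
static int example_get_spl(struct amd_pmf_dev *dev, u32 *spl)
{
        /* Feature layers are only initialized when the SBIOS advertises them. */
        if (!is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
                return -ENODEV;

        /* GET_* messages carry no argument; the mailbox result lands in *spl. */
        return amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, spl);
}
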
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
new file mode 100644
index 000000000000..dba7e36962dc
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework (PMF) Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include "pmf.h"
+
+static struct amd_pmf_static_slider_granular config_store;
+
+static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output output;
+ int i, j, idx = 0;
+
+ memset(&config_store, 0, sizeof(config_store));
+ apmf_get_static_slider_granular(dev, &output);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ for (j = 0; j < POWER_MODE_MAX; j++) {
+ config_store.prop[i][j].spl = output.prop[idx].spl;
+ config_store.prop[i][j].sppt = output.prop[idx].sppt;
+ config_store.prop[i][j].sppt_apu_only =
+ output.prop[idx].sppt_apu_only;
+ config_store.prop[i][j].fppt = output.prop[idx].fppt;
+ config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_APU];
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
+ config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
+ idx++;
+ }
+ }
+}
+
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table)
+{
+ int src = amd_pmf_get_power_source();
+
+ if (op == SLIDER_OP_SET) {
+ amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ config_store.prop[src][idx].sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ config_store.prop[src][idx].stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+ } else if (op == SLIDER_OP_GET) {
+ amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ &table->prop[src][idx].sppt_apu_only);
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ &table->prop[src][idx].stt_min);
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+ (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+ (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
+ }
+}
+
+static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+
+ *profile = pmf->current_profile;
+ return 0;
+}
+
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ switch (pmf->current_profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ mode = POWER_MODE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ mode = POWER_MODE_BALANCED_POWER;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ mode = POWER_MODE_POWER_SAVER;
+ break;
+ default:
+ dev_err(pmf->dev, "Unknown Platform Profile.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return mode;
+}
+
+static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ int mode;
+
+ pmf->current_profile = profile;
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+ return 0;
+}
+
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
+ int err;
+
+ dev->current_profile = PLATFORM_PROFILE_BALANCED;
+ amd_pmf_load_defaults_sps(dev);
+
+ dev->pprof.profile_get = amd_pmf_profile_get;
+ dev->pprof.profile_set = amd_pmf_profile_set;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dev->pprof);
+ if (err)
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
+ err);
+
+ return err;
+}
+
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
+{
+ platform_profile_remove();
+}
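
The SPS layer above plugs into the generic platform-profile framework, so profile selection from userspace goes through the standard ACPI sysfs interface rather than a driver-specific file. A small userspace sketch follows (the paths assume the stock platform_profile ABI; it is not part of the patch):

#include <stdio.h>

int main(void)
{
        char choices[128] = "";
        FILE *f;

        /* List the profiles amd_pmf_init_sps() registered (low-power/balanced/performance). */
        f = fopen("/sys/firmware/acpi/platform_profile_choices", "r");
        if (f) {
                if (fgets(choices, sizeof(choices), f))
                        printf("choices: %s", choices);
                fclose(f);
        }

        /* Select a profile; amd_pmf_profile_set() maps it to a slider mode. */
        f = fopen("/sys/firmware/acpi/platform_profile", "w");
        if (!f) {
                perror("platform_profile");
                return 1;
        }
        fputs("performance", f);
        fclose(f);
        return 0;
}
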
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index 493e169c8f61..3e313c4d538d 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -150,7 +150,8 @@ static int __init amilo_rfkill_init(void)
if (rc)
return rc;
- amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
+ amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME,
+ PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(amilo_rfkill_pdev)) {
rc = PTR_ERR(amilo_rfkill_pdev);
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index ffe98a18440b..ca33df7ea550 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -21,7 +21,6 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
-#include <acpi/video.h>
#include <asm/io.h>
/**
@@ -694,7 +693,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
* backlight control and supports more levels than other options.
* Disable the other backlight choices.
*/
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
apple_bl_unregister();
gmux_data->power_state = VGA_SWITCHEROO_ON;
@@ -804,7 +802,6 @@ static void gmux_remove(struct pnp_dev *pnp)
apple_gmux_data = NULL;
kfree(gmux_data);
- acpi_video_register();
apple_bl_register();
}
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 4d2d32bfbe2a..47b2f8bb6fb5 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1633,7 +1633,7 @@ static int asus_platform_init(struct asus_laptop *asus)
{
int result;
- asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
+ asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, PLATFORM_DEVID_NONE);
if (!asus->platform_device)
return -ENOMEM;
platform_set_drvdata(asus->platform_device, asus);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 478dd300b9c9..613c45c9fbe3 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(wapf, "WAPF value");
static int tablet_mode_sw = -1;
module_param(tablet_mode_sw, uint, 0444);
-MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip 3:lid-flip-rog");
static struct quirk_entry *quirks;
@@ -79,12 +79,10 @@ static struct quirk_entry quirk_asus_q500a = {
/*
* For those machines that need software to control bt/wifi status
- * and can't adjust brightness through ACPI interface
* and have duplicate events(ACPI and WMI) for display toggle
*/
static struct quirk_entry quirk_asus_x55u = {
.wapf = 4,
- .wmi_backlight_power = true,
.wmi_backlight_set_devstate = true,
.no_display_toggle = true,
};
@@ -99,11 +97,6 @@ static struct quirk_entry quirk_asus_x200ca = {
.wmi_backlight_set_devstate = true,
};
-static struct quirk_entry quirk_asus_ux303ub = {
- .wmi_backlight_native = true,
- .wmi_backlight_set_devstate = true,
-};
-
static struct quirk_entry quirk_asus_x550lb = {
.wmi_backlight_set_devstate = true,
.xusb2pr = 0x01D9,
@@ -115,12 +108,17 @@ static struct quirk_entry quirk_asus_forceals = {
};
static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
- .use_kbd_dock_devid = true,
+ .tablet_switch_mode = asus_wmi_kbd_dock_devid,
};
static struct quirk_entry quirk_asus_use_lid_flip_devid = {
.wmi_backlight_set_devstate = true,
- .use_lid_flip_devid = true,
+ .tablet_switch_mode = asus_wmi_lid_flip_devid,
+};
+
+static struct quirk_entry quirk_asus_tablet_mode = {
+ .wmi_backlight_set_devstate = true,
+ .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
};
static int dmi_matched(const struct dmi_system_id *dmi)
@@ -147,11 +145,6 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "U32U"),
},
- /*
- * Note this machine has a Brazos APU, and most Brazos Asus
- * machines need quirk_asus_x55u / wmi_backlight_power but
- * here acpi-video seems to work fine for backlight control.
- */
.driver_data = &quirk_asus_wapf4,
},
{
@@ -381,15 +374,6 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. UX303UB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
- },
- .driver_data = &quirk_asus_ux303ub,
- },
- {
- .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. UX330UAK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -471,6 +455,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_use_lid_flip_devid,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
{},
};
@@ -490,20 +483,8 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
else
wapf = quirks->wapf;
- switch (tablet_mode_sw) {
- case 0:
- quirks->use_kbd_dock_devid = false;
- quirks->use_lid_flip_devid = false;
- break;
- case 1:
- quirks->use_kbd_dock_devid = true;
- quirks->use_lid_flip_devid = false;
- break;
- case 2:
- quirks->use_kbd_dock_devid = false;
- quirks->use_lid_flip_devid = true;
- break;
- }
+ if (tablet_mode_sw != -1)
+ quirks->tablet_switch_mode = tablet_mode_sw;
if (quirks->i8042_filter) {
ret = i8042_install_filter(quirks->i8042_filter);
@@ -575,12 +556,14 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
{ KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
{ KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
{ KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
{ KE_KEY, 0xB5, { KEY_CALC } },
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
+ { KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
{ KE_END, 0},
};
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index 6fd0c9fea82d..62310e06282b 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -878,14 +878,12 @@ static int tf103c_dock_probe(struct i2c_client *client)
return 0;
}
-static int tf103c_dock_remove(struct i2c_client *client)
+static void tf103c_dock_remove(struct i2c_client *client)
{
struct tf103c_dock_data *dock = i2c_get_clientdata(client);
tf103c_dock_stop_hpd(dock);
tf103c_dock_disable(dock);
-
- return 0;
}
static int __maybe_unused tf103c_dock_suspend(struct device *dev)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 89b604e04d7f..6e8e093f96b3 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -68,9 +68,11 @@ module_param(fnlock_default, bool, 0444);
#define NOTIFY_KBD_FBM 0x99
#define NOTIFY_KBD_TTP 0xae
#define NOTIFY_LID_FLIP 0xfa
+#define NOTIFY_LID_FLIP_ROG 0xbd
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
+#define ASUS_GPU_FAN_DESC "gpu_fan"
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
@@ -107,7 +109,7 @@ module_param(fnlock_default, bool, 0444);
#define WMI_EVENT_MASK 0xFFFF
#define FAN_CURVE_POINTS 8
-#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2)
+#define FAN_CURVE_BUF_LEN 32
#define FAN_CURVE_DEV_CPU 0x00
#define FAN_CURVE_DEV_GPU 0x01
/* Mask to determine if setting temperature or percentage */
@@ -221,19 +223,25 @@ struct asus_wmi {
struct asus_rfkill gps;
struct asus_rfkill uwb;
+ int tablet_switch_event_code;
+ u32 tablet_switch_dev_id;
+
enum fan_type fan_type;
+ enum fan_type gpu_fan_type;
int fan_pwm_mode;
+ int gpu_fan_pwm_mode;
int agfn_pwm;
bool fan_boost_mode_available;
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
- bool egpu_enable_available; // 0 = enable
- bool egpu_enable;
-
+ bool egpu_enable_available;
bool dgpu_disable_available;
- bool dgpu_disable;
+ bool gpu_mux_mode_available;
+
+ bool kbd_rgb_mode_available;
+ bool kbd_rgb_state_available;
bool throttle_thermal_policy_available;
u8 throttle_thermal_policy_mode;
@@ -249,7 +257,6 @@ struct asus_wmi {
bool battery_rsoc_available;
bool panel_overdrive_available;
- bool panel_overdrive;
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
@@ -486,10 +493,28 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
}
/* Input **********************************************************************/
+static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
+{
+ struct device *dev = &asus->platform_device->dev;
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, dev_id);
+ if (result >= 0) {
+ input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
+ asus->tablet_switch_dev_id = dev_id;
+ asus->tablet_switch_event_code = event_code;
+ } else if (result == -ENODEV) {
+ dev_err(dev, "This device has a tablet-mode-switch quirk but got ENODEV checking it. This is a bug.\n");
+ } else {
+ dev_err(dev, "Error checking for tablet-mode-switch: %d\n", result);
+ }
+}
static int asus_wmi_input_init(struct asus_wmi *asus)
{
- int err, result;
+ struct device *dev = &asus->platform_device->dev;
+ int err;
asus->inputdev = input_allocate_device();
if (!asus->inputdev)
@@ -498,35 +523,25 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
asus->inputdev->name = asus->driver->input_name;
asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
- asus->inputdev->dev.parent = &asus->platform_device->dev;
+ asus->inputdev->dev.parent = dev;
set_bit(EV_REP, asus->inputdev->evbit);
err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
if (err)
goto err_free_dev;
- if (asus->driver->quirks->use_kbd_dock_devid) {
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_KBD_DOCK);
- if (result >= 0) {
- input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
- input_report_switch(asus->inputdev, SW_TABLET_MODE, !result);
- } else if (result != -ENODEV) {
- pr_err("Error checking for keyboard-dock: %d\n", result);
- }
- }
-
- if (asus->driver->quirks->use_lid_flip_devid) {
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_LID_FLIP);
- if (result < 0)
- asus->driver->quirks->use_lid_flip_devid = 0;
- if (result >= 0) {
- input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
- } else if (result == -ENODEV) {
- pr_err("This device has lid_flip quirk but got ENODEV checking it. This is a bug.");
- } else {
- pr_err("Error checking for lid-flip: %d\n", result);
- }
+ switch (asus->driver->quirks->tablet_switch_mode) {
+ case asus_wmi_no_tablet_switch:
+ break;
+ case asus_wmi_kbd_dock_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
+ break;
+ case asus_wmi_lid_flip_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP, NOTIFY_LID_FLIP);
+ break;
+ case asus_wmi_lid_flip_rog_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP_ROG, NOTIFY_LID_FLIP_ROG);
+ break;
}
err = input_register_device(asus->inputdev);
@@ -550,10 +565,14 @@ static void asus_wmi_input_exit(struct asus_wmi *asus)
/* Tablet mode ****************************************************************/
-static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
+static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
{
- int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_LID_FLIP);
+ int result;
+
+ if (!asus->tablet_switch_dev_id)
+ return;
+ result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
if (result >= 0) {
input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
input_sync(asus->inputdev);
@@ -561,179 +580,267 @@ static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
}
/* dGPU ********************************************************************/
-static int dgpu_disable_check_present(struct asus_wmi *asus)
+static ssize_t dgpu_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u32 result;
- int err;
-
- asus->dgpu_disable_available = false;
-
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_DGPU, &result);
- if (err) {
- if (err == -ENODEV)
- return 0;
- return err;
- }
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
- asus->dgpu_disable_available = true;
- asus->dgpu_disable = result & ASUS_WMI_DSTS_STATUS_BIT;
- }
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
+ if (result < 0)
+ return result;
- return 0;
+ return sysfs_emit(buf, "%d\n", result);
}
-static int dgpu_disable_write(struct asus_wmi *asus)
+/*
+ * A user may be required to store the value twice: typically store first, then
+ * rescan the PCI bus to activate power, then store a second time to save correctly.
+ * The reason for this is that an extra code path in the ACPI is enabled when
+ * the device and bus are powered.
+ */
+static ssize_t dgpu_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- u32 retval;
- u8 value;
- int err;
+ int result, err;
+ u32 disable;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
- /* Don't rely on type conversion */
- value = asus->dgpu_disable ? 1 : 0;
+ result = kstrtou32(buf, 10, &disable);
+ if (result)
+ return result;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, value, &retval);
+ if (disable > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
if (err) {
pr_warn("Failed to set dgpu disable: %d\n", err);
return err;
}
- if (retval > 1) {
- pr_warn("Failed to set dgpu disable (retval): 0x%x\n", retval);
+ if (result > 1) {
+ pr_warn("Failed to set dgpu disable (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "dgpu_disable");
- return 0;
+ return count;
}
+static DEVICE_ATTR_RW(dgpu_disable);
-static ssize_t dgpu_disable_show(struct device *dev,
+/* eGPU ********************************************************************/
+static ssize_t egpu_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
- u8 mode = asus->dgpu_disable;
+ int result;
- return sysfs_emit(buf, "%d\n", mode);
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
}
-/*
- * A user may be required to store the value twice, typcial store first, then
- * rescan PCI bus to activate power, then store a second time to save correctly.
- * The reason for this is that an extra code path in the ACPI is enabled when
- * the device and bus are powered.
- */
-static ssize_t dgpu_disable_store(struct device *dev,
+/* The ACPI call to enable the eGPU also disables the internal dGPU */
+static ssize_t egpu_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- bool disable;
- int result;
+ int result, err;
+ u32 enable;
struct asus_wmi *asus = dev_get_drvdata(dev);
- result = kstrtobool(buf, &disable);
- if (result)
- return result;
+ err = kstrtou32(buf, 10, &enable);
+ if (err)
+ return err;
- asus->dgpu_disable = disable;
+ if (enable > 1)
+ return -EINVAL;
- result = dgpu_disable_write(asus);
- if (result)
- return result;
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
+ if (err) {
+ pr_warn("Failed to set egpu enable: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set egpu enable (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
return count;
}
+static DEVICE_ATTR_RW(egpu_enable);
-static DEVICE_ATTR_RW(dgpu_disable);
+/* gpu mux switch *************************************************************/
+static ssize_t gpu_mux_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
-/* eGPU ********************************************************************/
-static int egpu_enable_check_present(struct asus_wmi *asus)
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static ssize_t gpu_mux_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- u32 result;
- int err;
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 optimus;
+
+ err = kstrtou32(buf, 10, &optimus);
+ if (err)
+ return err;
- asus->egpu_enable_available = false;
+ if (optimus > 1)
+ return -EINVAL;
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_EGPU, &result);
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
if (err) {
- if (err == -ENODEV)
- return 0;
+ dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
return err;
}
-
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
- asus->egpu_enable_available = true;
- asus->egpu_enable = result & ASUS_WMI_DSTS_STATUS_BIT;
+ /* !1 is considered a fail by ASUS */
+ if (result != 1) {
+ dev_warn(dev, "Failed to set GPU MUX mode (result): 0x%x\n", result);
+ return -EIO;
}
- return 0;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "gpu_mux_mode");
+
+ return count;
}
+static DEVICE_ATTR_RW(gpu_mux_mode);
-static int egpu_enable_write(struct asus_wmi *asus)
+/* TUF Laptop Keyboard RGB Modes **********************************************/
+static ssize_t kbd_rgb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- u32 retval;
- u8 value;
+ u32 cmd, mode, r, g, b, speed;
int err;
- /* Don't rely on type conversion */
- value = asus->egpu_enable ? 1 : 0;
+ if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
+ return -EINVAL;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, value, &retval);
+ cmd = !!cmd;
- if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
- return err;
- }
+ /* These are the known usable modes across all TUF/ROG */
+ if (mode >= 12 || mode == 9)
+ mode = 10;
- if (retval > 1) {
- pr_warn("Failed to set egpu disable (retval): 0x%x\n", retval);
- return -EIO;
+ switch (speed) {
+ case 0:
+ speed = 0xe1;
+ break;
+ case 1:
+ speed = 0xeb;
+ break;
+ case 2:
+ speed = 0xf5;
+ break;
+ default:
+ speed = 0xeb;
}
- sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE,
+ cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL);
+ if (err)
+ return err;
- return 0;
+ return count;
}
+static DEVICE_ATTR_WO(kbd_rgb_mode);
-static ssize_t egpu_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t kbd_rgb_mode_index_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
- struct asus_wmi *asus = dev_get_drvdata(dev);
- bool mode = asus->egpu_enable;
-
- return sysfs_emit(buf, "%d\n", mode);
+ return sysfs_emit(buf, "%s\n", "cmd mode red green blue speed");
}
+static DEVICE_ATTR_RO(kbd_rgb_mode_index);
-/* The ACPI call to enable the eGPU also disables the internal dGPU */
-static ssize_t egpu_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- bool enable;
- int result;
-
- struct asus_wmi *asus = dev_get_drvdata(dev);
+static struct attribute *kbd_rgb_mode_attrs[] = {
+ &dev_attr_kbd_rgb_mode.attr,
+ &dev_attr_kbd_rgb_mode_index.attr,
+ NULL,
+};
- result = kstrtobool(buf, &enable);
- if (result)
- return result;
+static const struct attribute_group kbd_rgb_mode_group = {
+ .attrs = kbd_rgb_mode_attrs,
+};
- asus->egpu_enable = enable;
+/* TUF Laptop Keyboard RGB State **********************************************/
+static ssize_t kbd_rgb_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 flags, cmd, boot, awake, sleep, keyboard;
+ int err;
- result = egpu_enable_write(asus);
- if (result)
- return result;
+ if (sscanf(buf, "%d %d %d %d %d", &cmd, &boot, &awake, &sleep, &keyboard) != 5)
+ return -EINVAL;
- /* Ensure that the kernel status of dgpu is updated */
- result = dgpu_disable_check_present(asus);
- if (result)
- return result;
+ if (cmd)
+ cmd = BIT(2);
+
+ flags = 0;
+ if (boot)
+ flags |= BIT(1);
+ if (awake)
+ flags |= BIT(3);
+ if (sleep)
+ flags |= BIT(5);
+ if (keyboard)
+ flags |= BIT(7);
+
+ /* 0xbd is the required default arg0 for the method. Nothing happens otherwise */
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS,
+ ASUS_WMI_DEVID_TUF_RGB_STATE, 0xbd | cmd << 8 | (flags << 16), 0, NULL);
+ if (err)
+ return err;
return count;
}
+static DEVICE_ATTR_WO(kbd_rgb_state);
-static DEVICE_ATTR_RW(egpu_enable);
+static ssize_t kbd_rgb_state_index_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", "cmd boot awake sleep keyboard");
+}
+static DEVICE_ATTR_RO(kbd_rgb_state_index);
+
+static struct attribute *kbd_rgb_state_attrs[] = {
+ &dev_attr_kbd_rgb_state.attr,
+ &dev_attr_kbd_rgb_state_index.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_rgb_state_group = {
+ .attrs = kbd_rgb_state_attrs,
+};
+
+static const struct attribute_group *kbd_rgb_mode_groups[] = {
+ NULL,
+ NULL,
+ NULL,
+};
/* Battery ********************************************************************/
@@ -771,7 +878,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", charge_end_threshold);
+ return sysfs_emit(buf, "%d\n", charge_end_threshold);
}
static DEVICE_ATTR_RW(charge_control_end_threshold);
@@ -1053,7 +1160,12 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
static int asus_wmi_led_init(struct asus_wmi *asus)
{
- int rv = 0, led_val;
+ int rv = 0, num_rgb_groups = 0, led_val;
+
+ if (asus->kbd_rgb_mode_available)
+ kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_mode_group;
+ if (asus->kbd_rgb_state_available)
+ kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_state_group;
asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!asus->led_workqueue)
@@ -1081,6 +1193,9 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
asus->kbd_led.brightness_get = kbd_led_get;
asus->kbd_led.max_brightness = 3;
+ if (num_rgb_groups != 0)
+ asus->kbd_led.groups = kbd_rgb_mode_groups;
+
rv = led_classdev_register(&asus->platform_device->dev,
&asus->kbd_led);
if (rv)
@@ -1118,7 +1233,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
- asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
@@ -1555,84 +1670,51 @@ exit:
}
/* Panel Overdrive ************************************************************/
-static int panel_od_check_present(struct asus_wmi *asus)
-{
- u32 result;
- int err;
-
- asus->panel_overdrive_available = false;
-
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_PANEL_OD, &result);
- if (err) {
- if (err == -ENODEV)
- return 0;
- return err;
- }
-
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
- asus->panel_overdrive_available = true;
- asus->panel_overdrive = result & ASUS_WMI_DSTS_STATUS_BIT;
- }
-
- return 0;
-}
-
-static int panel_od_write(struct asus_wmi *asus)
-{
- u32 retval;
- u8 value;
- int err;
-
- /* Don't rely on type conversion */
- value = asus->panel_overdrive ? 1 : 0;
-
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, value, &retval);
-
- if (err) {
- pr_warn("Failed to set panel overdrive: %d\n", err);
- return err;
- }
-
- if (retval > 1) {
- pr_warn("Failed to set panel overdrive (retval): 0x%x\n", retval);
- return -EIO;
- }
-
- sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
-
- return 0;
-}
-
static ssize_t panel_od_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
- return sysfs_emit(buf, "%d\n", asus->panel_overdrive);
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_PANEL_OD);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
}
static ssize_t panel_od_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- bool overdrive;
- int result;
+ int result, err;
+ u32 overdrive;
struct asus_wmi *asus = dev_get_drvdata(dev);
- result = kstrtobool(buf, &overdrive);
+ result = kstrtou32(buf, 10, &overdrive);
if (result)
return result;
- asus->panel_overdrive = overdrive;
- result = panel_od_write(asus);
+ if (overdrive > 1)
+ return -EINVAL;
- if (result)
- return result;
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, overdrive, &result);
+
+ if (err) {
+ pr_warn("Failed to set panel overdrive: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set panel overdrive (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
return count;
}
-
static DEVICE_ATTR_RW(panel_od);
/* Quirks *********************************************************************/
@@ -1782,6 +1864,18 @@ static int asus_fan_set_auto(struct asus_wmi *asus)
return -ENXIO;
}
+ /*
+ * Modern models like the G713 also have GPU fan control (this is not AGFN)
+ */
+ if (asus->gpu_fan_type == FAN_TYPE_SPEC83) {
+ status = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
+ 0, &retval);
+ if (status)
+ return status;
+
+ if (retval != 1)
+ return -EIO;
+ }
return 0;
}
@@ -1819,7 +1913,7 @@ static ssize_t pwm1_show(struct device *dev,
value = -1;
}
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pwm1_store(struct device *dev,
@@ -1879,7 +1973,7 @@ static ssize_t fan1_input_show(struct device *dev,
return -ENXIO;
}
- return sprintf(buf, "%d\n", value < 0 ? -1 : value*100);
+ return sysfs_emit(buf, "%d\n", value < 0 ? -1 : value * 100);
}
static ssize_t pwm1_enable_show(struct device *dev,
@@ -1897,7 +1991,7 @@ static ssize_t pwm1_enable_show(struct device *dev,
* in practice on X532FL at least (the bit is always 0) and there's
* also nothing in the DSDT to indicate that this behaviour exists.
*/
- return sprintf(buf, "%d\n", asus->fan_pwm_mode);
+ return sysfs_emit(buf, "%d\n", asus->fan_pwm_mode);
}
static ssize_t pwm1_enable_store(struct device *dev,
@@ -1965,7 +2059,7 @@ static ssize_t fan1_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", ASUS_FAN_DESC);
+ return sysfs_emit(buf, "%s\n", ASUS_FAN_DESC);
}
static ssize_t asus_hwmon_temp1(struct device *dev,
@@ -1984,11 +2078,86 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
deci_kelvin_to_millicelsius(value & 0xFFFF));
}
+/* GPU fan on modern ROG laptops */
+static ssize_t fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int ret;
+
+ ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= 0xffff;
+
+ return sysfs_emit(buf, "%d\n", value * 100);
+}
+
+static ssize_t fan2_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", ASUS_GPU_FAN_DESC);
+}
+
+static ssize_t pwm2_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", asus->gpu_fan_pwm_mode);
+}
+
+static ssize_t pwm2_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int state;
+ int value;
+ int ret;
+ u32 retval;
+
+ ret = kstrtouint(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ switch (state) { /* standard documented hwmon values */
+ case ASUS_FAN_CTRL_FULLSPEED:
+ value = 1;
+ break;
+ case ASUS_FAN_CTRL_AUTO:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
+ value, &retval);
+ if (ret)
+ return ret;
+
+ if (retval != 1)
+ return -EIO;
+
+ asus->gpu_fan_pwm_mode = state;
+ return count;
+}
+
/* Fan1 */
static DEVICE_ATTR_RW(pwm1);
static DEVICE_ATTR_RW(pwm1_enable);
static DEVICE_ATTR_RO(fan1_input);
static DEVICE_ATTR_RO(fan1_label);
+/* Fan2 - GPU fan */
+static DEVICE_ATTR_RW(pwm2_enable);
+static DEVICE_ATTR_RO(fan2_input);
+static DEVICE_ATTR_RO(fan2_label);
/* Temperature */
static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
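
The new fan2/pwm2 attributes surface through the existing hwmon device, so the GPU fan can be read and its control mode toggled just like the CPU fan. A userspace sketch (the hwmon index varies per system and is an assumption here; not part of the patch):

#include <stdio.h>

int main(void)
{
        int rpm = -1;
        FILE *f;

        /* fan2_input reports the GPU fan speed; the driver scales the raw value by 100. */
        f = fopen("/sys/class/hwmon/hwmon5/fan2_input", "r");
        if (f) {
                fscanf(f, "%d", &rpm);
                fclose(f);
                printf("gpu fan: %d rpm\n", rpm);
        }

        /* pwm2_enable follows the standard hwmon convention the code references (2 = automatic). */
        f = fopen("/sys/class/hwmon/hwmon5/pwm2_enable", "w");
        if (!f) {
                perror("pwm2_enable");
                return 1;
        }
        fputs("2", f);
        fclose(f);
        return 0;
}
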
@@ -1996,8 +2165,11 @@ static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
static struct attribute *hwmon_attributes[] = {
&dev_attr_pwm1.attr,
&dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm2_enable.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_label.attr,
+ &dev_attr_fan2_input.attr,
+ &dev_attr_fan2_label.attr,
&dev_attr_temp1_input.attr,
NULL
@@ -2006,7 +2178,7 @@ static struct attribute *hwmon_attributes[] = {
static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev->parent);
u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
@@ -2018,6 +2190,11 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
|| attr == &dev_attr_pwm1_enable.attr) {
if (asus->fan_type == FAN_TYPE_NONE)
return 0;
+ } else if (attr == &dev_attr_fan2_input.attr
+ || attr == &dev_attr_fan2_label.attr
+ || attr == &dev_attr_pwm2_enable.attr) {
+ if (asus->gpu_fan_type == FAN_TYPE_NONE)
+ return 0;
} else if (attr == &dev_attr_temp1_input.attr) {
int err = asus_wmi_get_devstate(asus,
ASUS_WMI_DEVID_THERMAL_CTRL,
@@ -2060,6 +2237,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
static int asus_wmi_fan_init(struct asus_wmi *asus)
{
+ asus->gpu_fan_type = FAN_TYPE_NONE;
asus->fan_type = FAN_TYPE_NONE;
asus->agfn_pwm = -1;
@@ -2068,6 +2246,10 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
else if (asus_wmi_has_agfn_fan(asus))
asus->fan_type = FAN_TYPE_AGFN;
+ /* Modern models like G713 also have GPU fan control */
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL))
+ asus->gpu_fan_type = FAN_TYPE_SPEC83;
+
if (asus->fan_type == FAN_TYPE_NONE)
return -ENODEV;
@@ -2158,7 +2340,7 @@ static ssize_t fan_boost_mode_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", asus->fan_boost_mode);
+ return sysfs_emit(buf, "%d\n", asus->fan_boost_mode);
}
static ssize_t fan_boost_mode_store(struct device *dev,
@@ -2233,8 +2415,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
curves = &asus->custom_fan_curves[fan_idx];
err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
FAN_CURVE_BUF_LEN);
- if (err)
+ if (err) {
+ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
return err;
+ }
fan_curve_copy_from_buf(curves, buf);
curves->device_id = fan_dev;
@@ -2252,9 +2436,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
- fan_dev, err);
- /* Don't cause probe to fail on devices without fan-curves */
return 0;
}
@@ -2711,7 +2892,7 @@ static ssize_t throttle_thermal_policy_show(struct device *dev,
struct asus_wmi *asus = dev_get_drvdata(dev);
u8 mode = asus->throttle_thermal_policy_mode;
- return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+ return sysfs_emit(buf, "%d\n", mode);
}
static ssize_t throttle_thermal_policy_store(struct device *dev,
@@ -3063,9 +3244,7 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
unsigned int key_value = 1;
bool autorelease = 1;
- int result, orig_code;
-
- orig_code = code;
+ int orig_code = code;
if (asus->driver->key_filter) {
asus->driver->key_filter(asus->driver, &code, &key_value,
@@ -3108,30 +3287,18 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
- if (asus->driver->quirks->use_kbd_dock_devid && code == NOTIFY_KBD_DOCK_CHANGE) {
- result = asus_wmi_get_devstate_simple(asus,
- ASUS_WMI_DEVID_KBD_DOCK);
- if (result >= 0) {
- input_report_switch(asus->inputdev, SW_TABLET_MODE,
- !result);
- input_sync(asus->inputdev);
- }
- return;
- }
-
- if (asus->driver->quirks->use_lid_flip_devid && code == NOTIFY_LID_FLIP) {
- lid_flip_tablet_mode_get_state(asus);
+ if (code == asus->tablet_switch_event_code) {
+ asus_wmi_tablet_mode_get_state(asus);
return;
}
- if (asus->fan_boost_mode_available && code == NOTIFY_KBD_FBM) {
- fan_boost_mode_switch_next(asus);
+ if (code == NOTIFY_KBD_FBM || code == NOTIFY_KBD_TTP) {
+ if (asus->fan_boost_mode_available)
+ fan_boost_mode_switch_next(asus);
+ if (asus->throttle_thermal_policy_available)
+ throttle_thermal_policy_switch_next(asus);
return;
- }
- if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
- throttle_thermal_policy_switch_next(asus);
- return;
}
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
@@ -3283,6 +3450,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_touchpad.attr,
&dev_attr_egpu_enable.attr,
&dev_attr_dgpu_disable.attr,
+ &dev_attr_gpu_mux_mode.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
@@ -3294,7 +3462,7 @@ static struct attribute *platform_attributes[] = {
static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev);
bool ok = true;
int devid = -1;
@@ -3313,6 +3481,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
ok = asus->egpu_enable_available;
else if (attr == &dev_attr_dgpu_disable.attr)
ok = asus->dgpu_disable_available;
+ else if (attr == &dev_attr_gpu_mux_mode.attr)
+ ok = asus->gpu_mux_mode_available;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
@@ -3553,7 +3723,6 @@ static int asus_wmi_add(struct platform_device *pdev)
struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
struct asus_wmi *asus;
- const char *chassis_type;
acpi_status status;
int err;
u32 result;
@@ -3574,13 +3743,12 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform;
- err = egpu_enable_check_present(asus);
- if (err)
- goto fail_egpu_enable;
-
- err = dgpu_disable_check_present(asus);
- if (err)
- goto fail_dgpu_disable;
+ asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
+ asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
+ asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
+ asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
err = fan_boost_mode_check_present(asus);
if (err)
@@ -3596,10 +3764,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform_profile_setup;
- err = panel_od_check_present(asus);
- if (err)
- goto fail_panel_od;
-
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
@@ -3635,18 +3799,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (asus->driver->quirks->wmi_force_als_set)
asus_wmi_set_als();
- /* Some Asus desktop boards export an acpi-video backlight interface,
- stop this from showing up */
- chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
- if (chassis_type && !strcmp(chassis_type, "3"))
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
- if (asus->driver->quirks->wmi_backlight_power)
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
- if (asus->driver->quirks->wmi_backlight_native)
- acpi_video_set_dmi_backlight_type(acpi_backlight_native);
-
if (asus->driver->quirks->xusb2pr)
asus_wmi_set_xusb2pr(asus);
@@ -3694,10 +3846,7 @@ fail_platform_profile_setup:
if (asus->platform_profile_support)
platform_profile_remove();
fail_fan_boost_mode:
-fail_egpu_enable:
-fail_dgpu_disable:
fail_platform:
-fail_panel_od:
kfree(asus);
return err;
}
@@ -3756,9 +3905,7 @@ static int asus_hotk_resume(struct device *device)
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
- if (asus->driver->quirks->use_lid_flip_devid)
- lid_flip_tablet_mode_get_state(asus);
-
+ asus_wmi_tablet_mode_get_state(asus);
return 0;
}
@@ -3798,9 +3945,7 @@ static int asus_hotk_restore(struct device *device)
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
- if (asus->driver->quirks->use_lid_flip_devid)
- lid_flip_tablet_mode_get_state(asus);
-
+ asus_wmi_tablet_mode_get_state(asus);
return 0;
}
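
Looking back at the TUF keyboard RGB attributes added earlier in this file, kbd_rgb_mode_index documents the write format as "cmd mode red green blue speed". A userspace sketch (the LED classdev path is an assumption; not part of the patch):

#include <stdio.h>

int main(void)
{
        /* Hypothetical path; the attribute group is attached to the keyboard-backlight LED. */
        FILE *f = fopen("/sys/class/leds/asus::kbd_backlight/kbd_rgb_mode", "w");

        if (!f) {
                perror("kbd_rgb_mode");
                return 1;
        }
        /* Field order per kbd_rgb_mode_index: cmd mode red green blue speed */
        fprintf(f, "1 0 255 64 0 1");
        fclose(f);
        return 0;
}
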
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index b302415bf1d9..65316998b898 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -25,16 +25,20 @@ struct module;
struct key_entry;
struct asus_wmi;
+enum asus_wmi_tablet_switch_mode {
+ asus_wmi_no_tablet_switch,
+ asus_wmi_kbd_dock_devid,
+ asus_wmi_lid_flip_devid,
+ asus_wmi_lid_flip_rog_devid,
+};
+
struct quirk_entry {
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
- bool wmi_backlight_power;
- bool wmi_backlight_native;
bool wmi_backlight_set_devstate;
bool wmi_force_als_set;
- bool use_kbd_dock_devid;
- bool use_lid_flip_devid;
+ enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
* For machines with AMD graphic chips, it will send out WMI event
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 0942f50bd793..e10d2f64dfad 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -721,16 +721,6 @@ static struct attribute *compal_hwmon_attrs[] = {
};
ATTRIBUTE_GROUPS(compal_hwmon);
-static int compal_probe(struct platform_device *);
-static int compal_remove(struct platform_device *);
-static struct platform_driver compal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- },
- .probe = compal_probe,
- .remove = compal_remove,
-};
-
static enum power_supply_property compal_bat_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
@@ -965,6 +955,80 @@ err_wifi:
return ret;
}
+static int compal_probe(struct platform_device *pdev)
+{
+ int err;
+ struct compal_data *data;
+ struct device *hwmon_dev;
+ struct power_supply_config psy_cfg = {};
+
+ if (!extra_features)
+ return 0;
+
+ /* Fan control */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ initialize_fan_control_data(data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "compal", data,
+ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
+ }
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+ psy_cfg.drv_data = data;
+ data->psy = power_supply_register(&compal_device->dev, &psy_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(data->psy)) {
+ err = PTR_ERR(data->psy);
+ goto remove;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+remove:
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ return err;
+}
+
+static int compal_remove(struct platform_device *pdev)
+{
+ struct compal_data *data;
+
+ if (!extra_features)
+ return 0;
+
+ pr_info("Unloading: resetting fan control to motherboard\n");
+ pwm_disable_control();
+
+ data = platform_get_drvdata(pdev);
+ power_supply_unregister(data->psy);
+
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver compal_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = compal_probe,
+ .remove = compal_remove,
+};
+
static int __init compal_init(void)
{
int ret;
@@ -996,7 +1060,7 @@ static int __init compal_init(void)
if (ret)
goto err_backlight;
- compal_device = platform_device_alloc(DRIVER_NAME, -1);
+ compal_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
if (!compal_device) {
ret = -ENOMEM;
goto err_platform_driver;
@@ -1028,54 +1092,6 @@ err_backlight:
return ret;
}
-static int compal_probe(struct platform_device *pdev)
-{
- int err;
- struct compal_data *data;
- struct device *hwmon_dev;
- struct power_supply_config psy_cfg = {};
-
- if (!extra_features)
- return 0;
-
- /* Fan control */
- data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- initialize_fan_control_data(data);
-
- err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
- if (err)
- return err;
-
- hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
- "compal", data,
- compal_hwmon_groups);
- if (IS_ERR(hwmon_dev)) {
- err = PTR_ERR(hwmon_dev);
- goto remove;
- }
-
- /* Power supply */
- initialize_power_supply_data(data);
- psy_cfg.drv_data = data;
- data->psy = power_supply_register(&compal_device->dev, &psy_bat_desc,
- &psy_cfg);
- if (IS_ERR(data->psy)) {
- err = PTR_ERR(data->psy);
- goto remove;
- }
-
- platform_set_drvdata(pdev, data);
-
- return 0;
-
-remove:
- sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
- return err;
-}
-
static void __exit compal_cleanup(void)
{
platform_device_unregister(compal_device);
@@ -1089,25 +1105,6 @@ static void __exit compal_cleanup(void)
pr_info("Driver unloaded\n");
}
-static int compal_remove(struct platform_device *pdev)
-{
- struct compal_data *data;
-
- if (!extra_features)
- return 0;
-
- pr_info("Unloading: resetting fan control to motherboard\n");
- pwm_disable_control();
-
- data = platform_get_drvdata(pdev);
- power_supply_unregister(data->psy);
-
- sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
-
- return 0;
-}
-
-
module_init(compal_init);
module_exit(compal_cleanup);
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index f21248255529..a34e07ef2c79 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -791,7 +791,7 @@ static int __init alienware_wmi_init(void)
ret = platform_driver_register(&platform_driver);
if (ret)
goto fail_platform_driver;
- platform_device = platform_device_alloc("alienware-wmi", -1);
+ platform_device = platform_device_alloc("alienware-wmi", PLATFORM_DEVID_NONE);
if (!platform_device) {
ret = -ENOMEM;
goto fail_platform_device1;
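
[Editor's note: a minimal sketch, not part of this patch. The -1 -> PLATFORM_DEVID_NONE conversions here and in many hunks below are cosmetic: PLATFORM_DEVID_NONE is the self-documenting spelling of the "no id" value used when a driver only ever registers a single instance. The "example-wmi" name below is made up.]

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device *example_pdev;

static int __init example_init(void)
{
	/* Same call shape as the converted drivers above and below. */
	example_pdev = platform_device_register_simple("example-wmi",
						       PLATFORM_DEVID_NONE,
						       NULL, 0);
	return PTR_ERR_OR_ZERO(example_pdev);
}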
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 42beafbc54b2..0ecb7b164750 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -716,7 +716,7 @@ static struct platform_driver dcdbas_driver = {
static const struct platform_device_info dcdbas_dev_info __initconst = {
.name = DRIVER_NAME,
- .id = -1,
+ .id = PLATFORM_DEVID_NONE,
.dma_mask = DMA_BIT_MASK(32),
};
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 1321687d923e..e92c3ad06d69 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -2193,7 +2193,7 @@ static int __init dell_init(void)
ret = platform_driver_register(&platform_driver);
if (ret)
goto fail_platform_driver;
- platform_device = platform_device_alloc("dell-laptop", -1);
+ platform_device = platform_device_alloc("dell-laptop", PLATFORM_DEVID_NONE);
if (!platform_device) {
ret = -ENOMEM;
goto fail_platform_device1;
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index fc086b66f70b..e61bfaf8b5c4 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -441,7 +441,7 @@ static ssize_t location_show(struct device *dev,
i = match_attribute(dev, attr);
if (i > 0)
- return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].location);
+ return sysfs_emit(buf, "%08x", da_tokens[i].location);
return 0;
}
@@ -455,7 +455,7 @@ static ssize_t value_show(struct device *dev,
i = match_attribute(dev, attr);
if (i > 0)
- return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].value);
+ return sysfs_emit(buf, "%08x", da_tokens[i].value);
return 0;
}
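
[Editor's note: hedged sketch of the scnprintf() -> sysfs_emit() conversion applied above (the uncore-frequency hunk later does the same). sysfs_emit() knows a show() buffer is a full PAGE_SIZE sysfs page, so the length argument disappears and misuse is warned about. The attribute name and value are placeholders, not dell-smbios fields.]

#include <linux/device.h>
#include <linux/sysfs.h>

static u32 example_value;

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	/* buf is guaranteed to be a PAGE_SIZE sysfs buffer */
	return sysfs_emit(buf, "%08x\n", example_value);
}
static DEVICE_ATTR_RO(example);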
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index e07d3ba85a3f..0a259a27459f 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -344,6 +344,9 @@ static const struct key_entry dell_wmi_keymap_type_0011[] = {
* They are events with extended data
*/
static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* Backlight brightness change event */
+ { KE_IGNORE, 0x0003, { KEY_RESERVED } },
+
/* Ultra-performance mode switch request */
{ KE_IGNORE, 0x000d, { KEY_RESERVED } },
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
index 074b7e68c227..c82b3d6867c5 100644
--- a/drivers/platform/x86/dell/dell-wmi-privacy.c
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
@@ -174,15 +174,12 @@ static ssize_t dell_privacy_current_state_show(struct device *dev,
static DEVICE_ATTR_RO(dell_privacy_supported_type);
static DEVICE_ATTR_RO(dell_privacy_current_state);
-static struct attribute *privacy_attributes[] = {
+static struct attribute *privacy_attrs[] = {
&dev_attr_dell_privacy_supported_type.attr,
&dev_attr_dell_privacy_current_state.attr,
NULL,
};
-
-static const struct attribute_group privacy_attribute_group = {
- .attrs = privacy_attributes
-};
+ATTRIBUTE_GROUPS(privacy);
/*
* Describes the Device State class exposed by BIOS which can be consumed by
@@ -342,10 +339,6 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
if (ret)
return ret;
- ret = devm_device_add_group(&wdev->dev, &privacy_attribute_group);
- if (ret)
- return ret;
-
if (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO)) {
ret = dell_privacy_leds_setup(&priv->wdev->dev);
if (ret)
@@ -374,6 +367,7 @@ static const struct wmi_device_id dell_wmi_privacy_wmi_id_table[] = {
static struct wmi_driver dell_privacy_wmi_driver = {
.driver = {
.name = "dell-privacy",
+ .dev_groups = privacy_groups,
},
.probe = dell_privacy_wmi_probe,
.remove = dell_privacy_wmi_remove,
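
[Editor's note: sketch, with made-up names, of the ATTRIBUTE_GROUPS()/.dev_groups idiom applied above (and again to intel-wmi-thunderbolt further down). The macro expands example_attrs into example_groups, and assigning that to driver.dev_groups lets the driver core create and remove the sysfs files, so probe()/remove() need no sysfs calls at all.]

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/wmi.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};
ATTRIBUTE_GROUPS(example);		/* generates example_groups[] */

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name       = "example",
		.dev_groups = example_groups,	/* driver core adds/removes the group */
	},
	/* .probe and .remove need no sysfs handling at all */
};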
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 636bdfa83284..0a6411a8a104 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -270,7 +270,7 @@ void strlcpy_attr(char *dest, char *src)
size_t len = strlen(src) + 1;
if (len > 1 && len <= MAX_BUFF)
- strlcpy(dest, src, len);
+ strscpy(dest, src, len);
/*len can be zero because any property not-applicable to attribute can
* be empty so check only for too long buffers and log error
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index e9f4b30dcafa..9f51e0fcab04 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -645,7 +645,7 @@ static int __init dcdrbu_init(void)
spin_lock_init(&rbu_data.lock);
init_packet_head();
- rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0);
+ rbu_device = platform_device_register_simple("dell_rbu", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(rbu_device)) {
pr_err("platform_device_register_simple failed\n");
return PTR_ERR(rbu_device);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index ba08c9235f76..a388a28b6f2a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -444,7 +444,7 @@ static int eeepc_platform_init(struct eeepc_laptop *eeepc)
{
int result;
- eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, -1);
+ eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, PLATFORM_DEVID_NONE);
if (!eeepc->platform_device)
return -ENOMEM;
platform_set_drvdata(eeepc->platform_device, eeepc);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index ce86d84ee796..32d9f0ba6be3 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -96,11 +96,6 @@ static struct quirk_entry quirk_asus_et2012_type3 = {
.store_backlight_power = true,
};
-static struct quirk_entry quirk_asus_x101ch = {
- /* We need this when ACPI function doesn't do this well */
- .wmi_backlight_power = true,
-};
-
static struct quirk_entry *quirks;
static void et2012_quirks(void)
@@ -151,25 +146,7 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_unknown,
},
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK Computer INC. X101CH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
- },
- .driver_data = &quirk_asus_x101ch,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK Computer INC. 1015CX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
- },
- .driver_data = &quirk_asus_x101ch,
- },
- {},
+ {}
};
static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 80929380ec7e..b543d117b12c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -543,7 +543,7 @@ static int fujitsu_laptop_platform_add(struct acpi_device *device)
struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
- priv->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+ priv->pf_device = platform_device_alloc("fujitsu-laptop", PLATFORM_DEVID_NONE);
if (!priv->pf_device)
return -ENOMEM;
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 9996485f5295..f11f726d2062 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -547,7 +547,7 @@ static int __init hdaps_init(void)
if (ret)
goto out_region;
- pdev = platform_device_register_simple("hdaps", -1, NULL, 0);
+ pdev = platform_device_register_simple("hdaps", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
goto out_driver;
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index bc7020e9df9e..627a6d0eaf83 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -177,7 +177,8 @@ enum hp_thermal_profile_omen_v1 {
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
- HP_THERMAL_PROFILE_COOL = 0x02
+ HP_THERMAL_PROFILE_COOL = 0x02,
+ HP_THERMAL_PROFILE_QUIET = 0x03,
};
#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
@@ -206,15 +207,17 @@ struct bios_rfkill2_state {
};
static const struct key_entry hp_wmi_keymap[] = {
- { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
- { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
- { KE_KEY, 0x20e6, { KEY_PROG1 } },
- { KE_KEY, 0x20e8, { KEY_MEDIA } },
- { KE_KEY, 0x2142, { KEY_MEDIA } },
- { KE_KEY, 0x213b, { KEY_INFO } },
- { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
- { KE_KEY, 0x216a, { KEY_SETUP } },
- { KE_KEY, 0x231b, { KEY_HELP } },
+ { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x20e6, { KEY_PROG1 } },
+ { KE_KEY, 0x20e8, { KEY_MEDIA } },
+ { KE_KEY, 0x2142, { KEY_MEDIA } },
+ { KE_KEY, 0x213b, { KEY_INFO } },
+ { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
+ { KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } },
+ { KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -1194,6 +1197,9 @@ static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
case HP_THERMAL_PROFILE_COOL:
*profile = PLATFORM_PROFILE_COOL;
break;
+ case HP_THERMAL_PROFILE_QUIET:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
default:
return -EINVAL;
}
@@ -1216,6 +1222,9 @@ static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
case PLATFORM_PROFILE_COOL:
tp = HP_THERMAL_PROFILE_COOL;
break;
+ case PLATFORM_PROFILE_QUIET:
+ tp = HP_THERMAL_PROFILE_QUIET;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -1263,6 +1272,8 @@ static int thermal_profile_setup(void)
platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
}
set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
@@ -1508,7 +1519,7 @@ static int __init hp_wmi_init(void)
if (bios_capable) {
hp_wmi_platform_dev =
- platform_device_register_simple("hp-wmi", -1, NULL, 0);
+ platform_device_register_simple("hp-wmi", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(hp_wmi_platform_dev)) {
err = PTR_ERR(hp_wmi_platform_dev);
goto err_destroy_input;
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index eac3e6b4ea11..5873c2663a65 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -871,7 +871,7 @@ static __init int huawei_wmi_init(void)
if (err)
goto pdrv_err;
- pdev = platform_device_register_simple("huawei-wmi", -1, NULL, 0);
+ pdev = platform_device_register_simple("huawei-wmi", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
goto pdev_err;
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index c52ac23e2331..2c9a7d52be07 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -219,7 +219,7 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
/*
* Update node used in "usb-role-switch" property. Note that we
- * rely on software_node_register_nodes() to use the original
+ * rely on software_node_register_node_group() to use the original
* instance of properties instead of copying them.
*/
fusb302_mux_refs[0].node = mux_ref_node;
@@ -270,7 +270,7 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
}
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
@@ -361,7 +361,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
}
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
board_info.dev_name = "fusb302";
board_info.fwnode = fwnode;
board_info.irq = fusb302_irq;
@@ -381,7 +381,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
board_info.fwnode = fwnode;
- strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+ strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
if (IS_ERR(data->pi3usb30532)) {
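
[Editor's note: sketch of the strlcpy() -> strscpy() conversions above (thinkpad_acpi gets the same treatment later in the patch). strscpy() is a drop-in replacement at these call sites: same arguments, always NUL-terminates, and reports truncation as -E2BIG instead of returning the would-be source length. The helper below is illustrative only.]

#include <linux/i2c.h>
#include <linux/string.h>

static void example_fill_board_info(struct i2c_board_info *info)
{
	/* same arguments as the old strlcpy(); truncation would return -E2BIG */
	strscpy(info->type, "max17047", I2C_NAME_SIZE);
	info->dev_name = "max17047";
}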
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index 77cf058e4168..9db2bb0bbba4 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -62,7 +62,7 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
struct acpi_device *sensor;
int ret = 0;
- sensor = acpi_dev_get_first_consumer_dev(adev);
+ sensor = acpi_dev_get_next_consumer_dev(adev, NULL);
if (!sensor) {
dev_err(dev, "INT3472 seems to have no dependents.\n");
return -ENODEV;
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index ed4c9d760757..974a132db651 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -331,7 +331,22 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
return 0;
}
-static int skl_int3472_discrete_remove(struct platform_device *pdev);
+static int skl_int3472_discrete_remove(struct platform_device *pdev)
+{
+ struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
+
+ gpiod_remove_lookup_table(&int3472->gpios);
+
+ if (int3472->clock.cl)
+ skl_int3472_unregister_clock(int3472);
+
+ gpiod_put(int3472->clock.ena_gpio);
+ gpiod_put(int3472->clock.led_gpio);
+
+ skl_int3472_unregister_regulator(int3472);
+
+ return 0;
+}
static int skl_int3472_discrete_probe(struct platform_device *pdev)
{
@@ -383,23 +398,6 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
return 0;
}
-static int skl_int3472_discrete_remove(struct platform_device *pdev)
-{
- struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
-
- gpiod_remove_lookup_table(&int3472->gpios);
-
- if (int3472->clock.cl)
- skl_int3472_unregister_clock(int3472);
-
- gpiod_put(int3472->clock.ena_gpio);
- gpiod_put(int3472->clock.led_gpio);
-
- skl_int3472_unregister_regulator(int3472);
-
- return 0;
-}
-
static const struct acpi_device_id int3472_device_id[] = {
{ "INT3472", 0 },
{ }
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 22f61b47f9e5..f83e9c393f31 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Author: Dan Scally <djrscally@gmail.com> */
+#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
@@ -95,20 +96,65 @@ static int skl_int3472_tps68470_calc_type(struct acpi_device *adev)
return DESIGNED_FOR_WINDOWS;
}
+/*
+ * Return the size of the flexible array member, because we'll need that later
+ * on to pass .pdata_size to cells.
+ */
+static int
+skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data **clk_pdata)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *consumer;
+ unsigned int n_consumers = 0;
+ const char *sensor_name;
+ unsigned int i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer)
+ n_consumers++;
+
+ if (!n_consumers) {
+ dev_err(dev, "INT3472 seems to have no dependents\n");
+ return -ENODEV;
+ }
+
+ *clk_pdata = devm_kzalloc(dev, struct_size(*clk_pdata, consumers, n_consumers),
+ GFP_KERNEL);
+ if (!*clk_pdata)
+ return -ENOMEM;
+
+ (*clk_pdata)->n_consumers = n_consumers;
+ i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer) {
+ sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(consumer));
+ if (!sensor_name)
+ return -ENOMEM;
+
+ (*clk_pdata)->consumers[i].consumer_dev_name = sensor_name;
+ i++;
+ }
+
+ acpi_dev_put(consumer);
+
+ return n_consumers;
+}
+
static int skl_int3472_tps68470_probe(struct i2c_client *client)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
const struct int3472_tps68470_board_data *board_data;
- struct tps68470_clk_platform_data clk_pdata = {};
+ struct tps68470_clk_platform_data *clk_pdata;
struct mfd_cell *cells;
struct regmap *regmap;
+ int n_consumers;
int device_type;
int ret;
+ int i;
- ret = skl_int3472_get_sensor_adev_and_name(&client->dev, NULL,
- &clk_pdata.consumer_dev_name);
- if (ret)
- return ret;
+ n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
+ if (n_consumers < 0)
+ return n_consumers;
regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
if (IS_ERR(regmap)) {
@@ -142,22 +188,25 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
* the clk + regulators must be ready when this happens.
*/
cells[0].name = "tps68470-clk";
- cells[0].platform_data = &clk_pdata;
- cells[0].pdata_size = sizeof(clk_pdata);
+ cells[0].platform_data = clk_pdata;
+ cells[0].pdata_size = struct_size(clk_pdata, consumers, n_consumers);
cells[1].name = "tps68470-regulator";
cells[1].platform_data = (void *)board_data->tps68470_regulator_pdata;
cells[1].pdata_size = sizeof(struct tps68470_regulator_platform_data);
cells[2].name = "tps68470-gpio";
- gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_table);
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
cells, TPS68470_WIN_MFD_CELL_COUNT,
NULL, 0, NULL);
kfree(cells);
- if (ret)
- gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table);
+ if (ret) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
break;
case DESIGNED_FOR_CHROMEOS:
@@ -178,15 +227,16 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
return ret;
}
-static int skl_int3472_tps68470_remove(struct i2c_client *client)
+static void skl_int3472_tps68470_remove(struct i2c_client *client)
{
const struct int3472_tps68470_board_data *board_data;
+ int i;
board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
- if (board_data)
- gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table);
-
- return 0;
+ if (board_data) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
}
static const struct acpi_device_id int3472_device_id[] = {
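
[Editor's note: reduced sketch of the allocation pattern used in skl_int3472_fill_clk_pdata() above. The clock platform data becomes a flexible-array struct sized with struct_size(), and the same expression is reused for cells[0].pdata_size so the MFD core copies the whole payload, consumers included. Struct and field names below are illustrative, not the tps68470 ones.]

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_consumer {
	const char *dev_name;
};

struct example_pdata {
	unsigned int n_consumers;
	struct example_consumer consumers[];	/* flexible array member */
};

static struct example_pdata *example_alloc_pdata(struct device *dev,
						 unsigned int n_consumers)
{
	struct example_pdata *pdata;

	/* sizeof(*pdata) + n_consumers * sizeof(consumers[0]), overflow-checked */
	pdata = devm_kzalloc(dev, struct_size(pdata, consumers, n_consumers),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->n_consumers = n_consumers;
	return pdata;
}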
diff --git a/drivers/platform/x86/intel/int3472/tps68470.h b/drivers/platform/x86/intel/int3472/tps68470.h
index cfd33eb62740..35915e701593 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.h
+++ b/drivers/platform/x86/intel/int3472/tps68470.h
@@ -16,8 +16,9 @@ struct tps68470_regulator_platform_data;
struct int3472_tps68470_board_data {
const char *dev_name;
- struct gpiod_lookup_table *tps68470_gpio_lookup_table;
const struct tps68470_regulator_platform_data *tps68470_regulator_pdata;
+ unsigned int n_gpiod_lookups;
+ struct gpiod_lookup_table *tps68470_gpio_lookup_tables[];
};
const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name);
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index 525f09a3b5ff..309eab9c0558 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -30,6 +30,15 @@ static struct regulator_consumer_supply int347a_vcm_consumer_supplies[] = {
static struct regulator_consumer_supply int347a_vsio_consumer_supplies[] = {
REGULATOR_SUPPLY("dovdd", "i2c-INT347A:00"),
REGULATOR_SUPPLY("vsio", "i2c-INT347A:00-VCM"),
+ REGULATOR_SUPPLY("vddd", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux1_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdda", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux2_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdddo", "i2c-INT347E:00"),
};
static const struct regulator_init_data surface_go_tps68470_core_reg_init_data = {
@@ -86,6 +95,28 @@ static const struct regulator_init_data surface_go_tps68470_vsio_reg_init_data =
.consumer_supplies = int347a_vsio_consumer_supplies,
};
+static const struct regulator_init_data surface_go_tps68470_aux1_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux1_consumer_supplies),
+ .consumer_supplies = int347a_aux1_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_aux2_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux2_consumer_supplies),
+ .consumer_supplies = int347a_aux2_consumer_supplies,
+};
+
static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = {
.reg_init_data = {
[TPS68470_CORE] = &surface_go_tps68470_core_reg_init_data,
@@ -93,10 +124,12 @@ static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata =
[TPS68470_VCM] = &surface_go_tps68470_vcm_reg_init_data,
[TPS68470_VIO] = &surface_go_tps68470_vio_reg_init_data,
[TPS68470_VSIO] = &surface_go_tps68470_vsio_reg_init_data,
+ [TPS68470_AUX1] = &surface_go_tps68470_aux1_reg_init_data,
+ [TPS68470_AUX2] = &surface_go_tps68470_aux2_reg_init_data,
},
};
-static struct gpiod_lookup_table surface_go_tps68470_gpios = {
+static struct gpiod_lookup_table surface_go_int347a_gpios = {
.dev_id = "i2c-INT347A:00",
.table = {
GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
@@ -105,16 +138,31 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
}
};
+static struct gpiod_lookup_table surface_go_int347e_gpios = {
+ .dev_id = "i2c-INT347E:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 5, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = {
.dev_name = "i2c-INT3472:05",
- .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 2,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
+ },
};
static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
.dev_name = "i2c-INT3472:01",
- .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 1,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios
+ },
};
static const struct dmi_system_id int3472_tps68470_board_data_table[] = {
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index 1a09a75bd16d..7c5c623630c1 100644
--- a/drivers/platform/x86/intel/oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -317,7 +317,7 @@ static int __init oaktrail_init(void)
goto err_driver_reg;
}
- oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
if (!oaktrail_device) {
pr_warn("Unable to allocate platform device\n");
ret = -ENOMEM;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 84eabd6156bb..cb24de9e97dc 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -113,7 +113,7 @@ show_uncore_perf_status(current_freq_khz);
struct uncore_data *data = container_of(attr, struct uncore_data,\
member_name##_dev_attr);\
\
- return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ return sysfs_emit(buf, "%u\n", \
data->member_name); \
} \
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index 4ae87060d18b..fc333ff82d1e 100644
--- a/drivers/platform/x86/intel/wmi/thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -51,26 +51,7 @@ static struct attribute *tbt_attrs[] = {
&dev_attr_force_power.attr,
NULL
};
-
-static const struct attribute_group tbt_attribute_group = {
- .attrs = tbt_attrs,
-};
-
-static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev,
- const void *context)
-{
- int ret;
-
- ret = sysfs_create_group(&wdev->dev.kobj, &tbt_attribute_group);
- kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
- return ret;
-}
-
-static void intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
-{
- sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
- kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
-}
+ATTRIBUTE_GROUPS(tbt);
static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
{ .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
@@ -80,9 +61,8 @@ static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
static struct wmi_driver intel_wmi_thunderbolt_driver = {
.driver = {
.name = "intel-wmi-thunderbolt",
+ .dev_groups = tbt_groups,
},
- .probe = intel_wmi_thunderbolt_probe,
- .remove = intel_wmi_thunderbolt_remove,
.id_table = intel_wmi_thunderbolt_id_table,
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 5e072a0666f4..2fac05a17a5c 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -5181,7 +5181,7 @@ static int __init mlxplat_init(void)
if (!dmi_check_system(mlxplat_dmi_table))
return -ENODEV;
- mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
+ mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, PLATFORM_DEVID_NONE,
mlxplat_lpc_resources,
ARRAY_SIZE(mlxplat_lpc_resources));
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 24ffc8e2d2d1..6b18ec543ac3 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -53,8 +53,6 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
-#define MSI_DRIVER_VERSION "0.5"
-
#define MSI_LCD_LEVEL_MAX 9
#define MSI_EC_COMMAND_WIRELESS 0x10
@@ -592,15 +590,22 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
return 1;
}
+static unsigned long msi_work_delay(int msecs)
+{
+ if (quirks->ec_delay)
+ return msecs_to_jiffies(msecs);
+
+ return 0;
+}
+
static const struct dmi_system_id msi_dmi_table[] __initconst = {
{
.ident = "MSI S270",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -633,8 +638,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -705,6 +709,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
},
{ }
};
+MODULE_DEVICE_TABLE(dmi, msi_dmi_table);
static int rfkill_bluetooth_set(void *data, bool blocked)
{
@@ -785,7 +790,6 @@ static void msi_update_rfkill(struct work_struct *ignored)
msi_rfkill_set_state(rfk_threeg, !threeg_s);
}
static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
-static DECLARE_WORK(msi_rfkill_work, msi_update_rfkill);
static void msi_send_touchpad_key(struct work_struct *ignored)
{
@@ -801,7 +805,6 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
}
static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
-static DECLARE_WORK(msi_touchpad_work, msi_send_touchpad_key);
static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
@@ -819,20 +822,12 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
extended = false;
switch (data) {
case 0xE4:
- if (quirks->ec_delay) {
- schedule_delayed_work(&msi_touchpad_dwork,
- round_jiffies_relative(0.5 * HZ));
- } else
- schedule_work(&msi_touchpad_work);
+ schedule_delayed_work(&msi_touchpad_dwork, msi_work_delay(500));
break;
case 0x54:
case 0x62:
case 0x76:
- if (quirks->ec_delay) {
- schedule_delayed_work(&msi_rfkill_dwork,
- round_jiffies_relative(0.5 * HZ));
- } else
- schedule_work(&msi_rfkill_work);
+ schedule_delayed_work(&msi_rfkill_dwork, msi_work_delay(500));
break;
}
}
@@ -899,12 +894,7 @@ static int rfkill_init(struct platform_device *sdev)
}
/* schedule to run rfkill state initial */
- if (quirks->ec_delay) {
- schedule_delayed_work(&msi_rfkill_init,
- round_jiffies_relative(1 * HZ));
- } else
- schedule_work(&msi_rfkill_work);
-
+ schedule_delayed_work(&msi_rfkill_init, msi_work_delay(1000));
return 0;
err_threeg:
@@ -921,8 +911,7 @@ err_bluetooth:
return retval;
}
-#ifdef CONFIG_PM_SLEEP
-static int msi_laptop_resume(struct device *device)
+static int msi_scm_disable_hw_fn_handling(void)
{
u8 data;
int result;
@@ -942,6 +931,12 @@ static int msi_laptop_resume(struct device *device)
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int msi_laptop_resume(struct device *device)
+{
+ return msi_scm_disable_hw_fn_handling();
+}
#endif
static int __init msi_laptop_input_setup(void)
@@ -974,7 +969,6 @@ err_free_dev:
static int __init load_scm_model_init(struct platform_device *sdev)
{
- u8 data;
int result;
if (!quirks->ec_read_only) {
@@ -988,12 +982,7 @@ static int __init load_scm_model_init(struct platform_device *sdev)
}
/* disable hardware control by fn key */
- result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
- if (result < 0)
- return result;
-
- result = ec_write(MSI_STANDARD_EC_SCM_LOAD_ADDRESS,
- data | MSI_STANDARD_EC_SCM_LOAD_MASK);
+ result = msi_scm_disable_hw_fn_handling();
if (result < 0)
return result;
@@ -1022,9 +1011,19 @@ fail_input:
rfkill_cleanup();
fail_rfkill:
-
return result;
+}
+
+static void msi_scm_model_exit(void)
+{
+ if (!quirks->load_scm_model)
+ return;
+ i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
+ input_unregister_device(msi_laptop_input_dev);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ rfkill_cleanup();
}
static int __init msi_init(void)
@@ -1048,8 +1047,7 @@ static int __init msi_init(void)
return -EINVAL;
/* Register backlight stuff */
-
- if (quirks->old_ec_model ||
+ if (quirks->old_ec_model &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -1068,7 +1066,7 @@ static int __init msi_init(void)
/* Register platform stuff */
- msipf_device = platform_device_alloc("msi-laptop-pf", -1);
+ msipf_device = platform_device_alloc("msi-laptop-pf", PLATFORM_DEVID_NONE);
if (!msipf_device) {
ret = -ENOMEM;
goto fail_platform_driver;
@@ -1108,19 +1106,12 @@ static int __init msi_init(void)
set_auto_brightness(auto_brightness);
}
- pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
-
return 0;
fail_create_attr:
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
fail_create_group:
- if (quirks->load_scm_model) {
- i8042_remove_filter(msi_laptop_i8042_filter);
- cancel_delayed_work_sync(&msi_rfkill_dwork);
- cancel_work_sync(&msi_rfkill_work);
- rfkill_cleanup();
- }
+ msi_scm_model_exit();
fail_scm_model_init:
platform_device_del(msipf_device);
fail_device_add:
@@ -1135,14 +1126,7 @@ fail_backlight:
static void __exit msi_cleanup(void)
{
- if (quirks->load_scm_model) {
- i8042_remove_filter(msi_laptop_i8042_filter);
- input_unregister_device(msi_laptop_input_dev);
- cancel_delayed_work_sync(&msi_rfkill_dwork);
- cancel_work_sync(&msi_rfkill_work);
- rfkill_cleanup();
- }
-
+ msi_scm_model_exit();
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
if (!quirks->old_ec_model && threeg_exists)
device_remove_file(&msipf_device->dev, &dev_attr_threeg);
@@ -1155,8 +1139,6 @@ static void __exit msi_cleanup(void)
if (auto_brightness != 2)
set_auto_brightness(1);
}
-
- pr_info("driver unloaded\n");
}
module_init(msi_init);
@@ -1164,16 +1146,4 @@ module_exit(msi_cleanup);
MODULE_AUTHOR("Lennart Poettering");
MODULE_DESCRIPTION("MSI Laptop Support");
-MODULE_VERSION(MSI_DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("dmi:*:svnMICRO-STARINT'LCO.,LTD:pnMS-1013:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1058:pvr0581:rvnMSI:rnMS-1058:*:ct10:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1412:*:rvnMSI:rnMS-1412:*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
-MODULE_ALIAS("dmi:*:svnNOTEBOOK:pnSAM2000:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnU90/U100:*");
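
[Editor's note: sketch of the workqueue consolidation in the msi-laptop hunks above. With a delay of 0, schedule_delayed_work() queues immediately, so a single delayed-work item per event, fed by msi_work_delay() returning either 0 or msecs_to_jiffies(), replaces the duplicated plain-work/delayed-work pairs. Names below are placeholders.]

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static void example_handler(struct work_struct *work)
{
	/* handle the EC event here */
}
static DECLARE_DELAYED_WORK(example_dwork, example_handler);

static void example_event(bool needs_ec_delay)
{
	/* a delay of 0 behaves like schedule_work() */
	unsigned long delay = needs_ec_delay ? msecs_to_jiffies(500) : 0;

	schedule_delayed_work(&example_dwork, delay);
}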
diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
index 61e37194df70..baccdf658538 100644
--- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
@@ -7,73 +7,10 @@
#include <linux/backlight.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/types.h>
#include <linux/wmi.h>
-
-/**
- * enum wmi_brightness_method - WMI method IDs
- * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
- * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
- */
-enum wmi_brightness_method {
- WMI_BRIGHTNESS_METHOD_LEVEL = 1,
- WMI_BRIGHTNESS_METHOD_SOURCE = 2,
- WMI_BRIGHTNESS_METHOD_MAX
-};
-
-/**
- * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
- * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
- * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
- * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
- * is only valid when the WMI method is
- * %WMI_BRIGHTNESS_METHOD_LEVEL.
- */
-enum wmi_brightness_mode {
- WMI_BRIGHTNESS_MODE_GET = 0,
- WMI_BRIGHTNESS_MODE_SET = 1,
- WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
- WMI_BRIGHTNESS_MODE_MAX
-};
-
-/**
- * enum wmi_brightness_source - Backlight brightness control source selection
- * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
- * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
- * system's Embedded Controller (EC).
- * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
- * DisplayPort AUX channel.
- */
-enum wmi_brightness_source {
- WMI_BRIGHTNESS_SOURCE_GPU = 1,
- WMI_BRIGHTNESS_SOURCE_EC = 2,
- WMI_BRIGHTNESS_SOURCE_AUX = 3,
- WMI_BRIGHTNESS_SOURCE_MAX
-};
-
-/**
- * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
- * @mode: Pass in an &enum wmi_brightness_mode value to select between
- * getting or setting a value.
- * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
- * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
- * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
- * @ret: Out parameter returning retrieved value when operating in
- * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
- * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
- * @ignored: Padding; not used. The ACPI method expects a 24 byte params struct.
- *
- * This is the parameters structure for the WmiBrightnessNotify ACPI method as
- * wrapped by WMI. The value passed in to @val or returned by @ret will be a
- * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
- * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
- */
-struct wmi_brightness_args {
- u32 mode;
- u32 val;
- u32 ret;
- u32 ignored[3];
-};
+#include <acpi/video.h>
/**
* wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
@@ -151,19 +88,10 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
{
struct backlight_properties props = {};
struct backlight_device *bdev;
- u32 source;
int ret;
- ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_SOURCE,
- WMI_BRIGHTNESS_MODE_GET, &source);
- if (ret)
- return ret;
-
- /*
- * This driver is only to be used when brightness control is handled
- * by the EC; otherwise, the GPU driver(s) should control brightness.
- */
- if (source != WMI_BRIGHTNESS_SOURCE_EC)
+ /* drivers/acpi/video_detect.c also checks that SOURCE == EC */
+ if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
return -ENODEV;
/*
@@ -191,8 +119,6 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
return PTR_ERR_OR_ZERO(bdev);
}
-#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
-
static const struct wmi_device_id nvidia_wmi_ec_backlight_id_table[] = {
{ .guid_string = WMI_BRIGHTNESS_GUID },
{ }
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index fb2e141f3eb8..384d0962ae93 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -42,10 +42,24 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
+/* Copy resource from the first BAR of the device in question */
static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- /* Copy resource from the first BAR of the device in question */
- *mem = pdev->resource[0];
+ struct resource *bar0 = &pdev->resource[0];
+
+ /* Make sure we have no dangling pointers in the output */
+ memset(mem, 0, sizeof(*mem));
+
+ /*
+ * We copy only selected fields from the original resource.
+ * Because a PCI device will be removed soon, we may not use
+ * any allocated data, hence we may not copy any pointers.
+ */
+ mem->start = bar0->start;
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+
return 0;
}
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index d9a095d2c0eb..ad3083f9946d 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -1034,7 +1034,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
/* optical drive initialization */
if (ACPI_SUCCESS(check_optd_present())) {
pcc->platform = platform_device_register_simple("panasonic",
- -1, NULL, 0);
+ PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pcc->platform)) {
result = PTR_ERR(pcc->platform);
goto out_backlight;
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 154317e9910d..93a6414c6611 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Intel Atom SOC Power Management Controller Driver
- * Copyright (c) 2014, Intel Corporation.
+ * Intel Atom SoC Power Management Controller Driver
+ * Copyright (c) 2014-2015,2017,2022 Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -60,7 +60,7 @@ static const struct pmc_clk byt_clks[] = {
.freq = 19200000,
.parent_name = "xtal",
},
- {},
+ {}
};
static const struct pmc_clk cht_clks[] = {
@@ -69,7 +69,7 @@ static const struct pmc_clk cht_clks[] = {
.freq = 19200000,
.parent_name = NULL,
},
- {},
+ {}
};
static const struct pmc_bit_map d3_sts_0_map[] = {
@@ -105,7 +105,7 @@ static const struct pmc_bit_map d3_sts_0_map[] = {
{"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5},
{"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6},
{"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7},
- {},
+ {}
};
static struct pmc_bit_map byt_d3_sts_1_map[] = {
@@ -113,21 +113,21 @@ static struct pmc_bit_map byt_d3_sts_1_map[] = {
{"OTG_SS_PHY", BIT_OTG_SS_PHY},
{"USH_SS_PHY", BIT_USH_SS_PHY},
{"DFX", BIT_DFX},
- {},
+ {}
};
static struct pmc_bit_map cht_d3_sts_1_map[] = {
{"SMB", BIT_SMB},
{"GMM", BIT_STS_GMM},
{"ISH", BIT_STS_ISH},
- {},
+ {}
};
static struct pmc_bit_map cht_func_dis_2_map[] = {
{"SMB", BIT_SMB},
{"GMM", BIT_FD_GMM},
{"ISH", BIT_FD_ISH},
- {},
+ {}
};
static const struct pmc_bit_map byt_pss_map[] = {
@@ -149,7 +149,7 @@ static const struct pmc_bit_map byt_pss_map[] = {
{"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
{"USB", PMC_PSS_BIT_USB},
{"USB_SUS", PMC_PSS_BIT_USB_SUS},
- {},
+ {}
};
static const struct pmc_bit_map cht_pss_map[] = {
@@ -172,7 +172,7 @@ static const struct pmc_bit_map cht_pss_map[] = {
{"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3},
{"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4},
{"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5},
- {},
+ {}
};
static const struct pmc_reg_map byt_reg_map = {
@@ -232,7 +232,7 @@ static void pmc_power_off(void)
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
- pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
@@ -354,7 +354,7 @@ static bool pmc_clk_is_critical = true;
static int dmi_callback(const struct dmi_system_id *d)
{
- pr_info("%s critclks quirk enabled\n", d->ident);
+ pr_info("%s: PMC critical clocks quirk enabled\n", d->ident);
return 1;
}
@@ -417,8 +417,7 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
},
},
-
- { /*sentinel*/ }
+ {}
};
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
@@ -490,15 +489,11 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
}
-/*
- * Data for PCI driver interface
- *
- * used by pci_match_id() call below.
- */
+/* Data for PCI driver interface used by pci_match_id() call below */
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_data },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_data },
- { 0, },
+ {}
};
static int __init pmc_atom_init(void)
@@ -506,8 +501,9 @@ static int __init pmc_atom_init(void)
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
- /* We look for our device - PCU PMC
- * we assume that there is max. one device.
+ /*
+ * We look for our device - PCU PMC.
+ * We assume that there is maximum one device.
*
* We can't use plain pci_driver mechanism,
* as the device is really a multiple function device,
@@ -519,7 +515,7 @@ static int __init pmc_atom_init(void)
if (ent)
return pmc_setup_dev(pdev, ent);
}
- /* Device not found. */
+ /* Device not found */
return -ENODEV;
}
@@ -527,6 +523,6 @@ device_initcall(pmc_atom_init);
/*
MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface");
+MODULE_DESCRIPTION("Intel Atom SoC Power Management Controller Interface");
MODULE_LICENSE("GPL v2");
*/
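
[Editor's note: the pmc_power_off() change above is a genuine bug fix, not a cleanup. Programming a multi-bit register field requires clearing it with the inverted mask before OR-ing in the new value; "value &= SLEEP_TYPE_MASK" would instead discard every bit outside the field. A reduced sketch of the corrected read-modify-write, using the constants named in the hunk:]

#include <linux/io.h>

/* SLEEP_TYPE_MASK, SLEEP_TYPE_S5 and SLEEP_ENABLE as defined by the driver */
static void example_power_off(unsigned long pm1_cnt_port)
{
	u32 pm1_cnt_value;

	pm1_cnt_value = inl(pm1_cnt_port);
	pm1_cnt_value &= ~SLEEP_TYPE_MASK;	/* clear the whole sleep-type field first */
	pm1_cnt_value |= SLEEP_TYPE_S5;		/* then select S5 (soft off) */
	pm1_cnt_value |= SLEEP_ENABLE;
	outl(pm1_cnt_value, pm1_cnt_port);
}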
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index c187dcdf82f0..b4aa8ba35d2d 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -356,23 +356,13 @@ struct samsung_laptop {
};
struct samsung_quirks {
- bool broken_acpi_video;
bool four_kbd_backlight_levels;
bool enable_kbd_backlight;
- bool use_native_backlight;
bool lid_handling;
};
static struct samsung_quirks samsung_unknown = {};
-static struct samsung_quirks samsung_broken_acpi_video = {
- .broken_acpi_video = true,
-};
-
-static struct samsung_quirks samsung_use_native_backlight = {
- .use_native_backlight = true,
-};
-
static struct samsung_quirks samsung_np740u3e = {
.four_kbd_backlight_levels = true,
.enable_kbd_backlight = true,
@@ -1484,7 +1474,7 @@ static int __init samsung_platform_init(struct samsung_laptop *samsung)
{
struct platform_device *pdev;
- pdev = platform_device_register_simple("samsung", -1, NULL, 0);
+ pdev = platform_device_register_simple("samsung", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -1542,76 +1532,6 @@ static const struct dmi_system_id samsung_dmi_table[] __initconst = {
/* Specific DMI ids for laptop with quirks */
{
.callback = samsung_dmi_matched,
- .ident = "N150P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
- DMI_MATCH(DMI_BOARD_NAME, "N150P"),
- },
- .driver_data = &samsung_use_native_backlight,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "N145P/N250P/N260P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
- },
- .driver_data = &samsung_use_native_backlight,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "N150/N210/N220",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "NF110/NF210/NF310",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "X360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
- DMI_MATCH(DMI_BOARD_NAME, "X360"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "N250P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
- DMI_MATCH(DMI_BOARD_NAME, "N250P"),
- },
- .driver_data = &samsung_use_native_backlight,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "NC210",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
- DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
.ident = "730U3E/740U3E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
@@ -1654,15 +1574,8 @@ static int __init samsung_init(void)
samsung->handle_backlight = true;
samsung->quirks = quirks;
-#ifdef CONFIG_ACPI
- if (samsung->quirks->broken_acpi_video)
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
- if (samsung->quirks->use_native_backlight)
- acpi_video_set_dmi_backlight_type(acpi_backlight_native);
-
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
samsung->handle_backlight = false;
-#endif
ret = samsung_platform_init(samsung);
if (ret)
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 67feed25c9db..5362f1a7b77c 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -328,6 +328,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
index ca3647b751d5..ca76076fc706 100644
--- a/drivers/platform/x86/simatic-ipc.c
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -41,10 +41,12 @@ static struct {
{SIMATIC_IPC_IPC127E, SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE},
{SIMATIC_IPC_IPC227D, SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE},
{SIMATIC_IPC_IPC227E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E},
+ {SIMATIC_IPC_IPC227G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
{SIMATIC_IPC_IPC277E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E},
{SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
{SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
{SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
+ {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
};
static int register_platform_devices(u32 station_id)
@@ -65,7 +67,8 @@ static int register_platform_devices(u32 station_id)
}
if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
- if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ if (ledmode == SIMATIC_IPC_DEVICE_127E ||
+ ledmode == SIMATIC_IPC_DEVICE_227G)
pdevname = KBUILD_MODNAME "_leds_gpio";
platform_data.devmode = ledmode;
ipc_led_platform_device =
@@ -80,6 +83,11 @@ static int register_platform_devices(u32 station_id)
ipc_led_platform_device->name);
}
+ if (wdtmode == SIMATIC_IPC_DEVICE_227G) {
+ request_module("w83627hf_wdt");
+ return 0;
+ }
+
if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
platform_data.devmode = wdtmode;
ipc_wdt_platform_device =
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 07ef05f727a2..765fcaba4d12 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -584,7 +584,7 @@ static int sony_pf_add(void)
if (ret)
goto out;
- sony_pf_device = platform_device_alloc("sony-laptop", -1);
+ sony_pf_device = platform_device_alloc("sony-laptop", PLATFORM_DEVID_NONE);
if (!sony_pf_device) {
ret = -ENOMEM;
goto out_platform_registered;
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 9072eb302618..ded26213c420 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -233,7 +233,7 @@ static int __init tc1100_init(void)
if (!wmi_has_guid(GUID))
return -ENODEV;
- tc1100_device = platform_device_alloc("tc1100-wmi", -1);
+ tc1100_device = platform_device_alloc("tc1100-wmi", PLATFORM_DEVID_NONE);
if (!tc1100_device)
return -ENOMEM;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 22d4e8633e30..6a823b850a77 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7623,9 +7623,9 @@ static int __init volume_create_alsa_mixer(void)
data = card->private_data;
data->card = card;
- strlcpy(card->driver, TPACPI_ALSA_DRVNAME,
+ strscpy(card->driver, TPACPI_ALSA_DRVNAME,
sizeof(card->driver));
- strlcpy(card->shortname, TPACPI_ALSA_SHRTNAME,
+ strscpy(card->shortname, TPACPI_ALSA_SHRTNAME,
sizeof(card->shortname));
snprintf(card->mixername, sizeof(card->mixername), "ThinkPad EC %s",
(thinkpad_id.ec_version_str) ?
@@ -10592,10 +10592,9 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
- /* Set AMT correctly now we know current profile */
- if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
- (dytc_capabilities & BIT(DYTC_FC_AMT)))
- dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+ /* Workaround for https://bugzilla.kernel.org/show_bug.cgi?id=216347 */
+ if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
return 0;
}
@@ -11716,7 +11715,7 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.quirks = dmi_id->driver_data;
/* Device initialization */
- tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
@@ -11727,7 +11726,7 @@ static int __init thinkpad_acpi_module_init(void)
}
tpacpi_sensors_pdev = platform_device_register_simple(
TPACPI_HWMON_DRVR_NAME,
- -1, NULL, 0);
+ PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index f7761d98c0fd..6d18fbf8762b 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -192,7 +192,7 @@ static int topstar_platform_init(struct topstar_laptop *topstar)
{
int err;
- topstar->platform = platform_device_alloc(TOPSTAR_LAPTOP_CLASS, -1);
+ topstar->platform = platform_device_alloc(TOPSTAR_LAPTOP_CLASS, PLATFORM_DEVID_NONE);
if (!topstar->platform)
return -ENOMEM;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 0fc9e8b8827b..160abd3b3af8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -23,6 +23,7 @@
#define PROC_INTERFACE_VERSION 1
#include <linux/compiler.h>
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -38,18 +39,24 @@
#include <linux/workqueue.h>
#include <linux/i8042.h>
#include <linux/acpi.h>
-#include <linux/dmi.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/rfkill.h>
+#include <linux/hwmon.h>
#include <linux/iio/iio.h>
#include <linux/toshiba.h>
+#include <acpi/battery.h>
#include <acpi/video.h>
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
+static int turn_on_panel_on_resume = -1;
+module_param(turn_on_panel_on_resume, int, 0644);
+MODULE_PARM_DESC(turn_on_panel_on_resume,
+ "Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes");
+
#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
/* Scan code for Fn key on TOS1900 models */
@@ -100,18 +107,21 @@ MODULE_LICENSE("GPL");
#define TOS_NOT_INSTALLED 0x8e00
/* Registers */
+#define HCI_PANEL_POWER_ON 0x0002
#define HCI_FAN 0x0004
#define HCI_TR_BACKLIGHT 0x0005
#define HCI_SYSTEM_EVENT 0x0016
#define HCI_VIDEO_OUT 0x001c
#define HCI_HOTKEY_EVENT 0x001e
#define HCI_LCD_BRIGHTNESS 0x002a
+#define HCI_FAN_RPM 0x0045
#define HCI_WIRELESS 0x0056
#define HCI_ACCELEROMETER 0x006d
#define HCI_COOLING_METHOD 0x007f
#define HCI_KBD_ILLUMINATION 0x0095
#define HCI_ECO_MODE 0x0097
#define HCI_ACCELEROMETER2 0x00a6
+#define HCI_BATTERY_CHARGE_MODE 0x00ba
#define HCI_SYSTEM_INFO 0xc000
#define SCI_PANEL_POWER_ON 0x010d
#define SCI_ILLUMINATION 0x014e
@@ -170,6 +180,9 @@ struct toshiba_acpi_dev {
struct miscdevice miscdev;
struct rfkill *wwan_rfk;
struct iio_dev *indio_dev;
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_device;
+#endif
int force_fan;
int last_key_event;
@@ -185,6 +198,7 @@ struct toshiba_acpi_dev {
unsigned int illumination_supported:1;
unsigned int video_supported:1;
unsigned int fan_supported:1;
+ unsigned int fan_rpm_supported:1;
unsigned int system_event_supported:1;
unsigned int ntfy_supported:1;
unsigned int info_supported:1;
@@ -201,6 +215,7 @@ struct toshiba_acpi_dev {
unsigned int usb_three_supported:1;
unsigned int wwan_supported:1;
unsigned int cooling_method_supported:1;
+ unsigned int battery_charge_mode_supported:1;
unsigned int sysfs_created:1;
unsigned int special_functions;
@@ -272,14 +287,6 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
};
/*
- * List of models which have a broken acpi-video backlight interface and thus
- * need to use the toshiba (vendor) interface instead.
- */
-static const struct dmi_system_id toshiba_vendor_backlight_dmi[] = {
- {}
-};
-
-/*
* Utility
*/
@@ -675,12 +682,15 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
return;
}
- if (out[0] == TOS_INPUT_DATA_ERROR) {
+ if (out[0] == TOS_INPUT_DATA_ERROR || out[0] == TOS_NOT_SUPPORTED) {
/*
* If we receive 0x8300 (Input Data Error), it means that the
* LED device is present, but that we just screwed the input
* parameters.
*
+ * On some laptops 0x8000 (Not supported) is also returned in
+ * this case, so we need to allow for that as well.
+ *
* Let's query the status of the LED to see if we really have a
 * success response, indicating the actual presence of the LED,
* bail out otherwise.
@@ -1282,6 +1292,69 @@ static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state)
return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
}
+/* Battery charge control */
+static void toshiba_battery_charge_mode_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_BATTERY_CHARGE_MODE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->battery_charge_mode_supported = 0;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Battery Charge Mode failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return;
+
+ dev->battery_charge_mode_supported = 1;
+}
+
+static int toshiba_battery_charge_mode_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_BATTERY_CHARGE_MODE, 0, 0, 0, 0x1 };
+ u32 out[TCI_WORDS];
+ int retries = 3;
+
+ do {
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status))
+ pr_err("ACPI call to get Battery Charge Mode failed\n");
+ switch (out[0]) {
+ case TOS_SUCCESS:
+ case TOS_SUCCESS2:
+ *state = out[2];
+ return 0;
+ case TOS_NOT_SUPPORTED:
+ return -ENODEV;
+ case TOS_DATA_NOT_AVAILABLE:
+ retries--;
+ break;
+ default:
+ return -EIO;
+ }
+ } while (retries);
+
+ return -EIO;
+}
+
+static int toshiba_battery_charge_mode_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result = hci_write(dev, HCI_BATTERY_CHARGE_MODE, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Battery Charge Mode failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
/* Transflective Backlight */
static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
{
@@ -1616,6 +1689,29 @@ static const struct proc_ops fan_proc_ops = {
.proc_write = fan_proc_write,
};
+/* Fan RPM */
+static int get_fan_rpm(struct toshiba_acpi_dev *dev, u32 *rpm)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_FAN_RPM, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Fan speed failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] == TOS_SUCCESS) {
+ *rpm = out[2];
+ return 0;
+ }
+
+ return -EIO;
+}
+
static int keys_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
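
As the helpers in this hunk illustrate, TCI calls exchange a six-word buffer: in[0] carries the operation (HCI_GET/HCI_SET), in[1] the register, while out[0] returns the status and out[2] the value. A hedged sketch of a generic read-only query built on the same convention (helper name is illustrative):

	static int example_hci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *value)
	{
		u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
		u32 out[TCI_WORDS];
		acpi_status status = tci_raw(dev, in, out);

		if (ACPI_FAILURE(status))
			return -EIO;
		if (out[0] == TOS_NOT_SUPPORTED)
			return -ENODEV;
		if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
			return -EIO;

		*value = out[2];
		return 0;
	}
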
@@ -2786,6 +2882,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
dev->hotkey_dev->name = "Toshiba input device";
dev->hotkey_dev->phys = "toshiba_acpi/input0";
dev->hotkey_dev->id.bustype = BUS_HOST;
+ dev->hotkey_dev->dev.parent = &dev->acpi_dev->dev;
if (dev->hotkey_event_type == HCI_SYSTEM_TYPE1 ||
!dev->kbd_function_keys_supported)
@@ -2881,14 +2978,6 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
- /*
- * Tell acpi-video-detect code to prefer vendor backlight on all
- * systems with transflective backlight and on dmi matched systems.
- */
- if (dev->tr_backlight_supported ||
- dmi_check_system(toshiba_vendor_backlight_dmi))
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
@@ -2916,6 +3005,139 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
+/* HWMON support for fan */
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t toshiba_acpi_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int toshiba_acpi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ /*
+ * There is only a single channel and single attribute (for the
+ * fan) at this point.
+ * This can be replaced with more advanced logic in the future,
+ * should the need arise.
+ */
+ if (type == hwmon_fan && channel == 0 && attr == hwmon_fan_input) {
+ u32 value;
+ int ret;
+
+ ret = get_fan_rpm(toshiba_acpi, &value);
+ if (ret)
+ return ret;
+
+ *val = value;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_channel_info *toshiba_acpi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops toshiba_acpi_hwmon_ops = {
+ .is_visible = toshiba_acpi_hwmon_is_visible,
+ .read = toshiba_acpi_hwmon_read,
+};
+
+static const struct hwmon_chip_info toshiba_acpi_hwmon_chip_info = {
+ .ops = &toshiba_acpi_hwmon_ops,
+ .info = toshiba_acpi_hwmon_info,
+};
+#endif
+
+/* ACPI battery hooking */
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 state;
+ int status;
+
+ if (toshiba_acpi == NULL) {
+ pr_err("Toshiba ACPI object invalid\n");
+ return -ENODEV;
+ }
+
+ status = toshiba_battery_charge_mode_get(toshiba_acpi, &state);
+
+ if (status != 0)
+ return status;
+
+ if (state == 1)
+ return sprintf(buf, "80\n");
+ else
+ return sprintf(buf, "100\n");
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u32 value;
+ int rval;
+
+ if (toshiba_acpi == NULL) {
+ pr_err("Toshiba ACPI object invalid\n");
+ return -ENODEV;
+ }
+
+ rval = kstrtou32(buf, 10, &value);
+ if (rval)
+ return rval;
+
+ if (value < 1 || value > 100)
+ return -EINVAL;
+ rval = toshiba_battery_charge_mode_set(toshiba_acpi,
+ (value < 90) ? 1 : 0);
+ if (rval < 0)
+ return rval;
+ else
+ return count;
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+static struct attribute *toshiba_acpi_battery_attrs[] = {
+ &dev_attr_charge_control_end_threshold.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(toshiba_acpi_battery);
+
+static int toshiba_acpi_battery_add(struct power_supply *battery)
+{
+ if (toshiba_acpi == NULL) {
+ pr_err("Init order issue\n");
+ return -ENODEV;
+ }
+ if (!toshiba_acpi->battery_charge_mode_supported)
+ return -ENODEV;
+ if (device_add_groups(&battery->dev, toshiba_acpi_battery_groups))
+ return -ENODEV;
+ return 0;
+}
+
+static int toshiba_acpi_battery_remove(struct power_supply *battery)
+{
+ device_remove_groups(&battery->dev, toshiba_acpi_battery_groups);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = toshiba_acpi_battery_add,
+ .remove_battery = toshiba_acpi_battery_remove,
+ .name = "Toshiba Battery Extension",
+};
+
static void print_supported_features(struct toshiba_acpi_dev *dev)
{
pr_info("Supported laptop features:");
@@ -2928,6 +3150,8 @@ static void print_supported_features(struct toshiba_acpi_dev *dev)
pr_cont(" video-out");
if (dev->fan_supported)
pr_cont(" fan");
+ if (dev->fan_rpm_supported)
+ pr_cont(" fan-rpm");
if (dev->tr_backlight_supported)
pr_cont(" transflective-backlight");
if (dev->illumination_supported)
@@ -2956,6 +3180,8 @@ static void print_supported_features(struct toshiba_acpi_dev *dev)
pr_cont(" wwan");
if (dev->cooling_method_supported)
pr_cont(" cooling-method");
+ if (dev->battery_charge_mode_supported)
+ pr_cont(" battery-charge-mode");
pr_cont("\n");
}
@@ -2968,6 +3194,11 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
remove_toshiba_proc_entries(dev);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon_device)
+ hwmon_device_unregister(dev->hwmon_device);
+#endif
+
if (dev->accelerometer_supported && dev->indio_dev) {
iio_device_unregister(dev->indio_dev);
iio_device_free(dev->indio_dev);
@@ -2996,6 +3227,9 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
rfkill_destroy(dev->wwan_rfk);
}
+ if (dev->battery_charge_mode_supported)
+ battery_hook_unregister(&battery_hook);
+
if (toshiba_acpi)
toshiba_acpi = NULL;
@@ -3015,6 +3249,43 @@ static const char *find_hci_method(acpi_handle handle)
return NULL;
}
+/*
+ * Some Toshibas have a broken acpi-video interface for brightness control,
+ * these are quirked in drivers/acpi/video_detect.c to use the GPU native
+ * (/sys/class/backlight/intel_backlight) instead.
+ * But these need a HCI_SET call to actually turn the panel back on at resume,
+ * without this call the screen stays black at resume.
+ * Either HCI_LCD_BRIGHTNESS (used by acpi_video's _BCM) or HCI_PANEL_POWER_ON
+ * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
+ * the configured brightness level.
+ */
+static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ {
+ /* Toshiba Portégé R700 */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ },
+ },
+ {
+ /* Toshiba Satellite/Portégé R830 */
+ /* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ /* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
+ },
+ },
+ {
+ /* Toshiba Satellite/Portégé Z830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
+ },
+ },
+};
+
static int toshiba_acpi_add(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev;
@@ -3157,12 +3428,32 @@ iio_error:
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
+ ret = get_fan_rpm(dev, &dummy);
+ dev->fan_rpm_supported = !ret;
+
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->fan_rpm_supported) {
+ dev->hwmon_device = hwmon_device_register_with_info(
+ &dev->acpi_dev->dev, "toshiba_acpi_sensors", NULL,
+ &toshiba_acpi_hwmon_chip_info, NULL);
+ if (IS_ERR(dev->hwmon_device)) {
+ dev->hwmon_device = NULL;
+ pr_warn("unable to register hwmon device, skipping\n");
+ }
+ }
+#endif
+
+ if (turn_on_panel_on_resume == -1)
+ turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
+
toshiba_wwan_available(dev);
if (dev->wwan_supported)
toshiba_acpi_setup_wwan_rfkill(dev);
toshiba_cooling_method_available(dev);
+ toshiba_battery_charge_mode_available(dev);
+
print_supported_features(dev);
ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
@@ -3177,6 +3468,13 @@ iio_error:
toshiba_acpi = dev;
+ /*
+ * As the battery hook relies on the static variable toshiba_acpi being
+ * set, this must be done after toshiba_acpi is assigned.
+ */
+ if (dev->battery_charge_mode_supported)
+ battery_hook_register(&battery_hook);
+
return 0;
error:
@@ -3273,6 +3571,9 @@ static int toshiba_acpi_resume(struct device *device)
rfkill_set_hw_state(dev->wwan_rfk, !dev->killswitch);
}
+ if (turn_on_panel_on_resume)
+ hci_write(dev, HCI_PANEL_POWER_ON, 1);
+
return 0;
}
#endif
diff --git a/drivers/platform/x86/winmate-fm07-keys.c b/drivers/platform/x86/winmate-fm07-keys.c
index 2c90c5c7eca2..465ffad81a65 100644
--- a/drivers/platform/x86/winmate-fm07-keys.c
+++ b/drivers/platform/x86/winmate-fm07-keys.c
@@ -161,7 +161,7 @@ static int __init fm07keys_init(void)
return ret;
}
- dev = platform_device_register_simple(DRV_NAME, -1, NULL, 0);
+ dev = platform_device_register_simple(DRV_NAME, PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
pr_err("fm07keys: failed to allocate device, err = %d\n", ret);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aed293b5af81..223550a10d4d 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -95,9 +95,6 @@ module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
-static int acpi_wmi_remove(struct platform_device *device);
-static int acpi_wmi_probe(struct platform_device *device);
-
static const struct acpi_device_id wmi_device_ids[] = {
{"PNP0C14", 0},
{"pnp0c14", 0},
@@ -105,13 +102,10 @@ static const struct acpi_device_id wmi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
-static struct platform_driver acpi_wmi_driver = {
- .driver = {
- .name = "acpi-wmi",
- .acpi_match_table = wmi_device_ids,
- },
- .probe = acpi_wmi_probe,
- .remove = acpi_wmi_remove,
+/* allow duplicate GUIDs as these device drivers use struct wmi_driver */
+static const char * const allow_duplicates[] = {
+ "05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
+ NULL
};
/*
@@ -1073,6 +1067,23 @@ static const struct device_type wmi_type_data = {
.release = wmi_dev_release,
};
+/*
+ * _WDG is a static list that is only parsed at startup,
+ * so it's safe to count entries without extra protection.
+ */
+static int guid_count(const guid_t *guid)
+{
+ struct wmi_block *wblock;
+ int count = 0;
+
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+ if (guid_equal(&wblock->gblock.guid, guid))
+ count++;
+ }
+
+ return count;
+}
+
static int wmi_create_device(struct device *wmi_bus_dev,
struct wmi_block *wblock,
struct acpi_device *device)
@@ -1080,6 +1091,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
struct acpi_device_info *info;
char method[WMI_ACPI_METHOD_NAME_SIZE];
int result;
+ uint count;
if (wblock->gblock.flags & ACPI_WMI_EVENT) {
wblock->dev.dev.type = &wmi_type_event;
@@ -1134,7 +1146,11 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
- dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
+ count = guid_count(&wblock->gblock.guid);
+ if (count)
+ dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count);
+ else
+ dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
device_initialize(&wblock->dev.dev);
@@ -1154,11 +1170,20 @@ static void wmi_free_devices(struct acpi_device *device)
}
}
-static bool guid_already_parsed(struct acpi_device *device, const guid_t *guid)
+static bool guid_already_parsed_for_legacy(struct acpi_device *device, const guid_t *guid)
{
struct wmi_block *wblock;
list_for_each_entry(wblock, &wmi_block_list, list) {
+ /* skip warning and register if we know the driver will use struct wmi_driver */
+ for (int i = 0; allow_duplicates[i] != NULL; i++) {
+ guid_t tmp;
+
+ if (guid_parse(allow_duplicates[i], &tmp))
+ continue;
+ if (guid_equal(&tmp, guid))
+ return false;
+ }
if (guid_equal(&wblock->gblock.guid, guid)) {
/*
* Because we historically didn't track the relationship
@@ -1208,13 +1233,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
if (debug_dump_wdg)
wmi_dump_wdg(&gblock[i]);
- /*
- * Some WMI devices, like those for nVidia hooks, have a
- * duplicate GUID. It's not clear what we should do in this
- * case yet, so for now, we'll just ignore the duplicate
- * for device creation.
- */
- if (guid_already_parsed(device, &gblock[i].guid))
+ if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
continue;
wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
@@ -1449,6 +1468,15 @@ void wmi_driver_unregister(struct wmi_driver *driver)
}
EXPORT_SYMBOL(wmi_driver_unregister);
+static struct platform_driver acpi_wmi_driver = {
+ .driver = {
+ .name = "acpi-wmi",
+ .acpi_match_table = wmi_device_ids,
+ },
+ .probe = acpi_wmi_probe,
+ .remove = acpi_wmi_remove,
+};
+
static int __init acpi_wmi_init(void)
{
int error;
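
Every entry in allow_duplicates[] is re-parsed inside the list walk above; a hedged sketch of the same check factored into a predicate (helper name is illustrative):

	static bool guid_is_allowed_duplicate(const guid_t *guid)
	{
		int i;

		for (i = 0; allow_duplicates[i] != NULL; i++) {
			guid_t tmp;

			if (guid_parse(allow_duplicates[i], &tmp))
				continue;
			if (guid_equal(&tmp, guid))
				return true;
		}

		return false;
	}
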
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 480375977435..4acd6fa8d43b 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -663,9 +663,23 @@ static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
},
};
+static int __init chuwi_hi8_init(void)
+{
+ /*
+ * Avoid the acpi_unregister_gsi() call in x86_acpi_irq_helper_get()
+ * breaking the touchscreen + logging various errors when the Windows
+ * BIOS is used.
+ */
+ if (acpi_dev_present("MSSL0001", NULL, 1))
+ return -ENODEV;
+
+ return 0;
+}
+
static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_info = chuwi_hi8_i2c_clients,
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
+ .init = chuwi_hi8_init,
};
#define CZC_EC_EXTRA_PORT 0x68
diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h
index 2ce739ff9c1a..f3302006842e 100644
--- a/drivers/pnp/pnpbios/pnpbios.h
+++ b/drivers/pnp/pnpbios/pnpbios.h
@@ -153,7 +153,6 @@ extern int pnpbios_dont_use_current_config;
extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
extern int pnpbios_read_resources_from_node(struct pnp_dev *dev, struct pnp_bios_node *node);
extern int pnpbios_write_resources_to_node(struct pnp_dev *dev, struct pnp_bios_node *node);
-extern void pnpid32_to_pnpid(u32 id, char *str);
extern void pnpbios_print_status(const char * module, u16 status);
extern void pnpbios_calls_init(union pnp_bios_install_struct * header);
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 5724001e66b9..6b99e1c675b8 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1696,7 +1696,7 @@ error_1:
/* main bq2415x remove function */
-static int bq2415x_remove(struct i2c_client *client)
+static void bq2415x_remove(struct i2c_client *client)
{
struct bq2415x_device *bq = i2c_get_clientdata(client);
@@ -1715,8 +1715,6 @@ static int bq2415x_remove(struct i2c_client *client)
dev_info(bq->dev, "driver unregistered\n");
kfree(bq->name);
-
- return 0;
}
static const struct i2c_device_id bq2415x_i2c_id_table[] = {
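
These power-supply drivers are part of the tree-wide conversion of the I2C remove() callback from int to void; the I2C core ignored the return value anyway, so the mechanical change is to adjust the prototype and drop the trailing return 0. A minimal sketch of the new shape (names are illustrative):

	static void example_remove(struct i2c_client *client)
	{
		struct example_chip *chip = i2c_get_clientdata(client);

		/* tear down in reverse probe order; nothing can be returned */
		example_chip_teardown(chip);
	}
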
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 27f5c7648617..2274679c5ddd 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1901,7 +1901,7 @@ out_pmrt:
return ret;
}
-static int bq24190_remove(struct i2c_client *client)
+static void bq24190_remove(struct i2c_client *client)
{
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;
@@ -1918,8 +1918,6 @@ static int bq24190_remove(struct i2c_client *client)
pm_runtime_put_sync(bdi->dev);
pm_runtime_dont_use_autosuspend(bdi->dev);
pm_runtime_disable(bdi->dev);
-
- return 0;
}
static void bq24190_shutdown(struct i2c_client *client)
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index ecba9ab86faf..a309bbedfe52 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -1077,7 +1077,7 @@ static int bq24257_probe(struct i2c_client *client,
return 0;
}
-static int bq24257_remove(struct i2c_client *client)
+static void bq24257_remove(struct i2c_client *client)
{
struct bq24257_device *bq = i2c_get_clientdata(client);
@@ -1085,8 +1085,6 @@ static int bq24257_remove(struct i2c_client *client)
cancel_delayed_work_sync(&bq->iilimit_setup_work);
bq24257_field_write(bq, F_RESET, 1); /* reset to defaults */
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 852a6fec4339..06ea7399d151 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -1258,7 +1258,7 @@ err_unregister_usb_notifier:
return ret;
}
-static int bq25890_remove(struct i2c_client *client)
+static void bq25890_remove(struct i2c_client *client)
{
struct bq25890_device *bq = i2c_get_clientdata(client);
@@ -1269,8 +1269,6 @@ static int bq25890_remove(struct i2c_client *client)
/* reset all registers to default values */
bq25890_chip_reset(bq);
}
-
- return 0;
}
static void bq25890_shutdown(struct i2c_client *client)
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index cf38cbfe13e9..94b00bb89c17 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -205,7 +205,7 @@ err_failed:
return ret;
}
-static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
+static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
@@ -214,8 +214,6 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
mutex_lock(&battery_mutex);
idr_remove(&battery_id, di->id);
mutex_unlock(&battery_mutex);
-
- return 0;
}
static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 728e2a6cc9c3..81e17ad80163 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -725,13 +725,12 @@ static int __maybe_unused cw_bat_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume);
-static int cw_bat_remove(struct i2c_client *client)
+static void cw_bat_remove(struct i2c_client *client)
{
struct cw_battery *cw_bat = i2c_get_clientdata(client);
cancel_delayed_work_sync(&cw_bat->battery_delay_work);
power_supply_put_battery_info(cw_bat->rk_bat, cw_bat->battery);
- return 0;
}
static const struct i2c_device_id cw_bat_id_table[] = {
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index 9ae273fde7a2..d78cd05402f6 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -312,7 +312,7 @@ static void ds278x_power_supply_init(struct power_supply_desc *battery)
battery->external_power_changed = NULL;
}
-static int ds278x_battery_remove(struct i2c_client *client)
+static void ds278x_battery_remove(struct i2c_client *client)
{
struct ds278x_info *info = i2c_get_clientdata(client);
int id = info->id;
@@ -325,8 +325,6 @@ static int ds278x_battery_remove(struct i2c_client *client)
mutex_lock(&battery_lock);
idr_remove(&battery_id, id);
mutex_unlock(&battery_lock);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 9ee54e397754..384a374b52c1 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -590,13 +590,12 @@ static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
return 0;
}
-static int lp8727_remove(struct i2c_client *cl)
+static void lp8727_remove(struct i2c_client *cl)
{
struct lp8727_chg *pchg = i2c_get_clientdata(cl);
lp8727_release_irq(pchg);
lp8727_unregister_psy(pchg);
- return 0;
}
static const struct of_device_id lp8727_dt_ids[] = {
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index 7a23c70f4879..736dec608ff6 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -149,13 +149,11 @@ static int rt5033_battery_probe(struct i2c_client *client,
return 0;
}
-static int rt5033_battery_remove(struct i2c_client *client)
+static void rt5033_battery_remove(struct i2c_client *client)
{
struct rt5033_battery *battery = i2c_get_clientdata(client);
power_supply_unregister(battery->psy);
-
- return 0;
}
static const struct i2c_device_id rt5033_battery_id[] = {
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index 74ee54320e6a..72962286d704 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1698,7 +1698,7 @@ put_usb_notifier:
return ret;
}
-static int rt9455_remove(struct i2c_client *client)
+static void rt9455_remove(struct i2c_client *client)
{
int ret;
struct rt9455_info *info = i2c_get_clientdata(client);
@@ -1715,8 +1715,6 @@ static int rt9455_remove(struct i2c_client *client)
cancel_delayed_work_sync(&info->pwr_rdy_work);
cancel_delayed_work_sync(&info->max_charging_time_work);
cancel_delayed_work_sync(&info->batt_presence_work);
-
- return 0;
}
static const struct i2c_device_id rt9455_i2c_id_table[] = {
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index 1511f71f937c..996a82f8a2a1 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -1595,14 +1595,12 @@ static int smb347_probe(struct i2c_client *client,
return 0;
}
-static int smb347_remove(struct i2c_client *client)
+static void smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
smb347_usb_vbus_regulator_disable(smb->usb_rdev);
smb347_irq_disable(smb);
-
- return 0;
}
static void smb347_shutdown(struct i2c_client *client)
diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c
index 7ed4e4bb26ec..1897c2984860 100644
--- a/drivers/power/supply/z2_battery.c
+++ b/drivers/power/supply/z2_battery.c
@@ -251,7 +251,7 @@ err:
return ret;
}
-static int z2_batt_remove(struct i2c_client *client)
+static void z2_batt_remove(struct i2c_client *client)
{
struct z2_charger *charger = i2c_get_clientdata(client);
@@ -263,8 +263,6 @@ static int z2_batt_remove(struct i2c_client *client)
free_irq(gpiod_to_irq(charger->charge_gpiod), charger);
kfree(charger);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 21d624f9f5fb..26d00b1853b4 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -994,6 +994,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
y = value & 0x1f;
value = (1 << y) * (4 + f) * rp->time_unit / 4;
} else {
+ if (value < rp->time_unit)
+ return 0;
+
do_div(value, rp->time_unit);
y = ilog2(value);
f = div64_u64(4 * (value - (1 << y)), 1 << y);
@@ -1035,7 +1038,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = {
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
- .dram_domain_energy_unit = 15300,
.psys_domain_energy_unit = 1000000000,
.spr_psys_bits = true,
};
@@ -1110,6 +1112,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
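
The new guard protects the encoding branch: after do_div(value, rp->time_unit) a requested window shorter than one time unit would leave value at 0, and ilog2(0) is undefined. A worked example of the encoding for a window of five time units, consistent with the decode expression in the first branch:

	/*
	 * value = 5 * rp->time_unit
	 *   do_div(value, rp->time_unit)        -> value = 5
	 *   y = ilog2(5)                        -> 2
	 *   f = 4 * (5 - (1 << 2)) / (1 << 2)   -> 1
	 * Decoding: (1 << 2) * (4 + 1) * rp->time_unit / 4 = 5 * rp->time_unit.
	 * For value < rp->time_unit the quotient is 0, hence the early return.
	 */
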
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 688cde320bb0..51cae72bb6db 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -174,7 +174,7 @@ static void ptp_clock_release(struct device *dev)
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
- ida_simple_remove(&ptp_clocks_map, ptp->index);
+ ida_free(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -217,7 +217,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (ptp == NULL)
goto no_memory;
- index = ida_simple_get(&ptp_clocks_map, 0, MINORMASK + 1, GFP_KERNEL);
+ index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
if (index < 0) {
err = index;
goto no_slot;
@@ -332,7 +332,7 @@ kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
- ida_simple_remove(&ptp_clocks_map, index);
+ ida_free(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
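
ida_simple_get(&ida, 0, MINORMASK + 1, gfp) and ida_alloc_max(&ida, MINORMASK, gfp) cover the same [0, MINORMASK] range; the deprecated simple API takes an exclusive upper bound while ida_alloc_max() takes an inclusive maximum. A minimal sketch of the paired allocation and release (identifiers are illustrative):

	#include <linux/idr.h>

	static DEFINE_IDA(example_map);

	static int example_get_index(void)
	{
		/* allocates the lowest free index in 0..MINORMASK */
		return ida_alloc_max(&example_map, MINORMASK, GFP_KERNEL);
	}

	static void example_put_index(int index)
	{
		ida_free(&example_map, index);
	}
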
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index e59ea2173aac..d36c3f597f77 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1311,12 +1311,6 @@ fail:
goto out;
}
-static int
-ptp_ocp_firstchild(struct device *dev, void *data)
-{
- return 1;
-}
-
static struct device *
ptp_ocp_find_flash(struct ptp_ocp *bp)
{
@@ -1325,7 +1319,7 @@ ptp_ocp_find_flash(struct ptp_ocp *bp)
last = NULL;
dev = &bp->spi_flash->dev;
- while ((dev = device_find_child(dev, NULL, ptp_ocp_firstchild))) {
+ while ((dev = device_find_any_child(dev))) {
if (!strcmp("mtd", dev_bus_name(dev)))
break;
put_device(last);
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0e042410f6b9..cfe3a0327471 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -734,8 +734,8 @@ static struct device_link *pwm_device_link_add(struct device *dev,
* Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
* error code on failure.
*/
-struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id)
+static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id)
{
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
@@ -797,7 +797,6 @@ put:
return pwm;
}
-EXPORT_SYMBOL_GPL(of_pwm_get);
/**
* acpi_pwm_get() - request a PWM via parsing "pwms" property in ACPI
@@ -1071,36 +1070,6 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
EXPORT_SYMBOL_GPL(devm_pwm_get);
/**
- * devm_of_pwm_get() - resource managed of_pwm_get()
- * @dev: device for PWM consumer
- * @np: device node to get the PWM from
- * @con_id: consumer name
- *
- * This function performs like of_pwm_get() but the acquired PWM device will
- * automatically be released on driver detach.
- *
- * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
- * error code on failure.
- */
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id)
-{
- struct pwm_device *pwm;
- int ret;
-
- pwm = of_pwm_get(dev, np, con_id);
- if (IS_ERR(pwm))
- return pwm;
-
- ret = devm_add_action_or_reset(dev, devm_pwm_release, pwm);
- if (ret)
- return ERR_PTR(ret);
-
- return pwm;
-}
-EXPORT_SYMBOL_GPL(devm_of_pwm_get);
-
-/**
* devm_fwnode_pwm_get() - request a resource managed PWM from firmware node
* @dev: device for PWM consumer
* @fwnode: firmware node to get the PWM from
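
With of_pwm_get() made static and devm_of_pwm_get() gone, out-of-core consumers are expected to use the generic helpers instead; a hedged probe fragment using the device-managed lookup (names are illustrative):

	static int example_probe(struct platform_device *pdev)
	{
		struct pwm_device *pwm;

		/* resolves "pwms" from DT or ACPI, or a board lookup table */
		pwm = devm_pwm_get(&pdev->dev, NULL);
		if (IS_ERR(pwm))
			return dev_err_probe(&pdev->dev, PTR_ERR(pwm),
					     "failed to get PWM\n");

		return 0;
	}
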
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index c91fa7f9e33d..f230c10d28bb 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -598,7 +598,7 @@ static int pca9685_pwm_probe(struct i2c_client *client,
return 0;
}
-static int pca9685_pwm_remove(struct i2c_client *client)
+static void pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
@@ -610,8 +610,6 @@ static int pca9685_pwm_remove(struct i2c_client *client)
}
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 42f2fc0bc8a9..321af498ee11 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -556,6 +556,14 @@ static int __init cec_init(void)
if (ce_arr.disabled)
return -ENODEV;
+ /*
+ * Intel systems may avoid uncorrectable errors
+ * if pages with corrected errors are aggressively
+ * taken offline.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ action_threshold = 2;
+
ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
if (!ce_arr.array) {
pr_err("Error allocating CE array page!\n");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 23e3e4a35cc9..d663ab9670fe 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -787,6 +787,24 @@ config REGULATOR_MT6323
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6331
+ tristate "MediaTek MT6331 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6331 PMIC.
+ This driver supports the control of different power rails of device
+ through regulator interface.
+
+config REGULATOR_MT6332
+ tristate "MediaTek MT6332 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6332 PMIC.
+ This driver supports the control of different power rails of device
+ through regulator interface.
+
config REGULATOR_MT6358
tristate "MediaTek MT6358 PMIC"
depends on MFD_MT6397
@@ -1384,6 +1402,15 @@ config REGULATOR_TPS65218
voltage regulators. It supports software based voltage control
for different voltage domains
+config REGULATOR_TPS65219
+ tristate "TI TPS65219 Power regulators"
+ depends on MFD_TPS65219 && OF
+ help
+ This driver supports TPS65219 voltage regulator chips.
+ The TPS65219 series of PMICs has 3 single-phase bucks and 4 LDO
+ voltage regulators. It supports software based voltage control
+ for different voltage domains.
+
config REGULATOR_TPS6524X
tristate "TI TPS6524X Power regulators"
depends on SPI
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index fa49bb6cc544..5962307e1130 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,8 @@ obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6315) += mt6315-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
+obj-$(CONFIG_REGULATOR_MT6331) += mt6331-regulator.o
+obj-$(CONFIG_REGULATOR_MT6332) += mt6332-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_MT6359) += mt6359-regulator.o
obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
@@ -162,6 +164,7 @@ obj-$(CONFIG_REGULATOR_TPS65086) += tps65086-regulator.o
obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65219) += tps65219-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index acaa6607898e..c2b8b8be7824 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -571,11 +571,10 @@ static int bd7181x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No parent regmap\n");
return -ENODEV;
}
- ldo4_en = devm_gpiod_get_from_of_node(&pdev->dev,
- pdev->dev.parent->of_node,
- "rohm,vsel-gpios", 0,
- GPIOD_ASIS, "ldo4-en");
+ ldo4_en = devm_fwnode_gpiod_get(&pdev->dev,
+ dev_fwnode(pdev->dev.parent),
+ "rohm,vsel", GPIOD_ASIS, "ldo4-en");
if (IS_ERR(ldo4_en)) {
ret = PTR_ERR(ldo4_en);
if (ret != -ENOENT)
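
devm_gpiod_get_from_of_node() is being phased out in favour of the fwnode-based lookup; note that the connection id is passed without the "-gpios" suffix, which the helper appends itself. A minimal sketch of the converted call (property name is illustrative):

	struct gpio_desc *en;

	/* looks up "example-en-gpios" on the parent's firmware node */
	en = devm_fwnode_gpiod_get(&pdev->dev, dev_fwnode(pdev->dev.parent),
				   "example-en", GPIOD_OUT_LOW, "example-en");
	if (IS_ERR(en))
		return PTR_ERR(en);
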
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
index aa42da4d141e..393c8693b327 100644
--- a/drivers/regulator/bd9576-regulator.c
+++ b/drivers/regulator/bd9576-regulator.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
@@ -939,8 +940,8 @@ static int bd957x_probe(struct platform_device *pdev)
}
ic_data->regmap = regmap;
- vout_mode = of_property_read_bool(pdev->dev.parent->of_node,
- "rohm,vout1-en-low");
+ vout_mode = device_property_read_bool(pdev->dev.parent,
+ "rohm,vout1-en-low");
if (vout_mode) {
struct gpio_desc *en;
@@ -948,10 +949,10 @@ static int bd957x_probe(struct platform_device *pdev)
/* VOUT1 enable state judged by VOUT1_EN pin */
/* See if we have GPIO defined */
- en = devm_gpiod_get_from_of_node(&pdev->dev,
- pdev->dev.parent->of_node,
- "rohm,vout1-en-gpios", 0,
- GPIOD_OUT_LOW, "vout1-en");
+ en = devm_fwnode_gpiod_get(&pdev->dev,
+ dev_fwnode(pdev->dev.parent),
+ "rohm,vout1-en", GPIOD_OUT_LOW,
+ "vout1-en");
if (!IS_ERR(en)) {
/* VOUT1_OPS gpio ctrl */
/*
@@ -986,8 +987,8 @@ static int bd957x_probe(struct platform_device *pdev)
* like DDR voltage selection.
*/
platform_set_drvdata(pdev, ic_data);
- ddr_sel = of_property_read_bool(pdev->dev.parent->of_node,
- "rohm,ddr-sel-low");
+ ddr_sel = device_property_read_bool(pdev->dev.parent,
+ "rohm,ddr-sel-low");
if (ddr_sel)
ic_data->regulator_data[2].desc.fixed_uV = 1350000;
else
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7150b1d0159e..bcccad8f7516 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -977,12 +977,27 @@ static int drms_uA_update(struct regulator_dev *rdev)
rdev_err(rdev, "failed to set load %d: %pe\n",
current_uA, ERR_PTR(err));
} else {
+ /*
+ * Unfortunately in some cases the constraints->valid_ops has
+ * REGULATOR_CHANGE_DRMS but there are no valid modes listed.
+ * That's not really legit but we won't consider it a fatal
+ * error here. We'll treat it as if REGULATOR_CHANGE_DRMS
+ * wasn't set.
+ */
+ if (!rdev->constraints->valid_modes_mask) {
+ rdev_dbg(rdev, "Can change modes; but no valid mode\n");
+ return 0;
+ }
+
/* get output voltage */
output_uV = regulator_get_voltage_rdev(rdev);
- if (output_uV <= 0) {
- rdev_err(rdev, "invalid output voltage found\n");
- return -EINVAL;
- }
+
+ /*
+ * Don't return an error; if regulator driver cares about
+ * output_uV then it's up to the driver to validate.
+ */
+ if (output_uV <= 0)
+ rdev_dbg(rdev, "invalid output voltage found\n");
/* get input voltage */
input_uV = 0;
@@ -990,10 +1005,13 @@ static int drms_uA_update(struct regulator_dev *rdev)
input_uV = regulator_get_voltage(rdev->supply);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
- if (input_uV <= 0) {
- rdev_err(rdev, "invalid input voltage found\n");
- return -EINVAL;
- }
+
+ /*
+ * Don't return an error; if regulator driver cares about
+ * input_uV then it's up to the driver to validate.
+ */
+ if (input_uV <= 0)
+ rdev_dbg(rdev, "invalid input voltage found\n");
/* now get the optimum mode for our new total regulator load */
mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
@@ -2681,7 +2699,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* return -ETIMEDOUT.
*/
if (rdev->desc->poll_enabled_time) {
- unsigned int time_remaining = delay;
+ int time_remaining = delay;
while (time_remaining > 0) {
_regulator_delay_helper(rdev->desc->poll_enabled_time);
@@ -2733,13 +2751,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;
lockdep_assert_held_once(&rdev->mutex.base);
regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }
return 0;
}
@@ -3497,10 +3520,8 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
(new_uV < old_uV))
return rdev->constraints->settling_time_down;
- if (ramp_delay == 0) {
- rdev_dbg(rdev, "ramp_delay not set\n");
+ if (ramp_delay == 0)
return 0;
- }
return DIV_ROUND_UP(abs(new_uV - old_uV), ramp_delay);
}
@@ -4784,10 +4805,10 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- consumers[i].consumer = NULL;
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
"Failed to get supply '%s'",
consumers[i].supply);
+ consumers[i].consumer = NULL;
goto err;
}
@@ -5393,6 +5414,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
bool dangling_of_gpiod = false;
struct device *dev;
int ret, i;
+ bool resolved_early = false;
if (cfg == NULL)
return ERR_PTR(-EINVAL);
@@ -5496,24 +5518,10 @@ regulator_register(const struct regulator_desc *regulator_desc,
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
- /* preform any regulator specific init */
- if (init_data && init_data->regulator_init) {
- ret = init_data->regulator_init(rdev->reg_data);
- if (ret < 0)
- goto clean;
- }
-
- if (config->ena_gpiod) {
- ret = regulator_ena_gpio_request(rdev, config);
- if (ret != 0) {
- rdev_err(rdev, "Failed to request enable GPIO: %pe\n",
- ERR_PTR(ret));
- goto clean;
- }
- /* The regulator core took over the GPIO descriptor */
- dangling_cfg_gpiod = false;
- dangling_of_gpiod = false;
- }
+ if (init_data && init_data->supply_regulator)
+ rdev->supply_name = init_data->supply_regulator;
+ else if (regulator_desc->supply_name)
+ rdev->supply_name = regulator_desc->supply_name;
/* register with sysfs */
rdev->dev.class = &regulator_class;
@@ -5535,13 +5543,38 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto wash;
}
- if (init_data && init_data->supply_regulator)
- rdev->supply_name = init_data->supply_regulator;
- else if (regulator_desc->supply_name)
- rdev->supply_name = regulator_desc->supply_name;
+ if ((rdev->supply_name && !rdev->supply) &&
+ (rdev->constraints->always_on ||
+ rdev->constraints->boot_on)) {
+ ret = regulator_resolve_supply(rdev);
+ if (ret)
+ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
+ ERR_PTR(ret));
+
+ resolved_early = true;
+ }
+
+ /* perform any regulator specific init */
+ if (init_data && init_data->regulator_init) {
+ ret = init_data->regulator_init(rdev->reg_data);
+ if (ret < 0)
+ goto wash;
+ }
+
+ if (config->ena_gpiod) {
+ ret = regulator_ena_gpio_request(rdev, config);
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO: %pe\n",
+ ERR_PTR(ret));
+ goto wash;
+ }
+ /* The regulator core took over the GPIO descriptor */
+ dangling_cfg_gpiod = false;
+ dangling_of_gpiod = false;
+ }
ret = set_machine_constraints(rdev);
- if (ret == -EPROBE_DEFER) {
+ if (ret == -EPROBE_DEFER && !resolved_early) {
/* Regulator might be in bypass mode and so needs its supply
* to set the constraints
*/
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 76e0e23bf598..e4c753b83088 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -1164,7 +1164,7 @@ error:
return ret;
}
-static int da9121_i2c_remove(struct i2c_client *i2c)
+static void da9121_i2c_remove(struct i2c_client *i2c)
{
struct da9121 *chip = i2c_get_clientdata(i2c);
const int mask_all[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
@@ -1176,7 +1176,6 @@ static int da9121_i2c_remove(struct i2c_client *i2c)
ret = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_MASK_0, mask_all, 4);
if (ret != 0)
dev_err(chip->dev, "Failed to set IRQ masks: %d\n", ret);
- return 0;
}
static const struct i2c_device_id da9121_i2c_id[] = {
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 32823a87fd40..3265e75e97ab 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -70,6 +70,65 @@ struct regulator *devm_regulator_get_exclusive(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
+static void regulator_action_disable(void *d)
+{
+ struct regulator *r = (struct regulator *)d;
+
+ regulator_disable(r);
+}
+
+static int _devm_regulator_get_enable(struct device *dev, const char *id,
+ int get_type)
+{
+ struct regulator *r;
+ int ret;
+
+ r = _devm_regulator_get(dev, id, get_type);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ret = regulator_enable(r);
+ if (!ret)
+ ret = devm_add_action_or_reset(dev, &regulator_action_disable, r);
+
+ if (ret)
+ devm_regulator_put(r);
+
+ return ret;
+}
+
+/**
+ * devm_regulator_get_enable_optional - Resource managed regulator get and enable
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
+ *
+ * Get and enable regulator for duration of the device life-time.
+ * regulator_disable() and regulator_put() are automatically called on driver
+ * detach. See regulator_get_optional() and regulator_enable() for more
+ * information.
+ */
+int devm_regulator_get_enable_optional(struct device *dev, const char *id)
+{
+ return _devm_regulator_get_enable(dev, id, OPTIONAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_enable_optional);
+
+/**
+ * devm_regulator_get_enable - Resource managed regulator get and enable
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
+ *
+ * Get and enable regulator for duration of the device life-time.
+ * regulator_disable() and regulator_put() are automatically called on driver
+ * detach. See regulator_get() and regulator_enable() for more
+ * information.
+ */
+int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return _devm_regulator_get_enable(dev, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_enable);
+
/**
* devm_regulator_get_optional - Resource managed regulator_get_optional()
* @dev: device to supply
@@ -194,6 +253,111 @@ int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_const);
+static int devm_regulator_bulk_match(struct device *dev, void *res,
+ void *data)
+{
+ struct regulator_bulk_devres *match = res;
+ struct regulator_bulk_data *target = data;
+
+ /*
+ * We check that the put uses the same consumer list as the get did.
+ * We _could_ scan all entries in the consumer array and check that
+ * the regulators match, but there is no need for that at the moment.
+ * We can change this later if needed.
+ */
+ return match->consumers == target;
+}
+
+/**
+ * devm_regulator_bulk_put - Resource managed regulator_bulk_put()
+ * @consumers: consumers to free
+ *
+ * Deallocate regulators allocated with devm_regulator_bulk_get(). Normally
+ * this function will not need to be called and the resource management
+ * code will ensure that the resource is freed.
+ */
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+ int rc;
+ struct regulator *regulator = consumers[0].consumer;
+
+ rc = devres_release(regulator->dev, devm_regulator_bulk_release,
+ devm_regulator_bulk_match, consumers);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_put);
+
+static void devm_regulator_bulk_disable(void *res)
+{
+ struct regulator_bulk_devres *devres = res;
+ int i;
+
+ for (i = 0; i < devres->num_consumers; i++)
+ regulator_disable(devres->consumers[i].consumer);
+}
+
+/**
+ * devm_regulator_bulk_get_enable - managed get'n enable multiple regulators
+ *
+ * @dev: device to supply
+ * @num_consumers: number of consumers to register
+ * @id: list of supply names or regulator IDs
+ *
+ * Return: 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to get several regulator
+ * consumers in one operation with management, the regulators will
+ * automatically be freed when the device is unbound. If any of the
+ * regulators cannot be acquired then any regulators that were
+ * allocated will be freed before returning to the caller.
+ */
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id)
+{
+ struct regulator_bulk_devres *devres;
+ struct regulator_bulk_data *consumers;
+ int i, ret;
+
+ devres = devm_kmalloc(dev, sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->consumers = devm_kcalloc(dev, num_consumers, sizeof(*consumers),
+ GFP_KERNEL);
+ consumers = devres->consumers;
+ if (!consumers)
+ return -ENOMEM;
+
+ devres->num_consumers = num_consumers;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].supply = id[i];
+
+ ret = devm_regulator_bulk_get(dev, num_consumers, consumers);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_consumers; i++) {
+ ret = regulator_enable(consumers[i].consumer);
+ if (ret)
+ goto unwind;
+ }
+
+ ret = devm_add_action(dev, devm_regulator_bulk_disable, devres);
+ if (!ret)
+ return 0;
+
+unwind:
+ while (--i >= 0)
+ regulator_disable(consumers[i].consumer);
+
+ devm_regulator_bulk_put(consumers);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_enable);
+
static void devm_rdev_release(struct device *dev, void *res)
{
regulator_unregister(*(struct regulator_dev **)res);
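
A hedged sketch of how a consumer driver might use the new get-and-enable helpers from its probe path; the supply names are illustrative, and the regulators are disabled and released automatically when the device is unbound:

	static const char * const example_supplies[] = { "vdd", "vddio" };

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		/* single supply kept enabled for the lifetime of the binding */
		ret = devm_regulator_get_enable(&pdev->dev, "vref");
		if (ret)
			return ret;

		/* or several supplies acquired and enabled in one call */
		return devm_regulator_bulk_get_enable(&pdev->dev,
						      ARRAY_SIZE(example_supplies),
						      example_supplies);
	}
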
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 5927d4f3eabd..95e61a2f43f5 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -220,6 +220,9 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
regtype);
}
+ if (of_find_property(np, "vin-supply", NULL))
+ config->input_supply = "vin";
+
return config;
}
@@ -259,6 +262,18 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *),
GFP_KERNEL);
+
+ if (config->input_supply) {
+ drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
+ config->input_supply,
+ GFP_KERNEL);
+ if (!drvdata->desc.supply_name) {
+ dev_err(&pdev->dev,
+ "Failed to allocate input supply\n");
+ return -ENOMEM;
+ }
+ }
+
if (!drvdata->gpiods)
return -ENOMEM;
for (i = 0; i < config->ngpios; i++) {
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 321bec6e3f8d..31b43426d47c 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -422,15 +422,13 @@ err:
return ret;
}
-static int lp8755_remove(struct i2c_client *client)
+static void lp8755_remove(struct i2c_client *client)
{
int icnt;
struct lp8755_chip *pchip = i2c_get_clientdata(client);
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
regmap_write(pchip->regmap, icnt, 0x00);
-
- return 0;
}
static const struct i2c_device_id lp8755_id[] = {
diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c
index 03c6027682d8..39f803ff0a90 100644
--- a/drivers/regulator/max597x-regulator.c
+++ b/drivers/regulator/max597x-regulator.c
@@ -137,7 +137,7 @@ static int max597x_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity,
static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
int severity, bool enable)
{
- int ret, val, reg;
+ int val, reg;
unsigned int vthst, vthfst;
struct max597x_regulator *data = rdev_get_drvdata(rdev);
@@ -183,9 +183,8 @@ static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
val = 0xFF;
reg = MAX5970_REG_DAC_FAST(rdev_id);
- ret = regmap_write(rdev->regmap, reg, val);
- return ret;
+ return regmap_write(rdev->regmap, reg, val);
}
static int max597x_get_status(struct regulator_dev *rdev)
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index fdcb0f508984..596cc36aaff6 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -434,9 +434,9 @@ static int max8973_init_dcdc(struct max8973_chip *max,
return ret;
}
-static int max8973_thermal_read_temp(void *data, int *temp)
+static int max8973_thermal_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct max8973_chip *mchip = data;
+ struct max8973_chip *mchip = tz->devdata;
unsigned int val;
int ret;
@@ -465,7 +465,7 @@ static irqreturn_t max8973_thermal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct thermal_zone_of_device_ops max77621_tz_ops = {
+static const struct thermal_zone_device_ops max77621_tz_ops = {
.get_temp = max8973_thermal_read_temp,
};
@@ -479,8 +479,8 @@ static int max8973_thermal_init(struct max8973_chip *mchip)
if (mchip->id != MAX77621)
return 0;
- tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip,
- &max77621_tz_ops);
+ tzd = devm_thermal_of_zone_register(mchip->dev, 0, mchip,
+ &max77621_tz_ops);
if (IS_ERR(tzd)) {
ret = PTR_ERR(tzd);
dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
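
The generic thermal-of API hands the sensor back through tz->devdata instead of a separate void *data argument, as the max8973 conversion above shows. A hedged sketch of the converted pattern in another driver (names are illustrative):

	static int example_get_temp(struct thermal_zone_device *tz, int *temp)
	{
		struct example_chip *chip = tz->devdata;

		*temp = example_read_millicelsius(chip);
		return 0;
	}

	static const struct thermal_zone_device_ops example_tz_ops = {
		.get_temp = example_get_temp,
	};

	/* probe fragment: sensor id 0, driver data, ops */
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_of_zone_register(chip->dev, 0, chip, &example_tz_ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);
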
diff --git a/drivers/regulator/mt6331-regulator.c b/drivers/regulator/mt6331-regulator.c
new file mode 100644
index 000000000000..56be9a3a84ab
--- /dev/null
+++ b/drivers/regulator/mt6331-regulator.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2022 Collabora Ltd.
+// Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+//
+// Based on mt6323-regulator.c,
+// Copyright (c) 2016 MediaTek Inc.
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6331/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6331-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6331_LDO_MODE_NORMAL 0
+#define MT6331_LDO_MODE_LP 1
+
+/*
+ * MT6331 regulators information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for query enable signal status of regulators
+ * @vselon_reg: Register sections for hardware control mode of bucks
+ * @vselctrl_reg: Register for controlling the buck control mode.
+ * @vselctrl_mask: Mask for query buck's voltage control mode.
+ * @status_reg: Register for regulator enable status where qi unavailable
+ * @status_mask: Mask for querying regulator enable status
+ */
+struct mt6331_regulator_info {
+ struct regulator_desc desc;
+ u32 qi;
+ u32 vselon_reg;
+ u32 vselctrl_reg;
+ u32 vselctrl_mask;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 status_reg;
+ u32 status_mask;
+};
+
+#define MT6331_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
+ vosel, vosel_mask, voselon, vosel_ctrl) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6331_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(13), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+ .status_mask = 0, \
+}
+
+#define MT6331_LDO_AO(match, vreg, ldo_volt_table, vosel, vosel_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6331_volt_table_ao_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ }, \
+}
+
+#define MT6331_LDO_S(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask, \
+ _status_reg, _status_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6331_volt_table_no_qi_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+ .status_reg = _status_reg, \
+ .status_mask = _status_mask, \
+}
+
+#define MT6331_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = (_modeset_reg ? \
+ &mt6331_volt_table_ops : \
+ &mt6331_volt_table_no_ms_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .qi = BIT(15), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+}
+
+#define MT6331_REG_FIXED(match, vreg, enreg, enbit, qibit, volt, \
+ _modeset_reg, _modeset_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = (_modeset_reg ? \
+ &mt6331_volt_fixed_ops : \
+ &mt6331_volt_fixed_no_ms_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .qi = BIT(qibit), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+}
+
+static const struct linear_range buck_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
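(For reference, a quick sanity check of this range, not part of the patch: REGULATOR_LINEAR_RANGE(min, min_sel, max_sel, step) maps a selector to min + sel * step, so selector 0x7f gives 700000 + 127 * 6250 = 1493750 uV, matching the 1493750 uV ceiling passed to the MT6331_BUCK() entries below, with (max - min)/step + 1 = 128 selectable voltages.)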
+
+static const unsigned int ldo_volt_table1[] = {
+ 2800000, 3000000, 0, 3200000
+};
+
+static const unsigned int ldo_volt_table2[] = {
+ 1500000, 1800000, 2500000, 2800000,
+};
+
+static const unsigned int ldo_volt_table3[] = {
+ 1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
+};
+
+static const unsigned int ldo_volt_table4[] = {
+ 0, 0, 1700000, 1800000, 1860000, 2760000, 3000000, 3100000,
+};
+
+static const unsigned int ldo_volt_table5[] = {
+ 1800000, 3300000, 1800000, 3300000,
+};
+
+static const unsigned int ldo_volt_table6[] = {
+ 3000000, 3300000,
+};
+
+static const unsigned int ldo_volt_table7[] = {
+ 1200000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
+};
+
+static const unsigned int ldo_volt_table8[] = {
+ 900000, 1000000, 1100000, 1220000, 1300000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int ldo_volt_table9[] = {
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1300000,
+};
+
+static const unsigned int ldo_volt_table10[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const unsigned int ldo_volt_table11[] = {
+ 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
+};
+
+static int mt6331_get_status(struct regulator_dev *rdev)
+{
+ struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 regval;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6331_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ val = MT6331_LDO_MODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6331_LDO_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val <<= ffs(info->modeset_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+}
+
+static unsigned int mt6331_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= info->modeset_mask;
+ val >>= ffs(info->modeset_mask) - 1;
+
+ return (val & BIT(0)) ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+}
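(The ffs()-based shifting in the two mode helpers above is a small generic bitfield idiom: ffs(mask) returns the 1-based position of the lowest set bit, so ffs(mask) - 1 is the field's shift. A minimal sketch of the same idea, with an illustrative mask taken from one of the LDO entries below:

    unsigned int mask = GENMASK(6, 5);          /* modeset field at bits 6:5 */
    unsigned int val  = 1 << (ffs(mask) - 1);   /* MT6331_LDO_MODE_LP -> 0x20 */
    /* set_mode: regmap_update_bits(map, reg, mask, val) touches only bits 6:5 */
    /* get_mode: (read_val & mask) >> (ffs(mask) - 1) recovers the raw mode    */
)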
+
+static const struct regulator_ops mt6331_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+};
+
+static const struct regulator_ops mt6331_volt_table_no_ms_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+};
+
+static const struct regulator_ops mt6331_volt_table_no_qi_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = mt6331_ldo_set_mode,
+ .get_mode = mt6331_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6331_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+ .set_mode = mt6331_ldo_set_mode,
+ .get_mode = mt6331_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6331_volt_table_ao_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops mt6331_volt_fixed_no_ms_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+};
+
+static const struct regulator_ops mt6331_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+ .set_mode = mt6331_ldo_set_mode,
+ .get_mode = mt6331_ldo_get_mode,
+};
+
+/* The array is indexed by id (MT6331_ID_XXX) */
+static struct mt6331_regulator_info mt6331_regulators[] = {
+ MT6331_BUCK("buck-vdvfs11", VDVFS11, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS11_CON9,
+ MT6331_VDVFS11_CON11, GENMASK(6, 0),
+ MT6331_VDVFS11_CON12, MT6331_VDVFS11_CON7),
+ MT6331_BUCK("buck-vdvfs12", VDVFS12, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS12_CON9,
+ MT6331_VDVFS12_CON11, GENMASK(6, 0),
+ MT6331_VDVFS12_CON12, MT6331_VDVFS12_CON7),
+ MT6331_BUCK("buck-vdvfs13", VDVFS13, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS13_CON9,
+ MT6331_VDVFS13_CON11, GENMASK(6, 0),
+ MT6331_VDVFS13_CON12, MT6331_VDVFS13_CON7),
+ MT6331_BUCK("buck-vdvfs14", VDVFS14, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS14_CON9,
+ MT6331_VDVFS14_CON11, GENMASK(6, 0),
+ MT6331_VDVFS14_CON12, MT6331_VDVFS14_CON7),
+ MT6331_BUCK("buck-vcore2", VCORE2, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VCORE2_CON9,
+ MT6331_VCORE2_CON11, GENMASK(6, 0),
+ MT6331_VCORE2_CON12, MT6331_VCORE2_CON7),
+ MT6331_REG_FIXED("buck-vio18", VIO18, MT6331_VIO18_CON9, 0, 13, 1800000, 0, 0),
+ MT6331_REG_FIXED("ldo-vrtc", VRTC, MT6331_DIGLDO_CON11, 8, 15, 2800000, 0, 0),
+ MT6331_REG_FIXED("ldo-vtcxo1", VTCXO1, MT6331_ANALDO_CON1, 10, 15, 2800000,
+ MT6331_ANALDO_CON1, GENMASK(1, 0)),
+ MT6331_REG_FIXED("ldo-vtcxo2", VTCXO2, MT6331_ANALDO_CON2, 10, 15, 2800000,
+ MT6331_ANALDO_CON2, GENMASK(1, 0)),
+ MT6331_REG_FIXED("ldo-vsram", VSRAM_DVFS1, MT6331_SYSLDO_CON4, 10, 15, 1012500,
+ MT6331_SYSLDO_CON4, GENMASK(1, 0)),
+ MT6331_REG_FIXED("ldo-vio28", VIO28, MT6331_DIGLDO_CON1, 10, 15, 2800000,
+ MT6331_DIGLDO_CON1, GENMASK(1, 0)),
+ MT6331_LDO("ldo-avdd32aud", AVDD32_AUD, ldo_volt_table1, MT6331_ANALDO_CON3, 10,
+ MT6331_ANALDO_CON10, GENMASK(6, 5), MT6331_ANALDO_CON3, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vauxa32", VAUXA32, ldo_volt_table1, MT6331_ANALDO_CON4, 10,
+ MT6331_ANALDO_CON6, GENMASK(6, 5), MT6331_ANALDO_CON4, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vemc33", VEMC33, ldo_volt_table6, MT6331_DIGLDO_CON5, 10,
+ MT6331_DIGLDO_CON17, BIT(6), MT6331_DIGLDO_CON5, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vibr", VIBR, ldo_volt_table3, MT6331_DIGLDO_CON12, 10,
+ MT6331_DIGLDO_CON20, GENMASK(6, 4), MT6331_DIGLDO_CON12, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vmc", VMC, ldo_volt_table5, MT6331_DIGLDO_CON3, 10,
+ MT6331_DIGLDO_CON15, GENMASK(5, 4), MT6331_DIGLDO_CON3, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vmch", VMCH, ldo_volt_table6, MT6331_DIGLDO_CON4, 10,
+ MT6331_DIGLDO_CON16, BIT(6), MT6331_DIGLDO_CON4, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vmipi", VMIPI, ldo_volt_table3, MT6331_SYSLDO_CON5, 10,
+ MT6331_SYSLDO_CON13, GENMASK(5, 3), MT6331_SYSLDO_CON5, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vsim1", VSIM1, ldo_volt_table4, MT6331_DIGLDO_CON8, 10,
+ MT6331_DIGLDO_CON21, GENMASK(6, 4), MT6331_DIGLDO_CON8, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vsim2", VSIM2, ldo_volt_table4, MT6331_DIGLDO_CON9, 10,
+ MT6331_DIGLDO_CON22, GENMASK(6, 4), MT6331_DIGLDO_CON9, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vusb10", VUSB10, ldo_volt_table9, MT6331_SYSLDO_CON2, 10,
+ MT6331_SYSLDO_CON10, GENMASK(5, 3), MT6331_SYSLDO_CON2, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vcama", VCAMA, ldo_volt_table2, MT6331_ANALDO_CON5, 15,
+ MT6331_ANALDO_CON9, GENMASK(5, 4), 0, 0),
+ MT6331_LDO_S("ldo-vcamaf", VCAM_AF, ldo_volt_table3, MT6331_DIGLDO_CON2, 10,
+ MT6331_DIGLDO_CON14, GENMASK(6, 4), MT6331_DIGLDO_CON2, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(0)),
+ MT6331_LDO_S("ldo-vcamd", VCAMD, ldo_volt_table8, MT6331_SYSLDO_CON1, 15,
+ MT6331_SYSLDO_CON9, GENMASK(6, 4), MT6331_SYSLDO_CON1, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(11)),
+ MT6331_LDO_S("ldo-vcamio", VCAM_IO, ldo_volt_table10, MT6331_SYSLDO_CON3, 10,
+ MT6331_SYSLDO_CON11, GENMASK(4, 3), MT6331_SYSLDO_CON3, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(13)),
+ MT6331_LDO_S("ldo-vgp1", VGP1, ldo_volt_table3, MT6331_DIGLDO_CON6, 10,
+ MT6331_DIGLDO_CON19, GENMASK(6, 4), MT6331_DIGLDO_CON6, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(4)),
+ MT6331_LDO_S("ldo-vgp2", VGP2, ldo_volt_table10, MT6331_SYSLDO_CON6, 10,
+ MT6331_SYSLDO_CON14, GENMASK(4, 3), MT6331_SYSLDO_CON6, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(15)),
+ MT6331_LDO_S("ldo-vgp3", VGP3, ldo_volt_table10, MT6331_SYSLDO_CON7, 10,
+ MT6331_SYSLDO_CON15, GENMASK(4, 3), MT6331_SYSLDO_CON7, GENMASK(1, 0),
+ MT6331_EN_STATUS2, BIT(0)),
+ MT6331_LDO_S("ldo-vgp4", VGP4, ldo_volt_table7, MT6331_DIGLDO_CON7, 10,
+ MT6331_DIGLDO_CON18, GENMASK(6, 4), MT6331_DIGLDO_CON7, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(5)),
+ MT6331_LDO_AO("ldo-vdig18", VDIG18, ldo_volt_table11,
+ MT6331_DIGLDO_CON28, GENMASK(14, 12)),
+};
+
+static int mt6331_set_buck_vosel_reg(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6331 = dev_get_drvdata(pdev->dev.parent);
+ int i;
+ u32 regval;
+
+ for (i = 0; i < MT6331_ID_VREG_MAX; i++) {
+ if (mt6331_regulators[i].vselctrl_reg) {
+ if (regmap_read(mt6331->regmap,
+ mt6331_regulators[i].vselctrl_reg,
+ &regval) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to read buck ctrl\n");
+ return -EIO;
+ }
+
+ if (regval & mt6331_regulators[i].vselctrl_mask) {
+ mt6331_regulators[i].desc.vsel_reg =
+ mt6331_regulators[i].vselon_reg;
+ }
+ }
+ }
+
+ return 0;
+}
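(Illustrative note, not part of the patch: each buck exposes a "normal" VOSEL register and an "_ON" copy used when the rail is under hardware control; bit 1 of the ctrl register reports which copy is live, and the loop above simply repoints desc.vsel_reg at it. Taking VDVFS11 from the table above as an example:

    regmap_read(regmap, MT6331_VDVFS11_CON7, &val);   /* vselctrl_reg  */
    if (val & BIT(1))                                  /* HW-controlled */
        desc.vsel_reg = MT6331_VDVFS11_CON12;          /* vselon_reg    */
    /* otherwise desc.vsel_reg stays at MT6331_VDVFS11_CON11 (vosel)    */
)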
+
+static int mt6331_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6331 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+ u32 reg_value;
+
+ /* Query the buck controller to select the active voltage register */
+ if (mt6331_set_buck_vosel_reg(pdev))
+ return -EIO;
+
+ /* Read PMIC chip revision to update constraints and voltage table */
+ if (regmap_read(mt6331->regmap, MT6331_HWCID, &reg_value) < 0) {
+ dev_err(&pdev->dev, "Failed to read Chip ID\n");
+ return -EIO;
+ }
+ reg_value &= GENMASK(7, 0);
+
+ dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+ /*
+ * Chip ID 0x10 is "MT6331 E1", which has a different voltage table
+ * that is not yet supported by this driver. When this ID is
+ * detected, refuse to register the regulators, as the VSEL values
+ * would be misinterpreted for this revision, potentially
+ * overvolting some device.
+ */
+ if (reg_value == 0x10) {
+ dev_err(&pdev->dev, "Chip version not supported. Bailing out.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MT6331_ID_VREG_MAX; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6331_regulators[i];
+ config.regmap = mt6331->regmap;
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6331_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6331_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct platform_device_id mt6331_platform_ids[] = {
+ {"mt6331-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6331_platform_ids);
+
+static struct platform_driver mt6331_regulator_driver = {
+ .driver = {
+ .name = "mt6331-regulator",
+ },
+ .probe = mt6331_regulator_probe,
+ .id_table = mt6331_platform_ids,
+};
+
+module_platform_driver(mt6331_regulator_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6331 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mt6332-regulator.c b/drivers/regulator/mt6332-regulator.c
new file mode 100644
index 000000000000..77a27d8127a3
--- /dev/null
+++ b/drivers/regulator/mt6332-regulator.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2022 Collabora Ltd.
+// Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+//
+// Based on mt6323-regulator.c,
+// Copyright (c) 2016 MediaTek Inc.
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6332/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6332-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6332_LDO_MODE_NORMAL 0
+#define MT6332_LDO_MODE_LP 1
+
+/*
+ * MT6332 regulators information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for querying the enable signal status of the regulator
+ * @vselon_reg: Register section for the hardware control mode of bucks
+ * @vselctrl_reg: Register controlling the buck voltage control mode
+ * @vselctrl_mask: Mask for querying the buck's voltage control mode
+ * @modeset_reg: Register for setting the LDO low-power/normal mode
+ * @modeset_mask: Mask of the LDO mode-setting field
+ * @status_reg: Register holding the regulator enable status where qi is unavailable
+ * @status_mask: Mask for querying the regulator enable status
+ */
+struct mt6332_regulator_info {
+ struct regulator_desc desc;
+ u32 qi;
+ u32 vselon_reg;
+ u32 vselctrl_reg;
+ u32 vselctrl_mask;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 status_reg;
+ u32 status_mask;
+};
+
+#define MT6332_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
+ vosel, vosel_mask, voselon, vosel_ctrl) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_buck_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(13), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+ .status_mask = 0, \
+}
+
+#define MT6332_LDO_LINEAR(match, vreg, min, max, step, volt_ranges, \
+ enreg, vosel, vosel_mask, voselon, \
+ vosel_ctrl, _modeset_reg, _modeset_mask) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_ldo_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(15), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+ .status_mask = 0, \
+}
+
+#define MT6332_LDO_AO(match, vreg, ldo_volt_table, vosel, vosel_mask) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_volt_table_ao_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ }, \
+}
+
+#define MT6332_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .qi = BIT(15), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+ .status_mask = 0, \
+}
+
+#define MT6332_REG_FIXED(match, vreg, enreg, enbit, qibit, volt, stbit) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .qi = BIT(qibit), \
+ .status_reg = MT6332_EN_STATUS0, \
+ .status_mask = BIT(stbit), \
+}
+
+static const struct linear_range boost_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(3500000, 0, 0x7f, 31250),
+};
+
+static const struct linear_range buck_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const struct linear_range buck_pa_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+};
+
+static const struct linear_range buck_rf_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(1050000, 0, 0x7f, 9375),
+};
+
+static const unsigned int ldo_volt_table1[] = {
+ 2800000, 3000000, 0, 3200000
+};
+
+static const unsigned int ldo_volt_table2[] = {
+ 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
+};
+
+static int mt6332_get_status(struct regulator_dev *rdev)
+{
+ struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 reg, en_mask, regval;
+ int ret;
+
+ if (info->qi > 0) {
+ reg = info->desc.enable_reg;
+ en_mask = info->qi;
+ } else {
+ reg = info->status_reg;
+ en_mask = info->status_mask;
+ }
+
+ ret = regmap_read(rdev->regmap, reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ return (regval & en_mask) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6332_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ val = MT6332_LDO_MODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6332_LDO_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val <<= ffs(info->modeset_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+}
+
+static unsigned int mt6332_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= info->modeset_mask;
+ val >>= ffs(info->modeset_mask) - 1;
+
+ return (val & BIT(0)) ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops mt6332_buck_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+};
+
+static const struct regulator_ops mt6332_ldo_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+ .set_mode = mt6332_ldo_set_mode,
+ .get_mode = mt6332_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6332_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+ .set_mode = mt6332_ldo_set_mode,
+ .get_mode = mt6332_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6332_volt_table_ao_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops mt6332_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+};
+
+/* The array is indexed by id (MT6332_ID_XXX) */
+static struct mt6332_regulator_info mt6332_regulators[] = {
+ MT6332_BUCK("buck-vdram", VDRAM, 700000, 1493750, 6250, buck_volt_range,
+ MT6332_EN_STATUS0, MT6332_VDRAM_CON11, GENMASK(6, 0),
+ MT6332_VDRAM_CON12, MT6332_VDRAM_CON7),
+ MT6332_BUCK("buck-vdvfs2", VDVFS2, 700000, 1312500, 6250, buck_volt_range,
+ MT6332_VDVFS2_CON9, MT6332_VDVFS2_CON11, GENMASK(6, 0),
+ MT6332_VDVFS2_CON12, MT6332_VDVFS2_CON7),
+ MT6332_BUCK("buck-vpa", VPA, 500000, 3400000, 50000, buck_pa_volt_range,
+ MT6332_VPA_CON9, MT6332_VPA_CON11, GENMASK(5, 0),
+ MT6332_VPA_CON12, MT6332_VPA_CON7),
+ MT6332_BUCK("buck-vrf18a", VRF1, 1050000, 2240625, 9375, buck_rf_volt_range,
+ MT6332_VRF1_CON9, MT6332_VRF1_CON11, GENMASK(6, 0),
+ MT6332_VRF1_CON12, MT6332_VRF1_CON7),
+ MT6332_BUCK("buck-vrf18b", VRF2, 1050000, 2240625, 9375, buck_rf_volt_range,
+ MT6332_VRF2_CON9, MT6332_VRF2_CON11, GENMASK(6, 0),
+ MT6332_VRF2_CON12, MT6332_VRF2_CON7),
+ MT6332_BUCK("buck-vsbst", VSBST, 3500000, 7468750, 31250, boost_volt_range,
+ MT6332_VSBST_CON8, MT6332_VSBST_CON12, GENMASK(6, 0),
+ MT6332_VSBST_CON13, MT6332_VSBST_CON8),
+ MT6332_LDO("ldo-vauxb32", VAUXB32, ldo_volt_table1, MT6332_LDO_CON1, 10,
+ MT6332_LDO_CON9, GENMASK(6, 5), MT6332_LDO_CON1, GENMASK(1, 0)),
+ MT6332_REG_FIXED("ldo-vbif28", VBIF28, MT6332_LDO_CON2, 10, 0, 2800000, 1),
+ MT6332_REG_FIXED("ldo-vusb33", VUSB33, MT6332_LDO_CON3, 10, 0, 3300000, 2),
+ MT6332_LDO_LINEAR("ldo-vsram", VSRAM_DVFS2, 700000, 1493750, 6250, buck_volt_range,
+ MT6332_EN_STATUS0, MT6332_LDO_CON8, GENMASK(15, 9),
+ MT6332_VDVFS2_CON23, MT6332_VDVFS2_CON22,
+ MT6332_LDO_CON5, GENMASK(1, 0)),
+ MT6332_LDO_AO("ldo-vdig18", VDIG18, ldo_volt_table2, MT6332_LDO_CON12, GENMASK(11, 9)),
+};
+
+static int mt6332_set_buck_vosel_reg(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6332 = dev_get_drvdata(pdev->dev.parent);
+ int i;
+ u32 regval;
+
+ for (i = 0; i < MT6332_ID_VREG_MAX; i++) {
+ if (mt6332_regulators[i].vselctrl_reg) {
+ if (regmap_read(mt6332->regmap,
+ mt6332_regulators[i].vselctrl_reg,
+ &regval) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to read buck ctrl\n");
+ return -EIO;
+ }
+
+ if (regval & mt6332_regulators[i].vselctrl_mask) {
+ mt6332_regulators[i].desc.vsel_reg =
+ mt6332_regulators[i].vselon_reg;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt6332_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6332 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+ u32 reg_value;
+
+ /* Query the buck controller to select the active voltage register */
+ if (mt6332_set_buck_vosel_reg(pdev))
+ return -EIO;
+
+ /* Read PMIC chip revision to update constraints and voltage table */
+ if (regmap_read(mt6332->regmap, MT6332_HWCID, &reg_value) < 0) {
+ dev_err(&pdev->dev, "Failed to read Chip ID\n");
+ return -EIO;
+ }
+ reg_value &= GENMASK(7, 0);
+
+ dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+ /*
+ * Chip ID 0x10 is "MT6332 E1", which has a different voltage table
+ * that is not yet supported by this driver. When this ID is
+ * detected, refuse to register the regulators, as the VSEL values
+ * would be misinterpreted for this revision, potentially
+ * overvolting some device.
+ */
+ if (reg_value == 0x10) {
+ dev_err(&pdev->dev, "Chip version not supported. Bailing out.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MT6332_ID_VREG_MAX; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6332_regulators[i];
+ config.regmap = mt6332->regmap;
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6332_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6332_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct platform_device_id mt6332_platform_ids[] = {
+ {"mt6332-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6332_platform_ids);
+
+static struct platform_driver mt6332_regulator_driver = {
+ .driver = {
+ .name = "mt6332-regulator",
+ },
+ .probe = mt6332_regulator_probe,
+ .id_table = mt6332_platform_ids,
+};
+
+module_platform_driver(mt6332_regulator_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6332 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index e12b681c72e5..0aff1c2886b5 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -676,7 +676,7 @@ clean:
}
/**
- * of_parse_coupled regulator - Get regulator_dev pointer from rdev's property
+ * of_parse_coupled_regulator() - Get regulator_dev pointer from rdev's property
* @rdev: Pointer to regulator_dev, whose DTS is used as a source to parse
* "regulator-coupled-with" property
* @index: Index in phandles array
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 6b617024a67d..d899d6e98fb8 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -766,7 +766,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ regulator_num * sizeof(struct pfuze_regulator));
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 561de6b2e6e3..4158ff126a67 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -306,9 +306,10 @@ static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
}
/**
- * rpmh_regulator_vrm_set_load() - set the regulator mode based upon the load
- * current requested
+ * rpmh_regulator_vrm_get_optimum_mode() - get the mode based on the load
* @rdev: Regulator device pointer for the rpmh-regulator
+ * @input_uV: Input voltage
+ * @output_uV: Output voltage
* @load_uA: Aggregated load current in microamps
*
* This function is used in the regulator_ops for VRM type RPMh regulator
@@ -316,17 +317,15 @@ static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
*
* Return: 0 on success, errno on failure
*/
-static int rpmh_regulator_vrm_set_load(struct regulator_dev *rdev, int load_uA)
+static unsigned int rpmh_regulator_vrm_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA)
{
struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
- unsigned int mode;
if (load_uA >= vreg->hw_data->hpm_min_load_uA)
- mode = REGULATOR_MODE_NORMAL;
+ return REGULATOR_MODE_NORMAL;
else
- mode = REGULATOR_MODE_IDLE;
-
- return rpmh_regulator_vrm_set_mode(rdev, mode);
+ return REGULATOR_MODE_IDLE;
}
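(For context, a sketch using the standard regulator consumer API, not part of this patch: with .get_optimum_mode wired up instead of .set_load, the regulator core computes the target mode from the aggregated load itself and then applies it through .set_mode, so load-based mode management still works for consumers that only state loads. The supply name below is illustrative:

    struct regulator *reg = devm_regulator_get(dev, "vdda");
    regulator_set_load(reg, 300000);   /* core: mode = get_optimum_mode(...); then set_mode(rdev, mode) */
)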
static int rpmh_regulator_vrm_set_bypass(struct regulator_dev *rdev,
@@ -375,7 +374,7 @@ static const struct regulator_ops rpmh_regulator_vrm_drms_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.set_mode = rpmh_regulator_vrm_set_mode,
.get_mode = rpmh_regulator_vrm_get_mode,
- .set_load = rpmh_regulator_vrm_set_load,
+ .get_optimum_mode = rpmh_regulator_vrm_get_optimum_mode,
};
static const struct regulator_ops rpmh_regulator_vrm_bypass_ops = {
@@ -1199,6 +1198,52 @@ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pm660_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic4_hfsmps3, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic4_hfsmps3, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic4_hfsmps3, "vdd-s6"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic4_nldo, "vdd-l1-l6-l7"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic4_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic4_nldo, "vdd-l2-l3"),
+ /* ldo4 is inaccessible on PM660 */
+ RPMH_VREG("ldo5", "ldo%s5", &pmic4_nldo, "vdd-l5"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic4_nldo, "vdd-l1-l6-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic4_nldo, "vdd-l1-l6-l7"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ {}
+};
+
+static const struct rpmh_vreg_init_data pm660l_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic4_ftsmps426, "vdd-s5"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic4_nldo, "vdd-l1-l9-l10"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic4_pldo, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic4_pldo, "vdd-l4-l6"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic4_pldo, "vdd-l4-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
+ {}
+};
+
static int rpmh_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1321,6 +1366,14 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.compatible = "qcom,pmr735a-rpmh-regulators",
.data = pmr735a_vreg_data,
},
+ {
+ .compatible = "qcom,pm660-rpmh-regulators",
+ .data = pm660_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm660l-rpmh-regulators",
+ .data = pm660l_vreg_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7f9d66ac37ff..3c41b71a1f52 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -802,6 +802,12 @@ static const struct rpm_regulator_data rpm_pm8018_regulators[] = {
};
static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
+ { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l2", QCOM_RPM_PM8058_LDO2, &pm8058_pldo, "vdd_l2_l11_l12" },
@@ -829,12 +835,6 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
{ "l24", QCOM_RPM_PM8058_LDO24, &pm8058_nldo, "vdd_l23_l24_l25" },
{ "l25", QCOM_RPM_PM8058_LDO25, &pm8058_nldo, "vdd_l23_l24_l25" },
- { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8058_LVS0, &pm8058_switch, "vdd_l0_l1_lvs" },
{ "lvs1", QCOM_RPM_PM8058_LVS1, &pm8058_switch, "vdd_l0_l1_lvs" },
@@ -843,6 +843,12 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
};
static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
+ { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8901_LDO0, &pm8901_nldo, "vdd_l0" },
{ "l1", QCOM_RPM_PM8901_LDO1, &pm8901_pldo, "vdd_l1" },
{ "l2", QCOM_RPM_PM8901_LDO2, &pm8901_pldo, "vdd_l2" },
@@ -851,12 +857,6 @@ static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
{ "l5", QCOM_RPM_PM8901_LDO5, &pm8901_pldo, "vdd_l5" },
{ "l6", QCOM_RPM_PM8901_LDO6, &pm8901_pldo, "vdd_l6" },
- { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8901_LVS0, &pm8901_switch, "lvs0_in" },
{ "lvs1", QCOM_RPM_PM8901_LVS1, &pm8901_switch, "lvs1_in" },
{ "lvs2", QCOM_RPM_PM8901_LVS2, &pm8901_switch, "lvs2_in" },
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 59024c639141..f98168d58dce 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -668,6 +668,15 @@ static const struct regulator_desc pm660l_bob = {
.ops = &rpm_bob_ops,
};
+static const struct regulator_desc pm6125_ftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 268, 4000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 269,
+ .ops = &rpm_smps_ldo_ops,
+};
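(A quick consistency check on the new range, not part of the patch: 269 selectors spanning 0..268 at 4000 uV per step on top of 300000 uV give 300000 + 268 * 4000 = 1372000 uV as the top of the pm6125_ftsmps range.)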
+
static const struct regulator_desc pms405_hfsmps3 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
@@ -772,6 +781,158 @@ static const struct rpm_regulator_data rpm_mp5496_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm2250_lvftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm2250_lvftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm2250_lvftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm2250_ftsmps, "vdd_s4" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ {}
+};
+
+static const struct rpm_regulator_data rpm_pm6125_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm6125_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm6125_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm6125_ftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm6125_ftsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8998_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8998_hfsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8998_hfsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPA, 8, &pm6125_ftsmps, "vdd_s8" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l2_l3_l4" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3_l4" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_nldo660, "vdd_l2_l3_l4" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l6_l8" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l6_l8" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l9_l11" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l9_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l12_l16" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l12_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm660_pldo660, "vdd_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm660_pldo660, "vdd_l23_l24" },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm660_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm660_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm660_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm660_ftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm660_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm660_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm660_hfsmps, "vdd_s6" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l6_l7" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_ht_nldo, "vdd_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3" },
+ /* l4 is inaccessible on PM660 */
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_ht_nldo, "vdd_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_ht_nldo, "vdd_l1_l6_l7" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_ht_nldo, "vdd_l1_l6_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm660l_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pm660_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pm660_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_RWCX, 0, &pm660_ftsmps, "vdd_s3_s4" },
+ { "s5", QCOM_SMD_RPM_RWMX, 0, &pm660_ftsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOB, 1, &pm660_nldo660, "vdd_l1_l9_l10" },
+ { "l2", QCOM_SMD_RPM_LDOB, 2, &pm660_pldo660, "vdd_l2" },
+ { "l3", QCOM_SMD_RPM_LDOB, 3, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l4", QCOM_SMD_RPM_LDOB, 4, &pm660_pldo660, "vdd_l4_l6" },
+ { "l5", QCOM_SMD_RPM_LDOB, 5, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l6", QCOM_SMD_RPM_LDOB, 6, &pm660_pldo660, "vdd_l4_l6" },
+ { "l7", QCOM_SMD_RPM_LDOB, 7, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l8", QCOM_SMD_RPM_LDOB, 8, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l9", QCOM_SMD_RPM_RWLC, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
+ { "l10", QCOM_SMD_RPM_RWLM, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
+ { "bob", QCOM_SMD_RPM_BOBB, 1, &pm660l_bob, "vdd_bob", },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
@@ -833,44 +994,6 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
- { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
- { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
- { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
- {}
-};
-
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
@@ -912,57 +1035,6 @@ static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pma8084_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pma8084_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pma8084_hfsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pma8084_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pma8084_hfsmps, "vdd_s5" },
- { "s6", QCOM_SMD_RPM_SMPA, 6, &pma8084_ftsmps, "vdd_s6" },
- { "s7", QCOM_SMD_RPM_SMPA, 7, &pma8084_ftsmps, "vdd_s7" },
- { "s8", QCOM_SMD_RPM_SMPA, 8, &pma8084_ftsmps, "vdd_s8" },
- { "s9", QCOM_SMD_RPM_SMPA, 9, &pma8084_ftsmps, "vdd_s9" },
- { "s10", QCOM_SMD_RPM_SMPA, 10, &pma8084_ftsmps, "vdd_s10" },
- { "s11", QCOM_SMD_RPM_SMPA, 11, &pma8084_ftsmps, "vdd_s11" },
- { "s12", QCOM_SMD_RPM_SMPA, 12, &pma8084_ftsmps, "vdd_s12" },
-
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pma8084_nldo, "vdd_l1_l11" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pma8084_pldo, "vdd_l5_l7" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pma8084_pldo, "vdd_l5_l7" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pma8084_pldo, "vdd_l8" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pma8084_nldo, "vdd_l1_l11" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pma8084_pldo, "vdd_l16_l25" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pma8084_pldo, "vdd_l17" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pma8084_pldo, "vdd_l18" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pma8084_pldo, "vdd_l19" },
- { "l20", QCOM_SMD_RPM_LDOA, 20, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l21", QCOM_SMD_RPM_LDOA, 21, &pma8084_pldo, "vdd_l21" },
- { "l22", QCOM_SMD_RPM_LDOA, 22, &pma8084_pldo, "vdd_l22" },
- { "l23", QCOM_SMD_RPM_LDOA, 23, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l24", QCOM_SMD_RPM_LDOA, 24, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l25", QCOM_SMD_RPM_LDOA, 25, &pma8084_pldo, "vdd_l16_l25" },
- { "l26", QCOM_SMD_RPM_LDOA, 26, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l27", QCOM_SMD_RPM_LDOA, 27, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
-
- { "lvs1", QCOM_SMD_RPM_VSA, 1, &pma8084_switch },
- { "lvs2", QCOM_SMD_RPM_VSA, 2, &pma8084_switch },
- { "lvs3", QCOM_SMD_RPM_VSA, 3, &pma8084_switch },
- { "lvs4", QCOM_SMD_RPM_VSA, 4, &pma8084_switch },
- { "5vs1", QCOM_SMD_RPM_VSA, 5, &pma8084_switch },
-
- {}
-};
-
static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8950_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
@@ -1082,14 +1154,6 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
- { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
- {}
-};
-
static const struct rpm_regulator_data rpm_pm8998_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_ftsmps, "vdd_s2" },
@@ -1137,57 +1201,68 @@ static const struct rpm_regulator_data rpm_pm8998_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
- { "bob", QCOM_SMD_RPM_BOBB, 1, &pmi8998_bob, "vdd_bob" },
+static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pma8084_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pma8084_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pma8084_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pma8084_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pma8084_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pma8084_ftsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pma8084_ftsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPA, 8, &pma8084_ftsmps, "vdd_s8" },
+ { "s9", QCOM_SMD_RPM_SMPA, 9, &pma8084_ftsmps, "vdd_s9" },
+ { "s10", QCOM_SMD_RPM_SMPA, 10, &pma8084_ftsmps, "vdd_s10" },
+ { "s11", QCOM_SMD_RPM_SMPA, 11, &pma8084_ftsmps, "vdd_s11" },
+ { "s12", QCOM_SMD_RPM_SMPA, 12, &pma8084_ftsmps, "vdd_s12" },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pma8084_nldo, "vdd_l1_l11" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pma8084_pldo, "vdd_l5_l7" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pma8084_pldo, "vdd_l5_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pma8084_pldo, "vdd_l8" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pma8084_nldo, "vdd_l1_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pma8084_pldo, "vdd_l16_l25" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pma8084_pldo, "vdd_l17" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pma8084_pldo, "vdd_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pma8084_pldo, "vdd_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pma8084_pldo, "vdd_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pma8084_pldo, "vdd_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pma8084_pldo, "vdd_l16_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pma8084_switch },
+ { "lvs2", QCOM_SMD_RPM_VSA, 2, &pma8084_switch },
+ { "lvs3", QCOM_SMD_RPM_VSA, 3, &pma8084_switch },
+ { "lvs4", QCOM_SMD_RPM_VSA, 4, &pma8084_switch },
+ { "5vs1", QCOM_SMD_RPM_VSA, 5, &pma8084_switch },
+
{}
};
-static const struct rpm_regulator_data rpm_pm660_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pm660_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pm660_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pm660_ftsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pm660_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pm660_hfsmps, "vdd_s5" },
- { "s6", QCOM_SMD_RPM_SMPA, 6, &pm660_hfsmps, "vdd_s6" },
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l6_l7" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_ht_nldo, "vdd_l2_l3" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3" },
- /* l4 is unaccessible on PM660 */
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_ht_nldo, "vdd_l5" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_ht_nldo, "vdd_l1_l6_l7" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_ht_nldo, "vdd_l1_l6_l7" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { }
+static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
+ { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
+ {}
};
-static const struct rpm_regulator_data rpm_pm660l_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPB, 1, &pm660_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPB, 2, &pm660_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_RWCX, 0, &pm660_ftsmps, "vdd_s3_s4" },
- { "s5", QCOM_SMD_RPM_RWMX, 0, &pm660_ftsmps, "vdd_s5" },
- { "l1", QCOM_SMD_RPM_LDOB, 1, &pm660_nldo660, "vdd_l1_l9_l10" },
- { "l2", QCOM_SMD_RPM_LDOB, 2, &pm660_pldo660, "vdd_l2" },
- { "l3", QCOM_SMD_RPM_LDOB, 3, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l4", QCOM_SMD_RPM_LDOB, 4, &pm660_pldo660, "vdd_l4_l6" },
- { "l5", QCOM_SMD_RPM_LDOB, 5, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l6", QCOM_SMD_RPM_LDOB, 6, &pm660_pldo660, "vdd_l4_l6" },
- { "l7", QCOM_SMD_RPM_LDOB, 7, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l8", QCOM_SMD_RPM_LDOB, 8, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l9", QCOM_SMD_RPM_RWLC, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
- { "l10", QCOM_SMD_RPM_RWLM, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
- { "bob", QCOM_SMD_RPM_BOBB, 1, &pm660l_bob, "vdd_bob", },
- { }
+static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
+ { "bob", QCOM_SMD_RPM_BOBB, 1, &pmi8998_bob, "vdd_bob" },
+ {}
};
static const struct rpm_regulator_data rpm_pms405_regulators[] = {
@@ -1212,54 +1287,25 @@ static const struct rpm_regulator_data rpm_pms405_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pm2250_lvftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pm2250_lvftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pm2250_lvftsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pm2250_ftsmps, "vdd_s4" },
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- {}
-};
-
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
+ { .compatible = "qcom,rpm-pm2250-regulators", .data = &rpm_pm2250_regulators },
+ { .compatible = "qcom,rpm-pm6125-regulators", .data = &rpm_pm6125_regulators },
+ { .compatible = "qcom,rpm-pm660-regulators", .data = &rpm_pm660_regulators },
+ { .compatible = "qcom,rpm-pm660l-regulators", .data = &rpm_pm660l_regulators },
+ { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
- { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
- { .compatible = "qcom,rpm-pm660-regulators", .data = &rpm_pm660_regulators },
- { .compatible = "qcom,rpm-pm660l-regulators", .data = &rpm_pm660l_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
{ .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators },
{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
{ .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators },
- { .compatible = "qcom,rpm-pm2250-regulators", .data = &rpm_pm2250_regulators },
{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
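The reordered rpm_of_match table above is what ties each PMIC compatible to its regulator list; the probe path retrieves that list via the OF match data. A minimal sketch, illustrative only (the probe name and loop body are assumptions, not part of this patch):

	/* Hypothetical probe excerpt showing how rpm_of_match is consumed. */
	static int example_smd_regulator_probe(struct platform_device *pdev)
	{
		const struct rpm_regulator_data *reg;

		/* Returns the .data pointer of the matching rpm_of_match entry. */
		reg = of_device_get_match_data(&pdev->dev);
		if (!reg)
			return -ENODEV;

		for (; reg->name; reg++) {
			/* register one regulator per table entry */
		}

		return 0;
	}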
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index a2d0292a92fd..3e312729741e 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -99,6 +99,9 @@ enum spmi_regulator_logical_type {
SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO,
SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS426,
SPMI_REGULATOR_LOGICAL_TYPE_HFS430,
+ SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3,
+ SPMI_REGULATOR_LOGICAL_TYPE_LDO_510,
+ SPMI_REGULATOR_LOGICAL_TYPE_HFSMPS,
};
enum spmi_regulator_type {
@@ -166,6 +169,17 @@ enum spmi_regulator_subtype {
SPMI_REGULATOR_SUBTYPE_HFS430 = 0x0a,
SPMI_REGULATOR_SUBTYPE_HT_P150 = 0x35,
SPMI_REGULATOR_SUBTYPE_HT_P600 = 0x3d,
+ SPMI_REGULATOR_SUBTYPE_HFSMPS_510 = 0x0a,
+ SPMI_REGULATOR_SUBTYPE_FTSMPS_510 = 0x0b,
+ SPMI_REGULATOR_SUBTYPE_LV_P150_510 = 0x71,
+ SPMI_REGULATOR_SUBTYPE_LV_P300_510 = 0x72,
+ SPMI_REGULATOR_SUBTYPE_LV_P600_510 = 0x73,
+ SPMI_REGULATOR_SUBTYPE_N300_510 = 0x6a,
+ SPMI_REGULATOR_SUBTYPE_N600_510 = 0x6b,
+ SPMI_REGULATOR_SUBTYPE_N1200_510 = 0x6c,
+ SPMI_REGULATOR_SUBTYPE_MV_P50_510 = 0x7a,
+ SPMI_REGULATOR_SUBTYPE_MV_P150_510 = 0x7b,
+ SPMI_REGULATOR_SUBTYPE_MV_P600_510 = 0x7d,
};
enum spmi_common_regulator_registers {
@@ -193,6 +207,14 @@ enum spmi_ftsmps426_regulator_registers {
SPMI_FTSMPS426_REG_VOLTAGE_ULS_MSB = 0x69,
};
+/*
+ * Third common register layout
+ */
+enum spmi_hfsmps_regulator_registers {
+ SPMI_HFSMPS_REG_STEP_CTRL = 0x3c,
+ SPMI_HFSMPS_REG_PULL_DOWN = 0xa0,
+};
+
enum spmi_vs_registers {
SPMI_VS_REG_OCP = 0x4a,
SPMI_VS_REG_SOFT_START = 0x4c,
@@ -260,6 +282,15 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS426_MODE_MASK 0x07
+/* Third common regulator mode register values */
+#define SPMI_HFSMPS_MODE_BYPASS_MASK 2
+#define SPMI_HFSMPS_MODE_RETENTION_MASK 3
+#define SPMI_HFSMPS_MODE_LPM_MASK 4
+#define SPMI_HFSMPS_MODE_AUTO_MASK 6
+#define SPMI_HFSMPS_MODE_HPM_MASK 7
+
+#define SPMI_HFSMPS_MODE_MASK 0x07
+
/* Common regulator pull down control register layout */
#define SPMI_COMMON_PULL_DOWN_ENABLE_MASK 0x80
@@ -305,6 +336,9 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS_STEP_MARGIN_NUM 4
#define SPMI_FTSMPS_STEP_MARGIN_DEN 5
+/* slew_rate has units of uV/us. */
+#define SPMI_HFSMPS_SLEW_RATE_38p4 38400
+
#define SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK 0x03
#define SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT 0
@@ -554,6 +588,14 @@ static struct spmi_voltage_range ht_p600_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1704000, 1704000, 1896000, 1896000, 8000),
};
+static struct spmi_voltage_range nldo_510_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 320000, 320000, 1304000, 1304000, 8000),
+};
+
+static struct spmi_voltage_range ftsmps510_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 300000, 300000, 1372000, 1372000, 4000),
+};
+
static DEFINE_SPMI_SET_POINTS(pldo);
static DEFINE_SPMI_SET_POINTS(nldo1);
static DEFINE_SPMI_SET_POINTS(nldo2);
@@ -576,6 +618,8 @@ static DEFINE_SPMI_SET_POINTS(ht_nldo);
static DEFINE_SPMI_SET_POINTS(hfs430);
static DEFINE_SPMI_SET_POINTS(ht_p150);
static DEFINE_SPMI_SET_POINTS(ht_p600);
+static DEFINE_SPMI_SET_POINTS(nldo_510);
+static DEFINE_SPMI_SET_POINTS(ftsmps510);
static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf,
int len)
@@ -1062,6 +1106,23 @@ static unsigned int spmi_regulator_ftsmps426_get_mode(struct regulator_dev *rdev
}
}
+static unsigned int spmi_regulator_hfsmps_get_mode(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 reg;
+
+ spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);
+
+ switch (reg) {
+ case SPMI_HFSMPS_MODE_HPM_MASK:
+ return REGULATOR_MODE_NORMAL;
+ case SPMI_HFSMPS_MODE_AUTO_MASK:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_IDLE;
+ }
+}
+
static int
spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
@@ -1109,6 +1170,33 @@ spmi_regulator_ftsmps426_set_mode(struct regulator_dev *rdev, unsigned int mode)
}
static int
+spmi_regulator_hfsmps_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 mask = SPMI_HFSMPS_MODE_MASK;
+ u8 val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = SPMI_HFSMPS_MODE_HPM_MASK;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = SPMI_HFSMPS_MODE_AUTO_MASK;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = vreg->logical_type ==
+ SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3 ?
+ SPMI_HFSMPS_MODE_RETENTION_MASK :
+ SPMI_HFSMPS_MODE_LPM_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
+}
+
+static int
spmi_regulator_common_set_load(struct regulator_dev *rdev, int load_uA)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -1131,6 +1219,15 @@ static int spmi_regulator_common_set_pull_down(struct regulator_dev *rdev)
mask, mask);
}
+static int spmi_regulator_hfsmps_set_pull_down(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ unsigned int mask = SPMI_COMMON_PULL_DOWN_ENABLE_MASK;
+
+ return spmi_vreg_update_bits(vreg, SPMI_HFSMPS_REG_PULL_DOWN,
+ mask, mask);
+}
+
static int spmi_regulator_common_set_soft_start(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -1465,6 +1562,21 @@ static const struct regulator_ops spmi_hfs430_ops = {
.get_mode = spmi_regulator_ftsmps426_get_mode,
};
+static const struct regulator_ops spmi_hfsmps_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = spmi_regulator_ftsmps426_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ftsmps426_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
+ .list_voltage = spmi_regulator_common_list_voltage,
+ .set_mode = spmi_regulator_hfsmps_set_mode,
+ .get_mode = spmi_regulator_hfsmps_get_mode,
+ .set_load = spmi_regulator_common_set_load,
+ .set_pull_down = spmi_regulator_hfsmps_set_pull_down,
+};
+
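The new spmi_hfsmps_ops hook the HFSMPS/FTSMPS3 mode handling into the regulator core, so a framework mode request is translated into the SPMI_HFSMPS_MODE_* values defined above. A hedged consumer-side sketch (the helper name is an assumption, not part of this patch):

	/*
	 * REGULATOR_MODE_FAST is written out as SPMI_HFSMPS_MODE_AUTO_MASK (6),
	 * REGULATOR_MODE_NORMAL as SPMI_HFSMPS_MODE_HPM_MASK (7).
	 */
	static int example_force_auto_mode(struct regulator *supply)
	{
		return regulator_set_mode(supply, REGULATOR_MODE_FAST);
	}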
/* Maximum possible digital major revision value */
#define INF 0xFF
@@ -1473,7 +1585,8 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
SPMI_VREG(LDO, HT_P600, 0, INF, HFS430, hfs430, ht_p600, 10000),
SPMI_VREG(LDO, HT_P150, 0, INF, HFS430, hfs430, ht_p150, 10000),
SPMI_VREG(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000),
- SPMI_VREG(BUCK, HFS430, 0, INF, HFS430, hfs430, hfs430, 10000),
+ SPMI_VREG(BUCK, HFS430, 0, 3, HFS430, hfs430, hfs430, 10000),
+ SPMI_VREG(BUCK, HFSMPS_510, 4, INF, HFSMPS, hfsmps, hfs430, 100000),
SPMI_VREG(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
SPMI_VREG(LDO, N600, 0, 0, LDO, ldo, nldo2, 10000),
SPMI_VREG(LDO, N1200, 0, 0, LDO, ldo, nldo2, 10000),
@@ -1549,6 +1662,16 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
SPMI_VREG(ULT_LDO, P300, 0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
SPMI_VREG(ULT_LDO, P150, 0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
SPMI_VREG(ULT_LDO, P50, 0, INF, ULT_LDO, ult_ldo, ult_pldo, 5000),
+ SPMI_VREG(LDO, LV_P150_510, 0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
+ SPMI_VREG(LDO, LV_P300_510, 0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
+ SPMI_VREG(LDO, LV_P600_510, 0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
+ SPMI_VREG(LDO, MV_P50_510, 0, INF, LDO_510, hfsmps, pldo660, 10000),
+ SPMI_VREG(LDO, MV_P150_510, 0, INF, LDO_510, hfsmps, pldo660, 10000),
+ SPMI_VREG(LDO, MV_P600_510, 0, INF, LDO_510, hfsmps, pldo660, 10000),
+ SPMI_VREG(LDO, N300_510, 0, INF, LDO_510, hfsmps, nldo_510, 10000),
+ SPMI_VREG(LDO, N600_510, 0, INF, LDO_510, hfsmps, nldo_510, 10000),
+ SPMI_VREG(LDO, N1200_510, 0, INF, LDO_510, hfsmps, nldo_510, 10000),
+ SPMI_VREG(FTS, FTSMPS_510, 0, INF, FTSMPS3, hfsmps, ftsmps510, 100000),
};
static void spmi_calculate_num_voltages(struct spmi_voltage_set_points *points)
@@ -1696,6 +1819,26 @@ static int spmi_regulator_init_slew_rate_ftsmps426(struct spmi_regulator *vreg,
return ret;
}
+static int spmi_regulator_init_slew_rate_hfsmps(struct spmi_regulator *vreg)
+{
+ int ret;
+ u8 reg = 0;
+ int delay;
+
+ ret = spmi_vreg_read(vreg, SPMI_HFSMPS_REG_STEP_CTRL, &reg, 1);
+ if (ret) {
+ dev_err(vreg->dev, "spmi read failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ delay = reg & SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK;
+ delay >>= SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT;
+
+ vreg->slew_rate = SPMI_HFSMPS_SLEW_RATE_38p4 >> delay;
+
+ return ret;
+}
+
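The slew rate above halves the 38.4 mV/us base value once per unit of the two-bit delay field read back from STEP_CTRL. A worked example with an assumed register value, illustrative only:

	u8 step_ctrl = 0x02;	/* assumed STEP_CTRL readback */
	int delay = (step_ctrl & SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK) >>
			SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT;
	int slew_rate = SPMI_HFSMPS_SLEW_RATE_38p4 >> delay;	/* 38400 >> 2 = 9600 uV/us */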
static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
const struct spmi_regulator_init_data *data)
{
@@ -1846,6 +1989,12 @@ static int spmi_regulator_of_parse(struct device_node *node,
if (ret)
return ret;
break;
+ case SPMI_REGULATOR_LOGICAL_TYPE_HFSMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3:
+ ret = spmi_regulator_init_slew_rate_hfsmps(vreg);
+ if (ret)
+ return ret;
+ break;
default:
break;
}
@@ -1872,40 +2021,100 @@ static int spmi_regulator_of_parse(struct device_node *node,
return 0;
}
-static const struct spmi_regulator_data pm8941_regulators[] = {
+static const struct spmi_regulator_data pm6125_regulators[] = {
+ { "s1", 0x1400, "vdd_s1" },
+ { "s2", 0x1700, "vdd_s2" },
+ { "s3", 0x1a00, "vdd_s3" },
+ { "s4", 0x1d00, "vdd_s4" },
+ { "s5", 0x2000, "vdd_s5" },
+ { "s6", 0x2300, "vdd_s6" },
+ { "s7", 0x2600, "vdd_s7" },
+ { "s8", 0x2900, "vdd_s8" },
+ { "l1", 0x4000, "vdd_l1_l7_l17_l18" },
+ { "l2", 0x4100, "vdd_l2_l3_l4" },
+ { "l3", 0x4200, "vdd_l2_l3_l4" },
+ { "l4", 0x4300, "vdd_l2_l3_l4" },
+ { "l5", 0x4400, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l6", 0x4500, "vdd_l6_l8" },
+ { "l7", 0x4600, "vdd_l1_l7_l17_l18" },
+ { "l8", 0x4700, "vdd_l6_l8" },
+ { "l9", 0x4800, "vdd_l9_l11" },
+ { "l10", 0x4900, "vdd_l10_l13_l14" },
+ { "l11", 0x4a00, "vdd_l9_l11" },
+ { "l12", 0x4b00, "vdd_l12_l16" },
+ { "l13", 0x4c00, "vdd_l10_l13_l14" },
+ { "l14", 0x4d00, "vdd_l10_l13_l14" },
+ { "l15", 0x4e00, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l16", 0x4f00, "vdd_l12_l16" },
+ { "l17", 0x5000, "vdd_l1_l7_l17_l18" },
+ { "l18", 0x5100, "vdd_l1_l7_l17_l18" },
+ { "l19", 0x5200, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l20", 0x5300, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l21", 0x5400, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l22", 0x5500, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l23", 0x5600, "vdd_l23_l24" },
+ { "l24", 0x5700, "vdd_l23_l24" },
+};
+
+static const struct spmi_regulator_data pm660_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
{ "s3", 0x1a00, "vdd_s3", },
- { "s4", 0xa000, },
- { "l1", 0x4000, "vdd_l1_l3", },
- { "l2", 0x4100, "vdd_l2_lvs_1_2_3", },
- { "l3", 0x4200, "vdd_l1_l3", },
- { "l4", 0x4300, "vdd_l4_l11", },
- { "l5", 0x4400, "vdd_l5_l7", NULL, 0x0410 },
- { "l6", 0x4500, "vdd_l6_l12_l14_l15", },
- { "l7", 0x4600, "vdd_l5_l7", NULL, 0x0410 },
- { "l8", 0x4700, "vdd_l8_l16_l18_19", },
- { "l9", 0x4800, "vdd_l9_l10_l17_l22", },
- { "l10", 0x4900, "vdd_l9_l10_l17_l22", },
- { "l11", 0x4a00, "vdd_l4_l11", },
- { "l12", 0x4b00, "vdd_l6_l12_l14_l15", },
- { "l13", 0x4c00, "vdd_l13_l20_l23_l24", },
- { "l14", 0x4d00, "vdd_l6_l12_l14_l15", },
- { "l15", 0x4e00, "vdd_l6_l12_l14_l15", },
- { "l16", 0x4f00, "vdd_l8_l16_l18_19", },
- { "l17", 0x5000, "vdd_l9_l10_l17_l22", },
- { "l18", 0x5100, "vdd_l8_l16_l18_19", },
- { "l19", 0x5200, "vdd_l8_l16_l18_19", },
- { "l20", 0x5300, "vdd_l13_l20_l23_l24", },
- { "l21", 0x5400, "vdd_l21", },
- { "l22", 0x5500, "vdd_l9_l10_l17_l22", },
- { "l23", 0x5600, "vdd_l13_l20_l23_l24", },
- { "l24", 0x5700, "vdd_l13_l20_l23_l24", },
- { "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", },
- { "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", },
- { "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", },
- { "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", },
- { "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", },
+ { "s4", 0x1d00, "vdd_s3", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l6_l7", },
+ { "l2", 0x4100, "vdd_l2_l3", },
+ { "l3", 0x4200, "vdd_l2_l3", },
+	/* l4 is inaccessible on PM660 */
+ { "l5", 0x4400, "vdd_l5", },
+ { "l6", 0x4500, "vdd_l1_l6_l7", },
+ { "l7", 0x4600, "vdd_l1_l6_l7", },
+ { "l8", 0x4700, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l9", 0x4800, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l10", 0x4900, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l11", 0x4a00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l12", 0x4b00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l13", 0x4c00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l14", 0x4d00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l15", 0x4e00, "vdd_l15_l16_l17_l18_l19", },
+ { "l16", 0x4f00, "vdd_l15_l16_l17_l18_l19", },
+ { "l17", 0x5000, "vdd_l15_l16_l17_l18_l19", },
+ { "l18", 0x5100, "vdd_l15_l16_l17_l18_l19", },
+ { "l19", 0x5200, "vdd_l15_l16_l17_l18_l19", },
+ { }
+};
+
+static const struct spmi_regulator_data pm660l_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "l1", 0x4000, "vdd_l1_l9_l10", },
+ { "l2", 0x4100, "vdd_l2", },
+ { "l3", 0x4200, "vdd_l3_l5_l7_l8", },
+ { "l4", 0x4300, "vdd_l4_l6", },
+ { "l5", 0x4400, "vdd_l3_l5_l7_l8", },
+ { "l6", 0x4500, "vdd_l4_l6", },
+ { "l7", 0x4600, "vdd_l3_l5_l7_l8", },
+ { "l8", 0x4700, "vdd_l3_l5_l7_l8", },
+ { "l9", 0x4800, "vdd_l1_l9_l10", },
+ { "l10", 0x4900, "vdd_l1_l9_l10", },
+ { }
+};
+
+static const struct spmi_regulator_data pm8004_regulators[] = {
+ { "s2", 0x1700, "vdd_s2", },
+ { "s5", 0x2000, "vdd_s5", },
+ { }
+};
+
+static const struct spmi_regulator_data pm8005_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
{ }
};
@@ -1985,6 +2194,43 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8941_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0xa000, },
+ { "l1", 0x4000, "vdd_l1_l3", },
+ { "l2", 0x4100, "vdd_l2_lvs_1_2_3", },
+ { "l3", 0x4200, "vdd_l1_l3", },
+ { "l4", 0x4300, "vdd_l4_l11", },
+ { "l5", 0x4400, "vdd_l5_l7", NULL, 0x0410 },
+ { "l6", 0x4500, "vdd_l6_l12_l14_l15", },
+ { "l7", 0x4600, "vdd_l5_l7", NULL, 0x0410 },
+ { "l8", 0x4700, "vdd_l8_l16_l18_19", },
+ { "l9", 0x4800, "vdd_l9_l10_l17_l22", },
+ { "l10", 0x4900, "vdd_l9_l10_l17_l22", },
+ { "l11", 0x4a00, "vdd_l4_l11", },
+ { "l12", 0x4b00, "vdd_l6_l12_l14_l15", },
+ { "l13", 0x4c00, "vdd_l13_l20_l23_l24", },
+ { "l14", 0x4d00, "vdd_l6_l12_l14_l15", },
+ { "l15", 0x4e00, "vdd_l6_l12_l14_l15", },
+ { "l16", 0x4f00, "vdd_l8_l16_l18_19", },
+ { "l17", 0x5000, "vdd_l9_l10_l17_l22", },
+ { "l18", 0x5100, "vdd_l8_l16_l18_19", },
+ { "l19", 0x5200, "vdd_l8_l16_l18_19", },
+ { "l20", 0x5300, "vdd_l13_l20_l23_l24", },
+ { "l21", 0x5400, "vdd_l21", },
+ { "l22", 0x5500, "vdd_l9_l10_l17_l22", },
+ { "l23", 0x5600, "vdd_l13_l20_l23_l24", },
+ { "l24", 0x5700, "vdd_l13_l20_l23_l24", },
+ { "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", },
+ { "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", },
+ { "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", },
+ { "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", },
+ { "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", },
+ { }
+};
+
static const struct spmi_regulator_data pm8950_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -2076,69 +2322,6 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ }
};
-static const struct spmi_regulator_data pm660_regulators[] = {
- { "s1", 0x1400, "vdd_s1", },
- { "s2", 0x1700, "vdd_s2", },
- { "s3", 0x1a00, "vdd_s3", },
- { "s4", 0x1d00, "vdd_s3", },
- { "s5", 0x2000, "vdd_s5", },
- { "s6", 0x2300, "vdd_s6", },
- { "l1", 0x4000, "vdd_l1_l6_l7", },
- { "l2", 0x4100, "vdd_l2_l3", },
- { "l3", 0x4200, "vdd_l2_l3", },
- /* l4 is unaccessible on PM660 */
- { "l5", 0x4400, "vdd_l5", },
- { "l6", 0x4500, "vdd_l1_l6_l7", },
- { "l7", 0x4600, "vdd_l1_l6_l7", },
- { "l8", 0x4700, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l9", 0x4800, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l10", 0x4900, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l11", 0x4a00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l12", 0x4b00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l13", 0x4c00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l14", 0x4d00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l15", 0x4e00, "vdd_l15_l16_l17_l18_l19", },
- { "l16", 0x4f00, "vdd_l15_l16_l17_l18_l19", },
- { "l17", 0x5000, "vdd_l15_l16_l17_l18_l19", },
- { "l18", 0x5100, "vdd_l15_l16_l17_l18_l19", },
- { "l19", 0x5200, "vdd_l15_l16_l17_l18_l19", },
- { }
-};
-
-static const struct spmi_regulator_data pm660l_regulators[] = {
- { "s1", 0x1400, "vdd_s1", },
- { "s2", 0x1700, "vdd_s2", },
- { "s3", 0x1a00, "vdd_s3", },
- { "s4", 0x1d00, "vdd_s4", },
- { "s5", 0x2000, "vdd_s5", },
- { "l1", 0x4000, "vdd_l1_l9_l10", },
- { "l2", 0x4100, "vdd_l2", },
- { "l3", 0x4200, "vdd_l3_l5_l7_l8", },
- { "l4", 0x4300, "vdd_l4_l6", },
- { "l5", 0x4400, "vdd_l3_l5_l7_l8", },
- { "l6", 0x4500, "vdd_l4_l6", },
- { "l7", 0x4600, "vdd_l3_l5_l7_l8", },
- { "l8", 0x4700, "vdd_l3_l5_l7_l8", },
- { "l9", 0x4800, "vdd_l1_l9_l10", },
- { "l10", 0x4900, "vdd_l1_l9_l10", },
- { }
-};
-
-
-static const struct spmi_regulator_data pm8004_regulators[] = {
- { "s2", 0x1700, "vdd_s2", },
- { "s5", 0x2000, "vdd_s5", },
- { }
-};
-
-static const struct spmi_regulator_data pm8005_regulators[] = {
- { "s1", 0x1400, "vdd_s1", },
- { "s2", 0x1700, "vdd_s2", },
- { "s3", 0x1a00, "vdd_s3", },
- { "s4", 0x1d00, "vdd_s4", },
- { }
-};
-
static const struct spmi_regulator_data pmp8074_regulators[] = {
{ "s1", 0x1400, "vdd_s1"},
{ "s2", 0x1700, "vdd_s2"},
@@ -2167,6 +2350,9 @@ static const struct spmi_regulator_data pms405_regulators[] = {
};
static const struct of_device_id qcom_spmi_regulator_match[] = {
+ { .compatible = "qcom,pm6125-regulators", .data = &pm6125_regulators },
+ { .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
+ { .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
{ .compatible = "qcom,pm8004-regulators", .data = &pm8004_regulators },
{ .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
{ .compatible = "qcom,pm8226-regulators", .data = &pm8226_regulators },
@@ -2176,8 +2362,6 @@ static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
- { .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
- { .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
{ .compatible = "qcom,pmp8074-regulators", .data = &pmp8074_regulators },
{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
{ }
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index 105f694a67e6..308f7972941b 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -381,13 +381,11 @@ error:
return ret;
}
-static int attiny_i2c_remove(struct i2c_client *client)
+static void attiny_i2c_remove(struct i2c_client *client)
{
struct attiny_lcd *state = i2c_get_clientdata(client);
mutex_destroy(&state->lock);
-
- return 0;
}
static const struct of_device_id attiny_dt_ids[] = {
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index ce00db27589a..115345e9fded 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -151,7 +151,7 @@ static inline void ti_abb_clear_txdone(const struct ti_abb *abb)
};
/**
- * ti_abb_wait_tranx() - waits for ABB tranxdone event
+ * ti_abb_wait_txdone() - waits for ABB tranxdone event
* @dev: device
* @abb: pointer to the abb instance
*
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
new file mode 100644
index 000000000000..c484c943e467
--- /dev/null
+++ b/drivers/regulator/tps65219-regulator.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// tps65219-regulator.c
+//
+// Regulator driver for TPS65219 PMIC
+//
+// Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+//
+// This implementation is derived from the tps65218 driver authored by
+// "J Keerthy <j-keerthy@ti.com>"
+//
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65219.h>
+
+struct tps65219_regulator_irq_type {
+ const char *irq_name;
+ const char *regulator_name;
+ const char *event_name;
+ unsigned long event;
+};
+
+static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
+ { "LDO3_SCG", "LDO3", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO3_OC", "LDO3", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO3_UV", "LDO3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO4_SCG", "LDO4", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO4_OC", "LDO4", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO4_UV", "LDO4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO1_SCG", "LDO1", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO1_OC", "LDO1", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO1_UV", "LDO1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO2_SCG", "LDO2", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO2_OC", "LDO2", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO2_UV", "LDO2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK3_SCG", "BUCK3", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "BUCK3_OC", "BUCK3", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK3_NEG_OC", "BUCK3", "negative overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK3_UV", "BUCK3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK1_SCG", "BUCK1", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "BUCK1_OC", "BUCK1", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK1_NEG_OC", "BUCK1", "negative overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK1_UV", "BUCK1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK2_SCG", "BUCK2", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "BUCK2_OC", "BUCK2", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK2_NEG_OC", "BUCK2", "negative overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK2_UV", "BUCK2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK1_RV", "BUCK1", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK2_RV", "BUCK2", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK3_RV", "BUCK3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO1_RV", "LDO1", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO2_RV", "LDO2", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO3_RV", "LDO3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO4_RV", "LDO4", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK1_RV_SD", "BUCK1", "residual voltage on shutdown",
+ REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK2_RV_SD", "BUCK2", "residual voltage on shutdown",
+ REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK3_RV_SD", "BUCK3", "residual voltage on shutdown",
+ REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO1_RV_SD", "LDO1", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO2_RV_SD", "LDO2", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO3_RV_SD", "LDO3", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO4_RV_SD", "LDO4", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "SENSOR_3_WARM", "SENSOR3", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN},
+ { "SENSOR_2_WARM", "SENSOR2", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
+ { "SENSOR_1_WARM", "SENSOR1", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
+ { "SENSOR_0_WARM", "SENSOR0", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
+ { "SENSOR_3_HOT", "SENSOR3", "hot temperature", REGULATOR_EVENT_OVER_TEMP},
+ { "SENSOR_2_HOT", "SENSOR2", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
+ { "SENSOR_1_HOT", "SENSOR1", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
+ { "SENSOR_0_HOT", "SENSOR0", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
+ { "TIMEOUT", "", "", REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE },
+};
+
+struct tps65219_regulator_irq_data {
+ struct device *dev;
+ struct tps65219_regulator_irq_type *type;
+ struct regulator_dev *rdev;
+};
+
+#define TPS65219_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, \
+ _ct, _ncl, _bpm) \
+ { \
+ .name = _name, \
+ .of_match = _of, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .supply_name = _of, \
+ .id = _id, \
+ .ops = &(_ops), \
+ .n_voltages = _n, \
+ .type = _type, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .csel_reg = _cr, \
+ .csel_mask = _cm, \
+ .curr_table = _ct, \
+ .n_current_limits = _ncl, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .volt_table = NULL, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ .ramp_delay = _delay, \
+ .fixed_uV = _fuv, \
+ .bypass_reg = _vr, \
+ .bypass_mask = _bpm, \
+ } \
+
+static const struct linear_range bucks_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x20, 0x33, 100000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x34, 0x3f, 0),
+};
+
+static const struct linear_range ldos_1_2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x37, 50000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x38, 0x3f, 0),
+};
+
+static const struct linear_range ldos_3_4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x0, 0xC, 0),
+ REGULATOR_LINEAR_RANGE(1250000, 0xD, 0x35, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x36, 0x3F, 0),
+};
+
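The linear ranges above decode a VSET selector into microvolts: the first matching range supplies the base voltage and step size. An illustrative decode with an assumed selector value:

	unsigned int sel = 0x25;			/* assumed buck VSET readback */
	int uV = 1400000 + (sel - 0x20) * 100000;	/* second bucks_ranges entry: 1900000 uV */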
+static int tps65219_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ return regmap_set_bits(tps->regmap, TPS65219_REG_STBY_1_CONFIG,
+ dev->desc->enable_mask);
+
+ case REGULATOR_MODE_STANDBY:
+ return regmap_clear_bits(tps->regmap,
+ TPS65219_REG_STBY_1_CONFIG,
+ dev->desc->enable_mask);
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned int tps65219_get_mode(struct regulator_dev *dev)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+ int ret, value = 0;
+
+ ret = regmap_read(tps->regmap, TPS65219_REG_STBY_1_CONFIG, &value);
+ if (ret) {
+ dev_dbg(tps->dev, "%s failed for regulator %s: %d ",
+ __func__, dev->desc->name, ret);
+ return ret;
+ }
+ value = (value & BIT(rid)) >> rid;
+ if (value)
+ return REGULATOR_MODE_STANDBY;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+/*
+ * The generic regulator_set_bypass_regmap() does not fully match the
+ * requirements: the TPS65219 explicitly requires that the regulator is
+ * disabled before the bypass state is switched.
+ */
+static int tps65219_set_bypass(struct regulator_dev *dev, bool enable)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (dev->desc->ops->is_enabled(dev)) {
+ dev_err(tps->dev,
+ "%s LDO%d enabled, must be shut down to set bypass ",
+ __func__, rid);
+ return -EBUSY;
+ }
+ return regulator_set_bypass_regmap(dev, enable);
+}
+
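Because of the check above, a consumer has to power the LDO down before toggling bypass and may re-enable it afterwards. A hypothetical consumer sequence, not part of this patch:

	static int example_enter_bypass(struct regulator *ldo)
	{
		int ret;

		ret = regulator_disable(ldo);
		if (ret)
			return ret;

		ret = regulator_allow_bypass(ldo, true);
		if (ret)
			return ret;

		return regulator_enable(ldo);
	}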
+/* Operations permitted on BUCK1/2/3 */
+static const struct regulator_ops tps65219_bucks_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+
+};
+
+/* Operations permitted on LDO1/2 */
+static const struct regulator_ops tps65219_ldos_1_2_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_bypass = tps65219_set_bypass,
+ .get_bypass = regulator_get_bypass_regmap,
+};
+
+/* Operations permitted on LDO3/4 */
+static const struct regulator_ops tps65219_ldos_3_4_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS65219_REGULATOR("BUCK1", "buck1", TPS65219_BUCK_1,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK1_EN_MASK, 0, 0, bucks_ranges,
+ 3, 4000, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("BUCK2", "buck2", TPS65219_BUCK_2,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK2_EN_MASK, 0, 0, bucks_ranges,
+ 3, 4000, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("BUCK3", "buck3", TPS65219_BUCK_3,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK3_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK3_EN_MASK, 0, 0, bucks_ranges,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO1", "ldo1", TPS65219_LDO_1,
+ REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ TPS65219_REG_LDO1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO1_EN_MASK, 0, 0, ldos_1_2_ranges,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO2", "ldo2", TPS65219_LDO_2,
+ REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ TPS65219_REG_LDO2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO2_EN_MASK, 0, 0, ldos_1_2_ranges,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO3", "ldo3", TPS65219_LDO_3,
+ REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ TPS65219_REG_LDO3_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO3_EN_MASK, 0, 0, ldos_3_4_ranges,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO4", "ldo4", TPS65219_LDO_4,
+ REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ TPS65219_REG_LDO4_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO4_EN_MASK, 0, 0, ldos_3_4_ranges,
+ 3, 0, 0, NULL, 0, 0),
+};
+
+static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
+{
+ struct tps65219_regulator_irq_data *irq_data = data;
+
+ if (irq_data->type->event_name[0] == '\0') {
+		/* This is the timeout interrupt; no specific regulator is involved */
+ dev_err(irq_data->dev,
+ "System was put in shutdown due to timeout during an active or standby transition.\n");
+ return IRQ_HANDLED;
+ }
+
+ regulator_notifier_call_chain(irq_data->rdev,
+ irq_data->type->event, NULL);
+
+ dev_err(irq_data->dev, "Error IRQ trap %s for %s\n",
+ irq_data->type->event_name, irq_data->type->regulator_name);
+ return IRQ_HANDLED;
+}
+
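The events raised by this handler can be observed from a consumer through the regulator notifier chain. A hedged sketch (names are assumptions, not part of this patch):

	static int example_event_cb(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		if (event & REGULATOR_EVENT_OVER_CURRENT)
			pr_warn("tps65219 rail reported overcurrent\n");

		return NOTIFY_OK;
	}
	/* Registered against a supply with devm_regulator_register_notifier(). */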
+static int tps65219_get_rdev_by_name(const char *regulator_name,
+ struct regulator_dev *rdevtbl[7],
+ struct regulator_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ if (strcmp(regulator_name, regulators[i].name) == 0) {
+ dev = rdevtbl[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int tps65219_regulator_probe(struct platform_device *pdev)
+{
+ struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int i;
+ int error;
+ int irq;
+ struct tps65219_regulator_irq_data *irq_data;
+ struct tps65219_regulator_irq_type *irq_type;
+ struct regulator_dev *rdevtbl[7];
+
+ config.dev = tps->dev;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ dev_dbg(tps->dev, "%s regul i= %d START", __func__, i);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+ rdevtbl[i] = rdev;
+ dev_dbg(tps->dev, "%s regul i= %d COMPLETED", __func__, i);
+ }
+
+ irq_data = devm_kmalloc(tps->dev,
+ ARRAY_SIZE(tps65219_regulator_irq_types) *
+ sizeof(struct tps65219_regulator_irq_data),
+ GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(tps65219_regulator_irq_types); ++i) {
+ irq_type = &tps65219_regulator_irq_types[i];
+
+ irq = platform_get_irq_byname(pdev, irq_type->irq_name);
+ if (irq < 0)
+ return -EINVAL;
+
+ irq_data[i].dev = tps->dev;
+ irq_data[i].type = irq_type;
+
+ tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, rdev);
+ if (rdev < 0) {
+ dev_err(tps->dev, "Failed to get rdev for %s\n",
+ irq_type->regulator_name);
+ return -EINVAL;
+ }
+ irq_data[i].rdev = rdev;
+
+ error = devm_request_threaded_irq(tps->dev, irq, NULL,
+ tps65219_regulator_irq_handler,
+ IRQF_ONESHOT,
+ irq_type->irq_name,
+ &irq_data[i]);
+ if (error) {
+ dev_err(tps->dev, "failed to request %s IRQ %d: %d\n",
+ irq_type->irq_name, irq, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id tps65219_regulator_id_table[] = {
+ { "tps65219-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65219_regulator_id_table);
+
+static struct platform_driver tps65219_regulator_driver = {
+ .driver = {
+ .name = "tps65219-pmic",
+ },
+ .probe = tps65219_regulator_probe,
+ .id_table = tps65219_regulator_id_table,
+};
+
+module_platform_driver(tps65219_regulator_driver);
+
+MODULE_AUTHOR("Jerome Neanne <j-neanne@baylibre.com>");
+MODULE_DESCRIPTION("TPS65219 voltage regulator driver");
+MODULE_ALIAS("platform:tps65219-pmic");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 81c4f5776109..0f7706e23eb9 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -158,7 +158,6 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool * ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 806773e88832..2a8238eb8794 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -201,7 +201,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
- default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 185a333df66c..d2408725eb2c 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MP_RESET_PCIEPHY_PERST:
value = assert ? 0 : bit;
break;
}
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index 00b612a0effa..f3528dd1d084 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -33,11 +33,8 @@ static struct regmap_config sparx5_reset_regmap_config = {
.reg_stride = 4,
};
-static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int sparx5_switch_reset(struct mchp_reset_context *ctx)
{
- struct mchp_reset_context *ctx =
- container_of(rcdev, struct mchp_reset_context, rcdev);
u32 val;
/* Make sure the core is PROTECTED from reset */
@@ -54,8 +51,14 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
1, 100);
}
+static int sparx5_reset_noop(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
static const struct reset_control_ops sparx5_reset_ops = {
- .reset = sparx5_switch_reset,
+ .reset = sparx5_reset_noop,
};
static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
@@ -122,6 +125,11 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
ctx->rcdev.of_node = dn;
ctx->props = device_get_match_data(&pdev->dev);
+	/* Issue the reset very early; our actual reset callback is a noop. */
+ err = sparx5_switch_reset(ctx);
+ if (err)
+ return err;
+
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
@@ -163,6 +171,10 @@ static int __init mchp_sparx5_reset_init(void)
return platform_driver_register(&mchp_sparx5_reset_driver);
}
+/*
+ * Because this is a global reset, keep this postcore_initcall() to issue the
+ * reset as early as possible during the kernel startup.
+ */
postcore_initcall(mchp_sparx5_reset_init);
MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index 24c55efa98e5..f2333506b0a6 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -291,7 +291,7 @@ static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
iprst2 |= ipsrst2_bits;
iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
NPCM_IPSRST3_USBPHY2);
- iprst2 |= ipsrst4_bits;
+ iprst4 |= ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index e0bbb11d912e..6d6a55efb9cc 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -297,11 +297,9 @@ static int bq32k_probe(struct i2c_client *client)
return 0;
}
-static int bq32k_remove(struct i2c_client *client)
+static void bq32k_remove(struct i2c_client *client)
{
bq32k_sysfs_unregister(&client->dev);
-
- return 0;
}
static const struct i2c_device_id bq32k_id[] = {
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index b19de5100b1a..7f089f066163 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -530,7 +530,7 @@ static int ds1374_probe(struct i2c_client *client)
return 0;
}
-static int ds1374_remove(struct i2c_client *client)
+static void ds1374_remove(struct i2c_client *client)
{
struct ds1374 *ds1374 = i2c_get_clientdata(client);
@@ -542,8 +542,6 @@ static int ds1374_remove(struct i2c_client *client)
devm_free_irq(&client->dev, client->irq, client);
cancel_work_sync(&ds1374->work);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 1fc6627d854d..1bfca39079d4 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -472,12 +472,11 @@ static int isl12026_probe_new(struct i2c_client *client)
return devm_rtc_register_device(priv->rtc);
}
-static int isl12026_remove(struct i2c_client *client)
+static void isl12026_remove(struct i2c_client *client)
{
struct isl12026 *priv = i2c_get_clientdata(client);
i2c_unregister_device(priv->nvm_client);
- return 0;
}
static const struct of_device_id isl12026_dt_match[] = {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index d868458cd40e..e0b4d3794320 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -989,7 +989,7 @@ static int m41t80_probe(struct i2c_client *client,
return 0;
}
-static int m41t80_remove(struct i2c_client *client)
+static void m41t80_remove(struct i2c_client *client)
{
#ifdef CONFIG_RTC_DRV_M41T80_WDT
struct m41t80_data *clientdata = i2c_get_clientdata(client);
@@ -999,8 +999,6 @@ static int m41t80_remove(struct i2c_client *client)
unregister_reboot_notifier(&wdt_notifier);
}
#endif
-
- return 0;
}
static struct i2c_driver m41t80_driver = {
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index cb15983383f5..9562c477e1c9 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -910,10 +910,9 @@ exit:
return err;
}
-static int rs5c372_remove(struct i2c_client *client)
+static void rs5c372_remove(struct i2c_client *client)
{
rs5c_sysfs_unregister(&client->dev);
- return 0;
}
static struct i2c_driver rs5c372_driver = {
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index ba0d22a5b421..f587afa84357 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -657,10 +657,9 @@ static int x1205_probe(struct i2c_client *client)
return 0;
}
-static int x1205_remove(struct i2c_client *client)
+static void x1205_remove(struct i2c_client *client)
{
x1205_sysfs_unregister(&client->dev);
- return 0;
}
static const struct i2c_device_id x1205_id[] = {
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ea82821599f6..5a6d9c15395f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -41,15 +41,6 @@
#define DASD_DIAG_MOD "dasd_diag_mod"
-static unsigned int queue_depth = 32;
-static unsigned int nr_hw_queues = 4;
-
-module_param(queue_depth, uint, 0444);
-MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
-
-module_param(nr_hw_queues, uint, 0444);
-MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
-
/*
* SECTION: exported variables of dasd.c
*/
@@ -68,8 +59,6 @@ MODULE_LICENSE("GPL");
/*
* SECTION: prototypes for static functions of dasd.c
*/
-static int dasd_alloc_queue(struct dasd_block *);
-static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
@@ -198,21 +187,11 @@ EXPORT_SYMBOL_GPL(dasd_free_block);
*/
static int dasd_state_new_to_known(struct dasd_device *device)
{
- int rc;
-
/*
* As long as the device is not in state DASD_STATE_NEW we want to
* keep the reference count > 0.
*/
dasd_get_device(device);
-
- if (device->block) {
- rc = dasd_alloc_queue(device->block);
- if (rc) {
- dasd_put_device(device);
- return rc;
- }
- }
device->state = DASD_STATE_KNOWN;
return 0;
}
@@ -226,9 +205,6 @@ static int dasd_state_known_to_new(struct dasd_device *device)
dasd_eer_disable(device);
device->state = DASD_STATE_NEW;
- if (device->block)
- dasd_free_queue(device->block);
-
/* Give up reference we took in dasd_state_new_to_known. */
dasd_put_device(device);
return 0;
@@ -1591,9 +1567,8 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- if (device->block->request_queue)
- blk_mq_run_hw_queues(device->block->request_queue,
- true);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -2691,7 +2666,7 @@ static void dasd_block_timeout(struct timer_list *t)
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
dasd_schedule_block_bh(block);
- blk_mq_run_hw_queues(block->request_queue, true);
+ blk_mq_run_hw_queues(block->gdp->queue, true);
}
/*
@@ -3239,7 +3214,7 @@ static void dasd_request_done(struct request *req)
blk_mq_run_hw_queues(req->q, true);
}
-static struct blk_mq_ops dasd_mq_ops = {
+struct blk_mq_ops dasd_mq_ops = {
.queue_rq = do_dasd_request,
.complete = dasd_request_done,
.timeout = dasd_times_out,
@@ -3247,45 +3222,6 @@ static struct blk_mq_ops dasd_mq_ops = {
.exit_hctx = dasd_exit_hctx,
};
-/*
- * Allocate and initialize request queue and default I/O scheduler.
- */
-static int dasd_alloc_queue(struct dasd_block *block)
-{
- int rc;
-
- block->tag_set.ops = &dasd_mq_ops;
- block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
- block->tag_set.nr_hw_queues = nr_hw_queues;
- block->tag_set.queue_depth = queue_depth;
- block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- block->tag_set.numa_node = NUMA_NO_NODE;
-
- rc = blk_mq_alloc_tag_set(&block->tag_set);
- if (rc)
- return rc;
-
- block->request_queue = blk_mq_init_queue(&block->tag_set);
- if (IS_ERR(block->request_queue))
- return PTR_ERR(block->request_queue);
-
- block->request_queue->queuedata = block;
-
- return 0;
-}
-
-/*
- * Deactivate and free request queue.
- */
-static void dasd_free_queue(struct dasd_block *block)
-{
- if (block->request_queue) {
- blk_mq_destroy_queue(block->request_queue);
- blk_mq_free_tag_set(&block->tag_set);
- block->request_queue = NULL;
- }
-}
-
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
struct dasd_device *base;
@@ -3762,10 +3698,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- if (device->block->request_queue)
- blk_mq_run_hw_queues(device->block->request_queue,
- true);
- }
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
+ }
if (!device->stopped)
wake_up(&generic_waitq);
@@ -3916,8 +3851,8 @@ void dasd_generic_space_avail(struct dasd_device *device)
if (device->block) {
dasd_schedule_block_bh(device->block);
- if (device->block->request_queue)
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
if (!device->stopped)
wake_up(&generic_waitq);
@@ -3927,7 +3862,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
/*
* clear active requests and requeue them to block layer if possible
*/
-static int dasd_generic_requeue_all_requests(struct dasd_device *device)
+int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
@@ -4001,6 +3936,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
dasd_schedule_device_bh(device);
return rc;
}
+EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
static void do_requeue_requests(struct work_struct *work)
{
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 299001ad9a32..81d283b3cd3b 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1050,6 +1050,11 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
dev_err(&device->cdev->dev, "An I/O request was rejected"
" because writing is inhibited\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ } else if (sense[7] & SNS7_INVALID_ON_SEC) {
+ dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
+ /* suppress dump of sense data for this error */
+ set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else {
/* fatal error - set status to FAILED
internal error 09 - Command Reject */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index dc78a523a69f..b6b938aa6615 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
- struct alias_pav_group *group = private->pavgroup;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
+ struct alias_pav_group *group;
unsigned long flags;
- if (!group || !lcu)
+ if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
}
spin_lock_irqsave(&lcu->lock, flags);
+ group = private->pavgroup;
+ if (!group) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ }
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 811e79c9f59c..1beb596d1434 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -26,7 +26,6 @@
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
-#define DASD_BUS_ID_SIZE 20
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@@ -50,6 +49,7 @@ struct dasd_devmap {
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
+ struct dasd_copy_relation *copy;
};
/*
@@ -130,7 +130,7 @@ __setup ("dasd=", dasd_call_setup);
/*
* Read a device busid/devno from a string.
*/
-static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
+static int dasd_busid(char *str, int *id0, int *id1, int *devno)
{
unsigned int val;
char *tok;
@@ -438,16 +438,12 @@ dasd_add_busid(const char *bus_id, int features)
return devmap;
}
-/*
- * Find devmap for device with given bus_id.
- */
static struct dasd_devmap *
-dasd_find_busid(const char *bus_id)
+dasd_find_busid_locked(const char *bus_id)
{
struct dasd_devmap *devmap, *tmp;
int hash;
- spin_lock(&dasd_devmap_lock);
devmap = ERR_PTR(-ENODEV);
hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
@@ -456,6 +452,19 @@ dasd_find_busid(const char *bus_id)
break;
}
}
+ return devmap;
+}
+
+/*
+ * Find devmap for device with given bus_id.
+ */
+static struct dasd_devmap *
+dasd_find_busid(const char *bus_id)
+{
+ struct dasd_devmap *devmap;
+
+ spin_lock(&dasd_devmap_lock);
+ devmap = dasd_find_busid_locked(bus_id);
spin_unlock(&dasd_devmap_lock);
return devmap;
}
@@ -585,6 +594,238 @@ dasd_create_device(struct ccw_device *cdev)
}
/*
+ * allocate a PPRC data structure and call the discipline function to fill it
+ */
+static int dasd_devmap_get_pprc_status(struct dasd_device *device,
+ struct dasd_pprc_data_sc4 **data)
+{
+ struct dasd_pprc_data_sc4 *temp;
+
+ if (!device->discipline || !device->discipline->pprc_status) {
+ dev_warn(&device->cdev->dev, "Unable to query copy relation status\n");
+ return -EOPNOTSUPP;
+ }
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ /* get PPRC information from storage */
+ if (device->discipline->pprc_status(device, temp)) {
+ dev_warn(&device->cdev->dev, "Error during copy relation status query\n");
+ kfree(temp);
+ return -EINVAL;
+ }
+ *data = temp;
+
+ return 0;
+}
+
+/*
+ * find an entry in a PPRC device_info array by a given UID
+ * depending on the primary/secondary state of the device, the UID has to
+ * be matched against the respective primary or secondary fields
+ */
+static int dasd_devmap_entry_from_pprc_data(struct dasd_pprc_data_sc4 *data,
+ struct dasd_uid uid,
+ bool primary)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (primary) {
+ if (data->dev_info[i].prim_cu_ssid == uid.ssid &&
+ data->dev_info[i].primary == uid.real_unit_addr)
+ return i;
+ } else {
+ if (data->dev_info[i].sec_cu_ssid == uid.ssid &&
+ data->dev_info[i].secondary == uid.real_unit_addr)
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
+ * check the consistency of a specified copy relation by checking
+ * the following things:
+ *
+ * - is the given device part of a copy pair setup
+ * - does the state of the device match the state in the PPRC status data
+ * - does the device UID match with the UID in the PPRC status data
+ * - to prevent misrouted IO check if the given device is present in all
+ * related PPRC status data
+ */
+static int dasd_devmap_check_copy_relation(struct dasd_device *device,
+ struct dasd_copy_entry *entry,
+ struct dasd_pprc_data_sc4 *data,
+ struct dasd_copy_relation *copy)
+{
+ struct dasd_pprc_data_sc4 *tmp_dat;
+ struct dasd_device *tmp_dev;
+ struct dasd_uid uid;
+ int i, j;
+
+ if (!device->discipline || !device->discipline->get_uid ||
+ device->discipline->get_uid(device, &uid))
+ return 1;
+
+ i = dasd_devmap_entry_from_pprc_data(data, uid, entry->primary);
+ if (i < 0) {
+ dev_warn(&device->cdev->dev, "Device not part of a copy relation\n");
+ return 1;
+ }
+
+ /* double check which role the current device has */
+ if (entry->primary) {
+ if (data->dev_info[i].flags & 0x80) {
+ dev_warn(&device->cdev->dev, "Copy pair secondary is setup as primary\n");
+ return 1;
+ }
+ if (data->dev_info[i].prim_cu_ssid != uid.ssid ||
+ data->dev_info[i].primary != uid.real_unit_addr) {
+ dev_warn(&device->cdev->dev,
+ "Primary device %s does not match copy pair status primary device %04x\n",
+ dev_name(&device->cdev->dev),
+ data->dev_info[i].prim_cu_ssid |
+ data->dev_info[i].primary);
+ return 1;
+ }
+ } else {
+ if (!(data->dev_info[i].flags & 0x80)) {
+ dev_warn(&device->cdev->dev, "Copy pair primary is setup as secondary\n");
+ return 1;
+ }
+ if (data->dev_info[i].sec_cu_ssid != uid.ssid ||
+ data->dev_info[i].secondary != uid.real_unit_addr) {
+ dev_warn(&device->cdev->dev,
+ "Secondary device %s does not match copy pair status secondary device %04x\n",
+ dev_name(&device->cdev->dev),
+ data->dev_info[i].sec_cu_ssid |
+ data->dev_info[i].secondary);
+ return 1;
+ }
+ }
+
+ /*
+ * the current device has to be part of the copy relation of all
+ * entries to prevent misrouted IO to another copy pair
+ */
+ for (j = 0; j < DASD_CP_ENTRIES; j++) {
+ if (entry == &copy->entry[j])
+ tmp_dev = device;
+ else
+ tmp_dev = copy->entry[j].device;
+
+ if (!tmp_dev)
+ continue;
+
+ if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat))
+ return 1;
+
+ if (dasd_devmap_entry_from_pprc_data(tmp_dat, uid, entry->primary) < 0) {
+ dev_warn(&tmp_dev->cdev->dev,
+ "Copy pair relation does not contain device: %s\n",
+ dev_name(&device->cdev->dev));
+ kfree(tmp_dat);
+ return 1;
+ }
+ kfree(tmp_dat);
+ }
+ return 0;
+}
+
+/* delete device from copy relation entry */
+static void dasd_devmap_delete_copy_relation_device(struct dasd_device *device)
+{
+ struct dasd_copy_relation *copy;
+ int i;
+
+ if (!device->copy)
+ return;
+
+ copy = device->copy;
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].device == device)
+ copy->entry[i].device = NULL;
+ }
+ dasd_put_device(device);
+ device->copy = NULL;
+}
+
+/*
+ * read all required information for a copy relation setup and set up the
+ * device accordingly
+ */
+int dasd_devmap_set_device_copy_relation(struct ccw_device *cdev,
+ bool pprc_enabled)
+{
+ struct dasd_pprc_data_sc4 *data = NULL;
+ struct dasd_copy_entry *entry = NULL;
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ int i, rc = 0;
+
+ devmap = dasd_devmap_from_cdev(cdev);
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ device = devmap->device;
+ if (!device)
+ return -ENODEV;
+
+ copy = devmap->copy;
+ /* no copy pair setup for this device */
+ if (!copy)
+ goto out;
+
+ rc = dasd_devmap_get_pprc_status(device, &data);
+ if (rc)
+ return rc;
+
+ /* print error if PPRC is requested but not enabled on storage server */
+ if (!pprc_enabled) {
+ dev_err(&cdev->dev, "Copy relation not enabled on storage server\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!data->dev_info[0].state) {
+ dev_warn(&device->cdev->dev, "Copy pair setup requested for device not in copy relation\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* find entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(dev_name(&cdev->dev),
+ copy->entry[i].busid, DASD_BUS_ID_SIZE) == 0) {
+ entry = &copy->entry[i];
+ break;
+ }
+ }
+ if (!entry) {
+ dev_warn(&device->cdev->dev, "Copy relation entry not found\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* check if the copy relation is valid */
+ if (dasd_devmap_check_copy_relation(device, entry, data, copy)) {
+ dev_warn(&device->cdev->dev, "Copy relation faulty\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dasd_get_device(device);
+ copy->entry[i].device = device;
+ device->copy = copy;
+out:
+ kfree(data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_devmap_set_device_copy_relation);
+
+/*
* Wait queue for dasd_delete_device waits.
*/
static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
@@ -617,6 +858,8 @@ dasd_delete_device(struct dasd_device *device)
dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ /* Remove copy relation */
+ dasd_devmap_delete_copy_relation_device(device);
/*
* Drop ref_count by 3, one for the devmap reference, one for
* the cdev reference and one for the passed reference.
@@ -694,6 +937,7 @@ void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
gdp->private_data = devmap;
spin_unlock(&dasd_devmap_lock);
}
+EXPORT_SYMBOL(dasd_add_link_to_gendisk);
struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
{
@@ -1334,7 +1578,6 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
- struct request_queue *q;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
@@ -1346,15 +1589,13 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
dasd_put_device(device);
return -EINVAL;
}
- q = device->block->request_queue;
- if (!q) {
+ if (!device->block->gdp) {
dasd_put_device(device);
return -ENODEV;
}
device->blk_timeout = val;
-
- blk_queue_rq_timeout(q, device->blk_timeout * HZ);
+ blk_queue_rq_timeout(device->block->gdp->queue, val * HZ);
dasd_put_device(device);
return count;
@@ -1683,6 +1924,347 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
static struct kobj_attribute path_fcs_attribute =
__ATTR(fc_security, 0444, dasd_path_fcs_show, NULL);
+/*
+ * print copy relation in the form
+ * primary,secondary[1] primary,secondary[2], ...
+ */
+static ssize_t
+dasd_copy_pair_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char prim_busid[DASD_BUS_ID_SIZE];
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ int len = 0;
+ int i;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (IS_ERR(devmap))
+ return -ENODEV;
+
+ if (!devmap->copy)
+ return -ENODEV;
+
+ copy = devmap->copy;
+ /* find primary */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured && copy->entry[i].primary) {
+ strscpy(prim_busid, copy->entry[i].busid,
+ DASD_BUS_ID_SIZE);
+ break;
+ }
+ }
+ if (i == DASD_CP_ENTRIES)
+ goto out;
+
+ /* print all secondary */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured && !copy->entry[i].primary)
+ len += sysfs_emit_at(buf, len, "%s,%s ", prim_busid,
+ copy->entry[i].busid);
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+out:
+ return len;
+}
+
+static int dasd_devmap_set_copy_relation(struct dasd_devmap *devmap,
+ struct dasd_copy_relation *copy,
+ char *busid, bool primary)
+{
+ int i;
+
+ /* find free entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ /* current bus_id already included, nothing to do */
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ return 0;
+
+ if (!copy->entry[i].configured)
+ break;
+ }
+ if (i == DASD_CP_ENTRIES)
+ return -EINVAL;
+
+ copy->entry[i].configured = true;
+ strscpy(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE);
+ if (primary) {
+ copy->active = &copy->entry[i];
+ copy->entry[i].primary = true;
+ }
+ if (!devmap->copy)
+ devmap->copy = copy;
+
+ return 0;
+}
+
+static void dasd_devmap_del_copy_relation(struct dasd_copy_relation *copy,
+ char *busid)
+{
+ int i;
+
+ spin_lock(&dasd_devmap_lock);
+ /* find entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ break;
+ }
+ if (i == DASD_CP_ENTRIES || !copy->entry[i].configured) {
+ spin_unlock(&dasd_devmap_lock);
+ return;
+ }
+
+ copy->entry[i].configured = false;
+ memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
+ if (copy->active == &copy->entry[i]) {
+ copy->active = NULL;
+ copy->entry[i].primary = false;
+ }
+ spin_unlock(&dasd_devmap_lock);
+}
+
+static int dasd_devmap_clear_copy_relation(struct device *dev)
+{
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ int i, rc = 1;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return 1;
+
+ spin_lock(&dasd_devmap_lock);
+ if (!devmap->copy)
+ goto out;
+
+ copy = devmap->copy;
+ /* first check if all secondary devices are offline */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (!copy->entry[i].configured)
+ continue;
+
+ if (copy->entry[i].device == copy->active->device)
+ continue;
+
+ if (copy->entry[i].device)
+ goto out;
+ }
+ /* clear all devmap entries */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (strlen(copy->entry[i].busid) == 0)
+ continue;
+ if (copy->entry[i].device) {
+ dasd_put_device(copy->entry[i].device);
+ copy->entry[i].device->copy = NULL;
+ copy->entry[i].device = NULL;
+ }
+ devmap = dasd_find_busid_locked(copy->entry[i].busid);
+ devmap->copy = NULL;
+ memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
+ }
+ kfree(copy);
+ rc = 0;
+out:
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+}
+
+/*
+ * parse BUSIDs from a copy pair
+ */
+static int dasd_devmap_parse_busid(const char *buf, char *prim_busid,
+ char *sec_busid)
+{
+ char *primary, *secondary, *tmp, *pt;
+ int id0, id1, id2;
+
+ pt = kstrdup(buf, GFP_KERNEL);
+ tmp = pt;
+ if (!tmp)
+ return -ENOMEM;
+
+ primary = strsep(&tmp, ",");
+ if (!primary) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ secondary = strsep(&tmp, ",");
+ if (!secondary) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ if (dasd_busid(primary, &id0, &id1, &id2)) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ sprintf(prim_busid, "%01x.%01x.%04x", id0, id1, id2);
+ if (dasd_busid(secondary, &id0, &id1, &id2)) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ sprintf(sec_busid, "%01x.%01x.%04x", id0, id1, id2);
+ kfree(pt);
+
+ return 0;
+}
+
+static ssize_t dasd_copy_pair_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *prim_devmap, *sec_devmap;
+ char prim_busid[DASD_BUS_ID_SIZE];
+ char sec_busid[DASD_BUS_ID_SIZE];
+ struct dasd_copy_relation *copy;
+ struct dasd_device *device;
+ bool pprc_enabled;
+ int rc;
+
+ if (strncmp(buf, "clear", strlen("clear")) == 0) {
+ if (dasd_devmap_clear_copy_relation(dev))
+ return -EINVAL;
+ return count;
+ }
+
+ rc = dasd_devmap_parse_busid(buf, prim_busid, sec_busid);
+ if (rc)
+ return rc;
+
+ if (strncmp(dev_name(dev), prim_busid, DASD_BUS_ID_SIZE) != 0 &&
+ strncmp(dev_name(dev), sec_busid, DASD_BUS_ID_SIZE) != 0)
+ return -EINVAL;
+
+ /* allocate primary devmap if needed */
+ prim_devmap = dasd_find_busid(prim_busid);
+ if (IS_ERR(prim_devmap))
+ prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
+
+ /* allocate secondary devmap if needed */
+ sec_devmap = dasd_find_busid(sec_busid);
+ if (IS_ERR(sec_devmap))
+ sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
+
+ /* setting copy relation is only allowed for offline secondary */
+ if (sec_devmap->device)
+ return -EINVAL;
+
+ if (prim_devmap->copy) {
+ copy = prim_devmap->copy;
+ } else if (sec_devmap->copy) {
+ copy = sec_devmap->copy;
+ } else {
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ }
+ spin_lock(&dasd_devmap_lock);
+ rc = dasd_devmap_set_copy_relation(prim_devmap, copy, prim_busid, true);
+ if (rc) {
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+ }
+ rc = dasd_devmap_set_copy_relation(sec_devmap, copy, sec_busid, false);
+ if (rc) {
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+ }
+ spin_unlock(&dasd_devmap_lock);
+
+ /* if primary device is already online call device setup directly */
+ if (prim_devmap->device && !prim_devmap->device->copy) {
+ device = prim_devmap->device;
+ if (device->discipline->pprc_enabled) {
+ pprc_enabled = device->discipline->pprc_enabled(device);
+ rc = dasd_devmap_set_device_copy_relation(device->cdev,
+ pprc_enabled);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+ }
+ if (rc) {
+ dasd_devmap_del_copy_relation(copy, prim_busid);
+ dasd_devmap_del_copy_relation(copy, sec_busid);
+ count = rc;
+ }
+
+ return count;
+}
+static DEVICE_ATTR(copy_pair, 0644, dasd_copy_pair_show,
+ dasd_copy_pair_store);
+
+static ssize_t
+dasd_copy_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_copy_relation *copy;
+ struct dasd_device *device;
+ int len, i;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if (!device->copy) {
+ len = sysfs_emit(buf, "none\n");
+ goto out;
+ }
+ copy = device->copy;
+ /* only the active device is primary */
+ if (copy->active->device == device) {
+ len = sysfs_emit(buf, "primary\n");
+ goto out;
+ }
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].device == device) {
+ len = sysfs_emit(buf, "secondary\n");
+ goto out;
+ }
+ }
+ /* not in the list, no COPY role */
+ len = sysfs_emit(buf, "none\n");
+out:
+ dasd_put_device(device);
+ return len;
+}
+static DEVICE_ATTR(copy_role, 0444, dasd_copy_role_show, NULL);
+
+static ssize_t dasd_device_ping(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ size_t rc;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ /*
+ * do not try during offline processing;
+ * this is only an early check, as
+ * the sleep_on function itself checks for offline
+ * processing again
+ */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ rc = -EBUSY;
+ goto out;
+ }
+ if (!device->discipline || !device->discipline->device_ping) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = device->discipline->device_ping(device);
+ if (!rc)
+ rc = count;
+out:
+ dasd_put_device(device);
+ return rc;
+}
+static DEVICE_ATTR(ping, 0200, NULL, dasd_device_ping);
+
#define DASD_DEFINE_ATTR(_name, _func) \
static ssize_t dasd_##_name##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -1739,6 +2321,9 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_hpf.attr,
&dev_attr_ese.attr,
&dev_attr_fc_security.attr,
+ &dev_attr_copy_pair.attr,
+ &dev_attr_copy_role.attr,
+ &dev_attr_ping.attr,
NULL,
};
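The new copy_pair, copy_role and ping device attributes added above are plain sysfs files of the CCW device. The following is only an illustrative userspace sketch of how they could be exercised; it assumes the usual /sys/bus/ccw/devices/<busid>/ layout, and the bus IDs and helper function are placeholders, not part of this patch.

#include <stdio.h>

/* write a value to a DASD CCW device attribute (path layout assumed) */
static int dasd_sysfs_write(const char *busid, const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/ccw/devices/%s/%s", busid, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char role[16] = "";
	char path[128];
	FILE *f;

	/* pair primary 0.0.1234 with secondary 0.0.4321 (placeholder IDs) */
	dasd_sysfs_write("0.0.1234", "copy_pair", "0.0.1234,0.0.4321");

	/* read back the role reported for the primary device */
	snprintf(path, sizeof(path), "/sys/bus/ccw/devices/0.0.1234/copy_role");
	f = fopen(path, "r");
	if (f) {
		if (fgets(role, sizeof(role), f))
			printf("copy_role: %s", role);
		fclose(f);
	}

	/* tear the relation down again */
	dasd_sysfs_write("0.0.1234", "copy_pair", "clear");
	return 0;
}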
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 94ee59864971..f956a4ac9881 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -627,7 +627,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
static void dasd_diag_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
+ struct request_queue *q = block->gdp->queue;
int max;
max = DIAG_MAX_BLOCKS << block->s2b_shift;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 3cc93e2e4e15..662730f3b027 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2013,6 +2013,49 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
}
/*
+ * return whether the device is the copy relation primary, if a copy relation is active
+ */
+static int dasd_device_is_primary(struct dasd_device *device)
+{
+ if (!device->copy)
+ return 1;
+
+ if (device->copy->active->device == device)
+ return 1;
+
+ return 0;
+}
+
+static int dasd_eckd_alloc_block(struct dasd_device *device)
+{
+ struct dasd_block *block;
+ struct dasd_uid temp_uid;
+
+ if (!dasd_device_is_primary(device))
+ return 0;
+
+ dasd_eckd_get_uid(device, &temp_uid);
+ if (temp_uid.type == UA_BASE_DEVICE) {
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "could not allocate dasd block structure");
+ return PTR_ERR(block);
+ }
+ device->block = block;
+ block->base = device;
+ }
+ return 0;
+}
+
+static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->rdc_data.facilities.PPRC_enabled;
+}
+
+/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
*/
@@ -2020,8 +2063,6 @@ static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct dasd_block *block;
- struct dasd_uid temp_uid;
int rc, i;
int readonly;
unsigned long value;
@@ -2079,20 +2120,29 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->default_expires = value;
}
- dasd_eckd_get_uid(device, &temp_uid);
- if (temp_uid.type == UA_BASE_DEVICE) {
- block = dasd_alloc_block();
- if (IS_ERR(block)) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "could not allocate dasd "
- "block structure");
- rc = PTR_ERR(block);
- goto out_err1;
- }
- device->block = block;
- block->base = device;
+ /* Read Device Characteristics */
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &private->rdc_data, 64);
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristic failed, rc=%d", rc);
+ goto out_err1;
+ }
+
+ /* setup PPRC for device from devmap */
+ rc = dasd_devmap_set_device_copy_relation(device->cdev,
+ dasd_eckd_pprc_enabled(device));
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "copy relation setup failed, rc=%d", rc);
+ goto out_err1;
}
+ /* check if block device is needed and allocate in case */
+ rc = dasd_eckd_alloc_block(device);
+ if (rc)
+ goto out_err1;
+
/* register lcu with alias handling, enable PAV */
rc = dasd_alias_make_device_known_to_lcu(device);
if (rc)
@@ -2117,15 +2167,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
/* Read Extent Pool Information */
dasd_eckd_read_ext_pool_info(device);
- /* Read Device Characteristics */
- rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
- &private->rdc_data, 64);
- if (rc) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read device characteristic failed, rc=%d", rc);
- goto out_err3;
- }
-
if ((device->features & DASD_FEATURE_USERAW) &&
!(private->rdc_data.facilities.RT_in_LR)) {
dev_err(&device->cdev->dev, "The storage server does not "
@@ -6078,6 +6119,207 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
return 0;
}
+static struct dasd_device
+*copy_relation_find_device(struct dasd_copy_relation *copy,
+ char *busid)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ return copy->entry[i].device;
+ }
+ return NULL;
+}
+
+/*
+ * set the new active/primary device
+ */
+static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
+ char *old_busid)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, new_busid,
+ DASD_BUS_ID_SIZE) == 0) {
+ copy->active = &copy->entry[i];
+ copy->entry[i].primary = true;
+ } else if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, old_busid,
+ DASD_BUS_ID_SIZE) == 0) {
+ copy->entry[i].primary = false;
+ }
+ }
+}
+
+/*
+ * The function will swap the role of a given copy pair.
+ * During the swap operation the relation of the blockdevice is disconnected
+ * from the old primary and connected to the new.
+ *
+ * IO is paused on the block queue before swap and may be resumed afterwards.
+ */
+static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
+ char *sec_busid)
+{
+ struct dasd_device *primary, *secondary;
+ struct dasd_copy_relation *copy;
+ struct dasd_block *block;
+ struct gendisk *gdp;
+
+ copy = device->copy;
+ if (!copy)
+ return DASD_COPYPAIRSWAP_INVALID;
+ primary = copy->active->device;
+ if (!primary)
+ return DASD_COPYPAIRSWAP_INVALID;
+ /* double check if swap has correct primary */
+ if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
+ return DASD_COPYPAIRSWAP_PRIMARY;
+
+ secondary = copy_relation_find_device(copy, sec_busid);
+ if (!secondary)
+ return DASD_COPYPAIRSWAP_SECONDARY;
+
+ /*
+ * usually the device should already be quiesced for the swap;
+ * as a precaution, stop the devices and requeue all requests again
+ */
+ dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
+ dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
+ dasd_generic_requeue_all_requests(primary);
+
+ /* swap DASD internal device <> block assignment */
+ block = primary->block;
+ primary->block = NULL;
+ secondary->block = block;
+ block->base = secondary;
+ /* set new primary device in COPY relation */
+ copy_pair_set_active(copy, sec_busid, prim_busid);
+
+ /* swap blocklayer device link */
+ gdp = block->gdp;
+ dasd_add_link_to_gendisk(gdp, secondary);
+
+ /* re-enable device */
+ dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
+ dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
+ dasd_schedule_device_bh(secondary);
+
+ return DASD_COPYPAIRSWAP_SUCCESS;
+}
+
+/*
+ * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
+ */
+static int dasd_eckd_query_pprc_status(struct dasd_device *device,
+ struct dasd_pprc_data_sc4 *data)
+{
+ struct dasd_pprc_data_sc4 *pprc_data;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ sizeof(*prssdp) + sizeof(*pprc_data) + 1,
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate query PPRC status request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *)cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_PPRCEQ;
+ prssdp->varies[0] = PPRCEQ_SCOPE_4;
+ pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)prssdp;
+
+ /* Read Subsystem Data - PPRC Extended Query */
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*pprc_data);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)pprc_data;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ *data = *pprc_data;
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "PPRC Extended Query failed with rc=%d\n",
+ rc);
+ rc = -EOPNOTSUPP;
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * ECKD NOP - no operation
+ */
+static int dasd_eckd_nop(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate NOP request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 1;
+ cqr->expires = 10 * HZ;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_NOP;
+ ccw->flags |= CCW_FLAG_SLI;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc != 0) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "NOP failed with rc=%d\n", rc);
+ rc = -EOPNOTSUPP;
+ }
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+static int dasd_eckd_device_ping(struct dasd_device *device)
+{
+ return dasd_eckd_nop(device);
+}
+
/*
* Perform Subsystem Function - CUIR response
*/
@@ -6602,7 +6844,7 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
+ struct request_queue *q = block->gdp->queue;
struct dasd_device *device = block->base;
int max;
@@ -6697,6 +6939,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
.ese_format = dasd_eckd_ese_format,
.ese_read = dasd_eckd_ese_read,
+ .pprc_status = dasd_eckd_query_pprc_status,
+ .pprc_enabled = dasd_eckd_pprc_enabled,
+ .copy_pair_swap = dasd_eckd_copy_pair_swap,
+ .device_ping = dasd_eckd_device_ping,
};
static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index a91b265441cc..f9299bd184ba 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -13,6 +13,7 @@
/*****************************************************************************
* SECTION: CCW Definitions
****************************************************************************/
+#define DASD_ECKD_CCW_NOP 0x03
#define DASD_ECKD_CCW_WRITE 0x05
#define DASD_ECKD_CCW_READ 0x06
#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
@@ -66,10 +67,16 @@
* Perform Subsystem Function / Sub-Orders
*/
#define PSF_SUBORDER_QHA 0x1C /* Query Host Access */
+#define PSF_SUBORDER_PPRCEQ 0x50 /* PPRC Extended Query */
#define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */
#define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */
/*
+ * PPRC Extended Query Scopes
+ */
+#define PPRCEQ_SCOPE_4 0x04 /* Scope 4 for PPRC Extended Query */
+
+/*
* CUIR response condition codes
*/
#define PSF_CUIR_INVALID 0x00
@@ -261,7 +268,7 @@ struct dasd_eckd_characteristics {
unsigned char reserved3:8;
unsigned char defect_wr:1;
unsigned char XRC_supported:1;
- unsigned char reserved4:1;
+ unsigned char PPRC_enabled:1;
unsigned char striping:1;
unsigned char reserved5:4;
unsigned char cfw:1;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 60be7f7bf2d1..cddfb01a3dca 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -767,7 +767,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
static void dasd_fba_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
+ struct request_queue *q = block->gdp->queue;
unsigned int max_bytes, max_discard_sectors;
int max;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5a83f0a39901..998a961e1704 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -25,7 +25,14 @@
#include "dasd_int.h"
-static struct lock_class_key dasd_bio_compl_lkclass;
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
/*
* Allocate and register gendisk structure for device.
@@ -41,10 +48,21 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
- gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
- &dasd_bio_compl_lkclass);
- if (!gdp)
- return -ENOMEM;
+ block->tag_set.ops = &dasd_mq_ops;
+ block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+ block->tag_set.nr_hw_queues = nr_hw_queues;
+ block->tag_set.queue_depth = queue_depth;
+ block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ block->tag_set.numa_node = NUMA_NO_NODE;
+ rc = blk_mq_alloc_tag_set(&block->tag_set);
+ if (rc)
+ return rc;
+
+ gdp = blk_mq_alloc_disk(&block->tag_set, block);
+ if (IS_ERR(gdp)) {
+ blk_mq_free_tag_set(&block->tag_set);
+ return PTR_ERR(gdp);
+ }
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
@@ -100,6 +118,7 @@ void dasd_gendisk_free(struct dasd_block *block)
block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
+ blk_mq_free_tag_set(&block->tag_set);
}
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 333a399f754e..97adc8a7ae6b 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -260,6 +260,55 @@ struct dasd_uid {
};
/*
+ * PPRC Status data
+ */
+struct dasd_pprc_header {
+ __u8 entries; /* 0 Number of device entries */
+ __u8 unused; /* 1 unused */
+ __u16 entry_length; /* 2-3 Length of device entry */
+ __u32 unused2; /* 4-7 unused */
+} __packed;
+
+struct dasd_pprc_dev_info {
+ __u8 state; /* 0 Copy State */
+ __u8 flags; /* 1 Flags */
+ __u8 reserved1[2]; /* 2-3 reserved */
+ __u8 prim_lss; /* 4 Primary device LSS */
+ __u8 primary; /* 5 Primary device address */
+ __u8 sec_lss; /* 6 Secondary device LSS */
+ __u8 secondary; /* 7 Secondary device address */
+ __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */
+ __u8 reserved2[12]; /* 10-21 reserved */
+ __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */
+ __u8 reserved3[12]; /* 24-35 reserved */
+ __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */
+ __u8 reserved4[90]; /* 38-127 reserved */
+} __packed;
+
+struct dasd_pprc_data_sc4 {
+ struct dasd_pprc_header header;
+ struct dasd_pprc_dev_info dev_info[5];
+} __packed;
+
+#define DASD_BUS_ID_SIZE 20
+#define DASD_CP_ENTRIES 5
+
+struct dasd_copy_entry {
+ char busid[DASD_BUS_ID_SIZE];
+ struct dasd_device *device;
+ bool primary;
+ bool configured;
+};
+
+struct dasd_copy_relation {
+ struct dasd_copy_entry entry[DASD_CP_ENTRIES];
+ struct dasd_copy_entry *active;
+};
+
+int dasd_devmap_set_device_copy_relation(struct ccw_device *,
+ bool pprc_enabled);
+
+/*
* the struct dasd_discipline is
* sth like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
@@ -387,6 +436,10 @@ struct dasd_discipline {
struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
struct dasd_ccw_req *, struct irb *);
int (*ese_read)(struct dasd_ccw_req *, struct irb *);
+ int (*pprc_status)(struct dasd_device *, struct dasd_pprc_data_sc4 *);
+ bool (*pprc_enabled)(struct dasd_device *);
+ int (*copy_pair_swap)(struct dasd_device *, char *, char *);
+ int (*device_ping)(struct dasd_device *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -583,12 +636,12 @@ struct dasd_device {
struct dasd_profile profile;
struct dasd_format_entry format_entry;
struct kset *paths_info;
+ struct dasd_copy_relation *copy;
};
struct dasd_block {
/* Block device stuff. */
struct gendisk *gdp;
- struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
struct block_device *bdev;
@@ -629,6 +682,7 @@ struct dasd_queue {
#define DASD_STOPPED_PENDING 4 /* long busy */
#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
#define DASD_STOPPED_SU 16 /* summary unit check handling */
+#define DASD_STOPPED_PPRC 32 /* PPRC swap */
#define DASD_STOPPED_NOSPC 128 /* no space left */
/* per device flags */
@@ -654,6 +708,22 @@ struct dasd_queue {
void dasd_put_device_wake(struct dasd_device *);
/*
+ * return values to be returned from the copy pair swap function
+ * 0x00: swap successful
+ * 0x01: swap data invalid
+ * 0x02: no active device found
+ * 0x03: wrong primary specified
+ * 0x04: secondary device not found
+ * 0x05: swap already running
+ */
+#define DASD_COPYPAIRSWAP_SUCCESS 0
+#define DASD_COPYPAIRSWAP_INVALID 1
+#define DASD_COPYPAIRSWAP_NOACTIVE 2
+#define DASD_COPYPAIRSWAP_PRIMARY 3
+#define DASD_COPYPAIRSWAP_SECONDARY 4
+#define DASD_COPYPAIRSWAP_MULTIPLE 5
+
+/*
* Reference count inliners
*/
static inline void
@@ -779,6 +849,7 @@ extern debug_info_t *dasd_debug_area;
extern struct dasd_profile dasd_global_profile;
extern unsigned int dasd_global_profile_level;
extern const struct block_device_operations dasd_device_operations;
+extern struct blk_mq_ops dasd_mq_ops;
extern struct kmem_cache *dasd_page_cache;
@@ -837,6 +908,8 @@ int dasd_generic_verify_path(struct dasd_device *, __u8);
void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *);
void dasd_generic_space_avail(struct dasd_device *);
+int dasd_generic_requeue_all_requests(struct dasd_device *);
+
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 95349f95758c..d0ddf2cc9786 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -379,6 +379,56 @@ out_err:
return rc;
}
+/*
+ * Swap driver internal copy relation.
+ */
+static int
+dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
+{
+ struct dasd_copypair_swap_data_t data;
+ struct dasd_device *device;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ device = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!device)
+ return -ENODEV;
+
+ if (copy_from_user(&data, argp, sizeof(struct dasd_copypair_swap_data_t))) {
+ dasd_put_device(device);
+ return -EFAULT;
+ }
+ if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
+ pr_warn("%s: Ivalid swap data specified.\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return DASD_COPYPAIRSWAP_INVALID;
+ }
+ if (bdev_is_partition(bdev)) {
+ pr_warn("%s: The specified DASD is a partition and cannot be swapped\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return DASD_COPYPAIRSWAP_INVALID;
+ }
+ if (!device->copy) {
+ pr_warn("%s: The specified DASD has no copy pair set up\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return -ENODEV;
+ }
+ if (!device->discipline->copy_pair_swap) {
+ dasd_put_device(device);
+ return -EOPNOTSUPP;
+ }
+ rc = device->discipline->copy_pair_swap(device, data.primary,
+ data.secondary);
+ dasd_put_device(device);
+
+ return rc;
+}
+
#ifdef CONFIG_DASD_PROFILE
/*
* Reset device profile information
@@ -637,6 +687,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDRAS:
rc = dasd_ioctl_release_space(bdev, argp);
break;
+ case BIODASDCOPYPAIRSWAP:
+ rc = dasd_ioctl_copy_pair_swap(bdev, argp);
+ break;
default:
/* if the discipline has an ioctl method try it. */
rc = -ENOTTY;
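The BIODASDCOPYPAIRSWAP handler above copies a dasd_copypair_swap_data_t with the primary and secondary bus IDs from userspace and hands it to the discipline's copy_pair_swap callback. The following is only an illustrative userspace sketch of calling it; it assumes the s390 uapi header asm/dasd.h exports BIODASDCOPYPAIRSWAP and struct dasd_copypair_swap_data_t with char-array primary/secondary fields as used in the handler, and the device node and bus IDs are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>

int main(void)
{
	struct dasd_copypair_swap_data_t data;
	int fd, rc;

	/* open the whole block device, not a partition */
	fd = open("/dev/dasda", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&data, 0, sizeof(data));
	/* current primary and the secondary to swap to (placeholder IDs) */
	strncpy(data.primary, "0.0.1234", sizeof(data.primary) - 1);
	strncpy(data.secondary, "0.0.4321", sizeof(data.secondary) - 1);

	rc = ioctl(fd, BIODASDCOPYPAIRSWAP, &data);
	if (rc)
		fprintf(stderr, "copy pair swap failed, rc=%d\n", rc);

	close(fd);
	return rc ? 1 : 0;
}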
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8f1d1cf23d44..59ac98f2bd27 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -2086,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
+ if (!ap_qci_info) /* QCI not supported */
+ return false;
+
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0c40af157df2..0f17933954fb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -148,12 +148,16 @@ struct ap_driver {
/*
* Called at the start of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_config_changed)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
/*
* Called at the end of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_scan_complete)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 6c8c41fac4e1..ee82207b4e60 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -984,6 +984,11 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
+
set_bit_inv(apid, matrix_mdev->matrix.apm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1109,6 +1114,11 @@ static ssize_t unassign_adapter_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
@@ -1183,6 +1193,11 @@ static ssize_t assign_domain_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
+
set_bit_inv(apqi, matrix_mdev->matrix.aqm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1286,6 +1301,11 @@ static ssize_t unassign_domain_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
@@ -1329,6 +1349,11 @@ static ssize_t assign_control_domain_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
+
/* Set the bit in the ADM (bitmask) corresponding to the AP control
* domain number (id). The bits in the mask, from most significant to
* least significant, correspond to IDs 0 up to the one less than the
@@ -1378,6 +1403,11 @@ static ssize_t unassign_control_domain_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv(domid, matrix_mdev->matrix.adm);
if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2d4436cbcb47..9dc935886e9f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1133,7 +1133,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(card->dev, &card->napi, qeth_poll);
return register_netdev(card->dev);
}
@@ -1530,8 +1530,8 @@ static void qeth_addr_change_event(struct qeth_card *card,
else
INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
data->card = card;
- memcpy(&data->ac_event, hostevs,
- sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+ data->ac_event = *hostevs;
+ memcpy(data->ac_event.entry, hostevs->entry, extrasize);
queue_delayed_work(card->event_wq, &data->dwork, 0);
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8d44bce0477a..d8487a10cd55 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1910,7 +1910,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_set_tso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
- netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(card->dev, &card->napi, qeth_poll);
return register_netdev(card->dev);
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 896896e32664..a10dbe632ef9 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -637,7 +637,6 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 70e401fd432a..c37027276162 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3537,7 +3537,7 @@ static struct attribute *host_v2_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v2_hw);
-static int map_queues_v2_hw(struct Scsi_Host *shost)
+static void map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
@@ -3552,9 +3552,6 @@ static int map_queues_v2_hw(struct Scsi_Host *shost)
for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
-
- return 0;
-
}
static struct scsi_host_template sht_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index efe8c5be5870..d716e5632d0f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3171,13 +3171,12 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
return 0;
}
-static int hisi_sas_map_queues(struct Scsi_Host *shost)
+static void hisi_sas_map_queues(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
}
static struct scsi_host_template sht_v3_hw = {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 0738238ed6cc..9857dba09c95 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ /*
+ * New SCSI devices cannot be attached anymore because of the SCSI host
+ * state so drop the tag set refcnt. Wait until the tag set refcnt drops
+ * to zero because .exit_cmd_priv implementations may need the host
+ * pointer.
+ */
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ wait_for_completion(&shost->tagset_freed);
+
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -190,15 +199,6 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
-
- /*
- * After scsi_remove_host() has returned the scsi LLD module can be
- * unloaded and/or the host resources can be released. Hence wait until
- * the dependent SCSI targets and devices are gone before returning.
- */
- wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
-
- scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -254,6 +254,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto fail;
+ kref_init(&shost->tagset_refcnt);
+ init_completion(&shost->tagset_freed);
+
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -309,8 +312,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
- * Any resources associated with the SCSI host in this function except
- * the tag set will be freed by scsi_host_dev_release().
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -326,7 +329,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail:
return error;
}
@@ -406,7 +409,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- init_waitqueue_head(&shost->targets_wq);
index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9c82e5dc4fcc..a36fa1c128a8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -872,7 +872,8 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
- return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+ return ata_change_queue_depth(dev->sata_dev.ap,
+ sas_to_ata_dev(dev), sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c69c5a0979ec..55a1ad6eed03 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8053,7 +8053,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -8481,6 +8481,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 084c0f9fdc3a..938a5e435943 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4272,7 +4272,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4562,7 +4562,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a3e117a4b8e7..4ae85d590801 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3174,7 +3174,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
-static int megasas_map_queues(struct Scsi_Host *shost)
+static void megasas_map_queues(struct Scsi_Host *shost)
{
struct megasas_instance *instance;
int qoff = 0, offset;
@@ -3183,7 +3183,7 @@ static int megasas_map_queues(struct Scsi_Host *shost)
instance = (struct megasas_instance *)shost->hostdata;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
offset = instance->low_latency_index_start;
@@ -3209,8 +3209,6 @@ static int megasas_map_queues(struct Scsi_Host *shost)
map->queue_offset = qoff;
blk_mq_map_queues(map);
}
-
- return 0;
}
static void megasas_aen_polling(struct work_struct *work);
@@ -7153,22 +7151,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index e48d4261d0bc..09c5fe37754c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -5310,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index bfa1165e23b6..9681c8bf24ed 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3464,7 +3464,7 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
*
* Return: return zero.
*/
-static int mpi3mr_map_queues(struct Scsi_Host *shost)
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
int i, qoff, offset;
@@ -3500,9 +3500,6 @@ static int mpi3mr_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
offset += map->nr_queues;
}
-
- return 0;
-
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 565339a0811d..331e896d8225 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= 32)
+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index def37a7e5980..791a406b4f8e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
@@ -11872,7 +11872,7 @@ out:
* scsih_map_queues - map reply queues with request queues
* @shost: SCSI host pointer
*/
-static int scsih_map_queues(struct Scsi_Host *shost)
+static void scsih_map_queues(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc =
(struct MPT3SAS_ADAPTER *)shost->hostdata;
@@ -11882,7 +11882,7 @@ static int scsih_map_queues(struct Scsi_Host *shost)
int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
map = &shost->tag_set.map[i];
@@ -11910,7 +11910,6 @@ static int scsih_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
}
- return 0;
}
/* shost template for SAS 2.0 HBA devices */
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a0028e130a7e..2ff2fac1e403 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -81,7 +81,7 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
-static int pm8001_map_queues(struct Scsi_Host *shost)
+static void pm8001_map_queues(struct Scsi_Host *shost)
{
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3d6b137314f3..bbc4d5890ae6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3686,11 +3686,6 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 7450c3458be7..02fdeb0d31ec 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -684,12 +684,8 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
struct blk_mq_queue_map *map)
{
struct scsi_qla_host *vha = lport->private;
- int rc;
- rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
- if (rc)
- ql_log(ql_log_warn, vha, 0x21de,
- "pci map queue failed 0x%x", rc);
+ blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 0bd0fd1042df..87a93892deac 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -350,7 +350,7 @@ MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
@@ -7994,17 +7994,15 @@ qla_pci_reset_done(struct pci_dev *pdev)
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
-static int qla2xxx_map_queues(struct Scsi_Host *shost)
+static void qla2xxx_map_queues(struct Scsi_Host *shost)
{
- int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
- rc = blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
- return rc;
+ blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2b2f68288375..4acff4e84b90 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
- if (!abort_cmd)
+ if (!abort_cmd) {
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EIO;
+ }
mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
if (abort_cmd->qpair) {
@@ -6935,14 +6937,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 086ec5b5862d..c59eac7a32f2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,13 +586,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- /*
- * Decreasing the module reference count before the device reference
- * count is safe since scsi_remove_host() only returns after all
- * devices have been removed.
- */
- module_put(sdev->host->hostt->module);
+ struct module *mod = sdev->host->hostt->module;
+
put_device(&sdev->sdev_gendev);
+ module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b8a76b89f85a..697fc57bc711 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7474,12 +7474,12 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
-static int sdebug_map_queues(struct Scsi_Host *shost)
+static void sdebug_map_queues(struct Scsi_Host *shost)
{
int i, qoff;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -7501,9 +7501,6 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
}
-
- return 0;
-
}
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 448748e3fba5..786fb963cf3f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2004,9 +2004,11 @@ maybe_retry:
}
}
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+ blk_status_t status)
{
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
/**
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 729e309e6034..2d20da55fb64 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -449,25 +449,9 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
if (ret < 0)
goto out_put_request;
- ret = 0;
- if (hdr->iovec_count && hdr->dxfer_len) {
- struct iov_iter i;
- struct iovec *iov = NULL;
-
- ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
- hdr->iovec_count, 0, &iov, &i);
- if (ret < 0)
- goto out_put_request;
-
- /* SG_IO howto says that the shorter of the two wins */
- iov_iter_truncate(&i, hdr->dxfer_len);
-
- ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
- hdr->dxfer_len, GFP_KERNEL);
-
+ ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL, hdr->iovec_count && hdr->dxfer_len,
+ hdr->iovec_count, 0, rq_data_dir(rq));
if (ret)
goto out_put_request;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4dbd29ab1dcc..d7ec4ab2b111 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -111,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -121,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
/**
@@ -651,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
return bytes;
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
-{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
-}
-
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
struct request *req = scsi_cmd_to_rq(cmd);
@@ -676,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}
+/*
+ * When ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -772,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_TRANSPORT;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -832,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -926,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -942,7 +949,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
@@ -979,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
@@ -1542,7 +1548,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
cmd->eh_eflags = 0;
- cmd->allowed = 0;
cmd->prot_type = 0;
cmd->prot_flags = 0;
cmd->submitter = 0;
@@ -1593,6 +1598,8 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return ret;
}
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1849,13 +1856,13 @@ static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int scsi_map_queues(struct blk_mq_tag_set *set)
+static void scsi_map_queues(struct blk_mq_tag_set *set)
{
struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -1976,9 +1983,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
return blk_mq_alloc_tag_set(tag_set);
}
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
{
+ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+ tagset_refcnt);
+
blk_mq_free_tag_set(&shost->tag_set);
+ complete(&shost->tagset_freed);
}
/**
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 429663bd78ec..f385b3f04d6e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern void scsi_mq_free_tags(struct kref *kref);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ac6059702d13..5d27f5196de6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
+ kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
@@ -406,14 +407,9 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
- struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
-
- if (atomic_dec_return(&shost->target_count) == 0)
- wake_up(&shost->targets_wq);
-
put_device(parent);
}
@@ -526,10 +522,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
- init_waitqueue_head(&starget->sdev_wq);
-
- atomic_inc(&shost->target_count);
-
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9dad2fd5297f..5d61f58399dc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -443,15 +443,18 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct scsi_device *sdev = container_of(work, struct scsi_device,
- ew.work);
- struct scsi_target *starget = sdev->sdev_target;
+ struct scsi_device *sdev;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ struct module *mod;
+
+ sdev = container_of(work, struct scsi_device, ew.work);
+
+ mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -513,16 +516,19 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
- if (starget && atomic_dec_return(&starget->sdev_count) == 0)
- wake_up(&starget->sdev_wq);
-
if (parent)
put_device(parent);
+ module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+
+ /* Set the module pointer to NULL in case the module is being unloaded */
+ if (!try_module_get(sdp->host->hostt->module))
+ sdp->host->hostt->module = NULL;
+
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -1470,6 +1476,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
mutex_unlock(&sdev->state_mutex);
blk_mq_destroy_queue(sdev->request_queue);
+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
@@ -1529,14 +1536,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
-
- /*
- * After scsi_remove_target() returns its caller can remove resources
- * associated with @starget, e.g. an rport or session. Wait until all
- * devices associated with @starget have been removed to prevent that
- * a SCSI error handling callback function triggers a use-after-free.
- */
- wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
/**
@@ -1647,9 +1646,6 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
-
- atomic_inc(&starget->sdev_count);
-
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8f79fa6318fe..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -103,7 +103,6 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
-static void sd_start_done_work(struct work_struct *work);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
@@ -3471,7 +3470,6 @@ static int sd_probe(struct device *dev)
sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
- INIT_WORK(&sdkp->start_done_work, sd_start_done_work);
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
@@ -3594,69 +3592,12 @@ static void scsi_disk_release(struct device *dev)
kfree(sdkp);
}
-/* Process sense data after a START command finished. */
-static void sd_start_done_work(struct work_struct *work)
-{
- struct scsi_disk *sdkp = container_of(work, typeof(*sdkp),
- start_done_work);
- struct scsi_sense_hdr sshdr;
- int res = sdkp->start_result;
-
- if (res == 0)
- return;
-
- sd_print_result(sdkp, "Start/Stop Unit failed", res);
-
- if (res < 0)
- return;
-
- if (scsi_normalize_sense(sdkp->start_sense_buffer,
- sdkp->start_sense_len, &sshdr))
- sd_print_sense_hdr(sdkp, &sshdr);
-}
-
-/* A START command finished. May be called from interrupt context. */
-static void sd_start_done(struct request *req, blk_status_t status)
-{
- const struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
- struct scsi_disk *sdkp = scsi_disk(req->q->disk);
-
- sdkp->start_result = scmd->result;
- WARN_ON_ONCE(scmd->sense_len > SCSI_SENSE_BUFFERSIZE);
- sdkp->start_sense_len = scmd->sense_len;
- memcpy(sdkp->start_sense_buffer, scmd->sense_buffer,
- ARRAY_SIZE(sdkp->start_sense_buffer));
- WARN_ON_ONCE(!schedule_work(&sdkp->start_done_work));
-}
-
-/* Submit a START command asynchronously. */
-static int sd_submit_start(struct scsi_disk *sdkp, u8 cmd[], u8 cmd_len)
-{
- struct scsi_device *sdev = sdkp->device;
- struct request_queue *q = sdev->request_queue;
- struct request *req;
- struct scsi_cmnd *scmd;
-
- req = scsi_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- scmd = blk_mq_rq_to_pdu(req);
- scmd->cmd_len = cmd_len;
- memcpy(scmd->cmnd, cmd, cmd_len);
- scmd->allowed = sdkp->max_retries;
- req->timeout = SD_TIMEOUT;
- req->rq_flags |= RQF_PM | RQF_QUIET;
- req->end_io = sd_start_done;
- blk_execute_rq_nowait(req, /*at_head=*/true);
-
- return 0;
-}
-
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
+ struct scsi_sense_hdr sshdr;
struct scsi_device *sdp = sdkp->device;
+ int res;
if (start)
cmd[4] |= 1; /* START */
@@ -3667,10 +3608,23 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- /* Wait until processing of sense data has finished. */
- flush_work(&sdkp->start_done_work);
+ res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
+ if (res) {
+ sd_print_result(sdkp, "Start/Stop Unit failed", res);
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
+ sd_print_sense_hdr(sdkp, &sshdr);
+ /* 0x3a is medium not present */
+ if (sshdr.asc == 0x3a)
+ res = 0;
+ }
+ }
- return sd_submit_start(sdkp, cmd, sizeof(cmd));
+ /* SCSI error codes must not go to the generic layer */
+ if (res)
+ return -EIO;
+
+ return 0;
}
/*
@@ -3697,8 +3651,6 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
-
- flush_work(&sdkp->start_done_work);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index b89187761d61..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -150,11 +150,6 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
-
- int start_result;
- u32 start_sense_len;
- u8 start_sense_buffer[SCSI_SENSE_BUFFERSIZE];
- struct work_struct start_done_work;
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 340b050ad28d..ce34a8ad53b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
} Sg_device;
/* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1311,7 +1311,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void
+static enum rq_end_io_ret
sg_rq_end_io(struct request *rq, blk_status_t status)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -1324,11 +1324,11 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
- return;
+ return RQ_END_IO_NONE;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
- return;
+ return RQ_END_IO_NONE;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
@@ -1406,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
+ return RQ_END_IO_NONE;
}
static const struct file_operations sg_fops = {
@@ -1803,26 +1804,8 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md->from_user = 0;
}
- if (iov_count) {
- struct iovec *iov = NULL;
- struct iov_iter i;
-
- res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
- if (res < 0)
- return res;
-
- iov_iter_truncate(&i, hp->dxfer_len);
- if (!iov_iter_count(&i)) {
- kfree(iov);
- return -EINVAL;
- }
-
- res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
- kfree(iov);
- } else
- res = blk_rq_map_user(q, rq, md, hp->dxferp,
- hp->dxfer_len, GFP_ATOMIC);
-
+ res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
+ GFP_ATOMIC, iov_count, iov_count, 1, rw);
if (!res) {
srp->bio = rq->bio;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7a8c2c75acba..b971fbe3b3a1 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6436,12 +6436,12 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 850172a2b8f1..55e7c07ebe4c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -512,7 +512,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_dec(&STp->stats->in_flight);
}
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+ blk_status_t status)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
@@ -532,6 +533,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
blk_rq_unmap_user(tmp);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fe000da11332..8ced292c4b96 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 578c4b6d0f7d..077a8e24bd28 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -711,12 +711,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+ blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 2be3afe6c2e3..dd5f2a13ceb5 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -469,6 +469,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
{
const struct meson_ee_pwrc_domain_data *match;
struct regmap *regmap_ao, *regmap_hhi;
+ struct device_node *parent_np;
struct meson_ee_pwrc *pwrc;
int i, ret;
@@ -495,7 +496,9 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
pwrc->xlate.num_domains = match->count;
- regmap_hhi = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ parent_np = of_get_parent(pdev->dev.of_node);
+ regmap_hhi = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_hhi)) {
dev_err(&pdev->dev, "failed to get HHI regmap\n");
return PTR_ERR(regmap_hhi);
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index b4615b288625..312fd9afccb0 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -273,6 +273,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
struct meson_gx_pwrc_vpu *vpu_pd;
+ struct device_node *parent_np;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
@@ -291,7 +292,9 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
- regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ parent_np = of_get_parent(pdev->dev.of_node);
+ regmap_ao = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
return PTR_ERR(regmap_ao);
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index cf1129e9f76b..031ec4aa06d5 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -660,6 +660,12 @@ int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
+int apple_rtkit_poll(struct apple_rtkit *rtk)
+{
+ return mbox_client_peek_data(rtk->mbox_chan);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_poll);
+
int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
{
u64 msg;
diff --git a/drivers/soc/bcm/bcm63xx/Kconfig b/drivers/soc/bcm/bcm63xx/Kconfig
index 9e501c8ac5ce..355c34482076 100644
--- a/drivers/soc/bcm/bcm63xx/Kconfig
+++ b/drivers/soc/bcm/bcm63xx/Kconfig
@@ -13,8 +13,8 @@ endif # SOC_BCM63XX
config BCM_PMB
bool "Broadcom PMB (Power Management Bus) driver"
- depends on ARCH_BCM4908 || (COMPILE_TEST && OF)
- default ARCH_BCM4908
+ depends on ARCH_BCMBCA || (COMPILE_TEST && OF)
+ default ARCH_BCMBCA
select PM_GENERIC_DOMAINS if PM
help
This enables support for the Broadcom's PMB (Power Management Bus) that
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 1467bbd59690..e1d7b4543248 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -288,7 +288,6 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
- of_node_put(np);
return ret;
}
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index d6b30d521307..d681cd24c6e1 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
-#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/panic_notifier.h>
@@ -664,7 +663,20 @@ static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
return of_io_request_and_map(dn, index, dn->full_name);
}
-
+/*
+ * The AON is a small domain in the SoC that can retain its state across
+ * various system wide sleep states and specific reset conditions; the
+ * AON DATA RAM is a small RAM of a few words (< 1KB) which can store
+ * persistent information across such events.
+ *
+ * The purpose of the panic notifier below is to notify the bootloader
+ * that a panic occurred, so that it tries its best to preserve the DRAM
+ * contents holding that buffer for recovery by the kernel, as opposed
+ * to wiping the DRAM clean again.
+ *
+ * Reference: comment from Florian Fainelli, at
+ * https://lore.kernel.org/lkml/781cafb0-8d06-8b56-907a-5175c2da196a@gmail.com
+ */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -684,13 +696,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;
/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;
@@ -700,8 +713,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}
writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -711,7 +726,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -731,17 +747,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}
base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;
pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -752,14 +771,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}
of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ddr_seq_data = of_id->data;
@@ -779,21 +802,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ret = brcmstb_init_sram(dn);
of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}
ctrl.pdev = pdev;
ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -813,7 +839,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
out:
kfree(ctrl.s3_params);
-
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);
return ret;
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 07d52cafbb31..fcec6ed83d5e 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index fde4edd83c14..739e4eee6b75 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2483,13 +2483,8 @@ out:
}
EXPORT_SYMBOL(qman_create_cgr);
-int qman_delete_cgr(struct qman_cgr *cgr)
+static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
{
- unsigned long irqflags;
- struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts;
- int ret = 0;
- struct qman_cgr *i;
struct qman_portal *p = get_affine_portal();
if (cgr->chan != p->config->channel) {
@@ -2497,10 +2492,25 @@ int qman_delete_cgr(struct qman_cgr *cgr)
dev_err(p->config->dev, "CGR not owned by current portal");
dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
cgr->chan, p->config->channel);
-
- ret = -EINVAL;
- goto put_portal;
+ put_affine_portal();
+ return NULL;
}
+
+ return p;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
@@ -2528,7 +2538,6 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs);
release_lock:
spin_unlock_irqrestore(&p->cgr_lock, irqflags);
-put_portal:
put_affine_portal();
return ret;
}
@@ -2559,6 +2568,54 @@ void qman_delete_cgr_safe(struct qman_cgr *cgr)
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
+static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ int ret;
+ unsigned long irqflags;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ ret = qm_modify_cgr(cgr, 0, opts);
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+
+struct update_cgr_params {
+ struct qman_cgr *cgr;
+ struct qm_mcc_initcgr *opts;
+ int ret;
+};
+
+static void qman_update_cgr_smp_call(void *p)
+{
+ struct update_cgr_params *params = p;
+
+ params->ret = qman_update_cgr(params->cgr, params->opts);
+}
+
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ struct update_cgr_params params = {
+ .cgr = cgr,
+ .opts = opts,
+ };
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_update_cgr_smp_call, &params,
+ true);
+ else
+ params.ret = qman_update_cgr(cgr, opts);
+ preempt_enable();
+ return params.ret;
+}
+EXPORT_SYMBOL(qman_update_cgr_safe);
+
/* Cleanup FQs */
static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index a840494e849a..4b906791d6c7 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -20,4 +20,12 @@ config SOC_IMX8M
support, it will provide the SoC info like SoC family,
ID and revision etc.
+config SOC_IMX9
+ tristate "i.MX9 SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ select SOC_BUS
+ help
+ If you say yes here, you get support for the NXP i.MX9 family.
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 63cd29f6d4d2..7b4099ceafd6 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
obj-$(CONFIG_SOC_IMX8M) += imx8mp-blk-ctrl.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o imx93-pd.o
+obj-$(CONFIG_SOC_IMX9) += imx93-blk-ctrl.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 6383a4edc360..88aee59730e3 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -335,6 +335,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
}
+ reset_control_assert(domain->reset);
+
/* Enable reset clocks for all devices in the domain */
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
@@ -342,7 +344,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}
- reset_control_assert(domain->reset);
+ /* delay for the reset to propagate */
+ udelay(5);
if (domain->bits.pxx) {
/* request the domain to power up */
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index dff7529268e4..00879615a701 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -5,6 +5,7 @@
*/
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -37,6 +38,8 @@ struct imx8m_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
int num_clks;
+ const char * const *path_names;
+ int num_paths;
const char *gpc_name;
u32 rst_mask;
u32 clk_mask;
@@ -52,13 +55,16 @@ struct imx8m_blk_ctrl_domain_data {
};
#define DOMAIN_MAX_CLKS 4
+#define DOMAIN_MAX_PATHS 4
struct imx8m_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx8m_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct icc_bulk_data paths[DOMAIN_MAX_PATHS];
struct device *power_dev;
struct imx8m_blk_ctrl *bc;
+ int num_paths;
};
struct imx8m_blk_ctrl_data {
@@ -117,6 +123,10 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
if (data->mipi_phy_rst_mask)
regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
+ ret = icc_bulk_set_bw(domain->num_paths, domain->paths);
+ if (ret)
+ dev_err(bc->dev, "failed to set icc bw\n");
+
/* disable upstream clocks */
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
@@ -152,19 +162,6 @@ static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
return 0;
}
-static struct generic_pm_domain *
-imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
-{
- struct genpd_onecell_data *onecell_data = data;
- unsigned int index = args->args[0];
-
- if (args->args_count != 1 ||
- index >= onecell_data->num_domains)
- return ERR_PTR(-EINVAL);
-
- return onecell_data->domains[index];
-}
-
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
@@ -206,7 +203,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
return -ENOMEM;
bc->onecell_data.num_domains = bc_data->num_domains;
- bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
bc->onecell_data.domains =
devm_kcalloc(dev, bc_data->num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
@@ -224,10 +220,29 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
int j;
domain->data = data;
+ domain->num_paths = data->num_paths;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
+ for (j = 0; j < data->num_paths; j++) {
+ domain->paths[j].name = data->path_names[j];
+ /* Placeholder value for now; it just lets the ICC framework configure NoC mode/priority */
+ domain->paths[j].avg_bw = 1;
+ domain->paths[j].peak_bw = 1;
+ }
+
+ ret = devm_of_icc_bulk_get(dev, data->num_paths, domain->paths);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_warn_once(dev, "Could not get interconnect paths, NoC will stay unconfigured!\n");
+ domain->num_paths = 0;
+ } else {
+ dev_err_probe(dev, ret, "failed to get noc entries\n");
+ goto cleanup_pds;
+ }
+ }
+
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
@@ -243,7 +258,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
- dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
@@ -455,6 +469,46 @@ static const struct imx8m_blk_ctrl_data imx8mm_vpu_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mm_vpu_blk_ctl_domain_data),
};
+static const struct imx8m_blk_ctrl_domain_data imx8mp_vpu_blk_ctl_domain_data[] = {
+ [IMX8MP_VPUBLK_PD_G1] = {
+ .name = "vpublk-g1",
+ .clk_names = (const char *[]){ "g1", },
+ .num_clks = 1,
+ .gpc_name = "g1",
+ .rst_mask = BIT(1),
+ .clk_mask = BIT(1),
+ .path_names = (const char *[]){"g1"},
+ .num_paths = 1,
+ },
+ [IMX8MP_VPUBLK_PD_G2] = {
+ .name = "vpublk-g2",
+ .clk_names = (const char *[]){ "g2", },
+ .num_clks = 1,
+ .gpc_name = "g2",
+ .rst_mask = BIT(0),
+ .clk_mask = BIT(0),
+ .path_names = (const char *[]){"g2"},
+ .num_paths = 1,
+ },
+ [IMX8MP_VPUBLK_PD_VC8000E] = {
+ .name = "vpublk-vc8000e",
+ .clk_names = (const char *[]){ "vc8000e", },
+ .num_clks = 1,
+ .gpc_name = "vc8000e",
+ .rst_mask = BIT(2),
+ .clk_mask = BIT(2),
+ .path_names = (const char *[]){"vc8000e"},
+ .num_paths = 1,
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mp_vpu_blk_ctl_dev_data = {
+ .max_reg = 0x18,
+ .power_notifier_fn = imx8mm_vpu_power_notifier,
+ .domains = imx8mp_vpu_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_vpu_blk_ctl_domain_data),
+};
+
static int imx8mm_disp_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -650,6 +704,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "lcdif1",
.rst_mask = BIT(4) | BIT(5) | BIT(23),
.clk_mask = BIT(4) | BIT(5) | BIT(23),
+ .path_names = (const char *[]){"lcdif-rd", "lcdif-wr"},
+ .num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_ISI] = {
.name = "mediablk-isi",
@@ -658,6 +714,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "isi",
.rst_mask = BIT(6) | BIT(7),
.clk_mask = BIT(6) | BIT(7),
+ .path_names = (const char *[]){"isi0", "isi1", "isi2"},
+ .num_paths = 3,
},
[IMX8MP_MEDIABLK_PD_MIPI_CSI2_2] = {
.name = "mediablk-mipi-csi2-2",
@@ -675,6 +733,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "lcdif2",
.rst_mask = BIT(11) | BIT(12) | BIT(24),
.clk_mask = BIT(11) | BIT(12) | BIT(24),
+ .path_names = (const char *[]){"lcdif-rd", "lcdif-wr"},
+ .num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_ISP] = {
.name = "mediablk-isp",
@@ -683,6 +743,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "isp",
.rst_mask = BIT(16) | BIT(17) | BIT(18),
.clk_mask = BIT(16) | BIT(17) | BIT(18),
+ .path_names = (const char *[]){"isp0", "isp1"},
+ .num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_DWE] = {
.name = "mediablk-dwe",
@@ -691,6 +753,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "dwe",
.rst_mask = BIT(19) | BIT(20) | BIT(21),
.clk_mask = BIT(19) | BIT(20) | BIT(21),
+ .path_names = (const char *[]){"dwe"},
+ .num_paths = 1,
},
[IMX8MP_MEDIABLK_PD_MIPI_DSI_2] = {
.name = "mediablk-mipi-dsi-2",
@@ -789,6 +853,9 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
.compatible = "fsl,imx8mq-vpu-blk-ctrl",
.data = &imx8mq_vpu_blk_ctl_dev_data
}, {
+ .compatible = "fsl,imx8mp-vpu-blk-ctrl",
+ .data = &imx8mp_vpu_blk_ctl_dev_data
+ }, {
/* Sentinel */
}
};
diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
index 4ca2ede6871b..0e3b6ba22f94 100644
--- a/drivers/soc/imx/imx8mp-blk-ctrl.c
+++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -18,6 +19,8 @@
#define GPR_REG0 0x0
#define PCIE_CLOCK_MODULE_EN BIT(0)
#define USB_CLOCK_MODULE_EN BIT(1)
+#define PCIE_PHY_APB_RST BIT(4)
+#define PCIE_PHY_INIT_RST BIT(5)
struct imx8mp_blk_ctrl_domain;
@@ -36,17 +39,22 @@ struct imx8mp_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
int num_clks;
+ const char * const *path_names;
+ int num_paths;
const char *gpc_name;
};
#define DOMAIN_MAX_CLKS 2
+#define DOMAIN_MAX_PATHS 3
struct imx8mp_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx8mp_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct icc_bulk_data paths[DOMAIN_MAX_PATHS];
struct device *power_dev;
struct imx8mp_blk_ctrl *bc;
+ int num_paths;
int id;
};
@@ -75,6 +83,10 @@ static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
case IMX8MP_HSIOBLK_PD_PCIE:
regmap_set_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
break;
+ case IMX8MP_HSIOBLK_PD_PCIE_PHY:
+ regmap_set_bits(bc->regmap, GPR_REG0,
+ PCIE_PHY_APB_RST | PCIE_PHY_INIT_RST);
+ break;
default:
break;
}
@@ -90,6 +102,10 @@ static void imx8mp_hsio_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
case IMX8MP_HSIOBLK_PD_PCIE:
regmap_clear_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
break;
+ case IMX8MP_HSIOBLK_PD_PCIE_PHY:
+ regmap_clear_bits(bc->regmap, GPR_REG0,
+ PCIE_PHY_APB_RST | PCIE_PHY_INIT_RST);
+ break;
default:
break;
}
@@ -144,6 +160,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
.clk_names = (const char *[]){ "usb" },
.num_clks = 1,
.gpc_name = "usb",
+ .path_names = (const char *[]){"usb1", "usb2"},
+ .num_paths = 2,
},
[IMX8MP_HSIOBLK_PD_USB_PHY1] = {
.name = "hsioblk-usb-phy1",
@@ -158,6 +176,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
.clk_names = (const char *[]){ "pcie" },
.num_clks = 1,
.gpc_name = "pcie",
+ .path_names = (const char *[]){"noc-pcie", "pcie"},
+ .num_paths = 2,
},
[IMX8MP_HSIOBLK_PD_PCIE_PHY] = {
.name = "hsioblk-pcie-phy",
@@ -225,6 +245,13 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
break;
+ case IMX8MP_HDMIBLK_PD_HDCP:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(11));
+ break;
+ case IMX8MP_HDMIBLK_PD_HRV:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(3) | BIT(4) | BIT(5));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(15));
+ break;
default:
break;
}
@@ -273,6 +300,13 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
break;
+ case IMX8MP_HDMIBLK_PD_HDCP:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(11));
+ break;
+ case IMX8MP_HDMIBLK_PD_HRV:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(15));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(3) | BIT(4) | BIT(5));
+ break;
default:
break;
}
@@ -322,6 +356,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "lcdif",
+ .path_names = (const char *[]){"lcdif-hdmi"},
+ .num_paths = 1,
},
[IMX8MP_HDMIBLK_PD_PAI] = {
.name = "hdmiblk-pai",
@@ -353,6 +389,22 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
.num_clks = 2,
.gpc_name = "hdmi-tx-phy",
},
+ [IMX8MP_HDMIBLK_PD_HRV] = {
+ .name = "hdmiblk-hrv",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "hrv",
+ .path_names = (const char *[]){"hrv"},
+ .num_paths = 1,
+ },
+ [IMX8MP_HDMIBLK_PD_HDCP] = {
+ .name = "hdmiblk-hdcp",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "hdcp",
+ .path_names = (const char *[]){"hdcp"},
+ .num_paths = 1,
+ },
};
static const struct imx8mp_blk_ctrl_data imx8mp_hdmi_blk_ctl_dev_data = {
@@ -395,6 +447,10 @@ static int imx8mp_blk_ctrl_power_on(struct generic_pm_domain *genpd)
goto clk_disable;
}
+ ret = icc_bulk_set_bw(domain->num_paths, domain->paths);
+ if (ret)
+ dev_err(bc->dev, "failed to set icc bw\n");
+
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
return 0;
@@ -434,19 +490,6 @@ static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd)
return 0;
}
-static struct generic_pm_domain *
-imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
-{
- struct genpd_onecell_data *onecell_data = data;
- unsigned int index = args->args[0];
-
- if (args->args_count != 1 ||
- index >= onecell_data->num_domains)
- return ERR_PTR(-EINVAL);
-
- return onecell_data->domains[index];
-}
-
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
@@ -489,7 +532,6 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
return -ENOMEM;
bc->onecell_data.num_domains = num_domains;
- bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
bc->onecell_data.domains =
devm_kcalloc(dev, num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
@@ -510,10 +552,29 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
int j;
domain->data = data;
+ domain->num_paths = data->num_paths;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
+ for (j = 0; j < data->num_paths; j++) {
+ domain->paths[j].name = data->path_names[j];
+ /* Placeholder value for now; it just lets the ICC framework configure NoC mode/priority */
+ domain->paths[j].avg_bw = 1;
+ domain->paths[j].peak_bw = 1;
+ }
+
+ ret = devm_of_icc_bulk_get(dev, data->num_paths, domain->paths);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_warn_once(dev, "Could not get interconnect paths, NoC will stay unconfigured!\n");
+ domain->num_paths = 0;
+ } else {
+ dev_err_probe(dev, ret, "failed to get noc entries\n");
+ goto cleanup_pds;
+ }
+ }
+
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
diff --git a/drivers/soc/imx/imx93-blk-ctrl.c b/drivers/soc/imx/imx93-blk-ctrl.c
new file mode 100644
index 000000000000..2c600329436c
--- /dev/null
+++ b/drivers/soc/imx/imx93-blk-ctrl.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+#include <dt-bindings/power/fsl,imx93-power.h>
+
+#define BLK_SFT_RSTN 0x0
+#define BLK_CLK_EN 0x4
+#define BLK_MAX_CLKS 4
+
+#define DOMAIN_MAX_CLKS 4
+
+#define LCDIF_QOS_REG 0xC
+#define LCDIF_DEFAULT_QOS_OFF 12
+#define LCDIF_CFG_QOS_OFF 8
+
+#define PXP_QOS_REG 0x10
+#define PXP_R_DEFAULT_QOS_OFF 28
+#define PXP_R_CFG_QOS_OFF 24
+#define PXP_W_DEFAULT_QOS_OFF 20
+#define PXP_W_CFG_QOS_OFF 16
+
+#define ISI_CACHE_REG 0x14
+
+#define ISI_QOS_REG 0x1C
+#define ISI_V_DEFAULT_QOS_OFF 28
+#define ISI_V_CFG_QOS_OFF 24
+#define ISI_U_DEFAULT_QOS_OFF 20
+#define ISI_U_CFG_QOS_OFF 16
+#define ISI_Y_R_DEFAULT_QOS_OFF 12
+#define ISI_Y_R_CFG_QOS_OFF 8
+#define ISI_Y_W_DEFAULT_QOS_OFF 4
+#define ISI_Y_W_CFG_QOS_OFF 0
+
+#define PRIO_MASK 0xF
+
+#define PRIO(X) (X)
+
+struct imx93_blk_ctrl_domain;
+
+struct imx93_blk_ctrl {
+ struct device *dev;
+ struct regmap *regmap;
+ int num_clks;
+ struct clk_bulk_data clks[BLK_MAX_CLKS];
+ struct imx93_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+};
+
+#define DOMAIN_MAX_QOS 4
+
+struct imx93_blk_ctrl_qos {
+ u32 reg;
+ u32 cfg_off;
+ u32 default_prio;
+ u32 cfg_prio;
+};
+
+struct imx93_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ u32 rst_mask;
+ u32 clk_mask;
+ int num_qos;
+ struct imx93_blk_ctrl_qos qos[DOMAIN_MAX_QOS];
+};
+
+struct imx93_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx93_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct imx93_blk_ctrl *bc;
+};
+
+struct imx93_blk_ctrl_data {
+ const struct imx93_blk_ctrl_domain_data *domains;
+ int num_domains;
+ const char * const *clk_names;
+ int num_clks;
+ const struct regmap_access_table *reg_access_table;
+};
+
+static inline struct imx93_blk_ctrl_domain *
+to_imx93_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx93_blk_ctrl_domain, genpd);
+}
+
+static int imx93_blk_ctrl_set_qos(struct imx93_blk_ctrl_domain *domain)
+{
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+ const struct imx93_blk_ctrl_qos *qos;
+ u32 val, mask;
+ int i;
+
+ for (i = 0; i < data->num_qos; i++) {
+ qos = &data->qos[i];
+
+ mask = PRIO_MASK << qos->cfg_off;
+ mask |= PRIO_MASK << (qos->cfg_off + 4);
+ val = qos->cfg_prio << qos->cfg_off;
+ val |= qos->default_prio << (qos->cfg_off + 4);
+
+ regmap_write_bits(bc->regmap, qos->reg, mask, val);
+
+ dev_dbg(bc->dev, "data->qos[i].reg 0x%x 0x%x\n", qos->reg, val);
+ }
+
+ return 0;
+}
+
+static int imx93_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(bc->num_clks, bc->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable bus clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+ dev_err(bc->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(bc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->dev);
+ dev_err(bc->dev, "failed to power up domain\n");
+ goto disable_clk;
+ }
+
+ /* ungate clk */
+ regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* release reset */
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ dev_dbg(bc->dev, "pd_on: name: %s\n", genpd->name);
+
+ return imx93_blk_ctrl_set_qos(domain);
+
+disable_clk:
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+
+ return ret;
+}
+
+static int imx93_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+
+ dev_dbg(bc->dev, "pd_off: name: %s\n", genpd->name);
+
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ pm_runtime_put(bc->dev);
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+
+ return 0;
+}
+
+static int imx93_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct imx93_blk_ctrl_data *bc_data = of_device_get_match_data(dev);
+ struct imx93_blk_ctrl *bc;
+ void __iomem *base;
+ int i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = bc_data->reg_access_table,
+ .wr_table = bc_data->reg_access_table,
+ .max_register = SZ_4K,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct imx93_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains)
+ return -ENOMEM;
+
+ bc->onecell_data.num_domains = bc_data->num_domains;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains)
+ return -ENOMEM;
+
+ for (i = 0; i < bc_data->num_clks; i++)
+ bc->clks[i].id = bc_data->clk_names[i];
+ bc->num_clks = bc_data->num_clks;
+
+ ret = devm_clk_bulk_get(dev, bc->num_clks, bc->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get bus clock\n");
+ return ret;
+ }
+
+ for (i = 0; i < bc_data->num_domains; i++) {
+ const struct imx93_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx93_blk_ctrl_power_on;
+ domain->genpd.power_off = imx93_blk_ctrl_power_off;
+ domain->bc = bc;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ goto cleanup_pds;
+ }
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_pds:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(&bc->domains[i].genpd);
+
+ return ret;
+}
+
+static int imx93_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx93_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ }
+
+ return 0;
+}
+
+static const struct imx93_blk_ctrl_domain_data imx93_media_blk_ctl_domain_data[] = {
+ [IMX93_MEDIABLK_PD_MIPI_DSI] = {
+ .name = "mediablk-mipi-dsi",
+ .clk_names = (const char *[]){ "dsi" },
+ .num_clks = 1,
+ .rst_mask = BIT(11) | BIT(12),
+ .clk_mask = BIT(11) | BIT(12),
+ },
+ [IMX93_MEDIABLK_PD_MIPI_CSI] = {
+ .name = "mediablk-mipi-csi",
+ .clk_names = (const char *[]){ "cam", "csi" },
+ .num_clks = 2,
+ .rst_mask = BIT(9) | BIT(10),
+ .clk_mask = BIT(9) | BIT(10),
+ },
+ [IMX93_MEDIABLK_PD_PXP] = {
+ .name = "mediablk-pxp",
+ .clk_names = (const char *[]){ "pxp" },
+ .num_clks = 1,
+ .rst_mask = BIT(7) | BIT(8),
+ .clk_mask = BIT(7) | BIT(8),
+ .num_qos = 2,
+ .qos = {
+ {
+ .reg = PXP_QOS_REG,
+ .cfg_off = PXP_R_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(6),
+ }, {
+ .reg = PXP_QOS_REG,
+ .cfg_off = PXP_W_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(6),
+ }
+ }
+ },
+ [IMX93_MEDIABLK_PD_LCDIF] = {
+ .name = "mediablk-lcdif",
+ .clk_names = (const char *[]){ "disp", "lcdif" },
+ .num_clks = 2,
+ .rst_mask = BIT(4) | BIT(5) | BIT(6),
+ .clk_mask = BIT(4) | BIT(5) | BIT(6),
+ .num_qos = 1,
+ .qos = {
+ {
+ .reg = LCDIF_QOS_REG,
+ .cfg_off = LCDIF_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }
+ }
+ },
+ [IMX93_MEDIABLK_PD_ISI] = {
+ .name = "mediablk-isi",
+ .clk_names = (const char *[]){ "isi" },
+ .num_clks = 1,
+ .rst_mask = BIT(2) | BIT(3),
+ .clk_mask = BIT(2) | BIT(3),
+ .num_qos = 4,
+ .qos = {
+ {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_Y_W_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_Y_R_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_U_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_V_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }
+ }
+ },
+};
+
+static const struct regmap_range imx93_media_blk_ctl_yes_ranges[] = {
+ regmap_reg_range(BLK_SFT_RSTN, BLK_CLK_EN),
+ regmap_reg_range(LCDIF_QOS_REG, ISI_CACHE_REG),
+ regmap_reg_range(ISI_QOS_REG, ISI_QOS_REG),
+};
+
+static const struct regmap_access_table imx93_media_blk_ctl_access_table = {
+ .yes_ranges = imx93_media_blk_ctl_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx93_media_blk_ctl_yes_ranges),
+};
+
+static const struct imx93_blk_ctrl_data imx93_media_blk_ctl_dev_data = {
+ .domains = imx93_media_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx93_media_blk_ctl_domain_data),
+ .clk_names = (const char *[]){ "axi", "apb", "nic", },
+ .num_clks = 3,
+ .reg_access_table = &imx93_media_blk_ctl_access_table,
+};
+
+static const struct of_device_id imx93_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx93-media-blk-ctrl",
+ .data = &imx93_media_blk_ctl_dev_data
+ }, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx93_blk_ctrl_of_match);
+
+static struct platform_driver imx93_blk_ctrl_driver = {
+ .probe = imx93_blk_ctrl_probe,
+ .remove = imx93_blk_ctrl_remove,
+ .driver = {
+ .name = "imx93-blk-ctrl",
+ .of_match_table = imx93_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx93_blk_ctrl_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("i.MX93 BLK CTRL driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c
new file mode 100644
index 000000000000..1f3d7039c1de
--- /dev/null
+++ b/drivers/soc/imx/imx93-pd.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+#define MIX_SLICE_SW_CTRL_OFF 0x20
+#define SLICE_SW_CTRL_PSW_CTRL_OFF_MASK BIT(4)
+#define SLICE_SW_CTRL_PDN_SOFT_MASK BIT(31)
+
+#define MIX_FUNC_STAT_OFF 0xB4
+
+#define FUNC_STAT_PSW_STAT_MASK BIT(0)
+#define FUNC_STAT_RST_STAT_MASK BIT(2)
+#define FUNC_STAT_ISO_STAT_MASK BIT(4)
+
+struct imx93_power_domain {
+ struct generic_pm_domain genpd;
+ struct device *dev;
+ void __iomem *addr;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ bool init_off;
+};
+
+#define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd)
+
+static int imx93_pd_on(struct generic_pm_domain *genpd)
+{
+ struct imx93_power_domain *domain = to_imx93_pd(genpd);
+ void __iomem *addr = domain->addr;
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable clocks for domain: %s\n", genpd->name);
+ return ret;
+ }
+
+ val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
+ val &= ~SLICE_SW_CTRL_PDN_SOFT_MASK;
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+ !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000);
+ if (ret) {
+ dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val);
+ /* Undo the clock enable done above before bailing out */
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx93_pd_off(struct generic_pm_domain *genpd)
+{
+ struct imx93_power_domain *domain = to_imx93_pd(genpd);
+ void __iomem *addr = domain->addr;
+ int ret;
+ u32 val;
+
+ /* Power off MIX */
+ val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
+ val |= SLICE_SW_CTRL_PDN_SOFT_MASK;
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+ val & FUNC_STAT_PSW_STAT_MASK, 1, 1000);
+ if (ret) {
+ dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+ }
+
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ return 0;
+}
+
+static int imx93_pd_remove(struct platform_device *pdev)
+{
+ struct imx93_power_domain *domain = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!domain->init_off)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ of_genpd_del_provider(np);
+ pm_genpd_remove(&domain->genpd);
+
+ return 0;
+}
+
+static int imx93_pd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx93_power_domain *domain;
+ int ret;
+
+ domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return -ENOMEM;
+
+ domain->addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(domain->addr))
+ return PTR_ERR(domain->addr);
+
+ domain->num_clks = devm_clk_bulk_get_all(dev, &domain->clks);
+ if (domain->num_clks < 0)
+ return dev_err_probe(dev, domain->num_clks, "Failed to get domain's clocks\n");
+
+ domain->genpd.name = dev_name(dev);
+ domain->genpd.power_off = imx93_pd_off;
+ domain->genpd.power_on = imx93_pd_on;
+ domain->dev = dev;
+
+ domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
+ /* Sync with the hardware: a domain left powered on needs its clocks enabled */
+ if (!domain->init_off) {
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable clocks for domain: %s\n",
+ domain->genpd.name);
+ return ret;
+ }
+ }
+
+ ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, domain);
+
+ return of_genpd_add_provider_simple(np, &domain->genpd);
+}
+
+static const struct of_device_id imx93_pd_ids[] = {
+ { .compatible = "fsl,imx93-src-slice" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx93_pd_ids);
+
+static struct platform_driver imx93_power_domain_driver = {
+ .driver = {
+ .name = "imx93_power_domain",
+ .owner = THIS_MODULE,
+ .of_match_table = imx93_pd_ids,
+ },
+ .probe = imx93_pd_probe,
+ .remove = imx93_pd_remove,
+};
+module_platform_driver(imx93_power_domain_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX93 power domain driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/imx93-src.c b/drivers/soc/imx/imx93-src.c
new file mode 100644
index 000000000000..4d74921cae0f
--- /dev/null
+++ b/drivers/soc/imx/imx93-src.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+static int imx93_src_probe(struct platform_device *pdev)
+{
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id imx93_src_ids[] = {
+ { .compatible = "fsl,imx93-src" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx93_src_ids);
+
+static struct platform_driver imx93_src_driver = {
+ .driver = {
+ .name = "imx93_src",
+ .owner = THIS_MODULE,
+ .of_match_table = imx93_src_ids,
+ },
+ .probe = imx93_src_probe,
+};
+module_platform_driver(imx93_src_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX93 src driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 3c3eedea35f7..73e63920b1b9 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -37,6 +37,7 @@ config MTK_INFRACFG
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
depends on RESET_CONTROLLER
+ depends on OF
select REGMAP
help
Say yes here to add support for MediaTek PMIC Wrapper found
@@ -46,6 +47,7 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
default ARCH_MEDIATEK
+ depends on OF
select REGMAP
select MTK_INFRACFG
select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index eb1ad9c37a9c..09b1ccbc0093 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -3,6 +3,12 @@
#ifndef __SOC_MEDIATEK_MT8186_MMSYS_H
#define __SOC_MEDIATEK_MT8186_MMSYS_H
+/* Values for DPI configuration in MMSYS address space */
+#define MT8186_MMSYS_DPI_OUTPUT_FORMAT 0x400
+#define DPI_FORMAT_MASK 0x1
+#define DPI_RGB888_DDR_CON BIT(0)
+#define DPI_RGB565_SDR_CON BIT(1)
+
#define MT8186_MMSYS_OVL_CON 0xF04
#define MT8186_MMSYS_OVL0_CON_MASK 0x3
#define MT8186_MMSYS_OVL0_2L_CON_MASK 0xC
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index 06d8e83a2cb5..d2c7a87aab87 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -227,6 +227,26 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(mmsys->regs + offset);
+ tmp = (tmp & ~mask) | val;
+ writel_relaxed(tmp, mmsys->regs + offset);
+}
+
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val)
+{
+ if (val)
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ DPI_RGB888_DDR_CON, DPI_FORMAT_MASK);
+ else
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ DPI_RGB565_SDR_CON, DPI_FORMAT_MASK);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_dpi_fmt_config);
+
static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned long id,
bool assert)
{
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 5ea43de4e410..c1a33d52038e 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -91,6 +91,15 @@
#define MT8183_MUTEX_MOD_MDP_AAL0 23
#define MT8183_MUTEX_MOD_MDP_CCORR0 24
+#define MT8186_MUTEX_MOD_MDP_RDMA0 0
+#define MT8186_MUTEX_MOD_MDP_AAL0 2
+#define MT8186_MUTEX_MOD_MDP_HDR0 4
+#define MT8186_MUTEX_MOD_MDP_RSZ0 5
+#define MT8186_MUTEX_MOD_MDP_RSZ1 6
+#define MT8186_MUTEX_MOD_MDP_WROT0 7
+#define MT8186_MUTEX_MOD_MDP_TDSHP0 9
+#define MT8186_MUTEX_MOD_MDP_COLOR0 14
+
#define MT8173_MUTEX_MOD_DISP_OVL0 11
#define MT8173_MUTEX_MOD_DISP_OVL1 12
#define MT8173_MUTEX_MOD_DISP_RDMA0 13
@@ -324,6 +333,17 @@ static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1,
};
+static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8186_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8186_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8186_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8186_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8186_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_HDR0] = MT8186_MUTEX_MOD_MDP_HDR0,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8186_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0,
+};
+
static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
@@ -380,6 +400,13 @@ static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
};
+static const unsigned int mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+};
+
static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -434,6 +461,13 @@ static const struct mtk_mutex_data mt2712_mutex_driver_data = {
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt6795_mutex_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt6795_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+};
+
static const struct mtk_mutex_data mt8167_mutex_driver_data = {
.mutex_mod = mt8167_mutex_mod,
.mutex_sof = mt8167_mutex_sof,
@@ -458,6 +492,12 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.no_clk = true,
};
+static const struct mtk_mutex_data mt8186_mdp_mutex_driver_data = {
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8186_mdp_mutex_table_mod,
+};
+
static const struct mtk_mutex_data mt8186_mutex_driver_data = {
.mutex_mod = mt8186_mutex_mod,
.mutex_sof = mt8186_mutex_sof,
@@ -802,6 +842,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt2701_mutex_driver_data},
{ .compatible = "mediatek,mt2712-disp-mutex",
.data = &mt2712_mutex_driver_data},
+ { .compatible = "mediatek,mt6795-disp-mutex",
+ .data = &mt6795_mutex_driver_data},
{ .compatible = "mediatek,mt8167-disp-mutex",
.data = &mt8167_mutex_driver_data},
{ .compatible = "mediatek,mt8173-disp-mutex",
@@ -810,6 +852,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8183_mutex_driver_data},
{ .compatible = "mediatek,mt8186-disp-mutex",
.data = &mt8186_mutex_driver_data},
+ { .compatible = "mediatek,mt8186-mdp3-mutex",
+ .data = &mt8186_mdp_mutex_driver_data},
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = &mt8192_mutex_driver_data},
{ .compatible = "mediatek,mt8195-disp-mutex",
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 9734f1091c69..09e3c38b8466 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -393,7 +393,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(scpsys->dev, ret,
- "%pOF: failed to get clk at index %d: %d\n", node, i, ret);
+ "%pOF: failed to get clk at index %d\n", node, i);
goto err_put_clocks;
}
@@ -405,8 +405,8 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(scpsys->dev, ret,
- "%pOF: failed to get clk at index %d: %d\n", node,
- i + clk_ind, ret);
+ "%pOF: failed to get clk at index %d\n", node,
+ i + clk_ind);
goto err_put_subsys_clocks;
}
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index d8cb0f833645..eb82ae06697f 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -2316,7 +2316,7 @@ err_out1:
static struct platform_driver pwrap_drv = {
.driver = {
.name = "mt-pmic-pwrap",
- .of_match_table = of_match_ptr(of_pwrap_match_tbl),
+ .of_match_table = of_pwrap_match_tbl,
},
.probe = pwrap_probe,
};
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index ca75b14931ec..7a668888111c 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -1141,7 +1141,7 @@ static struct platform_driver scpsys_drv = {
.name = "mtk-scpsys",
.suppress_bind_attrs = true,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_scpsys_match_tbl),
+ .of_match_table = of_scpsys_match_tbl,
},
};
builtin_platform_driver(scpsys_drv);
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index dee8664a12fd..0469c9dfeb04 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022 MediaTek Inc.
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
@@ -53,22 +54,79 @@
#define SVSB_MON_VOLT_IGNORE BIT(16)
#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24)
-/* svs bank register common configuration */
-#define SVSB_DET_MAX 0xffff
+/* svs bank register fields and common configuration */
+#define SVSB_PTPCONFIG_DETMAX GENMASK(15, 0)
+#define SVSB_DET_MAX FIELD_PREP(SVSB_PTPCONFIG_DETMAX, 0xffff)
#define SVSB_DET_WINDOW 0xa28
-#define SVSB_DTHI 0x1
-#define SVSB_DTLO 0xfe
-#define SVSB_EN_INIT01 0x1
-#define SVSB_EN_INIT02 0x5
-#define SVSB_EN_MON 0x2
-#define SVSB_EN_OFF 0x0
-#define SVSB_INTEN_INIT0x 0x00005f01
-#define SVSB_INTEN_MONVOPEN 0x00ff0000
-#define SVSB_INTSTS_CLEAN 0x00ffffff
-#define SVSB_INTSTS_COMPLETE 0x1
-#define SVSB_INTSTS_MONVOP 0x00ff0000
+
+/* DESCHAR */
+#define SVSB_DESCHAR_FLD_MDES GENMASK(7, 0)
+#define SVSB_DESCHAR_FLD_BDES GENMASK(15, 8)
+
+/* TEMPCHAR */
+#define SVSB_TEMPCHAR_FLD_DVT_FIXED GENMASK(7, 0)
+#define SVSB_TEMPCHAR_FLD_MTDES GENMASK(15, 8)
+#define SVSB_TEMPCHAR_FLD_VCO GENMASK(23, 16)
+
+/* DETCHAR */
+#define SVSB_DETCHAR_FLD_DCMDET GENMASK(7, 0)
+#define SVSB_DETCHAR_FLD_DCBDET GENMASK(15, 8)
+
+/* SVSEN (PTPEN) */
+#define SVSB_PTPEN_INIT01 BIT(0)
+#define SVSB_PTPEN_MON BIT(1)
+#define SVSB_PTPEN_INIT02 (SVSB_PTPEN_INIT01 | BIT(2))
+#define SVSB_PTPEN_OFF 0x0
+
+/* FREQPCTS */
+#define SVSB_FREQPCTS_FLD_PCT0_4 GENMASK(7, 0)
+#define SVSB_FREQPCTS_FLD_PCT1_5 GENMASK(15, 8)
+#define SVSB_FREQPCTS_FLD_PCT2_6 GENMASK(23, 16)
+#define SVSB_FREQPCTS_FLD_PCT3_7 GENMASK(31, 24)
+
+/* INTSTS */
+#define SVSB_INTSTS_VAL_CLEAN 0x00ffffff
+#define SVSB_INTSTS_F0_COMPLETE BIT(0)
+#define SVSB_INTSTS_FLD_MONVOP GENMASK(23, 16)
#define SVSB_RUNCONFIG_DEFAULT 0x80000000
+/* LIMITVALS */
+#define SVSB_LIMITVALS_FLD_DTLO GENMASK(7, 0)
+#define SVSB_LIMITVALS_FLD_DTHI GENMASK(15, 8)
+#define SVSB_LIMITVALS_FLD_VMIN GENMASK(23, 16)
+#define SVSB_LIMITVALS_FLD_VMAX GENMASK(31, 24)
+#define SVSB_VAL_DTHI 0x1
+#define SVSB_VAL_DTLO 0xfe
+
+/* INTEN */
+#define SVSB_INTEN_F0EN BIT(0)
+#define SVSB_INTEN_DACK0UPEN BIT(8)
+#define SVSB_INTEN_DC0EN BIT(9)
+#define SVSB_INTEN_DC1EN BIT(10)
+#define SVSB_INTEN_DACK0LOEN BIT(11)
+#define SVSB_INTEN_INITPROD_OVF_EN BIT(12)
+#define SVSB_INTEN_INITSUM_OVF_EN BIT(14)
+#define SVSB_INTEN_MONVOPEN GENMASK(23, 16)
+#define SVSB_INTEN_INIT0x (SVSB_INTEN_F0EN | SVSB_INTEN_DACK0UPEN | \
+ SVSB_INTEN_DC0EN | SVSB_INTEN_DC1EN | \
+ SVSB_INTEN_DACK0LOEN | \
+ SVSB_INTEN_INITPROD_OVF_EN | \
+ SVSB_INTEN_INITSUM_OVF_EN)
+
+/* TSCALCS */
+#define SVSB_TSCALCS_FLD_MTS GENMASK(11, 0)
+#define SVSB_TSCALCS_FLD_BTS GENMASK(23, 12)
+
+/* INIT2VALS */
+#define SVSB_INIT2VALS_FLD_DCVOFFSETIN GENMASK(15, 0)
+#define SVSB_INIT2VALS_FLD_AGEVOFFSETIN GENMASK(31, 16)
+
+/* VOPS */
+#define SVSB_VOPS_FLD_VOP0_4 GENMASK(7, 0)
+#define SVSB_VOPS_FLD_VOP1_5 GENMASK(15, 8)
+#define SVSB_VOPS_FLD_VOP2_6 GENMASK(23, 16)
+#define SVSB_VOPS_FLD_VOP3_7 GENMASK(31, 24)
+
/* svs bank related setting */
#define BITS8 8
#define MAX_OPP_ENTRIES 16
@@ -262,7 +320,6 @@ static const u32 svs_regs_v2[] = {
* @rst: svs platform reset control
* @efuse_parsing: svs platform efuse parsing function pointer
* @probe: svs platform probe function pointer
- * @irqflags: svs platform irq settings flags
* @efuse_max: total number of svs efuse
* @tefuse_max: total number of thermal efuse
* @regs: svs platform registers map
@@ -280,7 +337,6 @@ struct svs_platform {
struct reset_control *rst;
bool (*efuse_parsing)(struct svs_platform *svsp);
int (*probe)(struct svs_platform *svsp);
- unsigned long irqflags;
size_t efuse_max;
size_t tefuse_max;
const u32 *regs;
@@ -294,7 +350,6 @@ struct svs_platform_data {
struct svs_bank *banks;
bool (*efuse_parsing)(struct svs_platform *svsp);
int (*probe)(struct svs_platform *svsp);
- unsigned long irqflags;
const u32 *regs;
u32 bank_max;
};
@@ -668,8 +723,8 @@ static ssize_t svs_enable_debug_write(struct file *filp,
svsp->pbank = svsb;
svsb->mode_support = SVSB_MODE_ALL_DISABLE;
svs_switch_bank(svsp);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
spin_unlock_irqrestore(&svs_lock, flags);
svsb->phase = SVSB_PHASE_ERROR;
@@ -830,7 +885,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
} else if (svsb->type == SVSB_LOW) {
/* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
j = svsb->opp_count - 7;
- svsb->volt[turn_pt] = vop30 & GENMASK(7, 0);
+ svsb->volt[turn_pt] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
shift_byte++;
for (i = j; i < svsb->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
@@ -852,7 +907,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
if (svsb->type == SVSB_HIGH) {
/* volt[0] + volt[j] ~ volt[turn_pt - 1] */
j = turn_pt - 7;
- svsb->volt[0] = vop30 & GENMASK(7, 0);
+ svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
shift_byte++;
for (i = j; i < turn_pt; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
@@ -983,16 +1038,16 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
u32 temp, i;
temp = svs_readl_relaxed(svsp, VOP74);
- svsb->volt[14] = (temp >> 24) & GENMASK(7, 0);
- svsb->volt[12] = (temp >> 16) & GENMASK(7, 0);
- svsb->volt[10] = (temp >> 8) & GENMASK(7, 0);
- svsb->volt[8] = (temp & GENMASK(7, 0));
+ svsb->volt[14] = FIELD_GET(SVSB_VOPS_FLD_VOP3_7, temp);
+ svsb->volt[12] = FIELD_GET(SVSB_VOPS_FLD_VOP2_6, temp);
+ svsb->volt[10] = FIELD_GET(SVSB_VOPS_FLD_VOP1_5, temp);
+ svsb->volt[8] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, temp);
temp = svs_readl_relaxed(svsp, VOP30);
- svsb->volt[6] = (temp >> 24) & GENMASK(7, 0);
- svsb->volt[4] = (temp >> 16) & GENMASK(7, 0);
- svsb->volt[2] = (temp >> 8) & GENMASK(7, 0);
- svsb->volt[0] = (temp & GENMASK(7, 0));
+ svsb->volt[6] = FIELD_GET(SVSB_VOPS_FLD_VOP3_7, temp);
+ svsb->volt[4] = FIELD_GET(SVSB_VOPS_FLD_VOP2_6, temp);
+ svsb->volt[2] = FIELD_GET(SVSB_VOPS_FLD_VOP1_5, temp);
+ svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, temp);
for (i = 0; i <= 12; i += 2)
svsb->volt[i + 1] = interpolate(svsb->freq_pct[i],
@@ -1014,20 +1069,20 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
{
struct svs_bank *svsb = svsp->pbank;
+ u32 freqpct74_val, freqpct30_val;
+
+ freqpct74_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[8]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT1_5, svsb->freq_pct[10]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT2_6, svsb->freq_pct[12]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT3_7, svsb->freq_pct[14]);
- svs_writel_relaxed(svsp,
- (svsb->freq_pct[14] << 24) |
- (svsb->freq_pct[12] << 16) |
- (svsb->freq_pct[10] << 8) |
- svsb->freq_pct[8],
- FREQPCT74);
-
- svs_writel_relaxed(svsp,
- (svsb->freq_pct[6] << 24) |
- (svsb->freq_pct[4] << 16) |
- (svsb->freq_pct[2] << 8) |
- svsb->freq_pct[0],
- FREQPCT30);
+ freqpct30_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[0]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT1_5, svsb->freq_pct[2]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT2_6, svsb->freq_pct[4]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT3_7, svsb->freq_pct[6]);
+
+ svs_writel_relaxed(svsp, freqpct74_val, FREQPCT74);
+ svs_writel_relaxed(svsp, freqpct30_val, FREQPCT30);
}
static void svs_set_bank_phase(struct svs_platform *svsp,
@@ -1038,13 +1093,17 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
svs_switch_bank(svsp);
- des_char = (svsb->bdes << 8) | svsb->mdes;
+ des_char = FIELD_PREP(SVSB_DESCHAR_FLD_BDES, svsb->bdes) |
+ FIELD_PREP(SVSB_DESCHAR_FLD_MDES, svsb->mdes);
svs_writel_relaxed(svsp, des_char, DESCHAR);
- temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed;
+ temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, svsb->vco) |
+ FIELD_PREP(SVSB_TEMPCHAR_FLD_MTDES, svsb->mtdes) |
+ FIELD_PREP(SVSB_TEMPCHAR_FLD_DVT_FIXED, svsb->dvt_fixed);
svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
- det_char = (svsb->dcbdet << 8) | svsb->dcmdet;
+ det_char = FIELD_PREP(SVSB_DETCHAR_FLD_DCBDET, svsb->dcbdet) |
+ FIELD_PREP(SVSB_DETCHAR_FLD_DCMDET, svsb->dcmdet);
svs_writel_relaxed(svsp, det_char, DETCHAR);
svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
@@ -1053,33 +1112,37 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
svsb->set_freq_pct(svsp);
- limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) |
- (SVSB_DTHI << 8) | SVSB_DTLO;
+ limit_vals = FIELD_PREP(SVSB_LIMITVALS_FLD_DTLO, SVSB_VAL_DTLO) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_DTHI, SVSB_VAL_DTHI) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_VMIN, svsb->vmin) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_VMAX, svsb->vmax);
svs_writel_relaxed(svsp, limit_vals, LIMITVALS);
svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
switch (target_phase) {
case SVSB_PHASE_INIT01:
svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
- svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_INIT01, SVSEN);
break;
case SVSB_PHASE_INIT02:
+ init2vals = FIELD_PREP(SVSB_INIT2VALS_FLD_AGEVOFFSETIN, svsb->age_voffset_in) |
+ FIELD_PREP(SVSB_INIT2VALS_FLD_DCVOFFSETIN, svsb->dc_voffset_in);
svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
- init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in;
svs_writel_relaxed(svsp, init2vals, INIT2VALS);
- svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_INIT02, SVSEN);
break;
case SVSB_PHASE_MON:
- ts_calcs = (svsb->bts << 12) | svsb->mts;
+ ts_calcs = FIELD_PREP(SVSB_TSCALCS_FLD_BTS, svsb->bts) |
+ FIELD_PREP(SVSB_TSCALCS_FLD_MTS, svsb->mts);
svs_writel_relaxed(svsp, ts_calcs, TSCALCS);
svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN);
- svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_MON, SVSEN);
break;
default:
dev_err(svsb->dev, "requested unknown target phase: %u\n",
@@ -1115,8 +1178,8 @@ static inline void svs_error_isr_handler(struct svs_platform *svsp)
svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
svsb->phase = SVSB_PHASE_ERROR;
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
}
static inline void svs_init01_isr_handler(struct svs_platform *svsp)
@@ -1141,8 +1204,8 @@ static inline void svs_init01_isr_handler(struct svs_platform *svsp)
svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) &
GENMASK(15, 0);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
svsb->core_sel &= ~SVSB_DET_CLK_EN;
}
@@ -1160,8 +1223,8 @@ static inline void svs_init02_isr_handler(struct svs_platform *svsp)
svsb->phase = SVSB_PHASE_INIT02;
svsb->get_volts(svsp);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
}
static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
@@ -1174,7 +1237,7 @@ static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
svsb->get_volts(svsp);
svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
- svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_FLD_MONVOP, INTSTS);
}
static irqreturn_t svs_isr(int irq, void *data)
@@ -1201,13 +1264,13 @@ static irqreturn_t svs_isr(int irq, void *data)
int_sts = svs_readl_relaxed(svsp, INTSTS);
svs_en = svs_readl_relaxed(svsp, SVSEN);
- if (int_sts == SVSB_INTSTS_COMPLETE &&
- svs_en == SVSB_EN_INIT01)
+ if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
+ svs_en == SVSB_PTPEN_INIT01)
svs_init01_isr_handler(svsp);
- else if (int_sts == SVSB_INTSTS_COMPLETE &&
- svs_en == SVSB_EN_INIT02)
+ else if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
+ svs_en == SVSB_PTPEN_INIT02)
svs_init02_isr_handler(svsp);
- else if (int_sts & SVSB_INTSTS_MONVOP)
+ else if (int_sts & SVSB_INTSTS_FLD_MONVOP)
svs_mon_mode_isr_handler(svsp);
else
svs_error_isr_handler(svsp);
@@ -1493,8 +1556,8 @@ static int svs_suspend(struct device *dev)
spin_lock_irqsave(&svs_lock, flags);
svsp->pbank = svsb;
svs_switch_bank(svsp);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
spin_unlock_irqrestore(&svs_lock, flags);
svsb->phase = SVSB_PHASE_ERROR;
@@ -1589,7 +1652,7 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
dev_set_drvdata(svsb->dev, svsp);
- ret = dev_pm_opp_of_add_table(svsb->opp_dev);
+ ret = devm_pm_opp_of_add_table(svsb->opp_dev);
if (ret) {
dev_err(svsb->dev, "add opp table fail: %d\n", ret);
return ret;
@@ -1644,11 +1707,36 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
return 0;
}
+static int svs_thermal_efuse_get_data(struct svs_platform *svsp)
+{
+ struct nvmem_cell *cell;
+
+ /* Thermal efuse parsing */
+ cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
+ if (IS_ERR_OR_NULL(cell)) {
+ dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n", PTR_ERR(cell));
+ return PTR_ERR(cell);
+ }
+
+ svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
+ if (IS_ERR(svsp->tefuse)) {
+ dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
+ PTR_ERR(svsp->tefuse));
+ nvmem_cell_put(cell);
+ return PTR_ERR(svsp->tefuse);
+ }
+
+ svsp->tefuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ return 0;
+}
+
static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
{
struct svs_bank *svsb;
- struct nvmem_cell *cell;
u32 idx, i, vmin, golden_temp;
+ int ret;
for (i = 0; i < svsp->efuse_max; i++)
if (svsp->efuse[i])
@@ -1686,24 +1774,9 @@ static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
svsb->vmax += svsb->dvt_fixed;
}
- /* Thermal efuse parsing */
- cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
- if (IS_ERR_OR_NULL(cell)) {
- dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
- PTR_ERR(cell));
- return false;
- }
-
- svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
- if (IS_ERR(svsp->tefuse)) {
- dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
- PTR_ERR(svsp->tefuse));
- nvmem_cell_put(cell);
+ ret = svs_thermal_efuse_get_data(svsp);
+ if (ret)
return false;
- }
-
- svsp->tefuse_max /= sizeof(u32);
- nvmem_cell_put(cell);
for (i = 0; i < svsp->tefuse_max; i++)
if (svsp->tefuse[i] != 0)
@@ -1726,11 +1799,11 @@ static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
{
struct svs_bank *svsb;
- struct nvmem_cell *cell;
int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
int o_slope, o_slope_sign, ts_id;
u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
+ int ret;
for (i = 0; i < svsp->efuse_max; i++)
if (svsp->efuse[i])
@@ -1806,24 +1879,9 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
}
}
- /* Get thermal efuse by nvmem */
- cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
- if (IS_ERR(cell)) {
- dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
- PTR_ERR(cell));
- goto remove_mt8183_svsb_mon_mode;
- }
-
- svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
- if (IS_ERR(svsp->tefuse)) {
- dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
- PTR_ERR(svsp->tefuse));
- nvmem_cell_put(cell);
- goto remove_mt8183_svsb_mon_mode;
- }
-
- svsp->tefuse_max /= sizeof(u32);
- nvmem_cell_put(cell);
+ ret = svs_thermal_efuse_get_data(svsp);
+ if (ret)
+ return false;
/* Thermal efuse parsing */
adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
@@ -2244,7 +2302,6 @@ static const struct svs_platform_data svs_mt8192_platform_data = {
.banks = svs_mt8192_banks,
.efuse_parsing = svs_mt8192_efuse_parsing,
.probe = svs_mt8192_platform_probe,
- .irqflags = IRQF_TRIGGER_HIGH,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8192_banks),
};
@@ -2254,7 +2311,6 @@ static const struct svs_platform_data svs_mt8183_platform_data = {
.banks = svs_mt8183_banks,
.efuse_parsing = svs_mt8183_efuse_parsing,
.probe = svs_mt8183_platform_probe,
- .irqflags = IRQF_TRIGGER_LOW,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8183_banks),
};
@@ -2292,7 +2348,6 @@ static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
svsp->banks = svsp_data->banks;
svsp->efuse_parsing = svsp_data->efuse_parsing;
svsp->probe = svsp_data->probe;
- svsp->irqflags = svsp_data->irqflags;
svsp->regs = svsp_data->regs;
svsp->bank_max = svsp_data->bank_max;
@@ -2306,8 +2361,7 @@ static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
static int svs_probe(struct platform_device *pdev)
{
struct svs_platform *svsp;
- unsigned int svsp_irq;
- int ret;
+ int svsp_irq, ret;
svsp = svs_platform_probe(pdev);
if (IS_ERR(svsp))
@@ -2325,10 +2379,14 @@ static int svs_probe(struct platform_device *pdev)
goto svs_probe_free_resource;
}
- svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0);
+ svsp_irq = platform_get_irq(pdev, 0);
+ if (svsp_irq < 0) {
+ ret = svsp_irq;
+ goto svs_probe_free_resource;
+ }
+
ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
- svsp->irqflags | IRQF_ONESHOT,
- svsp->name, svsp);
+ IRQF_ONESHOT, svsp->name, svsp);
if (ret) {
dev_err(svsp->dev, "register irq(%d) failed: %d\n",
svsp_irq, ret);
@@ -2392,7 +2450,7 @@ static struct platform_driver svs_driver = {
.driver = {
.name = "mtk-svs",
.pm = &svs_pm_ops,
- .of_match_table = of_match_ptr(svs_of_match),
+ .of_match_table = svs_of_match,
},
};
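Much of the svs rework above replaces open-coded shift-and-mask arithmetic with the <linux/bitfield.h> helpers. As a minimal, self-contained sketch of that pattern (the field names here are made up for illustration, not taken from the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical example fields, analogous to the SVSB_*_FLD_* definitions */
#define EXAMPLE_FLD_LO	GENMASK(7, 0)
#define EXAMPLE_FLD_HI	GENMASK(15, 8)

static u32 example_pack(u8 lo, u8 hi)
{
	/* FIELD_PREP() shifts each value into its mask's bit position */
	return FIELD_PREP(EXAMPLE_FLD_LO, lo) | FIELD_PREP(EXAMPLE_FLD_HI, hi);
}

static u8 example_unpack_hi(u32 reg)
{
	/* FIELD_GET() masks the register and shifts the field back down */
	return FIELD_GET(EXAMPLE_FLD_HI, reg);
}

Compared with manual "(val >> 8) & GENMASK(7, 0)" expressions, the macros keep the shift and the mask in one place and catch out-of-range constant values at compile time.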
diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c
index 563440315acd..93449fb3519e 100644
--- a/drivers/soc/pxa/ssp.c
+++ b/drivers/soc/pxa/ssp.c
@@ -180,11 +180,7 @@ static int pxa_ssp_probe(struct platform_device *pdev)
static int pxa_ssp_remove(struct platform_device *pdev)
{
- struct ssp_device *ssp;
-
- ssp = platform_get_drvdata(pdev);
- if (ssp == NULL)
- return -ENODEV;
+ struct ssp_device *ssp = platform_get_drvdata(pdev);
mutex_lock(&ssp_lock);
list_del(&ssp->node);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e0d7a5459562..024e420f1bb7 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -129,7 +129,7 @@ config QCOM_RPMHPD
config QCOM_RPMPD
tristate "Qualcomm RPM Power domain driver"
- depends on PM
+ depends on PM && OF
depends on QCOM_SMD_RPM
select PM_GENERIC_DOMAINS
select PM_GENERIC_DOMAINS_OF
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index 7f8aca533cd3..d07be3700db6 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -5,6 +5,8 @@
* Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on
* previous work of Thara Gopinath and msm-4.9 downstream sources.
*/
+
+#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -13,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regmap.h>
#include <linux/sizes.h>
/*
@@ -31,33 +34,44 @@
/* Internal sampling clock frequency */
#define HW_TIMER_HZ 19200000
-#define BWMON_GLOBAL_IRQ_STATUS 0x0
-#define BWMON_GLOBAL_IRQ_CLEAR 0x8
-#define BWMON_GLOBAL_IRQ_ENABLE 0xc
-#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
-
-#define BWMON_IRQ_STATUS 0x100
-#define BWMON_IRQ_STATUS_ZONE_SHIFT 4
-#define BWMON_IRQ_CLEAR 0x108
-#define BWMON_IRQ_ENABLE 0x10c
-#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5
-#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6
-#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7
-#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \
- BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT))
-
-#define BWMON_ENABLE 0x2a0
+#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x008
+#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x00c
+/*
+ * All values from here on are regmap field values rather than absolute
+ * register offsets.
+ */
+#define BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+
+#define BWMON_V4_IRQ_STATUS 0x100
+#define BWMON_V4_IRQ_CLEAR 0x108
+
+#define BWMON_V4_IRQ_ENABLE 0x10c
+#define BWMON_IRQ_ENABLE_MASK (BIT(1) | BIT(3))
+#define BWMON_V5_IRQ_STATUS 0x000
+#define BWMON_V5_IRQ_CLEAR 0x008
+#define BWMON_V5_IRQ_ENABLE 0x00c
+
+#define BWMON_V4_ENABLE 0x2a0
+#define BWMON_V5_ENABLE 0x010
#define BWMON_ENABLE_ENABLE BIT(0)
-#define BWMON_CLEAR 0x2a4
+#define BWMON_V4_CLEAR 0x2a4
+#define BWMON_V5_CLEAR 0x014
#define BWMON_CLEAR_CLEAR BIT(0)
+#define BWMON_CLEAR_CLEAR_ALL BIT(1)
-#define BWMON_SAMPLE_WINDOW 0x2a8
-#define BWMON_THRESHOLD_HIGH 0x2ac
-#define BWMON_THRESHOLD_MED 0x2b0
-#define BWMON_THRESHOLD_LOW 0x2b4
+#define BWMON_V4_SAMPLE_WINDOW 0x2a8
+#define BWMON_V5_SAMPLE_WINDOW 0x020
-#define BWMON_ZONE_ACTIONS 0x2b8
+#define BWMON_V4_THRESHOLD_HIGH 0x2ac
+#define BWMON_V4_THRESHOLD_MED 0x2b0
+#define BWMON_V4_THRESHOLD_LOW 0x2b4
+#define BWMON_V5_THRESHOLD_HIGH 0x024
+#define BWMON_V5_THRESHOLD_MED 0x028
+#define BWMON_V5_THRESHOLD_LOW 0x02c
+
+#define BWMON_V4_ZONE_ACTIONS 0x2b8
+#define BWMON_V5_ZONE_ACTIONS 0x030
/*
* Actions to perform on some zone 'z' when current zone hits the threshold:
* Increment counter of zone 'z'
@@ -83,55 +97,244 @@
BWMON_ZONE_ACTIONS_CLEAR(2) | \
BWMON_ZONE_ACTIONS_CLEAR(1) | \
BWMON_ZONE_ACTIONS_CLEAR(0))
-/* Value for BWMON_ZONE_ACTIONS */
-#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \
- BWMON_ZONE_ACTIONS_ZONE1 << 8 | \
- BWMON_ZONE_ACTIONS_ZONE2 << 16 | \
- BWMON_ZONE_ACTIONS_ZONE3 << 24)
/*
- * There is no clear documentation/explanation of BWMON_THRESHOLD_COUNT
+ * There is no clear documentation/explanation of BWMON_V4_THRESHOLD_COUNT
* register. Based on observations, this is number of times one threshold has to
* be reached, to trigger interrupt in given zone.
*
* 0xff are maximum values meant to ignore the zones 0 and 2.
*/
-#define BWMON_THRESHOLD_COUNT 0x2bc
-#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8
-#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16
-#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24
+#define BWMON_V4_THRESHOLD_COUNT 0x2bc
+#define BWMON_V5_THRESHOLD_COUNT 0x034
#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff
#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff
-/* BWMONv4 count registers use count unit of 64 kB */
-#define BWMON_COUNT_UNIT_KB 64
-#define BWMON_ZONE_COUNT 0x2d8
-#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+#define BWMON_V4_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+#define BWMON_V5_ZONE_MAX(zone) (0x044 + 4 * (zone))
+
+/* Quirks for specific BWMON types */
+#define BWMON_HAS_GLOBAL_IRQ BIT(0)
+#define BWMON_NEEDS_FORCE_CLEAR BIT(1)
+
+enum bwmon_fields {
+ F_GLOBAL_IRQ_CLEAR,
+ F_GLOBAL_IRQ_ENABLE,
+ F_IRQ_STATUS,
+ F_IRQ_CLEAR,
+ F_IRQ_ENABLE,
+ F_ENABLE,
+ F_CLEAR,
+ F_SAMPLE_WINDOW,
+ F_THRESHOLD_HIGH,
+ F_THRESHOLD_MED,
+ F_THRESHOLD_LOW,
+ F_ZONE_ACTIONS_ZONE0,
+ F_ZONE_ACTIONS_ZONE1,
+ F_ZONE_ACTIONS_ZONE2,
+ F_ZONE_ACTIONS_ZONE3,
+ F_THRESHOLD_COUNT_ZONE0,
+ F_THRESHOLD_COUNT_ZONE1,
+ F_THRESHOLD_COUNT_ZONE2,
+ F_THRESHOLD_COUNT_ZONE3,
+ F_ZONE0_MAX,
+ F_ZONE1_MAX,
+ F_ZONE2_MAX,
+ F_ZONE3_MAX,
+
+ F_NUM_FIELDS
+};
struct icc_bwmon_data {
unsigned int sample_ms;
+ unsigned int count_unit_kb; /* kbytes */
unsigned int default_highbw_kbps;
unsigned int default_medbw_kbps;
unsigned int default_lowbw_kbps;
u8 zone1_thres_count;
u8 zone3_thres_count;
+ unsigned int quirks;
+
+ const struct regmap_config *regmap_cfg;
+ const struct reg_field *regmap_fields;
};
struct icc_bwmon {
struct device *dev;
- void __iomem *base;
+ const struct icc_bwmon_data *data;
int irq;
- unsigned int default_lowbw_kbps;
- unsigned int sample_ms;
+ struct regmap *regmap;
+ struct regmap_field *regs[F_NUM_FIELDS];
+
unsigned int max_bw_kbps;
unsigned int min_bw_kbps;
unsigned int target_kbps;
unsigned int current_kbps;
};
-static void bwmon_clear_counters(struct icc_bwmon *bwmon)
+/* BWMON v4 */
+static const struct reg_field msm8998_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0),
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_read_table = {
+ .no_ranges = msm8998_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V4_IRQ_STATUS, BWMON_V4_IRQ_STATUS),
+ regmap_reg_range(BWMON_V4_ZONE_MAX(0), BWMON_V4_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_volatile_table = {
+ .yes_ranges = msm8998_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(msm8998_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Only the non-readable registers need cache defaults; the rest can simply
+ * be read back from the device.
+ */
+static const struct reg_default msm8998_bwmon_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_CLEAR, 0x0 },
+};
+
+static const struct regmap_config msm8998_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &msm8998_bwmon_reg_read_table,
+ .volatile_table = &msm8998_bwmon_reg_volatile_table,
+ .reg_defaults = msm8998_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/* BWMON v5 */
+static const struct reg_field sdm845_llcc_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = {},
+ [F_GLOBAL_IRQ_ENABLE] = {},
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V5_IRQ_STATUS, 0, 3),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V5_IRQ_CLEAR, 0, 3),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V5_IRQ_ENABLE, 0, 3),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V5_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V5_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V5_SAMPLE_WINDOW, 0, 19),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V5_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V5_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V5_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_CLEAR, BWMON_V5_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V5_CLEAR, BWMON_V5_CLEAR),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_read_table = {
+ .no_ranges = sdm845_llcc_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_STATUS, BWMON_V5_IRQ_STATUS),
+ regmap_reg_range(BWMON_V5_ZONE_MAX(0), BWMON_V5_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_volatile_table = {
+ .yes_ranges = sdm845_llcc_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Only the non-readable registers need cache defaults; the rest can simply
+ * be read back from the device.
+ */
+static const struct reg_default sdm845_llcc_bwmon_reg_defaults[] = {
+ { BWMON_V5_IRQ_CLEAR, 0x0 },
+ { BWMON_V5_CLEAR, 0x0 },
+};
+
+static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &sdm845_llcc_bwmon_reg_read_table,
+ .volatile_table = &sdm845_llcc_bwmon_reg_volatile_table,
+ .reg_defaults = sdm845_llcc_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sdm845_llcc_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
{
+ unsigned int val = BWMON_CLEAR_CLEAR;
+
+ if (clear_all)
+ val |= BWMON_CLEAR_CLEAR_ALL;
/*
* Clear counters. The order and barriers are
* important. Quoting downstream Qualcomm msm-4.9 tree:
@@ -140,7 +343,9 @@ static void bwmon_clear_counters(struct icc_bwmon *bwmon)
* region. So, we need to make sure the counter clear is completed
* before we try to clear the IRQ or do any other counter operations.
*/
- writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR);
+ regmap_field_force_write(bwmon->regs[F_CLEAR], val);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_CLEAR], 0);
}
static void bwmon_clear_irq(struct icc_bwmon *bwmon)
@@ -161,76 +366,91 @@ static void bwmon_clear_irq(struct icc_bwmon *bwmon)
* clearing here so that local writes don't happen before the
* interrupt is cleared.
*/
- writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR);
- writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR);
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], BWMON_IRQ_ENABLE_MASK);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], 0);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_force_write(bwmon->regs[F_GLOBAL_IRQ_CLEAR],
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
}
static void bwmon_disable(struct icc_bwmon *bwmon)
{
/* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
- writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
- writel(0x0, bwmon->base + BWMON_IRQ_ENABLE);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE], 0x0);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], 0x0);
/*
* Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious
* IRQ.
*/
- writel(0x0, bwmon->base + BWMON_ENABLE);
+ regmap_field_write(bwmon->regs[F_ENABLE], 0x0);
}
static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
{
/* Enable interrupts */
- writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE,
- bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
- writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE],
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], irq_enable);
/* Enable bwmon */
- writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE);
+ regmap_field_write(bwmon->regs[F_ENABLE], BWMON_ENABLE_ENABLE);
}
-static unsigned int bwmon_kbps_to_count(unsigned int kbps)
+static unsigned int bwmon_kbps_to_count(struct icc_bwmon *bwmon,
+ unsigned int kbps)
{
- return kbps / BWMON_COUNT_UNIT_KB;
+ return kbps / bwmon->data->count_unit_kb;
}
-static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg,
- unsigned int kbps)
+static void bwmon_set_threshold(struct icc_bwmon *bwmon,
+ struct regmap_field *reg, unsigned int kbps)
{
unsigned int thres;
- thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms,
- MSEC_PER_SEC);
- writel_relaxed(thres, bwmon->base + reg);
+ thres = mult_frac(bwmon_kbps_to_count(bwmon, kbps),
+ bwmon->data->sample_ms, MSEC_PER_SEC);
+ regmap_field_write(reg, thres);
}
-static void bwmon_start(struct icc_bwmon *bwmon,
- const struct icc_bwmon_data *data)
+static void bwmon_start(struct icc_bwmon *bwmon)
{
- unsigned int thres_count;
+ const struct icc_bwmon_data *data = bwmon->data;
int window;
- bwmon_clear_counters(bwmon);
+ bwmon_clear_counters(bwmon, true);
- window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
- /* Maximum sampling window: 0xfffff */
- writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW);
+ window = mult_frac(bwmon->data->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
+ /* Maximum sampling window: 0xffffff for v4 and 0xfffff for v5 */
+ regmap_field_write(bwmon->regs[F_SAMPLE_WINDOW], window);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH,
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
data->default_highbw_kbps);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED,
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
data->default_medbw_kbps);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW,
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW],
data->default_lowbw_kbps);
- thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT |
- BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT |
- data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT |
- BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT;
- writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT);
- writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT,
- bwmon->base + BWMON_ZONE_ACTIONS);
- /* Write barriers in bwmon_clear_irq() */
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE0],
+ BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE1],
+ data->zone1_thres_count);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE2],
+ BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE3],
+ data->zone3_thres_count);
+
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE0],
+ BWMON_ZONE_ACTIONS_ZONE0);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE1],
+ BWMON_ZONE_ACTIONS_ZONE1);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE2],
+ BWMON_ZONE_ACTIONS_ZONE2);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE3],
+ BWMON_ZONE_ACTIONS_ZONE3);
bwmon_clear_irq(bwmon);
bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK);
@@ -242,7 +462,9 @@ static irqreturn_t bwmon_intr(int irq, void *dev_id)
unsigned int status, max;
int zone;
- status = readl(bwmon->base + BWMON_IRQ_STATUS);
+ if (regmap_field_read(bwmon->regs[F_IRQ_STATUS], &status))
+ return IRQ_NONE;
+
status &= BWMON_IRQ_ENABLE_MASK;
if (!status) {
/*
@@ -259,15 +481,18 @@ static irqreturn_t bwmon_intr(int irq, void *dev_id)
bwmon_disable(bwmon);
- zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1;
+ zone = get_bitmask_order(status) - 1;
/*
* Zone max bytes count register returns count units within sampling
* window. Downstream kernel for BWMONv4 (called BWMON type 2 in
* downstream) always increments the max bytes count by one.
*/
- max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1;
- max *= BWMON_COUNT_UNIT_KB;
- bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms);
+ if (regmap_field_read(bwmon->regs[F_ZONE0_MAX + zone], &max))
+ return IRQ_NONE;
+
+ max += 1;
+ max *= bwmon->data->count_unit_kb;
+ bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->data->sample_ms);
return IRQ_WAKE_THREAD;
}
@@ -297,16 +522,17 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
up_kbps = bwmon->target_kbps + 1;
if (bwmon->target_kbps >= bwmon->max_bw_kbps)
- irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT);
+ irq_enable = BIT(1);
else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
- irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT);
+ irq_enable = BIT(3);
else
irq_enable = BWMON_IRQ_ENABLE_MASK;
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps);
- /* Write barriers in bwmon_clear_counters() */
- bwmon_clear_counters(bwmon);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
+ up_kbps);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
+ down_kbps);
+ bwmon_clear_counters(bwmon, false);
bwmon_clear_irq(bwmon);
bwmon_enable(bwmon, irq_enable);
@@ -324,25 +550,47 @@ out:
return IRQ_HANDLED;
}
+static int bwmon_init_regmap(struct platform_device *pdev,
+ struct icc_bwmon *bwmon)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *map;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to map bwmon registers\n");
+
+ map = devm_regmap_init_mmio(dev, base, bwmon->data->regmap_cfg);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "failed to initialize regmap\n");
+
+ BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_reg_fields) != F_NUM_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(sdm845_llcc_bwmon_reg_fields) != F_NUM_FIELDS);
+
+ return devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
+ bwmon->data->regmap_fields,
+ F_NUM_FIELDS);
+}
+
static int bwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dev_pm_opp *opp;
struct icc_bwmon *bwmon;
- const struct icc_bwmon_data *data;
int ret;
bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
if (!bwmon)
return -ENOMEM;
- data = of_device_get_match_data(dev);
+ bwmon->data = of_device_get_match_data(dev);
- bwmon->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(bwmon->base)) {
- dev_err(dev, "failed to map bwmon registers\n");
- return PTR_ERR(bwmon->base);
- }
+ ret = bwmon_init_regmap(pdev, bwmon);
+ if (ret)
+ return ret;
bwmon->irq = platform_get_irq(pdev, 0);
if (bwmon->irq < 0)
@@ -362,8 +610,6 @@ static int bwmon_probe(struct platform_device *pdev)
if (IS_ERR(opp))
return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
- bwmon->sample_ms = data->sample_ms;
- bwmon->default_lowbw_kbps = data->default_lowbw_kbps;
bwmon->dev = dev;
bwmon_disable(bwmon);
@@ -374,7 +620,7 @@ static int bwmon_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "failed to request IRQ\n");
platform_set_drvdata(pdev, bwmon);
- bwmon_start(bwmon, data);
+ bwmon_start(bwmon);
return 0;
}
@@ -388,18 +634,55 @@ static int bwmon_remove(struct platform_device *pdev)
return 0;
}
-/* BWMON v4 */
static const struct icc_bwmon_data msm8998_bwmon_data = {
.sample_ms = 4,
+ .count_unit_kb = 64,
.default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
.default_medbw_kbps = 512 * 1024, /* 512 MBps */
.default_lowbw_kbps = 0,
.zone1_thres_count = 16,
.zone3_thres_count = 1,
+ .quirks = BWMON_HAS_GLOBAL_IRQ,
+ .regmap_fields = msm8998_bwmon_reg_fields,
+ .regmap_cfg = &msm8998_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 1024,
+ .default_highbw_kbps = 800 * 1024, /* 800 MBps */
+ .default_medbw_kbps = 256 * 1024, /* 256 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sc7280_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .default_highbw_kbps = 800 * 1024, /* 800 MBps */
+ .default_medbw_kbps = 256 * 1024, /* 256 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_NEEDS_FORCE_CLEAR,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
};
static const struct of_device_id bwmon_of_match[] = {
- { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data },
+ {
+ .compatible = "qcom,msm8998-bwmon",
+ .data = &msm8998_bwmon_data
+ }, {
+ .compatible = "qcom,sdm845-llcc-bwmon",
+ .data = &sdm845_llcc_bwmon_data
+ }, {
+ .compatible = "qcom,sc7280-llcc-bwmon",
+ .data = &sc7280_llcc_bwmon_data
+ },
{}
};
MODULE_DEVICE_TABLE(of, bwmon_of_match);
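The icc-bwmon conversion above replaces direct readl()/writel() accesses with regmap fields and moves the per-variant constants (count_unit_kb, sample_ms) into struct icc_bwmon_data. As a minimal sketch, assuming the usual kernel headers and using hypothetical parameter names, this is the arithmetic the interrupt handler performs on a zone's max byte count:

static unsigned int example_count_to_kbps(unsigned int zone_max_count,
					  unsigned int count_unit_kb,
					  unsigned int sample_ms)
{
	/* Match the downstream convention of incrementing the raw count by one. */
	unsigned int kb_per_window = (zone_max_count + 1) * count_unit_kb;

	/* Scale kilobytes-per-sampling-window to kilobytes-per-second. */
	return mult_frac(kb_per_window, MSEC_PER_SEC, sample_ms);
}

For the msm8998 data above (count_unit_kb = 64, sample_ms = 4), a raw zone count of 100 corresponds to (100 + 1) * 64 KB per 4 ms window, i.e. 1,616,000 KB/s.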
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 38d7296315a2..8b7e8118f3ce 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -104,6 +104,7 @@ struct qcom_llcc_config {
int size;
bool need_llcc_cfg;
const u32 *reg_offset;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
};
enum llcc_reg_offset {
@@ -296,12 +297,68 @@ static const struct llcc_slice_config sm8450_data[] = {
{LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
};
-static const u32 llcc_v1_2_reg_offset[] = {
+static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2304c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3000c,
+ .cmn_interrupt_0_enable = 0x3001c,
+ .cmn_interrupt_2_enable = 0x3003c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x40000,
+ .drp_ecc_error_cntr_clear = 0x40004,
+ .drp_interrupt_status = 0x41000,
+ .drp_interrupt_clear = 0x41008,
+ .drp_interrupt_enable = 0x4100c,
+ .drp_ecc_error_status0 = 0x42044,
+ .drp_ecc_error_status1 = 0x42048,
+ .drp_ecc_sb_err_syn0 = 0x4204c,
+ .drp_ecc_db_err_syn0 = 0x42070,
+};
+
+static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2034c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3400c,
+ .cmn_interrupt_0_enable = 0x3401c,
+ .cmn_interrupt_2_enable = 0x3403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x50000,
+ .drp_ecc_error_cntr_clear = 0x50004,
+ .drp_interrupt_status = 0x50020,
+ .drp_interrupt_clear = 0x50028,
+ .drp_interrupt_enable = 0x5002c,
+ .drp_ecc_error_status0 = 0x520f4,
+ .drp_ecc_error_status1 = 0x520f8,
+ .drp_ecc_sb_err_syn0 = 0x520fc,
+ .drp_ecc_db_err_syn0 = 0x52120,
+};
+
+/* LLCC register offset starting from v1.0.0 */
+static const u32 llcc_v1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00030000,
[LLCC_COMMON_STATUS0] = 0x0003000c,
};
-static const u32 llcc_v21_reg_offset[] = {
+/* LLCC register offset starting from v2.0.1 */
+static const u32 llcc_v2_1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00034000,
[LLCC_COMMON_STATUS0] = 0x0003400c,
};
@@ -310,70 +367,80 @@ static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sc7280_cfg = {
.sct_data = sc7280_data,
.size = ARRAY_SIZE(sc7280_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sc8180x_cfg = {
.sct_data = sc8180x_data,
.size = ARRAY_SIZE(sc8180x_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sc8280xp_cfg = {
.sct_data = sc8280xp_data,
.size = ARRAY_SIZE(sc8280xp_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
.need_llcc_cfg = false,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm6350_cfg = {
.sct_data = sm6350_data,
.size = ARRAY_SIZE(sm6350_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8150_cfg = {
.sct_data = sm8150_data,
.size = ARRAY_SIZE(sm8150_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8250_cfg = {
.sct_data = sm8250_data,
.size = ARRAY_SIZE(sm8250_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8350_cfg = {
.sct_data = sm8350_data,
.size = ARRAY_SIZE(sm8350_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8450_cfg = {
.sct_data = sm8450_data,
.size = ARRAY_SIZE(sm8450_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v21_reg_offset,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -774,6 +841,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
+ drv_data->edac_reg_offset = cfg->edac_reg_offset;
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
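The llcc-qcom change above threads a per-SoC EDAC register layout (edac_reg_offset) through struct qcom_llcc_config instead of leaving those offsets hard-coded in the EDAC consumer. A minimal sketch of the usual match-data lookup that feeds such per-SoC tables, with a hypothetical probe name:

/* Illustrative only, not part of the patch. */
static int example_llcc_probe(struct platform_device *pdev)
{
	const struct qcom_llcc_config *cfg;

	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	/* Consumers can then read cfg->edac_reg_offset->drp_interrupt_status, etc. */
	return 0;
}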
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
index d6bfd1bbdc2a..121ea409fafc 100644
--- a/drivers/soc/qcom/qcom_stats.c
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -246,6 +246,14 @@ static const struct stats_config rpm_data_dba0 = {
.subsystem_stats_in_smem = false,
};
+static const struct stats_config rpmh_data_sdm845 = {
+ .stats_offset = 0x48,
+ .num_records = 2,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
static const struct stats_config rpmh_data = {
.stats_offset = 0x48,
.num_records = 3,
@@ -261,6 +269,7 @@ static const struct of_device_id qcom_stats_table[] = {
{ .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 },
{ .compatible = "qcom,rpm-stats", .data = &rpm_data },
{ .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
+ { .compatible = "qcom,sdm845-rpmh-stats", .data = &rpmh_data_sdm845 },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_stats_table);
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 328cc8237191..b7158e3c3a0b 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -57,11 +57,11 @@ do { \
#define TLV_TYPE_SIZE sizeof(u8)
#define OPTIONAL_TLV_TYPE_START 0x10
-static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
const void *in_c_struct, u32 out_buf_len,
int enc_level);
-static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
const void *in_buf, u32 in_buf_len, int dec_level);
/**
@@ -76,10 +76,10 @@ static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
*
* Return: struct info of the next element that can be encoded.
*/
-static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
- int level)
+static const struct qmi_elem_info *
+skip_to_next_elem(const struct qmi_elem_info *ei_array, int level)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u8 tlv_type;
if (level > 1) {
@@ -101,11 +101,11 @@ static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
*
* Return: Expected minimum length of the QMI message or 0 on error.
*/
-static int qmi_calc_min_msg_len(struct qmi_elem_info *ei_array,
+static int qmi_calc_min_msg_len(const struct qmi_elem_info *ei_array,
int level)
{
int min_msg_len = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
if (!ei_array)
return min_msg_len;
@@ -194,13 +194,13 @@ static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
* Return: The number of bytes of encoded information on success or negative
* errno on error.
*/
-static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
+static int qmi_encode_struct_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 elem_len, u32 out_buf_len,
int enc_level)
{
int i, rc, encoded_bytes = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
for (i = 0; i < elem_len; i++) {
rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
@@ -233,13 +233,13 @@ static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
* Return: The number of bytes of encoded information on success or negative
* errno on error.
*/
-static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
+static int qmi_encode_string_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 out_buf_len, int enc_level)
{
int rc;
int encoded_bytes = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u32 string_len = 0;
u32 string_len_sz = 0;
@@ -289,11 +289,11 @@ static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
* Return: The number of bytes of encoded information on success or negative
* errno on error.
*/
-static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
const void *in_c_struct, u32 out_buf_len,
int enc_level)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u8 opt_flag_value = 0;
u32 data_len_value = 0, data_len_sz;
u8 *buf_dst = (u8 *)out_buf;
@@ -468,13 +468,13 @@ static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
* Return: The total size of the decoded data elements on success, negative
* errno on error.
*/
-static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
+static int qmi_decode_struct_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 elem_len, u32 tlv_len,
int dec_level)
{
int i, rc, decoded_bytes = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
@@ -514,7 +514,7 @@ static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
* Return: The total size of the decoded data elements on success, negative
* errno on error.
*/
-static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
+static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 tlv_len, int dec_level)
{
@@ -522,7 +522,7 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
int decoded_bytes = 0;
u32 string_len = 0;
u32 string_len_sz = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
if (dec_level == 1) {
string_len = tlv_len;
@@ -564,10 +564,10 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
*
* Return: Pointer to struct info, if found
*/
-static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
- u32 type)
+static const struct qmi_elem_info *find_ei(const struct qmi_elem_info *ei_array,
+ u32 type)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
while (temp_ei->data_type != QMI_EOTI) {
if (temp_ei->tlv_type == (u8)type)
@@ -590,11 +590,11 @@ static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
* Return: The number of bytes of decoded information on success, negative
* errno on error.
*/
-static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
const void *in_buf, u32 in_buf_len,
int dec_level)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u8 opt_flag_value = 1;
u32 data_len_value = 0, data_len_sz = 0;
u8 *buf_dst = out_c_struct;
@@ -713,7 +713,7 @@ static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
* Return: Buffer with encoded message, or negative ERR_PTR() on error
*/
void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
- unsigned int txn_id, struct qmi_elem_info *ei,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
const void *c_struct)
{
struct qmi_header *hdr;
@@ -767,7 +767,7 @@ EXPORT_SYMBOL(qmi_encode_message);
* errno on error.
*/
int qmi_decode_message(const void *buf, size_t len,
- struct qmi_elem_info *ei, void *c_struct)
+ const struct qmi_elem_info *ei, void *c_struct)
{
if (!ei)
return -EINVAL;
@@ -781,7 +781,7 @@ int qmi_decode_message(const void *buf, size_t len,
EXPORT_SYMBOL(qmi_decode_message);
/* Common header in all QMI responses */
-struct qmi_elem_info qmi_response_type_v01_ei[] = {
+const struct qmi_elem_info qmi_response_type_v01_ei[] = {
{
.data_type = QMI_SIGNED_2_BYTE_ENUM,
.elem_len = 1,
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index c8c4c730b135..57052726299d 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(qmi_add_server);
* Return: Transaction id on success, negative errno on failure.
*/
int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
- struct qmi_elem_info *ei, void *c_struct)
+ const struct qmi_elem_info *ei, void *c_struct)
{
int ret;
@@ -736,7 +736,8 @@ EXPORT_SYMBOL(qmi_handle_release);
static ssize_t qmi_send_message(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq, struct qmi_txn *txn,
int type, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct)
+ const struct qmi_elem_info *ei,
+ const void *c_struct)
{
struct msghdr msghdr = {};
struct kvec iv;
@@ -787,7 +788,7 @@ static ssize_t qmi_send_message(struct qmi_handle *qmi,
*/
ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct)
+ const struct qmi_elem_info *ei, const void *c_struct)
{
return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
c_struct);
@@ -808,7 +809,7 @@ EXPORT_SYMBOL(qmi_send_request);
*/
ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct)
+ const struct qmi_elem_info *ei, const void *c_struct)
{
return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
c_struct);
@@ -827,7 +828,8 @@ EXPORT_SYMBOL(qmi_send_response);
* Return: 0 on success, negative errno on failure.
*/
ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
- int msg_id, size_t len, struct qmi_elem_info *ei,
+ int msg_id, size_t len,
+ const struct qmi_elem_info *ei,
const void *c_struct)
{
struct qmi_txn txn;
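The two QMI changes above const-qualify every struct qmi_elem_info parameter through the encode/decode and send/receive paths, so message description tables can live in read-only data. A minimal sketch, using only the struct members visible in this diff, of what such a const table looks like:

/* Illustrative only; real tables describe every TLV element of a message. */
static const struct qmi_elem_info example_resp_ei[] = {
	{
		.data_type = QMI_SIGNED_2_BYTE_ENUM,
		.elem_len  = 1,
		/* remaining element descriptors elided */
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table marker checked by find_ei() */
	},
};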
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 5803038c744e..337b1ad1cd3b 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -29,6 +29,7 @@
#define RPMPD_RWLM 0x6d6c7772
#define RPMPD_RWSC 0x63737772
#define RPMPD_RWSM 0x6d737772
+#define RPMPD_RWGX 0x78677772
/* Operation Keys */
#define KEY_CORNER 0x6e726f63 /* corn */
@@ -433,6 +434,26 @@ static const struct rpmpd_desc sm6125_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
+DEFINE_RPMPD_PAIR(sm6375, vddgx, vddgx_ao, RWGX, LEVEL, 0);
+static struct rpmpd *sm6375_rpmpds[] = {
+ [SM6375_VDDCX] = &sm6125_vddcx,
+ [SM6375_VDDCX_AO] = &sm6125_vddcx_ao,
+ [SM6375_VDDCX_VFL] = &sm6125_vddcx_vfl,
+ [SM6375_VDDMX] = &sm6125_vddmx,
+ [SM6375_VDDMX_AO] = &sm6125_vddmx_ao,
+ [SM6375_VDDMX_VFL] = &sm6125_vddmx_vfl,
+ [SM6375_VDDGX] = &sm6375_vddgx,
+ [SM6375_VDDGX_AO] = &sm6375_vddgx_ao,
+ [SM6375_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+ [SM6375_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+
+static const struct rpmpd_desc sm6375_desc = {
+ .rpmpds = sm6375_rpmpds,
+ .num_pds = ARRAY_SIZE(sm6375_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
+
static struct rpmpd *qcm2290_rpmpds[] = {
[QCM2290_VDDCX] = &sm6115_vddcx,
[QCM2290_VDDCX_AO] = &sm6115_vddcx_ao,
@@ -466,6 +487,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
{ .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
+ { .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index 31faf4aa868e..e848cc9a3cf8 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -136,6 +136,7 @@ static void qcom_smem_state_release(struct kref *ref)
struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
list_del(&state->list);
+ of_node_put(state->of_node);
kfree(state);
}
@@ -205,7 +206,7 @@ struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
kref_init(&state->refcount);
- state->of_node = of_node;
+ state->of_node = of_node_get(of_node);
state->ops = *ops;
state->priv = priv;
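The smem_state fix above pairs an of_node_get() at registration time with an of_node_put() in the kref release callback, so the device_node cannot be freed while a state object still references it. A minimal sketch of that pairing, with hypothetical names:

struct example_obj {
	struct kref refcount;
	struct device_node *of_node;
};

static void example_obj_release(struct kref *ref)
{
	struct example_obj *obj = container_of(ref, struct example_obj, refcount);

	of_node_put(obj->of_node);	/* drop the reference taken at creation */
	kfree(obj);
}

static struct example_obj *example_obj_create(struct device_node *np)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	kref_init(&obj->refcount);
	obj->of_node = of_node_get(np);	/* hold np for the object's lifetime */
	return obj;
}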
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 9df9bba242f3..3e8994d6110e 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -526,7 +526,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
for (id = 0; id < smsm->num_hosts; id++) {
ret = smsm_parse_ipc(smsm, id);
if (ret < 0)
- return ret;
+ goto out_put;
}
/* Acquire the main SMSM state vector */
@@ -534,13 +534,14 @@ static int qcom_smsm_probe(struct platform_device *pdev)
smsm->num_entries * sizeof(u32));
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate shared state entry\n");
- return ret;
+ goto out_put;
}
states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
if (IS_ERR(states)) {
dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
- return PTR_ERR(states);
+ ret = PTR_ERR(states);
+ goto out_put;
}
/* Acquire the list of interrupt mask vectors */
@@ -548,13 +549,14 @@ static int qcom_smsm_probe(struct platform_device *pdev)
ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
- return ret;
+ goto out_put;
}
intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
if (IS_ERR(intr_mask)) {
dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
- return PTR_ERR(intr_mask);
+ ret = PTR_ERR(intr_mask);
+ goto out_put;
}
/* Setup the reference to the local state bits */
@@ -565,7 +567,8 @@ static int qcom_smsm_probe(struct platform_device *pdev)
smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
if (IS_ERR(smsm->state)) {
dev_err(smsm->dev, "failed to register qcom_smem_state\n");
- return PTR_ERR(smsm->state);
+ ret = PTR_ERR(smsm->state);
+ goto out_put;
}
/* Register handlers for remote processor entries of interest. */
@@ -595,16 +598,19 @@ static int qcom_smsm_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smsm);
+ of_node_put(local_node);
return 0;
unwind_interfaces:
+ of_node_put(node);
for (id = 0; id < smsm->num_entries; id++)
if (smsm->entries[id].domain)
irq_domain_remove(smsm->entries[id].domain);
qcom_smem_state_unregister(smsm->state);
-
+out_put:
+ of_node_put(local_node);
return ret;
}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 4554fb8655d3..aa37e1bad095 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -104,6 +104,7 @@ static const char *const pmic_models[] = {
[36] = "PM8009",
[38] = "PM8150C",
[41] = "SMB2351",
+ [45] = "PM6125",
[47] = "PMK8350",
[48] = "PM8350",
[49] = "PM8350C",
@@ -334,6 +335,7 @@ static const struct soc_id soc_id[] = {
{ 482, "SM8450" },
{ 487, "SC7280" },
{ 495, "SC7180P" },
+ { 507, "SM6375" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index c50a6ce1b99d..f95a1337450d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,6 +44,7 @@ config ARCH_RZG2L
bool
select PM
select PM_GENERIC_DOMAINS
+ select RENESAS_RZG2L_IRQC
config ARCH_RZN1
bool
@@ -332,6 +333,16 @@ config ARCH_R9A09G011
endif # ARM64
+if RISCV
+
+config ARCH_R9A07G043
+ bool "RISC-V Platform support for RZ/Five"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/Five SoC.
+
+endif # RISCV
+
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index d171f1b635c7..621ceaa047d4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -50,6 +50,10 @@ static const struct renesas_family fam_rza2 __initconst __maybe_unused = {
.name = "RZ/A2",
};
+static const struct renesas_family fam_rzfive __initconst __maybe_unused = {
+ .name = "RZ/Five",
+};
+
static const struct renesas_family fam_rzg1 __initconst __maybe_unused = {
.name = "RZ/G1",
.reg = 0xff000044, /* PRR (Product Register) */
@@ -102,6 +106,11 @@ static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
.id = 0x40,
};
+static const struct renesas_soc soc_rz_five __initconst __maybe_unused = {
+ .family = &fam_rzfive,
+ .id = 0x847c447,
+};
+
static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = {
.family = &fam_rzg1,
.id = 0x45,
@@ -320,6 +329,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a779m0", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m8", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779mb", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
@@ -358,8 +368,12 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
#endif
#if defined(CONFIG_ARCH_R9A07G043)
+#ifdef CONFIG_RISCV
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
+#else
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_g2ul },
#endif
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index 9df513d1219b..6619256c2d11 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -491,6 +491,22 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
},
};
+static const struct rockchip_iodomain_soc_data soc_data_rv1126_pmu = {
+ .grf_offset = 0x140,
+ .supply_names = {
+ NULL,
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio6",
+ "vccio7",
+ "pmuio0",
+ "pmuio1",
+ },
+};
+
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,px30-io-voltage-domain",
@@ -544,6 +560,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
.data = &soc_data_rv1108_pmu
},
+ {
+ .compatible = "rockchip,rv1126-pmu-io-voltage-domain",
+ .data = &soc_data_rv1126_pmu
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 89795abac951..84bc022f9e5b 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <soc/rockchip/pm_domains.h>
#include <dt-bindings/power/px30-power.h>
+#include <dt-bindings/power/rockchip,rv1126-power.h>
#include <dt-bindings/power/rk3036-power.h>
#include <dt-bindings/power/rk3066-power.h>
#include <dt-bindings/power/rk3128-power.h>
@@ -30,6 +31,7 @@
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
#include <dt-bindings/power/rk3568-power.h>
+#include <dt-bindings/power/rk3588-power.h>
struct rockchip_domain_info {
const char *name;
@@ -41,6 +43,9 @@ struct rockchip_domain_info {
bool active_wakeup;
int pwr_w_mask;
int req_w_mask;
+ int repair_status_mask;
+ u32 pwr_offset;
+ u32 req_offset;
};
struct rockchip_pmu_info {
@@ -49,6 +54,7 @@ struct rockchip_pmu_info {
u32 req_offset;
u32 idle_offset;
u32 ack_offset;
+ u32 repair_status_offset;
u32 core_pwrcnt_offset;
u32 gpu_pwrcnt_offset;
@@ -113,6 +119,22 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
+#define DOMAIN_M_O_R(_name, p_offset, pwr, status, r_status, r_offset, req, idle, ack, wakeup) \
+{ \
+ .name = _name, \
+ .pwr_offset = p_offset, \
+ .pwr_w_mask = (pwr) << 16, \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .repair_status_mask = (r_status), \
+ .req_offset = r_offset, \
+ .req_w_mask = (req) << 16, \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
+ .active_wakeup = wakeup, \
+}
+
#define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
{ \
.name = _name, \
@@ -126,6 +148,9 @@ struct rockchip_pmu {
#define DOMAIN_PX30(name, pwr, status, req, wakeup) \
DOMAIN_M(name, pwr, status, req, (req) << 16, req, wakeup)
+#define DOMAIN_RV1126(name, pwr, req, idle, wakeup) \
+ DOMAIN_M(name, pwr, pwr, req, idle, idle, wakeup)
+
#define DOMAIN_RK3288(name, pwr, status, req, wakeup) \
DOMAIN(name, pwr, status, req, req, (req) << 16, wakeup)
@@ -244,6 +269,9 @@ void rockchip_pmu_unblock(void)
}
EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
+#define DOMAIN_RK3588(name, p_offset, pwr, status, r_status, r_offset, req, idle, wakeup) \
+ DOMAIN_M_O_R(name, p_offset, pwr, status, r_status, r_offset, req, idle, idle, wakeup)
+
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
@@ -268,6 +296,7 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
const struct rockchip_domain_info *pd_info = pd->info;
struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
+ u32 pd_req_offset = pd_info->req_offset;
unsigned int target_ack;
unsigned int val;
bool is_idle;
@@ -276,11 +305,11 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
if (pd_info->req_mask == 0)
return 0;
else if (pd_info->req_w_mask)
- regmap_write(pmu->regmap, pmu->info->req_offset,
+ regmap_write(pmu->regmap, pmu->info->req_offset + pd_req_offset,
idle ? (pd_info->req_mask | pd_info->req_w_mask) :
pd_info->req_w_mask);
else
- regmap_update_bits(pmu->regmap, pmu->info->req_offset,
+ regmap_update_bits(pmu->regmap, pmu->info->req_offset + pd_req_offset,
pd_info->req_mask, idle ? -1U : 0);
wmb();
@@ -363,6 +392,12 @@ static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
+ if (pd->info->repair_status_mask) {
+ regmap_read(pmu->regmap, pmu->info->repair_status_offset, &val);
+ /* 1'b1: power on, 1'b0: power off */
+ return val & pd->info->repair_status_mask;
+ }
+
/* check idle status for idle-only domains */
if (pd->info->status_mask == 0)
return !rockchip_pmu_domain_is_idle(pd);
@@ -378,16 +413,17 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
+ u32 pd_pwr_offset = pd->info->pwr_offset;
bool is_on;
if (pd->info->pwr_mask == 0)
return;
else if (pd->info->pwr_w_mask)
- regmap_write(pmu->regmap, pmu->info->pwr_offset,
+ regmap_write(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
on ? pd->info->pwr_w_mask :
(pd->info->pwr_mask | pd->info->pwr_w_mask));
else
- regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
+ regmap_update_bits(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
pd->info->pwr_mask, on ? 0 : -1U);
wmb();
@@ -514,6 +550,9 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
node, id);
return -EINVAL;
}
+ /* RK3588 has domains with two parents (RKVDEC0/RKVDEC1) */
+ if (pmu->genpd_data.domains[id])
+ return 0;
pd_info = &pmu->info->domain_info[id];
if (!pd_info) {
@@ -595,14 +634,6 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
}
}
- error = rockchip_pd_power(pd, true);
- if (error) {
- dev_err(pmu->dev,
- "failed to power on domain '%pOFn': %d\n",
- node, error);
- goto err_unprepare_clocks;
- }
-
if (pd->info->name)
pd->genpd.name = pd->info->name;
else
@@ -614,7 +645,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.flags = GENPD_FLAG_PM_CLK;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
- pm_genpd_init(&pd->genpd, NULL, false);
+ pm_genpd_init(&pd->genpd, NULL, !rockchip_pmu_domain_is_on(pd));
pmu->genpd_data.domains[id] = &pd->genpd;
return 0;
@@ -855,6 +886,16 @@ static const struct rockchip_domain_info px30_pm_domains[] = {
[PX30_PD_GPU] = DOMAIN_PX30("gpu", BIT(15), BIT(15), BIT(2), false),
};
+static const struct rockchip_domain_info rv1126_pm_domains[] = {
+ [RV1126_PD_VEPU] = DOMAIN_RV1126("vepu", BIT(2), BIT(9), BIT(9), false),
+ [RV1126_PD_VI] = DOMAIN_RV1126("vi", BIT(4), BIT(6), BIT(6), false),
+ [RV1126_PD_ISPP] = DOMAIN_RV1126("ispp", BIT(1), BIT(8), BIT(8), false),
+ [RV1126_PD_VDPU] = DOMAIN_RV1126("vdpu", BIT(3), BIT(10), BIT(10), false),
+ [RV1126_PD_NVM] = DOMAIN_RV1126("nvm", BIT(7), BIT(11), BIT(11), false),
+ [RV1126_PD_SDIO] = DOMAIN_RV1126("sdio", BIT(8), BIT(13), BIT(13), false),
+ [RV1126_PD_USB] = DOMAIN_RV1126("usb", BIT(9), BIT(15), BIT(15), false),
+};
+
static const struct rockchip_domain_info rk3036_pm_domains[] = {
[RK3036_PD_MSCH] = DOMAIN_RK3036("msch", BIT(14), BIT(23), BIT(30), true),
[RK3036_PD_CORE] = DOMAIN_RK3036("core", BIT(13), BIT(17), BIT(24), false),
@@ -982,6 +1023,38 @@ static const struct rockchip_domain_info rk3568_pm_domains[] = {
[RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false),
};
+static const struct rockchip_domain_info rk3588_pm_domains[] = {
+ [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, BIT(1), 0x0, BIT(0), BIT(0), false),
+ [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0, 0x0, 0, 0, false),
+ [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0, 0x0, 0, 0, false),
+ [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, BIT(2), 0x0, BIT(1), BIT(1), false),
+ [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, BIT(3), 0x0, BIT(2), BIT(2), false),
+ [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, BIT(4), 0x0, BIT(3), BIT(3), false),
+ [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, BIT(5), 0x0, BIT(4), BIT(4), false),
+ [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, BIT(6), 0x0, BIT(5), BIT(5), false),
+ [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, BIT(7), 0x0, BIT(6), BIT(6), false),
+ [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, BIT(8), 0x0, BIT(7), BIT(7), false),
+ [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, BIT(9), 0x0, BIT(8), BIT(8), false),
+ [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, BIT(10), 0x0, 0, 0, false),
+ [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, BIT(11), 0x0, BIT(9), BIT(9), false),
+ [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, BIT(12), 0x0, BIT(10), BIT(10), false),
+ [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, BIT(13), 0x0, 0, 0, false),
+ [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, BIT(14), 0x0, BIT(11), BIT(11), false),
+ [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, BIT(15), 0x0, BIT(12), BIT(12), false),
+ [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
+ [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, BIT(17), 0x0, BIT(15), BIT(15), false),
+ [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, BIT(18), 0x4, BIT(0), BIT(16), false),
+ [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, BIT(19), 0x4, BIT(1), BIT(17), false),
+ [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, BIT(20), 0x4, BIT(5), BIT(21), false),
+ [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, BIT(21), 0x0, 0, 0, false),
+ [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, BIT(22), 0x0, 0, 0, true),
+ [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0, 0x4, BIT(2), BIT(18), false),
+ [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, BIT(23), 0x0, 0, 0, false),
+ [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, BIT(24), 0x4, BIT(3), BIT(19), false),
+ [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, BIT(25), 0x4, BIT(4), BIT(20), true),
+ [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, BIT(26), 0x0, 0, 0, false),
+};
+
static const struct rockchip_pmu_info px30_pmu = {
.pwr_offset = 0x18,
.status_offset = 0x20,
@@ -1128,6 +1201,29 @@ static const struct rockchip_pmu_info rk3568_pmu = {
.domain_info = rk3568_pm_domains,
};
+static const struct rockchip_pmu_info rk3588_pmu = {
+ .pwr_offset = 0x14c,
+ .status_offset = 0x180,
+ .req_offset = 0x10c,
+ .idle_offset = 0x120,
+ .ack_offset = 0x118,
+ .repair_status_offset = 0x290,
+
+ .num_domains = ARRAY_SIZE(rk3588_pm_domains),
+ .domain_info = rk3588_pm_domains,
+};
+
+static const struct rockchip_pmu_info rv1126_pmu = {
+ .pwr_offset = 0x110,
+ .status_offset = 0x108,
+ .req_offset = 0xc0,
+ .idle_offset = 0xd8,
+ .ack_offset = 0xd0,
+
+ .num_domains = ARRAY_SIZE(rv1126_pm_domains),
+ .domain_info = rv1126_pm_domains,
+};
+
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
{
.compatible = "rockchip,px30-power-controller",
@@ -1177,6 +1273,14 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.compatible = "rockchip,rk3568-power-controller",
.data = (void *)&rk3568_pmu,
},
+ {
+ .compatible = "rockchip,rk3588-power-controller",
+ .data = (void *)&rk3588_pmu,
+ },
+ {
+ .compatible = "rockchip,rv1126-power-controller",
+ .data = (void *)&rv1126_pmu,
+ },
{ /* sentinel */ },
};
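The RK3588 support above relies on registers with write-enable bits in the upper half-word (the `(pwr) << 16` and `(req) << 16` masks in DOMAIN_M_O_R), combined with per-domain pwr_offset/req_offset values added on top of the PMU-wide offsets. A minimal sketch of a write to such a register, with made-up offsets and masks:

/*
 * Illustrative only: bits [31:16] select which of bits [15:0] the write
 * actually updates, so no read-modify-write is needed.
 */
static void example_power_domain_off(struct regmap *pmu, u32 pmu_pwr_offset,
				     u32 pd_pwr_offset, u32 pwr_mask)
{
	regmap_write(pmu, pmu_pwr_offset + pd_pwr_offset,
		     pwr_mask | (pwr_mask << 16));
}

Domains without a write-enable mask fall back to regmap_update_bits(), as rockchip_do_pmu_set_power_domain() does above.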
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index a8f3876963a0..92f9186c1c42 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = {
static struct sunxi_sram_desc sun50i_a64_sram_c = {
.data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
- SUNXI_SRAM_MAP(0, 1, "cpu"),
- SUNXI_SRAM_MAP(1, 0, "de2")),
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
};
static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,36 +254,36 @@ int sunxi_sram_claim(struct device *dev)
writel(val | ((device << sram_data->offset) & mask),
base + sram_data->reg);
+ sram_desc->claimed = true;
spin_unlock(&sram_lock);
return 0;
}
EXPORT_SYMBOL(sunxi_sram_claim);
-int sunxi_sram_release(struct device *dev)
+void sunxi_sram_release(struct device *dev)
{
const struct sunxi_sram_data *sram_data;
struct sunxi_sram_desc *sram_desc;
if (!dev || !dev->of_node)
- return -EINVAL;
+ return;
sram_data = sunxi_sram_of_parse(dev->of_node, NULL);
if (IS_ERR(sram_data))
- return -EINVAL;
+ return;
sram_desc = to_sram_desc(sram_data);
spin_lock(&sram_lock);
sram_desc->claimed = false;
spin_unlock(&sram_lock);
-
- return 0;
}
EXPORT_SYMBOL(sunxi_sram_release);
struct sunxi_sramc_variant {
int num_emac_clocks;
+ bool has_ldo_ctrl;
};
static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
@@ -294,6 +294,11 @@ static const struct sunxi_sramc_variant sun8i_h3_sramc_variant = {
.num_emac_clocks = 1,
};
+static const struct sunxi_sramc_variant sun20i_d1_sramc_variant = {
+ .num_emac_clocks = 1,
+ .has_ldo_ctrl = true,
+};
+
static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
.num_emac_clocks = 1,
};
@@ -303,37 +308,38 @@ static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
};
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
+#define SUNXI_SYS_LDO_CTRL_REG 0x150
+
static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
unsigned int reg)
{
- const struct sunxi_sramc_variant *variant;
-
- variant = of_device_get_match_data(dev);
+ const struct sunxi_sramc_variant *variant = dev_get_drvdata(dev);
- if (reg < SUNXI_SRAM_EMAC_CLOCK_REG)
- return false;
- if (reg > SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
- return false;
+ if (reg >= SUNXI_SRAM_EMAC_CLOCK_REG &&
+ reg < SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
+ return true;
+ if (reg == SUNXI_SYS_LDO_CTRL_REG && variant->has_ldo_ctrl)
+ return true;
- return true;
+ return false;
}
-static struct regmap_config sunxi_sram_emac_clock_regmap = {
+static struct regmap_config sunxi_sram_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
/* last defined register */
- .max_register = SUNXI_SRAM_EMAC_CLOCK_REG + 4,
+ .max_register = SUNXI_SYS_LDO_CTRL_REG,
/* other devices have no business accessing other registers */
.readable_reg = sunxi_sram_regmap_accessible_reg,
.writeable_reg = sunxi_sram_regmap_accessible_reg,
};
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
{
- struct dentry *d;
- struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
sram_dev = &pdev->dev;
@@ -341,24 +347,21 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
+ dev_set_drvdata(dev, (struct sunxi_sramc_variant *)variant);
+
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
- &sunxi_sram_fops);
- if (!d)
- return -ENOMEM;
+ if (variant->num_emac_clocks || variant->has_ldo_ctrl) {
+ regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ }
- if (variant->num_emac_clocks > 0) {
- emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
- &sunxi_sram_emac_clock_regmap);
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (IS_ERR(emac_clock))
- return PTR_ERR(emac_clock);
- }
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
return 0;
}
@@ -385,6 +388,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = {
.data = &sun8i_h3_sramc_variant,
},
{
+ .compatible = "allwinner,sun20i-d1-system-control",
+ .data = &sun20i_d1_sramc_variant,
+ },
+ {
.compatible = "allwinner,sun50i-a64-sram-controller",
.data = &sun50i_a64_sramc_variant,
},
@@ -409,9 +416,8 @@ static struct platform_driver sunxi_sram_driver = {
.name = "sunxi-sram",
.of_match_table = sunxi_sram_dt_match,
},
- .probe = sunxi_sram_probe,
};
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 5725c8ef0406..d1ecadffa1bb 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -136,7 +136,6 @@ config SOC_TEGRA_FUSE
def_bool y
depends on ARCH_TEGRA
select SOC_BUS
- select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC
config SOC_TEGRA_FLOWCTRL
bool
@@ -162,3 +161,12 @@ config SOC_TEGRA30_VOLTAGE_COUPLER
bool "Voltage scaling support for Tegra30 SoCs"
depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
depends on REGULATOR
+
+config SOC_TEGRA_CBB
+ tristate "Tegra driver to handle error from CBB"
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
+ default y
+ help
+ Support for handling errors from the Tegra Control Backbone (CBB).
+ This driver handles errors from the CBB and prints debug
+ information about the failed transactions.
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 054e862b63d8..d722f512dc9d 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += fuse/
+obj-y += cbb/
obj-y += common.o
obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
diff --git a/drivers/soc/tegra/cbb/Makefile b/drivers/soc/tegra/cbb/Makefile
new file mode 100644
index 000000000000..e3ac6cdddf5c
--- /dev/null
+++ b/drivers/soc/tegra/cbb/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Control Backbone Driver code.
+#
+ifdef CONFIG_SOC_TEGRA_CBB
+obj-y += tegra-cbb.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-cbb.o
+obj-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra234-cbb.o
+endif
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
new file mode 100644
index 000000000000..d200937353c7
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+void tegra_cbb_print_err(struct seq_file *file, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ if (file) {
+ seq_vprintf(file, fmt, args);
+ } else {
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_crit("%pV", &vaf);
+ }
+
+ va_end(args);
+}
+
+void tegra_cbb_print_cache(struct seq_file *file, u32 cache)
+{
+ const char *buff_str, *mod_str, *rd_str, *wr_str;
+
+ buff_str = (cache & BIT(0)) ? "Bufferable " : "";
+ mod_str = (cache & BIT(1)) ? "Modifiable " : "";
+ rd_str = (cache & BIT(2)) ? "Read-Allocate " : "";
+ wr_str = (cache & BIT(3)) ? "Write-Allocate" : "";
+
+ if (cache == 0x0)
+ buff_str = "Device Non-Bufferable";
+
+ tegra_cbb_print_err(file, "\t Cache\t\t\t: 0x%x -- %s%s%s%s\n",
+ cache, buff_str, mod_str, rd_str, wr_str);
+}
+
+void tegra_cbb_print_prot(struct seq_file *file, u32 prot)
+{
+ const char *data_str, *secure_str, *priv_str;
+
+ data_str = (prot & 0x4) ? "Instruction" : "Data";
+ secure_str = (prot & 0x2) ? "Non-Secure" : "Secure";
+ priv_str = (prot & 0x1) ? "Privileged" : "Unprivileged";
+
+ tegra_cbb_print_err(file, "\t Protection\t\t: 0x%x -- %s, %s, %s Access\n",
+ prot, priv_str, secure_str, data_str);
+}
+
+static int tegra_cbb_err_show(struct seq_file *file, void *data)
+{
+ struct tegra_cbb *cbb = file->private;
+
+ return cbb->ops->debugfs_show(cbb, file, data);
+}
+
+static int tegra_cbb_err_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_cbb_err_show, inode->i_private);
+}
+
+static const struct file_operations tegra_cbb_err_fops = {
+ .open = tegra_cbb_err_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+{
+ static struct dentry *root;
+
+ if (!root) {
+ root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("%s(): could not create debugfs node\n", __func__);
+ return PTR_ERR(root);
+ }
+ }
+
+ return 0;
+}
+
+void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->stall_enable)
+ cbb->ops->stall_enable(cbb);
+}
+
+void tegra_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->fault_enable)
+ cbb->ops->fault_enable(cbb);
+}
+
+void tegra_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->error_clear)
+ cbb->ops->error_clear(cbb);
+}
+
+u32 tegra_cbb_get_status(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->get_status)
+ return cbb->ops->get_status(cbb);
+
+ return 0;
+}
+
+int tegra_cbb_get_irq(struct platform_device *pdev, unsigned int *nonsec_irq,
+ unsigned int *sec_irq)
+{
+ unsigned int index = 0;
+ int num_intr = 0, irq;
+
+ num_intr = platform_irq_count(pdev);
+ if (!num_intr)
+ return -EINVAL;
+
+ if (num_intr == 2) {
+ irq = platform_get_irq(pdev, index);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get non-secure IRQ: %d\n", irq);
+ return -ENOENT;
+ }
+
+ *nonsec_irq = irq;
+ index++;
+ }
+
+ irq = platform_get_irq(pdev, index);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get secure IRQ: %d\n", irq);
+ return -ENOENT;
+ }
+
+ *sec_irq = irq;
+
+ if (num_intr == 1)
+ dev_dbg(&pdev->dev, "secure IRQ: %u\n", *sec_irq);
+
+ if (num_intr == 2)
+ dev_dbg(&pdev->dev, "secure IRQ: %u, non-secure IRQ: %u\n", *sec_irq, *nonsec_irq);
+
+ return 0;
+}
+
+int tegra_cbb_register(struct tegra_cbb *cbb)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ ret = tegra_cbb_err_debugfs_init(cbb);
+ if (ret) {
+ dev_err(cbb->dev, "failed to create debugfs\n");
+ return ret;
+ }
+ }
+
+ /* register interrupt handler for errors due to different initiators */
+ ret = cbb->ops->interrupt_enable(cbb);
+ if (ret < 0) {
+ dev_err(cbb->dev, "failed to register CBB interrupt handler\n");
+ return ret;
+ }
+
+ cbb->ops->error_enable(cbb);
+ dsb(sy);
+
+ return 0;
+}
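tegra-cbb.c above wraps every SoC-specific operation behind struct tegra_cbb_ops and treats several hooks (stall_enable, fault_enable, error_clear, get_status) as optional. A minimal sketch of that optional-ops dispatch, with hypothetical names:

/* Illustrative only, not part of the patch. */
struct example_ops {
	void (*fault_enable)(void *priv);	/* mandatory for this example */
	u32 (*get_status)(void *priv);		/* optional */
};

static u32 example_get_status(const struct example_ops *ops, void *priv)
{
	if (ops->get_status)
		return ops->get_status(priv);

	return 0;	/* a missing hook means "nothing to report" */
}

Per-chip drivers then only fill in the hooks their hardware actually supports, and the common layer degrades gracefully for the rest.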
diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
new file mode 100644
index 000000000000..1ae0bd9a1ac1
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
@@ -0,0 +1,2364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ *
+ * The driver handles errors from the Control Backbone (CBB) generated due to
+ * illegal accesses. When an error is reported from a NOC within the CBB,
+ * the driver checks the ErrVld status of all three error loggers of that NOC.
+ * It then prints debug information about the failed transaction using the
+ * ErrLog registers of the error logger which has ErrVld set. Currently SLV,
+ * DEC, TMO, SEC and UNS are the error codes supported by the CBB.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+#define ERRLOGGER_0_ID_COREID_0 0x00000000
+#define ERRLOGGER_0_ID_REVISIONID_0 0x00000004
+#define ERRLOGGER_0_FAULTEN_0 0x00000008
+#define ERRLOGGER_0_ERRVLD_0 0x0000000c
+#define ERRLOGGER_0_ERRCLR_0 0x00000010
+#define ERRLOGGER_0_ERRLOG0_0 0x00000014
+#define ERRLOGGER_0_ERRLOG1_0 0x00000018
+#define ERRLOGGER_0_RSVD_00_0 0x0000001c
+#define ERRLOGGER_0_ERRLOG3_0 0x00000020
+#define ERRLOGGER_0_ERRLOG4_0 0x00000024
+#define ERRLOGGER_0_ERRLOG5_0 0x00000028
+#define ERRLOGGER_0_STALLEN_0 0x00000038
+
+#define ERRLOGGER_1_ID_COREID_0 0x00000080
+#define ERRLOGGER_1_ID_REVISIONID_0 0x00000084
+#define ERRLOGGER_1_FAULTEN_0 0x00000088
+#define ERRLOGGER_1_ERRVLD_0 0x0000008c
+#define ERRLOGGER_1_ERRCLR_0 0x00000090
+#define ERRLOGGER_1_ERRLOG0_0 0x00000094
+#define ERRLOGGER_1_ERRLOG1_0 0x00000098
+#define ERRLOGGER_1_RSVD_00_0 0x0000009c
+#define ERRLOGGER_1_ERRLOG3_0 0x000000a0
+#define ERRLOGGER_1_ERRLOG4_0 0x000000a4
+#define ERRLOGGER_1_ERRLOG5_0 0x000000a8
+#define ERRLOGGER_1_STALLEN_0 0x000000b8
+
+#define ERRLOGGER_2_ID_COREID_0 0x00000100
+#define ERRLOGGER_2_ID_REVISIONID_0 0x00000104
+#define ERRLOGGER_2_FAULTEN_0 0x00000108
+#define ERRLOGGER_2_ERRVLD_0 0x0000010c
+#define ERRLOGGER_2_ERRCLR_0 0x00000110
+#define ERRLOGGER_2_ERRLOG0_0 0x00000114
+#define ERRLOGGER_2_ERRLOG1_0 0x00000118
+#define ERRLOGGER_2_RSVD_00_0 0x0000011c
+#define ERRLOGGER_2_ERRLOG3_0 0x00000120
+#define ERRLOGGER_2_ERRLOG4_0 0x00000124
+#define ERRLOGGER_2_ERRLOG5_0 0x00000128
+#define ERRLOGGER_2_STALLEN_0 0x00000138
+
+#define CBB_NOC_INITFLOW GENMASK(23, 20)
+#define CBB_NOC_TARGFLOW GENMASK(19, 16)
+#define CBB_NOC_TARG_SUBRANGE GENMASK(15, 9)
+#define CBB_NOC_SEQID GENMASK(8, 0)
+
+#define BPMP_NOC_INITFLOW GENMASK(20, 18)
+#define BPMP_NOC_TARGFLOW GENMASK(17, 13)
+#define BPMP_NOC_TARG_SUBRANGE GENMASK(12, 9)
+#define BPMP_NOC_SEQID GENMASK(8, 0)
+
+#define AON_NOC_INITFLOW GENMASK(22, 21)
+#define AON_NOC_TARGFLOW GENMASK(20, 15)
+#define AON_NOC_TARG_SUBRANGE GENMASK(14, 9)
+#define AON_NOC_SEQID GENMASK(8, 0)
+
+#define SCE_NOC_INITFLOW GENMASK(21, 19)
+#define SCE_NOC_TARGFLOW GENMASK(18, 14)
+#define SCE_NOC_TARG_SUBRANGE GENMASK(13, 9)
+#define SCE_NOC_SEQID GENMASK(8, 0)
+
+#define CBB_NOC_AXCACHE GENMASK(3, 0)
+#define CBB_NOC_NON_MOD GENMASK(4, 4)
+#define CBB_NOC_AXPROT GENMASK(7, 5)
+#define CBB_NOC_FALCONSEC GENMASK(9, 8)
+#define CBB_NOC_GRPSEC GENMASK(16, 10)
+#define CBB_NOC_VQC GENMASK(18, 17)
+#define CBB_NOC_MSTR_ID GENMASK(22, 19)
+#define CBB_NOC_AXI_ID GENMASK(30, 23)
+
+#define CLUSTER_NOC_AXCACHE GENMASK(3, 0)
+#define CLUSTER_NOC_AXPROT GENMASK(6, 4)
+#define CLUSTER_NOC_FALCONSEC GENMASK(8, 7)
+#define CLUSTER_NOC_GRPSEC GENMASK(15, 9)
+#define CLUSTER_NOC_VQC GENMASK(17, 16)
+#define CLUSTER_NOC_MSTR_ID GENMASK(21, 18)
+
+#define USRBITS_MSTR_ID GENMASK(21, 18)
+
+#define CBB_ERR_OPC GENMASK(4, 1)
+#define CBB_ERR_ERRCODE GENMASK(10, 8)
+#define CBB_ERR_LEN1 GENMASK(27, 16)
+
+#define DMAAPB_X_RAW_INTERRUPT_STATUS 0x2ec
+
+struct tegra194_cbb_packet_header {
+ bool lock; // [0]
+ u8 opc; // [4:1]
+ u8 errcode; // [10:8]= RD, RDW, RDL, RDX, WR, WRW, WRC, PRE, URG
+ u16 len1; // [27:16]
+ bool format; // [31] = 1 -> FlexNoC versions 2.7 & above
+};
+
+struct tegra194_cbb_aperture {
+ u8 initflow;
+ u8 targflow;
+ u8 targ_subrange;
+ u8 init_mapping;
+ u32 init_localaddress;
+ u8 targ_mapping;
+ u32 targ_localaddress;
+ u16 seqid;
+};
+
+struct tegra194_cbb_userbits {
+ u8 axcache;
+ u8 non_mod;
+ u8 axprot;
+ u8 falconsec;
+ u8 grpsec;
+ u8 vqc;
+ u8 mstr_id;
+ u8 axi_id;
+};
+
+struct tegra194_cbb_noc_data {
+ const char *name;
+ bool erd_mask_inband_err;
+ const char * const *master_id;
+ unsigned int max_aperture;
+ const struct tegra194_cbb_aperture *noc_aperture;
+ const char * const *routeid_initflow;
+ const char * const *routeid_targflow;
+ void (*parse_routeid)(struct tegra194_cbb_aperture *info, u64 routeid);
+ void (*parse_userbits)(struct tegra194_cbb_userbits *usrbits, u32 elog_5);
+};
+
+struct tegra194_axi2apb_bridge {
+ struct resource res;
+ void __iomem *base;
+};
+
+struct tegra194_cbb {
+ struct tegra_cbb base;
+
+ const struct tegra194_cbb_noc_data *noc;
+ struct resource *res;
+
+ void __iomem *regs;
+ unsigned int num_intr;
+ unsigned int sec_irq;
+ unsigned int nonsec_irq;
+ u32 errlog0;
+ u32 errlog1;
+ u32 errlog2;
+ u32 errlog3;
+ u32 errlog4;
+ u32 errlog5;
+
+ struct tegra194_axi2apb_bridge *bridges;
+ unsigned int num_bridges;
+};
+
+static inline struct tegra194_cbb *to_tegra194_cbb(struct tegra_cbb *cbb)
+{
+ return container_of(cbb, struct tegra194_cbb, base);
+}
+
+static LIST_HEAD(cbb_list);
+static DEFINE_SPINLOCK(cbb_lock);
+
+static const char * const tegra194_cbb_trantype[] = {
+ "RD - Read, Incrementing",
+ "RDW - Read, Wrap", /* Not Supported */
+ "RDX - Exclusive Read", /* Not Supported */
+ "RDL - Linked Read", /* Not Supported */
+ "WR - Write, Incrementing",
+ "WRW - Write, Wrap", /* Not Supported */
+ "WRC - Exclusive Write", /* Not Supported */
+ "PRE - Preamble Sequence for Fixed Accesses"
+};
+
+static const char * const tegra194_axi2apb_error[] = {
+ "SFIFONE - Status FIFO Not Empty interrupt",
+ "SFIFOF - Status FIFO Full interrupt",
+ "TIM - Timer(Timeout) interrupt",
+ "SLV - SLVERR interrupt",
+ "NULL",
+ "ERBF - Early response buffer Full interrupt",
+ "NULL",
+ "RDFIFOF - Read Response FIFO Full interrupt",
+ "WRFIFOF - Write Response FIFO Full interrupt",
+ "CH0DFIFOF - Ch0 Data FIFO Full interrupt",
+ "CH1DFIFOF - Ch1 Data FIFO Full interrupt",
+ "CH2DFIFOF - Ch2 Data FIFO Full interrupt",
+ "UAT - Unsupported alignment type error",
+ "UBS - Unsupported burst size error",
+ "UBE - Unsupported Byte Enable error",
+ "UBT - Unsupported burst type error",
+ "BFS - Block Firewall security error",
+ "ARFS - Address Range Firewall security error",
+ "CH0RFIFOF - Ch0 Request FIFO Full interrupt",
+ "CH1RFIFOF - Ch1 Request FIFO Full interrupt",
+ "CH2RFIFOF - Ch2 Request FIFO Full interrupt"
+};
+
+static const char * const tegra194_master_id[] = {
+ [0x0] = "CCPLEX",
+ [0x1] = "CCPLEX_DPMU",
+ [0x2] = "BPMP",
+ [0x3] = "AON",
+ [0x4] = "SCE",
+ [0x5] = "GPCDMA_PERIPHERAL",
+ [0x6] = "TSECA",
+ [0x7] = "TSECB",
+ [0x8] = "JTAGM_DFT",
+ [0x9] = "CORESIGHT_AXIAP",
+ [0xa] = "APE",
+ [0xb] = "PEATR",
+ [0xc] = "NVDEC",
+ [0xd] = "RCE",
+ [0xe] = "NVDEC1"
+};
+
+static const struct tegra_cbb_error tegra194_cbb_errors[] = {
+ {
+ .code = "SLV",
+ .source = "Target",
+ .desc = "Target error detected by CBB slave"
+ }, {
+ .code = "DEC",
+ .source = "Initiator NIU",
+ .desc = "Address decode error"
+ }, {
+ .code = "UNS",
+ .source = "Target NIU",
+ .desc = "Unsupported request. Not a valid transaction"
+ }, {
+ .code = "DISC", /* Not Supported by CBB */
+ .source = "Power Disconnect",
+ .desc = "Disconnected target or domain"
+ }, {
+ .code = "SEC",
+ .source = "Initiator NIU or Firewall",
+ .desc = "Security violation. Firewall error"
+ }, {
+ .code = "HIDE", /* Not Supported by CBB */
+ .source = "Firewall",
+ .desc = "Hidden security violation, reported as OK to initiator"
+ }, {
+ .code = "TMO",
+ .source = "Target NIU",
+ .desc = "Target time-out error"
+ }, {
+ .code = "RSV",
+ .source = "None",
+ .desc = "Reserved"
+ }
+};
+
+/*
+ * CBB NOC aperture lookup table as per file "cbb_central_noc_Structure.info".
+ */
+static const char * const tegra194_cbbcentralnoc_routeid_initflow[] = {
+ [0x0] = "aon_p2ps/I/aon",
+ [0x1] = "ape_p2ps/I/ape_p2ps",
+ [0x2] = "bpmp_p2ps/I/bpmp_p2ps",
+ [0x3] = "ccroc_p2ps/I/ccroc_p2ps",
+ [0x4] = "csite_p2ps/I/0",
+ [0x5] = "gpcdma_mmio_p2ps/I/0",
+ [0x6] = "jtag_p2ps/I/0",
+ [0x7] = "nvdec1_p2ps/I/0",
+ [0x8] = "nvdec_p2ps/I/0",
+ [0x9] = "rce_p2ps/I/rce_p2ps",
+ [0xa] = "sce_p2ps/I/sce_p2ps",
+ [0xb] = "tseca_p2ps/I/0",
+ [0xc] = "tsecb_p2ps/I/0",
+ [0xd] = "RESERVED",
+ [0xe] = "RESERVED",
+ [0xf] = "RESERVED"
+};
+
+static const char * const tegra194_cbbcentralnoc_routeid_targflow[] = {
+ [0x0] = "SVC/T/intreg",
+ [0x1] = "axis_satellite_axi2apb_p2pm/T/axis_satellite_axi2apb_p2pm",
+ [0x2] = "axis_satellite_grout/T/axis_satellite_grout",
+ [0x3] = "cbb_firewall/T/cbb_firewall",
+ [0x4] = "gpu_p2pm/T/gpu_p2pm",
+ [0x5] = "host1x_p2pm/T/host1x_p2pm",
+ [0x6] = "sapb_3_p2pm/T/sapb_3_p2pm",
+ [0x7] = "smmu0_p2pm/T/smmu0_p2pm",
+ [0x8] = "smmu1_p2pm/T/smmu1_p2pm",
+ [0x9] = "smmu2_p2pm/T/smmu2_p2pm",
+ [0xa] = "stm_p2pm/T/stm_p2pm",
+ [0xb] = "RESERVED",
+ [0xc] = "RESERVED",
+ [0xd] = "RESERVED",
+ [0xe] = "RESERVED",
+ [0xf] = "RESERVED"
+};
+
+/*
+ * Fields of CBB NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_cbbcentralnoc_apert_lookup[] = {
+ { 0x0, 0x0, 0x00, 0x0, 0x02300000, 0, 0x00000000 },
+ { 0x0, 0x1, 0x00, 0x0, 0x02003000, 0, 0x02003000 },
+ { 0x0, 0x1, 0x01, 0x0, 0x02006000, 2, 0x02006000 },
+ { 0x0, 0x1, 0x02, 0x0, 0x02016000, 3, 0x02016000 },
+ { 0x0, 0x1, 0x03, 0x0, 0x0201d000, 4, 0x0201d000 },
+ { 0x0, 0x1, 0x04, 0x0, 0x0202b000, 6, 0x0202b000 },
+ { 0x0, 0x1, 0x05, 0x0, 0x02434000, 20, 0x02434000 },
+ { 0x0, 0x1, 0x06, 0x0, 0x02436000, 21, 0x02436000 },
+ { 0x0, 0x1, 0x07, 0x0, 0x02438000, 22, 0x02438000 },
+ { 0x0, 0x1, 0x08, 0x0, 0x02445000, 24, 0x02445000 },
+ { 0x0, 0x1, 0x09, 0x0, 0x02446000, 25, 0x02446000 },
+ { 0x0, 0x1, 0x0a, 0x0, 0x02004000, 1, 0x02004000 },
+ { 0x0, 0x1, 0x0b, 0x0, 0x0201e000, 5, 0x0201e000 },
+ { 0x0, 0x1, 0x0c, 0x0, 0x0202c000, 7, 0x0202c000 },
+ { 0x0, 0x1, 0x0d, 0x0, 0x02204000, 8, 0x02204000 },
+ { 0x0, 0x1, 0x0e, 0x0, 0x02214000, 9, 0x02214000 },
+ { 0x0, 0x1, 0x0f, 0x0, 0x02224000, 10, 0x02224000 },
+ { 0x0, 0x1, 0x10, 0x0, 0x02234000, 11, 0x02234000 },
+ { 0x0, 0x1, 0x11, 0x0, 0x02244000, 12, 0x02244000 },
+ { 0x0, 0x1, 0x12, 0x0, 0x02254000, 13, 0x02254000 },
+ { 0x0, 0x1, 0x13, 0x0, 0x02264000, 14, 0x02264000 },
+ { 0x0, 0x1, 0x14, 0x0, 0x02274000, 15, 0x02274000 },
+ { 0x0, 0x1, 0x15, 0x0, 0x02284000, 16, 0x02284000 },
+ { 0x0, 0x1, 0x16, 0x0, 0x0243a000, 23, 0x0243a000 },
+ { 0x0, 0x1, 0x17, 0x0, 0x02370000, 17, 0x02370000 },
+ { 0x0, 0x1, 0x18, 0x0, 0x023d0000, 18, 0x023d0000 },
+ { 0x0, 0x1, 0x19, 0x0, 0x023e0000, 19, 0x023e0000 },
+ { 0x0, 0x1, 0x1a, 0x0, 0x02450000, 26, 0x02450000 },
+ { 0x0, 0x1, 0x1b, 0x0, 0x02460000, 27, 0x02460000 },
+ { 0x0, 0x1, 0x1c, 0x0, 0x02490000, 28, 0x02490000 },
+ { 0x0, 0x1, 0x1d, 0x0, 0x03130000, 31, 0x03130000 },
+ { 0x0, 0x1, 0x1e, 0x0, 0x03160000, 32, 0x03160000 },
+ { 0x0, 0x1, 0x1f, 0x0, 0x03270000, 33, 0x03270000 },
+ { 0x0, 0x1, 0x20, 0x0, 0x032e0000, 35, 0x032e0000 },
+ { 0x0, 0x1, 0x21, 0x0, 0x03300000, 36, 0x03300000 },
+ { 0x0, 0x1, 0x22, 0x0, 0x13090000, 40, 0x13090000 },
+ { 0x0, 0x1, 0x23, 0x0, 0x20120000, 43, 0x20120000 },
+ { 0x0, 0x1, 0x24, 0x0, 0x20170000, 44, 0x20170000 },
+ { 0x0, 0x1, 0x25, 0x0, 0x20190000, 45, 0x20190000 },
+ { 0x0, 0x1, 0x26, 0x0, 0x201b0000, 46, 0x201b0000 },
+ { 0x0, 0x1, 0x27, 0x0, 0x20250000, 47, 0x20250000 },
+ { 0x0, 0x1, 0x28, 0x0, 0x20260000, 48, 0x20260000 },
+ { 0x0, 0x1, 0x29, 0x0, 0x20420000, 49, 0x20420000 },
+ { 0x0, 0x1, 0x2a, 0x0, 0x20460000, 50, 0x20460000 },
+ { 0x0, 0x1, 0x2b, 0x0, 0x204f0000, 51, 0x204f0000 },
+ { 0x0, 0x1, 0x2c, 0x0, 0x20520000, 52, 0x20520000 },
+ { 0x0, 0x1, 0x2d, 0x0, 0x20580000, 53, 0x20580000 },
+ { 0x0, 0x1, 0x2e, 0x0, 0x205a0000, 54, 0x205a0000 },
+ { 0x0, 0x1, 0x2f, 0x0, 0x205c0000, 55, 0x205c0000 },
+ { 0x0, 0x1, 0x30, 0x0, 0x20690000, 56, 0x20690000 },
+ { 0x0, 0x1, 0x31, 0x0, 0x20770000, 57, 0x20770000 },
+ { 0x0, 0x1, 0x32, 0x0, 0x20790000, 58, 0x20790000 },
+ { 0x0, 0x1, 0x33, 0x0, 0x20880000, 59, 0x20880000 },
+ { 0x0, 0x1, 0x34, 0x0, 0x20990000, 62, 0x20990000 },
+ { 0x0, 0x1, 0x35, 0x0, 0x20e10000, 65, 0x20e10000 },
+ { 0x0, 0x1, 0x36, 0x0, 0x20e70000, 66, 0x20e70000 },
+ { 0x0, 0x1, 0x37, 0x0, 0x20e80000, 67, 0x20e80000 },
+ { 0x0, 0x1, 0x38, 0x0, 0x20f30000, 68, 0x20f30000 },
+ { 0x0, 0x1, 0x39, 0x0, 0x20f50000, 69, 0x20f50000 },
+ { 0x0, 0x1, 0x3a, 0x0, 0x20fc0000, 70, 0x20fc0000 },
+ { 0x0, 0x1, 0x3b, 0x0, 0x21110000, 72, 0x21110000 },
+ { 0x0, 0x1, 0x3c, 0x0, 0x21270000, 73, 0x21270000 },
+ { 0x0, 0x1, 0x3d, 0x0, 0x21290000, 74, 0x21290000 },
+ { 0x0, 0x1, 0x3e, 0x0, 0x21840000, 75, 0x21840000 },
+ { 0x0, 0x1, 0x3f, 0x0, 0x21880000, 76, 0x21880000 },
+ { 0x0, 0x1, 0x40, 0x0, 0x218d0000, 77, 0x218d0000 },
+ { 0x0, 0x1, 0x41, 0x0, 0x21950000, 78, 0x21950000 },
+ { 0x0, 0x1, 0x42, 0x0, 0x21960000, 79, 0x21960000 },
+ { 0x0, 0x1, 0x43, 0x0, 0x21a10000, 80, 0x21a10000 },
+ { 0x0, 0x1, 0x44, 0x0, 0x024a0000, 29, 0x024a0000 },
+ { 0x0, 0x1, 0x45, 0x0, 0x024c0000, 30, 0x024c0000 },
+ { 0x0, 0x1, 0x46, 0x0, 0x032c0000, 34, 0x032c0000 },
+ { 0x0, 0x1, 0x47, 0x0, 0x03400000, 37, 0x03400000 },
+ { 0x0, 0x1, 0x48, 0x0, 0x130a0000, 41, 0x130a0000 },
+ { 0x0, 0x1, 0x49, 0x0, 0x130c0000, 42, 0x130c0000 },
+ { 0x0, 0x1, 0x4a, 0x0, 0x208a0000, 60, 0x208a0000 },
+ { 0x0, 0x1, 0x4b, 0x0, 0x208c0000, 61, 0x208c0000 },
+ { 0x0, 0x1, 0x4c, 0x0, 0x209a0000, 63, 0x209a0000 },
+ { 0x0, 0x1, 0x4d, 0x0, 0x21a40000, 81, 0x21a40000 },
+ { 0x0, 0x1, 0x4e, 0x0, 0x03440000, 38, 0x03440000 },
+ { 0x0, 0x1, 0x4f, 0x0, 0x20d00000, 64, 0x20d00000 },
+ { 0x0, 0x1, 0x50, 0x0, 0x21000000, 71, 0x21000000 },
+ { 0x0, 0x1, 0x51, 0x0, 0x0b000000, 39, 0x0b000000 },
+ { 0x0, 0x2, 0x00, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x3, 0x00, 0x0, 0x02340000, 0, 0x00000000 },
+ { 0x0, 0x4, 0x00, 0x0, 0x17000000, 0, 0x17000000 },
+ { 0x0, 0x4, 0x01, 0x0, 0x18000000, 1, 0x18000000 },
+ { 0x0, 0x5, 0x00, 0x0, 0x13e80000, 1, 0x13e80000 },
+ { 0x0, 0x5, 0x01, 0x0, 0x15810000, 12, 0x15810000 },
+ { 0x0, 0x5, 0x02, 0x0, 0x15840000, 14, 0x15840000 },
+ { 0x0, 0x5, 0x03, 0x0, 0x15a40000, 17, 0x15a40000 },
+ { 0x0, 0x5, 0x04, 0x0, 0x13f00000, 3, 0x13f00000 },
+ { 0x0, 0x5, 0x05, 0x0, 0x15820000, 13, 0x15820000 },
+ { 0x0, 0x5, 0x06, 0x0, 0x13ec0000, 2, 0x13ec0000 },
+ { 0x0, 0x5, 0x07, 0x0, 0x15200000, 6, 0x15200000 },
+ { 0x0, 0x5, 0x08, 0x0, 0x15340000, 7, 0x15340000 },
+ { 0x0, 0x5, 0x09, 0x0, 0x15380000, 8, 0x15380000 },
+ { 0x0, 0x5, 0x0a, 0x0, 0x15500000, 10, 0x15500000 },
+ { 0x0, 0x5, 0x0b, 0x0, 0x155c0000, 11, 0x155c0000 },
+ { 0x0, 0x5, 0x0c, 0x0, 0x15a00000, 16, 0x15a00000 },
+ { 0x0, 0x5, 0x0d, 0x0, 0x13e00000, 0, 0x13e00000 },
+ { 0x0, 0x5, 0x0e, 0x0, 0x15100000, 5, 0x15100000 },
+ { 0x0, 0x5, 0x0f, 0x0, 0x15480000, 9, 0x15480000 },
+ { 0x0, 0x5, 0x10, 0x0, 0x15880000, 15, 0x15880000 },
+ { 0x0, 0x5, 0x11, 0x0, 0x15a80000, 18, 0x15a80000 },
+ { 0x0, 0x5, 0x12, 0x0, 0x15b00000, 19, 0x15b00000 },
+ { 0x0, 0x5, 0x13, 0x0, 0x14800000, 4, 0x14800000 },
+ { 0x0, 0x5, 0x14, 0x0, 0x15c00000, 20, 0x15c00000 },
+ { 0x0, 0x5, 0x15, 0x0, 0x16000000, 21, 0x16000000 },
+ { 0x0, 0x6, 0x00, 0x0, 0x02000000, 4, 0x02000000 },
+ { 0x0, 0x6, 0x01, 0x0, 0x02007000, 5, 0x02007000 },
+ { 0x0, 0x6, 0x02, 0x0, 0x02008000, 6, 0x02008000 },
+ { 0x0, 0x6, 0x03, 0x0, 0x02013000, 7, 0x02013000 },
+ { 0x0, 0x6, 0x04, 0x0, 0x0201c000, 8, 0x0201c000 },
+ { 0x0, 0x6, 0x05, 0x0, 0x02020000, 9, 0x02020000 },
+ { 0x0, 0x6, 0x06, 0x0, 0x0202a000, 10, 0x0202a000 },
+ { 0x0, 0x6, 0x07, 0x0, 0x0202e000, 11, 0x0202e000 },
+ { 0x0, 0x6, 0x08, 0x0, 0x06400000, 33, 0x06400000 },
+ { 0x0, 0x6, 0x09, 0x0, 0x02038000, 12, 0x02038000 },
+ { 0x0, 0x6, 0x0a, 0x0, 0x00100000, 0, 0x00100000 },
+ { 0x0, 0x6, 0x0b, 0x0, 0x023b0000, 13, 0x023b0000 },
+ { 0x0, 0x6, 0x0c, 0x0, 0x02800000, 16, 0x02800000 },
+ { 0x0, 0x6, 0x0d, 0x0, 0x030e0000, 22, 0x030e0000 },
+ { 0x0, 0x6, 0x0e, 0x0, 0x03800000, 23, 0x03800000 },
+ { 0x0, 0x6, 0x0f, 0x0, 0x03980000, 25, 0x03980000 },
+ { 0x0, 0x6, 0x10, 0x0, 0x03a60000, 26, 0x03a60000 },
+ { 0x0, 0x6, 0x11, 0x0, 0x03d80000, 31, 0x03d80000 },
+ { 0x0, 0x6, 0x12, 0x0, 0x20000000, 36, 0x20000000 },
+ { 0x0, 0x6, 0x13, 0x0, 0x20050000, 38, 0x20050000 },
+ { 0x0, 0x6, 0x14, 0x0, 0x201e0000, 40, 0x201e0000 },
+ { 0x0, 0x6, 0x15, 0x0, 0x20280000, 42, 0x20280000 },
+ { 0x0, 0x6, 0x16, 0x0, 0x202c0000, 43, 0x202c0000 },
+ { 0x0, 0x6, 0x17, 0x0, 0x20390000, 44, 0x20390000 },
+ { 0x0, 0x6, 0x18, 0x0, 0x20430000, 45, 0x20430000 },
+ { 0x0, 0x6, 0x19, 0x0, 0x20440000, 46, 0x20440000 },
+ { 0x0, 0x6, 0x1a, 0x0, 0x204e0000, 47, 0x204e0000 },
+ { 0x0, 0x6, 0x1b, 0x0, 0x20550000, 48, 0x20550000 },
+ { 0x0, 0x6, 0x1c, 0x0, 0x20570000, 49, 0x20570000 },
+ { 0x0, 0x6, 0x1d, 0x0, 0x20590000, 50, 0x20590000 },
+ { 0x0, 0x6, 0x1e, 0x0, 0x20730000, 52, 0x20730000 },
+ { 0x0, 0x6, 0x1f, 0x0, 0x209f0000, 54, 0x209f0000 },
+ { 0x0, 0x6, 0x20, 0x0, 0x20e20000, 55, 0x20e20000 },
+ { 0x0, 0x6, 0x21, 0x0, 0x20ed0000, 56, 0x20ed0000 },
+ { 0x0, 0x6, 0x22, 0x0, 0x20fd0000, 57, 0x20fd0000 },
+ { 0x0, 0x6, 0x23, 0x0, 0x21120000, 59, 0x21120000 },
+ { 0x0, 0x6, 0x24, 0x0, 0x211a0000, 60, 0x211a0000 },
+ { 0x0, 0x6, 0x25, 0x0, 0x21850000, 61, 0x21850000 },
+ { 0x0, 0x6, 0x26, 0x0, 0x21860000, 62, 0x21860000 },
+ { 0x0, 0x6, 0x27, 0x0, 0x21890000, 63, 0x21890000 },
+ { 0x0, 0x6, 0x28, 0x0, 0x21970000, 64, 0x21970000 },
+ { 0x0, 0x6, 0x29, 0x0, 0x21990000, 65, 0x21990000 },
+ { 0x0, 0x6, 0x2a, 0x0, 0x21a00000, 66, 0x21a00000 },
+ { 0x0, 0x6, 0x2b, 0x0, 0x21a90000, 68, 0x21a90000 },
+ { 0x0, 0x6, 0x2c, 0x0, 0x21ac0000, 70, 0x21ac0000 },
+ { 0x0, 0x6, 0x2d, 0x0, 0x01f80000, 3, 0x01f80000 },
+ { 0x0, 0x6, 0x2e, 0x0, 0x024e0000, 14, 0x024e0000 },
+ { 0x0, 0x6, 0x2f, 0x0, 0x030c0000, 21, 0x030c0000 },
+ { 0x0, 0x6, 0x30, 0x0, 0x03820000, 24, 0x03820000 },
+ { 0x0, 0x6, 0x31, 0x0, 0x03aa0000, 27, 0x03aa0000 },
+ { 0x0, 0x6, 0x32, 0x0, 0x03c80000, 29, 0x03c80000 },
+ { 0x0, 0x6, 0x33, 0x0, 0x130e0000, 34, 0x130e0000 },
+ { 0x0, 0x6, 0x34, 0x0, 0x20020000, 37, 0x20020000 },
+ { 0x0, 0x6, 0x35, 0x0, 0x20060000, 39, 0x20060000 },
+ { 0x0, 0x6, 0x36, 0x0, 0x20200000, 41, 0x20200000 },
+ { 0x0, 0x6, 0x37, 0x0, 0x206a0000, 51, 0x206a0000 },
+ { 0x0, 0x6, 0x38, 0x0, 0x20740000, 53, 0x20740000 },
+ { 0x0, 0x6, 0x39, 0x0, 0x20fe0000, 58, 0x20fe0000 },
+ { 0x0, 0x6, 0x3a, 0x0, 0x21a20000, 67, 0x21a20000 },
+ { 0x0, 0x6, 0x3b, 0x0, 0x21aa0000, 69, 0x21aa0000 },
+ { 0x0, 0x6, 0x3c, 0x0, 0x02b80000, 17, 0x02b80000 },
+ { 0x0, 0x6, 0x3d, 0x0, 0x03080000, 20, 0x03080000 },
+ { 0x0, 0x6, 0x3e, 0x0, 0x13100000, 35, 0x13100000 },
+ { 0x0, 0x6, 0x3f, 0x0, 0x01f00000, 2, 0x01f00000 },
+ { 0x0, 0x6, 0x40, 0x0, 0x03000000, 19, 0x03000000 },
+ { 0x0, 0x6, 0x41, 0x0, 0x03c00000, 28, 0x03c00000 },
+ { 0x0, 0x6, 0x42, 0x0, 0x03d00000, 30, 0x03d00000 },
+ { 0x0, 0x6, 0x43, 0x0, 0x01700000, 1, 0x01700000 },
+ { 0x0, 0x6, 0x44, 0x0, 0x02c00000, 18, 0x02c00000 },
+ { 0x0, 0x6, 0x45, 0x0, 0x02600000, 15, 0x02600000 },
+ { 0x0, 0x6, 0x46, 0x0, 0x06000000, 32, 0x06000000 },
+ { 0x0, 0x6, 0x47, 0x0, 0x24000000, 71, 0x24000000 },
+ { 0x0, 0x7, 0x00, 0x0, 0x12000000, 0, 0x12000000 },
+ { 0x0, 0x8, 0x00, 0x0, 0x11000000, 0, 0x11000000 },
+ { 0x0, 0x9, 0x00, 0x0, 0x10000000, 0, 0x10000000 },
+ { 0x0, 0xa, 0x00, 0x0, 0x22000000, 0, 0x22000000 }
+};
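+
+/*
+ * Editor-added illustrative sketch, not part of the original patch: the
+ * aperture tables can be searched with the init flow, targ flow and targ
+ * subrange decoded from a captured route ID to recover the base address of
+ * the aperture that was accessed. The field names (initflow, targflow,
+ * targ_subrange, init_localaddress) are assumed here; the real layout of
+ * struct tegra194_cbb_aperture is defined elsewhere in the driver.
+ */
+static u32 tegra194_cbb_find_aperture(const struct tegra194_cbb_aperture *table,
+          unsigned int num, u32 initflow,
+          u32 targflow, u32 targ_subrange)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+  if (table[i].initflow == initflow && table[i].targflow == targflow &&
+      table[i].targ_subrange == targ_subrange)
+   return table[i].init_localaddress;
+ }
+
+ return 0;
+}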
+
+/*
+ * BPMP NOC aperture lookup table as per file "BPMP_NOC_Structure.info".
+ */
+static const char * const tegra194_bpmpnoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_m_i/I/0",
+ [0x2] = "cpu_p_i/I/0",
+ [0x3] = "cvc_i/I/0",
+ [0x4] = "dma_m_i/I/0",
+ [0x5] = "dma_p_i/I/0",
+ [0x6] = "RESERVED",
+ [0x7] = "RESERVED"
+};
+
+static const char * const tegra194_bpmpnoc_routeid_targflow[] = {
+ [0x00] = "multiport0_t/T/actmon",
+ [0x01] = "multiport0_t/T/ast_0",
+ [0x02] = "multiport0_t/T/ast_1",
+ [0x03] = "multiport0_t/T/atcm_cfg",
+ [0x04] = "multiport0_t/T/car",
+ [0x05] = "multiport0_t/T/central_pwr_mgr",
+ [0x06] = "multiport0_t/T/central_vtg_ctlr",
+ [0x07] = "multiport0_t/T/cfg",
+ [0x08] = "multiport0_t/T/dma",
+ [0x09] = "multiport0_t/T/err_collator",
+ [0x0a] = "multiport0_t/T/err_collator_car",
+ [0x0b] = "multiport0_t/T/fpga_misc",
+ [0x0c] = "multiport0_t/T/fpga_uart",
+ [0x0d] = "multiport0_t/T/gte",
+ [0x0e] = "multiport0_t/T/hsp",
+ [0x0f] = "multiport0_t/T/misc",
+ [0x10] = "multiport0_t/T/pm",
+ [0x11] = "multiport0_t/T/simon0",
+ [0x12] = "multiport0_t/T/simon1",
+ [0x13] = "multiport0_t/T/simon2",
+ [0x14] = "multiport0_t/T/simon3",
+ [0x15] = "multiport0_t/T/simon4",
+ [0x16] = "multiport0_t/T/soc_therm",
+ [0x17] = "multiport0_t/T/tke",
+ [0x18] = "multiport0_t/T/vic_0",
+ [0x19] = "multiport0_t/T/vic_1",
+ [0x1a] = "ast0_t/T/0",
+ [0x1b] = "ast1_t/T/0",
+ [0x1c] = "bpmp_noc_firewall/T/0",
+ [0x1d] = "cbb_t/T/0",
+ [0x1e] = "cpu_t/T/0",
+ [0x1f] = "svc_t/T/0"
+};
+
+/*
+ * Fields of BPMP NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_bpmpnoc_apert_lookup[] = {
+ { 0x0, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x0, 0x1e, 0x0, 0x0, 0x0d400000, 0, 0x0d400000 },
+ { 0x0, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x0, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x0, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x0, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x0, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x0, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x0, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x0, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x0, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x0, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x0, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x0, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x0, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x0, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x0, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x0, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x0, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x0, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x0, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x0, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x0, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x0, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x0, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x0, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x0, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x0, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x0, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x0, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x0, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x0, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x0, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x0, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x0, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x0, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x0, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x0, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x0, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x0, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x0, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x0, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x0, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x1a, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x1, 0x1a, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x1, 0x1a, 0x2, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x2, 0x1d, 0x0, 0x0, 0x20b00000, 8, 0x20b00000 },
+ { 0x2, 0x1d, 0x1, 0x0, 0x20800000, 7, 0x20800000 },
+ { 0x2, 0x1d, 0x2, 0x0, 0x20c00000, 9, 0x20c00000 },
+ { 0x2, 0x1d, 0x3, 0x0, 0x0d800000, 3, 0x0d800000 },
+ { 0x2, 0x1d, 0x4, 0x0, 0x20000000, 6, 0x20000000 },
+ { 0x2, 0x1d, 0x5, 0x0, 0x0c000000, 2, 0x0c000000 },
+ { 0x2, 0x1d, 0x6, 0x0, 0x21000000, 10, 0x21000000 },
+ { 0x2, 0x1d, 0x7, 0x0, 0x0e000000, 4, 0x0e000000 },
+ { 0x2, 0x1d, 0x8, 0x0, 0x22000000, 11, 0x22000000 },
+ { 0x2, 0x1d, 0x9, 0x0, 0x08000000, 1, 0x08000000 },
+ { 0x2, 0x1d, 0xa, 0x0, 0x24000000, 12, 0x24000000 },
+ { 0x2, 0x1d, 0xb, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x1d, 0xc, 0x0, 0x28000000, 13, 0x28000000 },
+ { 0x2, 0x1d, 0xd, 0x0, 0x10000000, 5, 0x10000000 },
+ { 0x2, 0x1d, 0xe, 0x0, 0x30000000, 14, 0x30000000 },
+ { 0x2, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x2, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x2, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x2, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x2, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x2, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x2, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x2, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x2, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x2, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x2, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x2, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x2, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x2, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x2, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x2, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x2, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x2, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x2, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x2, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x2, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x2, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x2, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x2, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x2, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x2, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x2, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x2, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x2, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x2, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x2, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x2, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x2, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x2, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x2, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x2, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x2, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x2, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x2, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x2, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x2, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x2, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x2, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x2, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x2, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x2, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x1b, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x3, 0x1b, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x3, 0x1c, 0x0, 0x2, 0x0d640000, 0, 0x00000000 },
+ { 0x3, 0x1d, 0x0, 0x2, 0x20b00000, 8, 0x20b00000 },
+ { 0x3, 0x1d, 0x1, 0x2, 0x20800000, 7, 0x20800000 },
+ { 0x3, 0x1d, 0x2, 0x2, 0x20c00000, 9, 0x20c00000 },
+ { 0x3, 0x1d, 0x3, 0x2, 0x0d800000, 3, 0x0d800000 },
+ { 0x3, 0x1d, 0x4, 0x2, 0x20000000, 6, 0x20000000 },
+ { 0x3, 0x1d, 0x5, 0x2, 0x0c000000, 2, 0x0c000000 },
+ { 0x3, 0x1d, 0x6, 0x2, 0x21000000, 10, 0x21000000 },
+ { 0x3, 0x1d, 0x7, 0x2, 0x0e000000, 4, 0x0e000000 },
+ { 0x3, 0x1d, 0x8, 0x2, 0x22000000, 11, 0x22000000 },
+ { 0x3, 0x1d, 0x9, 0x2, 0x08000000, 1, 0x08000000 },
+ { 0x3, 0x1d, 0xa, 0x2, 0x24000000, 12, 0x24000000 },
+ { 0x3, 0x1d, 0xb, 0x2, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x1d, 0xc, 0x2, 0x28000000, 13, 0x28000000 },
+ { 0x3, 0x1d, 0xd, 0x2, 0x10000000, 5, 0x10000000 },
+ { 0x3, 0x1d, 0xe, 0x2, 0x30000000, 14, 0x30000000 },
+ { 0x3, 0x1e, 0x0, 0x2, 0x0d400000, 0, 0x0d400000 },
+ { 0x3, 0x00, 0x0, 0x2, 0x0d230000, 0, 0x00000000 },
+ { 0x3, 0x01, 0x0, 0x2, 0x0d040000, 0, 0x00000000 },
+ { 0x3, 0x02, 0x0, 0x2, 0x0d050000, 0, 0x00000000 },
+ { 0x3, 0x03, 0x0, 0x2, 0x0d000000, 0, 0x00000000 },
+ { 0x3, 0x04, 0x0, 0x2, 0x20ae0000, 3, 0x000e0000 },
+ { 0x3, 0x04, 0x1, 0x2, 0x20ac0000, 2, 0x000c0000 },
+ { 0x3, 0x04, 0x2, 0x2, 0x20a80000, 1, 0x00080000 },
+ { 0x3, 0x04, 0x3, 0x2, 0x20a00000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x0, 0x2, 0x0d2a0000, 0, 0x00000000 },
+ { 0x3, 0x06, 0x0, 0x2, 0x0d290000, 0, 0x00000000 },
+ { 0x3, 0x07, 0x0, 0x2, 0x0d2c0000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x0, 0x2, 0x0d0e0000, 4, 0x00080000 },
+ { 0x3, 0x08, 0x1, 0x2, 0x0d060000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x2, 0x2, 0x0d080000, 1, 0x00020000 },
+ { 0x3, 0x08, 0x3, 0x2, 0x0d0a0000, 2, 0x00040000 },
+ { 0x3, 0x08, 0x4, 0x2, 0x0d0c0000, 3, 0x00060000 },
+ { 0x3, 0x09, 0x0, 0x2, 0x0d650000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x0, 0x2, 0x20af0000, 0, 0x00000000 },
+ { 0x3, 0x0b, 0x0, 0x2, 0x0d3e0000, 0, 0x00000000 },
+ { 0x3, 0x0c, 0x0, 0x2, 0x0d3d0000, 0, 0x00000000 },
+ { 0x3, 0x0d, 0x0, 0x2, 0x0d1e0000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x0, 0x2, 0x0d150000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x1, 0x2, 0x0d160000, 1, 0x00010000 },
+ { 0x3, 0x0e, 0x2, 0x2, 0x0d170000, 2, 0x00020000 },
+ { 0x3, 0x0e, 0x3, 0x2, 0x0d180000, 3, 0x00030000 },
+ { 0x3, 0x0e, 0x4, 0x2, 0x0d190000, 4, 0x00040000 },
+ { 0x3, 0x0e, 0x5, 0x2, 0x0d1a0000, 5, 0x00050000 },
+ { 0x3, 0x0e, 0x6, 0x2, 0x0d1b0000, 6, 0x00060000 },
+ { 0x3, 0x0e, 0x7, 0x2, 0x0d1c0000, 7, 0x00070000 },
+ { 0x3, 0x0e, 0x8, 0x2, 0x0d1d0000, 8, 0x00080000 },
+ { 0x3, 0x0f, 0x0, 0x2, 0x0d660000, 0, 0x00000000 },
+ { 0x3, 0x10, 0x0, 0x2, 0x0d1f0000, 0, 0x00000000 },
+ { 0x3, 0x10, 0x1, 0x2, 0x0d200000, 1, 0x00010000 },
+ { 0x3, 0x10, 0x2, 0x2, 0x0d210000, 2, 0x00020000 },
+ { 0x3, 0x10, 0x3, 0x2, 0x0d220000, 3, 0x00030000 },
+ { 0x3, 0x11, 0x0, 0x2, 0x0d240000, 0, 0x00000000 },
+ { 0x3, 0x12, 0x0, 0x2, 0x0d250000, 0, 0x00000000 },
+ { 0x3, 0x13, 0x0, 0x2, 0x0d260000, 0, 0x00000000 },
+ { 0x3, 0x14, 0x0, 0x2, 0x0d270000, 0, 0x00000000 },
+ { 0x3, 0x15, 0x0, 0x2, 0x0d2b0000, 0, 0x00000000 },
+ { 0x3, 0x16, 0x0, 0x2, 0x0d280000, 0, 0x00000000 },
+ { 0x3, 0x17, 0x0, 0x2, 0x0d0f0000, 0, 0x00000000 },
+ { 0x3, 0x17, 0x1, 0x2, 0x0d100000, 1, 0x00010000 },
+ { 0x3, 0x17, 0x2, 0x2, 0x0d110000, 2, 0x00020000 },
+ { 0x3, 0x17, 0x3, 0x2, 0x0d120000, 3, 0x00030000 },
+ { 0x3, 0x17, 0x4, 0x2, 0x0d130000, 4, 0x00040000 },
+ { 0x3, 0x17, 0x5, 0x2, 0x0d140000, 5, 0x00050000 },
+ { 0x3, 0x18, 0x0, 0x2, 0x0d020000, 0, 0x00000000 },
+ { 0x3, 0x19, 0x0, 0x2, 0x0d030000, 0, 0x00000000 },
+ { 0x3, 0x1f, 0x0, 0x2, 0x0d600000, 0, 0x00000000 },
+ { 0x3, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x1b, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x4, 0x1b, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x4, 0x1e, 0x0, 0x2, 0x0d400000, 0, 0x0d400000 },
+ { 0x4, 0x1e, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x5, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x5, 0x1d, 0x0, 0x0, 0x20b00000, 8, 0x20b00000 },
+ { 0x5, 0x1d, 0x1, 0x0, 0x20800000, 7, 0x20800000 },
+ { 0x5, 0x1d, 0x2, 0x0, 0x20c00000, 9, 0x20c00000 },
+ { 0x5, 0x1d, 0x3, 0x0, 0x0d800000, 3, 0x0d800000 },
+ { 0x5, 0x1d, 0x4, 0x0, 0x20000000, 6, 0x20000000 },
+ { 0x5, 0x1d, 0x5, 0x0, 0x0c000000, 2, 0x0c000000 },
+ { 0x5, 0x1d, 0x6, 0x0, 0x21000000, 10, 0x21000000 },
+ { 0x5, 0x1d, 0x7, 0x0, 0x0e000000, 4, 0x0e000000 },
+ { 0x5, 0x1d, 0x8, 0x0, 0x22000000, 11, 0x22000000 },
+ { 0x5, 0x1d, 0x9, 0x0, 0x08000000, 1, 0x08000000 },
+ { 0x5, 0x1d, 0xa, 0x0, 0x24000000, 12, 0x24000000 },
+ { 0x5, 0x1d, 0xb, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x5, 0x1d, 0xc, 0x0, 0x28000000, 13, 0x28000000 },
+ { 0x5, 0x1d, 0xd, 0x0, 0x10000000, 5, 0x10000000 },
+ { 0x5, 0x1d, 0xe, 0x0, 0x30000000, 14, 0x30000000 },
+ { 0x5, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x5, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x5, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x5, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x5, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x5, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x5, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x5, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x5, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x5, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x5, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x5, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x5, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x5, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x5, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x5, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x5, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x5, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x5, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x5, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x5, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x5, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x5, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x5, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x5, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x5, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x5, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x5, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x5, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x5, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x5, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x5, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x5, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x5, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x5, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x5, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x5, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x5, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x5, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x5, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x5, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x5, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x5, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x5, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x5, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x5, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x5, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x5, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x5, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x5, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x5, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 }
+};
+
+/*
+ * AON NOC aperture lookup table as per file "AON_NOC_Structure.info".
+ */
+static const char * const tegra194_aonnoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_p_i/I/0",
+ [0x2] = "dma_m_i/I/0",
+ [0x3] = "dma_p_i/I/0"
+};
+
+static const char * const tegra194_aonnoc_routeid_targflow[] = {
+ [0x00] = "multiport1_t/T/aon_misc",
+ [0x01] = "multiport1_t/T/avic0",
+ [0x02] = "multiport1_t/T/avic1",
+ [0x03] = "multiport1_t/T/can1",
+ [0x04] = "multiport1_t/T/can2",
+ [0x05] = "multiport1_t/T/dma",
+ [0x06] = "multiport1_t/T/dmic",
+ [0x07] = "multiport1_t/T/err_collator",
+ [0x08] = "multiport1_t/T/fpga_misc",
+ [0x09] = "multiport1_t/T/gte",
+ [0x0a] = "multiport1_t/T/hsp",
+ [0x0b] = "multiport1_t/T/i2c2",
+ [0x0c] = "multiport1_t/T/i2c8",
+ [0x0d] = "multiport1_t/T/pwm",
+ [0x0e] = "multiport1_t/T/spi2",
+ [0x0f] = "multiport1_t/T/tke",
+ [0x10] = "multiport1_t/T/uartg",
+ [0x11] = "RESERVED",
+ [0x12] = "RESERVED",
+ [0x13] = "RESERVED",
+ [0x14] = "RESERVED",
+ [0x15] = "RESERVED",
+ [0x16] = "RESERVED",
+ [0x17] = "RESERVED",
+ [0x18] = "RESERVED",
+ [0x19] = "RESERVED",
+ [0x1a] = "RESERVED",
+ [0x1b] = "RESERVED",
+ [0x1c] = "RESERVED",
+ [0x1d] = "RESERVED",
+ [0x1e] = "RESERVED",
+ [0x1f] = "RESERVED",
+ [0x20] = "multiport0_t/T/aovc",
+ [0x21] = "multiport0_t/T/atcm",
+ [0x22] = "multiport0_t/T/cast",
+ [0x23] = "multiport0_t/T/dast",
+ [0x24] = "multiport0_t/T/err_collator_car",
+ [0x25] = "multiport0_t/T/gpio",
+ [0x26] = "multiport0_t/T/i2c10",
+ [0x27] = "multiport0_t/T/mss",
+ [0x28] = "multiport0_t/T/padctl_a12",
+ [0x29] = "multiport0_t/T/padctl_a14",
+ [0x2a] = "multiport0_t/T/padctl_a15",
+ [0x2b] = "multiport0_t/T/rtc",
+ [0x2c] = "multiport0_t/T/tsc",
+ [0x2d] = "RESERVED",
+ [0x2e] = "RESERVED",
+ [0x2f] = "RESERVED",
+ [0x30] = "multiport2_t/T/aon_vref_ro",
+ [0x31] = "multiport2_t/T/aopm",
+ [0x32] = "multiport2_t/T/car",
+ [0x33] = "multiport2_t/T/pmc",
+ [0x34] = "ast1_t/T/0",
+ [0x35] = "cbb_t/T/0",
+ [0x36] = "cpu_t/T/0",
+ [0x37] = "firewall_t/T/0",
+ [0x38] = "svc_t/T/0",
+ [0x39] = "uartc/T/uartc",
+ [0x3a] = "RESERVED",
+ [0x3b] = "RESERVED",
+ [0x3c] = "RESERVED",
+ [0x3d] = "RESERVED",
+ [0x3e] = "RESERVED",
+ [0x3f] = "RESERVED"
+};
+
+/*
+ * Fields of AON NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_aonnoc_aperture_lookup[] = {
+ { 0x0, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x0, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x0, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x0, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x0, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x0, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x0, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x0, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x0, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x0, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x0, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x0, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x0, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x0, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x0, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x0, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x0, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x0, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x0, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x0, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x0, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x0, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x0, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x0, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x0, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x0, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x0, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x0, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x0, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x0, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x0, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x0, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x0, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x0, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x0, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x0, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x0, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x0, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x0, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x0, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x0, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x0, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x0, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x0, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x0, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x0, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x0, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x0, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x0, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x0, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x0, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x0, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x0, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x0, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x0, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x0, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x0, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x0, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x0, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x0, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x0, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 },
+ { 0x1, 0x35, 0x00, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x35, 0x01, 0, 0x00100000, 1, 0x00100000 },
+ { 0x1, 0x35, 0x02, 0, 0x05a00000, 11, 0x05a00000 },
+ { 0x1, 0x35, 0x03, 0, 0x05b00000, 32, 0x05b00000 },
+ { 0x1, 0x35, 0x04, 0, 0x05c00000, 33, 0x05c00000 },
+ { 0x1, 0x35, 0x05, 0, 0x05d00000, 12, 0x05d00000 },
+ { 0x1, 0x35, 0x06, 0, 0x20000000, 19, 0x20000000 },
+ { 0x1, 0x35, 0x07, 0, 0x20100000, 20, 0x20100000 },
+ { 0x1, 0x35, 0x08, 0, 0x20a00000, 24, 0x20a00000 },
+ { 0x1, 0x35, 0x09, 0, 0x20d00000, 25, 0x20d00000 },
+ { 0x1, 0x35, 0x0a, 0, 0x00200000, 2, 0x00200000 },
+ { 0x1, 0x35, 0x0b, 0, 0x05800000, 10, 0x05800000 },
+ { 0x1, 0x35, 0x0c, 0, 0x05e00000, 13, 0x05e00000 },
+ { 0x1, 0x35, 0x0d, 0, 0x20200000, 21, 0x20200000 },
+ { 0x1, 0x35, 0x0e, 0, 0x20800000, 23, 0x20800000 },
+ { 0x1, 0x35, 0x0f, 0, 0x20e00000, 26, 0x20e00000 },
+ { 0x1, 0x35, 0x10, 0, 0x00400000, 3, 0x00400000 },
+ { 0x1, 0x35, 0x11, 0, 0x20400000, 22, 0x20400000 },
+ { 0x1, 0x35, 0x12, 0, 0x00800000, 4, 0x00800000 },
+ { 0x1, 0x35, 0x13, 0, 0x05000000, 9, 0x05000000 },
+ { 0x1, 0x35, 0x14, 0, 0x0c800000, 34, 0x0c800000 },
+ { 0x1, 0x35, 0x15, 0, 0x01000000, 5, 0x01000000 },
+ { 0x1, 0x35, 0x16, 0, 0x03000000, 7, 0x03000000 },
+ { 0x1, 0x35, 0x17, 0, 0x04000000, 8, 0x04000000 },
+ { 0x1, 0x35, 0x18, 0, 0x0d000000, 16, 0x0d000000 },
+ { 0x1, 0x35, 0x19, 0, 0x21000000, 27, 0x21000000 },
+ { 0x1, 0x35, 0x1a, 0, 0x02000000, 6, 0x02000000 },
+ { 0x1, 0x35, 0x1b, 0, 0x06000000, 14, 0x06000000 },
+ { 0x1, 0x35, 0x1c, 0, 0x0e000000, 17, 0x0e000000 },
+ { 0x1, 0x35, 0x1d, 0, 0x22000000, 28, 0x22000000 },
+ { 0x1, 0x35, 0x1e, 0, 0x08000000, 15, 0x08000000 },
+ { 0x1, 0x35, 0x1f, 0, 0x24000000, 29, 0x24000000 },
+ { 0x1, 0x35, 0x20, 0, 0x28000000, 30, 0x28000000 },
+ { 0x1, 0x35, 0x21, 0, 0x10000000, 18, 0x10000000 },
+ { 0x1, 0x35, 0x22, 0, 0x30000000, 31, 0x30000000 },
+ { 0x1, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x1, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x1, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x1, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x1, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x1, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x1, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x1, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x1, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x1, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x1, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x1, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x1, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x1, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x1, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x1, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x1, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x1, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x1, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x1, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x1, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x1, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x1, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x1, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x1, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x1, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x1, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x1, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x1, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x1, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x1, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x1, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x1, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x1, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x1, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x1, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x1, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x1, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x1, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x1, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x1, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x1, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x1, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x1, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x1, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x1, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x1, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x1, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x1, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x1, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x1, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x1, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x1, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x1, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x1, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x1, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x1, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x1, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x1, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x1, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x1, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x1, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x1, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x1, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x1, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x1, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x1, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 },
+ { 0x2, 0x34, 0x00, 0, 0x40000000, 0, 0x40000000 },
+ { 0x2, 0x34, 0x01, 0, 0x80000000, 1, 0x80000000 },
+ { 0x2, 0x36, 0x00, 0, 0x0c400000, 0, 0x0c400000 },
+ { 0x2, 0x36, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x35, 0x00, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x35, 0x01, 0, 0x00100000, 1, 0x00100000 },
+ { 0x3, 0x35, 0x02, 0, 0x05a00000, 11, 0x05a00000 },
+ { 0x3, 0x35, 0x03, 0, 0x05b00000, 32, 0x05b00000 },
+ { 0x3, 0x35, 0x04, 0, 0x05c00000, 33, 0x05c00000 },
+ { 0x3, 0x35, 0x05, 0, 0x05d00000, 12, 0x05d00000 },
+ { 0x3, 0x35, 0x06, 0, 0x20000000, 19, 0x20000000 },
+ { 0x3, 0x35, 0x07, 0, 0x20100000, 20, 0x20100000 },
+ { 0x3, 0x35, 0x08, 0, 0x20a00000, 24, 0x20a00000 },
+ { 0x3, 0x35, 0x09, 0, 0x20d00000, 25, 0x20d00000 },
+ { 0x3, 0x35, 0x0a, 0, 0x00200000, 2, 0x00200000 },
+ { 0x3, 0x35, 0x0b, 0, 0x05800000, 10, 0x05800000 },
+ { 0x3, 0x35, 0x0c, 0, 0x05e00000, 13, 0x05e00000 },
+ { 0x3, 0x35, 0x0d, 0, 0x20200000, 21, 0x20200000 },
+ { 0x3, 0x35, 0x0e, 0, 0x20800000, 23, 0x20800000 },
+ { 0x3, 0x35, 0x0f, 0, 0x20e00000, 26, 0x20e00000 },
+ { 0x3, 0x35, 0x10, 0, 0x00400000, 3, 0x00400000 },
+ { 0x3, 0x35, 0x11, 0, 0x20400000, 22, 0x20400000 },
+ { 0x3, 0x35, 0x12, 0, 0x00800000, 4, 0x00800000 },
+ { 0x3, 0x35, 0x13, 0, 0x50000000, 9, 0x05000000 },
+ { 0x3, 0x35, 0x14, 0, 0xc0800000, 34, 0x0c800000 },
+ { 0x3, 0x35, 0x15, 0, 0x10000000, 5, 0x01000000 },
+ { 0x3, 0x35, 0x16, 0, 0x30000000, 7, 0x03000000 },
+ { 0x3, 0x35, 0x17, 0, 0x04000000, 8, 0x04000000 },
+ { 0x3, 0x35, 0x18, 0, 0x0d000000, 16, 0x0d000000 },
+ { 0x3, 0x35, 0x19, 0, 0x21000000, 27, 0x21000000 },
+ { 0x3, 0x35, 0x1a, 0, 0x02000000, 6, 0x02000000 },
+ { 0x3, 0x35, 0x1b, 0, 0x06000000, 14, 0x06000000 },
+ { 0x3, 0x35, 0x1c, 0, 0x0e000000, 17, 0x0e000000 },
+ { 0x3, 0x35, 0x1d, 0, 0x22000000, 28, 0x22000000 },
+ { 0x3, 0x35, 0x1e, 0, 0x08000000, 15, 0x08000000 },
+ { 0x3, 0x35, 0x1f, 0, 0x24000000, 29, 0x24000000 },
+ { 0x3, 0x35, 0x20, 0, 0x28000000, 30, 0x28000000 },
+ { 0x3, 0x35, 0x21, 0, 0x10000000, 18, 0x10000000 },
+ { 0x3, 0x35, 0x22, 0, 0x30000000, 31, 0x30000000 },
+ { 0x3, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x3, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x3, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x3, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x3, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x3, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x3, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x3, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x3, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x3, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x3, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x3, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x3, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x3, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x3, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x3, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x3, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x3, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x3, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x3, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x3, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x3, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x3, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x3, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x3, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x3, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x3, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x3, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x3, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x3, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x3, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x3, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x3, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x3, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x3, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x3, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x3, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x3, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x3, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x3, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x3, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x3, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x3, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x3, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x3, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x3, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x3, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x3, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x3, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x3, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x3, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x3, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x3, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x3, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x3, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x3, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x3, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x3, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x3, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x3, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x3, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 }
+};
+
+/*
+ * SCE/RCE NOC aperture lookup table as per file "AON_NOC_Structure.info".
+ */
+static const char * const tegra194_scenoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_m_i/I/0",
+ [0x2] = "cpu_p_i/I/0",
+ [0x3] = "dma_m_i/I/0",
+ [0x4] = "dma_p_i/I/0",
+ [0x5] = "RESERVED",
+ [0x6] = "RESERVED",
+ [0x7] = "RESERVED"
+};
+
+static const char * const tegra194_scenoc_routeid_targflow[] = {
+ [0x00] = "multiport0_t/T/atcm_cfg",
+ [0x01] = "multiport0_t/T/car",
+ [0x02] = "multiport0_t/T/cast",
+ [0x03] = "multiport0_t/T/cfg",
+ [0x04] = "multiport0_t/T/dast",
+ [0x05] = "multiport0_t/T/dma",
+ [0x06] = "multiport0_t/T/err_collator",
+ [0x07] = "multiport0_t/T/err_collator_car",
+ [0x08] = "multiport0_t/T/fpga_misc",
+ [0x09] = "multiport0_t/T/fpga_uart",
+ [0x0a] = "multiport0_t/T/gte",
+ [0x0b] = "multiport0_t/T/hsp",
+ [0x0c] = "multiport0_t/T/misc",
+ [0x0d] = "multiport0_t/T/pm",
+ [0x0e] = "multiport0_t/T/tke",
+ [0x0f] = "RESERVED",
+ [0x10] = "multiport1_t/T/hsm",
+ [0x11] = "multiport1_t/T/vic0",
+ [0x12] = "multiport1_t/T/vic1",
+ [0x13] = "ast0_t/T/0",
+ [0x14] = "ast1_t/T/0",
+ [0x15] = "cbb_t/T/0",
+ [0x16] = "cpu_t/T/0",
+ [0x17] = "sce_noc_firewall/T/0",
+ [0x18] = "svc_t/T/0",
+ [0x19] = "RESERVED",
+ [0x1a] = "RESERVED",
+ [0x1b] = "RESERVED",
+ [0x1c] = "RESERVED",
+ [0x1d] = "RESERVED",
+ [0x1e] = "RESERVED",
+ [0x1f] = "RESERVED"
+};
+
+/*
+ * Fields of SCE/RCE NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_scenoc_apert_lookup[] = {
+ { 0x0, 0x16, 0x0, 0, 0x0b400000, 0, 0x0b400000 },
+ { 0x0, 0x16, 0x1, 0, 0x0bc00000, 1, 0x0bc00000 },
+ { 0x0, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x0, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x0, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x0, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x0, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x0, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x0, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x0, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x0, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x0, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x0, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x0, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x0, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x0, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x0, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x0, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x0, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x0, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x0, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x0, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x0, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x0, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x0, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x0, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x0, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x0, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x0, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x0, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x0, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x0, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x0, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x0, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x0, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x0, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x0, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x0, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x0, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x0, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x0, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x0, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x0, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x0, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x0, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x0, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x0, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x0, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x0, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x0, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x0, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x0, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x0, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x0, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x0, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x0, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x0, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x0, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x0, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x0, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x0, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x0, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x0, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x0, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x0, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x0, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x0, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x0, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x0, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x0, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x0, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x0, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x0, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x0, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x0, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x0, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x0, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x0, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x0, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x0, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x0, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x0, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x0, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x0, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x0, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x0, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x0, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x0, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x0, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x0, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x0, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x0, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x0, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x0, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x0, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x13, 0x0, 0, 0x40000000, 0, 0x40000000 },
+ { 0x1, 0x13, 0x1, 1, 0x80000000, 1, 0x80000000 },
+ { 0x1, 0x13, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x0, 0, 0x20c00000, 8, 0x20c00000 },
+ { 0x2, 0x15, 0x1, 0, 0x21100000, 22, 0x21100000 },
+ { 0x2, 0x15, 0x2, 0, 0x20e00000, 9, 0x20e00000 },
+ { 0x2, 0x15, 0x3, 0, 0x21200000, 23, 0x21200000 },
+ { 0x2, 0x15, 0x4, 0, 0x20800000, 7, 0x20800000 },
+ { 0x2, 0x15, 0x5, 0, 0x21400000, 24, 0x21400000 },
+ { 0x2, 0x15, 0x6, 0, 0x0b000000, 18, 0x0b000000 },
+ { 0x2, 0x15, 0x7, 0, 0x0b800000, 3, 0x0b800000 },
+ { 0x2, 0x15, 0x8, 0, 0x20000000, 6, 0x20000000 },
+ { 0x2, 0x15, 0x9, 0, 0x21800000, 25, 0x21800000 },
+ { 0x2, 0x15, 0xa, 0, 0x0a000000, 2, 0x0a000000 },
+ { 0x2, 0x15, 0xb, 0, 0x0a000000, 17, 0x0a000000 },
+ { 0x2, 0x15, 0xc, 0, 0x20000000, 21, 0x20000000 },
+ { 0x2, 0x15, 0xd, 0, 0x21000000, 10, 0x21000000 },
+ { 0x2, 0x15, 0xe, 0, 0x08000000, 1, 0x08000000 },
+ { 0x2, 0x15, 0xf, 0, 0x08000000, 16, 0x08000000 },
+ { 0x2, 0x15, 0x10, 0, 0x22000000, 11, 0x22000000 },
+ { 0x2, 0x15, 0x11, 0, 0x22000000, 26, 0x22000000 },
+ { 0x2, 0x15, 0x12, 0, 0x0c000000, 4, 0x0c000000 },
+ { 0x2, 0x15, 0x13, 0, 0x0c000000, 19, 0x0c000000 },
+ { 0x2, 0x15, 0x14, 0, 0x24000000, 12, 0x24000000 },
+ { 0x2, 0x15, 0x15, 0, 0x24000000, 27, 0x24000000 },
+ { 0x2, 0x15, 0x16, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x17, 0, 0x00000000, 15, 0x00000000 },
+ { 0x2, 0x15, 0x18, 0, 0x28000000, 13, 0x28000000 },
+ { 0x2, 0x15, 0x19, 0, 0x28000000, 28, 0x28000000 },
+ { 0x2, 0x15, 0x1a, 0, 0x10000000, 5, 0x10000000 },
+ { 0x2, 0x15, 0x1b, 0, 0x10000000, 20, 0x10000000 },
+ { 0x2, 0x15, 0x1c, 0, 0x30000000, 14, 0x30000000 },
+ { 0x2, 0x15, 0x1d, 0, 0x30000000, 29, 0x30000000 },
+ { 0x2, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x2, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x2, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x2, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x2, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x2, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x2, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x2, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x2, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x2, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x2, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x2, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x2, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x2, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x2, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x2, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x2, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x2, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x2, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x2, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x2, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x2, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x2, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x2, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x2, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x2, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x2, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x2, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x2, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x2, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x2, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x2, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x2, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x2, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x2, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x2, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x2, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x2, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x2, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x2, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x2, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x2, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x2, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x2, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x2, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x2, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x2, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x2, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x2, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x2, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x2, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x2, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x2, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x2, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x2, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x2, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x2, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x2, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x2, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x2, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x2, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x2, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x2, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x2, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x2, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x2, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x2, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x2, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x2, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x2, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x2, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x2, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x2, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x2, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x2, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x2, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x2, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x2, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x2, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x2, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x2, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x2, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x2, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x2, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x2, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x2, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x2, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x2, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x2, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x2, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x2, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x2, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x2, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x14, 0x0, 0, 0x40000000, 0, 0x40000000 },
+ { 0x3, 0x14, 0x1, 1, 0x80000000, 1, 0x80000000 },
+ { 0x3, 0x16, 0x0, 2, 0x0b400000, 0, 0x0b400000 },
+ { 0x3, 0x16, 0x1, 2, 0x0bc00000, 1, 0x0bc00000 },
+ { 0x3, 0x16, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x16, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x15, 0x0, 0, 0x20c00000, 8, 0x20c00000 },
+ { 0x4, 0x15, 0x1, 0, 0x21100000, 22, 0x21100000 },
+ { 0x4, 0x15, 0x2, 0, 0x20e00000, 9, 0x20e00000 },
+ { 0x4, 0x15, 0x3, 0, 0x21200000, 23, 0x21200000 },
+ { 0x4, 0x15, 0x4, 0, 0x20800000, 7, 0x20800000 },
+ { 0x4, 0x15, 0x5, 0, 0x21400000, 24, 0x21400000 },
+ { 0x4, 0x15, 0x6, 0, 0x0b000000, 18, 0x0b000000 },
+ { 0x4, 0x15, 0x7, 0, 0x0b800000, 3, 0x0b800000 },
+ { 0x4, 0x15, 0x8, 0, 0x20000000, 6, 0x20000000 },
+ { 0x4, 0x15, 0x9, 0, 0x21800000, 25, 0x21800000 },
+ { 0x4, 0x15, 0xa, 0, 0x0a000000, 2, 0x0a000000 },
+ { 0x4, 0x15, 0xb, 0, 0x0a000000, 17, 0x0a000000 },
+ { 0x4, 0x15, 0xc, 0, 0x20000000, 21, 0x20000000 },
+ { 0x4, 0x15, 0xd, 0, 0x21000000, 10, 0x21000000 },
+ { 0x4, 0x15, 0xe, 0, 0x08000000, 1, 0x08000000 },
+ { 0x4, 0x15, 0xf, 0, 0x08000000, 16, 0x08000000 },
+ { 0x4, 0x15, 0x10, 0, 0x22000000, 11, 0x22000000 },
+ { 0x4, 0x15, 0x11, 0, 0x22000000, 26, 0x22000000 },
+ { 0x4, 0x15, 0x12, 0, 0x0c000000, 4, 0x0c000000 },
+ { 0x4, 0x15, 0x13, 0, 0x0c000000, 19, 0x0c000000 },
+ { 0x4, 0x15, 0x14, 0, 0x24000000, 12, 0x24000000 },
+ { 0x4, 0x15, 0x15, 0, 0x24000000, 27, 0x24000000 },
+ { 0x4, 0x15, 0x16, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x15, 0x17, 0, 0x00000000, 15, 0x00000000 },
+ { 0x4, 0x15, 0x18, 0, 0x28000000, 13, 0x28000000 },
+ { 0x4, 0x15, 0x19, 0, 0x28000000, 28, 0x28000000 },
+ { 0x4, 0x15, 0x1a, 0, 0x10000000, 5, 0x10000000 },
+ { 0x4, 0x15, 0x1b, 0, 0x10000000, 20, 0x10000000 },
+ { 0x4, 0x15, 0x1c, 0, 0x30000000, 14, 0x30000000 },
+ { 0x4, 0x15, 0x1d, 0, 0x30000000, 29, 0x30000000 },
+ { 0x4, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x4, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x4, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x4, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x4, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x4, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x4, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x4, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x4, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x4, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x4, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x4, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x4, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x4, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x4, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x4, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x4, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x4, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x4, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x4, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x4, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x4, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x4, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x4, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x4, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x4, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x4, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x4, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x4, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x4, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x4, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x4, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x4, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x4, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x4, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x4, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x4, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x4, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x4, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x4, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x4, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x4, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x4, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x4, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x4, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x4, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x4, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x4, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x4, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x4, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x4, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x4, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x4, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x4, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x4, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x4, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x4, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x4, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x4, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x4, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x4, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x4, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x4, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x4, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x4, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x4, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x4, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x4, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x4, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x4, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x4, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x4, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x4, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x4, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x4, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x4, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x4, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x4, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x4, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x4, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x4, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x4, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x4, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x4, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x4, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x4, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x4, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x4, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x4, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x4, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x4, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x4, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x4, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x4, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x4, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 }
+};
+
+static void cbbcentralnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(CBB_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(CBB_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(CBB_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(CBB_NOC_SEQID, routeid);
+}
+
+static void bpmpnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(BPMP_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(BPMP_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(BPMP_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(BPMP_NOC_SEQID, routeid);
+}
+
+static void aonnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(AON_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(AON_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(AON_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(AON_NOC_SEQID, routeid);
+}
+
+static void scenoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(SCE_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(SCE_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(SCE_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(SCE_NOC_SEQID, routeid);
+}
+
+static void cbbcentralnoc_parse_userbits(struct tegra194_cbb_userbits *usrbits, u32 elog_5)
+{
+ usrbits->axcache = FIELD_GET(CBB_NOC_AXCACHE, elog_5);
+ usrbits->non_mod = FIELD_GET(CBB_NOC_NON_MOD, elog_5);
+ usrbits->axprot = FIELD_GET(CBB_NOC_AXPROT, elog_5);
+ usrbits->falconsec = FIELD_GET(CBB_NOC_FALCONSEC, elog_5);
+ usrbits->grpsec = FIELD_GET(CBB_NOC_GRPSEC, elog_5);
+ usrbits->vqc = FIELD_GET(CBB_NOC_VQC, elog_5);
+ usrbits->mstr_id = FIELD_GET(CBB_NOC_MSTR_ID, elog_5) - 1;
+ usrbits->axi_id = FIELD_GET(CBB_NOC_AXI_ID, elog_5);
+}
+
+static void clusternoc_parse_userbits(struct tegra194_cbb_userbits *usrbits, u32 elog_5)
+{
+ usrbits->axcache = FIELD_GET(CLUSTER_NOC_AXCACHE, elog_5);
+ usrbits->axprot = FIELD_GET(CLUSTER_NOC_AXCACHE, elog_5);
+ usrbits->falconsec = FIELD_GET(CLUSTER_NOC_FALCONSEC, elog_5);
+ usrbits->grpsec = FIELD_GET(CLUSTER_NOC_GRPSEC, elog_5);
+ usrbits->vqc = FIELD_GET(CLUSTER_NOC_VQC, elog_5);
+ usrbits->mstr_id = FIELD_GET(CLUSTER_NOC_MSTR_ID, elog_5) - 1;
+}
+
+static void tegra194_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_FAULTEN_0);
+ writel(1, priv->regs + ERRLOGGER_1_FAULTEN_0);
+ writel(1, priv->regs + ERRLOGGER_2_FAULTEN_0);
+}
+
+static void tegra194_cbb_stall_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_STALLEN_0);
+ writel(1, priv->regs + ERRLOGGER_1_STALLEN_0);
+ writel(1, priv->regs + ERRLOGGER_2_STALLEN_0);
+}
+
+static void tegra194_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_ERRCLR_0);
+ writel(1, priv->regs + ERRLOGGER_1_ERRCLR_0);
+ writel(1, priv->regs + ERRLOGGER_2_ERRCLR_0);
+ dsb(sy);
+}
+
+static u32 tegra194_cbb_get_status(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+ u32 value;
+
+ value = readl(priv->regs + ERRLOGGER_0_ERRVLD_0);
+ value |= (readl(priv->regs + ERRLOGGER_1_ERRVLD_0) << 1);
+ value |= (readl(priv->regs + ERRLOGGER_2_ERRVLD_0) << 2);
+
+ dsb(sy);
+ return value;
+}
+
+static u32 tegra194_axi2apb_status(void __iomem *addr)
+{
+ u32 value;
+
+ value = readl(addr + DMAAPB_X_RAW_INTERRUPT_STATUS);
+ writel(0xffffffff, addr + DMAAPB_X_RAW_INTERRUPT_STATUS);
+
+ return value;
+}
+
+static bool tegra194_axi2apb_fatal(struct seq_file *file, unsigned int bridge, u32 status)
+{
+ bool is_fatal = true;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_axi2apb_error); i++) {
+ if (status & BIT(i)) {
+ tegra_cbb_print_err(file, "\t AXI2APB_%d bridge error: %s\n",
+ bridge + 1, tegra194_axi2apb_error[i]);
+ if (strstr(tegra194_axi2apb_error[i], "Firewall"))
+ is_fatal = false;
+ }
+ }
+
+ return is_fatal;
+}
+
+/*
+ * Fetch the InitLocalAddress from the NOC aperture lookup table
+ * using Targflow and Targ_subrange.
+ */
+static u32 get_init_localaddress(const struct tegra194_cbb_aperture *info,
+ const struct tegra194_cbb_aperture *aper, unsigned int max)
+{
+ unsigned int t_f = 0, t_sr = 0;
+ u32 addr = 0;
+
+ for (t_f = 0; t_f < max; t_f++) {
+ if (aper[t_f].targflow == info->targflow) {
+ t_sr = t_f;
+
+ do {
+ if (aper[t_sr].targ_subrange == info->targ_subrange) {
+ addr = aper[t_sr].init_localaddress;
+ return addr;
+ }
+
+ if (t_sr >= max)
+ return 0;
+
+ t_sr++;
+ } while (aper[t_sr].targflow == aper[t_sr - 1].targflow);
+
+ t_f = t_sr;
+ }
+ }
+
+ return addr;
+}
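
For illustration only, here is a minimal user-space sketch of the same two-level idea: match the targflow first, then pick the entry with the requested targ_subrange. The table contents, field widths and values below are hypothetical and not taken from the SoC or from this patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified aperture entry: only the fields the lookup needs. */
struct aperture {
	uint32_t targflow;
	uint32_t targ_subrange;
	uint32_t init_localaddress;
};

/* Entries with the same targflow are kept adjacent, as in the driver's tables. */
static const struct aperture table[] = {
	{ 0x1, 0x0, 0x20d00000 },
	{ 0x1, 0x1, 0x21000000 },
	{ 0x5, 0x0, 0x0b060000 },
	{ 0x5, 0x1, 0x0b070000 },
};

static uint32_t lookup(uint32_t targflow, uint32_t targ_subrange)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].targflow == targflow &&
		    table[i].targ_subrange == targ_subrange)
			return table[i].init_localaddress;

	return 0; /* not found */
}

int main(void)
{
	/* Expect 0x0b070000 for targflow 0x5, subrange 0x1. */
	printf("base = %#x\n", lookup(0x5, 0x1));
	return 0;
}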
+
+static void print_errlog5(struct seq_file *file, struct tegra194_cbb *cbb)
+{
+ struct tegra194_cbb_userbits userbits;
+
+ cbb->noc->parse_userbits(&userbits, cbb->errlog5);
+
+ if (!strcmp(cbb->noc->name, "cbb-noc")) {
+ tegra_cbb_print_err(file, "\t Non-Modify\t\t: %#x\n", userbits.non_mod);
+ tegra_cbb_print_err(file, "\t AXI ID\t\t: %#x\n", userbits.axi_id);
+ }
+
+ tegra_cbb_print_err(file, "\t Master ID\t\t: %s\n",
+ cbb->noc->master_id[userbits.mstr_id]);
+ tegra_cbb_print_err(file, "\t Security Group(GRPSEC): %#x\n", userbits.grpsec);
+ tegra_cbb_print_cache(file, userbits.axcache);
+ tegra_cbb_print_prot(file, userbits.axprot);
+ tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", userbits.falconsec);
+ tegra_cbb_print_err(file, "\t Virtual Queuing Channel(VQC): %#x\n", userbits.vqc);
+}
+
+/*
+ * Fetch the Base Address (InitLocalAddress) from the NOC aperture lookup table using the
+ * TargFlow and Targ_subRange extracted from the RouteId, then reconstruct the address as:
+ *
+ * Address = Base Address + ((u64)ErrLog4 << 32 | ErrLog3)
+ */
+static void
+print_errlog3_4(struct seq_file *file, u32 errlog3, u32 errlog4,
+ const struct tegra194_cbb_aperture *info,
+ const struct tegra194_cbb_aperture *aperture, unsigned int max)
+{
+ u64 addr = (u64)errlog4 << 32 | errlog3;
+
+ /*
+ * If errlog4[7] = "1", then it's a joker entry. Joker entries are a rare phenomenon and
+ * such addresses are not reliable. Debugging should be done using only the RouteId
+ * information.
+ */
+ if (errlog4 & 0x80)
+ tegra_cbb_print_err(file, "\t debug using RouteId alone as below address is a "
+ "joker entry and not reliable");
+
+ addr += get_init_localaddress(info, aperture, max);
+
+ tegra_cbb_print_err(file, "\t Address accessed\t: %#llx\n", addr);
+}
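
A worked example of the reconstruction above, using made-up register values (not from real hardware): ErrLog4 supplies the upper 32 bits of the logged offset, ErrLog3 the lower 32 bits, and the aperture base from the lookup is added on top.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical logged values. */
	uint32_t errlog3 = 0x00023000;	/* low 32 bits of the offset */
	uint32_t errlog4 = 0x00000000;	/* high 32 bits; bit 7 set would mark a joker entry */
	uint64_t base    = 0x0b150000;	/* InitLocalAddress from the aperture lookup */

	uint64_t addr = ((uint64_t)errlog4 << 32 | errlog3) + base;

	if (errlog4 & 0x80)
		puts("joker entry: address not reliable, use RouteId instead");

	printf("address accessed = %#llx\n", (unsigned long long)addr);
	return 0;	/* prints 0xb173000 */
}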
+
+/*
+ * Get the RouteId from the ErrLog1 and ErrLog2 registers and extract the
+ * InitFlow, TargFlow, Targ_subRange and SeqId fields from it
+ */
+static void
+print_errlog1_2(struct seq_file *file, struct tegra194_cbb *cbb,
+ struct tegra194_cbb_aperture *info)
+{
+ u64 routeid = (u64)cbb->errlog2 << 32 | cbb->errlog1;
+ u32 seqid = 0;
+
+ tegra_cbb_print_err(file, "\t RouteId\t\t: %#llx\n", routeid);
+
+ cbb->noc->parse_routeid(info, routeid);
+
+ tegra_cbb_print_err(file, "\t InitFlow\t\t: %s\n",
+ cbb->noc->routeid_initflow[info->initflow]);
+
+ tegra_cbb_print_err(file, "\t Targflow\t\t: %s\n",
+ cbb->noc->routeid_targflow[info->targflow]);
+
+ tegra_cbb_print_err(file, "\t TargSubRange\t\t: %d\n", info->targ_subrange);
+ tegra_cbb_print_err(file, "\t SeqId\t\t\t: %d\n", seqid);
+}
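
The RouteId is simply ErrLog2 in the upper 32 bits and ErrLog1 in the lower 32 bits; the per-NOC parse_routeid callback then slices it with FIELD_GET() and the CBB_NOC_*/BPMP_NOC_*/... masks defined earlier in the driver. Below is a hedged sketch of the same decode with invented field positions, which differ from the real per-NOC layouts.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout of a routeid; the real masks differ per NOC. */
#define INITFLOW_SHIFT	24
#define INITFLOW_MASK	0x1f
#define TARGFLOW_SHIFT	16
#define TARGFLOW_MASK	0x1f
#define SUBRANGE_SHIFT	8
#define SUBRANGE_MASK	0x3f
#define SEQID_SHIFT	0
#define SEQID_MASK	0xff

int main(void)
{
	uint32_t errlog1 = 0x05020104;	/* made-up register values */
	uint32_t errlog2 = 0x00000000;
	uint64_t routeid = (uint64_t)errlog2 << 32 | errlog1;

	printf("initflow      = %#llx\n",
	       (unsigned long long)((routeid >> INITFLOW_SHIFT) & INITFLOW_MASK));
	printf("targflow      = %#llx\n",
	       (unsigned long long)((routeid >> TARGFLOW_SHIFT) & TARGFLOW_MASK));
	printf("targ_subrange = %#llx\n",
	       (unsigned long long)((routeid >> SUBRANGE_SHIFT) & SUBRANGE_MASK));
	printf("seqid         = %#llx\n",
	       (unsigned long long)((routeid >> SEQID_SHIFT) & SEQID_MASK));
	return 0;
}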
+
+/*
+ * Print the transaction type, error code and description from ErrLog0 for all
+ * errors. For NOC slave errors, all relevant error information is printed
+ * using ErrLog0 only. Additional information is printed for errors from APB
+ * slaves because for them:
+ * - All errors are logged as SLV (slave) errors since APB has only a single
+ * pslverr bit to report all errors.
+ * - The exact cause is found by reading the DMAAPB_X_RAW_INTERRUPT_STATUS
+ * register.
+ * - The driver prints the AXI2APB bridge and the exact error only if there
+ * is an error in any AXI2APB slave.
+ * - There is still no way to disambiguate a DEC error from a SLV error type.
+ */
+static bool print_errlog0(struct seq_file *file, struct tegra194_cbb *cbb)
+{
+ struct tegra194_cbb_packet_header hdr;
+ bool is_fatal = true;
+
+ hdr.lock = cbb->errlog0 & 0x1;
+ hdr.opc = FIELD_GET(CBB_ERR_OPC, cbb->errlog0);
+ hdr.errcode = FIELD_GET(CBB_ERR_ERRCODE, cbb->errlog0);
+ hdr.len1 = FIELD_GET(CBB_ERR_LEN1, cbb->errlog0);
+ hdr.format = (cbb->errlog0 >> 31);
+
+ tegra_cbb_print_err(file, "\t Transaction Type\t: %s\n",
+ tegra194_cbb_trantype[hdr.opc]);
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].code);
+ tegra_cbb_print_err(file, "\t Error Source\t\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].source);
+ tegra_cbb_print_err(file, "\t Error Description\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].desc);
+
+ /*
+ * Do not crash system for errors which are only notifications to indicate a transaction
+ * was not allowed to be attempted.
+ */
+ if (!strcmp(tegra194_cbb_errors[hdr.errcode].code, "SEC") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "DEC") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "UNS") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "DISC")) {
+ is_fatal = false;
+ } else if (!strcmp(tegra194_cbb_errors[hdr.errcode].code, "SLV") &&
+ cbb->num_bridges > 0) {
+ unsigned int i;
+ u32 status;
+
+ /*
+ * For all SLV errors, read the DMAAPB_X_RAW_INTERRUPT_STATUS
+ * register to get the error status for all AXI2APB bridges.
+ * Print bridge details if a bit is set in a bridge's status
+ * register due to an error in an APB slave connected to that
+ * bridge. For other NOC slaves, none of the status registers
+ * will be set.
+ */
+
+ for (i = 0; i < cbb->num_bridges; i++) {
+ status = tegra194_axi2apb_status(cbb->bridges[i].base);
+
+ if (status)
+ is_fatal = tegra194_axi2apb_fatal(file, i, status);
+ }
+ }
+
+ tegra_cbb_print_err(file, "\t Packet header Lock\t: %d\n", hdr.lock);
+ tegra_cbb_print_err(file, "\t Packet header Len1\t: %d\n", hdr.len1);
+
+ if (hdr.format)
+ tegra_cbb_print_err(file, "\t NOC protocol version\t: %s\n",
+ "version >= 2.7");
+ else
+ tegra_cbb_print_err(file, "\t NOC protocol version\t: %s\n",
+ "version < 2.7");
+
+ return is_fatal;
+}
+
+/*
+ * Print debug information about the failed transaction using the
+ * ErrLog registers of the error logger that has ErrVld set
+ */
+static bool print_errloggerX_info(struct seq_file *file, struct tegra194_cbb *cbb,
+ int errloggerX)
+{
+ struct tegra194_cbb_aperture info = { 0, };
+ bool is_fatal = true;
+
+ tegra_cbb_print_err(file, "\tError Logger\t\t: %d\n", errloggerX);
+
+ if (errloggerX == 0) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_0_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_0_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_0_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_0_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_0_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_0_ERRLOG5_0);
+ } else if (errloggerX == 1) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_1_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_1_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_1_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_1_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_1_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_1_ERRLOG5_0);
+ } else if (errloggerX == 2) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_2_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_2_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_2_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_2_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_2_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_2_ERRLOG5_0);
+ }
+
+ tegra_cbb_print_err(file, "\tErrLog0\t\t\t: %#x\n", cbb->errlog0);
+ is_fatal = print_errlog0(file, cbb);
+
+ tegra_cbb_print_err(file, "\tErrLog1\t\t\t: %#x\n", cbb->errlog1);
+ tegra_cbb_print_err(file, "\tErrLog2\t\t\t: %#x\n", cbb->errlog2);
+ print_errlog1_2(file, cbb, &info);
+
+ tegra_cbb_print_err(file, "\tErrLog3\t\t\t: %#x\n", cbb->errlog3);
+ tegra_cbb_print_err(file, "\tErrLog4\t\t\t: %#x\n", cbb->errlog4);
+ print_errlog3_4(file, cbb->errlog3, cbb->errlog4, &info, cbb->noc->noc_aperture,
+ cbb->noc->max_aperture);
+
+ tegra_cbb_print_err(file, "\tErrLog5\t\t\t: %#x\n", cbb->errlog5);
+
+ if (cbb->errlog5)
+ print_errlog5(file, cbb);
+
+ return is_fatal;
+}
+
+static bool print_errlog(struct seq_file *file, struct tegra194_cbb *cbb, u32 errvld)
+{
+ bool is_fatal = true;
+
+ pr_crit("**************************************\n");
+ pr_crit("CPU:%d, Error:%s\n", smp_processor_id(), cbb->noc->name);
+
+ if (errvld & 0x1)
+ is_fatal = print_errloggerX_info(file, cbb, 0);
+ else if (errvld & 0x2)
+ is_fatal = print_errloggerX_info(file, cbb, 1);
+ else if (errvld & 0x4)
+ is_fatal = print_errloggerX_info(file, cbb, 2);
+
+ tegra_cbb_error_clear(&cbb->base);
+ tegra_cbb_print_err(file, "\t**************************************\n");
+ return is_fatal;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_MUTEX(cbb_err_mutex);
+
+static int tegra194_cbb_debugfs_show(struct tegra_cbb *cbb, struct seq_file *file, void *data)
+{
+ struct tegra_cbb *noc;
+
+ mutex_lock(&cbb_err_mutex);
+
+ list_for_each_entry(noc, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+ u32 status;
+
+ status = tegra_cbb_get_status(noc);
+ if (status)
+ print_errlog(file, priv, status);
+ }
+
+ mutex_unlock(&cbb_err_mutex);
+
+ return 0;
+}
+#endif
+
+/*
+ * Handler for CBB errors from different initiators
+ */
+static irqreturn_t tegra194_cbb_err_isr(int irq, void *data)
+{
+ bool is_inband_err = false, is_fatal = false;
+ //struct tegra194_cbb *cbb = data;
+ struct tegra_cbb *noc;
+ unsigned long flags;
+ u8 mstr_id = 0;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ /* XXX only process interrupts for "cbb" instead of iterating over all NOCs? */
+ list_for_each_entry(noc, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+ u32 status = 0;
+
+ status = tegra_cbb_get_status(noc);
+
+ if (status && ((irq == priv->sec_irq) || (irq == priv->nonsec_irq))) {
+ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
+ smp_processor_id(), priv->noc->name, priv->res->start,
+ irq);
+
+ mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->errlog5) - 1;
+ is_fatal = print_errlog(NULL, priv, status);
+
+ /*
+ * If the illegal request is from the CCPLEX (0x1)
+ * initiator, then call BUG() to crash the system.
+ */
+ if ((mstr_id == 0x1) && priv->noc->erd_mask_inband_err)
+ is_inband_err = true;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ if (is_inband_err) {
+ if (is_fatal)
+ BUG();
+ else
+ WARN(true, "Warning due to CBB Error\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Register handler for CBB_NONSECURE & CBB_SECURE interrupts
+ * for reporting CBB errors
+ */
+static int tegra194_cbb_interrupt_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+ struct device *dev = cbb->dev;
+ int err;
+
+ if (priv->sec_irq) {
+ err = devm_request_irq(dev, priv->sec_irq, tegra194_cbb_err_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "failed to register interrupt %u: %d\n", priv->sec_irq, err);
+ return err;
+ }
+ }
+
+ if (priv->nonsec_irq) {
+ err = devm_request_irq(dev, priv->nonsec_irq, tegra194_cbb_err_isr, 0,
+ dev_name(dev), priv);
+ if (err) {
+ dev_err(dev, "failed to register interrupt %u: %d\n", priv->nonsec_irq,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra194_cbb_error_enable(struct tegra_cbb *cbb)
+{
+ /*
+ * Set "StallEn = 1" to enable queuing of error packets until the
+ * first one is serviced and cleared
+ */
+ tegra_cbb_stall_enable(cbb);
+
+ /* Set "FaultEn = 1" to enable the error reporting signal "Fault" */
+ tegra_cbb_fault_enable(cbb);
+}
+
+static const struct tegra_cbb_ops tegra194_cbb_ops = {
+ .get_status = tegra194_cbb_get_status,
+ .error_clear = tegra194_cbb_error_clear,
+ .fault_enable = tegra194_cbb_fault_enable,
+ .stall_enable = tegra194_cbb_stall_enable,
+ .error_enable = tegra194_cbb_error_enable,
+ .interrupt_enable = tegra194_cbb_interrupt_enable,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_show = tegra194_cbb_debugfs_show,
+#endif
+};
+
+static struct tegra194_cbb_noc_data tegra194_cbb_central_noc_data = {
+ .name = "cbb-noc",
+ .erd_mask_inband_err = true,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_cbbcentralnoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_cbbcentralnoc_apert_lookup),
+ .routeid_initflow = tegra194_cbbcentralnoc_routeid_initflow,
+ .routeid_targflow = tegra194_cbbcentralnoc_routeid_targflow,
+ .parse_routeid = cbbcentralnoc_parse_routeid,
+ .parse_userbits = cbbcentralnoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_aon_noc_data = {
+ .name = "aon-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_aonnoc_aperture_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_aonnoc_aperture_lookup),
+ .routeid_initflow = tegra194_aonnoc_routeid_initflow,
+ .routeid_targflow = tegra194_aonnoc_routeid_targflow,
+ .parse_routeid = aonnoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_bpmp_noc_data = {
+ .name = "bpmp-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_bpmpnoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_bpmpnoc_apert_lookup),
+ .routeid_initflow = tegra194_bpmpnoc_routeid_initflow,
+ .routeid_targflow = tegra194_bpmpnoc_routeid_targflow,
+ .parse_routeid = bpmpnoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_rce_noc_data = {
+ .name = "rce-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_scenoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
+ .routeid_initflow = tegra194_scenoc_routeid_initflow,
+ .routeid_targflow = tegra194_scenoc_routeid_targflow,
+ .parse_routeid = scenoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_sce_noc_data = {
+ .name = "sce-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_scenoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
+ .routeid_initflow = tegra194_scenoc_routeid_initflow,
+ .routeid_targflow = tegra194_scenoc_routeid_targflow,
+ .parse_routeid = scenoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static const struct of_device_id tegra194_cbb_match[] = {
+ { .compatible = "nvidia,tegra194-cbb-noc", .data = &tegra194_cbb_central_noc_data },
+ { .compatible = "nvidia,tegra194-aon-noc", .data = &tegra194_aon_noc_data },
+ { .compatible = "nvidia,tegra194-bpmp-noc", .data = &tegra194_bpmp_noc_data },
+ { .compatible = "nvidia,tegra194-rce-noc", .data = &tegra194_rce_noc_data },
+ { .compatible = "nvidia,tegra194-sce-noc", .data = &tegra194_sce_noc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra194_cbb_match);
+
+static int tegra194_cbb_get_bridges(struct tegra194_cbb *cbb, struct device_node *np)
+{
+ struct tegra_cbb *entry;
+ struct resource res;
+ unsigned long flags;
+ unsigned int i;
+ int err;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry(entry, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(entry);
+
+ if (priv->bridges) {
+ cbb->num_bridges = priv->num_bridges;
+ cbb->bridges = priv->bridges;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ if (!cbb->bridges) {
+ while (of_address_to_resource(np, cbb->num_bridges, &res) == 0)
+ cbb->num_bridges++;
+
+ cbb->bridges = devm_kcalloc(cbb->base.dev, cbb->num_bridges,
+ sizeof(*cbb->bridges), GFP_KERNEL);
+ if (!cbb->bridges)
+ return -ENOMEM;
+
+ for (i = 0; i < cbb->num_bridges; i++) {
+ err = of_address_to_resource(np, i, &cbb->bridges[i].res);
+ if (err < 0)
+ return err;
+
+ cbb->bridges[i].base = devm_ioremap_resource(cbb->base.dev,
+ &cbb->bridges[i].res);
+ if (IS_ERR(cbb->bridges[i].base)) {
+ dev_err(cbb->base.dev, "failed to map AXI2APB range\n");
+ return PTR_ERR(cbb->bridges[i].base);
+ }
+ }
+ }
+
+ if (cbb->num_bridges > 0) {
+ dev_dbg(cbb->base.dev, "AXI2APB bridge info present:\n");
+
+ for (i = 0; i < cbb->num_bridges; i++)
+ dev_dbg(cbb->base.dev, " %u: %pR\n", i, &cbb->bridges[i].res);
+ }
+
+ return 0;
+}
+
+static int tegra194_cbb_probe(struct platform_device *pdev)
+{
+ const struct tegra194_cbb_noc_data *noc;
+ struct tegra194_cbb *cbb;
+ struct device_node *np;
+ unsigned long flags;
+ int err;
+
+ noc = of_device_get_match_data(&pdev->dev);
+
+ if (noc->erd_mask_inband_err) {
+ /*
+ * Set the Error Response Disable (ERD) bit to mask SError/inband
+ * errors and only trigger interrupts for illegal accesses from the
+ * CCPLEX initiator.
+ */
+ err = tegra194_miscreg_mask_serror();
+ if (err) {
+ dev_err(&pdev->dev, "couldn't mask inband errors\n");
+ return err;
+ }
+ }
+
+ cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
+ if (!cbb)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbb->base.node);
+ cbb->base.ops = &tegra194_cbb_ops;
+ cbb->base.dev = &pdev->dev;
+ cbb->noc = noc;
+
+ cbb->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &cbb->res);
+ if (IS_ERR(cbb->regs))
+ return PTR_ERR(cbb->regs);
+
+ err = tegra_cbb_get_irq(pdev, &cbb->nonsec_irq, &cbb->sec_irq);
+ if (err)
+ return err;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,axi2apb", 0);
+ if (np) {
+ err = tegra194_cbb_get_bridges(cbb, np);
+ of_node_put(np);
+ if (err < 0)
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cbb);
+
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ return tegra_cbb_register(&cbb->base);
+}
+
+static int tegra194_cbb_remove(struct platform_device *pdev)
+{
+ struct tegra194_cbb *cbb = platform_get_drvdata(pdev);
+ struct tegra_cbb *noc, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry_safe(noc, tmp, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+
+ if (cbb->res->start == priv->res->start) {
+ list_del(&noc->node);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused tegra194_cbb_resume_noirq(struct device *dev)
+{
+ struct tegra194_cbb *cbb = dev_get_drvdata(dev);
+
+ tegra194_cbb_error_enable(&cbb->base);
+ dsb(sy);
+
+ dev_dbg(dev, "%s resumed\n", cbb->noc->name);
+ return 0;
+}
+
+static const struct dev_pm_ops tegra194_cbb_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, tegra194_cbb_resume_noirq)
+};
+
+static struct platform_driver tegra194_cbb_driver = {
+ .probe = tegra194_cbb_probe,
+ .remove = tegra194_cbb_remove,
+ .driver = {
+ .name = "tegra194-cbb",
+ .of_match_table = of_match_ptr(tegra194_cbb_match),
+ .pm = &tegra194_cbb_pm,
+ },
+};
+
+static int __init tegra194_cbb_init(void)
+{
+ return platform_driver_register(&tegra194_cbb_driver);
+}
+pure_initcall(tegra194_cbb_init);
+
+static void __exit tegra194_cbb_exit(void)
+{
+ platform_driver_unregister(&tegra194_cbb_driver);
+}
+module_exit(tegra194_cbb_exit);
+
+MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
+MODULE_DESCRIPTION("Control Backbone error handling driver for Tegra194");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
new file mode 100644
index 000000000000..3528f9e15d5c
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -0,0 +1,1113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ *
+ * The driver handles errors from Control Backbone (CBB) version 2.0
+ * generated due to illegal accesses. The driver prints debug information
+ * about the failed transaction on receiving an interrupt from the Error
+ * Notifier. Error types supported by CBB 2.0 are:
+ * UNSUPPORTED_ERR, PWRDOWN_ERR, TIMEOUT_ERR, FIREWALL_ERR, DECODE_ERR,
+ * SLAVE_ERR
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+#define FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0 0x0
+#define FABRIC_EN_CFG_STATUS_0_0 0x40
+#define FABRIC_EN_CFG_ADDR_INDEX_0_0 0x60
+#define FABRIC_EN_CFG_ADDR_LOW_0 0x80
+#define FABRIC_EN_CFG_ADDR_HI_0 0x84
+
+#define FABRIC_MN_MASTER_ERR_EN_0 0x200
+#define FABRIC_MN_MASTER_ERR_FORCE_0 0x204
+#define FABRIC_MN_MASTER_ERR_STATUS_0 0x208
+#define FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0 0x20c
+
+#define FABRIC_MN_MASTER_LOG_ERR_STATUS_0 0x300
+#define FABRIC_MN_MASTER_LOG_ADDR_LOW_0 0x304
+#define FABRIC_MN_MASTER_LOG_ADDR_HIGH_0 0x308
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0 0x30c
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0 0x310
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0 0x314
+#define FABRIC_MN_MASTER_LOG_USER_BITS0_0 0x318
+
+#define AXI_SLV_TIMEOUT_STATUS_0_0 0x8
+#define APB_BLOCK_TMO_STATUS_0 0xc00
+#define APB_BLOCK_NUM_TMO_OFFSET 0x20
+
+#define FAB_EM_EL_MSTRID GENMASK(29, 24)
+#define FAB_EM_EL_VQC GENMASK(17, 16)
+#define FAB_EM_EL_GRPSEC GENMASK(14, 8)
+#define FAB_EM_EL_FALCONSEC GENMASK(1, 0)
+
+#define FAB_EM_EL_FABID GENMASK(20, 16)
+#define FAB_EM_EL_SLAVEID GENMASK(7, 0)
+
+#define FAB_EM_EL_ACCESSID GENMASK(7, 0)
+
+#define FAB_EM_EL_AXCACHE GENMASK(27, 24)
+#define FAB_EM_EL_AXPROT GENMASK(22, 20)
+#define FAB_EM_EL_BURSTLENGTH GENMASK(19, 12)
+#define FAB_EM_EL_BURSTTYPE GENMASK(9, 8)
+#define FAB_EM_EL_BEATSIZE GENMASK(6, 4)
+#define FAB_EM_EL_ACCESSTYPE GENMASK(0, 0)
+
+#define USRBITS_MSTR_ID GENMASK(29, 24)
+
+#define REQ_SOCKET_ID GENMASK(27, 24)
+
+enum tegra234_cbb_fabric_ids {
+ CBB_FAB_ID,
+ SCE_FAB_ID,
+ RCE_FAB_ID,
+ DCE_FAB_ID,
+ AON_FAB_ID,
+ PSC_FAB_ID,
+ BPMP_FAB_ID,
+ FSI_FAB_ID,
+ MAX_FAB_ID,
+};
+
+struct tegra234_slave_lookup {
+ const char *name;
+ unsigned int offset;
+};
+
+struct tegra234_cbb_fabric {
+ const char *name;
+ phys_addr_t off_mask_erd;
+ bool erd_mask_inband_err;
+ const char * const *master_id;
+ unsigned int notifier_offset;
+ const struct tegra_cbb_error *errors;
+ const struct tegra234_slave_lookup *slave_map;
+};
+
+struct tegra234_cbb {
+ struct tegra_cbb base;
+
+ const struct tegra234_cbb_fabric *fabric;
+ struct resource *res;
+ void __iomem *regs;
+
+ int num_intr;
+ int sec_irq;
+
+ /* record */
+ void __iomem *mon;
+ unsigned int type;
+ u32 mask;
+ u64 access;
+ u32 mn_attr0;
+ u32 mn_attr1;
+ u32 mn_attr2;
+ u32 mn_user_bits;
+};
+
+static inline struct tegra234_cbb *to_tegra234_cbb(struct tegra_cbb *cbb)
+{
+ return container_of(cbb, struct tegra234_cbb, base);
+}
+
+static LIST_HEAD(cbb_list);
+static DEFINE_SPINLOCK(cbb_lock);
+
+static void tegra234_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ void __iomem *addr;
+
+ addr = priv->regs + priv->fabric->notifier_offset;
+ writel(0x1ff, addr + FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0);
+ dsb(sy);
+}
+
+static void tegra234_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+
+ writel(0x3f, priv->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ dsb(sy);
+}
+
+static u32 tegra234_cbb_get_status(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ void __iomem *addr;
+ u32 value;
+
+ addr = priv->regs + priv->fabric->notifier_offset;
+ value = readl(addr + FABRIC_EN_CFG_STATUS_0_0);
+ dsb(sy);
+
+ return value;
+}
+
+static void tegra234_cbb_mask_serror(struct tegra234_cbb *cbb)
+{
+ writel(0x1, cbb->regs + cbb->fabric->off_mask_erd);
+ dsb(sy);
+}
+
+static u32 tegra234_cbb_get_tmo_slv(void __iomem *addr)
+{
+ u32 timeout;
+
+ timeout = readl(addr);
+ return timeout;
+}
+
+static void tegra234_cbb_tmo_slv(struct seq_file *file, const char *slave, void __iomem *addr,
+ u32 status)
+{
+ tegra_cbb_print_err(file, "\t %s : %#x\n", slave, status);
+}
+
+static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
+ void __iomem *base)
+{
+ unsigned int block = 0;
+ void __iomem *addr;
+ char name[64];
+ u32 status;
+
+ status = tegra234_cbb_get_tmo_slv(base);
+ if (status)
+ tegra_cbb_print_err(file, "\t %s_BLOCK_TMO_STATUS : %#x\n", slave, status);
+
+ while (status) {
+ if (status & BIT(0)) {
+ u32 timeout, clients, client = 0;
+
+ addr = base + APB_BLOCK_NUM_TMO_OFFSET + (block * 4);
+ timeout = tegra234_cbb_get_tmo_slv(addr);
+ clients = timeout;
+
+ while (timeout) {
+ if (timeout & BIT(0)) {
+ if (clients != 0xffffffff)
+ clients &= BIT(client);
+
+ sprintf(name, "%s_BLOCK%d_TMO", slave, block);
+
+ tegra234_cbb_tmo_slv(file, name, addr, clients);
+ }
+
+ timeout >>= 1;
+ client++;
+ }
+ }
+
+ status >>= 1;
+ block++;
+ }
+}
+
+static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234_cbb *cbb,
+ u8 slave_id, u8 fab_id)
+{
+ const struct tegra234_slave_lookup *map = cbb->fabric->slave_map;
+ void __iomem *addr;
+
+ /*
+ * 1) Get the slave node name and address mapping using slave_id.
+ * 2) Check whether the timed-out slave node is APB or AXI.
+ * 3) If AXI, print the timeout register and reset the AXI slave
+ * using the <FABRIC>_SN_<>_SLV_TIMEOUT_STATUS_0_0 register.
+ * 4) If APB, perform an additional lookup to find the client
+ * which timed out.
+ * a) Get the block number from the index of the set bit in the
+ * <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
+ * b) Get the address of the register respective to that block
+ * number, i.e. <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
+ * c) Read that register to get the client_id which timed out,
+ * as indicated by the set bits.
+ * d) Reset the timed-out client and print its details.
+ * e) Go back to step (a) until all set bits are handled.
+ */
+
+ addr = cbb->regs + map[slave_id].offset;
+
+ if (strstr(map[slave_id].name, "AXI2APB")) {
+ addr += APB_BLOCK_TMO_STATUS_0;
+
+ tegra234_cbb_lookup_apbslv(file, map[slave_id].name, addr);
+ } else {
+ char name[64];
+ u32 status;
+
+ addr += AXI_SLV_TIMEOUT_STATUS_0_0;
+
+ status = tegra234_cbb_get_tmo_slv(addr);
+ if (status) {
+ sprintf(name, "%s_SLV_TIMEOUT_STATUS", map[slave_id].name);
+ tegra234_cbb_tmo_slv(file, name, addr, status);
+ }
+ }
+}
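
For reference, a hedged user-space sketch of the APB lookup described in the comment above: walk the set bits of the block timeout status to find which blocks timed out, then read that block's per-client timeout word to find which clients. All register contents below are invented for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented snapshot: blocks 0 and 2 reported a timeout. */
	uint32_t block_tmo_status = 0x5;
	/* Invented per-block client bitmaps, indexed by block number. */
	uint32_t block_tmo[3] = { 0x00000002, 0x00000000, 0x00000011 };

	for (unsigned int block = 0; block_tmo_status; block_tmo_status >>= 1, block++) {
		if (!(block_tmo_status & 1))
			continue;

		uint32_t clients = block_tmo[block];

		for (unsigned int client = 0; clients; clients >>= 1, client++)
			if (clients & 1)
				printf("BLOCK%u: client %u timed out\n", block, client);
	}

	return 0;
}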
+
+static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb *cbb, u32 status,
+ u32 overflow)
+{
+ unsigned int type = 0;
+
+ if (status & (status - 1))
+ tegra_cbb_print_err(file, "\t Multiple types of errors reported\n");
+
+ while (status) {
+ if (status & 0x1)
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[type].code);
+
+ status >>= 1;
+ type++;
+ }
+
+ type = 0;
+
+ while (overflow) {
+ if (overflow & 0x1)
+ tegra_cbb_print_err(file, "\t Overflow\t\t: Multiple %s\n",
+ cbb->fabric->errors[type].code);
+
+ overflow >>= 1;
+ type++;
+ }
+}
+
+static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+{
+ u8 cache_type, prot_type, burst_length, mstr_id, grpsec, vqc, falconsec, beat_size;
+ u8 access_type, access_id, requester_socket_id, local_socket_id, slave_id, fab_id;
+ char fabric_name[20];
+ bool is_numa = false;
+ u8 burst_type;
+
+ if (num_possible_nodes() > 1)
+ is_numa = true;
+
+ mstr_id = FIELD_GET(FAB_EM_EL_MSTRID, cbb->mn_user_bits);
+ vqc = FIELD_GET(FAB_EM_EL_VQC, cbb->mn_user_bits);
+ grpsec = FIELD_GET(FAB_EM_EL_GRPSEC, cbb->mn_user_bits);
+ falconsec = FIELD_GET(FAB_EM_EL_FALCONSEC, cbb->mn_user_bits);
+
+ /*
+ * For SoCs with multiple NUMA nodes, print cross-socket access
+ * errors only if the initiator/master_id is CCPLEX, CCPMU or GPU.
+ */
+ if (is_numa) {
+ local_socket_id = numa_node_id();
+ requester_socket_id = FIELD_GET(REQ_SOCKET_ID, cbb->mn_attr2);
+
+ if (requester_socket_id != local_socket_id) {
+ if ((mstr_id != 0x1) && (mstr_id != 0x2) && (mstr_id != 0xB))
+ return;
+ }
+ }
+
+ fab_id = FIELD_GET(FAB_EM_EL_FABID, cbb->mn_attr2);
+ slave_id = FIELD_GET(FAB_EM_EL_SLAVEID, cbb->mn_attr2);
+
+ access_id = FIELD_GET(FAB_EM_EL_ACCESSID, cbb->mn_attr1);
+
+ cache_type = FIELD_GET(FAB_EM_EL_AXCACHE, cbb->mn_attr0);
+ prot_type = FIELD_GET(FAB_EM_EL_AXPROT, cbb->mn_attr0);
+ burst_length = FIELD_GET(FAB_EM_EL_BURSTLENGTH, cbb->mn_attr0);
+ burst_type = FIELD_GET(FAB_EM_EL_BURSTTYPE, cbb->mn_attr0);
+ beat_size = FIELD_GET(FAB_EM_EL_BEATSIZE, cbb->mn_attr0);
+ access_type = FIELD_GET(FAB_EM_EL_ACCESSTYPE, cbb->mn_attr0);
+
+ tegra_cbb_print_err(file, "\n");
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[cbb->type].code);
+
+ tegra_cbb_print_err(file, "\t MASTER_ID\t\t: %s\n", cbb->fabric->master_id[mstr_id]);
+ tegra_cbb_print_err(file, "\t Address\t\t: %#llx\n", cbb->access);
+
+ tegra_cbb_print_cache(file, cache_type);
+ tegra_cbb_print_prot(file, prot_type);
+
+ tegra_cbb_print_err(file, "\t Access_Type\t\t: %s", (access_type) ? "Write\n" : "Read\n");
+ tegra_cbb_print_err(file, "\t Access_ID\t\t: %#x", access_id);
+
+ if (fab_id == PSC_FAB_ID)
+ strcpy(fabric_name, "psc-fabric");
+ else if (fab_id == FSI_FAB_ID)
+ strcpy(fabric_name, "fsi-fabric");
+ else
+ strcpy(fabric_name, cbb->fabric->name);
+
+ if (is_numa) {
+ tegra_cbb_print_err(file, "\t Requester_Socket_Id\t: %#x\n",
+ requester_socket_id);
+ tegra_cbb_print_err(file, "\t Local_Socket_Id\t: %#x\n",
+ local_socket_id);
+ tegra_cbb_print_err(file, "\t No. of NUMA_NODES\t: %#x\n",
+ num_possible_nodes());
+ }
+
+ tegra_cbb_print_err(file, "\t Fabric\t\t: %s\n", fabric_name);
+ tegra_cbb_print_err(file, "\t Slave_Id\t\t: %#x\n", slave_id);
+ tegra_cbb_print_err(file, "\t Burst_length\t\t: %#x\n", burst_length);
+ tegra_cbb_print_err(file, "\t Burst_type\t\t: %#x\n", burst_type);
+ tegra_cbb_print_err(file, "\t Beat_size\t\t: %#x\n", beat_size);
+ tegra_cbb_print_err(file, "\t VQC\t\t\t: %#x\n", vqc);
+ tegra_cbb_print_err(file, "\t GRPSEC\t\t: %#x\n", grpsec);
+ tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", falconsec);
+
+ if ((fab_id == PSC_FAB_ID) || (fab_id == FSI_FAB_ID))
+ return;
+
+ if (!strcmp(cbb->fabric->errors[cbb->type].code, "TIMEOUT_ERR")) {
+ tegra234_lookup_slave_timeout(file, cbb, slave_id, fab_id);
+ return;
+ }
+
+ tegra_cbb_print_err(file, "\t Slave\t\t\t: %s\n", cbb->fabric->slave_map[slave_id].name);
+}
+
+static int print_errmonX_info(struct seq_file *file, struct tegra234_cbb *cbb)
+{
+ u32 overflow, status, error;
+
+ status = readl(cbb->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ if (!status) {
+ pr_err("Error Notifier received a spurious notification\n");
+ return -ENODATA;
+ }
+
+ if (status == 0xffffffff) {
+ pr_err("CBB registers returning all 1's which is invalid\n");
+ return -EINVAL;
+ }
+
+ overflow = readl(cbb->mon + FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0);
+
+ tegra234_cbb_print_error(file, cbb, status, overflow);
+
+ error = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ERR_STATUS_0);
+ if (!error) {
+ pr_info("Error Monitor doesn't have Error Logger\n");
+ return -EINVAL;
+ }
+
+ cbb->type = 0;
+
+ while (error) {
+ if (error & BIT(0)) {
+ u32 hi, lo;
+
+ hi = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_HIGH_0);
+ lo = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_LOW_0);
+
+ cbb->access = (u64)hi << 32 | lo;
+
+ cbb->mn_attr0 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0);
+ cbb->mn_attr1 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0);
+ cbb->mn_attr2 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0);
+ cbb->mn_user_bits = readl(cbb->mon + FABRIC_MN_MASTER_LOG_USER_BITS0_0);
+
+ print_errlog_err(file, cbb);
+ }
+
+ cbb->type++;
+ error >>= 1;
+ }
+
+ return 0;
+}
+
+static int print_err_notifier(struct seq_file *file, struct tegra234_cbb *cbb, u32 status)
+{
+ unsigned int index = 0;
+ int err;
+
+ pr_crit("**************************************\n");
+ pr_crit("CPU:%d, Error:%s, Errmon:%d\n", smp_processor_id(),
+ cbb->fabric->name, status);
+
+ while (status) {
+ if (status & BIT(0)) {
+ unsigned int notifier = cbb->fabric->notifier_offset;
+ u32 hi, lo, mask = BIT(index);
+ phys_addr_t addr;
+ u64 offset;
+
+ writel(mask, cbb->regs + notifier + FABRIC_EN_CFG_ADDR_INDEX_0_0);
+ hi = readl(cbb->regs + notifier + FABRIC_EN_CFG_ADDR_HI_0);
+ lo = readl(cbb->regs + notifier + FABRIC_EN_CFG_ADDR_LOW_0);
+
+ addr = (u64)hi << 32 | lo;
+
+ offset = addr - cbb->res->start;
+ cbb->mon = cbb->regs + offset;
+ cbb->mask = BIT(index);
+
+ err = print_errmonX_info(file, cbb);
+ tegra234_cbb_error_clear(&cbb->base);
+ if (err)
+ return err;
+ }
+
+ status >>= 1;
+ index++;
+ }
+
+ tegra_cbb_print_err(file, "\t**************************************\n");
+ return 0;
+}
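
The notifier reports the physical address of the error monitor that fired; the driver converts that address into an offset within its own mapped region before reading the monitor's registers (cbb->mon = cbb->regs + offset). A small sketch of that translation with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values standing in for the notifier readback and resource data. */
	uint32_t hi = 0x0;
	uint32_t lo = 0x13a01000;	/* FABRIC_EN_CFG_ADDR_{HI,LOW} readback */
	uint64_t res_start = 0x13a00000;	/* start of the fabric's MMIO resource */

	uint64_t addr = (uint64_t)hi << 32 | lo;
	uint64_t offset = addr - res_start;

	printf("error monitor at offset %#llx into the mapped region\n",
	       (unsigned long long)offset);
	return 0;
}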
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_MUTEX(cbb_debugfs_mutex);
+
+static int tegra234_cbb_debugfs_show(struct tegra_cbb *cbb, struct seq_file *file, void *data)
+{
+ int err = 0;
+
+ mutex_lock(&cbb_debugfs_mutex);
+
+ list_for_each_entry(cbb, &cbb_list, node) {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ u32 status;
+
+ status = tegra_cbb_get_status(&priv->base);
+ if (status) {
+ err = print_err_notifier(file, priv, status);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&cbb_debugfs_mutex);
+ return err;
+}
+#endif
+
+/*
+ * Handler for CBB errors
+ */
+static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+{
+ bool is_inband_err = false;
+ struct tegra_cbb *cbb;
+ unsigned long flags;
+ u8 mstr_id;
+ int err;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry(cbb, &cbb_list, node) {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ u32 status = tegra_cbb_get_status(cbb);
+
+ if (status && (irq == priv->sec_irq)) {
+ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
+ smp_processor_id(), priv->fabric->name,
+ priv->res->start, irq);
+
+ err = print_err_notifier(NULL, priv, status);
+ if (err)
+ goto unlock;
+
+ mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
+
+ /*
+ * If the illegal request is from the CCPLEX (id: 0x1) master, then
+ * call BUG() to crash the system.
+ */
+ if ((mstr_id == 0x1) && priv->fabric->off_mask_erd)
+ is_inband_err = true;
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&cbb_lock, flags);
+ WARN_ON(is_inband_err);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Register handler for CBB_SECURE interrupt for reporting errors
+ */
+static int tegra234_cbb_interrupt_enable(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+
+ if (priv->sec_irq) {
+ int err = devm_request_irq(cbb->dev, priv->sec_irq, tegra234_cbb_isr, 0,
+ dev_name(cbb->dev), priv);
+ if (err) {
+ dev_err(cbb->dev, "failed to register interrupt %u: %d\n", priv->sec_irq,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra234_cbb_error_enable(struct tegra_cbb *cbb)
+{
+ tegra_cbb_fault_enable(cbb);
+}
+
+static const struct tegra_cbb_ops tegra234_cbb_ops = {
+ .get_status = tegra234_cbb_get_status,
+ .error_clear = tegra234_cbb_error_clear,
+ .fault_enable = tegra234_cbb_fault_enable,
+ .error_enable = tegra234_cbb_error_enable,
+ .interrupt_enable = tegra234_cbb_interrupt_enable,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_show = tegra234_cbb_debugfs_show,
+#endif
+};
+
+static const char * const tegra234_master_id[] = {
+ [0x00] = "TZ",
+ [0x01] = "CCPLEX",
+ [0x02] = "CCPMU",
+ [0x03] = "BPMP_FW",
+ [0x04] = "AON",
+ [0x05] = "SCE",
+ [0x06] = "GPCDMA_P",
+ [0x07] = "TSECA_NONSECURE",
+ [0x08] = "TSECA_LIGHTSECURE",
+ [0x09] = "TSECA_HEAVYSECURE",
+ [0x0a] = "CORESIGHT",
+ [0x0b] = "APE",
+ [0x0c] = "PEATRANS",
+ [0x0d] = "JTAGM_DFT",
+ [0x0e] = "RCE",
+ [0x0f] = "DCE",
+ [0x10] = "PSC_FW_USER",
+ [0x11] = "PSC_FW_SUPERVISOR",
+ [0x12] = "PSC_FW_MACHINE",
+ [0x13] = "PSC_BOOT",
+ [0x14] = "BPMP_BOOT",
+ [0x15] = "NVDEC_NONSECURE",
+ [0x16] = "NVDEC_LIGHTSECURE",
+ [0x17] = "NVDEC_HEAVYSECURE",
+ [0x18] = "CBB_INTERNAL",
+ [0x19] = "RSVD"
+};
+
+static const struct tegra_cbb_error tegra234_cbb_errors[] = {
+ {
+ .code = "SLAVE_ERR",
+ .desc = "Slave being accessed responded with an error"
+ }, {
+ .code = "DECODE_ERR",
+ .desc = "Attempt to access an address hole"
+ }, {
+ .code = "FIREWALL_ERR",
+ .desc = "Attempt to access a region which is firewall protected"
+ }, {
+ .code = "TIMEOUT_ERR",
+ .desc = "No response returned by slave"
+ }, {
+ .code = "PWRDOWN_ERR",
+ .desc = "Attempt to access a portion of fabric that is powered down"
+ }, {
+ .code = "UNSUPPORTED_ERR",
+ .desc = "Attempt to access a slave through an unsupported access"
+ }
+};
+
+static const struct tegra234_slave_lookup tegra234_aon_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST", 0x14000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
+ .name = "aon-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_aon_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x17000,
+};
+
+static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_bpmp_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
+ { "AON", 0x40000 },
+ { "BPMP", 0x41000 },
+ { "CBB", 0x42000 },
+ { "HOST1X", 0x43000 },
+ { "STM", 0x44000 },
+ { "FSI", 0x45000 },
+ { "PSC", 0x46000 },
+ { "PCIE_C1", 0x47000 },
+ { "PCIE_C2", 0x48000 },
+ { "PCIE_C3", 0x49000 },
+ { "PCIE_C0", 0x4a000 },
+ { "PCIE_C4", 0x4b000 },
+ { "GPU", 0x4c000 },
+ { "SMMU0", 0x4d000 },
+ { "SMMU1", 0x4e000 },
+ { "SMMU2", 0x4f000 },
+ { "SMMU3", 0x50000 },
+ { "SMMU4", 0x51000 },
+ { "PCIE_C10", 0x52000 },
+ { "PCIE_C7", 0x53000 },
+ { "PCIE_C8", 0x54000 },
+ { "PCIE_C9", 0x55000 },
+ { "PCIE_C5", 0x56000 },
+ { "PCIE_C6", 0x57000 },
+ { "DCE", 0x58000 },
+ { "RCE", 0x59000 },
+ { "SCE", 0x5a000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_10", 0x71000 },
+ { "AXI2APB_11", 0x72000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_14", 0x75000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_17", 0x78000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7a000 },
+ { "AXI2APB_2", 0x7b000 },
+ { "AXI2APB_20", 0x7c000 },
+ { "AXI2APB_21", 0x7d000 },
+ { "AXI2APB_22", 0x7e000 },
+ { "AXI2APB_23", 0x7f000 },
+ { "AXI2APB_25", 0x80000 },
+ { "AXI2APB_26", 0x81000 },
+ { "AXI2APB_27", 0x82000 },
+ { "AXI2APB_28", 0x83000 },
+ { "AXI2APB_29", 0x84000 },
+ { "AXI2APB_30", 0x85000 },
+ { "AXI2APB_31", 0x86000 },
+ { "AXI2APB_32", 0x87000 },
+ { "AXI2APB_33", 0x88000 },
+ { "AXI2APB_34", 0x89000 },
+ { "AXI2APB_35", 0x92000 },
+ { "AXI2APB_4", 0x8b000 },
+ { "AXI2APB_5", 0x8c000 },
+ { "AXI2APB_6", 0x8d000 },
+ { "AXI2APB_7", 0x8e000 },
+ { "AXI2APB_8", 0x8f000 },
+ { "AXI2APB_9", 0x90000 },
+ { "AXI2APB_3", 0x91000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_cbb_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x3a004
+};
+
+static const struct tegra234_slave_lookup tegra234_dce_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
+ .name = "dce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_dce_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct tegra234_slave_lookup tegra234_rce_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
+ .name = "rce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_rce_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct tegra234_slave_lookup tegra234_sce_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
+ .name = "sce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_sce_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const char * const tegra241_master_id[] = {
+ [0x0] = "TZ",
+ [0x1] = "CCPLEX",
+ [0x2] = "CCPMU",
+ [0x3] = "BPMP_FW",
+ [0x4] = "PSC_FW_USER",
+ [0x5] = "PSC_FW_SUPERVISOR",
+ [0x6] = "PSC_FW_MACHINE",
+ [0x7] = "PSC_BOOT",
+ [0x8] = "BPMP_BOOT",
+ [0x9] = "JTAGM_DFT",
+ [0xa] = "CORESIGHT",
+ [0xb] = "GPU",
+ [0xc] = "PEATRANS",
+ [0xd ... 0x3f] = "RSVD"
+};
+
+/*
+ * Possible causes for Slave and Timeout errors.
+ *
+ * SLAVE_ERR:
+ * The slave being accessed responded with an error. A slave can return
+ * an error in various cases: unsupported access, clamp setting when
+ * power gated, register-level firewall (SCR), address hole within the
+ * slave, etc.
+ *
+ * TIMEOUT_ERR:
+ * No response was returned by the slave. This can be due to the slave
+ * being clock gated, under reset or powered down, or due to the slave
+ * being unable to respond because of an internal issue.
+ */
+static const struct tegra_cbb_error tegra241_cbb_errors[] = {
+ {
+ .code = "SLAVE_ERR",
+ .desc = "Slave being accessed responded with an error."
+ }, {
+ .code = "DECODE_ERR",
+ .desc = "Attempt to access an address hole or Reserved region of memory."
+ }, {
+ .code = "FIREWALL_ERR",
+ .desc = "Attempt to access a region which is firewalled."
+ }, {
+ .code = "TIMEOUT_ERR",
+ .desc = "No response returned by slave."
+ }, {
+ .code = "PWRDOWN_ERR",
+ .desc = "Attempt to access a portion of the fabric that is powered down."
+ }, {
+ .code = "UNSUPPORTED_ERR",
+ .desc = "Attempt to access a slave through an unsupported access."
+ }, {
+ .code = "POISON_ERR",
+ .desc = "Slave responds with poison error to indicate error in data."
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "NO_SUCH_ADDRESS_ERR",
+ .desc = "The address belongs to the pri_target range but there is no register "
+ "implemented at the address."
+ }, {
+ .code = "TASK_ERR",
+ .desc = "Attempt to update a PRI task when the current task has still not "
+ "completed."
+ }, {
+ .code = "EXTERNAL_ERR",
+ .desc = "Indicates that an external PRI register access met with an error due to "
+ "any issue in the unit."
+ }, {
+ .code = "INDEX_ERR",
+ .desc = "Applicable to PRI index aperture pair, when the programmed index is "
+ "outside the range defined in the manual."
+ }, {
+ .code = "RESET_ERR",
+ .desc = "Target in Reset Error: Attempt to access a SubPri or external PRI "
+ "register but they are in reset."
+ }, {
+ .code = "REGISTER_RST_ERR",
+ .desc = "Attempt to access a PRI register but the register is partial or "
+ "completely in reset."
+ }, {
+ .code = "POWER_GATED_ERR",
+ .desc = "Returned by external PRI client when the external access goes to a power "
+ "gated domain."
+ }, {
+ .code = "SUBPRI_FS_ERR",
+ .desc = "Subpri is floorswept: Attempt to access a subpri through the main pri "
+ "target but subPri logic is floorswept."
+ }, {
+ .code = "SUBPRI_CLK_OFF_ERR",
+ .desc = "Subpri clock is off: Attempt to access a subpri through the main pri "
+ "target but subPris clock is gated/off."
+ },
+};
+
+static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+ { "CCPLEX", 0x50000 },
+ { "PCIE_C8", 0x51000 },
+ { "PCIE_C9", 0x52000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "AON", 0x5b000 },
+ { "BPMP", 0x5c000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "PSC", 0x5d000 },
+ { "STM", 0x5e000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_10", 0x71000 },
+ { "AXI2APB_11", 0x72000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_14", 0x75000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_17", 0x78000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7a000 },
+ { "AXI2APB_2", 0x7b000 },
+ { "AXI2APB_20", 0x7c000 },
+ { "AXI2APB_4", 0x87000 },
+ { "AXI2APB_5", 0x88000 },
+ { "AXI2APB_6", 0x89000 },
+ { "AXI2APB_7", 0x8a000 },
+ { "AXI2APB_8", 0x8b000 },
+ { "AXI2APB_9", 0x8c000 },
+ { "AXI2APB_3", 0x8d000 },
+ { "AXI2APB_21", 0x7d000 },
+ { "AXI2APB_22", 0x7e000 },
+ { "AXI2APB_23", 0x7f000 },
+ { "AXI2APB_24", 0x80000 },
+ { "AXI2APB_25", 0x81000 },
+ { "AXI2APB_26", 0x82000 },
+ { "AXI2APB_27", 0x83000 },
+ { "AXI2APB_28", 0x84000 },
+ { "PCIE_C4", 0x53000 },
+ { "PCIE_C5", 0x54000 },
+ { "PCIE_C6", 0x55000 },
+ { "PCIE_C7", 0x56000 },
+ { "PCIE_C2", 0x57000 },
+ { "PCIE_C3", 0x58000 },
+ { "PCIE_C0", 0x59000 },
+ { "PCIE_C1", 0x5a000 },
+ { "AXI2APB_29", 0x85000 },
+ { "AXI2APB_30", 0x86000 },
+};
+
+static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_cbb_slave_map,
+ .errors = tegra241_cbb_errors,
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x40004,
+};
+
+static const struct tegra234_slave_lookup tegra241_bpmp_slave_map[] = {
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+ { "AXI2APB", 0x00000 },
+ { "DBB0", 0x17000 },
+ { "DBB1", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra241_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_bpmp_slave_map,
+ .errors = tegra241_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct of_device_id tegra234_cbb_dt_ids[] = {
+ { .compatible = "nvidia,tegra234-cbb-fabric", .data = &tegra234_cbb_fabric },
+ { .compatible = "nvidia,tegra234-aon-fabric", .data = &tegra234_aon_fabric },
+ { .compatible = "nvidia,tegra234-bpmp-fabric", .data = &tegra234_bpmp_fabric },
+ { .compatible = "nvidia,tegra234-dce-fabric", .data = &tegra234_dce_fabric },
+ { .compatible = "nvidia,tegra234-rce-fabric", .data = &tegra234_rce_fabric },
+ { .compatible = "nvidia,tegra234-sce-fabric", .data = &tegra234_sce_fabric },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tegra234_cbb_dt_ids);
+
+struct tegra234_cbb_acpi_uid {
+ const char *hid;
+ const char *uid;
+ const struct tegra234_cbb_fabric *fabric;
+};
+
+static const struct tegra234_cbb_acpi_uid tegra234_cbb_acpi_uids[] = {
+ { "NVDA1070", "1", &tegra241_cbb_fabric },
+ { "NVDA1070", "2", &tegra241_bpmp_fabric },
+ { },
+};
+
+static const struct
+tegra234_cbb_fabric *tegra234_cbb_acpi_get_fabric(struct acpi_device *adev)
+{
+ const struct tegra234_cbb_acpi_uid *entry;
+
+ for (entry = tegra234_cbb_acpi_uids; entry->hid; entry++) {
+ if (acpi_dev_hid_uid_match(adev, entry->hid, entry->uid))
+ return entry->fabric;
+ }
+
+ return NULL;
+}
+
+static const struct acpi_device_id tegra241_cbb_acpi_ids[] = {
+ { "NVDA1070" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, tegra241_cbb_acpi_ids);
+
+static int tegra234_cbb_probe(struct platform_device *pdev)
+{
+ const struct tegra234_cbb_fabric *fabric;
+ struct tegra234_cbb *cbb;
+ unsigned long flags = 0;
+ int err;
+
+ if (pdev->dev.of_node) {
+ fabric = of_device_get_match_data(&pdev->dev);
+ } else {
+ struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ fabric = tegra234_cbb_acpi_get_fabric(device);
+ if (!fabric) {
+ dev_err(&pdev->dev, "no device match found\n");
+ return -ENODEV;
+ }
+ }
+
+ cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
+ if (!cbb)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbb->base.node);
+ cbb->base.ops = &tegra234_cbb_ops;
+ cbb->base.dev = &pdev->dev;
+ cbb->fabric = fabric;
+
+ cbb->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &cbb->res);
+ if (IS_ERR(cbb->regs))
+ return PTR_ERR(cbb->regs);
+
+ err = tegra_cbb_get_irq(pdev, NULL, &cbb->sec_irq);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, cbb);
+
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ /* set ERD bit to mask SError and generate interrupt to report error */
+ if (cbb->fabric->off_mask_erd)
+ tegra234_cbb_mask_serror(cbb);
+
+ return tegra_cbb_register(&cbb->base);
+}
+
+static int tegra234_cbb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __maybe_unused tegra234_cbb_resume_noirq(struct device *dev)
+{
+ struct tegra234_cbb *cbb = dev_get_drvdata(dev);
+
+ tegra234_cbb_error_enable(&cbb->base);
+
+ dev_dbg(dev, "%s resumed\n", cbb->fabric->name);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra234_cbb_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, tegra234_cbb_resume_noirq)
+};
+
+static struct platform_driver tegra234_cbb_driver = {
+ .probe = tegra234_cbb_probe,
+ .remove = tegra234_cbb_remove,
+ .driver = {
+ .name = "tegra234-cbb",
+ .of_match_table = tegra234_cbb_dt_ids,
+ .acpi_match_table = tegra241_cbb_acpi_ids,
+ .pm = &tegra234_cbb_pm,
+ },
+};
+
+static int __init tegra234_cbb_init(void)
+{
+ return platform_driver_register(&tegra234_cbb_driver);
+}
+pure_initcall(tegra234_cbb_init);
+
+static void __exit tegra234_cbb_exit(void)
+{
+ platform_driver_unregister(&tegra234_cbb_driver);
+}
+module_exit(tegra234_cbb_exit);
+
+MODULE_DESCRIPTION("Control Backbone 2.0 error handling driver for Tegra234");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index b0a8405dbdb1..6542267a224d 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -568,6 +568,7 @@ static int __init tegra_init_fuse(void)
np = of_find_matching_node(NULL, car_match);
if (np) {
void __iomem *base = of_iomap(np, 0);
+ of_node_put(np);
if (base) {
tegra_enable_fuse_clk(base);
iounmap(base);
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 590c862538d0..3351bd872ab2 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -16,12 +16,16 @@
#define FUSE_SKU_INFO 0x10
+#define ERD_ERR_CONFIG 0x120c
+#define ERD_MASK_INBAND_ERR 0x1
+
#define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4
#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \
(0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
(0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+static void __iomem *apbmisc_base;
static bool long_ram_code;
static u32 strapping;
static u32 chipid;
@@ -93,6 +97,28 @@ u32 tegra_read_ram_code(void)
}
EXPORT_SYMBOL_GPL(tegra_read_ram_code);
+/*
+ * This function sets the ERD (Error Response Disable) bit.
+ * This masks inband errors so that an OKAY response is always
+ * sent from the CBB to the master which caused the error.
+ */
+int tegra194_miscreg_mask_serror(void)
+{
+ if (!apbmisc_base)
+ return -EPROBE_DEFER;
+
+ if (!of_machine_is_compatible("nvidia,tegra194")) {
+ WARN(1, "Only supported for Tegra194 devices!\n");
+ return -EOPNOTSUPP;
+ }
+
+ writel_relaxed(ERD_MASK_INBAND_ERR,
+ apbmisc_base + ERD_ERR_CONFIG);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra194_miscreg_mask_serror);
+
static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra20-apbmisc", },
{ .compatible = "nvidia,tegra186-misc", },
@@ -134,7 +160,7 @@ void __init tegra_init_revision(void)
void __init tegra_init_apbmisc(void)
{
- void __iomem *apbmisc_base, *strapping_base;
+ void __iomem *strapping_base;
struct resource apbmisc, straps;
struct device_node *np;
@@ -182,12 +208,12 @@ void __init tegra_init_apbmisc(void)
*/
if (of_address_to_resource(np, 0, &apbmisc) < 0) {
pr_err("failed to get APBMISC registers\n");
- return;
+ goto put;
}
if (of_address_to_resource(np, 1, &straps) < 0) {
pr_err("failed to get strapping options registers\n");
- return;
+ goto put;
}
}
@@ -196,7 +222,6 @@ void __init tegra_init_apbmisc(void)
pr_err("failed to map APBMISC registers\n");
} else {
chipid = readl_relaxed(apbmisc_base + 4);
- iounmap(apbmisc_base);
}
strapping_base = ioremap(straps.start, resource_size(&straps));
@@ -208,4 +233,7 @@ void __init tegra_init_apbmisc(void)
}
long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+
+put:
+ of_node_put(np);
}
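tegra194_miscreg_mask_serror() defers with -EPROBE_DEFER until tegra_init_apbmisc() has mapped the APBMISC registers and returns -EOPNOTSUPP on anything other than Tegra194. A minimal caller sketch, assuming a hypothetical consumer driver (only tegra194_miscreg_mask_serror() itself comes from this change):

static int example_cbb_probe(struct platform_device *pdev)
{
	int err;

	/* ask the APBMISC layer to mask inband SError responses */
	err = tegra194_miscreg_mask_serror();
	if (err == -EPROBE_DEFER)
		return err;		/* APBMISC registers not mapped yet */
	if (err)
		dev_warn(&pdev->dev, "cannot mask inband errors: %d\n", err);

	return 0;
}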
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 6a4b8f7e7948..678e8bc8a45d 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -296,6 +296,17 @@ struct tegra_wake_event {
} gpio;
};
+#define TEGRA_WAKE_SIMPLE(_name, _id) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .irq = 0, \
+ .gpio = { \
+ .instance = UINT_MAX, \
+ .pin = UINT_MAX, \
+ }, \
+ }
+
#define TEGRA_WAKE_IRQ(_name, _id, _irq) \
{ \
.name = _name, \
@@ -2239,6 +2250,7 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < soc->num_wake_events; i++) {
const struct tegra_wake_event *event = &soc->wake_events[i];
+ /* IRQ and simple wake events */
if (fwspec->param_count == 2) {
struct irq_fwspec spec;
@@ -2251,6 +2263,12 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
if (err < 0)
break;
+ /* simple hierarchies stop at the PMC level */
+ if (event->irq == 0) {
+ err = irq_domain_disconnect_hierarchy(domain->parent, virq);
+ break;
+ }
+
spec.fwnode = &pmc->dev->of_node->fwnode;
spec.param_count = 3;
spec.param[0] = GIC_SPI;
@@ -2263,6 +2281,7 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
break;
}
+ /* GPIO wake events */
if (fwspec->param_count == 3) {
if (event->gpio.instance != fwspec->param[0] ||
event->gpio.pin != fwspec->param[1])
@@ -2274,7 +2293,7 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
/* GPIO hierarchies stop at the PMC level */
if (!err && domain->parent)
- err = irq_domain_disconnect_hierarchy(domain->parent,
+ err = irq_domain_disconnect_hierarchy(domain->parent,
virq);
break;
}
@@ -2885,17 +2904,10 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->scratch = base;
}
- pmc->clk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(pmc->clk)) {
- err = PTR_ERR(pmc->clk);
-
- if (err != -ENOENT) {
- dev_err(&pdev->dev, "failed to get pclk: %d\n", err);
- return err;
- }
-
- pmc->clk = NULL;
- }
+ pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(pmc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pmc->clk),
+ "failed to get pclk\n");
/*
* PMC should be last resort for restarting since it soft-resets
@@ -3757,6 +3769,13 @@ static const struct tegra_wake_event tegra194_wake_events[] = {
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
+ TEGRA_WAKE_SIMPLE("usb3-port-0", 76),
+ TEGRA_WAKE_SIMPLE("usb3-port-1", 77),
+ TEGRA_WAKE_SIMPLE("usb3-port-2-3", 78),
+ TEGRA_WAKE_SIMPLE("usb2-port-0", 79),
+ TEGRA_WAKE_SIMPLE("usb2-port-1", 80),
+ TEGRA_WAKE_SIMPLE("usb2-port-2", 81),
+ TEGRA_WAKE_SIMPLE("usb2-port-3", 82),
};
static const struct tegra_pmc_soc tegra194_pmc_soc = {
@@ -4025,7 +4044,7 @@ static int __init tegra_pmc_early_init(void)
return -ENXIO;
}
- if (np) {
+ if (of_device_is_available(np)) {
pmc->soc = match->data;
if (pmc->soc->maybe_tz_only)
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 8d4000664fa3..d95b07896a3e 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -298,6 +298,38 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
}
/**
+ * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
+ * @bus: SDW bus
+ * @sync_delay: Delay before reading status
+ */
+void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
+{
+ u32 status;
+
+ if (!bus->ops->read_ping_status)
+ return;
+
+ /*
+ * wait for peripheral to sync if desired. 10-15ms should be more than
+ * enough in most cases.
+ */
+ if (sync_delay)
+ usleep_range(10000, 15000);
+
+ mutex_lock(&bus->msg_lock);
+
+ status = bus->ops->read_ping_status(bus);
+
+ mutex_unlock(&bus->msg_lock);
+
+ if (!status)
+ dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
+ else
+ dev_dbg(bus->dev, "PING status: %#x\n", status);
+}
+EXPORT_SYMBOL(sdw_show_ping_status);
+
+/**
* sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
* @bus: SDW bus
* @msg: SDW message to be xfered
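sdw_show_ping_status() only needs the bus pointer and an optional settling delay, so a peripheral driver can call it directly when debugging attachment issues. A hedged usage sketch, assuming a hypothetical codec callback (only sdw_show_ping_status() is from this change; struct sdw_slave provides the bus pointer):

static int my_codec_io_init(struct sdw_slave *slave)
{
	/*
	 * Request the 10-15 ms sync delay so a freshly re-enumerated
	 * peripheral has time to show up as attached in the PING
	 * frames, then log the raw status word (or warn if nothing
	 * is attached).
	 */
	sdw_show_ping_status(slave->bus, true);

	return 0;
}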
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 4fbb19557f5e..615b0b63a3e1 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -756,6 +756,14 @@ cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num)
}
EXPORT_SYMBOL(cdns_reset_page_addr);
+u32 cdns_read_ping_status(struct sdw_bus *bus)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+
+ return cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+}
+EXPORT_SYMBOL(cdns_read_ping_status);
+
/*
* IRQ handling
*/
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 595d72c15d97..ca9e805bab88 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -177,6 +177,8 @@ enum sdw_command_response
cdns_xfer_msg_defer(struct sdw_bus *bus,
struct sdw_msg *msg, struct sdw_defer *defer);
+u32 cdns_read_ping_status(struct sdw_bus *bus);
+
int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 89d1d0d021fc..a5965e8827b9 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1262,6 +1262,7 @@ static struct sdw_master_ops sdw_intel_ops = {
.set_bus_conf = cdns_bus_conf,
.pre_bank_switch = intel_pre_bank_switch,
.post_bank_switch = intel_post_bank_switch,
+ .read_ping_status = cdns_read_ping_status,
};
static int intel_init(struct sdw_intel *sdw)
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 9df970eeca45..3a992a6478c3 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -169,7 +169,7 @@ struct qcom_swrm_ctrl {
u8 wcmd_id;
struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
- enum sdw_slave_status status[SDW_MAX_DEVICES];
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
u32 slave_status;
@@ -420,7 +420,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
- for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
+ for (dev_num = 0; dev_num <= SDW_MAX_DEVICES; dev_num++) {
status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
@@ -440,7 +440,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
ctrl->slave_status = val;
- for (i = 0; i < SDW_MAX_DEVICES; i++) {
+ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
u32 s;
s = (val >> (i * 2));
@@ -1356,10 +1356,6 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ctrl->bus.clk_stop_timeout = 300;
- ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr");
- if (IS_ERR(ctrl->audio_cgcr))
- dev_err(dev, "Failed to get audio_cgcr reset required for soundwire-v1.6.0\n");
-
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
goto err_clk;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index e32f6a2058ae..d1bb62f7368b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -591,6 +591,15 @@ config SPI_MICROCHIP_CORE
PolarFire SoC.
If built as a module, it will be called spi-microchip-core.
+config SPI_MICROCHIP_CORE_QSPI
+ tristate "Microchip FPGA QSPI controllers"
+ depends on SPI_MASTER
+ help
+ This enables the QSPI driver for Microchip FPGA QSPI controllers.
+ Say Y or M here if you want to use the QSPI controllers on
+ PolarFire SoC.
+ If built as a module, it will be called spi-microchip-core-qspi.
+
config SPI_MT65XX
tristate "MediaTek SPI controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 15d2f3835e45..4b34e855c841 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o
+obj-$(CONFIG_SPI_MICROCHIP_CORE_QSPI) += spi-microchip-core-qspi.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 08df4f8d0531..e23121456c70 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -36,9 +36,17 @@
#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
-/* M_CMD OP codes for SPI */
-#define AMD_SPI_XFER_TX 1
-#define AMD_SPI_XFER_RX 2
+#define AMD_SPI_ENA_REG 0x20
+#define AMD_SPI_ALT_SPD_SHIFT 20
+#define AMD_SPI_ALT_SPD_MASK GENMASK(23, AMD_SPI_ALT_SPD_SHIFT)
+#define AMD_SPI_SPI100_SHIFT 0
+#define AMD_SPI_SPI100_MASK GENMASK(AMD_SPI_SPI100_SHIFT, AMD_SPI_SPI100_SHIFT)
+#define AMD_SPI_SPEED_REG 0x6C
+#define AMD_SPI_SPD7_SHIFT 8
+#define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT)
+
+#define AMD_SPI_MAX_HZ 100000000
+#define AMD_SPI_MIN_HZ 800000
/**
* enum amd_spi_versions - SPI controller versions
@@ -50,14 +58,41 @@ enum amd_spi_versions {
AMD_SPI_V2,
};
+enum amd_spi_speed {
+ F_66_66MHz,
+ F_33_33MHz,
+ F_22_22MHz,
+ F_16_66MHz,
+ F_100MHz,
+ F_800KHz,
+ SPI_SPD7,
+ F_50MHz = 0x4,
+ F_4MHz = 0x32,
+ F_3_17MHz = 0x3F
+};
+
+/**
+ * struct amd_spi_freq - Matches device speed with values to write in regs
+ * @speed_hz: Device frequency
+ * @enable_val: Value to be written to "enable register"
+ * @spd7_val: Some frequencies require a value to be written to the SPISPEED register
+ */
+struct amd_spi_freq {
+ u32 speed_hz;
+ u32 enable_val;
+ u32 spd7_val;
+};
+
/**
* struct amd_spi - SPI driver instance
* @io_remap_addr: Start address of the SPI controller registers
* @version: SPI controller hardware version
+ * @speed_hz: Device frequency
*/
struct amd_spi {
void __iomem *io_remap_addr;
enum amd_spi_versions version;
+ unsigned int speed_hz;
};
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
@@ -189,65 +224,125 @@ static int amd_spi_master_setup(struct spi_device *spi)
return 0;
}
+static const struct amd_spi_freq amd_spi_freq[] = {
+ { AMD_SPI_MAX_HZ, F_100MHz, 0},
+ { 66660000, F_66_66MHz, 0},
+ { 50000000, SPI_SPD7, F_50MHz},
+ { 33330000, F_33_33MHz, 0},
+ { 22220000, F_22_22MHz, 0},
+ { 16660000, F_16_66MHz, 0},
+ { 4000000, SPI_SPD7, F_4MHz},
+ { 3170000, SPI_SPD7, F_3_17MHz},
+ { AMD_SPI_MIN_HZ, F_800KHz, 0},
+};
+
+static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+{
+ unsigned int i, spd7_val, alt_spd;
+
+ if (speed_hz < AMD_SPI_MIN_HZ)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+ if (speed_hz >= amd_spi_freq[i].speed_hz)
+ break;
+
+ if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
+ return 0;
+
+ amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
+
+ alt_spd = (amd_spi_freq[i].enable_val << AMD_SPI_ALT_SPD_SHIFT)
+ & AMD_SPI_ALT_SPD_MASK;
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, alt_spd,
+ AMD_SPI_ALT_SPD_MASK);
+
+ if (amd_spi->speed_hz == AMD_SPI_MAX_HZ)
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, 1,
+ AMD_SPI_SPI100_MASK);
+
+ if (amd_spi_freq[i].spd7_val) {
+ spd7_val = (amd_spi_freq[i].spd7_val << AMD_SPI_SPD7_SHIFT)
+ & AMD_SPI_SPD7_MASK;
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
+ AMD_SPI_SPD7_MASK);
+ }
+
+ return 0;
+}
+
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
struct spi_master *master,
struct spi_message *message)
{
struct spi_transfer *xfer = NULL;
- u8 cmd_opcode;
+ struct spi_device *spi = message->spi;
+ u8 cmd_opcode = 0, fifo_pos = AMD_SPI_FIFO_BASE;
u8 *buf = NULL;
- u32 m_cmd = 0;
u32 i = 0;
u32 tx_len = 0, rx_len = 0;
list_for_each_entry(xfer, &message->transfers,
transfer_list) {
- if (xfer->rx_buf)
- m_cmd = AMD_SPI_XFER_RX;
- if (xfer->tx_buf)
- m_cmd = AMD_SPI_XFER_TX;
+ if (xfer->speed_hz)
+ amd_set_spi_freq(amd_spi, xfer->speed_hz);
+ else
+ amd_set_spi_freq(amd_spi, spi->max_speed_hz);
- if (m_cmd & AMD_SPI_XFER_TX) {
+ if (xfer->tx_buf) {
buf = (u8 *)xfer->tx_buf;
- tx_len = xfer->len - 1;
- cmd_opcode = *(u8 *)xfer->tx_buf;
- buf++;
- amd_spi_set_opcode(amd_spi, cmd_opcode);
+ if (!tx_len) {
+ cmd_opcode = *(u8 *)xfer->tx_buf;
+ buf++;
+ xfer->len--;
+ }
+ tx_len += xfer->len;
/* Write data into the FIFO. */
- for (i = 0; i < tx_len; i++) {
- iowrite8(buf[i], ((u8 __iomem *)amd_spi->io_remap_addr +
- AMD_SPI_FIFO_BASE + i));
- }
+ for (i = 0; i < xfer->len; i++)
+ amd_spi_writereg8(amd_spi, fifo_pos + i, buf[i]);
- amd_spi_set_tx_count(amd_spi, tx_len);
- amd_spi_clear_fifo_ptr(amd_spi);
- /* Execute command */
- amd_spi_execute_opcode(amd_spi);
- }
- if (m_cmd & AMD_SPI_XFER_RX) {
- /*
- * Store no. of bytes to be received from
- * FIFO
- */
- rx_len = xfer->len;
- buf = (u8 *)xfer->rx_buf;
- amd_spi_set_rx_count(amd_spi, rx_len);
- amd_spi_clear_fifo_ptr(amd_spi);
- /* Execute command */
- amd_spi_execute_opcode(amd_spi);
- amd_spi_busy_wait(amd_spi);
- /* Read data from FIFO to receive buffer */
- for (i = 0; i < rx_len; i++)
- buf[i] = amd_spi_readreg8(amd_spi, AMD_SPI_FIFO_BASE + tx_len + i);
+ fifo_pos += xfer->len;
}
+
+ /* Store no. of bytes to be received from FIFO */
+ if (xfer->rx_buf)
+ rx_len += xfer->len;
+ }
+
+ if (!buf) {
+ message->status = -EINVAL;
+ goto fin_msg;
+ }
+
+ amd_spi_set_opcode(amd_spi, cmd_opcode);
+ amd_spi_set_tx_count(amd_spi, tx_len);
+ amd_spi_set_rx_count(amd_spi, rx_len);
+
+ /* Execute command */
+ message->status = amd_spi_execute_opcode(amd_spi);
+ if (message->status)
+ goto fin_msg;
+
+ if (rx_len) {
+ message->status = amd_spi_busy_wait(amd_spi);
+ if (message->status)
+ goto fin_msg;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list)
+ if (xfer->rx_buf) {
+ buf = (u8 *)xfer->rx_buf;
+ /* Read data from FIFO to receive buffer */
+ for (i = 0; i < xfer->len; i++)
+ buf[i] = amd_spi_readreg8(amd_spi, fifo_pos + i);
+ fifo_pos += xfer->len;
+ }
}
/* Update statistics */
message->actual_length = tx_len + rx_len + 1;
- /* complete the transaction */
- message->status = 0;
+fin_msg:
switch (amd_spi->version) {
case AMD_SPI_V1:
break;
@@ -260,7 +355,7 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
spi_finalize_current_message(master);
- return 0;
+ return message->status;
}
static int amd_spi_master_transfer(struct spi_master *master,
@@ -275,9 +370,7 @@ static int amd_spi_master_transfer(struct spi_master *master,
* Extract spi_transfers from the spi message and
* program the controller.
*/
- amd_spi_fifo_xfer(amd_spi, master, msg);
-
- return 0;
+ return amd_spi_fifo_xfer(amd_spi, master, msg);
}
static size_t amd_spi_max_transfer_size(struct spi_device *spi)
@@ -312,6 +405,8 @@ static int amd_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
master->mode_bits = 0;
master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->max_speed_hz = AMD_SPI_MAX_HZ;
+ master->min_speed_hz = AMD_SPI_MIN_HZ;
master->setup = amd_spi_master_setup;
master->transfer_one_message = amd_spi_master_transfer;
master->max_transfer_size = amd_spi_max_transfer_size;
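The amd_spi_freq[] table is ordered fastest-first, so amd_set_spi_freq() picks the first entry whose rate does not exceed the request, and anything below AMD_SPI_MIN_HZ is rejected. A stand-alone sketch of just that selection step, with the rates copied from the table and the register programming left out:

#include <stdint.h>
#include <stdio.h>

static const uint32_t amd_rates_hz[] = {
	100000000, 66660000, 50000000, 33330000, 22220000,
	16660000, 4000000, 3170000, 800000,
};

static int pick_rate(uint32_t speed_hz, uint32_t *out)
{
	unsigned int i;

	if (speed_hz < 800000)		/* AMD_SPI_MIN_HZ */
		return -1;

	for (i = 0; i < sizeof(amd_rates_hz) / sizeof(amd_rates_hz[0]); i++)
		if (speed_hz >= amd_rates_hz[i])
			break;

	*out = amd_rates_hz[i];
	return 0;
}

int main(void)
{
	uint32_t rate;

	if (!pick_rate(24000000, &rate))
		printf("24 MHz request -> %u Hz programmed\n", rate); /* 22220000 */

	return 0;
}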
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index 3e891bf22470..a334e89add86 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -736,10 +736,8 @@ static int aspeed_spi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
aspi->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(aspi->regs)) {
- dev_err(dev, "missing AHB register window\n");
+ if (IS_ERR(aspi->regs))
return PTR_ERR(aspi->regs);
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
aspi->ahb_base = devm_ioremap_resource(dev, res);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 267342dfa738..2dcbe166df63 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
/* sample LSB (from slave) on leading edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
setsck(spi, cpol);
}
return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* sample LSB (from slave) on trailing edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
}
return word;
}
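The rxbit change matters because the loop counter bits is decremented on every pass: shifting the sampled bit by (bits - 1) would move the insertion point each iteration, while the word is also shifted right once per bit. With a fixed rxbit the new sample always lands in the top position and is shifted down into place. A stand-alone sketch of the LSB-first assembly, with the MISO samples faked as an array:

#include <stdint.h>
#include <stdio.h>

static uint32_t assemble_lsb_first(const int *miso, unsigned int bits)
{
	uint8_t rxbit = bits - 1;	/* fixed insertion position */
	uint32_t word = 0;
	unsigned int i = 0;

	for (; bits; bits--, i++) {
		word >>= 1;				/* make room at the top */
		word |= (uint32_t)miso[i] << rxbit;	/* sample new bit */
	}

	return word;
}

int main(void)
{
	/* bits sampled LSB first: 1,0,1,1,0,0,1,0 -> 0x4d */
	int miso[8] = { 1, 0, 1, 1, 0, 0, 1, 0 };

	printf("0x%02x\n", assemble_lsb_first(miso, 8));
	return 0;
}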
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 72b1a5a2298c..447230547945 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -39,6 +39,7 @@
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+#define CQSPI_SLOW_SRAM BIT(4)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -87,6 +88,7 @@ struct cqspi_st {
bool use_dma_read;
u32 pd_dev_id;
bool wr_completion;
+ bool slow_sram;
};
struct cqspi_driver_platdata {
@@ -333,7 +335,10 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
}
}
- irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else if (!cqspi->slow_sram)
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else
+ irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
@@ -673,7 +678,18 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
- writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ /*
+ * On the SoCFPGA platform, reading the SRAM is slow due to a
+ * hardware limitation and causes a read interrupt storm on the
+ * CPU, so enable only the watermark interrupt here and disable
+ * all read interrupts later, as we want to run the "bytes to
+ * read" loop with read interrupts disabled for max performance.
+ */
+
+ if (!cqspi->slow_sram)
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ else
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
@@ -684,6 +700,13 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
+ /*
+ * Disable all read interrupts until we are out of the
+ * "bytes to read" loop.
+ */
+ if (cqspi->slow_sram)
+ writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
if (ret && bytes_to_read == 0) {
@@ -715,8 +738,11 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
}
- if (remaining > 0)
+ if (remaining > 0) {
reinit_completion(&cqspi->transfer_complete);
+ if (cqspi->slow_sram)
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+ }
}
/* Check indirect done status */
@@ -1619,7 +1645,7 @@ static int cqspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- return ret;
+ goto probe_pm_failed;
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
@@ -1667,6 +1693,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
cqspi->wr_completion = false;
+ if (ddata->quirks & CQSPI_SLOW_SRAM)
+ cqspi->slow_sram = true;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0"))
@@ -1712,6 +1740,7 @@ probe_reset_failed:
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
pm_runtime_put_sync(dev);
+probe_pm_failed:
pm_runtime_disable(dev);
return ret;
}
@@ -1779,7 +1808,9 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
+ .quirks = CQSPI_DISABLE_DAC_MODE
+ | CQSPI_NO_SUPPORT_WR_COMPLETION
+ | CQSPI_SLOW_SRAM,
};
static const struct cqspi_driver_platdata versal_ospi = {
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 3ab19be83095..9e187f9c6c95 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -565,10 +565,8 @@ static int cdns_xspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
- if (IS_ERR(cdns_xspi->sdmabase)) {
- dev_err(dev, "Failed to remap SDMA address\n");
+ if (IS_ERR(cdns_xspi->sdmabase))
return PTR_ERR(cdns_xspi->sdmabase);
- }
cdns_xspi->sdmasize = resource_size(res);
cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
index c06553416123..3fb89dee595e 100644
--- a/drivers/spi/spi-dw-bt1.c
+++ b/drivers/spi/spi-dw-bt1.c
@@ -293,8 +293,10 @@ static int dw_spi_bt1_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = dw_spi_add_host(&pdev->dev, dws);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
goto err_disable_clk;
+ }
platform_set_drvdata(pdev, dwsbt1);
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index f87d97ccd2d6..99edddf9958b 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -955,7 +955,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
ret = spi_register_controller(master);
if (ret) {
- dev_err(&master->dev, "problem registering spi master\n");
+ dev_err_probe(dev, ret, "problem registering spi master\n");
goto err_dma_exit;
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index fd004c9db9dc..a33e547b7d39 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1294,8 +1294,7 @@ static int dspi_probe(struct platform_device *pdev)
else
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_ctlr_put;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 19b1f3d881b0..e8c1c8a4c6c8 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -855,8 +855,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
init_completion(&fsl_lpspi->xfer_done);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(fsl_lpspi->base)) {
ret = PTR_ERR(fsl_lpspi->base);
goto out_controller_put;
@@ -912,7 +911,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
- dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
+ dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
goto free_dma;
}
@@ -947,11 +946,8 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
{
- int ret;
-
pinctrl_pm_select_sleep_state(dev);
- ret = pm_runtime_force_suspend(dev);
- return ret;
+ return pm_runtime_force_suspend(dev);
}
static int __maybe_unused fsl_lpspi_resume(struct device *dev)
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 46ae46a944c5..85cc71ba624a 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -867,8 +867,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, q);
/* find the resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
- q->iobase = devm_ioremap_resource(dev, res);
+ q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
if (IS_ERR(q->iobase)) {
ret = PTR_ERR(q->iobase);
goto err_put_ctrl;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index bdf94cc7be1a..731624f157fc 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -111,32 +111,6 @@ static void fsl_spi_change_mode(struct spi_device *spi)
local_irq_restore(flags);
}
-static void fsl_spi_chipselect(struct spi_device *spi, int value)
-{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_platform_data *pdata;
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
-
- pdata = spi->dev.parent->parent->platform_data;
-
- if (value == BITBANG_CS_INACTIVE) {
- if (pdata->cs_control)
- pdata->cs_control(spi, false);
- }
-
- if (value == BITBANG_CS_ACTIVE) {
- mpc8xxx_spi->rx_shift = cs->rx_shift;
- mpc8xxx_spi->tx_shift = cs->tx_shift;
- mpc8xxx_spi->get_rx = cs->get_rx;
- mpc8xxx_spi->get_tx = cs->get_tx;
-
- fsl_spi_change_mode(spi);
-
- if (pdata->cs_control)
- pdata->cs_control(spi, true);
- }
-}
-
static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
int bits_per_word, int msb_first)
{
@@ -354,15 +328,11 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
return mpc8xxx_spi->count;
}
-static int fsl_spi_do_one_msg(struct spi_master *master,
- struct spi_message *m)
+static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *m)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
- struct spi_device *spi = m->spi;
- struct spi_transfer *t, *first;
- unsigned int cs_change;
- const int nsecs = 50;
- int status, last_bpw;
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *t;
/*
* In CPU mode, optimize large byte transfers to use larger
@@ -378,62 +348,30 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
t->bits_per_word = 16;
}
}
+ return 0;
+}
- /* Don't allow changes if CS is active */
- cs_change = 1;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (cs_change)
- first = t;
- cs_change = t->cs_change;
- if (first->speed_hz != t->speed_hz) {
- dev_err(&spi->dev,
- "speed_hz cannot change while CS is active\n");
- return -EINVAL;
- }
- }
-
- last_bpw = -1;
- cs_change = 1;
- status = -EINVAL;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (cs_change || last_bpw != t->bits_per_word)
- status = fsl_spi_setup_transfer(spi, t);
- if (status < 0)
- break;
- last_bpw = t->bits_per_word;
-
- if (cs_change) {
- fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
- ndelay(nsecs);
- }
- cs_change = t->cs_change;
- if (t->len)
- status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
- if (status) {
- status = -EMSGSIZE;
- break;
- }
- m->actual_length += t->len;
-
- spi_transfer_delay_exec(t);
-
- if (cs_change) {
- ndelay(nsecs);
- fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
- }
+static int fsl_spi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int status;
- m->status = status;
+ status = fsl_spi_setup_transfer(spi, t);
+ if (status < 0)
+ return status;
+ if (t->len)
+ status = fsl_spi_bufs(spi, t, !!t->tx_dma || !!t->rx_dma);
+ if (status > 0)
+ return -EMSGSIZE;
- if (status || !cs_change) {
- ndelay(nsecs);
- fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
- }
+ return status;
+}
- fsl_spi_setup_transfer(spi, NULL);
- spi_finalize_current_message(master);
- return 0;
+static int fsl_spi_unprepare_message(struct spi_controller *controller,
+ struct spi_message *msg)
+{
+ return fsl_spi_setup_transfer(msg->spi, NULL);
}
static int fsl_spi_setup(struct spi_device *spi)
@@ -482,9 +420,6 @@ static int fsl_spi_setup(struct spi_device *spi)
return retval;
}
- /* Initialize chipselect - might be active for SPI_CS_HIGH mode */
- fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
-
return 0;
}
@@ -557,9 +492,7 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
u32 slvsel;
u16 cs = spi->chip_select;
- if (spi->cs_gpiod) {
- gpiod_set_value(spi->cs_gpiod, on);
- } else if (cs < mpc8xxx_spi->native_chipselects) {
+ if (cs < mpc8xxx_spi->native_chipselects) {
slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
@@ -568,7 +501,6 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
static void fsl_spi_grlib_probe(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
@@ -588,7 +520,18 @@ static void fsl_spi_grlib_probe(struct device *dev)
mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
}
master->num_chipselect = mpc8xxx_spi->native_chipselects;
- pdata->cs_control = fsl_spi_grlib_cs_control;
+ master->set_cs = fsl_spi_grlib_cs_control;
+}
+
+static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+{
+ struct device *dev = spi->dev.parent->parent;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+
+ if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
+ return;
+ iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
}
static struct spi_master *fsl_spi_probe(struct device *dev,
@@ -613,8 +556,11 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
master->setup = fsl_spi_setup;
master->cleanup = fsl_spi_cleanup;
- master->transfer_one_message = fsl_spi_do_one_msg;
+ master->prepare_message = fsl_spi_prepare_message;
+ master->transfer_one = fsl_spi_transfer_one;
+ master->unprepare_message = fsl_spi_unprepare_message;
master->use_gpio_descriptors = true;
+ master->set_cs = fsl_spi_cs_control;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->max_bits_per_word = 32;
@@ -688,21 +634,6 @@ err:
return ERR_PTR(ret);
}
-static void fsl_spi_cs_control(struct spi_device *spi, bool on)
-{
- if (spi->cs_gpiod) {
- gpiod_set_value(spi->cs_gpiod, on);
- } else {
- struct device *dev = spi->dev.parent->parent;
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
-
- if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
- return;
- iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
- }
-}
-
static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@@ -744,12 +675,10 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
ret = gpiod_count(dev, "cs");
if (ret < 0)
ret = 0;
- if (ret == 0 && !spisel_boot) {
+ if (ret == 0 && !spisel_boot)
pdata->max_chipselect = 1;
- } else {
+ else
pdata->max_chipselect = ret + spisel_boot;
- pdata->cs_control = fsl_spi_cs_control;
- }
}
ret = of_address_to_resource(np, 0, &mem);
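With the switch from transfer_one_message() to the prepare_message()/transfer_one()/set_cs() hooks, the chip-select toggling, inter-transfer delays and cs_change handling that fsl_spi_do_one_msg() used to do by hand are left to the SPI core. A simplified, hedged outline of the order in which the core calls these hooks (illustration only, not the real spi.c pump; cs_change and polarity handling omitted):

static int message_pump_outline(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *t;
	int ret;

	ret = ctlr->prepare_message(ctlr, msg);	/* fsl_spi_prepare_message() */
	if (ret)
		return ret;

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ctlr->set_cs(msg->spi, true);	/* assert CS, e.g. fsl_spi_cs_control() */
		ret = ctlr->transfer_one(ctlr, msg->spi, t);
		if (ret)
			break;
		spi_transfer_delay_exec(t);
		ctlr->set_cs(msg->spi, false);	/* deassert CS */
	}

	return ctlr->unprepare_message(ctlr, msg); /* fsl_spi_unprepare_message() */
}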
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
index 9ea355f7d64f..15b110183839 100644
--- a/drivers/spi/spi-gxp.c
+++ b/drivers/spi/spi-gxp.c
@@ -254,7 +254,6 @@ static int gxp_spifi_probe(struct platform_device *pdev)
const struct gxp_spi_data *data;
struct spi_controller *ctlr;
struct gxp_spi *spifi;
- struct resource *res;
int ret;
data = of_device_get_match_data(&pdev->dev);
@@ -269,18 +268,15 @@ static int gxp_spifi_probe(struct platform_device *pdev)
spifi->data = data;
spifi->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spifi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ spifi->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spifi->reg_base))
return PTR_ERR(spifi->reg_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- spifi->dat_base = devm_ioremap_resource(&pdev->dev, res);
+ spifi->dat_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(spifi->dat_base))
return PTR_ERR(spifi->dat_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- spifi->dir_base = devm_ioremap_resource(&pdev->dev, res);
+ spifi->dir_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(spifi->dir_base))
return PTR_ERR(spifi->dir_base);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 71376b6df89d..bfd12247f173 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -730,11 +730,9 @@ static int img_spfi_resume(struct device *dev)
struct img_spfi *spfi = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
spfi_reset(spfi);
pm_runtime_put(dev);
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 66063687ae27..55f4ee2db002 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -116,6 +116,22 @@
#define ERASE_64K_OPCODE_SHIFT 16
#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+/* Flash descriptor fields */
+#define FLVALSIG_MAGIC 0x0ff0a55a
+#define FLMAP0_NC_MASK GENMASK(9, 8)
+#define FLMAP0_NC_SHIFT 8
+#define FLMAP0_FCBA_MASK GENMASK(7, 0)
+
+#define FLCOMP_C0DEN_MASK GENMASK(3, 0)
+#define FLCOMP_C0DEN_512K 0x00
+#define FLCOMP_C0DEN_1M 0x01
+#define FLCOMP_C0DEN_2M 0x02
+#define FLCOMP_C0DEN_4M 0x03
+#define FLCOMP_C0DEN_8M 0x04
+#define FLCOMP_C0DEN_16M 0x05
+#define FLCOMP_C0DEN_32M 0x06
+#define FLCOMP_C0DEN_64M 0x07
+
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -129,6 +145,7 @@
* @master: Pointer to the SPI controller structure
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
+ * @chip0_size: Size of the first flash chip in bytes
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
@@ -146,6 +163,7 @@ struct intel_spi {
struct spi_controller *master;
size_t nregions;
size_t pr_num;
+ size_t chip0_size;
bool locked;
bool swseq_reg;
bool swseq_erase;
@@ -158,6 +176,7 @@ struct intel_spi_mem_op {
struct spi_mem_op mem_op;
u32 replacement_op;
int (*exec_op)(struct intel_spi *ispi,
+ const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op);
};
@@ -441,7 +460,16 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
return 0;
}
-static int intel_spi_read_reg(struct intel_spi *ispi,
+static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
+ const struct spi_mem *mem)
+{
+ /* Pick up the correct start address */
+ if (!mem)
+ return 0;
+ return mem->spi->chip_select == 1 ? ispi->chip0_size : 0;
+}
+
+static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
@@ -449,8 +477,7 @@ static int intel_spi_read_reg(struct intel_spi *ispi,
u8 opcode = op->cmd.opcode;
int ret;
- /* Address of the first chip */
- writel(0, ispi->base + FADDR);
+ writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
if (ispi->swseq_reg)
ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
@@ -464,7 +491,7 @@ static int intel_spi_read_reg(struct intel_spi *ispi,
return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}
-static int intel_spi_write_reg(struct intel_spi *ispi,
+static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
@@ -511,7 +538,7 @@ static int intel_spi_write_reg(struct intel_spi *ispi,
if (opcode == SPINOR_OP_WRDI)
return 0;
- writel(0, ispi->base + FADDR);
+ writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
/* Write the value beforehand */
ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
@@ -524,13 +551,13 @@ static int intel_spi_write_reg(struct intel_spi *ispi,
return intel_spi_hw_cycle(ispi, opcode, nbytes);
}
-static int intel_spi_read(struct intel_spi *ispi,
+static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
- void *read_buf = op->data.buf.in;
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t block_size, nbytes = op->data.nbytes;
- u32 addr = op->addr.val;
+ void *read_buf = op->data.buf.in;
u32 val, status;
int ret;
@@ -585,13 +612,13 @@ static int intel_spi_read(struct intel_spi *ispi,
return 0;
}
-static int intel_spi_write(struct intel_spi *ispi,
+static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t block_size, nbytes = op->data.nbytes;
const void *write_buf = op->data.buf.out;
- u32 addr = op->addr.val;
u32 val, status;
int ret;
@@ -648,12 +675,12 @@ static int intel_spi_write(struct intel_spi *ispi,
return 0;
}
-static int intel_spi_erase(struct intel_spi *ispi,
+static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
u8 opcode = op->cmd.opcode;
- u32 addr = op->addr.val;
u32 val, status;
int ret;
@@ -765,7 +792,7 @@ static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *o
if (!iop)
return -EOPNOTSUPP;
- return iop->exec_op(ispi, iop, op);
+ return iop->exec_op(ispi, mem, iop, op);
}
static const char *intel_spi_get_name(struct spi_mem *mem)
@@ -805,7 +832,7 @@ static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
op.data.nbytes = len;
op.data.buf.in = buf;
- ret = iop->exec_op(ispi, iop, &op);
+ ret = iop->exec_op(ispi, desc->mem, iop, &op);
return ret ? ret : len;
}
@@ -821,7 +848,7 @@ static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs
op.data.nbytes = len;
op.data.buf.out = buf;
- ret = iop->exec_op(ispi, iop, &op);
+ ret = iop->exec_op(ispi, desc->mem, iop, &op);
return ret ? ret : len;
}
@@ -1073,6 +1100,7 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->pregs = ispi->base + CNL_PR;
ispi->nregions = CNL_FREG_NUM;
ispi->pr_num = CNL_PR_NUM;
+ erase_64k = true;
break;
default:
@@ -1226,10 +1254,98 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
+static int intel_spi_read_desc(struct intel_spi *ispi)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(0, NULL, 0));
+ u32 buf[2], nc, fcba, flcomp;
+ ssize_t ret;
+
+ op.addr.val = 0x10;
+ op.data.buf.in = buf;
+ op.data.nbytes = sizeof(buf);
+
+ ret = intel_spi_read(ispi, NULL, NULL, &op);
+ if (ret) {
+ dev_warn(ispi->dev, "failed to read descriptor\n");
+ return ret;
+ }
+
+ dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
+ dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);
+
+ if (buf[0] != FLVALSIG_MAGIC) {
+ dev_warn(ispi->dev, "descriptor signature not valid\n");
+ return -ENODEV;
+ }
+
+ fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
+ dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);
+
+ op.addr.val = fcba;
+ op.data.buf.in = &flcomp;
+ op.data.nbytes = sizeof(flcomp);
+
+ ret = intel_spi_read(ispi, NULL, NULL, &op);
+ if (ret) {
+ dev_warn(ispi->dev, "failed to read FLCOMP\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);
+
+ switch (flcomp & FLCOMP_C0DEN_MASK) {
+ case FLCOMP_C0DEN_512K:
+ ispi->chip0_size = SZ_512K;
+ break;
+ case FLCOMP_C0DEN_1M:
+ ispi->chip0_size = SZ_1M;
+ break;
+ case FLCOMP_C0DEN_2M:
+ ispi->chip0_size = SZ_2M;
+ break;
+ case FLCOMP_C0DEN_4M:
+ ispi->chip0_size = SZ_4M;
+ break;
+ case FLCOMP_C0DEN_8M:
+ ispi->chip0_size = SZ_8M;
+ break;
+ case FLCOMP_C0DEN_16M:
+ ispi->chip0_size = SZ_16M;
+ break;
+ case FLCOMP_C0DEN_32M:
+ ispi->chip0_size = SZ_32M;
+ break;
+ case FLCOMP_C0DEN_64M:
+ ispi->chip0_size = SZ_64M;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);
+
+ nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
+ if (!nc)
+ ispi->master->num_chipselect = 1;
+ else if (nc == 1)
+ ispi->master->num_chipselect = 2;
+ else
+ return -EINVAL;
+
+ dev_dbg(ispi->dev, "%u flash components found\n",
+ ispi->master->num_chipselect);
+ return 0;
+}
+
static int intel_spi_populate_chip(struct intel_spi *ispi)
{
struct flash_platform_data *pdata;
struct spi_board_info chip;
+ int ret;
pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -1247,7 +1363,23 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
snprintf(chip.modalias, 8, "spi-nor");
chip.platform_data = pdata;
- return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV;
+ if (!spi_new_device(ispi->master, &chip))
+ return -ENODEV;
+
+ /* Add the second chip if present */
+ if (ispi->master->num_chipselect < 2)
+ return 0;
+
+ ret = intel_spi_read_desc(ispi);
+ if (ret)
+ return ret;
+
+ chip.platform_data = NULL;
+ chip.chip_select = 1;
+
+ if (!spi_new_device(ispi->master, &chip))
+ return -ENODEV;
+ return 0;
}
/**
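intel_spi_read_desc() walks the flash descriptor: it validates FLVALSIG, derives the flash component base address (FCBA) and component count (NC) from FLMAP0, then reads FLCOMP at FCBA to get the density of the first chip. A stand-alone sketch of the same field arithmetic with made-up register values (sample numbers only, not real hardware contents):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flmap0 = 0x00040103;	/* sample value only */
	uint32_t flcomp = 0x00000024;	/* sample value, C0DEN = 4 */

	uint32_t fcba  = (flmap0 & 0xff) << 4;	/* where FLCOMP is read */
	uint32_t nc    = (flmap0 >> 8) & 0x3;	/* flash components - 1 */
	uint32_t c0den = flcomp & 0xf;		/* chip 0 density code */

	/* C0DEN 0 -> 512K, 1 -> 1M, ... 7 -> 64M, i.e. 512 KB << C0DEN */
	printf("FCBA=%#x, %u flash component(s), chip0 = %u KB\n",
	       fcba, nc + 1, 512u << c0den);

	return 0;
}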
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 4d4f77a186a9..dd7de8fa37d0 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -313,6 +313,33 @@ static struct spi_test spi_tests[] = {
},
},
},
+ {
+ .description = "three tx+rx transfers with overlapping cache lines",
+ .fill_option = FILL_COUNT_8,
+ /*
+ * This should be large enough for the controller driver to
+ * choose to transfer it with DMA.
+ */
+ .iterate_len = { 512, -1 },
+ .iterate_transfer_mask = BIT(1),
+ .transfer_count = 3,
+ .transfers = {
+ {
+ .len = 1,
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ {
+ .tx_buf = TX(1),
+ .rx_buf = RX(1),
+ },
+ {
+ .len = 1,
+ .tx_buf = TX(513),
+ .rx_buf = RX(513),
+ },
+ },
+ },
{ /* end of tests sequence */ }
};
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa7afc8..bad201510a99 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@ struct meson_spicc_device {
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+static const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
new file mode 100644
index 000000000000..19a6a46829f6
--- /dev/null
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Microchip coreQSPI QSPI controller driver
+ *
+ * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * QSPI Control register mask defines
+ */
+#define CONTROL_ENABLE BIT(0)
+#define CONTROL_MASTER BIT(1)
+#define CONTROL_XIP BIT(2)
+#define CONTROL_XIPADDR BIT(3)
+#define CONTROL_CLKIDLE BIT(10)
+#define CONTROL_SAMPLE_MASK GENMASK(12, 11)
+#define CONTROL_MODE0 BIT(13)
+#define CONTROL_MODE12_MASK GENMASK(15, 14)
+#define CONTROL_MODE12_EX_RO BIT(14)
+#define CONTROL_MODE12_EX_RW BIT(15)
+#define CONTROL_MODE12_FULL GENMASK(15, 14)
+#define CONTROL_FLAGSX4 BIT(16)
+#define CONTROL_CLKRATE_MASK GENMASK(27, 24)
+#define CONTROL_CLKRATE_SHIFT 24
+
+/*
+ * QSPI Frames register mask defines
+ */
+#define FRAMES_TOTALBYTES_MASK GENMASK(15, 0)
+#define FRAMES_CMDBYTES_MASK GENMASK(24, 16)
+#define FRAMES_CMDBYTES_SHIFT 16
+#define FRAMES_SHIFT 25
+#define FRAMES_IDLE_MASK GENMASK(29, 26)
+#define FRAMES_IDLE_SHIFT 26
+#define FRAMES_FLAGBYTE BIT(30)
+#define FRAMES_FLAGWORD BIT(31)
+
+/*
+ * QSPI Interrupt Enable register mask defines
+ */
+#define IEN_TXDONE BIT(0)
+#define IEN_RXDONE BIT(1)
+#define IEN_RXAVAILABLE BIT(2)
+#define IEN_TXAVAILABLE BIT(3)
+#define IEN_RXFIFOEMPTY BIT(4)
+#define IEN_TXFIFOFULL BIT(5)
+
+/*
+ * QSPI Status register mask defines
+ */
+#define STATUS_TXDONE BIT(0)
+#define STATUS_RXDONE BIT(1)
+#define STATUS_RXAVAILABLE BIT(2)
+#define STATUS_TXAVAILABLE BIT(3)
+#define STATUS_RXFIFOEMPTY BIT(4)
+#define STATUS_TXFIFOFULL BIT(5)
+#define STATUS_READY BIT(7)
+#define STATUS_FLAGSX4 BIT(8)
+#define STATUS_MASK GENMASK(8, 0)
+
+#define BYTESUPPER_MASK GENMASK(31, 16)
+#define BYTESLOWER_MASK GENMASK(15, 0)
+
+#define MAX_DIVIDER 16
+#define MIN_DIVIDER 0
+#define MAX_DATA_CMD_LEN 256
+
+/* QSPI ready time out value */
+#define TIMEOUT_MS 500
+
+/*
+ * QSPI Register offsets.
+ */
+#define REG_CONTROL (0x00)
+#define REG_FRAMES (0x04)
+#define REG_IEN (0x0c)
+#define REG_STATUS (0x10)
+#define REG_DIRECT_ACCESS (0x14)
+#define REG_UPPER_ACCESS (0x18)
+#define REG_RX_DATA (0x40)
+#define REG_TX_DATA (0x44)
+#define REG_X4_RX_DATA (0x48)
+#define REG_X4_TX_DATA (0x4c)
+#define REG_FRAMESUP (0x50)
+
+/**
+ * struct mchp_coreqspi - Defines qspi driver instance
+ * @regs: Virtual address of the QSPI controller registers
+ * @clk: QSPI Operating clock
+ * @data_completion: completion structure
+ * @op_lock: lock access to the device
+ * @txbuf: TX buffer
+ * @rxbuf: RX buffer
+ * @irq: IRQ number
+ * @tx_len: Number of bytes left to transfer
+ * @rx_len: Number of bytes left to receive
+ */
+struct mchp_coreqspi {
+ void __iomem *regs;
+ struct clk *clk;
+ struct completion data_completion;
+ struct mutex op_lock; /* lock access to the device */
+ u8 *txbuf;
+ u8 *rxbuf;
+ int irq;
+ int tx_len;
+ int rx_len;
+};
+
+static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+{
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ /*
+	 * The operating mode can be configured based on the command that needs to be sent.
+ * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
+ * 00: Normal (single DQ0 TX and single DQ1 RX lines)
+ * 01: Extended RO (command and address bytes on DQ0 only)
+ * 10: Extended RW (command byte on DQ0 only)
+ * 11: Full. (command and address are on all DQ lines)
+ * bit[13]: Sets whether multiple bit SPI uses 2 or 4 bits of data
+ * 0: 2-bits (BSPI)
+ * 1: 4-bits (QSPI)
+ */
+ if (op->data.buswidth == 4 || op->data.buswidth == 2) {
+ control &= ~CONTROL_MODE12_MASK;
+ if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
+ control |= CONTROL_MODE12_EX_RO;
+ else if (op->cmd.buswidth == 1)
+ control |= CONTROL_MODE12_EX_RW;
+ else
+ control |= CONTROL_MODE12_FULL;
+
+ control |= CONTROL_MODE0;
+ } else {
+ control &= ~(CONTROL_MODE12_MASK |
+ CONTROL_MODE0);
+ }
+
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
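For illustration, here is a minimal standalone sketch (not part of the patch) of the mode encoding described in the comment above, reusing the CONTROL_* bit positions defined earlier in this file; the 1-1-4 fast-read operation and its bus widths are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Bit positions copied from the defines earlier in this file. */
#define CONTROL_MODE0		(1u << 13)	/* 4-bit data instead of 2-bit */
#define CONTROL_MODE12_EX_RO	(1u << 14)	/* command and address on DQ0 only */

int main(void)
{
	/* Hypothetical 1-1-4 fast read: cmd/addr on DQ0, data on four lines. */
	unsigned int cmd_buswidth = 1, addr_buswidth = 1, data_buswidth = 4;
	uint32_t control = 0;

	if (data_buswidth == 4 || data_buswidth == 2) {
		if (cmd_buswidth == 1 && (addr_buswidth == 1 || addr_buswidth == 0))
			control |= CONTROL_MODE12_EX_RO;	/* Extended RO */
		control |= CONTROL_MODE0;			/* quad (4-bit) data */
	}

	printf("mode bits: 0x%04x\n", (unsigned int)control);	/* prints 0x6000 */
	return 0;
}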
+
+static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
+{
+ u32 control, data;
+
+ if (!qspi->rx_len)
+ return;
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ /*
+	 * Read 4 bytes from the SPI FIFO in a single transaction and then read
+	 * the remaining data byte-wise.
+ */
+ control |= CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->rx_len >= 4) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+ ;
+ data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
+ *(u32 *)qspi->rxbuf = data;
+ qspi->rxbuf += 4;
+ qspi->rx_len -= 4;
+ }
+
+ control &= ~CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->rx_len--) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+ ;
+ data = readl_relaxed(qspi->regs + REG_RX_DATA);
+ *qspi->rxbuf++ = (data & 0xFF);
+ }
+}
+
+static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
+{
+ u32 control, data;
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control |= CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->tx_len >= 4) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+ ;
+ data = *(u32 *)qspi->txbuf;
+ qspi->txbuf += 4;
+ qspi->tx_len -= 4;
+ writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
+ }
+
+ control &= ~CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->tx_len--) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+ ;
+ data = *qspi->txbuf++;
+ writel_relaxed(data, qspi->regs + REG_TX_DATA);
+ }
+}
+
+static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
+{
+ u32 mask = IEN_TXDONE |
+ IEN_RXDONE |
+ IEN_RXAVAILABLE;
+
+ writel_relaxed(mask, qspi->regs + REG_IEN);
+}
+
+static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
+{
+ writel_relaxed(0, qspi->regs + REG_IEN);
+}
+
+static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
+{
+ struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;
+
+ if (intfield == 0)
+ return ret;
+
+ if (intfield & IEN_TXDONE) {
+ writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
+ ret = IRQ_HANDLED;
+ }
+
+ if (intfield & IEN_RXAVAILABLE) {
+ writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
+ mchp_coreqspi_read_op(qspi);
+ ret = IRQ_HANDLED;
+ }
+
+ if (intfield & IEN_RXDONE) {
+ writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
+ complete(&qspi->data_completion);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
+{
+ unsigned long clk_hz;
+ u32 control, baud_rate_val = 0;
+
+ clk_hz = clk_get_rate(qspi->clk);
+ if (!clk_hz)
+ return -EINVAL;
+
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
+ if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
+ dev_err(&spi->dev,
+ "could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
+ spi->max_speed_hz, clk_hz);
+ return -EINVAL;
+ }
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
+ control |= CONTROL_CLKIDLE;
+ else
+ control &= ~CONTROL_CLKIDLE;
+
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
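A quick worked example of the divider arithmetic above (hypothetical clock rates, not part of the patch):

	clk_hz = 150 MHz, spi->max_speed_hz = 20 MHz
	baud_rate_val = DIV_ROUND_UP(150000000, 2 * 20000000) = 4   (within MAX_DIVIDER)
	effective SCLK = 150 MHz / (2 * 4) = 18.75 MHz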
+
+static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
+{
+ struct spi_controller *ctlr = spi_dev->master;
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ control |= (CONTROL_MASTER | CONTROL_ENABLE);
+ control &= ~CONTROL_CLKIDLE;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
+
+static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+{
+ u32 idle_cycles = 0;
+ int total_bytes, cmd_bytes, frames, ctrl;
+
+ cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
+ total_bytes = cmd_bytes + op->data.nbytes;
+
+ /*
+	 * As per the coreQSPI IP spec, the number of command and data bytes is
+	 * controlled by the frames register for each SPI sequence. This supports
+	 * the SPI flash memory read and write sequences as below, so configure
+	 * the cmd and total bytes accordingly.
+ * ---------------------------------------------------------------------
+ * TOTAL BYTES | CMD BYTES | What happens |
+ * ______________________________________________________________________
+ * | | |
+ * 1 | 1 | The SPI core will transmit a single byte |
+ * | | and receive data is discarded |
+ * | | |
+ * 1 | 0 | The SPI core will transmit a single byte |
+ * | | and return a single byte |
+ * | | |
+ * 10 | 4 | The SPI core will transmit 4 command |
+ * | | bytes discarding the receive data and |
+ * | | transmits 6 dummy bytes returning the 6 |
+ * | | received bytes and return a single byte |
+ * | | |
+	 *   10         |   10        | The SPI core will transmit 10 command    |
+	 *              |             | bytes and the receive data is discarded  |
+	 *              |             |                                          |
+	 *   10         |   0         | The SPI core will transmit 10 command    |
+	 *              |             | bytes and return 10 received bytes       |
+ * ______________________________________________________________________
+ */
+ if (!(op->data.dir == SPI_MEM_DATA_IN))
+ cmd_bytes = total_bytes;
+
+ frames = total_bytes & BYTESUPPER_MASK;
+ writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
+ frames = total_bytes & BYTESLOWER_MASK;
+ frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
+
+ if (op->dummy.buswidth)
+ idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+
+ frames |= idle_cycles << FRAMES_IDLE_SHIFT;
+ ctrl = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ if (ctrl & CONTROL_MODE12_MASK)
+ frames |= (1 << FRAMES_SHIFT);
+
+ frames |= FRAMES_FLAGWORD;
+ writel_relaxed(frames, qspi->regs + REG_FRAMES);
+}
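As a worked example of the frames layout described above (not part of the patch; the operation is a hypothetical 1-1-4 read with a 1-byte opcode, 3-byte address, 4 dummy bytes on four lines and 4 data bytes), the following standalone sketch computes the value written to REG_FRAMES, reusing the shift and flag defines from earlier in this file:

#include <stdint.h>
#include <stdio.h>

/* Shifts and flags copied from the defines earlier in this file. */
#define FRAMES_CMDBYTES_SHIFT	16
#define FRAMES_SHIFT		25
#define FRAMES_IDLE_SHIFT	26
#define FRAMES_FLAGWORD		(1u << 31)

int main(void)
{
	unsigned int cmd_bytes = 1 + 3;			/* opcode + 3 address bytes */
	unsigned int total_bytes = cmd_bytes + 4;	/* plus 4 data bytes (read) */
	unsigned int idle_cycles = 4 * 8 / 4;		/* 4 dummy bytes, buswidth 4 */
	uint32_t frames;

	frames  = total_bytes;				/* bits [15:0]  */
	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;	/* bits [24:16] */
	frames |= idle_cycles << FRAMES_IDLE_SHIFT;	/* bits [29:26] */
	frames |= 1u << FRAMES_SHIFT;			/* multi-line mode is in use */
	frames |= FRAMES_FLAGWORD;

	printf("REG_FRAMES = 0x%08x\n", (unsigned int)frames);	/* 0xa2040008 */
	return 0;
}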
+
+static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
+{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata
+ (mem->spi->master);
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
+ (status & STATUS_READY), 0,
+ TIMEOUT_MS);
+ if (ret) {
+ dev_err(&mem->spi->dev,
+ "Timeout waiting on QSPI ready.\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata
+ (mem->spi->master);
+ u32 address = op->addr.val;
+ u8 opcode = op->cmd.opcode;
+ u8 opaddr[5];
+ int err, i;
+
+ mutex_lock(&qspi->op_lock);
+ err = mchp_qspi_wait_for_ready(mem);
+ if (err)
+ goto error;
+
+ err = mchp_coreqspi_setup_clock(qspi, mem->spi);
+ if (err)
+ goto error;
+
+ err = mchp_coreqspi_set_mode(qspi, op);
+ if (err)
+ goto error;
+
+ reinit_completion(&qspi->data_completion);
+ mchp_coreqspi_config_op(qspi, op);
+ if (op->cmd.opcode) {
+ qspi->txbuf = &opcode;
+ qspi->rxbuf = NULL;
+ qspi->tx_len = op->cmd.nbytes;
+ qspi->rx_len = 0;
+ mchp_coreqspi_write_op(qspi, false);
+ }
+
+ qspi->txbuf = &opaddr[0];
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));
+
+ qspi->rxbuf = NULL;
+ qspi->tx_len = op->addr.nbytes;
+ qspi->rx_len = 0;
+ mchp_coreqspi_write_op(qspi, false);
+ }
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ qspi->txbuf = (u8 *)op->data.buf.out;
+ qspi->rxbuf = NULL;
+ qspi->rx_len = 0;
+ qspi->tx_len = op->data.nbytes;
+ mchp_coreqspi_write_op(qspi, true);
+ } else {
+ qspi->txbuf = NULL;
+ qspi->rxbuf = (u8 *)op->data.buf.in;
+ qspi->rx_len = op->data.nbytes;
+ qspi->tx_len = 0;
+ }
+ }
+
+ mchp_coreqspi_enable_ints(qspi);
+
+ if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+error:
+ mutex_unlock(&qspi->op_lock);
+ mchp_coreqspi_disable_ints(qspi);
+
+ return err;
+}
+
+static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
+ (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
+ /*
+ * If the command and address are on DQ0 only, then this
+ * controller doesn't support sending data on dual and
+		 * quad lines, but it does support reading data on dual and
+		 * quad lines with the same configuration as the command and
+		 * address on DQ0.
+		 * i.e. the EX_RO (read only) encoding of control register
+		 * bits [15:13] only covers the case where the command and
+		 * address are on DQ0; it allows reads but not writes.
+		 * Ex: 0x34h (Quad Load Program Data) is therefore not
+		 * supported; the spi-mem layer will iterate over each
+		 * command and choose a supported one.
+ */
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return false;
+ }
+
+ return true;
+}
+
+static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->data.nbytes > MAX_DATA_CMD_LEN)
+ op->data.nbytes = MAX_DATA_CMD_LEN;
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
+ .adjust_op_size = mchp_coreqspi_adjust_op_size,
+ .supports_op = mchp_coreqspi_supports_op,
+ .exec_op = mchp_coreqspi_exec_op,
+};
+
+static int mchp_coreqspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mchp_coreqspi *qspi;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!ctlr)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "unable to allocate master for QSPI controller\n");
+
+ qspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, qspi);
+
+ qspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qspi->regs))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
+ "failed to map registers\n");
+
+ qspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
+ "could not get clock\n");
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+ init_completion(&qspi->data_completion);
+ mutex_init(&qspi->op_lock);
+
+ qspi->irq = platform_get_irq(pdev, 0);
+ if (qspi->irq < 0) {
+ ret = qspi->irq;
+ goto out;
+ }
+
+ ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
+ IRQF_SHARED, pdev->name, qspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed %d\n", ret);
+ goto out;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mem_ops = &mchp_coreqspi_mem_ops;
+ ctlr->setup = mchp_coreqspi_setup_op;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = np;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "spi_register_controller failed\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ clk_disable_unprepare(qspi->clk);
+
+ return ret;
+}
+
+static int mchp_coreqspi_remove(struct platform_device *pdev)
+{
+ struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ mchp_coreqspi_disable_ints(qspi);
+ control &= ~CONTROL_ENABLE;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ clk_disable_unprepare(qspi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_coreqspi_of_match[] = {
+ { .compatible = "microchip,coreqspi-rtl-v2" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);
+
+static struct platform_driver mchp_coreqspi_driver = {
+ .probe = mchp_coreqspi_probe,
+ .driver = {
+ .name = "microchip,coreqspi",
+ .of_match_table = mchp_coreqspi_of_match,
+ },
+ .remove = mchp_coreqspi_remove,
+};
+module_platform_driver(mchp_coreqspi_driver);
+
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>");
+MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index ce4385330b19..d352844c798c 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -548,12 +548,12 @@ static int mchp_corespi_probe(struct platform_device *pdev)
IRQF_SHARED, dev_name(&pdev->dev), master);
if (ret)
return dev_err_probe(&pdev->dev, ret,
- "could not request irq: %d\n", ret);
+ "could not request irq\n");
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk),
- "could not get clk: %d\n", ret);
+ "could not get clk\n");
ret = clk_prepare_enable(spi->clk);
if (ret)
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index bc5e36fd4288..cb075c1acbee 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -11,13 +11,14 @@
*/
#include <linux/module.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -89,7 +90,7 @@ struct mpc52xx_spi {
const u8 *tx_buf;
int cs_change;
int gpio_cs_count;
- unsigned int *gpio_cs;
+ struct gpio_desc **gpio_cs;
};
/*
@@ -101,9 +102,10 @@ static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
if (ms->gpio_cs_count > 0) {
cs = ms->message->spi->chip_select;
- gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
- } else
+ gpiod_set_value(ms->gpio_cs[cs], value);
+ } else {
out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
+ }
}
/*
@@ -385,10 +387,10 @@ static int mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
+ struct gpio_desc *gpio_cs;
void __iomem *regs;
u8 ctrl1;
int rc, i = 0;
- int gpio_cs;
/* MMIO registers */
dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
@@ -438,7 +440,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
- ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
+ ms->gpio_cs_count = gpiod_count(&op->dev, NULL);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
@@ -450,23 +452,16 @@ static int mpc52xx_spi_probe(struct platform_device *op)
}
for (i = 0; i < ms->gpio_cs_count; i++) {
- gpio_cs = of_get_gpio(op->dev.of_node, i);
- if (!gpio_is_valid(gpio_cs)) {
- dev_err(&op->dev,
- "could not parse the gpio field in oftree\n");
- rc = -ENODEV;
- goto err_gpio;
- }
-
- rc = gpio_request(gpio_cs, dev_name(&op->dev));
+ gpio_cs = gpiod_get_index(&op->dev,
+ NULL, i, GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(gpio_cs);
if (rc) {
dev_err(&op->dev,
- "can't request spi cs gpio #%d on gpio line %d\n",
- i, gpio_cs);
+ "failed to get spi cs gpio #%d: %d\n",
+ i, rc);
goto err_gpio;
}
- gpio_direction_output(gpio_cs, 1);
ms->gpio_cs[i] = gpio_cs;
}
}
@@ -507,7 +502,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
dev_err(&ms->master->dev, "initialization failed\n");
err_gpio:
while (i-- > 0)
- gpio_free(ms->gpio_cs[i]);
+ gpiod_put(ms->gpio_cs[i]);
kfree(ms->gpio_cs);
err_alloc_gpio:
@@ -528,7 +523,7 @@ static int mpc52xx_spi_remove(struct platform_device *op)
free_irq(ms->irq1, ms);
for (i = 0; i < ms->gpio_cs_count; i++)
- gpio_free(ms->gpio_cs[i]);
+ gpiod_put(ms->gpio_cs[i]);
kfree(ms->gpio_cs);
spi_unregister_master(master);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0a3b9f7eed30..11aeae7fe7fc 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1184,6 +1184,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
+ if (mdata->dev_comp->ipm_design)
+ dma_set_max_seg_size(dev, SZ_16M);
+ else
+ dma_set_max_seg_size(dev, SZ_256K);
+
ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
IRQF_TRIGGER_NONE, dev_name(dev), master);
if (ret)
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index b4b9b7309b5e..c4cc8e2f85e2 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -55,7 +55,6 @@ struct mt7621_spi {
void __iomem *base;
unsigned int sys_freq;
unsigned int speed;
- struct clk *clk;
int pending_write;
};
@@ -327,7 +326,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
- int status = 0;
struct clk *clk;
int ret;
@@ -339,21 +337,14 @@ static int mt7621_spi_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n",
- status);
- return PTR_ERR(clk);
- }
-
- status = clk_prepare_enable(clk);
- if (status)
- return status;
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get SYS clock\n");
master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs));
if (!master) {
dev_info(&pdev->dev, "master allocation failed\n");
- clk_disable_unprepare(clk);
return -ENOMEM;
}
@@ -369,38 +360,18 @@ static int mt7621_spi_probe(struct platform_device *pdev)
rs = spi_controller_get_devdata(master);
rs->base = base;
- rs->clk = clk;
rs->master = master;
- rs->sys_freq = clk_get_rate(rs->clk);
+ rs->sys_freq = clk_get_rate(clk);
rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "SPI reset failed!\n");
- clk_disable_unprepare(clk);
return ret;
}
- ret = spi_register_controller(master);
- if (ret)
- clk_disable_unprepare(clk);
-
- return ret;
-}
-
-static int mt7621_spi_remove(struct platform_device *pdev)
-{
- struct spi_controller *master;
- struct mt7621_spi *rs;
-
- master = dev_get_drvdata(&pdev->dev);
- rs = spi_controller_get_devdata(master);
-
- spi_unregister_controller(master);
- clk_disable_unprepare(rs->clk);
-
- return 0;
+ return devm_spi_register_controller(&pdev->dev, master);
}
MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -411,7 +382,6 @@ static struct platform_driver mt7621_spi_driver = {
.of_match_table = mt7621_spi_match,
},
.probe = mt7621_spi_probe,
- .remove = mt7621_spi_remove,
};
module_platform_driver(mt7621_spi_driver);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index f5d32ec4634e..0709e987bd5a 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -161,6 +161,7 @@ static int spi_mux_probe(struct spi_device *spi)
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
+ ctlr->must_async = true;
ret = devm_spi_register_controller(&spi->dev, ctlr);
if (ret)
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index 1668a347e003..7f2e4d1b0d43 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -443,6 +443,7 @@ static int npcm_pspi_remove(struct platform_device *pdev)
static const struct of_device_id npcm_pspi_match[] = {
{ .compatible = "nuvoton,npcm750-pspi", .data = NULL },
+ { .compatible = "nuvoton,npcm845-pspi", .data = NULL },
{}
};
MODULE_DEVICE_TABLE(of, npcm_pspi_match);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 2b0301fc971c..d6a65a989ef8 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -588,7 +588,7 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
{
int ret;
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
return 0;
ret = clk_prepare_enable(f->clk_en);
@@ -606,7 +606,7 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
return 0;
clk_disable_unprepare(f->clk);
@@ -1100,7 +1100,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, f);
/* find the resources - configuration register address space */
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
else
res = platform_get_resource_byname(pdev,
@@ -1113,7 +1113,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
}
/* find the resources - controller memory mapped space */
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
else
res = platform_get_resource_byname(pdev,
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 20b047172965..061f7394e5b9 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -412,6 +412,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
return status;
err_fck:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi100k->fck);
err_ick:
clk_disable_unprepare(spi100k->ick);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index c42e59df38fe..6ba9b0d7710b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1509,10 +1509,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
status = platform_get_irq(pdev, 0);
- if (status == -EPROBE_DEFER)
- goto free_master;
if (status < 0) {
- dev_err(&pdev->dev, "no irq resource found\n");
+ dev_err_probe(&pdev->dev, status, "no irq resource found\n");
goto free_master;
}
init_completion(&mcspi->txdone);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 838d12e65144..2bf21c2e7a52 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1441,31 +1441,6 @@ static const struct of_device_id pxa2xx_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
-#ifdef CONFIG_ACPI
-
-static int pxa2xx_spi_get_port_id(struct device *dev)
-{
- struct acpi_device *adev;
- unsigned int devid;
- int port_id = -1;
-
- adev = ACPI_COMPANION(dev);
- if (adev && adev->pnp.unique_id &&
- !kstrtouint(adev->pnp.unique_id, 0, &devid))
- port_id = devid;
- return port_id;
-}
-
-#else /* !CONFIG_ACPI */
-
-static int pxa2xx_spi_get_port_id(struct device *dev)
-{
- return -1;
-}
-
-#endif /* CONFIG_ACPI */
-
-
#ifdef CONFIG_PCI
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
@@ -1479,13 +1454,16 @@ static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_controller *pdata;
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
struct ssp_device *ssp;
struct resource *res;
- struct device *parent = pdev->dev.parent;
struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL;
const struct pci_device_id *pcidev_id = NULL;
enum pxa_ssp_type type;
const void *match;
+ int status;
+ u64 uid;
if (pcidev)
pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev);
@@ -1529,7 +1507,12 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
ssp->type = type;
ssp->dev = &pdev->dev;
- ssp->port_id = pxa2xx_spi_get_port_id(&pdev->dev);
+
+ status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (status)
+ ssp->port_id = -1;
+ else
+ ssp->port_id = uid;
pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave");
pdata->num_chipselect = 1;
@@ -1873,10 +1856,8 @@ static int pxa2xx_spi_runtime_suspend(struct device *dev)
static int pxa2xx_spi_runtime_resume(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
- int status;
- status = clk_prepare_enable(drv_data->ssp->clk);
- return status;
+ return clk_prepare_enable(drv_data->ssp->clk);
}
#endif
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 00d6084306b4..7d89510dc3f0 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1198,8 +1198,10 @@ static int spi_qup_pm_resume_runtime(struct device *device)
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
/* Disable clocks auto gaiting */
config = readl_relaxed(controller->base + QUP_CONFIG);
@@ -1245,14 +1247,25 @@ static int spi_qup_resume(struct device *device)
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
- return ret;
+ goto disable_clk;
- return spi_master_resume(master);
+ ret = spi_master_resume(master);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 660aa866af06..ef25b5e93900 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -449,7 +449,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
struct spi_master *master;
int err = 0;
- master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
return -ENOMEM;
@@ -463,8 +463,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
if (pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
- err = -ENOENT;
- goto err_no_pdata;
+ return -ENOENT;
}
platform_set_drvdata(pdev, hw);
@@ -499,29 +498,24 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
/* find and map our resources */
hw->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hw->regs)) {
- err = PTR_ERR(hw->regs);
- goto err_no_pdata;
- }
+ if (IS_ERR(hw->regs))
+ return PTR_ERR(hw->regs);
hw->irq = platform_get_irq(pdev, 0);
- if (hw->irq < 0) {
- err = -ENOENT;
- goto err_no_pdata;
- }
+ if (hw->irq < 0)
+ return -ENOENT;
err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_no_pdata;
+ return err;
}
hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
- err = PTR_ERR(hw->clk);
- goto err_no_pdata;
+ return PTR_ERR(hw->clk);
}
s3c24xx_spi_initialsetup(hw);
@@ -539,8 +533,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
err_register:
clk_disable(hw->clk);
- err_no_pdata:
- spi_master_put(hw->master);
return err;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7f346866614a..71d324ec9a70 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -84,6 +84,7 @@
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
@@ -389,8 +390,8 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
- sdd->rx_dma.ch = 0;
- sdd->tx_dma.ch = 0;
+ sdd->rx_dma.ch = NULL;
+ sdd->tx_dma.ch = NULL;
}
return 0;
@@ -711,6 +712,13 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
return 0;
}
+static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
+}
+
static int s3c64xx_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -1152,6 +1160,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
+ master->max_transfer_size = s3c64xx_spi_max_transfer_size;
master->num_chipselect = sci->num_cs;
master->use_gpio_descriptors = true;
master->dma_alignment = 8;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d0012b30410c..9bca3d076f05 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1085,6 +1085,7 @@ static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
{},
};
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index f3fe92300639..9131660c1afb 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
@@ -355,10 +356,10 @@ static int stm32_qspi_get_mode(u8 buswidth)
return buswidth;
}
-static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
+static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
- struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
+ struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
+ struct stm32_qspi_flash *flash = &qspi->flash[spi->chip_select];
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
@@ -465,7 +466,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
qspi->fmode = CCR_FMODE_APM;
qspi->status_timeout = timeout_ms;
- ret = stm32_qspi_send(mem, op);
+ ret = stm32_qspi_send(mem->spi, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
@@ -489,7 +490,7 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
else
qspi->fmode = CCR_FMODE_INDW;
- ret = stm32_qspi_send(mem, op);
+ ret = stm32_qspi_send(mem->spi, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
@@ -545,7 +546,7 @@ static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
else
qspi->fmode = CCR_FMODE_INDR;
- ret = stm32_qspi_send(desc->mem, &op);
+ ret = stm32_qspi_send(desc->mem->spi, &op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
@@ -554,12 +555,96 @@ static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
return ret ?: len;
}
+static int stm32_qspi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ int ret = 0;
+
+ if (!spi->cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qspi->lock);
+
+ gpiod_set_value_cansleep(spi->cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(qspi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+ /*
+		 * The QSPI hardware supports dummy-byte transfers.
+		 * If the current transfer is a dummy byte, merge it with the next
+		 * transfer in order to take the QSPI block constraint into account.
+ */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+ /* if happens, means that message is not correctly built */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ qspi->fmode = CCR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ qspi->fmode = CCR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_qspi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(spi->cs_gpiod, false);
+
+ mutex_unlock(&qspi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
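For context, a hedged sketch of how a caller might build a message that this handler can consume, with a dummy-byte transfer placed in front of the data transfer (kernel context and a GPIO chip-select are assumed; the 0x6b opcode is illustrative and real code would need DMA-safe buffers and error handling):

#include <linux/spi/spi.h>

/* Hypothetical caller, not part of the patch. */
static int example_quad_read(struct spi_device *spi, void *buf, size_t len)
{
	u8 cmd = 0x6b;		/* illustrative quad-output read opcode */
	u8 dummy = 0xff;	/* one dummy byte -> 8 idle clock cycles */
	struct spi_transfer xfers[3] = {
		{ .tx_buf = &cmd,   .len = 1 },
		{ .tx_buf = &dummy, .len = 1, .dummy_data = 1 },
		{ .rx_buf = buf,    .len = len, .rx_nbits = SPI_NBITS_QUAD },
	};
	struct spi_message msg;

	/* The dummy transfer above gets merged with the RX transfer that follows it. */
	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);
}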
+
static int stm32_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
- u32 presc;
+ u32 presc, mode;
int ret;
if (ctrl->busy)
@@ -568,6 +653,16 @@ static int stm32_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
+ mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
+ if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
+ ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
+ gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
+ dev_err(qspi->dev, "configuration not supported\n");
+
+ return -EINVAL;
+ }
+
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
@@ -580,6 +675,16 @@ static int stm32_qspi_setup(struct spi_device *spi)
mutex_lock(&qspi->lock);
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+
+ /*
+	 * Dual flash mode is only enabled when SPI_TX_OCTAL and SPI_RX_OCTAL
+	 * are both set in spi->mode and the "cs-gpios" property is found in DT
+ */
+ if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ qspi->cr_reg |= CR_DFM;
+ dev_dbg(qspi->dev, "Dual flash mode enable");
+ }
+
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
@@ -741,11 +846,13 @@ static int stm32_qspi_probe(struct platform_device *pdev)
mutex_init(&qspi->lock);
- ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
- | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
+ | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_OCTAL;
ctrl->setup = stm32_qspi_setup;
ctrl->bus_num = -1;
ctrl->mem_ops = &stm32_qspi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_qspi_transfer_one_message;
ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
ctrl->dev.of_node = dev->of_node;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 523edfdf5dcd..7377d3b81302 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -421,7 +421,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
@@ -439,10 +439,8 @@ static int xilinx_spi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xspi->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(xspi->regs)) {
- ret = PTR_ERR(xspi->regs);
- goto put_master;
- }
+ if (IS_ERR(xspi->regs))
+ return PTR_ERR(xspi->regs);
master->bus_num = pdev->id;
master->num_chipselect = num_cs;
@@ -472,14 +470,13 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->irq = platform_get_irq(pdev, 0);
if (xspi->irq < 0 && xspi->irq != -ENXIO) {
- ret = xspi->irq;
- goto put_master;
+ return xspi->irq;
} else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
dev_name(&pdev->dev), xspi);
if (ret)
- goto put_master;
+ return ret;
}
/* SPI controller initializations */
@@ -488,7 +485,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&xspi->bitbang);
if (ret) {
dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
- goto put_master;
+ return ret;
}
dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
@@ -500,11 +497,6 @@ static int xilinx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
return 0;
-
-put_master:
- spi_master_put(master);
-
- return ret;
}
static int xilinx_spi_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index fc2b5eb7d614..2fa7608f94cd 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -83,7 +83,7 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
- master = spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
if (!master)
return -ENOMEM;
@@ -97,30 +97,24 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xspi->regs)) {
- ret = PTR_ERR(xspi->regs);
- goto err;
- }
+ if (IS_ERR(xspi->regs))
+ return PTR_ERR(xspi->regs);
xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
usleep_range(1000, 2000);
if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
dev_err(&pdev->dev, "Device stuck in busy state\n");
- ret = -EBUSY;
- goto err;
+ return -EBUSY;
}
ret = spi_bitbang_start(&xspi->bitbang);
if (ret < 0) {
dev_err(&pdev->dev, "spi_bitbang_start failed\n");
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, master);
return 0;
-err:
- spi_master_put(master);
- return ret;
}
static int xtfpga_spi_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8f97a3eacdea..5f9aedd1f0b6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -95,7 +95,7 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
-static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
struct spi_statistics __percpu *pcpu_stats;
@@ -162,7 +162,7 @@ static struct device_attribute dev_attr_spi_device_##field = { \
}
#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
ssize_t len; \
@@ -309,7 +309,7 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
@@ -753,7 +753,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
- strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
+ strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
@@ -1010,9 +1010,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
}
#ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, void *buf, size_t len,
- enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1078,28 +1078,41 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
sg = sg_next(sg);
}
- ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
- if (!ret)
- ret = -ENOMEM;
+ ret = dma_map_sgtable(dev, sgt, dir, attrs);
if (ret < 0) {
sg_free_table(sgt);
return ret;
}
- sgt->nents = ret;
-
return 0;
}
-void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, enum dma_data_direction dir)
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
if (sgt->orig_nents) {
- dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ dma_unmap_sgtable(dev, sgt, dir, attrs);
sg_free_table(sgt);
+ sgt->orig_nents = 0;
+ sgt->nents = 0;
}
}
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
@@ -1124,29 +1137,37 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
rx_dev = ctlr->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync is done before each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
if (xfer->tx_buf != NULL) {
- ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE,
+ attrs);
if (ret != 0)
return ret;
}
if (xfer->rx_buf != NULL) {
- ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
- xfer->rx_buf, xfer->len,
- DMA_FROM_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE, attrs);
if (ret != 0) {
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
- DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, tx_dev,
+ &xfer->tx_sg, DMA_TO_DEVICE,
+ attrs);
+
return ret;
}
}
}
+ ctlr->cur_rx_dma_dev = rx_dev;
+ ctlr->cur_tx_dma_dev = tx_dev;
ctlr->cur_msg_mapped = true;
return 0;
@@ -1154,38 +1175,60 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
struct spi_transfer *xfer;
- struct device *tx_dev, *rx_dev;
if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
return 0;
- if (ctlr->dma_tx)
- tx_dev = ctlr->dma_tx->device->dev;
- else if (ctlr->dma_map_dev)
- tx_dev = ctlr->dma_map_dev;
- else
- tx_dev = ctlr->dev.parent;
-
- if (ctlr->dma_rx)
- rx_dev = ctlr->dma_rx->device->dev;
- else if (ctlr->dma_map_dev)
- rx_dev = ctlr->dma_map_dev;
- else
- rx_dev = ctlr->dev.parent;
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync has already been done after each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
- spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
}
ctlr->cur_msg_mapped = false;
return 0;
}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
struct spi_message *msg)
@@ -1198,6 +1241,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
{
return 0;
}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1275,8 +1328,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
@@ -1432,10 +1485,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
- spi_set_cs(msg->spi, true, false);
+ xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
+ spi_set_cs(msg->spi, !xfer->cs_off, false);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
@@ -1455,8 +1509,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
+ spi_dma_sync_for_device(ctlr, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ spi_dma_sync_for_cpu(ctlr, xfer);
+
if (ctlr->cur_msg_mapped &&
(xfer->error & SPI_TRANS_FAIL_NO_START)) {
__spi_unmap_msg(ctlr, msg);
@@ -1479,6 +1536,8 @@ fallback_pio:
if (ret < 0)
msg->status = ret;
}
+
+ spi_dma_sync_for_cpu(ctlr, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
@@ -1503,10 +1562,15 @@ fallback_pio:
&msg->transfers)) {
keep_cs = true;
} else {
- spi_set_cs(msg->spi, false, false);
+ if (!xfer->cs_off)
+ spi_set_cs(msg->spi, false, false);
_spi_transfer_cs_change_delay(msg, xfer);
- spi_set_cs(msg->spi, true, false);
+ if (!list_next_entry(xfer, transfer_list)->cs_off)
+ spi_set_cs(msg->spi, true, false);
}
+ } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
+ xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
+ spi_set_cs(msg->spi, xfer->cs_off, false);
}
msg->actual_length += xfer->len;
@@ -1587,6 +1651,15 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
trace_spi_message_start(msg);
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi),
+ GFP_KERNEL | GFP_DMA);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
@@ -1727,8 +1800,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
- if (!ret)
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
ctlr->cur_msg = NULL;
ctlr->fallback = false;
@@ -2330,7 +2402,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
goto err_out;
}
- strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+ strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
ancillary->chip_select = chip_select;
@@ -2726,7 +2798,7 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
if (!spi)
return -ENOMEM;
- strlcpy(spi->modalias, name, sizeof(spi->modalias));
+ strscpy(spi->modalias, name, sizeof(spi->modalias));
rc = spi_add_device(spi);
if (rc) {
@@ -4033,7 +4105,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
* guard against reentrancy from a different context. The io_mutex
* will catch those cases.
*/
- if (READ_ONCE(ctlr->queue_empty)) {
+ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
message->actual_length = 0;
message->status = -EINPROGRESS;
@@ -4375,7 +4447,7 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
- ctlr = acpi_spi_find_controller_by_adev(adev->parent);
+ ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
if (!ctlr)
break;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index cbc8b1d91995..783f1b88ebf2 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -1194,7 +1194,7 @@ static const struct v4l2_subdev_ops gc0310_ops = {
.sensor = &gc0310_sensor_ops,
};
-static int gc0310_remove(struct i2c_client *client)
+static void gc0310_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct gc0310_device *dev = to_gc0310_sensor(sd);
@@ -1207,8 +1207,6 @@ static int gc0310_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int gc0310_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index 0e6b2e6100d1..4d5a7e335f85 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -952,7 +952,7 @@ static const struct v4l2_subdev_ops gc2235_ops = {
.sensor = &gc2235_sensor_ops,
};
-static int gc2235_remove(struct i2c_client *client)
+static void gc2235_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct gc2235_device *dev = to_gc2235_sensor(sd);
@@ -965,8 +965,6 @@ static int gc2235_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int gc2235_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index e046489cd253..75d16b525294 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -910,7 +910,7 @@ free_flash:
return err;
}
-static int lm3554_remove(struct i2c_client *client)
+static void lm3554_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct lm3554 *flash = to_lm3554(sd);
@@ -926,8 +926,6 @@ static int lm3554_remove(struct i2c_client *client)
lm3554_gpio_uninit(client);
kfree(flash);
-
- return 0;
}
static const struct dev_pm_ops lm3554_pm_ops = {
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 3c81ab73cdae..a0e8e94b2412 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -1713,7 +1713,7 @@ static const struct v4l2_subdev_ops mt9m114_ops = {
.sensor = &mt9m114_sensor_ops,
};
-static int mt9m114_remove(struct i2c_client *client)
+static void mt9m114_remove(struct i2c_client *client)
{
struct mt9m114_device *dev;
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -1724,7 +1724,6 @@ static int mt9m114_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
- return 0;
}
static int mt9m114_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 4ba99c660681..8f48b23be3aa 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -1135,7 +1135,7 @@ static const struct v4l2_subdev_ops ov2680_ops = {
.sensor = &ov2680_sensor_ops,
};
-static int ov2680_remove(struct i2c_client *client)
+static void ov2680_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2680_device *dev = to_ov2680_sensor(sd);
@@ -1148,8 +1148,6 @@ static int ov2680_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int ov2680_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index d5d099ac1b70..887b6f99f6ca 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -1090,7 +1090,7 @@ static const struct v4l2_subdev_ops ov2722_ops = {
.sensor = &ov2722_sensor_ops,
};
-static int ov2722_remove(struct i2c_client *client)
+static void ov2722_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2722_device *dev = to_ov2722_sensor(sd);
@@ -1103,8 +1103,6 @@ static int ov2722_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
kfree(dev);
-
- return 0;
}
static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 6c95f57a52e9..c1cd631455e6 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1877,7 +1877,7 @@ static const struct v4l2_subdev_ops ov5693_ops = {
.pad = &ov5693_pad_ops,
};
-static int ov5693_remove(struct i2c_client *client)
+static void ov5693_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5693_device *dev = to_ov5693_sensor(sd);
@@ -1893,8 +1893,6 @@ static int ov5693_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int ov5693_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
index 6b5abd958bff..99b333b68198 100644
--- a/drivers/staging/media/max96712/max96712.c
+++ b/drivers/staging/media/max96712/max96712.c
@@ -407,15 +407,13 @@ static int max96712_probe(struct i2c_client *client)
return max96712_v4l2_register(priv);
}
-static int max96712_remove(struct i2c_client *client)
+static void max96712_remove(struct i2c_client *client)
{
struct max96712_priv *priv = i2c_get_clientdata(client);
v4l2_async_unregister_subdev(&priv->sd);
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
-
- return 0;
}
static const struct of_device_id max96712_of_table[] = {
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 4af5a831bde0..4fc167b42cf0 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -1162,8 +1162,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
+ writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 7042f10887bb..285a071f02be 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -340,14 +340,12 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
*
* Unregister the i2c client device as a MOST interface
*/
-static int i2c_remove(struct i2c_client *client)
+static void i2c_remove(struct i2c_client *client)
{
struct hdm_i2c *dev = i2c_get_clientdata(client);
most_deregister_interface(&dev->most_iface);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id i2c_id[] = {
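[Editor's note] All of the i2c remove() hunks above (the atomisp sensors, max96712, MOST i2c) and olpc_dcon just below belong to the same tree-wide conversion: the i2c core ignores the callback's return value, so remove() becomes void and the trailing "return 0;" lines disappear. A minimal sketch of a client driver written against the new prototype; the foo_* names and the plain kzalloc() are illustrative only, not taken from any driver in this series.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_data {
	int dummy;
};

static int foo_probe(struct i2c_client *client)
{
	struct foo_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	return 0;
}

/* No return value: the core could not act on an error here anyway. */
static void foo_remove(struct i2c_client *client)
{
	struct foo_data *data = i2c_get_clientdata(client);

	kfree(data);
}

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe_new = foo_probe,
	.remove = foo_remove,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");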
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 9363c5cfe50f..4fb9b9f10799 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -668,7 +668,7 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
return rc;
}
-static int dcon_remove(struct i2c_client *client)
+static void dcon_remove(struct i2c_client *client)
{
struct dcon_priv *dcon = i2c_get_clientdata(client);
@@ -684,8 +684,6 @@ static int dcon_remove(struct i2c_client *client)
cancel_work_sync(&dcon->switch_source);
kfree(dcon);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index ca6b966f5dd3..1ead7793062a 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -3041,8 +3041,8 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
- netif_napi_add_weight(qdev->ndev, &rx_ring->napi,
- qlge_napi_poll_msix, 64);
+ netif_napi_add(qdev->ndev, &rx_ring->napi,
+ qlge_napi_poll_msix);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
} else {
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index cac9553666e6..aa100b5141e1 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -18,6 +18,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_VERSION(DRIVERVERSION);
+MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
#define CONFIG_BR_EXT_BRNAME "br0"
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index cc2b44f60c46..9147d176da4f 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -28,6 +28,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 2326aae6709e..bb7db96ed821 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index cb6d287f580d..2d09be6425d5 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -850,8 +850,8 @@ exit:
}
static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
char *alg_name;
u32 param_len;
@@ -932,8 +932,8 @@ addkey_end:
}
static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie,
struct key_params*))
{
@@ -941,7 +941,8 @@ static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct adapter *padapter = rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -955,7 +956,7 @@ static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_index
+ struct net_device *ndev, int link_id, u8 key_index
, bool unicast, bool multicast
)
{
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index dbd1159a2ef0..ce04c38f6afd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -987,22 +988,16 @@ release_fb:
static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
{
- struct apertures_struct *ap;
+ resource_size_t base = pci_resource_start(pdev, 0);
+ resource_size_t size = pci_resource_len(pdev, 0);
bool primary = false;
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
- kfree(ap);
- return 0;
+
+ return aperture_remove_conflicting_devices(base, size, primary, "sm750_fb1");
}
static int lynxfb_pci_probe(struct pci_dev *pdev,
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index b7b56d8406d1..471bb310176f 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -143,8 +143,8 @@ exit:
}
static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct wlandevice *wlandev = dev->ml_priv;
u32 did;
@@ -172,7 +172,7 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
}
static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*))
{
@@ -202,7 +202,8 @@ static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
}
static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct wlandevice *wlandev = dev->ml_priv;
u32 did;
@@ -227,7 +228,8 @@ static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
}
static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct wlandevice *wlandev = dev->ml_priv;
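[Editor's note] The rtl8723bs and wlan-ng hunks adapt to cfg80211's key operations growing an MLO link_id parameter. Drivers without multi-link support simply accept and ignore it. A minimal sketch of the new add_key prototype; the foo_* names are hypothetical and only add_key is shown.

#include <net/cfg80211.h>

static int foo_add_key(struct wiphy *wiphy, struct net_device *ndev,
		       int link_id, u8 key_index, bool pairwise,
		       const u8 *mac_addr, struct key_params *params)
{
	/*
	 * link_id identifies the MLO link the key belongs to; a driver
	 * without MLO support can ignore it, as the staging drivers do.
	 */

	/* program the key into the hardware here */
	return 0;
}

static const struct cfg80211_ops foo_cfg80211_ops = {
	.add_key = foo_add_key,
	/* .get_key, .del_key, .set_default_key, ... */
};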
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e6a967ddc08c..8a7306e5e133 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -39,7 +39,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
}
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
/* pscsi_attach_hba():
*
@@ -1002,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
return 0;
}
-static void pscsi_req_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret pscsi_req_done(struct request *req,
+ blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
@@ -1029,6 +1030,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
}
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static const struct target_backend_ops pscsi_ops = {
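[Editor's note] The pscsi hunk follows the block layer changing request end_io callbacks from void to enum rq_end_io_ret: RQ_END_IO_NONE means the callback manages the request's lifetime itself (as pscsi does with blk_mq_free_request()), while RQ_END_IO_FREE asks the block layer to free it after return. A sketch of both variants under hypothetical names.

#include <linux/blk-mq.h>
#include <linux/completion.h>

/* The callback keeps ownership of the request and frees it explicitly. */
static enum rq_end_io_ret foo_end_io_keep(struct request *req,
					  blk_status_t status)
{
	struct completion *done = req->end_io_data;

	complete(done);
	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}

/* The block layer frees the request once the callback returns. */
static enum rq_end_io_ret foo_end_io_free(struct request *req,
					  blk_status_t status)
{
	struct completion *done = req->end_io_data;

	complete(done);
	return RQ_END_IO_FREE;
}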
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 3deaeecb712e..2940559c3086 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -486,6 +486,7 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.netnsok = true,
.small_ops = tcmu_genl_ops,
.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .resv_start_op = TCMU_CMD_SET_FEATURES + 1,
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
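[Editor's note] The tcmu hunk sets the new genl_family.resv_start_op field: commands numbered at or above it get strict validation of reserved header fields, so existing families conventionally pass their highest current command plus one to keep legacy commands unaffected. A sketch with a hypothetical family.

#include <linux/kernel.h>
#include <net/genetlink.h>

enum {
	FOO_CMD_UNSPEC,
	FOO_CMD_GET,
	FOO_CMD_SET,
};

static const struct genl_small_ops foo_genl_ops[] = {
	{ .cmd = FOO_CMD_GET, /* .doit = ... */ },
	{ .cmd = FOO_CMD_SET, /* .doit = ... */ },
};

static struct genl_family foo_genl_family = {
	.name		= "FOO",
	.version	= 1,
	.small_ops	= foo_genl_ops,
	.n_small_ops	= ARRAY_SIZE(foo_genl_ops),
	/* Reserved-field checking starts with the first future command. */
	.resv_start_op	= FOO_CMD_SET + 1,
};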
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 7ab31740cff8..0828240f27e6 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -271,8 +271,8 @@ static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
unsigned long start)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
struct ffa_mem_region_attributes mem_attr = {
.receiver = ffa_dev->vm_id,
.attrs = FFA_MEM_RW,
@@ -294,14 +294,14 @@ static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
if (rc)
return rc;
args.sg = sgt.sgl;
- rc = ffa_ops->memory_share(ffa_dev, &args);
+ rc = mem_ops->memory_share(&args);
sg_free_table(&sgt);
if (rc)
return rc;
rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
if (rc) {
- ffa_ops->memory_reclaim(args.g_handle, 0);
+ mem_ops->memory_reclaim(args.g_handle, 0);
return rc;
}
@@ -314,8 +314,9 @@ static int optee_ffa_shm_unregister(struct tee_context *ctx,
struct tee_shm *shm)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
u64 global_handle = shm->sec_world_id;
struct ffa_send_direct_data data = {
.data0 = OPTEE_FFA_UNREGISTER_SHM,
@@ -327,11 +328,11 @@ static int optee_ffa_shm_unregister(struct tee_context *ctx,
optee_shm_rem_ffa_handle(optee, global_handle);
shm->sec_world_id = 0;
- rc = ffa_ops->sync_send_receive(ffa_dev, &data);
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc)
pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
- rc = ffa_ops->memory_reclaim(global_handle, 0);
+ rc = mem_ops->memory_reclaim(global_handle, 0);
if (rc)
pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
@@ -342,7 +343,7 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
struct tee_shm *shm)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ const struct ffa_mem_ops *mem_ops;
u64 global_handle = shm->sec_world_id;
int rc;
@@ -353,7 +354,8 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
*/
optee_shm_rem_ffa_handle(optee, global_handle);
- rc = ffa_ops->memory_reclaim(global_handle, 0);
+ mem_ops = optee->ffa.ffa_dev->ops->mem_ops;
+ rc = mem_ops->memory_reclaim(global_handle, 0);
if (rc)
pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
@@ -529,8 +531,8 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
struct optee_msg_arg *rpc_arg)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
struct optee_call_waiter w;
u32 cmd = data->data0;
u32 w4 = data->data1;
@@ -541,7 +543,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
/* Initialize waiter */
optee_cq_wait_init(&optee->call_queue, &w);
while (true) {
- rc = ffa_ops->sync_send_receive(ffa_dev, data);
+ rc = msg_ops->sync_send_receive(ffa_dev, data);
if (rc)
goto done;
@@ -576,7 +578,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
* OP-TEE has returned with a RPC request.
*
* Note that data->data4 (passed in register w7) is already
- * filled in by ffa_ops->sync_send_receive() returning
+ * filled in by ffa_mem_ops->sync_send_receive() returning
* above.
*/
cond_resched();
@@ -652,14 +654,15 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
*/
static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
- const struct ffa_dev_ops *ops)
+ const struct ffa_ops *ops)
{
+ const struct ffa_msg_ops *msg_ops = ops->msg_ops;
struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
int rc;
- ops->mode_32bit_set(ffa_dev);
+ msg_ops->mode_32bit_set(ffa_dev);
- rc = ops->sync_send_receive(ffa_dev, &data);
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d\n", rc);
return false;
@@ -672,7 +675,7 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
}
data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
- rc = ops->sync_send_receive(ffa_dev, &data);
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d\n", rc);
return false;
@@ -687,14 +690,14 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
}
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
- const struct ffa_dev_ops *ops,
+ const struct ffa_ops *ops,
u32 *sec_caps,
unsigned int *rpc_param_count)
{
struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
int rc;
- rc = ops->sync_send_receive(ffa_dev, &data);
+ rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d", rc);
return false;
@@ -783,7 +786,7 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
- const struct ffa_dev_ops *ffa_ops;
+ const struct ffa_ops *ffa_ops;
unsigned int rpc_param_count;
struct tee_shm_pool *pool;
struct tee_device *teedev;
@@ -793,11 +796,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
u32 sec_caps;
int rc;
- ffa_ops = ffa_dev_ops_get(ffa_dev);
- if (!ffa_ops) {
- pr_warn("failed \"method\" init: ffa\n");
- return -ENOENT;
- }
+ ffa_ops = ffa_dev->ops;
if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
return -EINVAL;
@@ -821,7 +820,6 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
- optee->ffa.ffa_ops = ffa_ops;
optee->rpc_param_count = rpc_param_count;
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
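[Editor's note] The ffa_abi rework above tracks the FF-A core exposing its operations as categorized tables reachable from the device itself (ffa_dev->ops->msg_ops, ->mem_ops), which makes ffa_dev_ops_get() and the cached optee->ffa.ffa_ops pointer unnecessary. A condensed sketch of the new access pattern; the function and the command value are hypothetical, and error handling is omitted.

#include <linux/arm_ffa.h>

static int foo_ffa_ping(struct ffa_device *ffa_dev)
{
	const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
	struct ffa_send_direct_data data = { .data0 = 0 /* hypothetical cmd */ };

	/* Messaging ops take the device; memory ops live under ops->mem_ops. */
	return msg_ops->sync_send_receive(ffa_dev, &data);
}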
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index a33d98d17cfd..04ae58892608 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -111,7 +111,6 @@ struct optee_smc {
*/
struct optee_ffa {
struct ffa_device *ffa_dev;
- const struct ffa_dev_ops *ffa_ops;
/* Serializes access to @global_ids */
struct mutex mutex;
struct rhashtable global_ids;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f2b1bcefcadd..27295bda3e0b 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
@@ -326,6 +327,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
void *ret;
int id;
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
mutex_lock(&teedev->mutex);
id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
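[Editor's note] The added access_ok() call only checks that the supplied range lies within the user address space; it does not fault anything in, so the later pinning or copying still has to handle -EFAULT. A tiny sketch of the pattern outside the tee code, with hypothetical names.

#include <linux/errno.h>
#include <linux/uaccess.h>

static int foo_register_user_buf(unsigned long addr, size_t length)
{
	/*
	 * Cheap sanity check: reject ranges that cannot possibly be valid
	 * userspace addresses before doing any expensive work.
	 */
	if (!access_ok((void __user *)addr, length))
		return -EFAULT;

	/* ... pin_user_pages() / copy_from_user() etc. follow here ... */
	return 0;
}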
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index e61b91d14ad1..d30cb791e63c 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -179,12 +179,12 @@ static int amlogic_thermal_disable(struct amlogic_thermal *data)
return 0;
}
-static int amlogic_thermal_get_temp(void *data, int *temp)
+static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
unsigned int tval;
- struct amlogic_thermal *pdata = data;
+ struct amlogic_thermal *pdata = tz->devdata;
- if (!data)
+ if (!pdata)
return -EINVAL;
regmap_read(pdata->regmap, TSENSOR_STAT0, &tval);
@@ -195,7 +195,7 @@ static int amlogic_thermal_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops amlogic_thermal_ops = {
+static const struct thermal_zone_device_ops amlogic_thermal_ops = {
.get_temp = amlogic_thermal_get_temp,
};
@@ -276,10 +276,10 @@ static int amlogic_thermal_probe(struct platform_device *pdev)
return PTR_ERR(pdata->sec_ao_map);
}
- pdata->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- 0,
- pdata,
- &amlogic_thermal_ops);
+ pdata->tzd = devm_thermal_of_zone_register(&pdev->dev,
+ 0,
+ pdata,
+ &amlogic_thermal_ops);
if (IS_ERR(pdata->tzd)) {
ret = PTR_ERR(pdata->tzd);
dev_err(dev, "Failed to register tsensor: %d\n", ret);
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index c2ebfb5be4b3..52d63b3997fe 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -420,9 +420,9 @@ static struct thermal_zone_device_ops legacy_ops = {
.get_temp = armada_get_temp_legacy,
};
-static int armada_get_temp(void *_sensor, int *temp)
+static int armada_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct armada_thermal_sensor *sensor = _sensor;
+ struct armada_thermal_sensor *sensor = tz->devdata;
struct armada_thermal_priv *priv = sensor->priv;
int ret;
@@ -450,7 +450,7 @@ unlock_mutex:
return ret;
}
-static const struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_device_ops of_ops = {
.get_temp = armada_get_temp,
};
@@ -928,9 +928,9 @@ static int armada_thermal_probe(struct platform_device *pdev)
/* Register the sensor */
sensor->priv = priv;
sensor->id = sensor_id;
- tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, sensor,
- &of_ops);
+ tz = devm_thermal_of_zone_register(&pdev->dev,
+ sensor->id, sensor,
+ &of_ops);
if (IS_ERR(tz)) {
dev_info(&pdev->dev, "Thermal sensor %d unavailable\n",
sensor_id);
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
index e9bef5c3414b..1f8651d15160 100644
--- a/drivers/thermal/broadcom/bcm2711_thermal.c
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -31,11 +31,11 @@ struct bcm2711_thermal_priv {
struct thermal_zone_device *thermal;
};
-static int bcm2711_get_temp(void *data, int *temp)
+static int bcm2711_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct bcm2711_thermal_priv *priv = data;
- int slope = thermal_zone_get_slope(priv->thermal);
- int offset = thermal_zone_get_offset(priv->thermal);
+ struct bcm2711_thermal_priv *priv = tz->devdata;
+ int slope = thermal_zone_get_slope(tz);
+ int offset = thermal_zone_get_offset(tz);
u32 val;
int ret;
@@ -54,7 +54,7 @@ static int bcm2711_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = {
+static const struct thermal_zone_device_ops bcm2711_thermal_of_ops = {
.get_temp = bcm2711_get_temp,
};
@@ -88,8 +88,8 @@ static int bcm2711_thermal_probe(struct platform_device *pdev)
}
priv->regmap = regmap;
- thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv,
- &bcm2711_thermal_of_ops);
+ thermal = devm_thermal_of_zone_register(dev, 0, priv,
+ &bcm2711_thermal_of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(dev, "could not register sensor: %d\n", ret);
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index c8e4344d5a3d..2c67841a1115 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -88,9 +88,9 @@ static int bcm2835_thermal_temp2adc(int temp, int offset, int slope)
return temp;
}
-static int bcm2835_thermal_get_temp(void *d, int *temp)
+static int bcm2835_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct bcm2835_thermal_data *data = d;
+ struct bcm2835_thermal_data *data = tz->devdata;
u32 val = readl(data->regs + BCM2835_TS_TSENSSTAT);
if (!(val & BCM2835_TS_TSENSSTAT_VALID))
@@ -135,7 +135,7 @@ static void bcm2835_thermal_debugfs(struct platform_device *pdev)
debugfs_create_regset32("regset", 0444, data->debugfsdir, regset);
}
-static const struct thermal_zone_of_device_ops bcm2835_thermal_ops = {
+static const struct thermal_zone_device_ops bcm2835_thermal_ops = {
.get_temp = bcm2835_thermal_get_temp,
};
@@ -206,8 +206,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
data->clk, rate);
/* register of thermal sensor and get info from DT */
- tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
- &bcm2835_thermal_ops);
+ tz = devm_thermal_of_zone_register(&pdev->dev, 0, data,
+ &bcm2835_thermal_ops);
if (IS_ERR(tz)) {
err = PTR_ERR(tz);
dev_err(&pdev->dev,
@@ -277,7 +277,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
return 0;
err_tz:
- thermal_zone_of_sensor_unregister(&pdev->dev, tz);
+ thermal_of_zone_unregister(tz);
err_clk:
clk_disable_unprepare(data->clk);
@@ -290,7 +290,7 @@ static int bcm2835_thermal_remove(struct platform_device *pdev)
struct thermal_zone_device *tz = data->tz;
debugfs_remove_recursive(data->debugfsdir);
- thermal_zone_of_sensor_unregister(&pdev->dev, tz);
+ thermal_of_zone_unregister(tz);
clk_disable_unprepare(data->clk);
return 0;
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 0cedb8b4f00a..c79c6cfdd74d 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -105,7 +105,7 @@ static struct avs_tmon_trip avs_tmon_trips[] = {
struct brcmstb_thermal_params {
unsigned int offset;
unsigned int mult;
- const struct thermal_zone_of_device_ops *of_ops;
+ const struct thermal_zone_device_ops *of_ops;
};
struct brcmstb_thermal_priv {
@@ -150,9 +150,9 @@ static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv,
return (u32)((offset - temp) / mult);
}
-static int brcmstb_get_temp(void *data, int *temp)
+static int brcmstb_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct brcmstb_thermal_priv *priv = data;
+ struct brcmstb_thermal_priv *priv = tz->devdata;
u32 val;
long t;
@@ -260,9 +260,9 @@ static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int brcmstb_set_trips(void *data, int low, int high)
+static int brcmstb_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct brcmstb_thermal_priv *priv = data;
+ struct brcmstb_thermal_priv *priv = tz->devdata;
dev_dbg(priv->dev, "set trips %d <--> %d\n", low, high);
@@ -288,7 +288,7 @@ static int brcmstb_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops brcmstb_16nm_of_ops = {
+static const struct thermal_zone_device_ops brcmstb_16nm_of_ops = {
.get_temp = brcmstb_get_temp,
};
@@ -298,7 +298,7 @@ static const struct brcmstb_thermal_params brcmstb_16nm_params = {
.of_ops = &brcmstb_16nm_of_ops,
};
-static const struct thermal_zone_of_device_ops brcmstb_28nm_of_ops = {
+static const struct thermal_zone_device_ops brcmstb_28nm_of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
@@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
static int brcmstb_thermal_probe(struct platform_device *pdev)
{
- const struct thermal_zone_of_device_ops *of_ops;
+ const struct thermal_zone_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
struct resource *res;
@@ -341,8 +341,8 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
of_ops = priv->temp_params->of_ops;
- thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv,
- of_ops);
+ thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv,
+ of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
diff --git a/drivers/thermal/broadcom/ns-thermal.c b/drivers/thermal/broadcom/ns-thermal.c
index c9468ba9d449..07a8a3f49bd0 100644
--- a/drivers/thermal/broadcom/ns-thermal.c
+++ b/drivers/thermal/broadcom/ns-thermal.c
@@ -14,19 +14,14 @@
#define PVTMON_CONTROL0_SEL_TEST_MODE 0x0000000e
#define PVTMON_STATUS 0x08
-struct ns_thermal {
- struct thermal_zone_device *tz;
- void __iomem *pvtmon;
-};
-
-static int ns_thermal_get_temp(void *data, int *temp)
+static int ns_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct ns_thermal *ns_thermal = data;
- int offset = thermal_zone_get_offset(ns_thermal->tz);
- int slope = thermal_zone_get_slope(ns_thermal->tz);
+ void __iomem *pvtmon = tz->devdata;
+ int offset = thermal_zone_get_offset(tz);
+ int slope = thermal_zone_get_slope(tz);
u32 val;
- val = readl(ns_thermal->pvtmon + PVTMON_CONTROL0);
+ val = readl(pvtmon + PVTMON_CONTROL0);
if ((val & PVTMON_CONTROL0_SEL_MASK) != PVTMON_CONTROL0_SEL_TEMP_MONITOR) {
/* Clear current mode selection */
val &= ~PVTMON_CONTROL0_SEL_MASK;
@@ -34,50 +29,47 @@ static int ns_thermal_get_temp(void *data, int *temp)
/* Set temp monitor mode (it's the default actually) */
val |= PVTMON_CONTROL0_SEL_TEMP_MONITOR;
- writel(val, ns_thermal->pvtmon + PVTMON_CONTROL0);
+ writel(val, pvtmon + PVTMON_CONTROL0);
}
- val = readl(ns_thermal->pvtmon + PVTMON_STATUS);
+ val = readl(pvtmon + PVTMON_STATUS);
*temp = slope * val + offset;
return 0;
}
-static const struct thermal_zone_of_device_ops ns_thermal_ops = {
+static const struct thermal_zone_device_ops ns_thermal_ops = {
.get_temp = ns_thermal_get_temp,
};
static int ns_thermal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ns_thermal *ns_thermal;
-
- ns_thermal = devm_kzalloc(dev, sizeof(*ns_thermal), GFP_KERNEL);
- if (!ns_thermal)
- return -ENOMEM;
+ struct thermal_zone_device *tz;
+ void __iomem *pvtmon;
- ns_thermal->pvtmon = of_iomap(dev_of_node(dev), 0);
- if (WARN_ON(!ns_thermal->pvtmon))
+ pvtmon = of_iomap(dev_of_node(dev), 0);
+ if (WARN_ON(!pvtmon))
return -ENOENT;
- ns_thermal->tz = devm_thermal_zone_of_sensor_register(dev, 0,
- ns_thermal,
- &ns_thermal_ops);
- if (IS_ERR(ns_thermal->tz)) {
- iounmap(ns_thermal->pvtmon);
- return PTR_ERR(ns_thermal->tz);
+ tz = devm_thermal_of_zone_register(dev, 0,
+ pvtmon,
+ &ns_thermal_ops);
+ if (IS_ERR(tz)) {
+ iounmap(pvtmon);
+ return PTR_ERR(tz);
}
- platform_set_drvdata(pdev, ns_thermal);
+ platform_set_drvdata(pdev, pvtmon);
return 0;
}
static int ns_thermal_remove(struct platform_device *pdev)
{
- struct ns_thermal *ns_thermal = platform_get_drvdata(pdev);
+ void __iomem *pvtmon = platform_get_drvdata(pdev);
- iounmap(ns_thermal->pvtmon);
+ iounmap(pvtmon);
return 0;
}
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 85ab9edd580c..2b93502543ff 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -19,7 +19,6 @@
#define SR_TMON_MAX_LIST 6
struct sr_tmon {
- struct thermal_zone_device *tz;
unsigned int crit_temp;
unsigned int tmon_id;
struct sr_thermal *priv;
@@ -31,9 +30,9 @@ struct sr_thermal {
struct sr_tmon tmon[SR_TMON_MAX_LIST];
};
-static int sr_get_temp(void *data, int *temp)
+static int sr_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct sr_tmon *tmon = data;
+ struct sr_tmon *tmon = tz->devdata;
struct sr_thermal *sr_thermal = tmon->priv;
*temp = readl(sr_thermal->regs + SR_TMON_TEMP_BASE(tmon->tmon_id));
@@ -41,13 +40,14 @@ static int sr_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops sr_tz_ops = {
+static const struct thermal_zone_device_ops sr_tz_ops = {
.get_temp = sr_get_temp,
};
static int sr_thermal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct thermal_zone_device *tz;
struct sr_thermal *sr_thermal;
struct sr_tmon *tmon;
struct resource *res;
@@ -84,10 +84,10 @@ static int sr_thermal_probe(struct platform_device *pdev)
writel(0, sr_thermal->regs + SR_TMON_TEMP_BASE(i));
tmon->tmon_id = i;
tmon->priv = sr_thermal;
- tmon->tz = devm_thermal_zone_of_sensor_register(dev, i, tmon,
- &sr_tz_ops);
- if (IS_ERR(tmon->tz))
- return PTR_ERR(tmon->tz);
+ tz = devm_thermal_of_zone_register(dev, i, tmon,
+ &sr_tz_ops);
+ if (IS_ERR(tz))
+ return PTR_ERR(tz);
dev_dbg(dev, "thermal sensor %d registered\n", i);
}
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index b76293cc989c..9f8b438fcf8f 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -475,7 +475,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
- * @np: a valid struct device_node to the cooling device device tree node
+ * @np: a valid struct device_node to the cooling device tree node
* @policy: cpufreq policy
* Normally this should be same as cpufreq policy->related_cpus.
* @em: Energy Model of the cpufreq policy
@@ -501,17 +501,17 @@ __cpufreq_cooling_register(struct device_node *np,
struct thermal_cooling_device_ops *cooling_ops;
char *name;
+ if (IS_ERR_OR_NULL(policy)) {
+ pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
+ return ERR_PTR(-EINVAL);
+ }
+
dev = get_cpu_device(policy->cpu);
if (unlikely(!dev)) {
pr_warn("No cpu device for cpu %d\n", policy->cpu);
return ERR_PTR(-ENODEV);
}
- if (IS_ERR_OR_NULL(policy)) {
- pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
- return ERR_PTR(-EINVAL);
- }
-
i = cpufreq_table_count_valid_entries(policy);
if (!i) {
pr_debug("%s: CPUFreq table not found or has no valid entries\n",
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index 180edec34e07..7dcfde7a9f2c 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -248,10 +248,9 @@ static int da9062_thermal_probe(struct platform_device *pdev)
jiffies_to_msecs(thermal->zone->passive_delay_jiffies));
ret = platform_get_irq_byname(pdev, "THERMAL");
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get platform IRQ.\n");
+ if (ret < 0)
goto err_zone;
- }
+
thermal->irq = ret;
ret = request_threaded_irq(thermal->irq, NULL,
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 121cf853e545..cb10e280681f 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -58,9 +58,9 @@ struct db8500_thermal_zone {
};
/* Callback to get current temperature */
-static int db8500_thermal_get_temp(void *data, int *temp)
+static int db8500_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct db8500_thermal_zone *th = data;
+ struct db8500_thermal_zone *th = tz->devdata;
/*
* TODO: There is no PRCMU interface to get temperature data currently,
@@ -72,7 +72,7 @@ static int db8500_thermal_get_temp(void *data, int *temp)
return 0;
}
-static struct thermal_zone_of_device_ops thdev_ops = {
+static const struct thermal_zone_device_ops thdev_ops = {
.get_temp = db8500_thermal_get_temp,
};
@@ -182,7 +182,7 @@ static int db8500_thermal_probe(struct platform_device *pdev)
}
/* register of thermal sensor and get info from DT */
- th->tz = devm_thermal_zone_of_sensor_register(dev, 0, th, &thdev_ops);
+ th->tz = devm_thermal_of_zone_register(dev, 0, th, &thdev_ops);
if (IS_ERR(th->tz)) {
dev_err(dev, "register thermal zone sensor failed\n");
return PTR_ERR(th->tz);
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 991a1c54296d..a08bbe33be96 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -31,8 +31,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
trip, trip_temp, tz->temperature,
trip_hyst);
- mutex_lock(&tz->lock);
-
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -65,8 +63,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
instance->cdev->updated = false; /* cdev needs update */
mutex_unlock(&instance->cdev->lock);
}
-
- mutex_unlock(&tz->lock);
}
/**
@@ -100,15 +96,13 @@ static int bang_bang_control(struct thermal_zone_device *tz, int trip)
{
struct thermal_instance *instance;
- thermal_zone_trip_update(tz, trip);
+ lockdep_assert_held(&tz->lock);
- mutex_lock(&tz->lock);
+ thermal_zone_trip_update(tz, trip);
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
- mutex_unlock(&tz->lock);
-
return 0;
}
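[Editor's note] The governor hunks here and below (fair share, power allocator, step wise, user space) encode the same locking change: the thermal core now calls throttle() with tz->lock already held, so the governors drop their own mutex_lock()/mutex_unlock() pairs and assert the new contract instead. The shape of a callback under the new rule, sketched with a hypothetical governor name.

#include <linux/lockdep.h>
#include <linux/thermal.h>

/* Hypothetical governor callback, invoked by the core under tz->lock. */
static int foo_governor_throttle(struct thermal_zone_device *tz, int trip)
{
	/* Document (and, with lockdep enabled, verify) the locking contract. */
	lockdep_assert_held(&tz->lock);

	/* walk tz->thermal_instances and update cooling devices ... */
	return 0;
}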
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index 6a2abcfc648f..a4ee4661e9cc 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -82,7 +82,7 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
- mutex_lock(&tz->lock);
+ lockdep_assert_held(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
@@ -112,7 +112,6 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
mutex_unlock(&cdev->lock);
}
- mutex_unlock(&tz->lock);
return 0;
}
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 1d5052470967..2d1aeaba38a8 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -392,8 +392,6 @@ static int allocate_power(struct thermal_zone_device *tz,
int i, num_actors, total_weight, ret = 0;
int trip_max_desired_temperature = params->trip_max_desired_temperature;
- mutex_lock(&tz->lock);
-
num_actors = 0;
total_weight = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
@@ -404,10 +402,8 @@ static int allocate_power(struct thermal_zone_device *tz,
}
}
- if (!num_actors) {
- ret = -ENODEV;
- goto unlock;
- }
+ if (!num_actors)
+ return -ENODEV;
/*
* We need to allocate five arrays of the same size:
@@ -421,10 +417,8 @@ static int allocate_power(struct thermal_zone_device *tz,
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
- if (!req_power) {
- ret = -ENOMEM;
- goto unlock;
- }
+ if (!req_power)
+ return -ENOMEM;
max_power = &req_power[num_actors];
granted_power = &req_power[2 * num_actors];
@@ -496,8 +490,6 @@ static int allocate_power(struct thermal_zone_device *tz,
control_temp - tz->temperature);
kfree(req_power);
-unlock:
- mutex_unlock(&tz->lock);
return ret;
}
@@ -576,7 +568,6 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
struct power_allocator_params *params = tz->governor_data;
u32 req_power;
- mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
struct thermal_cooling_device *cdev = instance->cdev;
@@ -598,7 +589,6 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
mutex_unlock(&instance->cdev->lock);
}
- mutex_unlock(&tz->lock);
}
/**
@@ -712,6 +702,8 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
struct power_allocator_params *params = tz->governor_data;
bool update;
+ lockdep_assert_held(&tz->lock);
+
/*
* We get called for every trip point but we only need to do
* our calculations once
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index 9729b46d0258..cdd3354bc27f 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -117,8 +117,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
trip, trip_type, trip_temp, trend, throttle);
- mutex_lock(&tz->lock);
-
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -145,8 +143,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
instance->cdev->updated = false; /* cdev needs update */
mutex_unlock(&instance->cdev->lock);
}
-
- mutex_unlock(&tz->lock);
}
/**
@@ -164,15 +160,13 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip)
{
struct thermal_instance *instance;
- thermal_zone_trip_update(tz, trip);
+ lockdep_assert_held(&tz->lock);
- mutex_lock(&tz->lock);
+ thermal_zone_trip_update(tz, trip);
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
- mutex_unlock(&tz->lock);
-
return 0;
}
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index a62a4e90bd3f..8bc1c22aaf03 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -34,7 +34,8 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip)
char *thermal_prop[5];
int i;
- mutex_lock(&tz->lock);
+ lockdep_assert_held(&tz->lock);
+
thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type);
thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature);
thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip);
@@ -43,7 +44,7 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip)
kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop);
for (i = 0; i < 4; ++i)
kfree(thermal_prop[i]);
- mutex_unlock(&tz->lock);
+
return 0;
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 19a242c69ce6..d6974db7aaf7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -434,9 +434,9 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
return 0;
}
-static int hisi_thermal_get_temp(void *__data, int *temp)
+static int hisi_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct hisi_thermal_sensor *sensor = __data;
+ struct hisi_thermal_sensor *sensor = tz->devdata;
struct hisi_thermal_data *data = sensor->data;
*temp = data->ops->get_temp(sensor);
@@ -447,7 +447,7 @@ static int hisi_thermal_get_temp(void *__data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
+static const struct thermal_zone_device_ops hisi_of_thermal_ops = {
.get_temp = hisi_thermal_get_temp,
};
@@ -459,7 +459,7 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
data->ops->irq_handler(sensor);
- hisi_thermal_get_temp(sensor, &temp);
+ temp = data->ops->get_temp(sensor);
if (temp >= sensor->thres_temp) {
dev_crit(&data->pdev->dev,
@@ -484,9 +484,9 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
int ret, i;
const struct thermal_trip *trip;
- sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, sensor,
- &hisi_of_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
+ sensor->id, sensor,
+ &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
sensor->tzd = NULL;
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index af666bd9e8d4..e2c2673025a7 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -96,15 +96,15 @@ static int imx8mp_tmu_get_temp(void *data, int *temp)
return 0;
}
-static int tmu_get_temp(void *data, int *temp)
+static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct tmu_sensor *sensor = data;
+ struct tmu_sensor *sensor = tz->devdata;
struct imx8mm_tmu *tmu = sensor->priv;
- return tmu->socdata->get_temp(data, temp);
+ return tmu->socdata->get_temp(sensor, temp);
}
-static struct thermal_zone_of_device_ops tmu_tz_ops = {
+static const struct thermal_zone_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
@@ -165,9 +165,9 @@ static int imx8mm_tmu_probe(struct platform_device *pdev)
for (i = 0; i < data->num_sensors; i++) {
tmu->sensors[i].priv = tmu;
tmu->sensors[i].tzd =
- devm_thermal_zone_of_sensor_register(&pdev->dev, i,
- &tmu->sensors[i],
- &tmu_tz_ops);
+ devm_thermal_of_zone_register(&pdev->dev, i,
+ &tmu->sensors[i],
+ &tmu_tz_ops);
if (IS_ERR(tmu->sensors[i].tzd)) {
ret = PTR_ERR(tmu->sensors[i].tzd);
dev_err(&pdev->dev,
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 331a241eb0ef..10bfa6507eb4 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -43,11 +43,11 @@ struct imx_sc_msg_misc_get_temp {
} data;
} __packed __aligned(4);
-static int imx_sc_thermal_get_temp(void *data, int *temp)
+static int imx_sc_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct imx_sc_msg_misc_get_temp msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
- struct imx_sc_sensor *sensor = data;
+ struct imx_sc_sensor *sensor = tz->devdata;
int ret;
msg.data.req.resource_id = sensor->resource_id;
@@ -70,7 +70,7 @@ static int imx_sc_thermal_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops imx_sc_thermal_ops = {
+static const struct thermal_zone_device_ops imx_sc_thermal_ops = {
.get_temp = imx_sc_thermal_get_temp,
};
@@ -109,10 +109,10 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
break;
}
- sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->resource_id,
- sensor,
- &imx_sc_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
+ sensor->resource_id,
+ sensor,
+ &imx_sc_thermal_ops);
if (IS_ERR(sensor->tzd)) {
dev_err(&pdev->dev, "failed to register thermal zone\n");
ret = PTR_ERR(sensor->tzd);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 80d4e0676083..db8a6f63657d 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
- if (!priv->data_vault)
+ if (ZERO_OR_NULL_PTR(priv->data_vault))
goto out_free;
bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
goto free_imok;
}
- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
if (result)
@@ -614,7 +614,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
@@ -647,7 +647,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
- if (priv->data_vault)
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
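[Editor's note] The int3400 change accounts for the GDDV buffer possibly having zero length: kmemdup() of zero bytes returns ZERO_SIZE_PTR rather than NULL, so a plain NULL test would let a non-dereferenceable pointer through. ZERO_OR_NULL_PTR() covers both cases; a small illustrative helper with a hypothetical name.

#include <linux/slab.h>
#include <linux/string.h>

static void *foo_dup_blob(const void *src, size_t len)
{
	void *copy = kmemdup(src, len, GFP_KERNEL);

	/* len == 0 yields ZERO_SIZE_PTR, which must never be dereferenced. */
	if (ZERO_OR_NULL_PTR(copy))
		return NULL;

	return copy;
}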
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index c2dc4c158b9d..bf1b1cdfade4 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -373,18 +373,7 @@ static struct pci_driver proc_thermal_pci_driver = {
.driver.pm = &proc_thermal_pci_pm,
};
-static int __init proc_thermal_init(void)
-{
- return pci_register_driver(&proc_thermal_pci_driver);
-}
-
-static void __exit proc_thermal_exit(void)
-{
- pci_unregister_driver(&proc_thermal_pci_driver);
-}
-
-module_init(proc_thermal_init);
-module_exit(proc_thermal_exit);
+module_pci_driver(proc_thermal_pci_driver);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
index 4571a1a53b84..09e032f822f3 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
@@ -151,18 +151,7 @@ static struct pci_driver proc_thermal_pci_driver = {
.driver.pm = &proc_thermal_pci_pm,
};
-static int __init proc_thermal_init(void)
-{
- return pci_register_driver(&proc_thermal_pci_driver);
-}
-
-static void __exit proc_thermal_exit(void)
-{
- pci_unregister_driver(&proc_thermal_pci_driver);
-}
-
-module_init(proc_thermal_init);
-module_exit(proc_thermal_exit);
+module_pci_driver(proc_thermal_pci_driver);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index c841ab37e7c6..2a5570b9799a 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -62,8 +62,7 @@ static struct dentry *debug_dir;
static unsigned int set_target_ratio;
static unsigned int current_ratio;
static bool should_skip;
-static bool reduce_irq;
-static atomic_t idle_wakeup_counter;
+
static unsigned int control_cpu; /* The cpu assigned to collect stat and update
* control parameters. default to BSP but BSP
* can be offlined.
@@ -285,9 +284,6 @@ static unsigned int get_compensation(int ratio)
cal_data[ratio + 1].steady_comp) / 3;
}
- /* REVISIT: simple penalty of double idle injection */
- if (reduce_irq)
- comp = ratio;
/* do not exceed limit */
if (comp + ratio >= MAX_TARGET_RATIO)
comp = MAX_TARGET_RATIO - ratio - 1;
@@ -301,13 +297,9 @@ static void adjust_compensation(int target_ratio, unsigned int win)
struct powerclamp_calibration_data *d = &cal_data[target_ratio];
/*
- * adjust compensations if confidence level has not been reached or
- * there are too many wakeups during the last idle injection period, we
- * cannot trust the data for compensation.
+ * adjust compensations if confidence level has not been reached.
*/
- if (d->confidence >= CONFIDENCE_OK ||
- atomic_read(&idle_wakeup_counter) >
- win * num_online_cpus())
+ if (d->confidence >= CONFIDENCE_OK)
return;
delta = set_target_ratio - current_ratio;
@@ -347,14 +339,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
tsc_last = tsc_now;
adjust_compensation(target_ratio, win);
- /*
- * too many external interrupts, set flag such
- * that we can take measure later.
- */
- reduce_irq = atomic_read(&idle_wakeup_counter) >=
- 2 * win * num_online_cpus();
- atomic_set(&idle_wakeup_counter, 0);
/* if we are above target+guard, skip */
return set_target_ratio + guard <= current_ratio;
}
@@ -532,8 +517,10 @@ static int start_power_clamp(void)
/* prefer BSP */
control_cpu = 0;
- if (!cpu_online(control_cpu))
- control_cpu = smp_processor_id();
+ if (!cpu_online(control_cpu)) {
+ control_cpu = get_cpu();
+ put_cpu();
+ }
clamping = true;
schedule_delayed_work(&poll_pkg_cstate_work, 0);
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index 5d0b3ffc6f46..22c9bcb899c3 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -139,9 +139,9 @@ static int k3_bgp_read_temp(struct k3_thermal_data *devdata,
return 0;
}
-static int k3_thermal_get_temp(void *devdata, int *temp)
+static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct k3_thermal_data *data = devdata;
+ struct k3_thermal_data *data = tz->devdata;
int ret = 0;
ret = k3_bgp_read_temp(data, temp);
@@ -151,7 +151,7 @@ static int k3_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static const struct thermal_zone_of_device_ops k3_of_thermal_ops = {
+static const struct thermal_zone_device_ops k3_of_thermal_ops = {
.get_temp = k3_thermal_get_temp,
};
@@ -213,9 +213,9 @@ static int k3_bandgap_probe(struct platform_device *pdev)
writel(val, data[id].bgp->base + data[id].ctrl_offset);
data[id].tzd =
- devm_thermal_zone_of_sensor_register(dev, id,
- &data[id],
- &k3_of_thermal_ops);
+ devm_thermal_of_zone_register(dev, id,
+ &data[id],
+ &k3_of_thermal_ops);
if (IS_ERR(data[id].tzd)) {
dev_err(dev, "thermal zone device is NULL\n");
ret = PTR_ERR(data[id].tzd);
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index 115a44eb4fbf..16b6bcf1bf4f 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -247,9 +247,9 @@ static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata,
}
/* Get temperature callback function for thermal zone */
-static int k3_thermal_get_temp(void *devdata, int *temp)
+static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct k3_thermal_data *data = devdata;
+ struct k3_thermal_data *data = tz->devdata;
int ret = 0;
ret = k3_bgp_read_temp(data, temp);
@@ -259,7 +259,7 @@ static int k3_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static const struct thermal_zone_of_device_ops k3_of_thermal_ops = {
+static const struct thermal_zone_device_ops k3_of_thermal_ops = {
.get_temp = k3_thermal_get_temp,
};
@@ -474,10 +474,8 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset);
bgp->ts_data[id] = &data[id];
- ti_thermal =
- devm_thermal_zone_of_sensor_register(bgp->dev, id,
- &data[id],
- &k3_of_thermal_ops);
+ ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, &data[id],
+ &k3_of_thermal_ops);
if (IS_ERR(ti_thermal)) {
dev_err(bgp->dev, "thermal zone device is NULL\n");
ret = PTR_ERR(ti_thermal);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 82d06c7411eb..6451a55eb582 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -44,9 +44,9 @@ struct max77620_therm_info {
* Return 0 on success otherwise error number to show reason of failure.
*/
-static int max77620_thermal_read_temp(void *data, int *temp)
+static int max77620_thermal_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct max77620_therm_info *mtherm = data;
+ struct max77620_therm_info *mtherm = tz->devdata;
unsigned int val;
int ret;
@@ -66,7 +66,7 @@ static int max77620_thermal_read_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops max77620_thermal_ops = {
+static const struct thermal_zone_device_ops max77620_thermal_ops = {
.get_temp = max77620_thermal_read_temp,
};
@@ -114,7 +114,7 @@ static int max77620_thermal_probe(struct platform_device *pdev)
*/
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
- mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+ mtherm->tz_device = devm_thermal_of_zone_register(&pdev->dev, 0,
mtherm, &max77620_thermal_ops);
if (IS_ERR(mtherm->tz_device)) {
ret = PTR_ERR(mtherm->tz_device);
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index ede94eadddda..8440692e3890 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -679,9 +679,9 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
return max;
}
-static int mtk_read_temp(void *data, int *temperature)
+static int mtk_read_temp(struct thermal_zone_device *tz, int *temperature)
{
- struct mtk_thermal *mt = data;
+ struct mtk_thermal *mt = tz->devdata;
int i;
int tempmax = INT_MIN;
@@ -700,7 +700,7 @@ static int mtk_read_temp(void *data, int *temperature)
return 0;
}
-static const struct thermal_zone_of_device_ops mtk_thermal_ops = {
+static const struct thermal_zone_device_ops mtk_thermal_ops = {
.get_temp = mtk_read_temp,
};
@@ -1082,8 +1082,8 @@ static int mtk_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mt);
- tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
- &mtk_thermal_ops);
+ tzdev = devm_thermal_of_zone_register(&pdev->dev, 0, mt,
+ &mtk_thermal_ops);
if (IS_ERR(tzdev)) {
ret = PTR_ERR(tzdev);
goto err_disable_clk_peri_therm;
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 073943cbcc2b..af68adf720cc 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -357,9 +357,9 @@ static irqreturn_t adc_tm5_gen2_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int adc_tm5_get_temp(void *data, int *temp)
+static int adc_tm5_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct adc_tm5_channel *channel = data;
+ struct adc_tm5_channel *channel = tz->devdata;
int ret;
if (!channel || !channel->iio)
@@ -639,9 +639,9 @@ config_fail:
return ret;
}
-static int adc_tm5_set_trips(void *data, int low, int high)
+static int adc_tm5_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct adc_tm5_channel *channel = data;
+ struct adc_tm5_channel *channel = tz->devdata;
struct adc_tm5_chip *chip;
int ret;
@@ -660,7 +660,7 @@ static int adc_tm5_set_trips(void *data, int low, int high)
return ret;
}
-static struct thermal_zone_of_device_ops adc_tm5_thermal_ops = {
+static const struct thermal_zone_device_ops adc_tm5_thermal_ops = {
.get_temp = adc_tm5_get_temp,
.set_trips = adc_tm5_set_trips,
};
@@ -672,11 +672,10 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
for (i = 0; i < adc_tm->nchannels; i++) {
adc_tm->channels[i].chip = adc_tm;
-
- tzd = devm_thermal_zone_of_sensor_register(adc_tm->dev,
- adc_tm->channels[i].channel,
- &adc_tm->channels[i],
- &adc_tm5_thermal_ops);
+ tzd = devm_thermal_of_zone_register(adc_tm->dev,
+ adc_tm->channels[i].channel,
+ &adc_tm->channels[i],
+ &adc_tm5_thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -ENODEV) {
dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n",
@@ -1026,10 +1025,8 @@ static int adc_tm5_probe(struct platform_device *pdev)
adc_tm->base = reg;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "get_irq failed: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = adc_tm5_get_dt_data(adc_tm, node);
if (ret) {
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 770f82cc9bca..be785ab37e53 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -186,9 +186,9 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
return 0;
}
-static int qpnp_tm_get_temp(void *data, int *temp)
+static int qpnp_tm_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct qpnp_tm_chip *chip = data;
+ struct qpnp_tm_chip *chip = tz->devdata;
int ret, mili_celsius;
if (!temp)
@@ -263,9 +263,9 @@ skip:
return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
}
-static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
+static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp)
{
- struct qpnp_tm_chip *chip = data;
+ struct qpnp_tm_chip *chip = tz->devdata;
const struct thermal_trip *trip_points;
int ret;
@@ -283,7 +283,7 @@ static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
return ret;
}
-static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
+static const struct thermal_zone_device_ops qpnp_tm_sensor_ops = {
.get_temp = qpnp_tm_get_temp,
.set_trip_temp = qpnp_tm_set_trip_temp,
};
@@ -446,7 +446,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
* read the trip points. get_temp() returns the default temperature
* before the hardware initialization is completed.
*/
- chip->tz_dev = devm_thermal_zone_of_sensor_register(
+ chip->tz_dev = devm_thermal_of_zone_register(
&pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
if (IS_ERR(chip->tz_dev)) {
dev_err(&pdev->dev, "failed to register sensor\n");
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index e49f58e83513..b1b10005fb28 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -532,9 +532,9 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct tsens_sensor *s = _sensor;
+ struct tsens_sensor *s = tz->devdata;
struct tsens_priv *priv = s->priv;
struct device *dev = priv->dev;
struct tsens_irq_data d;
@@ -925,9 +925,9 @@ err_put_device:
return ret;
}
-static int tsens_get_temp(void *data, int *temp)
+static int tsens_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct tsens_sensor *s = data;
+ struct tsens_sensor *s = tz->devdata;
struct tsens_priv *priv = s->priv;
return priv->ops->get_temp(s, temp);
@@ -991,7 +991,7 @@ static const struct of_device_id tsens_table[] = {
};
MODULE_DEVICE_TABLE(of, tsens_table);
-static const struct thermal_zone_of_device_ops tsens_of_ops = {
+static const struct thermal_zone_device_ops tsens_of_ops = {
.get_temp = tsens_get_temp,
.set_trips = tsens_set_trips,
};
@@ -1044,9 +1044,9 @@ static int tsens_register(struct tsens_priv *priv)
for (i = 0; i < priv->num_sensors; i++) {
priv->sensor[i].priv = priv;
- tzd = devm_thermal_zone_of_sensor_register(priv->dev, priv->sensor[i].hw_id,
- &priv->sensor[i],
- &tsens_of_ops);
+ tzd = devm_thermal_of_zone_register(priv->dev, priv->sensor[i].hw_id,
+ &priv->sensor[i],
+ &tsens_of_ops);
if (IS_ERR(tzd))
continue;
priv->sensor[i].tzd = tzd;
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 73049f9bea25..d111e218f362 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -82,9 +82,9 @@ static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s)
return container_of(s, struct qoriq_tmu_data, sensor[s->id]);
}
-static int tmu_get_temp(void *p, int *temp)
+static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct qoriq_sensor *qsensor = p;
+ struct qoriq_sensor *qsensor = tz->devdata;
struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor);
u32 val;
/*
@@ -122,7 +122,7 @@ static int tmu_get_temp(void *p, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops tmu_tz_ops = {
+static const struct thermal_zone_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
@@ -146,9 +146,9 @@ static int qoriq_tmu_register_tmu_zone(struct device *dev,
sensor->id = id;
- tzd = devm_thermal_zone_of_sensor_register(dev, id,
- sensor,
- &tmu_tz_ops);
+ tzd = devm_thermal_of_zone_register(dev, id,
+ sensor,
+ &tmu_tz_ops);
ret = PTR_ERR_OR_ZERO(tzd);
if (ret) {
if (ret == -ENODEV)
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index cda7c52f2319..4c1c6f89aa2f 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -164,9 +164,9 @@ static int rcar_gen3_thermal_round(int temp)
return result * RCAR3_THERMAL_GRAN;
}
-static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
+static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct rcar_gen3_thermal_tsc *tsc = devdata;
+ struct rcar_gen3_thermal_tsc *tsc = tz->devdata;
int mcelsius, val;
int reg;
@@ -203,9 +203,9 @@ static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
return INT_FIXPT(val);
}
-static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+static int rcar_gen3_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct rcar_gen3_thermal_tsc *tsc = devdata;
+ struct rcar_gen3_thermal_tsc *tsc = tz->devdata;
u32 irqmsk = 0;
if (low != -INT_MAX) {
@@ -225,7 +225,7 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
return 0;
}
-static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
+static struct thermal_zone_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
.set_trips = rcar_gen3_thermal_set_trips,
};
@@ -508,8 +508,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
- &rcar_gen3_tz_of_ops);
+ zone = devm_thermal_of_zone_register(dev, i, tsc,
+ &rcar_gen3_tz_of_ops);
if (IS_ERR(zone)) {
dev_err(dev, "Sensor %u: Can't register thermal zone\n", i);
ret = PTR_ERR(zone);
@@ -560,7 +560,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
priv->thermal_init(tsc);
if (zone->ops->set_trips)
- rcar_gen3_thermal_set_trips(tsc, zone->prev_low_trip,
+ rcar_gen3_thermal_set_trips(zone, zone->prev_low_trip,
zone->prev_high_trip);
}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 1d729ed4d685..4df42d70d867 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -271,13 +271,6 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
return 0;
}
-static int rcar_thermal_of_get_temp(void *data, int *temp)
-{
- struct rcar_thermal_priv *priv = data;
-
- return rcar_thermal_get_current_temp(priv, temp);
-}
-
static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
{
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
@@ -323,8 +316,8 @@ static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
return 0;
}
-static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
- .get_temp = rcar_thermal_of_get_temp,
+static struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
+ .get_temp = rcar_thermal_get_temp,
};
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
@@ -534,7 +527,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
goto error_unregister;
if (chip->use_of_thermal) {
- priv->zone = devm_thermal_zone_of_sensor_register(
+ priv->zone = devm_thermal_of_zone_register(
dev, i, priv,
&rcar_thermal_zone_of_ops);
} else {
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index dc3a9c276a09..819e059cde71 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1211,9 +1211,9 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
+static int rockchip_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_sensor *sensor = tz->devdata;
struct rockchip_thermal_data *thermal = sensor->thermal;
const struct rockchip_tsadc_chip *tsadc = thermal->chip;
@@ -1224,9 +1224,9 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
sensor->id, thermal->regs, high);
}
-static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
+static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_sensor *sensor = tz->devdata;
struct rockchip_thermal_data *thermal = sensor->thermal;
const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
int retval;
@@ -1239,7 +1239,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
return retval;
}
-static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
+static const struct thermal_zone_device_ops rockchip_of_thermal_ops = {
.get_temp = rockchip_thermal_get_temp,
.set_trips = rockchip_thermal_set_trips,
};
@@ -1326,8 +1326,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
sensor->thermal = thermal;
sensor->id = id;
- sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id,
- sensor, &rockchip_of_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, id, sensor,
+ &rockchip_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
error = PTR_ERR(sensor->tzd);
dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c
index 51ae80eda6af..2e0649f38506 100644
--- a/drivers/thermal/rzg2l_thermal.c
+++ b/drivers/thermal/rzg2l_thermal.c
@@ -73,9 +73,9 @@ static inline void rzg2l_thermal_write(struct rzg2l_thermal_priv *priv, u32 reg,
iowrite32(data, priv->base + reg);
}
-static int rzg2l_thermal_get_temp(void *devdata, int *temp)
+static int rzg2l_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct rzg2l_thermal_priv *priv = devdata;
+ struct rzg2l_thermal_priv *priv = tz->devdata;
u32 result = 0, dsensor, ts_code_ave;
int val, i;
@@ -114,7 +114,7 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops rzg2l_tz_of_ops = {
+static const struct thermal_zone_device_ops rzg2l_tz_of_ops = {
.get_temp = rzg2l_thermal_get_temp,
};
@@ -207,8 +207,8 @@ static int rzg2l_thermal_probe(struct platform_device *pdev)
goto err;
}
- zone = devm_thermal_zone_of_sensor_register(dev, 0, priv,
- &rzg2l_tz_of_ops);
+ zone = devm_thermal_of_zone_register(dev, 0, priv,
+ &rzg2l_tz_of_ops);
if (IS_ERR(zone)) {
dev_err(dev, "Can't register thermal zone");
ret = PTR_ERR(zone);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index f4ab4c5b4b62..51874d0a284c 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -650,9 +650,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
-static int exynos_get_temp(void *p, int *temp)
+static int exynos_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct exynos_tmu_data *data = p;
+ struct exynos_tmu_data *data = tz->devdata;
int value, ret = 0;
if (!data || !data->tmu_read)
@@ -728,9 +728,9 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
writel(val, data->base + emul_con);
}
-static int exynos_tmu_set_emulation(void *drv_data, int temp)
+static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp)
{
- struct exynos_tmu_data *data = drv_data;
+ struct exynos_tmu_data *data = tz->devdata;
int ret = -EINVAL;
if (data->soc == SOC_ARCH_EXYNOS4210)
@@ -750,7 +750,7 @@ out:
}
#else
#define exynos4412_tmu_set_emulation NULL
-static int exynos_tmu_set_emulation(void *drv_data, int temp)
+static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp)
{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */
@@ -997,7 +997,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return 0;
}
-static const struct thermal_zone_of_device_ops exynos_sensor_ops = {
+static const struct thermal_zone_device_ops exynos_sensor_ops = {
.get_temp = exynos_get_temp,
.set_emul_temp = exynos_tmu_set_emulation,
};
@@ -1091,8 +1091,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
* data->tzd must be registered before calling exynos_tmu_initialize(),
* requesting irq and calling exynos_tmu_control().
*/
- data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
- &exynos_sensor_ops);
+ data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data,
+ &exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
ret = PTR_ERR(data->tzd);
if (ret != -EPROBE_DEFER)
@@ -1104,21 +1104,19 @@ static int exynos_tmu_probe(struct platform_device *pdev)
ret = exynos_tmu_initialize(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize TMU\n");
- goto err_thermal;
+ goto err_sclk;
}
ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
- goto err_thermal;
+ goto err_sclk;
}
exynos_tmu_control(pdev, true);
return 0;
-err_thermal:
- thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
err_sclk:
clk_disable_unprepare(data->sclk);
err_clk:
@@ -1136,9 +1134,7 @@ err_sensor:
static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tzd = data->tzd;
- thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
exynos_tmu_control(pdev, false);
clk_disable_unprepare(data->sclk);
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
index fff80fc18002..ac884514f116 100644
--- a/drivers/thermal/sprd_thermal.c
+++ b/drivers/thermal/sprd_thermal.c
@@ -204,9 +204,9 @@ static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen)
return clamp(val, val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1));
}
-static int sprd_thm_read_temp(void *devdata, int *temp)
+static int sprd_thm_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct sprd_thermal_sensor *sen = devdata;
+ struct sprd_thermal_sensor *sen = tz->devdata;
u32 data;
data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) &
@@ -217,7 +217,7 @@ static int sprd_thm_read_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops sprd_thm_ops = {
+static const struct thermal_zone_device_ops sprd_thm_ops = {
.get_temp = sprd_thm_read_temp,
};
@@ -408,10 +408,10 @@ static int sprd_thm_probe(struct platform_device *pdev)
sprd_thm_sensor_init(thm, sen);
- sen->tzd = devm_thermal_zone_of_sensor_register(sen->dev,
- sen->id,
- sen,
- &sprd_thm_ops);
+ sen->tzd = devm_thermal_of_zone_register(sen->dev,
+ sen->id,
+ sen,
+ &sprd_thm_ops);
if (IS_ERR(sen->tzd)) {
dev_err(&pdev->dev, "register thermal zone failed %d\n",
sen->id);
@@ -523,8 +523,8 @@ static int sprd_thm_remove(struct platform_device *pdev)
for (i = 0; i < thm->nr_sensors; i++) {
sprd_thm_toggle_sensor(thm->sensor[i], false);
- devm_thermal_zone_of_sensor_unregister(&pdev->dev,
- thm->sensor[i]->tzd);
+ devm_thermal_of_zone_unregister(&pdev->dev,
+ thm->sensor[i]->tzd);
}
clk_disable_unprepare(thm->clk);
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 5fd3fb8912a6..78feb802a87d 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -302,9 +302,9 @@ static int stm_disable_irq(struct stm_thermal_sensor *sensor)
return 0;
}
-static int stm_thermal_set_trips(void *data, int low, int high)
+static int stm_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct stm_thermal_sensor *sensor = data;
+ struct stm_thermal_sensor *sensor = tz->devdata;
u32 itr1, th;
int ret;
@@ -350,9 +350,9 @@ static int stm_thermal_set_trips(void *data, int low, int high)
}
/* Callback to get temperature from HW */
-static int stm_thermal_get_temp(void *data, int *temp)
+static int stm_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct stm_thermal_sensor *sensor = data;
+ struct stm_thermal_sensor *sensor = tz->devdata;
u32 periods;
int freqM, ret;
@@ -474,7 +474,7 @@ static int stm_thermal_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
stm_thermal_suspend, stm_thermal_resume);
-static const struct thermal_zone_of_device_ops stm_tz_ops = {
+static const struct thermal_zone_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
.set_trips = stm_thermal_set_trips,
};
@@ -539,9 +539,9 @@ static int stm_thermal_probe(struct platform_device *pdev)
return ret;
}
- sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
- sensor,
- &stm_tz_ops);
+ sensor->th_dev = devm_thermal_of_zone_register(&pdev->dev, 0,
+ sensor,
+ &stm_tz_ops);
if (IS_ERR(sensor->th_dev)) {
dev_err(&pdev->dev, "%s: thermal zone sensor registering KO\n",
@@ -572,7 +572,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
return 0;
err_tz:
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
return ret;
}
@@ -582,7 +581,6 @@ static int stm_thermal_remove(struct platform_device *pdev)
stm_thermal_sensor_off(sensor);
thermal_remove_hwmon_sysfs(sensor->th_dev);
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
return 0;
}
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 212c87e63a66..e64d06d1328c 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -108,9 +108,9 @@ static int sun50i_h5_calc_temp(struct ths_device *tmdev,
return -1590 * reg / 10 + 276000;
}
-static int sun8i_ths_get_temp(void *data, int *temp)
+static int sun8i_ths_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct tsensor *s = data;
+ struct tsensor *s = tz->devdata;
struct ths_device *tmdev = s->tmdev;
int val = 0;
@@ -135,7 +135,7 @@ static int sun8i_ths_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops ths_ops = {
+static const struct thermal_zone_device_ops ths_ops = {
.get_temp = sun8i_ths_get_temp,
};
@@ -468,10 +468,10 @@ static int sun8i_ths_register(struct ths_device *tmdev)
tmdev->sensor[i].tmdev = tmdev;
tmdev->sensor[i].id = i;
tmdev->sensor[i].tzd =
- devm_thermal_zone_of_sensor_register(tmdev->dev,
- i,
- &tmdev->sensor[i],
- &ths_ops);
+ devm_thermal_of_zone_register(tmdev->dev,
+ i,
+ &tmdev->sensor[i],
+ &ths_ops);
if (IS_ERR(tmdev->sensor[i].tzd))
return PTR_ERR(tmdev->sensor[i].tzd);
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 825eab526619..1efe470f31e9 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -421,9 +421,9 @@ static int translate_temp(u16 val)
return t;
}
-static int tegra_thermctl_get_temp(void *data, int *out_temp)
+static int tegra_thermctl_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct tegra_thermctl_zone *zone = data;
+ struct tegra_thermctl_zone *zone = tz->devdata;
u32 val;
val = readl(zone->reg);
@@ -582,10 +582,9 @@ static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
return temp;
}
-static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
+static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp)
{
- struct tegra_thermctl_zone *zone = data;
- struct thermal_zone_device *tz = zone->tz;
+ struct tegra_thermctl_zone *zone = tz->devdata;
struct tegra_soctherm *ts = zone->ts;
const struct tegra_tsensor_group *sg = zone->sg;
struct device *dev = zone->dev;
@@ -657,9 +656,9 @@ static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
mutex_unlock(&zn->ts->thermctl_lock);
}
-static int tegra_thermctl_set_trips(void *data, int lo, int hi)
+static int tegra_thermctl_set_trips(struct thermal_zone_device *tz, int lo, int hi)
{
- struct tegra_thermctl_zone *zone = data;
+ struct tegra_thermctl_zone *zone = tz->devdata;
u32 r;
thermal_irq_disable(zone);
@@ -682,7 +681,7 @@ static int tegra_thermctl_set_trips(void *data, int lo, int hi)
return 0;
}
-static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+static const struct thermal_zone_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
.set_trips = tegra_thermctl_set_trips,
@@ -2194,9 +2193,9 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
zone->sg = soc->ttgs[i];
zone->ts = tegra;
- z = devm_thermal_zone_of_sensor_register(&pdev->dev,
- soc->ttgs[i]->id, zone,
- &tegra_of_thermal_ops);
+ z = devm_thermal_of_zone_register(&pdev->dev,
+ soc->ttgs[i]->id, zone,
+ &tegra_of_thermal_ops);
if (IS_ERR(z)) {
err = PTR_ERR(z);
dev_err(&pdev->dev, "failed to register sensor: %d\n",
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
index 5affc3d196be..eb84f0b9dc7c 100644
--- a/drivers/thermal/tegra/tegra-bpmp-thermal.c
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -30,9 +30,9 @@ struct tegra_bpmp_thermal {
struct tegra_bpmp_thermal_zone **zones;
};
-static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
+static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone,
+ int *out_temp)
{
- struct tegra_bpmp_thermal_zone *zone = data;
struct mrq_thermal_host_to_bpmp_request req;
union mrq_thermal_bpmp_to_host_response reply;
struct tegra_bpmp_message msg;
@@ -60,9 +60,14 @@ static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
return 0;
}
-static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
+static int tegra_bpmp_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct tegra_bpmp_thermal_zone *zone = data;
+ return __tegra_bpmp_thermal_get_temp(tz->devdata, out_temp);
+}
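The BPMP driver is the one conversion here whose probe() path still needs a raw, zone-less temperature reading, so the query is split into an inner helper that takes the driver structure directly, with the thermal-core callback above reduced to unwrapping tz->devdata. A generic sketch of that split, using bar_* placeholder names:

	/* raw accessor, callable before the thermal zone is registered */
	static int __bar_read_temp(struct bar_zone *zone, int *temp);

	/* thermal core callback: just forward tz->devdata to the raw accessor */
	static int bar_get_temp(struct thermal_zone_device *tz, int *temp)
	{
		return __bar_read_temp(tz->devdata, temp);
	}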
+
+static int tegra_bpmp_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct tegra_bpmp_thermal_zone *zone = tz->devdata;
struct mrq_thermal_host_to_bpmp_request req;
struct tegra_bpmp_message msg;
int err;
@@ -157,7 +162,7 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
return 0;
}
-static const struct thermal_zone_of_device_ops tegra_bpmp_of_thermal_ops = {
+static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops = {
.get_temp = tegra_bpmp_thermal_get_temp,
.set_trips = tegra_bpmp_thermal_set_trips,
};
@@ -200,13 +205,13 @@ static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
zone->idx = i;
zone->tegra = tegra;
- err = tegra_bpmp_thermal_get_temp(zone, &temp);
+ err = __tegra_bpmp_thermal_get_temp(zone, &temp);
if (err < 0) {
devm_kfree(&pdev->dev, zone);
continue;
}
- tzd = devm_thermal_zone_of_sensor_register(
+ tzd = devm_thermal_of_zone_register(
&pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -EPROBE_DEFER)
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index 05886684f429..c34501287e96 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -159,9 +159,9 @@ static void devm_tegra_tsensor_hw_disable(void *data)
tegra_tsensor_hw_disable(ts);
}
-static int tegra_tsensor_get_temp(void *data, int *temp)
+static int tegra_tsensor_get_temp(struct thermal_zone_device *tz, int *temp)
{
- const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor_channel *tsc = tz->devdata;
const struct tegra_tsensor *ts = tsc->ts;
int err, c1, c2, c3, c4, counter;
u32 val;
@@ -217,9 +217,9 @@ static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int tem
return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a);
}
-static int tegra_tsensor_set_trips(void *data, int low, int high)
+static int tegra_tsensor_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor_channel *tsc = tz->devdata;
const struct tegra_tsensor *ts = tsc->ts;
u32 val;
@@ -240,7 +240,7 @@ static int tegra_tsensor_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops ops = {
+static const struct thermal_zone_device_ops ops = {
.get_temp = tegra_tsensor_get_temp,
.set_trips = tegra_tsensor_set_trips,
};
@@ -516,7 +516,7 @@ static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
tsc->id = id;
tsc->regs = ts->regs + 0x40 * (hw_id + 1);
- tsc->tzd = devm_thermal_zone_of_sensor_register(ts->dev, id, tsc, &ops);
+ tsc->tzd = devm_thermal_of_zone_register(ts->dev, id, tsc, &ops);
if (IS_ERR(tsc->tzd)) {
if (PTR_ERR(tsc->tzd) != -ENODEV)
return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd),
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index 73665c3ccfe0..323e273e3298 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -52,9 +52,9 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
return temp;
}
-static int gadc_thermal_get_temp(void *data, int *temp)
+static int gadc_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct gadc_thermal_info *gti = data;
+ struct gadc_thermal_info *gti = tz->devdata;
int val;
int ret;
@@ -68,7 +68,7 @@ static int gadc_thermal_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops gadc_thermal_ops = {
+static const struct thermal_zone_device_ops gadc_thermal_ops = {
.get_temp = gadc_thermal_get_temp,
};
@@ -143,8 +143,8 @@ static int gadc_thermal_probe(struct platform_device *pdev)
gti->dev = &pdev->dev;
platform_set_drvdata(pdev, gti);
- gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
- &gadc_thermal_ops);
+ gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti,
+ &gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
ret = PTR_ERR(gti->tz_dev);
if (ret != -EPROBE_DEFER)
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6a5d0ae5d7a4..7e669b60a065 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -295,27 +295,14 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
cancel_delayed_work(&tz->poll_queue);
}
-static inline bool should_stop_polling(struct thermal_zone_device *tz)
-{
- return !thermal_zone_device_is_enabled(tz);
-}
-
static void monitor_thermal_zone(struct thermal_zone_device *tz)
{
- bool stop;
-
- stop = should_stop_polling(tz);
-
- mutex_lock(&tz->lock);
-
- if (!stop && tz->passive)
+ if (tz->mode != THERMAL_DEVICE_ENABLED)
+ thermal_zone_device_set_polling(tz, 0);
+ else if (tz->passive)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
- else if (!stop && tz->polling_delay_jiffies)
+ else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
- else
- thermal_zone_device_set_polling(tz, 0);
-
- mutex_unlock(&tz->lock);
}
static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip)
@@ -383,18 +370,13 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
handle_critical_trips(tz, trip, trip_temp, type);
else
handle_non_critical_trips(tz, trip);
- /*
- * Alright, we handled this trip successfully.
- * So, start monitoring again.
- */
- monitor_thermal_zone(tz);
}
static void update_temperature(struct thermal_zone_device *tz)
{
int temp, ret;
- ret = thermal_zone_get_temp(tz, &temp);
+ ret = __thermal_zone_get_temp(tz, &temp);
if (ret) {
if (ret != -EAGAIN)
dev_warn(&tz->device,
@@ -403,10 +385,8 @@ static void update_temperature(struct thermal_zone_device *tz)
return;
}
- mutex_lock(&tz->lock);
tz->last_temperature = tz->temperature;
tz->temperature = temp;
- mutex_unlock(&tz->lock);
trace_thermal_temperature(tz);
@@ -469,15 +449,9 @@ EXPORT_SYMBOL_GPL(thermal_zone_device_disable);
int thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
{
- enum thermal_device_mode mode;
-
- mutex_lock(&tz->lock);
-
- mode = tz->mode;
+ lockdep_assert_held(&tz->lock);
- mutex_unlock(&tz->lock);
-
- return mode == THERMAL_DEVICE_ENABLED;
+ return tz->mode == THERMAL_DEVICE_ENABLED;
}
void thermal_zone_device_update(struct thermal_zone_device *tz,
@@ -485,9 +459,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
{
int count;
- if (should_stop_polling(tz))
- return;
-
if (atomic_read(&in_suspend))
return;
@@ -495,14 +466,23 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
"'get_temp' ops set\n", __func__))
return;
+ mutex_lock(&tz->lock);
+
+ if (!thermal_zone_device_is_enabled(tz))
+ goto out;
+
update_temperature(tz);
- thermal_zone_set_trips(tz);
+ __thermal_zone_set_trips(tz);
tz->notify_event = event;
for (count = 0; count < tz->num_trips; count++)
handle_thermal_trip(tz, count);
+
+ monitor_thermal_zone(tz);
+out:
+ mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
@@ -1212,7 +1192,20 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (num_trips > THERMAL_MAX_TRIPS || num_trips < 0 || mask >> num_trips) {
+ /*
+ * Max trip count can't exceed 31 because of the "mask >> num_trips"
+ * check below. For example, shifting by 32 results in the compiler warning:
+ * warning: right shift count >= width of type [-Wshift-count-overflow]
+ *
+ * Also "mask >> num_trips" no longer behaves as intended once the shift
+ * count reaches 32, since shifting a 32-bit int by its full width is
+ * undefined. E.g. with mask = 0x80000000 (trip id 31 writable),
+ * "mask >> 32" can still evaluate to 0x80000000, so the check below
+ * would fail even though bit 31 of the mask is legitimately set.
+ */
+ if (num_trips > (BITS_PER_TYPE(int) - 1) || num_trips < 0 || mask >> num_trips) {
pr_err("Incorrect number of thermal trips\n");
return ERR_PTR(-EINVAL);
}
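A quick numeric check of the new bound (values chosen only for illustration): mask is a 32-bit int, so once num_trips reaches 32 the "mask >> num_trips" test shifts the operand by its full width, which is undefined in C and is what the compiler flags with -Wshift-count-overflow; capping num_trips at BITS_PER_TYPE(int) - 1 = 31 keeps every shift defined:

	int num_trips = 31;
	int mask = 0x40000000;		/* only bit 30 set: trip 30 writable */

	/* 0x40000000 >> 31 == 0, so a mask confined to the 31 trips passes */
	if (num_trips > (BITS_PER_TYPE(int) - 1) || num_trips < 0 || mask >> num_trips)
		return -EINVAL;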
@@ -1239,7 +1232,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
}
tz->id = id;
- strlcpy(tz->type, type, sizeof(tz->type));
+ strscpy(tz->type, type, sizeof(tz->type));
result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
if (result)
@@ -1329,6 +1322,7 @@ free_tz:
kfree(tz);
return ERR_PTR(result);
}
+EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
@@ -1457,9 +1451,6 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
- if (!thermal_zone_device_is_enabled(tz))
- continue;
-
thermal_zone_device_init(tz);
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
@@ -1491,10 +1482,6 @@ static int __init thermal_init(void)
if (result)
goto unregister_governors;
- result = of_parse_thermal_zones();
- if (result)
- goto unregister_class;
-
result = register_pm_notifier(&thermal_pm_nb);
if (result)
pr_warn("Thermal: Can not register suspend notifier, return %d\n",
@@ -1502,8 +1489,6 @@ static int __init thermal_init(void)
return 0;
-unregister_class:
- class_unregister(&thermal_class);
unregister_governors:
thermal_unregister_governors();
error:
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index c991bb290512..1571917bd3c8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -112,6 +112,8 @@ int thermal_build_list_of_policies(char *buf);
/* Helpers */
void thermal_zone_set_trips(struct thermal_zone_device *tz);
+void __thermal_zone_set_trips(struct thermal_zone_device *tz);
+int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
@@ -135,13 +137,11 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
/* device tree support */
#ifdef CONFIG_THERMAL_OF
-int of_parse_thermal_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
-static inline int of_parse_thermal_zones(void) { return 0; }
static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
{
return 0;
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 690890f054a3..c65cdce8f856 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -64,27 +64,17 @@ get_thermal_instance(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL(get_thermal_instance);
-/**
- * thermal_zone_get_temp() - returns the temperature of a thermal zone
- * @tz: a valid pointer to a struct thermal_zone_device
- * @temp: a valid pointer to where to store the resulting temperature.
- *
- * When a valid thermal zone reference is passed, it will fetch its
- * temperature and fill @temp.
- *
- * Return: On success returns 0, an error code otherwise
- */
-int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
int ret = -EINVAL;
int count;
int crit_temp = INT_MAX;
enum thermal_trip_type type;
- if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
- goto exit;
+ lockdep_assert_held(&tz->lock);
- mutex_lock(&tz->lock);
+ if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
+ return -EINVAL;
ret = tz->ops->get_temp(tz, temp);
@@ -107,35 +97,42 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
*temp = tz->emul_temperature;
}
- mutex_unlock(&tz->lock);
-exit:
return ret;
}
-EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
/**
- * thermal_zone_set_trips - Computes the next trip points for the driver
- * @tz: a pointer to a thermal zone device structure
+ * thermal_zone_get_temp() - returns the temperature of a thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @temp: a valid pointer to where to store the resulting temperature.
*
- * The function computes the next temperature boundaries by browsing
- * the trip points. The result is the closer low and high trip points
- * to the current temperature. These values are passed to the backend
- * driver to let it set its own notification mechanism (usually an
- * interrupt).
+ * When a valid thermal zone reference is passed, it will fetch its
+ * temperature and fill @temp.
*
- * It does not return a value
+ * Return: On success returns 0, an error code otherwise
*/
-void thermal_zone_set_trips(struct thermal_zone_device *tz)
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ int ret;
+
+ mutex_lock(&tz->lock);
+ ret = __thermal_zone_get_temp(tz, temp);
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
+
+void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
int low = -INT_MAX;
int high = INT_MAX;
int trip_temp, hysteresis;
int i, ret;
- mutex_lock(&tz->lock);
+ lockdep_assert_held(&tz->lock);
if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
- goto exit;
+ return;
for (i = 0; i < tz->num_trips; i++) {
int trip_low;
@@ -154,7 +151,7 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
/* No need to change trip points */
if (tz->prev_low_trip == low && tz->prev_high_trip == high)
- goto exit;
+ return;
tz->prev_low_trip = low;
tz->prev_high_trip = high;
@@ -169,8 +166,24 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
ret = tz->ops->set_trips(tz, low, high);
if (ret)
dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+}
-exit:
+/**
+ * thermal_zone_set_trips - Computes the next trip points for the driver
+ * @tz: a pointer to a thermal zone device structure
+ *
+ * The function computes the next temperature boundaries by browsing
+ * the trip points. The result is the closer low and high trip points
+ * to the current temperature. These values are passed to the backend
+ * driver to let it set its own notification mechanism (usually an
+ * interrupt).
+ *
+ * It does not return a value
+ */
+void thermal_zone_set_trips(struct thermal_zone_device *tz)
+{
+ mutex_lock(&tz->lock);
+ __thermal_zone_set_trips(tz);
mutex_unlock(&tz->lock);
}
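The __thermal_zone_get_temp()/__thermal_zone_set_trips() variants are internal helpers that assume tz->lock is already held (hence the lockdep_assert_held() calls); code outside the thermal core keeps using the locking wrappers. A minimal usage sketch, where tzd and dev are assumed locals rather than anything from this patch:

	int temp, ret;

	ret = thermal_zone_get_temp(tzd, &temp);	/* takes tz->lock internally */
	if (ret)
		dev_warn(dev, "failed to read zone temperature: %d\n", ret);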
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 09e49ec8b6f4..f53f4ceb6a5d 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -147,7 +147,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return -ENOMEM;
INIT_LIST_HEAD(&hwmon->tz_list);
- strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
+ strscpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
strreplace(hwmon->type, '-', '_');
hwmon->device = hwmon_device_register_for_thermal(&tz->device,
hwmon->type, hwmon);
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index 360b0dfdc3b0..39c921415989 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -20,11 +20,10 @@ static u32 thermal_mmio_readb(void __iomem *mmio_base)
return readb(mmio_base);
}
-static int thermal_mmio_get_temperature(void *private, int *temp)
+static int thermal_mmio_get_temperature(struct thermal_zone_device *tz, int *temp)
{
int t;
- struct thermal_mmio *sensor =
- (struct thermal_mmio *)private;
+ struct thermal_mmio *sensor = tz->devdata;
t = sensor->read_mmio(sensor->mmio_base) & sensor->mask;
t *= sensor->factor;
@@ -34,7 +33,7 @@ static int thermal_mmio_get_temperature(void *private, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops thermal_mmio_ops = {
+static const struct thermal_zone_device_ops thermal_mmio_ops = {
.get_temp = thermal_mmio_get_temperature,
};
@@ -68,10 +67,10 @@ static int thermal_mmio_probe(struct platform_device *pdev)
}
}
- thermal_zone = devm_thermal_zone_of_sensor_register(&pdev->dev,
- 0,
- sensor,
- &thermal_mmio_ops);
+ thermal_zone = devm_thermal_of_zone_register(&pdev->dev,
+ 0,
+ sensor,
+ &thermal_mmio_ops);
if (IS_ERR(thermal_zone)) {
dev_err(&pdev->dev,
"failed to register sensor (%ld)\n",
@@ -79,7 +78,7 @@ static int thermal_mmio_probe(struct platform_device *pdev)
return PTR_ERR(thermal_zone);
}
- thermal_mmio_get_temperature(sensor, &temperature);
+ thermal_mmio_get_temperature(thermal_zone, &temperature);
dev_info(&pdev->dev,
"thermal mmio sensor %s registered, current temperature: %d\n",
pdev->name, temperature);
@@ -107,7 +106,7 @@ static struct platform_driver thermal_mmio_driver = {
.probe = thermal_mmio_probe,
.driver = {
.name = "thermal-mmio",
- .of_match_table = of_match_ptr(thermal_mmio_id_table),
+ .of_match_table = thermal_mmio_id_table,
},
};
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 050d243a5fa1..e2d78a996b5f 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -693,6 +693,7 @@ static struct genl_family thermal_gnl_family __ro_after_init = {
.policy = thermal_genl_policy,
.small_ops = thermal_genl_ops,
.n_small_ops = ARRAY_SIZE(thermal_genl_ops),
+ .resv_start_op = THERMAL_GENL_CMD_CDEV_GET + 1,
.mcgrps = thermal_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps),
};
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 802c30b72a92..fd2fb84bf246 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -19,93 +19,6 @@
#include "thermal_core.h"
-/*** Private data structures to represent thermal device tree data ***/
-
-/**
- * struct __thermal_cooling_bind_param - a cooling device for a trip point
- * @cooling_device: a pointer to identify the referred cooling device
- * @min: minimum cooling state used at this trip point
- * @max: maximum cooling state used at this trip point
- */
-
-struct __thermal_cooling_bind_param {
- struct device_node *cooling_device;
- unsigned long min;
- unsigned long max;
-};
-
-/**
- * struct __thermal_bind_params - a match between trip and cooling device
- * @tcbp: a pointer to an array of cooling devices
- * @count: number of elements in array
- * @trip_id: the trip point index
- * @usage: the percentage (from 0 to 100) of cooling contribution
- */
-
-struct __thermal_bind_params {
- struct __thermal_cooling_bind_param *tcbp;
- unsigned int count;
- unsigned int trip_id;
- unsigned int usage;
-};
-
-/**
- * struct __thermal_zone - internal representation of a thermal zone
- * @passive_delay: polling interval while passive cooling is activated
- * @polling_delay: zone polling interval
- * @slope: slope of the temperature adjustment curve
- * @offset: offset of the temperature adjustment curve
- * @ntrips: number of trip points
- * @trips: an array of trip points (0..ntrips - 1)
- * @num_tbps: number of thermal bind params
- * @tbps: an array of thermal bind params (0..num_tbps - 1)
- * @sensor_data: sensor private data used while reading temperature and trend
- * @ops: set of callbacks to handle the thermal zone based on DT
- */
-
-struct __thermal_zone {
- int passive_delay;
- int polling_delay;
- int slope;
- int offset;
-
- /* trip data */
- int ntrips;
- struct thermal_trip *trips;
-
- /* cooling binding data */
- int num_tbps;
- struct __thermal_bind_params *tbps;
-
- /* sensor interface */
- void *sensor_data;
- const struct thermal_zone_of_device_ops *ops;
-};
-
-/*** DT thermal zone device callbacks ***/
-
-static int of_thermal_get_temp(struct thermal_zone_device *tz,
- int *temp)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->get_temp)
- return -EINVAL;
-
- return data->ops->get_temp(data->sensor_data, temp);
-}
-
-static int of_thermal_set_trips(struct thermal_zone_device *tz,
- int low, int high)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->set_trips)
- return -EINVAL;
-
- return data->ops->set_trips(data->sensor_data, low, high);
-}
-
/**
* of_thermal_get_ntrips - function to export number of available trip
* points.
@@ -158,114 +71,6 @@ of_thermal_get_trip_points(struct thermal_zone_device *tz)
}
EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
-/**
- * of_thermal_set_emul_temp - function to set emulated temperature
- *
- * @tz: pointer to a thermal zone
- * @temp: temperature to set
- *
- * This function gives the ability to set emulated value of temperature,
- * which is handy for debugging
- *
- * Return: zero on success, error code otherwise
- */
-static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
- int temp)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->set_emul_temp)
- return -EINVAL;
-
- return data->ops->set_emul_temp(data->sensor_data, temp);
-}
-
-static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
- enum thermal_trend *trend)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->get_trend)
- return -EINVAL;
-
- return data->ops->get_trend(data->sensor_data, trip, trend);
-}
-
-static int of_thermal_change_mode(struct thermal_zone_device *tz,
- enum thermal_device_mode mode)
-{
- struct __thermal_zone *data = tz->devdata;
-
- return data->ops->change_mode(data->sensor_data, mode);
-}
-
-static int of_thermal_bind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- struct __thermal_zone *data = thermal->devdata;
- struct __thermal_bind_params *tbp;
- struct __thermal_cooling_bind_param *tcbp;
- int i, j;
-
- if (!data || IS_ERR(data))
- return -ENODEV;
-
- /* find where to bind */
- for (i = 0; i < data->num_tbps; i++) {
- tbp = data->tbps + i;
-
- for (j = 0; j < tbp->count; j++) {
- tcbp = tbp->tcbp + j;
-
- if (tcbp->cooling_device == cdev->np) {
- int ret;
-
- ret = thermal_zone_bind_cooling_device(thermal,
- tbp->trip_id, cdev,
- tcbp->max,
- tcbp->min,
- tbp->usage);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static int of_thermal_unbind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- struct __thermal_zone *data = thermal->devdata;
- struct __thermal_bind_params *tbp;
- struct __thermal_cooling_bind_param *tcbp;
- int i, j;
-
- if (!data || IS_ERR(data))
- return -ENODEV;
-
- /* find where to unbind */
- for (i = 0; i < data->num_tbps; i++) {
- tbp = data->tbps + i;
-
- for (j = 0; j < tbp->count; j++) {
- tcbp = tbp->tcbp + j;
-
- if (tcbp->cooling_device == cdev->np) {
- int ret;
-
- ret = thermal_zone_unbind_cooling_device(thermal,
- tbp->trip_id, cdev);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
enum thermal_trip_type *type)
{
@@ -288,28 +93,6 @@ static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
return 0;
}
-static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
- int temp)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (trip >= tz->num_trips || trip < 0)
- return -EDOM;
-
- if (data->ops && data->ops->set_trip_temp) {
- int ret;
-
- ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
- if (ret)
- return ret;
- }
-
- /* thermal framework should take care of data->mask & (1 << trip) */
- tz->trips[trip].temperature = temp;
-
- return 0;
-}
-
static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
int *hyst)
{
@@ -347,62 +130,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
return -EINVAL;
}
-static struct thermal_zone_device_ops of_thermal_ops = {
- .get_trip_type = of_thermal_get_trip_type,
- .get_trip_temp = of_thermal_get_trip_temp,
- .set_trip_temp = of_thermal_set_trip_temp,
- .get_trip_hyst = of_thermal_get_trip_hyst,
- .set_trip_hyst = of_thermal_set_trip_hyst,
- .get_crit_temp = of_thermal_get_crit_temp,
-
- .bind = of_thermal_bind,
- .unbind = of_thermal_unbind,
-};
-
-/*** sensor API ***/
-
-static struct thermal_zone_device *
-thermal_zone_of_add_sensor(struct device_node *zone,
- struct device_node *sensor, void *data,
- const struct thermal_zone_of_device_ops *ops)
-{
- struct thermal_zone_device *tzd;
- struct __thermal_zone *tz;
-
- tzd = thermal_zone_get_zone_by_name(zone->name);
- if (IS_ERR(tzd))
- return ERR_PTR(-EPROBE_DEFER);
-
- tz = tzd->devdata;
-
- if (!ops)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&tzd->lock);
- tz->ops = ops;
- tz->sensor_data = data;
-
- tzd->ops->get_temp = of_thermal_get_temp;
- tzd->ops->get_trend = of_thermal_get_trend;
-
- /*
- * The thermal zone core will calculate the window if they have set the
- * optional set_trips pointer.
- */
- if (ops->set_trips)
- tzd->ops->set_trips = of_thermal_set_trips;
-
- if (ops->set_emul_temp)
- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
-
- if (ops->change_mode)
- tzd->ops->change_mode = of_thermal_change_mode;
-
- mutex_unlock(&tzd->lock);
-
- return tzd;
-}
-
/**
* thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone
* @tz_np: a valid thermal zone device node.
@@ -447,207 +174,6 @@ int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
}
EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id);
-/**
- * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone
- * @dev: a valid struct device pointer of a sensor device. Must contain
- * a valid .of_node, for the sensor node.
- * @sensor_id: a sensor identifier, in case the sensor IP has more
- * than one sensors
- * @data: a private pointer (owned by the caller) that will be passed
- * back, when a temperature reading is needed.
- * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
- *
- * This function will search the list of thermal zones described in device
- * tree and look for the zone that refer to the sensor device pointed by
- * @dev->of_node as temperature providers. For the zone pointing to the
- * sensor node, the sensor will be added to the DT thermal zone device.
- *
- * The thermal zone temperature is provided by the @get_temp function
- * pointer. When called, it will have the private pointer @data back.
- *
- * The thermal zone temperature trend is provided by the @get_trend function
- * pointer. When called, it will have the private pointer @data back.
- *
- * TODO:
- * 01 - This function must enqueue the new sensor instead of using
- * it as the only source of temperature values.
- *
- * 02 - There must be a way to match the sensor with all thermal zones
- * that refer to it.
- *
- * Return: On success returns a valid struct thermal_zone_device,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
- * check the return value with help of IS_ERR() helper.
- */
-struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
- const struct thermal_zone_of_device_ops *ops)
-{
- struct device_node *np, *child, *sensor_np;
- struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np)
- return ERR_PTR(-ENODEV);
-
- if (!dev || !dev->of_node) {
- of_node_put(np);
- return ERR_PTR(-ENODEV);
- }
-
- sensor_np = of_node_get(dev->of_node);
-
- for_each_available_child_of_node(np, child) {
- int ret, id;
-
- /* For now, thermal framework supports only 1 sensor per zone */
- ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id);
- if (ret)
- continue;
-
- if (id == sensor_id) {
- tzd = thermal_zone_of_add_sensor(child, sensor_np,
- data, ops);
- if (!IS_ERR(tzd))
- thermal_zone_device_enable(tzd);
-
- of_node_put(child);
- goto exit;
- }
- }
-exit:
- of_node_put(sensor_np);
- of_node_put(np);
-
- return tzd;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
-
-/**
- * thermal_zone_of_sensor_unregister - unregisters a sensor from a DT thermal zone
- * @dev: a valid struct device pointer of a sensor device. Must contain
- * a valid .of_node, for the sensor node.
- * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
- *
- * This function removes the sensor callbacks and private data from the
- * thermal zone device registered with thermal_zone_of_sensor_register()
- * API. It will also silent the zone by remove the .get_temp() and .get_trend()
- * thermal zone device callbacks.
- *
- * TODO: When the support to several sensors per zone is added, this
- * function must search the sensor list based on @dev parameter.
- *
- */
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tzd)
-{
- struct __thermal_zone *tz;
-
- if (!dev || !tzd || !tzd->devdata)
- return;
-
- tz = tzd->devdata;
-
- /* no __thermal_zone, nothing to be done */
- if (!tz)
- return;
-
- /* stop temperature polling */
- thermal_zone_device_disable(tzd);
-
- mutex_lock(&tzd->lock);
- tzd->ops->get_temp = NULL;
- tzd->ops->get_trend = NULL;
- tzd->ops->set_emul_temp = NULL;
- tzd->ops->change_mode = NULL;
-
- tz->ops = NULL;
- tz->sensor_data = NULL;
- mutex_unlock(&tzd->lock);
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
-
-static void devm_thermal_zone_of_sensor_release(struct device *dev, void *res)
-{
- thermal_zone_of_sensor_unregister(dev,
- *(struct thermal_zone_device **)res);
-}
-
-static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res,
- void *data)
-{
- struct thermal_zone_device **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
-/**
- * devm_thermal_zone_of_sensor_register - Resource managed version of
- * thermal_zone_of_sensor_register()
- * @dev: a valid struct device pointer of a sensor device. Must contain
- * a valid .of_node, for the sensor node.
- * @sensor_id: a sensor identifier, in case the sensor IP has more
- * than one sensors
- * @data: a private pointer (owned by the caller) that will be passed
- * back, when a temperature reading is needed.
- * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
- *
- * Refer thermal_zone_of_sensor_register() for more details.
- *
- * Return: On success returns a valid struct thermal_zone_device,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
- * check the return value with help of IS_ERR() helper.
- * Registered thermal_zone_device device will automatically be
- * released when device is unbounded.
- */
-struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int sensor_id,
- void *data, const struct thermal_zone_of_device_ops *ops)
-{
- struct thermal_zone_device **ptr, *tzd;
-
- ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- tzd = thermal_zone_of_sensor_register(dev, sensor_id, data, ops);
- if (IS_ERR(tzd)) {
- devres_free(ptr);
- return tzd;
- }
-
- *ptr = tzd;
- devres_add(dev, ptr);
-
- return tzd;
-}
-EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_register);
-
-/**
- * devm_thermal_zone_of_sensor_unregister - Resource managed version of
- * thermal_zone_of_sensor_unregister().
- * @dev: Device for which which resource was allocated.
- * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
- *
- * This function removes the sensor callbacks and private data from the
- * thermal zone device registered with devm_thermal_zone_of_sensor_register()
- * API. It will also silent the zone by remove the .get_temp() and .get_trend()
- * thermal zone device callbacks.
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tzd)
-{
- WARN_ON(devres_release(dev, devm_thermal_zone_of_sensor_release,
- devm_thermal_zone_of_sensor_match, tzd));
-}
-EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister);
-
/*** functions parsing device tree nodes ***/
static int of_find_trip_id(struct device_node *np, struct device_node *trip)
@@ -679,98 +205,6 @@ out:
return i;
}
-/**
- * thermal_of_populate_bind_params - parse and fill cooling map data
- * @np: DT node containing a cooling-map node
- * @__tbp: data structure to be filled with cooling map info
- * @trips: array of thermal zone trip points
- * @ntrips: number of trip points inside trips.
- *
- * This function parses a cooling-map type of node represented by
- * @np parameter and fills the read data into @__tbp data structure.
- * It needs the already parsed array of trip points of the thermal zone
- * in consideration.
- *
- * Return: 0 on success, proper error code otherwise
- */
-static int thermal_of_populate_bind_params(struct device_node *tz_np,
- struct device_node *np,
- struct __thermal_bind_params *__tbp)
-{
- struct of_phandle_args cooling_spec;
- struct __thermal_cooling_bind_param *__tcbp;
- struct device_node *trip;
- int ret, i, count;
- int trip_id;
- u32 prop;
-
- /* Default weight. Usage is optional */
- __tbp->usage = THERMAL_WEIGHT_DEFAULT;
- ret = of_property_read_u32(np, "contribution", &prop);
- if (ret == 0)
- __tbp->usage = prop;
-
- trip = of_parse_phandle(np, "trip", 0);
- if (!trip) {
- pr_err("missing trip property\n");
- return -ENODEV;
- }
-
- trip_id = of_find_trip_id(tz_np, trip);
- if (trip_id < 0) {
- ret = trip_id;
- goto end;
- }
-
- __tbp->trip_id = trip_id;
-
- count = of_count_phandle_with_args(np, "cooling-device",
- "#cooling-cells");
- if (count <= 0) {
- pr_err("Add a cooling_device property with at least one device\n");
- ret = -ENOENT;
- goto end;
- }
-
- __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
- if (!__tcbp) {
- ret = -ENOMEM;
- goto end;
- }
-
- for (i = 0; i < count; i++) {
- ret = of_parse_phandle_with_args(np, "cooling-device",
- "#cooling-cells", i, &cooling_spec);
- if (ret < 0) {
- pr_err("Invalid cooling-device entry\n");
- goto free_tcbp;
- }
-
- __tcbp[i].cooling_device = cooling_spec.np;
-
- if (cooling_spec.args_count >= 2) { /* at least min and max */
- __tcbp[i].min = cooling_spec.args[0];
- __tcbp[i].max = cooling_spec.args[1];
- } else {
- pr_err("wrong reference to cooling device, missing limits\n");
- }
- }
-
- __tbp->tcbp = __tcbp;
- __tbp->count = count;
-
- goto end;
-
-free_tcbp:
- for (i = i - 1; i >= 0; i--)
- of_node_put(__tcbp[i].cooling_device);
- kfree(__tcbp);
-end:
- of_node_put(trip);
-
- return ret;
-}
-
/*
* It maps 'enum thermal_trip_type' found in include/linux/thermal.h
* into the device tree binding of 'trip', property type.
@@ -811,16 +245,6 @@ static int thermal_of_get_trip_type(struct device_node *np,
return -ENODEV;
}
-/**
- * thermal_of_populate_trip - parse and fill one trip point data
- * @np: DT node containing a trip point node
- * @trip: trip point data structure to be filled up
- *
- * This function parses a trip point type of node represented by
- * @np parameter and fills the read data into @trip data structure.
- *
- * Return: 0 on success, proper error code otherwise
- */
static int thermal_of_populate_trip(struct device_node *np,
struct thermal_trip *trip)
{
@@ -897,258 +321,458 @@ out_of_node_put:
return ERR_PTR(ret);
}
-/**
- * thermal_of_build_thermal_zone - parse and fill one thermal zone data
- * @np: DT node containing a thermal zone node
- *
- * This function parses a thermal zone type of node represented by
- * @np parameter and fills the read data into a __thermal_zone data structure
- * and return this pointer.
- *
- * TODO: Missing properties to parse: thermal-sensor-names
- *
- * Return: On success returns a valid struct __thermal_zone,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
- * check the return value with help of IS_ERR() helper.
- */
-static struct __thermal_zone
-__init *thermal_of_build_thermal_zone(struct device_node *np)
+static struct device_node *of_thermal_zone_find(struct device_node *sensor, int id)
{
- struct device_node *child = NULL, *gchild;
- struct __thermal_zone *tz;
- int ret, i;
- u32 prop, coef[2];
+ struct device_node *np, *tz;
+ struct of_phandle_args sensor_specs;
+ np = of_find_node_by_name(NULL, "thermal-zones");
if (!np) {
- pr_err("no thermal zone np\n");
- return ERR_PTR(-EINVAL);
+ pr_debug("No thermal zones description\n");
+ return ERR_PTR(-ENODEV);
}
- tz = kzalloc(sizeof(*tz), GFP_KERNEL);
- if (!tz)
- return ERR_PTR(-ENOMEM);
+ /*
+	 * For each thermal zone, search for a sensor definition
+	 * corresponding to the one passed as parameter.
+ */
+ for_each_available_child_of_node(np, tz) {
+
+ int count, i;
+
+ count = of_count_phandle_with_args(tz, "thermal-sensors",
+ "#thermal-sensor-cells");
+ if (count <= 0) {
+ pr_err("%pOFn: missing thermal sensor\n", tz);
+ tz = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+
+ int ret;
- ret = of_property_read_u32(np, "polling-delay-passive", &prop);
+ ret = of_parse_phandle_with_args(tz, "thermal-sensors",
+ "#thermal-sensor-cells",
+ i, &sensor_specs);
+ if (ret < 0) {
+ pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret);
+ tz = ERR_PTR(ret);
+ goto out;
+ }
+
+ if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
+ sensor_specs.args[0] : 0)) {
+ pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz);
+ goto out;
+ }
+ }
+ }
+ tz = ERR_PTR(-ENODEV);
+out:
+ of_node_put(np);
+ return tz;
+}
+
+static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdelay)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
if (ret < 0) {
pr_err("%pOFn: missing polling-delay-passive property\n", np);
- goto free_tz;
+ return ret;
}
- tz->passive_delay = prop;
- ret = of_property_read_u32(np, "polling-delay", &prop);
+ ret = of_property_read_u32(np, "polling-delay", delay);
if (ret < 0) {
pr_err("%pOFn: missing polling-delay property\n", np);
- goto free_tz;
+ return ret;
}
- tz->polling_delay = prop;
+
+ return 0;
+}
+
+static struct thermal_zone_params *thermal_of_parameters_init(struct device_node *np)
+{
+ struct thermal_zone_params *tzp;
+ int coef[2];
+ int ncoef = ARRAY_SIZE(coef);
+ int prop, ret;
+
+ tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
+ if (!tzp)
+ return ERR_PTR(-ENOMEM);
+
+ tzp->no_hwmon = true;
+
+ if (!of_property_read_u32(np, "sustainable-power", &prop))
+ tzp->sustainable_power = prop;
/*
- * REVIST: for now, the thermal framework supports only
- * one sensor per thermal zone. Thus, we are considering
- * only the first two values as slope and offset.
+ * For now, the thermal framework supports only one sensor per
+ * thermal zone. Thus, we are considering only the first two
+ * values as slope and offset.
*/
- ret = of_property_read_u32_array(np, "coefficients", coef, 2);
- if (ret == 0) {
- tz->slope = coef[0];
- tz->offset = coef[1];
- } else {
- tz->slope = 1;
- tz->offset = 0;
+ ret = of_property_read_u32_array(np, "coefficients", coef, ncoef);
+ if (ret) {
+ coef[0] = 1;
+ coef[1] = 0;
+ }
+
+ tzp->slope = coef[0];
+ tzp->offset = coef[1];
+
+ return tzp;
+}
+
+static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_device *tz)
+{
+ struct device_node *np, *tz_np;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ tz_np = of_get_child_by_name(np, tz->type);
+
+ of_node_put(np);
+
+ if (!tz_np)
+ return ERR_PTR(-ENODEV);
+
+ return tz_np;
+}
+
+static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_id,
+ struct thermal_zone_device *tz, struct thermal_cooling_device *cdev)
+{
+ struct of_phandle_args cooling_spec;
+ int ret;
+
+ ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ index, &cooling_spec);
+
+ of_node_put(cooling_spec.np);
+
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ return ret;
}
- tz->trips = thermal_of_trips_init(np, &tz->ntrips);
- if (IS_ERR(tz->trips)) {
- ret = PTR_ERR(tz->trips);
- goto finish;
+ if (cooling_spec.args_count < 2) {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ return -EINVAL;
}
- /* cooling-maps */
- child = of_get_child_by_name(np, "cooling-maps");
+ if (cooling_spec.np != cdev->np)
+ return 0;
- /* cooling-maps not provided */
- if (!child)
- goto finish;
+ ret = thermal_zone_unbind_cooling_device(tz, trip_id, cdev);
+ if (ret)
+ pr_err("Failed to unbind '%s' with '%s': %d\n", tz->type, cdev->type, ret);
- tz->num_tbps = of_get_child_count(child);
- if (tz->num_tbps == 0)
- goto finish;
+ return ret;
+}
- tz->tbps = kcalloc(tz->num_tbps, sizeof(*tz->tbps), GFP_KERNEL);
- if (!tz->tbps) {
- ret = -ENOMEM;
- goto free_trips;
+static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
+ struct thermal_zone_device *tz, struct thermal_cooling_device *cdev)
+{
+ struct of_phandle_args cooling_spec;
+ int ret, weight = THERMAL_WEIGHT_DEFAULT;
+
+ of_property_read_u32(map_np, "contribution", &weight);
+
+ ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ index, &cooling_spec);
+
+ of_node_put(cooling_spec.np);
+
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ return ret;
}
- i = 0;
- for_each_child_of_node(child, gchild) {
- ret = thermal_of_populate_bind_params(np, gchild, &tz->tbps[i++]);
- if (ret) {
- of_node_put(gchild);
- goto free_tbps;
- }
+ if (cooling_spec.args_count < 2) {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ return -EINVAL;
}
-finish:
- of_node_put(child);
+ if (cooling_spec.np != cdev->np)
+ return 0;
+
+ ret = thermal_zone_bind_cooling_device(tz, trip_id, cdev, cooling_spec.args[1],
+ cooling_spec.args[0],
+ weight);
+ if (ret)
+ pr_err("Failed to bind '%s' with '%s': %d\n", tz->type, cdev->type, ret);
- return tz;
+ return ret;
+}
-free_tbps:
- for (i = i - 1; i >= 0; i--) {
- struct __thermal_bind_params *tbp = tz->tbps + i;
- int j;
+static int thermal_of_for_each_cooling_device(struct device_node *tz_np, struct device_node *map_np,
+ struct thermal_zone_device *tz, struct thermal_cooling_device *cdev,
+ int (*action)(struct device_node *, int, int,
+ struct thermal_zone_device *, struct thermal_cooling_device *))
+{
+ struct device_node *tr_np;
+ int count, i, trip_id;
- for (j = 0; j < tbp->count; j++)
- of_node_put(tbp->tcbp[j].cooling_device);
+ tr_np = of_parse_phandle(map_np, "trip", 0);
+ if (!tr_np)
+ return -ENODEV;
- kfree(tbp->tcbp);
+ trip_id = of_find_trip_id(tz_np, tr_np);
+ if (trip_id < 0)
+ return trip_id;
+
+ count = of_count_phandle_with_args(map_np, "cooling-device", "#cooling-cells");
+ if (count <= 0) {
+ pr_err("Add a cooling_device property with at least one device\n");
+ return -ENOENT;
}
- kfree(tz->tbps);
-free_trips:
- kfree(tz->trips);
-free_tz:
- kfree(tz);
- of_node_put(child);
+ /*
+	 * At this point, we don't want to bail out when there is an
+	 * error; we will try to bind/unbind as many cooling devices as
+	 * possible.
+ */
+ for (i = 0; i < count; i++)
+ action(map_np, i, trip_id, tz, cdev);
- return ERR_PTR(ret);
+ return 0;
}
-static __init void of_thermal_free_zone(struct __thermal_zone *tz)
+static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev,
+ int (*action)(struct device_node *, int, int,
+ struct thermal_zone_device *, struct thermal_cooling_device *))
{
- struct __thermal_bind_params *tbp;
- int i, j;
+ struct device_node *tz_np, *cm_np, *child;
+ int ret = 0;
- for (i = 0; i < tz->num_tbps; i++) {
- tbp = tz->tbps + i;
+ tz_np = thermal_of_zone_get_by_name(tz);
+ if (IS_ERR(tz_np)) {
+ pr_err("Failed to get node tz by name\n");
+ return PTR_ERR(tz_np);
+ }
- for (j = 0; j < tbp->count; j++)
- of_node_put(tbp->tcbp[j].cooling_device);
+ cm_np = of_get_child_by_name(tz_np, "cooling-maps");
+ if (!cm_np)
+ goto out;
- kfree(tbp->tcbp);
+ for_each_child_of_node(cm_np, child) {
+ ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
+ if (ret)
+ break;
}
- kfree(tz->tbps);
- kfree(tz->trips);
- kfree(tz);
+ of_node_put(cm_np);
+out:
+ of_node_put(tz_np);
+
+ return ret;
+}
+
+static int thermal_of_bind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_bind);
+}
+
+static int thermal_of_unbind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_unbind);
}
/**
- * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ * thermal_of_zone_unregister - Clean up the resources allocated for the zone
*
- * Finds all zones parsed and added to the thermal framework and remove them
- * from the system, together with their resources.
+ * This function disables the thermal zone and frees the resources
+ * allocated specifically for the thermal OF zone.
*
+ * @tz: a pointer to the thermal zone structure
*/
-static __init void of_thermal_destroy_zones(void)
+void thermal_of_zone_unregister(struct thermal_zone_device *tz)
{
- struct device_node *np, *child;
+ struct thermal_trip *trips = tz->trips;
+ struct thermal_zone_params *tzp = tz->tzp;
+ struct thermal_zone_device_ops *ops = tz->ops;
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return;
+ thermal_zone_device_disable(tz);
+ thermal_zone_device_unregister(tz);
+ kfree(trips);
+ kfree(tzp);
+ kfree(ops);
+}
+EXPORT_SYMBOL_GPL(thermal_of_zone_unregister);
+
+/**
+ * thermal_of_zone_register - Register a thermal zone from a sensor
+ * device node
+ *
+ * thermal_of_zone_register() parses the device tree for the given
+ * sensor device node and identifier. It searches for the thermal zone
+ * associated with the sensor/id pair, retrieves all of the thermal
+ * zone properties and registers a new thermal zone with those
+ * properties.
+ *
+ * @sensor: A device node pointer corresponding to the sensor in the device tree
+ * @id: An integer used as the sensor identifier
+ * @data: Private data to be stored in the thermal zone dedicated private area
+ * @ops: A set of thermal sensor ops
+ *
+ * Return: a valid thermal zone structure pointer on success.
+ * - EINVAL: if the device tree thermal description is malformed
+ * 	- ENOMEM: if one structure cannot be allocated
+ * - Other negative errors are returned by the underlying called functions
+ */
+struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_trip *trips;
+ struct thermal_zone_params *tzp;
+ struct thermal_zone_device_ops *of_ops;
+ struct device_node *np;
+ int delay, pdelay;
+ int ntrips, mask;
+ int ret;
+
+ of_ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
+ if (!of_ops)
+ return ERR_PTR(-ENOMEM);
+
+ np = of_thermal_zone_find(sensor, id);
+ if (IS_ERR(np)) {
+ if (PTR_ERR(np) != -ENODEV)
+ pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id);
+		ret = PTR_ERR(np);
+		goto out_kfree_of_ops;
}
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
+ trips = thermal_of_trips_init(np, &ntrips);
+ if (IS_ERR(trips)) {
+ pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
+		pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
+		ret = PTR_ERR(trips);
+		goto out_kfree_of_ops;
+ }
- zone = thermal_zone_get_zone_by_name(child->name);
- if (IS_ERR(zone))
- continue;
+ ret = thermal_of_monitor_init(np, &delay, &pdelay);
+ if (ret) {
+ pr_err("Failed to initialize monitoring delays from %pOFn\n", np);
+ goto out_kfree_trips;
+ }
- thermal_zone_device_unregister(zone);
- kfree(zone->tzp);
- kfree(zone->ops);
- of_thermal_free_zone(zone->devdata);
+ tzp = thermal_of_parameters_init(np);
+ if (IS_ERR(tzp)) {
+ ret = PTR_ERR(tzp);
+ pr_err("Failed to initialize parameter from %pOFn: %d\n", np, ret);
+ goto out_kfree_trips;
}
- of_node_put(np);
+
+ of_ops->get_trip_type = of_ops->get_trip_type ? : of_thermal_get_trip_type;
+ of_ops->get_trip_temp = of_ops->get_trip_temp ? : of_thermal_get_trip_temp;
+ of_ops->get_trip_hyst = of_ops->get_trip_hyst ? : of_thermal_get_trip_hyst;
+ of_ops->set_trip_hyst = of_ops->set_trip_hyst ? : of_thermal_set_trip_hyst;
+ of_ops->get_crit_temp = of_ops->get_crit_temp ? : of_thermal_get_crit_temp;
+ of_ops->bind = thermal_of_bind;
+ of_ops->unbind = thermal_of_unbind;
+
+ mask = GENMASK_ULL((ntrips) - 1, 0);
+
+ tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
+ mask, data, of_ops, tzp,
+ pdelay, delay);
+ if (IS_ERR(tz)) {
+ ret = PTR_ERR(tz);
+ pr_err("Failed to register thermal zone %pOFn: %d\n", np, ret);
+ goto out_kfree_tzp;
+ }
+
+ ret = thermal_zone_device_enable(tz);
+ if (ret) {
+ pr_err("Failed to enabled thermal zone '%s', id=%d: %d\n",
+ tz->type, tz->id, ret);
+ thermal_of_zone_unregister(tz);
+ return ERR_PTR(ret);
+ }
+
+ return tz;
+
+out_kfree_tzp:
+ kfree(tzp);
+out_kfree_trips:
+	kfree(trips);
+out_kfree_of_ops:
+	kfree(of_ops);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(thermal_of_zone_register);
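For illustration only, a minimal sketch (not part of this patch) of how a sensor driver might use the new OF API: register a zone for sensor cell 0 of its device node at probe time and tear it down on remove. The my_* names, the my_sensor structure and the temperature accessor are assumptions, not existing kernel symbols.

static int my_sensor_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct my_sensor *s = tz->devdata;	/* @data passed at registration time */

	*temp = my_sensor_read_mC(s);		/* assumed accessor, millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_device_ops my_sensor_tz_ops = {
	.get_temp = my_sensor_get_temp,
};

	/* in probe(), for sensor cell 0 of this device node */
	s->tzd = thermal_of_zone_register(dev->of_node, 0, s, &my_sensor_tz_ops);
	if (IS_ERR(s->tzd))
		return PTR_ERR(s->tzd);

	/* in remove() */
	thermal_of_zone_unregister(s->tzd);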
+
+static void devm_thermal_of_zone_release(struct device *dev, void *res)
+{
+ thermal_of_zone_unregister(*(struct thermal_zone_device **)res);
+}
+
+static int devm_thermal_of_zone_match(struct device *dev, void *res,
+ void *data)
+{
+ struct thermal_zone_device **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
}
/**
- * of_parse_thermal_zones - parse device tree thermal data
+ * devm_thermal_of_zone_register - register a thermal zone tied to the sensor life cycle
*
- * Initialization function that can be called by machine initialization
- * code to parse thermal data and populate the thermal framework
- * with hardware thermal zones info. This function only parses thermal zones.
- * Cooling devices and sensor devices nodes are supposed to be parsed
- * by their respective drivers.
- *
- * Return: 0 on success, proper error code otherwise
+ * This function is the device-managed version of the thermal_of_zone_register() function.
*
+ * @dev: a device structure pointer of the sensor to be tied with the thermal zone OF life cycle
+ * @sensor_id: the sensor identifier
+ * @data: a pointer to private data to be stored in the thermal zone 'devdata' field
+ * @ops: a pointer to the ops structure associated with the sensor
+ *
+ * Return: a valid thermal zone structure pointer on success, or an ERR_PTR() otherwise.
*/
-int __init of_parse_thermal_zones(void)
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int sensor_id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
- struct device_node *np, *child;
- struct __thermal_zone *tz;
- struct thermal_zone_device_ops *ops;
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return 0; /* Run successfully on systems without thermal DT */
- }
-
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
- struct thermal_zone_params *tzp;
- int i, mask = 0;
- u32 prop;
-
- tz = thermal_of_build_thermal_zone(child);
- if (IS_ERR(tz)) {
- pr_err("failed to build thermal zone %pOFn: %ld\n",
- child,
- PTR_ERR(tz));
- continue;
- }
-
- ops = kmemdup(&of_thermal_ops, sizeof(*ops), GFP_KERNEL);
- if (!ops)
- goto exit_free;
+ struct thermal_zone_device **ptr, *tzd;
- tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
- if (!tzp) {
- kfree(ops);
- goto exit_free;
- }
+ ptr = devres_alloc(devm_thermal_of_zone_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
- /* No hwmon because there might be hwmon drivers registering */
- tzp->no_hwmon = true;
-
- if (!of_property_read_u32(child, "sustainable-power", &prop))
- tzp->sustainable_power = prop;
-
- for (i = 0; i < tz->ntrips; i++)
- mask |= 1 << i;
-
- /* these two are left for temperature drivers to use */
- tzp->slope = tz->slope;
- tzp->offset = tz->offset;
-
- zone = thermal_zone_device_register_with_trips(child->name, tz->trips, tz->ntrips,
- mask, tz, ops, tzp, tz->passive_delay,
- tz->polling_delay);
- if (IS_ERR(zone)) {
- pr_err("Failed to build %pOFn zone %ld\n", child,
- PTR_ERR(zone));
- kfree(tzp);
- kfree(ops);
- of_thermal_free_zone(tz);
- /* attempting to build remaining zones still */
- }
+ tzd = thermal_of_zone_register(dev->of_node, sensor_id, data, ops);
+ if (IS_ERR(tzd)) {
+ devres_free(ptr);
+ return tzd;
}
- of_node_put(np);
-
- return 0;
-exit_free:
- of_node_put(child);
- of_node_put(np);
- of_thermal_free_zone(tz);
+ *ptr = tzd;
+ devres_add(dev, ptr);
- /* no memory available, so free what we have built */
- of_thermal_destroy_zones();
+ return tzd;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_of_zone_register);
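A hedged sketch of the device-managed flavour, mirroring the ti-soc-thermal and uniphier conversions later in this patch: probe() only needs the registration call and the zone is released automatically when the sensor device is unbound. Apart from the API call itself, the identifiers below (priv, my_sensor_tz_ops) are hypothetical.

	/* in probe(): the zone life cycle follows dev, no explicit unregister needed */
	tzd = devm_thermal_of_zone_register(dev, 0, priv, &my_sensor_tz_ops);
	if (IS_ERR(tzd)) {
		dev_err(dev, "failed to register thermal zone\n");
		return PTR_ERR(tzd);
	}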
- return -ENOMEM;
+/**
+ * devm_thermal_of_zone_unregister - Resource managed version of
+ * thermal_of_zone_unregister().
+ * @dev: Device for which the resource was allocated.
+ * @tz: a pointer to the struct thermal_zone_device where the sensor is registered.
+ *
+ * This function unregisters the thermal zone that was registered with the
+ * devm_thermal_of_zone_register() API and frees the resources associated
+ * with it.
+ * Normally this function does not need to be called; the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz)
+{
+ WARN_ON(devres_release(dev, devm_thermal_of_zone_release,
+ devm_thermal_of_zone_match, tz));
}
+EXPORT_SYMBOL_GPL(devm_thermal_of_zone_unregister);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 3a8d6e747c25..78c5841bdfae 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -49,7 +49,11 @@ static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int enabled = thermal_zone_device_is_enabled(tz);
+ int enabled;
+
+ mutex_lock(&tz->lock);
+ enabled = thermal_zone_device_is_enabled(tz);
+ mutex_unlock(&tz->lock);
return sprintf(buf, "%s\n", enabled ? "enabled" : "disabled");
}
@@ -115,7 +119,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
int temperature, hyst = 0;
enum thermal_trip_type type;
- if (!tz->ops->set_trip_temp)
+ if (!tz->ops->set_trip_temp && !tz->trips)
return -EPERM;
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1)
@@ -128,6 +132,9 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ if (tz->trips)
+ tz->trips[trip].temperature = temperature;
+
if (tz->ops->get_trip_hyst) {
ret = tz->ops->get_trip_hyst(tz, trip, &hyst);
if (ret)
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 703039d8b937..8a9055bd376e 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -65,10 +65,10 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
/* thermal zone ops */
/* Get temperature callback function for thermal zone */
-static inline int __ti_thermal_get_temp(void *devdata, int *temp)
+static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct thermal_zone_device *pcb_tz = NULL;
- struct ti_thermal_data *data = devdata;
+ struct ti_thermal_data *data = tz->devdata;
struct ti_bandgap *bgp;
const struct ti_temp_sensor *s;
int ret, tmp, slope, constant;
@@ -85,8 +85,8 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp)
return ret;
/* Default constants */
- slope = thermal_zone_get_slope(data->ti_thermal);
- constant = thermal_zone_get_offset(data->ti_thermal);
+ slope = thermal_zone_get_slope(tz);
+ constant = thermal_zone_get_offset(tz);
pcb_tz = data->pcb_tz;
/* In case pcb zone is available, use the extrapolation rule with it */
@@ -107,9 +107,9 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
+static int __ti_thermal_get_trend(struct thermal_zone_device *tz, int trip, enum thermal_trend *trend)
{
- struct ti_thermal_data *data = p;
+ struct ti_thermal_data *data = tz->devdata;
struct ti_bandgap *bgp;
int id, tr, ret = 0;
@@ -130,7 +130,7 @@ static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
return 0;
}
-static const struct thermal_zone_of_device_ops ti_of_thermal_ops = {
+static const struct thermal_zone_device_ops ti_of_thermal_ops = {
.get_temp = __ti_thermal_get_temp,
.get_trend = __ti_thermal_get_trend,
};
@@ -170,7 +170,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
return -EINVAL;
/* in case this is specified by DT */
- data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id,
+ data->ti_thermal = devm_thermal_of_zone_register(bgp->dev, id,
data, &ti_of_thermal_ops);
if (IS_ERR(data->ti_thermal)) {
dev_err(bgp->dev, "thermal zone device is NULL\n");
diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c
index 4cae5561a2a3..4111d99ef50e 100644
--- a/drivers/thermal/uniphier_thermal.c
+++ b/drivers/thermal/uniphier_thermal.c
@@ -187,9 +187,9 @@ static void uniphier_tm_disable_sensor(struct uniphier_tm_dev *tdev)
usleep_range(1000, 2000); /* The spec note says at least 1ms */
}
-static int uniphier_tm_get_temp(void *data, int *out_temp)
+static int uniphier_tm_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct uniphier_tm_dev *tdev = data;
+ struct uniphier_tm_dev *tdev = tz->devdata;
struct regmap *map = tdev->regmap;
int ret;
u32 temp;
@@ -204,7 +204,7 @@ static int uniphier_tm_get_temp(void *data, int *out_temp)
return 0;
}
-static const struct thermal_zone_of_device_ops uniphier_of_thermal_ops = {
+static const struct thermal_zone_device_ops uniphier_of_thermal_ops = {
.get_temp = uniphier_tm_get_temp,
};
@@ -289,8 +289,8 @@ static int uniphier_tm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
- tdev->tz_dev = devm_thermal_zone_of_sensor_register(dev, 0, tdev,
- &uniphier_of_thermal_ops);
+ tdev->tz_dev = devm_thermal_of_zone_register(dev, 0, tdev,
+ &uniphier_of_thermal_ops);
if (IS_ERR(tdev->tz_dev)) {
dev_err(dev, "failed to register sensor device\n");
return PTR_ERR(tdev->tz_dev);
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index e76a6c173637..f12d0a3ee3e2 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -29,8 +29,7 @@ config USB4_DEBUGFS_WRITE
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
- depends on (USB4=m || KUNIT=y)
- depends on KUNIT
+ depends on USB4 && KUNIT=y
default KUNIT_ALL_TESTS
config USB4_DMA_TEST
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index b1f0dc8df47c..7a8adf5ad5a0 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -42,7 +42,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
*/
dev = acpi_get_first_physical_node(adev);
while (!dev) {
- adev = adev->parent;
+ adev = acpi_dev_parent(adev);
if (!adev)
break;
dev = acpi_get_first_physical_node(adev);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index e5ede5debfb0..0c661a706160 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -407,7 +407,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
static int tb_async_error(const struct ctl_pkg *pkg)
{
- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+ const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index ae38f0d25a8d..572b5896caa3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2529,6 +2529,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
tb->cm_ops = &icm_icl_ops;
break;
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index cb8c9c4ae93a..b5cd9673e15d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -28,7 +28,11 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1
-
+/*
+ * Used with QUIRK_E2E to specify an unused HopID to which the Rx credits
+ * are transferred.
+ */
+#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
@@ -38,7 +42,9 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
+/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0)
+#define QUIRK_E2E BIT(1)
static int ring_interrupt_index(struct tb_ring *ring)
{
@@ -458,8 +464,18 @@ static void ring_release_msix(struct tb_ring *ring)
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
+ unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0;
+ if (nhi->quirks & QUIRK_E2E) {
+ start_hop = RING_FIRST_USABLE_HOPID + 1;
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
+ ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
+ ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
+ }
+ }
+
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
@@ -469,7 +485,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
* Automatically allocate HopID from the non-reserved
* range 1 .. hop_count - 1.
*/
- for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) {
if (!nhi->tx_rings[i]) {
ring->hop = i;
@@ -484,6 +500,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
}
}
+ if (ring->hop > 0 && ring->hop < start_hop) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
@@ -1097,12 +1118,26 @@ static void nhi_shutdown(struct tb_nhi *nhi)
static void nhi_check_quirks(struct tb_nhi *nhi)
{
- /*
- * Intel hardware supports auto clear of the interrupt status
- * reqister right after interrupt is being issued.
- */
- if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL)
+ if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ /*
+ * Intel hardware supports auto clear of the interrupt
+		 * status register right after the interrupt is issued.
+ */
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ /*
+ * Falcon Ridge controller needs the end-to-end
+ * flow control workaround to avoid losing Rx
+ * packets when RING_FLAG_E2E is set.
+ */
+ nhi->quirks |= QUIRK_E2E;
+ break;
+ }
+ }
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index f09da5b62233..01190d9ced16 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 244f8cd38b25..77d7f07ca075 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2413,6 +2413,7 @@ int tb_switch_configure(struct tb_switch *sw)
* additional capabilities.
*/
sw->config.cmuv = USB4_VERSION_1_0;
+ sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
@@ -3786,14 +3787,18 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
*/
int tb_switch_xhci_connect(struct tb_switch *sw)
{
- bool usb_port1, usb_port3, xhci_port1, xhci_port3;
struct tb_port *port1, *port3;
int ret;
+ if (sw->generation != 3)
+ return 0;
+
port1 = &sw->ports[1];
port3 = &sw->ports[3];
if (tb_switch_is_alpine_ridge(sw)) {
+ bool usb_port1, usb_port3, xhci_port1, xhci_port3;
+
usb_port1 = tb_lc_is_usb_plugged(port1);
usb_port3 = tb_lc_is_usb_plugged(port3);
xhci_port1 = tb_lc_is_xhci_connected(port1);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9853f6c7e81d..9a277078338c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -174,10 +174,10 @@ static void tb_discover_tunnels(struct tb *tb)
}
}
-static int tb_port_configure_xdomain(struct tb_port *port)
+static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
- return usb4_port_configure_xdomain(port);
+ return usb4_port_configure_xdomain(port, xd);
return tb_lc_configure_xdomain(port);
}
@@ -212,7 +212,7 @@ static void tb_scan_xdomain(struct tb_port *port)
NULL);
if (xd) {
tb_port_at(route, sw)->xdomain = xd;
- tb_port_configure_xdomain(port);
+ tb_port_configure_xdomain(port, xd);
tb_xdomain_add(xd);
}
}
@@ -1516,7 +1516,7 @@ static void tb_restore_children(struct tb_switch *sw)
tb_restore_children(port->remote->sw);
} else if (port->xdomain) {
- tb_port_configure_xdomain(port);
+ tb_port_configure_xdomain(port, port->xdomain);
}
}
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5db76de40cc1..0f067c06cba6 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1176,7 +1176,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
-int usb4_port_configure_xdomain(struct tb_port *port);
+int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 3a2e7126db9d..a386228a44ee 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1115,12 +1115,14 @@ static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
/**
* usb4_port_configure_xdomain() - Configure port for XDomain
* @port: USB4 port connected to another host
+ * @xd: XDomain that is connected to the port
*
- * Marks the USB4 port as being connected to another host. Returns %0 in
- * success and negative errno in failure.
+ * Marks the USB4 port as being connected to another host and updates
+ * the link type. Returns %0 on success and negative errno on failure.
*/
-int usb4_port_configure_xdomain(struct tb_port *port)
+int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
+ xd->link_usb4 = link_is_usb4(port);
return usb4_set_xdomain_configured(port, true);
}
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index 6b02945624ee..1a30c0a23286 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -53,6 +53,8 @@ static ssize_t link_show(struct device *dev, struct device_attribute *attr,
link = port->sw->link_usb4 ? "usb4" : "tbt";
else if (tb_port_has_remote(port))
link = port->remote->sw->link_usb4 ? "usb4" : "tbt";
+ else if (port->xdomain)
+ link = port->xdomain->link_usb4 ? "usb4" : "tbt";
else
link = "none";
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index caa5c14ed57f..01c112e2e214 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -248,7 +248,7 @@ struct gsm_mux {
bool constipated; /* Asked by remote to shut up */
bool has_devices; /* Devices were registered */
- spinlock_t tx_lock;
+ struct mutex tx_mutex;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
@@ -256,7 +256,7 @@ struct gsm_mux {
struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
- struct timer_list kick_timer; /* Kick TX queuing on timeout */
+ struct delayed_work kick_timeout; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -680,7 +680,6 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
struct gsm_msg *msg;
u8 *dp;
int ocr;
- unsigned long flags;
msg = gsm_data_alloc(gsm, addr, 0, control);
if (!msg)
@@ -702,10 +701,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
gsm_print_packet("Q->", addr, cr, control, NULL, 0);
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
gsm->tx_bytes += msg->len;
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
gsmld_write_trigger(gsm);
return 0;
@@ -730,7 +729,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
spin_unlock_irqrestore(&dlci->lock, flags);
/* Clear data packets in MUX write queue */
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
if (msg->addr != addr)
continue;
@@ -738,7 +737,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
list_del(&msg->list);
kfree(msg);
}
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -1009,7 +1008,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
gsm->tx_bytes += msg->len;
gsmld_write_trigger(gsm);
- mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
+ schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
}
/**
@@ -1024,10 +1023,9 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
- unsigned long flags;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
__gsm_data_queue(dlci, msg);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/**
@@ -1039,7 +1037,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
@@ -1099,7 +1097,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -1115,7 +1113,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
if (dlci->adaption == 4)
overhead = 1;
- /* dlci->skb is locked by tx_lock */
+ /* dlci->skb is locked by tx_mutex */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
@@ -1169,7 +1167,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
* Push an empty frame in to the transmit queue to update the modem status
* bits and to transmit an optional break.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
@@ -1283,13 +1281,12 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
- unsigned long flags;
int sweep;
if (dlci->constipated)
return;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
@@ -1300,7 +1297,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/*
@@ -1984,24 +1981,23 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
/**
- * gsm_kick_timer - transmit if possible
- * @t: timer contained in our gsm object
+ * gsm_kick_timeout - transmit if possible
+ * @work: work contained in our gsm object
*
* Transmit data from DLCIs if the queue is empty. We can't rely on
* a tty wakeup except when we filled the pipe so we need to fire off
* new data ourselves in other cases.
*/
-static void gsm_kick_timer(struct timer_list *t)
+static void gsm_kick_timeout(struct work_struct *work)
{
- struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
- unsigned long flags;
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
int sent = 0;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
if (gsm->tx_bytes < TX_THRESH_LO)
sent = gsm_dlci_data_sweep(gsm);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
if (sent && debug & 4)
pr_info("%s TX queue stalled\n", __func__);
@@ -2458,7 +2454,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
- del_timer_sync(&gsm->kick_timer);
+ cancel_delayed_work_sync(&gsm->kick_timeout);
del_timer_sync(&gsm->t2_timer);
/* Finish writing to ldisc */
@@ -2501,13 +2497,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
if (dlci == NULL)
return -ENOMEM;
- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- INIT_WORK(&gsm->tx_work, gsmld_write_task);
- init_waitqueue_head(&gsm->event);
- spin_lock_init(&gsm->control_lock);
- spin_lock_init(&gsm->tx_lock);
-
if (gsm->encoding == 0)
gsm->receive = gsm0_receive;
else
@@ -2538,6 +2527,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
break;
}
}
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2609,9 +2599,15 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
+ mutex_init(&gsm->tx_mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
INIT_LIST_HEAD(&gsm->tx_data_list);
+ INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2636,6 +2632,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_unlock(&gsm_mux_lock);
if (i == MAX_MUX) {
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2791,17 +2788,16 @@ static void gsmld_write_trigger(struct gsm_mux *gsm)
static void gsmld_write_task(struct work_struct *work)
{
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
- unsigned long flags;
int i, ret;
/* All outstanding control channel and control messages and one data
* frame is sent.
*/
ret = -ENODEV;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
if (gsm->tty)
ret = gsm_data_kick(gsm);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
if (ret >= 0)
for (i = 0; i < NUM_DLCI; i++)
@@ -2858,7 +2854,8 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
flags = *fp++;
switch (flags) {
case TTY_NORMAL:
- gsm->receive(gsm, *cp);
+ if (gsm->receive)
+ gsm->receive(gsm, *cp);
break;
case TTY_OVERRUN:
case TTY_BREAK:
@@ -2946,10 +2943,6 @@ static int gsmld_open(struct tty_struct *tty)
gsmld_attach_gsm(tty, gsm);
- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- INIT_WORK(&gsm->tx_work, gsmld_write_task);
-
return 0;
}
@@ -3012,7 +3005,6 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
int space;
int ret;
@@ -3020,13 +3012,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
return -ENODEV;
ret = -ENOBUFS;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
space = tty_write_room(tty);
if (space >= nr)
ret = tty->ops->write(tty, buf, nr);
else
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
return ret;
}
@@ -3323,14 +3315,13 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
struct gsm_mux *gsm = dlci->gsm;
- unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
return;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
gsm_dlci_modem_output(gsm, dlci, brk);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 7520cc02fd4d..65d6af755567 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -461,9 +461,6 @@ static int __init serial21285_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- if (machine_is_personal_server())
- baud = 57600;
-
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 0dcecbbc3967..f7fbef83583c 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1334,6 +1334,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = serial8250_em485_start_tx;
up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 877173907c53..ba4b63fd511e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1083,8 +1083,8 @@ config SERIAL_TIMBERDALE
config SERIAL_BCM63XX
tristate "Broadcom BCM63xx/BCM33xx UART support"
select SERIAL_CORE
- depends on ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
- default ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC
+ depends on ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ default ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC
help
This enables the driver for the onchip UART core found on
the following chipsets:
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 30ba9eef7b39..7450d3853031 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -294,9 +294,6 @@ static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (rs485conf->flags & SER_RS485_RX_DURING_TX)
@@ -306,6 +303,7 @@ static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f6c33cd228c8..fbc4b071b330 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1394,9 +1394,9 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
writeb(modem, sport->port.membase + UARTMODEM);
@@ -2191,6 +2191,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
/* wait transmit engin complete */
+ lpuart32_write(&sport->port, 0, UARTMODIR);
lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
@@ -2723,14 +2724,15 @@ static int lpuart_probe(struct platform_device *pdev)
lpuart_reg.cons = LPUART_CONSOLE;
handler = lpuart_int;
}
- ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret)
- goto failed_attach_port;
ret = lpuart_global_reset(sport);
if (ret)
goto failed_reset;
+ ret = uart_add_one_port(&lpuart_reg, &sport->port);
+ if (ret)
+ goto failed_attach_port;
+
ret = uart_get_rs485_mode(&sport->port);
if (ret)
goto failed_get_rs485;
@@ -2746,9 +2748,9 @@ static int lpuart_probe(struct platform_device *pdev)
failed_irq_request:
failed_get_rs485:
-failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
+failed_reset:
lpuart_disable_clks(sport);
return ret;
}
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index ab10ca4a45b5..7cf81f692ac4 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1616,11 +1616,9 @@ static int max310x_i2c_probe(struct i2c_client *client)
regmaps, client->irq);
}
-static int max310x_i2c_remove(struct i2c_client *client)
+static void max310x_i2c_remove(struct i2c_client *client)
{
max310x_remove(&client->dev);
-
- return 0;
}
static struct i2c_driver max310x_i2c_driver = {
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 259e08cc347c..d983692c59e0 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1689,11 +1689,9 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
}
-static int sc16is7xx_i2c_remove(struct i2c_client *client)
+static void sc16is7xx_i2c_remove(struct i2c_client *client)
{
sc16is7xx_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id sc16is7xx_i2c_id_table[] = {
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index ad4f3567ff90..a5748e41483b 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned int count;
@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 5c3a07546a58..4b1d4fe8458e 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -945,7 +945,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "unable to find controller clock\n");
return PTR_ERR(clk);
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 4877c54c613d..889b701ba7c6 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(port, count);
}
uart_write_wakeup(port);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9fdecc795b6b..5e287dedce01 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -470,7 +470,6 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
while (head) {
struct tty_buffer *next;
- unsigned char *p, *f = NULL;
unsigned int count;
/*
@@ -489,11 +488,16 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
continue;
}
- p = char_buf_ptr(head, head->lookahead);
- if (~head->flags & TTYB_NORMAL)
- f = flag_buf_ptr(head, head->lookahead);
+ if (port->client_ops->lookahead_buf) {
+ unsigned char *p, *f = NULL;
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ }
- port->client_ops->lookahead_buf(port, p, f, count);
head->lookahead += count;
}
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ae9c926acd6f..0b669c82ddc9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4662,9 +4662,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
- else if (vc->vc_sw->con_font_set)
+ else if (vc->vc_sw->con_font_set) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
- else
+ } else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
@@ -4691,9 +4693,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
console_unlock();
return -EINVAL;
}
- if (vc->vc_sw->con_font_default)
+ if (vc->vc_sw->con_font_default) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
- else
+ } else
rc = -ENOSYS;
console_unlock();
if (!rc) {
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 6bc679d22927..9538832b03a0 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2701,9 +2701,9 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
* Associate the UFS controller queue with the default and poll HCTX types.
* Initialize the mq_map[] arrays.
*/
-static int ufshcd_map_queues(struct Scsi_Host *shost)
+static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i, ret;
+ int i;
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -2720,11 +2720,8 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
WARN_ON_ONCE(true);
}
map->queue_offset = 0;
- ret = blk_mq_map_queues(map);
- WARN_ON_ONCE(ret);
+ blk_mq_map_queues(map);
}
-
- return 0;
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
@@ -8741,6 +8738,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
+ unsigned long deadline;
+ int32_t remaining;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -8773,9 +8772,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
+ deadline = jiffies + 10 * HZ;
for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ remaining / HZ, 0, 0, RQF_PM, NULL);
if (!scsi_status_is_check_condition(ret) ||
!scsi_sense_valid(&sshdr) ||
sshdr.sense_key != UNIT_ATTENTION)
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index a1a7a1175a5a..3d69a81c5b17 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -613,14 +613,17 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
srgn->srgn_state = HPB_SRGN_VALID;
}
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
ufshpb_put_req(umap_req->hpb, umap_req);
+ return RQ_END_IO_NONE;
}
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
struct ufshpb_lu *hpb = map_req->hpb;
@@ -636,6 +639,7 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
ufshpb_put_map_req(map_req->hpb, map_req);
+ return RQ_END_IO_NONE;
}
static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index eced97538082..c3628a8645a5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1711,7 +1711,7 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = {
.pa_dbg_option_suite = 0x2E820183,
};
-struct exynos_ufs_drv_data fsd_ufs_drvs = {
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.uic_attr = &fsd_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d21b69997e75..5adcb349718c 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1530,7 +1530,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 &&
- le32_to_cpu(trb->control) & TRB_SMM)
+ le32_to_cpu(trb->control) & TRB_SMM &&
+ le32_to_cpu(trb->control) & TRB_CHAIN)
transfer_end = true;
cdns3_ep_inc_deq(priv_ep);
@@ -1690,6 +1691,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 483bcb1213f7..cc637c4599e1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1810,6 +1810,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2633acde7ac1..bbab424b0d55 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6038,6 +6038,11 @@ re_enumerate:
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -6071,6 +6076,10 @@ int usb_reset_device(struct usb_device *udev)
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -6135,6 +6144,7 @@ int usb_reset_device(struct usb_device *udev)
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c8ba87df7abe..fd0ccf6f3ec5 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -154,9 +154,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_power_on(hsotg->phy);
+ ret = phy_init(hsotg->phy);
if (ret == 0)
- ret = phy_init(hsotg->phy);
+ ret = phy_power_on(hsotg->phy);
}
return ret;
@@ -188,9 +188,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_exit(hsotg->phy);
+ ret = phy_power_off(hsotg->phy);
if (ret == 0)
- ret = phy_power_off(hsotg->phy);
+ ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c5c238ab3083..d0237b30c9be 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -833,15 +833,16 @@ static void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
dwc3_clk_disable(dwc);
reset_control_assert(dwc->reset);
}
@@ -1751,12 +1752,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- if (!dwc->sysdev_is_parent) {
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
- }
-
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
@@ -1821,7 +1816,13 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
- device_init_wakeup(&pdev->dev, of_property_read_bool(dev->of_node, "wakeup-source"));
+
+ if (!dwc->sysdev_is_parent &&
+ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_clks;
+ }
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);
@@ -1879,16 +1880,16 @@ err5:
dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1983,7 +1984,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
dwc3_core_exit(dwc);
break;
}
@@ -2044,7 +2045,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6b018048fe2e..4ee4ca09873a 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -44,6 +44,7 @@
#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPL 0x460e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
@@ -456,6 +457,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index c5e482f53e9d..d3f3937d7005 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/pm_domain.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
@@ -299,11 +298,24 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
icc_put(qcom->icc_path_apps);
}
+/* Only usable in contexts where the role cannot change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ return dwc->xhci;
+}
+
static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
{
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
- struct usb_hcd *hcd = platform_get_drvdata(dwc->xhci);
struct usb_device *udev;
+ struct usb_hcd __maybe_unused *hcd;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ hcd = platform_get_drvdata(dwc->xhci);
/*
* It is possible to query the speed of all children of
@@ -311,8 +323,11 @@ static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
* currently supports only 1 port per controller. So
* this is sufficient.
*/
+#ifdef CONFIG_USB
udev = usb_hub_find_child(hcd->self.root_hub, 1);
-
+#else
+ udev = NULL;
+#endif
if (!udev)
return USB_SPEED_UNKNOWN;
@@ -387,7 +402,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
}
-static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
+static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
{
u32 val;
int i, ret;
@@ -406,7 +421,11 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
if (ret)
dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
- if (device_may_wakeup(qcom->dev)) {
+ /*
+ * The role is stable during suspend as role switching is done from a
+ * freezable workqueue.
+ */
+ if (dwc3_qcom_is_host(qcom) && wakeup) {
qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
dwc3_qcom_enable_interrupts(qcom);
}
@@ -416,7 +435,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
return 0;
}
-static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
+static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
{
int ret;
int i;
@@ -424,7 +443,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
if (!qcom->is_suspended)
return 0;
- if (device_may_wakeup(qcom->dev))
+ if (dwc3_qcom_is_host(qcom) && wakeup)
dwc3_qcom_disable_interrupts(qcom);
for (i = 0; i < qcom->num_clocks; i++) {
@@ -458,7 +477,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
if (qcom->pm_suspended)
return IRQ_HANDLED;
- if (dwc->xhci)
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
@@ -757,13 +780,13 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
static int dwc3_qcom_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- int ret, i;
- bool ignore_pipe_clk;
- struct generic_pm_domain *genpd;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ struct resource *res, *parent_res = NULL;
+ int ret, i;
+ bool ignore_pipe_clk;
+ bool wakeup_source;
qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
if (!qcom)
@@ -772,8 +795,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- genpd = pd_to_genpd(qcom->dev->pm_domain);
-
if (has_acpi_companion(dev)) {
qcom->acpi_pdata = acpi_device_get_match_data(dev);
if (!qcom->acpi_pdata) {
@@ -881,16 +902,9 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret)
goto interconnect_exit;
- if (device_can_wakeup(&qcom->dwc3->dev)) {
- /*
- * Setting GENPD_FLAG_ALWAYS_ON flag takes care of keeping
- * genpd on in both runtime suspend and system suspend cases.
- */
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- device_init_wakeup(&pdev->dev, true);
- } else {
- genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
- }
+ wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
+ device_init_wakeup(&pdev->dev, wakeup_source);
+ device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
qcom->is_suspended = false;
pm_runtime_set_active(dev);
@@ -944,39 +958,45 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- int ret = 0;
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
- ret = dwc3_qcom_suspend(qcom);
- if (!ret)
- qcom->pm_suspended = true;
+ ret = dwc3_qcom_suspend(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = true;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
int ret;
- ret = dwc3_qcom_resume(qcom);
- if (!ret)
- qcom->pm_suspended = false;
+ ret = dwc3_qcom_resume(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = false;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_suspend(qcom);
+ return dwc3_qcom_suspend(qcom, true);
}
static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_resume(qcom);
+ return dwc3_qcom_resume(qcom, true);
}
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 166b5bde45cb..6c14a79279f9 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
- child = of_get_child_by_name(node, "dwc3");
+ child = of_get_child_by_name(node, "usb");
if (!child) {
dev_err(&pdev->dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index aeeec751c53c..eca945feeec3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2539,9 +2539,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
- if (dwc->pullups_connected == is_on)
- return 0;
-
dwc->softconnect = is_on;
/*
@@ -2566,6 +2563,11 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc);
} else {
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f56c30cf151e..a7154fe8206d 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,13 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-plat.h"
#include "core.h"
+static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
+ .quirks = XHCI_SKIP_PHY_INIT,
+};
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -92,6 +97,11 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
+ sizeof(dwc3_xhci_plat_priv));
+ if (ret)
+ goto err;
+
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
@@ -135,4 +145,5 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 1905a8d8e0c9..08726e4c68a5 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -291,6 +291,12 @@ static struct usb_endpoint_descriptor ss_ep_int_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor ss_ep_int_desc_comp = {
+ .bLength = sizeof(ss_ep_int_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .wBytesPerInterval = cpu_to_le16(6),
+};
+
/* Audio Streaming OUT Interface - Alt0 */
static struct usb_interface_descriptor std_as_out_if0_desc = {
.bLength = sizeof std_as_out_if0_desc,
@@ -604,7 +610,8 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
- (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc_comp,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
@@ -800,6 +807,7 @@ static void setup_headers(struct f_uac2_opts *opts,
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *ep_int_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
@@ -827,6 +835,7 @@ static void setup_headers(struct f_uac2_opts *opts,
epin_fback_desc = &ss_epin_fback_desc;
epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
+ ep_int_desc_comp = &ss_ep_int_desc_comp;
}
i = 0;
@@ -855,8 +864,11 @@ static void setup_headers(struct f_uac2_opts *opts,
if (EPOUT_EN(opts))
headers[i++] = USBDHDR(&io_out_ot_desc);
- if (FUOUT_EN(opts) || FUIN_EN(opts))
+ if (FUOUT_EN(opts) || FUIN_EN(opts)) {
headers[i++] = USBDHDR(ep_int_desc);
+ if (ep_int_desc_comp)
+ headers[i++] = USBDHDR(ep_int_desc_comp);
+ }
if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&std_as_out_if0_desc);
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 03035dbbe97b..208c6a92780a 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
+ /*
+ * Convert to Minutes-Seconds-Frames.
+ * Sector size is already set to 2048 bytes.
+ */
addr += 2*75; /* Lead-in occupies 2 seconds */
dest[3] = addr % 75; /* Frames */
addr /= 75;
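The comment change above records that the logical block address is already in 2048-byte sectors (one sector per CD frame), so no shift is needed before converting to Minutes-Seconds-Frames at 75 frames per second with a 2-second lead-in. A small self-contained sketch of that conversion; lba_to_msf() is a hypothetical helper, not the gadget function:

```c
#include <stdint.h>
#include <stdio.h>

/* Convert a 2048-byte-sector LBA to MSF, as used for CD-ROM addressing. */
static void lba_to_msf(uint32_t lba, uint8_t msf[3])
{
	lba += 2 * 75;		/* lead-in occupies 2 seconds (75 frames/s) */
	msf[2] = lba % 75;	/* frames */
	lba /= 75;
	msf[1] = lba % 60;	/* seconds */
	msf[0] = lba / 60;	/* minutes */
}

int main(void)
{
	uint8_t msf[3];

	lba_to_msf(16, msf);	/* sector 16 -> 00:02:16 */
	printf("%02u:%02u:%02u\n", msf[0], msf[1], msf[2]);
	return 0;
}
```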
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index 60ae8b2d3f6a..dd21c251542c 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -158,8 +158,8 @@ size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
snd_pcm_sframes_t frames;
try_again:
- if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
- runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
+ if (runtime->state == SNDRV_PCM_STATE_XRUN ||
+ runtime->state == SNDRV_PCM_STATE_SUSPENDED) {
result = snd_pcm_kernel_ioctl(substream,
SNDRV_PCM_IOCTL_PREPARE, NULL);
if (result < 0) {
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index cafcf260394c..c63c0c2cf649 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -736,7 +736,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
ret = gadget->ops->pullup(gadget, 0);
if (!ret) {
gadget->connected = 0;
- gadget->udc->driver->disconnect(gadget);
+ mutex_lock(&udc_lock);
+ if (gadget->udc->driver)
+ gadget->udc->driver->disconnect(gadget);
+ mutex_unlock(&udc_lock);
}
out:
@@ -1489,7 +1492,6 @@ static int gadget_bind_driver(struct device *dev)
usb_gadget_udc_set_speed(udc, driver->max_speed);
- mutex_lock(&udc_lock);
ret = driver->bind(udc->gadget, driver);
if (ret)
goto err_bind;
@@ -1499,7 +1501,6 @@ static int gadget_bind_driver(struct device *dev)
goto err_start;
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
- mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
@@ -1512,6 +1513,7 @@ static int gadget_bind_driver(struct device *dev)
dev_err(&udc->dev, "failed to start %s: %d\n",
driver->function, ret);
+ mutex_lock(&udc_lock);
udc->driver = NULL;
driver->is_bound = false;
mutex_unlock(&udc_lock);
@@ -1529,7 +1531,6 @@ static void gadget_unbind_driver(struct device *dev)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- mutex_lock(&udc_lock);
usb_gadget_disconnect(gadget);
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
@@ -1537,6 +1538,7 @@ static void gadget_unbind_driver(struct device *dev)
udc->driver->unbind(gadget);
usb_gadget_udc_stop(udc);
+ mutex_lock(&udc_lock);
driver->is_bound = false;
udc->driver = NULL;
mutex_unlock(&udc_lock);
@@ -1612,7 +1614,7 @@ static ssize_t soft_connect_store(struct device *dev,
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
ssize_t ret;
- mutex_lock(&udc_lock);
+ device_lock(&udc->gadget->dev);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
ret = -EOPNOTSUPP;
@@ -1633,7 +1635,7 @@ static ssize_t soft_connect_store(struct device *dev,
ret = n;
out:
- mutex_unlock(&udc_lock);
+ device_unlock(&udc->gadget->dev);
return ret;
}
static DEVICE_ATTR_WO(soft_connect);
@@ -1652,11 +1654,15 @@ static ssize_t function_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- struct usb_gadget_driver *drv = udc->driver;
+ struct usb_gadget_driver *drv;
+ int rc = 0;
- if (!drv || !drv->function)
- return 0;
- return scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_lock(&udc_lock);
+ drv = udc->driver;
+ if (drv && drv->function)
+ rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_unlock(&udc_lock);
+ return rc;
}
static DEVICE_ATTR_RO(function);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0fdc014c9401..4619d5e89d5b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-aquire the lock while calling ACPI
* method.
*/
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
@@ -1648,6 +1648,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status = bus_state->resuming_ports;
+ /*
+ * SS devices are only visible to roothub after link training completes.
+ * Keep polling roothubs for a grace period after xHC start
+ */
+ if (xhci->run_graceperiod) {
+ if (time_before(jiffies, xhci->run_graceperiod))
+ status = 1;
+ else
+ xhci->run_graceperiod = 0;
+ }
+
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
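The hunk above keeps root-hub status polling alive for a fixed window after the controller starts, because SuperSpeed devices only become visible once link training completes. A user-space analogue of that "report activity until a grace period expires" pattern, with hypothetical names (the real code uses jiffies and a 500 ms window set in xhci_start()):

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static time_t run_graceperiod;	/* 0 means no grace period is active */

/* Hypothetical status poll: force "changed" while the grace period runs. */
static bool hub_status_changed(bool hw_changed)
{
	if (run_graceperiod) {
		if (time(NULL) < run_graceperiod)
			return true;	/* keep polling even if nothing changed */
		run_graceperiod = 0;	/* window elapsed, trust the hardware again */
	}
	return hw_changed;
}

int main(void)
{
	run_graceperiod = time(NULL) + 1;	/* ~1 s window for the demo */
	printf("%d\n", hub_status_changed(false));	/* prints 1 inside the window */
	return 0;
}
```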
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 06a6b19acaae..579899eb24c1 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -425,7 +425,6 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- u32 extra_cs_count;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
@@ -461,18 +460,12 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (last_cs > 7)
return -ESCH_CS_OVERFLOW;
- if (sch_ep->ep_type == ISOC_IN_EP)
- extra_cs_count = (last_cs == 7) ? 1 : 2;
- else /* ep_type : INTR IN / INTR OUT */
- extra_cs_count = 1;
-
- cs_count += extra_cs_count;
if (cs_count > 7)
cs_count = 7; /* HW limit */
sch_ep->cs_count = cs_count;
- /* one for ss, the other for idle */
- sch_ep->num_budget_microframes = cs_count + 2;
+ /* ss, idle are ignored */
+ sch_ep->num_budget_microframes = cs_count;
/*
* if interval=1, maxp >752, num_budget_microframes is larger
@@ -771,8 +764,8 @@ int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
if (ret)
return ret;
- if (ep->hcpriv)
- drop_ep_quirk(hcd, udev, ep);
+ /* no need to check @ep->hcpriv, xhci_endpoint_disable() sets it to NULL */
+ drop_ep_quirk(hcd, udev, ep);
return 0;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 044855818cb1..a8641b6536ee 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -398,12 +398,17 @@ static int xhci_plat_remove(struct platform_device *dev)
pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
- usb_remove_hcd(shared_hcd);
- xhci->shared_hcd = NULL;
+ if (shared_hcd) {
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
+ }
+
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(shared_hcd);
+
+ if (shared_hcd)
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 65858f607437..38649284ff88 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -151,9 +151,11 @@ int xhci_start(struct xhci_hcd *xhci)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
+ }
return ret;
}
@@ -791,8 +793,6 @@ static void xhci_stop(struct usb_hcd *hcd)
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- unsigned long flags;
- int i;
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -808,21 +808,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
-
- /* Power off USB2 ports*/
- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
-
- /* Power off USB3 ports*/
- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
-
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1960b47acfb2..7caa0db5e826 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1826,7 +1826,7 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
-
+ unsigned long run_graceperiod;
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -2196,8 +2196,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
- bool on, unsigned long *flags);
void xhci_hc_died(struct xhci_hcd *xhci);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index d1df153e7f5a..d63c63942af1 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -71,10 +71,7 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
{
int err;
- if (hub->reset_gpio) {
- gpiod_set_value_cansleep(hub->reset_gpio, 1);
- fsleep(hub->pdata->reset_us);
- }
+ gpiod_set_value_cansleep(hub->reset_gpio, 1);
err = regulator_disable(hub->vdd);
if (err) {
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 330f494cd158..3c9fa663475f 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -289,14 +289,12 @@ static int usb3503_i2c_probe(struct i2c_client *i2c,
return usb3503_probe(hub);
}
-static int usb3503_i2c_remove(struct i2c_client *i2c)
+static void usb3503_i2c_remove(struct i2c_client *i2c)
{
struct usb3503 *hub;
hub = i2c_get_clientdata(i2c);
clk_disable_unprepare(hub->clk);
-
- return 0;
}
static int usb3503_platform_probe(struct platform_device *pdev)
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f906dfd360d3..6c8f7763e75e 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -86,7 +86,7 @@ config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
depends on ARCH_OMAP2PLUS || COMPILE_TEST
- depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
+ depends on NOP_USB_XCEIV!=m || USB_MUSB_HDRC=m
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index f8bd93fe69cd..e5d3f206097c 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -1196,7 +1196,7 @@ static void isp1301_release(struct device *dev)
static struct isp1301 *the_transceiver;
-static int isp1301_remove(struct i2c_client *i2c)
+static void isp1301_remove(struct i2c_client *i2c)
{
struct isp1301 *isp;
@@ -1214,8 +1214,6 @@ static int isp1301_remove(struct i2c_client *i2c)
put_device(&i2c->dev);
the_transceiver = NULL;
-
- return 0;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index ad3d57f1c273..c2777a5c1f4e 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -120,14 +120,12 @@ static int isp1301_probe(struct i2c_client *client,
return 0;
}
-static int isp1301_remove(struct i2c_client *client)
+static void isp1301_remove(struct i2c_client *client)
{
struct isp1301 *isp = i2c_get_clientdata(client);
usb_remove_phy(&isp->phy);
isp1301_i2c_client = NULL;
-
- return 0;
}
static struct i2c_driver isp1301_driver = {
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2798fca71261..af01a462cc43 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -97,7 +97,10 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+
unsigned long quirks;
+ u8 version;
+
unsigned long break_end;
};
@@ -250,8 +253,12 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
+ *
+ * At least one device with version 0x27 appears to have this bit
+ * inverted.
*/
- val |= BIT(7);
+ if (priv->version > 0x27)
+ val |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
@@ -265,6 +272,9 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
* (stop bits, parity and word length). Version 0x30 and above use
* CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
*/
+ if (priv->version < 0x30)
+ return 0;
+
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
if (r)
@@ -308,7 +318,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r)
return r;
- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
+
+ priv->version = buffer[0];
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c374620a486f..a34957c4b64c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d5a3986dfee7..52d59be92034 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = {
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ /* Omron devices */
+ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e92c165c86b..31c8ccabbbb7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -662,6 +662,12 @@
#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
+ * Omron corporation (https://www.omron.com)
+ */
+ #define OMRON_VID 0x0590
+ #define OMRON_CS1W_CIF31_PID 0x00b2
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index de59fa919540..697683e3fbff 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -253,8 +253,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
+#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -438,6 +440,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
+#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
+#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -573,6 +577,10 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
+/* OPPO products */
+#define OPPO_VENDOR_ID 0x22d9
+#define OPPO_PRODUCT_R11 0x276c
+
/* Device flags */
@@ -1131,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+ .driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1138,6 +1148,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1149,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
@@ -1993,8 +2009,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2155,6 +2175,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1a05e3dcfec8..4993227ab293 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2294,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */
+UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
+ "NXP Semiconductors",
+ "PN7462AU",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Supplied with some Castlewood ORB removable drives */
UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
"Double-H Technology",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 4051c8cd0cd8..251778d14e2d 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -62,6 +69,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+/* Reported-by: Tom Hu <huxiaoying@kylinos.cn> */
+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
+ "ASUS",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
@@ -69,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
@@ -111,6 +132,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
+ "Thinkplus",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 5defdfead653..831e7049977d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_ANX7411
tristate "Analogix ANX7411 Type-C DRP Port controller driver"
depends on I2C
depends on USB_ROLE_SWITCH
+ depends on POWER_SUPPLY
help
Say Y or M here if your system has Analogix ANX7411 Type-C DRP Port
controller driver.
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index c1d8c23baa39..de66a2949e33 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -99,8 +99,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
break;
default:
break;
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index c0f0842d443c..e205f409589a 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1541,7 +1541,7 @@ free_i2c_dummy:
return ret;
}
-static int anx7411_i2c_remove(struct i2c_client *client)
+static void anx7411_i2c_remove(struct i2c_client *client)
{
struct anx7411_data *plat = i2c_get_clientdata(client);
@@ -1565,8 +1565,6 @@ static int anx7411_i2c_remove(struct i2c_client *client)
typec_unregister_port(plat->typec.port);
anx7411_port_unregister_altmodes(plat->typec.port_amode);
-
- return 0;
}
static const struct i2c_device_id anx7411_id[] = {
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebc29ec20e3f..bd5e5dd70431 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2346,6 +2346,7 @@ static void __exit typec_exit(void)
ida_destroy(&typec_index_ida);
bus_unregister(&typec_bus);
class_unregister(&typec_mux_class);
+ class_unregister(&retimer_class);
}
module_exit(typec_exit);
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index cd47c3597e19..2a58185fb14c 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -245,14 +245,12 @@ err_put_fwnode:
return ret;
}
-static int hd3ss3220_remove(struct i2c_client *client)
+static void hd3ss3220_remove(struct i2c_client *client)
{
struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
typec_unregister_port(hd3ss3220->port);
usb_role_switch_put(hd3ss3220->role_sw);
-
- return 0;
}
static const struct of_device_id dev_ids[] = {
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
index 6184f5367190..d6495e533e58 100644
--- a/drivers/usb/typec/mux/fsa4480.c
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -181,14 +181,12 @@ static int fsa4480_probe(struct i2c_client *client)
return 0;
}
-static int fsa4480_remove(struct i2c_client *client)
+static void fsa4480_remove(struct i2c_client *client)
{
struct fsa4480 *fsa = i2c_get_clientdata(client);
typec_mux_unregister(fsa->mux);
typec_switch_unregister(fsa->sw);
-
- return 0;
}
static const struct i2c_device_id fsa4480_table[] = {
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 47b733f78fb0..e1f4df7238bf 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -569,13 +569,6 @@ err_unregister_switch:
return ret;
}
-static int is_memory(struct acpi_resource *res, void *data)
-{
- struct resource r;
-
- return !acpi_dev_resource_memory(res, &r);
-}
-
/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
static const struct acpi_device_id iom_acpi_ids[] = {
/* TigerLake */
@@ -583,6 +576,9 @@ static const struct acpi_device_id iom_acpi_ids[] = {
/* AlderLake */
{ "INTC1079", 0x160, },
+
+ /* Meteor Lake */
+ { "INTC107A", 0x160, },
{}
};
@@ -606,7 +602,7 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
return -ENODEV;
INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 6ce9f282594e..1cd388b55c30 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -160,13 +160,12 @@ static int pi3usb30532_probe(struct i2c_client *client)
return 0;
}
-static int pi3usb30532_remove(struct i2c_client *client)
+static void pi3usb30532_remove(struct i2c_client *client)
{
struct pi3usb30532 *pi = i2c_get_clientdata(client);
typec_mux_unregister(pi->mux);
typec_switch_unregister(pi->sw);
- return 0;
}
static const struct i2c_device_id pi3usb30532_table[] = {
diff --git a/drivers/usb/typec/rt1719.c b/drivers/usb/typec/rt1719.c
index f1b698edd7eb..ea8b700b0ceb 100644
--- a/drivers/usb/typec/rt1719.c
+++ b/drivers/usb/typec/rt1719.c
@@ -930,14 +930,12 @@ err_fwnode_put:
return ret;
}
-static int rt1719_remove(struct i2c_client *i2c)
+static void rt1719_remove(struct i2c_client *i2c)
{
struct rt1719_data *data = i2c_get_clientdata(i2c);
typec_unregister_port(data->port);
usb_role_switch_put(data->role_sw);
-
- return 0;
}
static const struct of_device_id __maybe_unused rt1719_device_table[] = {
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index e7745d1c2a5c..8638f1d39896 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -801,7 +801,7 @@ fwnode_put:
return ret;
}
-static int stusb160x_remove(struct i2c_client *client)
+static void stusb160x_remove(struct i2c_client *client)
{
struct stusb160x *chip = i2c_get_clientdata(client);
@@ -823,8 +823,6 @@ static int stusb160x_remove(struct i2c_client *client)
if (chip->main_supply)
regulator_disable(chip->main_supply);
-
- return 0;
}
static int __maybe_unused stusb160x_suspend(struct device *dev)
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 96c55eaf3f80..5e9348f28d50 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1771,7 +1771,7 @@ destroy_workqueue:
return ret;
}
-static int fusb302_remove(struct i2c_client *client)
+static void fusb302_remove(struct i2c_client *client)
{
struct fusb302_chip *chip = i2c_get_clientdata(client);
@@ -1783,8 +1783,6 @@ static int fusb302_remove(struct i2c_client *client)
fwnode_handle_put(chip->tcpc_dev.fwnode);
destroy_workqueue(chip->wq);
fusb302_debugfs_exit(chip);
-
- return 0;
}
static int fusb302_pm_suspend(struct device *dev)
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 812784702d53..f00810d198a8 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -868,7 +868,7 @@ static int tcpci_probe(struct i2c_client *client,
return 0;
}
-static int tcpci_remove(struct i2c_client *client)
+static void tcpci_remove(struct i2c_client *client)
{
struct tcpci_chip *chip = i2c_get_clientdata(client);
int err;
@@ -879,8 +879,6 @@ static int tcpci_remove(struct i2c_client *client)
dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));
tcpci_unregister_port(chip->tcpci);
-
- return 0;
}
static const struct i2c_device_id tcpci_id[] = {
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index 4b6705f3d7b7..03f89e6f1a78 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -492,14 +492,12 @@ unreg_port:
return ret;
}
-static int max_tcpci_remove(struct i2c_client *client)
+static void max_tcpci_remove(struct i2c_client *client)
{
struct max_tcpci_chip *chip = i2c_get_clientdata(client);
if (!IS_ERR_OR_NULL(chip->tcpci))
tcpci_unregister_port(chip->tcpci);
-
- return 0;
}
static const struct i2c_device_id max_tcpci_id[] = {
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 3291ca4948da..c1327713f06d 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -263,12 +263,11 @@ static int rt1711h_probe(struct i2c_client *client,
return 0;
}
-static int rt1711h_remove(struct i2c_client *client)
+static void rt1711h_remove(struct i2c_client *client)
{
struct rt1711h_chip *chip = i2c_get_clientdata(client);
tcpci_unregister_port(chip->tcpci);
- return 0;
}
static const struct i2c_device_id rt1711h_id[] = {
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index ea5a917c51b1..904c7b4ce2f0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -6320,6 +6320,13 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
struct tcpm_port *port = power_supply_get_drvdata(psy);
int ret;
+ /*
+ * All the properties below are related to USB PD. The check needs to be
+ * made property-specific when a non-PD-related property is added.
+ */
+ if (!port->pd_supported)
+ return -EOPNOTSUPP;
+
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
ret = tcpm_psy_set_online(port, val);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index dfbba5ae9487..b637e8b378b3 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -857,15 +857,13 @@ err_clear_mask:
return ret;
}
-static int tps6598x_remove(struct i2c_client *client)
+static void tps6598x_remove(struct i2c_client *client)
{
struct tps6598x *tps = i2c_get_clientdata(client);
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
usb_role_switch_put(tps->role_sw);
-
- return 0;
}
static const struct of_device_id tps6598x_of_match[] = {
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1aea46493b85..6364f0d467ea 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -588,8 +588,6 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
- if (ret == 0 && offset == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
return ret;
}
@@ -1200,32 +1198,6 @@ out_unlock:
return ret;
}
-static void ucsi_unregister_connectors(struct ucsi *ucsi)
-{
- struct ucsi_connector *con;
- int i;
-
- if (!ucsi->connector)
- return;
-
- for (i = 0; i < ucsi->cap.num_connectors; i++) {
- con = &ucsi->connector[i];
-
- if (!con->wq)
- break;
-
- cancel_work_sync(&con->work);
- ucsi_unregister_partner(con);
- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(con);
- destroy_workqueue(con->wq);
- typec_unregister_port(con->port);
- }
-
- kfree(ucsi->connector);
- ucsi->connector = NULL;
-}
-
/**
* ucsi_init - Initialize UCSI interface
* @ucsi: UCSI to be initialized
@@ -1234,6 +1206,7 @@ static void ucsi_unregister_connectors(struct ucsi *ucsi)
*/
static int ucsi_init(struct ucsi *ucsi)
{
+ struct ucsi_connector *con;
u64 command;
int ret;
int i;
@@ -1264,7 +1237,7 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors,
+ ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
ret = -ENOMEM;
@@ -1288,7 +1261,15 @@ static int ucsi_init(struct ucsi *ucsi)
return 0;
err_unregister:
- ucsi_unregister_connectors(ucsi);
+ for (con = ucsi->connector; con->port; con++) {
+ ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
+ if (con->wq)
+ destroy_workqueue(con->wq);
+ typec_unregister_port(con->port);
+ con->port = NULL;
+ }
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1402,6 +1383,7 @@ EXPORT_SYMBOL_GPL(ucsi_register);
void ucsi_unregister(struct ucsi *ucsi)
{
u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
+ int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_delayed_work_sync(&ucsi->work);
@@ -1409,7 +1391,18 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
- ucsi_unregister_connectors(ucsi);
+ for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ cancel_work_sync(&ucsi->connector[i].work);
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+ if (ucsi->connector[i].wq)
+ destroy_workqueue(ucsi->connector[i].wq);
+ typec_unregister_port(ucsi->connector[i].port);
+ }
+
+ kfree(ucsi->connector);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 5c0bf48be766..349756335362 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -1403,7 +1403,7 @@ out_ucsi_destroy:
return status;
}
-static int ucsi_ccg_remove(struct i2c_client *client)
+static void ucsi_ccg_remove(struct i2c_client *client)
{
struct ucsi_ccg *uc = i2c_get_clientdata(client);
@@ -1413,8 +1413,6 @@ static int ucsi_ccg_remove(struct i2c_client *client)
ucsi_unregister(uc->ucsi);
ucsi_destroy(uc->ucsi);
free_irq(uc->irq, uc);
-
- return 0;
}
static const struct i2c_device_id ucsi_ccg_device_id[] = {
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 061551d464f1..16289ff583b4 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -688,7 +688,7 @@ destroy:
return ret;
}
-static int ucsi_stm32g0_remove(struct i2c_client *client)
+static void ucsi_stm32g0_remove(struct i2c_client *client)
{
struct ucsi_stm32g0 *g0 = i2c_get_clientdata(client);
@@ -697,8 +697,6 @@ static int ucsi_stm32g0_remove(struct i2c_client *client)
if (g0->fw_name)
i2c_unregister_device(g0->i2c_bl);
ucsi_destroy(g0->ucsi);
-
- return 0;
}
static int ucsi_stm32g0_suspend(struct device *dev)
diff --git a/drivers/usb/typec/wusb3801.c b/drivers/usb/typec/wusb3801.c
index e63509f8b01e..3cc7a15ecbd3 100644
--- a/drivers/usb/typec/wusb3801.c
+++ b/drivers/usb/typec/wusb3801.c
@@ -399,7 +399,7 @@ err_put_connector:
return ret;
}
-static int wusb3801_remove(struct i2c_client *client)
+static void wusb3801_remove(struct i2c_client *client)
{
struct wusb3801 *wusb3801 = i2c_get_clientdata(client);
@@ -411,8 +411,6 @@ static int wusb3801_remove(struct i2c_client *client)
if (wusb3801->vbus_on)
regulator_disable(wusb3801->vbus_supply);
-
- return 0;
}
static const struct of_device_id wusb3801_of_match[] = {
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 75a703b803a2..3e4486bfa0b7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -323,7 +323,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = vp_ioread16(avail_idx_addr);
@@ -337,7 +337,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
vp_iowrite16(num, avail_idx_addr);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index ed100a35e596..90913365def4 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1320,6 +1320,8 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
+ int rqt_table_size = roundup_pow_of_two(ndev->rqt_size);
+ int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1327,7 +1329,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1336,12 +1338,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1354,6 +1356,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
+ int act_sz = roundup_pow_of_two(num / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1361,7 +1364,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1372,10 +1375,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
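create_rqt() and modify_rqt() now size the RQ table from the queue pairs actually in use, rounded up to a power of two as the device requires, instead of always using rqt_size; the inlen calculation, rqt_max_size/rqt_actual_size and the fill loop all follow the same value. A hedged worked example of the new sizing (numbers only, not driver code):

/* Suppose 6 data virtqueues are enabled, i.e. 3 RX/TX pairs. */
unsigned int pairs  = 6 / 2;				/* cur_num_vqs / 2 = 3 */
unsigned int act_sz = roundup_pow_of_two(pairs);	/* -> 4 entries        */

/* In the fill loop j walks the RX queues and wraps on cur_num_vqs,
 * so the four entries reference vqs 0, 2, 4 and 0 (pair 0 repeated).
 */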
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index c06c02704461..7badf5777597 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -1183,6 +1183,7 @@ static struct genl_family vdpa_nl_family __ro_after_init = {
.module = THIS_MODULE,
.ops = vdpa_nl_ops,
.n_ops = ARRAY_SIZE(vdpa_nl_ops),
+ .resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};
static int vdpa_init(void)
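The vdpa netlink family gains a resv_start_op value; the generic netlink core applies its newer, stricter validation (for example requiring reserved header fields to be zero) only from this command number onwards, so existing commands keep their historical behaviour. A hedged sketch of how a family declares it (the field name is real, the surrounding names are illustrative):

static struct genl_family foo_nl_family __ro_after_init = {
	.name		= "foo",
	.version	= 1,
	.ops		= foo_nl_ops,
	.n_ops		= ARRAY_SIZE(foo_nl_ops),
	/* commands >= this value get the stricter validation;
	 * everything below stays as before */
	.resv_start_op	= FOO_CMD_LAST + 1,
};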
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 41c0b29739f1..35dceee3ed56 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -673,10 +673,15 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- if (offset > dev->config_size ||
- len > dev->config_size - offset)
+ /* Initialize the buffer in case of partial copy. */
+ memset(buf, 0, len);
+
+ if (offset > dev->config_size)
return;
+ if (len > dev->config_size - offset)
+ len = dev->config_size - offset;
+
memcpy(buf, dev->config + offset, len);
}
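vduse_vdpa_get_config() used to return silently when the requested window ran past config_size, leaving the caller's buffer untouched and therefore uninitialized. The fix zero-fills the buffer first and clamps the copy length, so a partial read returns zeroes for the out-of-range tail. A hedged helper showing the same clamp-and-fill pattern in isolation:

/* Copy up to len bytes from a bounded config space; any tail that
 * falls outside the space reads back as zero.
 */
static void bounded_read(void *dst, size_t len,
			 const void *cfg, size_t cfg_size, size_t off)
{
	memset(dst, 0, len);		/* cover any part we cannot fill */

	if (off > cfg_size)
		return;
	if (len > cfg_size - off)
		len = cfg_size - off;	/* clamp instead of bailing out */

	memcpy(dst, (const char *)cfg + off, len);
}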
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index e163aa9f6144..0cbdcd14f1c8 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -151,7 +151,10 @@ int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
if (!vdev->vdev.kvm)
return 0;
- return kvm_s390_pci_register_kvm(zdev, vdev->vdev.kvm);
+ if (zpci_kvm_hook.kvm_register)
+ return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm);
+
+ return -ENOENT;
}
void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
@@ -161,5 +164,6 @@ void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
if (!zdev || !vdev->vdev.kvm)
return;
- kvm_s390_pci_unregister_kvm(zdev);
+ if (zpci_kvm_hook.kvm_unregister)
+ zpci_kvm_hook.kvm_unregister(zdev);
}
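vfio-pci's zdev code no longer calls the KVM s390 PCI helpers directly; it goes through the zpci_kvm_hook function-pointer table and falls back to -ENOENT when no provider has registered. The general shape of that pattern, sketched with illustrative names (the real hook lives in the s390 PCI code):

/* Provider-registered hooks; the pointers stay NULL until the
 * owning module loads and fills them in.
 */
struct foo_hook {
	int  (*register_cb)(void *dev, void *ctx);
	void (*unregister_cb)(void *dev);
};

static struct foo_hook foo_hook;	/* illustrative global */

static int foo_attach(void *dev, void *ctx)
{
	if (foo_hook.register_cb)
		return foo_hook.register_cb(dev, ctx);

	return -ENOENT;			/* no provider present */
}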
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index db516c90a977..8706482665d1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -558,6 +558,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
pages, NULL, NULL);
if (ret > 0) {
+ int i;
+
+ /*
+ * The zero page is always resident, we don't need to pin it
+ * and it falls into our invalid/reserved test so we don't
+ * unpin in put_pfn(). Unpin all zero pages in the batch here.
+ */
+ for (i = 0 ; i < ret; i++) {
+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+ unpin_user_page(pages[i]);
+ }
+
*pfn = page_to_pfn(pages[0]);
goto done;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 68e4ecd1cc0e..d7a04d573988 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -118,7 +118,7 @@ struct vhost_net_virtqueue {
/* Number of XDP frames batched */
int batched_xdp;
/* an array of userspace buffers info */
- struct ubuf_info *ubuf_info;
+ struct ubuf_info_msgzc *ubuf_info;
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
@@ -382,8 +382,9 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
}
static void vhost_zerocopy_callback(struct sk_buff *skb,
- struct ubuf_info *ubuf, bool success)
+ struct ubuf_info *ubuf_base, bool success)
{
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
int cnt;
@@ -871,7 +872,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
- struct ubuf_info *ubuf;
+ struct ubuf_info_msgzc *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -907,14 +908,14 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
ubuf = nvq->ubuf_info + nvq->upend_idx;
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
- ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
- ubuf->flags = SKBFL_ZEROCOPY_FRAG;
- refcount_set(&ubuf->refcnt, 1);
+ ubuf->ubuf.callback = vhost_zerocopy_callback;
+ ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
+ refcount_set(&ubuf->ubuf.refcnt, 1);
msg.msg_control = &ctl;
ctl.type = TUN_MSG_UBUF;
- ctl.ptr = ubuf;
+ ctl.ptr = &ubuf->ubuf;
msg.msg_controllen = sizeof(ctl);
ubufs = nvq->ubufs;
atomic_inc(&ubufs->refcount);
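vhost-net switches its zerocopy bookkeeping to struct ubuf_info_msgzc, which embeds the generic struct ubuf_info as its first member; the completion callback receives the base pointer and downcasts it (uarg_to_msgzc() is essentially a container_of). A hedged sketch of the embed-and-downcast pattern with illustrative names:

struct base_info {
	void (*callback)(struct base_info *b, bool ok);
};

struct ext_info {
	struct base_info base;	/* embedded generic part, first member */
	void *ctx;		/* extra, user-specific state */
	int desc;
};

static inline struct ext_info *to_ext(struct base_info *b)
{
	return container_of(b, struct ext_info, base);
}

static void ext_callback(struct base_info *b, bool ok)
{
	struct ext_info *e = to_ext(b);	/* recover the outer struct */
	/* ... use e->ctx / e->desc ... */
}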
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 368330417bde..5703775af129 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -393,7 +393,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
if (!pkt->buf) {
kfree(pkt);
return NULL;
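Switching the vsock packet buffer from kmalloc() to kvmalloc() lets large allocations fall back to vmalloc when contiguous pages are scarce. The matching free must then be kvfree(), which handles both cases; a minimal hedged sketch (assuming the corresponding free path is updated accordingly):

void *buf;

buf = kvmalloc(len, GFP_KERNEL);	/* may fall back to vmalloc */
if (!buf)
	return -ENOMEM;

/* ... fill and consume buf ... */

kvfree(buf);	/* correct for both kmalloc- and vmalloc-backed memory */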
diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c
index 538f2d40acda..9e6bcc03a1a4 100644
--- a/drivers/video/aperture.c
+++ b/drivers/video/aperture.c
@@ -2,15 +2,17 @@
#include <linux/aperture.h>
#include <linux/device.h>
-#include <linux/fb.h> /* for old fbdev helpers */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sysfb.h>
#include <linux/types.h>
#include <linux/vgaarb.h>
+#include <video/vga.h>
+
/**
* DOC: overview
*
@@ -283,26 +285,27 @@ static void aperture_detach_devices(resource_size_t base, resource_size_t size)
int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
bool primary, const char *name)
{
-#if IS_REACHABLE(CONFIG_FB)
- struct apertures_struct *a;
- int ret;
-
- a = alloc_apertures(1);
- if (!a)
- return -ENOMEM;
-
- a->ranges[0].base = base;
- a->ranges[0].size = size;
-
- ret = remove_conflicting_framebuffers(a, name, primary);
- kfree(a);
-
- if (ret)
- return ret;
-#endif
+ /*
+ * If a driver asked to unregister a platform device registered by
+ * sysfb, then can be assumed that this is a driver for a display
+ * that is set up by the system firmware and has a generic driver.
+ *
+ * Drivers for devices that don't have a generic driver will never
+ * ask for this, so let's assume that a real driver for the display
+ * was already probed and prevent sysfb to register devices later.
+ */
+ sysfb_disable();
aperture_detach_devices(base, size);
+ /*
+ * If this is the primary adapter, there could be a VGA device
+ * that consumes the VGA framebuffer I/O range. Remove this device
+ * as well.
+ */
+ if (primary)
+ aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
+
return 0;
}
EXPORT_SYMBOL(aperture_remove_conflicting_devices);
@@ -321,30 +324,36 @@ EXPORT_SYMBOL(aperture_remove_conflicting_devices);
*/
int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
{
+ bool primary = false;
resource_size_t base, size;
int bar, ret;
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, name);
- if (ret)
- return ret;
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- ret = vga_remove_vgacon(pdev);
- if (ret)
- return ret;
for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
+
base = pci_resource_start(pdev, bar);
size = pci_resource_len(pdev, bar);
- aperture_detach_devices(base, size);
+ ret = aperture_remove_conflicting_devices(base, size, primary, name);
+ if (ret)
+ break;
}
+ if (ret)
+ return ret;
+
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
+ ret = vga_remove_vgacon(pdev);
+ if (ret)
+ return ret;
+
return 0;
}
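With the fbdev-specific logic folded into the aperture helpers, a native PCI display driver only needs one early call in probe to evict firmware framebuffers (and, on the primary adapter, conflicting VGA devices). This is the pattern the fbdev hunks later in this diff adopt; a minimal hedged example with illustrative names:

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Kick out generic/firmware drivers before touching the hardware. */
	ret = aperture_remove_conflicting_pci_devices(pdev, "foofb");
	if (ret)
		return ret;

	return foo_hw_init(pdev);	/* illustrative driver setup */
}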
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index a003e02e13ce..936ba1e4d35e 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -268,6 +268,19 @@ config BACKLIGHT_MAX8925
If you have a LCD backlight connected to the WLED output of MAX8925
WLED output, say Y here to enable this driver.
+config BACKLIGHT_MT6370
+ tristate "MediaTek MT6370 Backlight Driver"
+ depends on MFD_MT6370
+ help
+ This enables support for the MediaTek MT6370 backlight driver.
+ It's commonly used to drive the display WLED. There are 4 channels
+ inside, and each channel supports up to 30mA of current capability
+ with 2048 current steps (only for MT6370/MT6371) or 16384 current
+ steps (only for MT6372) in exponential or linear mapping curves.
+
+ This driver can also be built as a module. If so, the module
+ will be called "mt6370-backlight".
+
config BACKLIGHT_APPLE
tristate "Apple Backlight Driver"
depends on X86 && ACPI
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index cae2c83422ae..e815f3f1deff 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
obj-$(CONFIG_BACKLIGHT_LV5207LP) += lv5207lp.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
+obj-$(CONFIG_BACKLIGHT_MT6370) += mt6370-backlight.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 8ec19425671f..b0fe02273e87 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -753,7 +753,7 @@ out:
return ret;
}
-static int adp8860_remove(struct i2c_client *client)
+static void adp8860_remove(struct i2c_client *client)
{
struct adp8860_bl *data = i2c_get_clientdata(client);
@@ -765,8 +765,6 @@ static int adp8860_remove(struct i2c_client *client)
if (data->en_ambl_sens)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8860_bl_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 8b5213a39527..5becace3fd0f 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -925,7 +925,7 @@ out:
return ret;
}
-static int adp8870_remove(struct i2c_client *client)
+static void adp8870_remove(struct i2c_client *client)
{
struct adp8870_bl *data = i2c_get_clientdata(client);
@@ -937,8 +937,6 @@ static int adp8870_remove(struct i2c_client *client)
if (data->pdata->en_ambl_sens)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8870_bl_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/arcxcnn_bl.c b/drivers/video/backlight/arcxcnn_bl.c
index 7b1c0a0e6cad..060c0eef6a52 100644
--- a/drivers/video/backlight/arcxcnn_bl.c
+++ b/drivers/video/backlight/arcxcnn_bl.c
@@ -362,7 +362,7 @@ probe_err:
return ret;
}
-static int arcxcnn_remove(struct i2c_client *cl)
+static void arcxcnn_remove(struct i2c_client *cl)
{
struct arcxcnn *lp = i2c_get_clientdata(cl);
@@ -376,8 +376,6 @@ static int arcxcnn_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
-
- return 0;
}
static const struct of_device_id arcxcnn_dt_ids[] = {
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 515184fbe33a..a506872d4396 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -175,14 +175,12 @@ static int bd6107_probe(struct i2c_client *client,
return 0;
}
-static int bd6107_remove(struct i2c_client *client)
+static void bd6107_remove(struct i2c_client *client)
{
struct backlight_device *backlight = i2c_get_clientdata(client);
backlight->props.brightness = 0;
backlight_update_status(backlight);
-
- return 0;
}
static const struct i2c_device_id bd6107_ids[] = {
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 1d17c439430e..475f35635bf6 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -579,7 +579,7 @@ static int lm3630a_probe(struct i2c_client *client,
return 0;
}
-static int lm3630a_remove(struct i2c_client *client)
+static void lm3630a_remove(struct i2c_client *client)
{
int rval;
struct lm3630a_chip *pchip = i2c_get_clientdata(client);
@@ -596,7 +596,6 @@ static int lm3630a_remove(struct i2c_client *client)
free_irq(pchip->irq, pchip);
destroy_workqueue(pchip->irqthread);
}
- return 0;
}
static const struct i2c_device_id lm3630a_id[] = {
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 48c04155a5f9..6580911671a3 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -390,7 +390,7 @@ err_out:
return ret;
}
-static int lm3639_remove(struct i2c_client *client)
+static void lm3639_remove(struct i2c_client *client)
{
struct lm3639_chip_data *pchip = i2c_get_clientdata(client);
@@ -400,7 +400,6 @@ static int lm3639_remove(struct i2c_client *client)
led_classdev_unregister(&pchip->cdev_flash);
if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
- return 0;
}
static const struct i2c_device_id lm3639_id[] = {
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index fc02c5c16055..bd0bdeae23a4 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -534,7 +534,7 @@ disable_supply:
return ret;
}
-static int lp855x_remove(struct i2c_client *cl)
+static void lp855x_remove(struct i2c_client *cl)
{
struct lp855x *lp = i2c_get_clientdata(cl);
@@ -545,8 +545,6 @@ static int lp855x_remove(struct i2c_client *cl)
if (lp->supply)
regulator_disable(lp->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
-
- return 0;
}
static const struct of_device_id lp855x_dt_ids[] = {
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 1842ae9a55f8..767b800d79fa 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -124,14 +124,12 @@ static int lv5207lp_probe(struct i2c_client *client,
return 0;
}
-static int lv5207lp_remove(struct i2c_client *client)
+static void lv5207lp_remove(struct i2c_client *client)
{
struct backlight_device *backlight = i2c_get_clientdata(client);
backlight->props.brightness = 0;
backlight_update_status(backlight);
-
- return 0;
}
static const struct i2c_device_id lv5207lp_ids[] = {
diff --git a/drivers/video/backlight/mt6370-backlight.c b/drivers/video/backlight/mt6370-backlight.c
new file mode 100644
index 000000000000..623d4f2baca2
--- /dev/null
+++ b/drivers/video/backlight/mt6370-backlight.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiaEn Wu <chiaen_wu@richtek.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MT6370_REG_DEV_INFO 0x100
+#define MT6370_REG_BL_EN 0x1A0
+#define MT6370_REG_BL_BSTCTRL 0x1A1
+#define MT6370_REG_BL_PWM 0x1A2
+#define MT6370_REG_BL_DIM2 0x1A4
+
+#define MT6370_VENID_MASK GENMASK(7, 4)
+#define MT6370_BL_EXT_EN_MASK BIT(7)
+#define MT6370_BL_EN_MASK BIT(6)
+#define MT6370_BL_CODE_MASK BIT(0)
+#define MT6370_BL_CH_MASK GENMASK(5, 2)
+#define MT6370_BL_CH_SHIFT 2
+#define MT6370_BL_DIM2_COMMON_MASK GENMASK(2, 0)
+#define MT6370_BL_DIM2_COMMON_SHIFT 3
+#define MT6370_BL_DIM2_6372_MASK GENMASK(5, 0)
+#define MT6370_BL_DIM2_6372_SHIFT 6
+#define MT6370_BL_PWM_EN_MASK BIT(7)
+#define MT6370_BL_PWM_HYS_EN_MASK BIT(2)
+#define MT6370_BL_PWM_HYS_SEL_MASK GENMASK(1, 0)
+#define MT6370_BL_OVP_EN_MASK BIT(7)
+#define MT6370_BL_OVP_SEL_MASK GENMASK(6, 5)
+#define MT6370_BL_OVP_SEL_SHIFT 5
+#define MT6370_BL_OC_EN_MASK BIT(3)
+#define MT6370_BL_OC_SEL_MASK GENMASK(2, 1)
+#define MT6370_BL_OC_SEL_SHIFT 1
+
+#define MT6370_BL_PWM_HYS_TH_MIN_STEP 1
+#define MT6370_BL_PWM_HYS_TH_MAX_STEP 64
+#define MT6370_BL_OVP_MIN_UV 17000000
+#define MT6370_BL_OVP_MAX_UV 29000000
+#define MT6370_BL_OVP_STEP_UV 4000000
+#define MT6370_BL_OCP_MIN_UA 900000
+#define MT6370_BL_OCP_MAX_UA 1800000
+#define MT6370_BL_OCP_STEP_UA 300000
+#define MT6370_BL_MAX_COMMON_BRIGHTNESS 2048
+#define MT6370_BL_MAX_6372_BRIGHTNESS 16384
+#define MT6370_BL_MAX_CH 15
+
+enum {
+ MT6370_VID_COMMON = 1,
+ MT6370_VID_6372,
+};
+
+struct mt6370_priv {
+ u8 dim2_mask;
+ u8 dim2_shift;
+ int def_max_brightness;
+ struct backlight_device *bl;
+ struct device *dev;
+ struct gpio_desc *enable_gpio;
+ struct regmap *regmap;
+};
+
+static int mt6370_bl_update_status(struct backlight_device *bl_dev)
+{
+ struct mt6370_priv *priv = bl_get_data(bl_dev);
+ int brightness = backlight_get_brightness(bl_dev);
+ unsigned int enable_val;
+ u8 brightness_val[2];
+ int ret;
+
+ if (brightness) {
+ brightness_val[0] = (brightness - 1) & priv->dim2_mask;
+ brightness_val[1] = (brightness - 1) >> priv->dim2_shift;
+
+ ret = regmap_raw_write(priv->regmap, MT6370_REG_BL_DIM2,
+ brightness_val, sizeof(brightness_val));
+ if (ret)
+ return ret;
+ }
+
+ gpiod_set_value(priv->enable_gpio, !!brightness);
+
+ enable_val = brightness ? MT6370_BL_EN_MASK : 0;
+ return regmap_update_bits(priv->regmap, MT6370_REG_BL_EN,
+ MT6370_BL_EN_MASK, enable_val);
+}
+
+static int mt6370_bl_get_brightness(struct backlight_device *bl_dev)
+{
+ struct mt6370_priv *priv = bl_get_data(bl_dev);
+ unsigned int enable;
+ u8 brightness_val[2];
+ int brightness, ret;
+
+ ret = regmap_read(priv->regmap, MT6370_REG_BL_EN, &enable);
+ if (ret)
+ return ret;
+
+ if (!(enable & MT6370_BL_EN_MASK))
+ return 0;
+
+ ret = regmap_raw_read(priv->regmap, MT6370_REG_BL_DIM2,
+ brightness_val, sizeof(brightness_val));
+ if (ret)
+ return ret;
+
+ brightness = brightness_val[1] << priv->dim2_shift;
+ brightness += brightness_val[0] & priv->dim2_mask;
+
+ return brightness + 1;
+}
+
+static const struct backlight_ops mt6370_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = mt6370_bl_update_status,
+ .get_brightness = mt6370_bl_get_brightness,
+};
+
+static int mt6370_init_backlight_properties(struct mt6370_priv *priv,
+ struct backlight_properties *props)
+{
+ struct device *dev = priv->dev;
+ u8 prop_val;
+ u32 brightness, ovp_uV, ocp_uA;
+ unsigned int mask, val;
+ int ret;
+
+ /* Vendor optional properties */
+ val = 0;
+ if (device_property_read_bool(dev, "mediatek,bled-pwm-enable"))
+ val |= MT6370_BL_PWM_EN_MASK;
+
+ if (device_property_read_bool(dev, "mediatek,bled-pwm-hys-enable"))
+ val |= MT6370_BL_PWM_HYS_EN_MASK;
+
+ ret = device_property_read_u8(dev,
+ "mediatek,bled-pwm-hys-input-th-steps",
+ &prop_val);
+ if (!ret) {
+ prop_val = clamp_val(prop_val,
+ MT6370_BL_PWM_HYS_TH_MIN_STEP,
+ MT6370_BL_PWM_HYS_TH_MAX_STEP);
+ prop_val = prop_val <= 1 ? 0 :
+ prop_val <= 4 ? 1 :
+ prop_val <= 16 ? 2 : 3;
+ val |= prop_val;
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6370_REG_BL_PWM,
+ val, val);
+ if (ret)
+ return ret;
+
+ val = 0;
+ if (device_property_read_bool(dev, "mediatek,bled-ovp-shutdown"))
+ val |= MT6370_BL_OVP_EN_MASK;
+
+ ret = device_property_read_u32(dev, "mediatek,bled-ovp-microvolt",
+ &ovp_uV);
+ if (!ret) {
+ ovp_uV = clamp_val(ovp_uV, MT6370_BL_OVP_MIN_UV,
+ MT6370_BL_OVP_MAX_UV);
+ ovp_uV = DIV_ROUND_UP(ovp_uV - MT6370_BL_OVP_MIN_UV,
+ MT6370_BL_OVP_STEP_UV);
+ val |= ovp_uV << MT6370_BL_OVP_SEL_SHIFT;
+ }
+
+ if (device_property_read_bool(dev, "mediatek,bled-ocp-shutdown"))
+ val |= MT6370_BL_OC_EN_MASK;
+
+ ret = device_property_read_u32(dev, "mediatek,bled-ocp-microamp",
+ &ocp_uA);
+ if (!ret) {
+ ocp_uA = clamp_val(ocp_uA, MT6370_BL_OCP_MIN_UA,
+ MT6370_BL_OCP_MAX_UA);
+ ocp_uA = DIV_ROUND_UP(ocp_uA - MT6370_BL_OCP_MIN_UA,
+ MT6370_BL_OCP_STEP_UA);
+ val |= ocp_uA << MT6370_BL_OC_SEL_SHIFT;
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6370_REG_BL_BSTCTRL,
+ val, val);
+ if (ret)
+ return ret;
+
+ /* Common properties */
+ ret = device_property_read_u32(dev, "max-brightness", &brightness);
+ if (ret)
+ brightness = priv->def_max_brightness;
+
+ props->max_brightness = min_t(u32, brightness, priv->def_max_brightness);
+
+ ret = device_property_read_u32(dev, "default-brightness", &brightness);
+ if (ret)
+ brightness = props->max_brightness;
+
+ props->brightness = min_t(u32, brightness, props->max_brightness);
+
+ val = 0;
+ if (device_property_read_bool(dev, "mediatek,bled-exponential-mode-enable")) {
+ val |= MT6370_BL_CODE_MASK;
+ props->scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props->scale = BACKLIGHT_SCALE_LINEAR;
+
+ ret = device_property_read_u8(dev, "mediatek,bled-channel-use",
+ &prop_val);
+ if (ret) {
+ dev_err(dev, "mediatek,bled-channel-use DT property missing\n");
+ return ret;
+ }
+
+ if (!prop_val || prop_val > MT6370_BL_MAX_CH) {
+ dev_err(dev,
+ "No channel specified or over than upper bound (%d)\n",
+ prop_val);
+ return -EINVAL;
+ }
+
+ mask = MT6370_BL_EXT_EN_MASK | MT6370_BL_CH_MASK;
+ val |= prop_val << MT6370_BL_CH_SHIFT;
+
+ if (priv->enable_gpio)
+ val |= MT6370_BL_EXT_EN_MASK;
+
+ return regmap_update_bits(priv->regmap, MT6370_REG_BL_EN, mask, val);
+}
+
+static int mt6370_check_vendor_info(struct mt6370_priv *priv)
+{
+ /*
+ * MT6372 uses 14 bits to control the brightness, while MT6370
+ * and MT6371 use 11 bits. This function checks the vendor ID
+ * and sets the corresponding hardware mask, shift and default
+ * maximum brightness value.
+ */
+ unsigned int dev_info, hw_vid, of_vid;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
+ if (ret)
+ return ret;
+
+ of_vid = (uintptr_t)device_get_match_data(priv->dev);
+ hw_vid = FIELD_GET(MT6370_VENID_MASK, dev_info);
+ hw_vid = (hw_vid == 0x9 || hw_vid == 0xb) ? MT6370_VID_6372 : MT6370_VID_COMMON;
+ if (hw_vid != of_vid)
+ return dev_err_probe(priv->dev, -EINVAL,
+ "Buggy DT, wrong compatible string\n");
+
+ if (hw_vid == MT6370_VID_6372) {
+ priv->dim2_mask = MT6370_BL_DIM2_6372_MASK;
+ priv->dim2_shift = MT6370_BL_DIM2_6372_SHIFT;
+ priv->def_max_brightness = MT6370_BL_MAX_6372_BRIGHTNESS;
+ } else {
+ priv->dim2_mask = MT6370_BL_DIM2_COMMON_MASK;
+ priv->dim2_shift = MT6370_BL_DIM2_COMMON_SHIFT;
+ priv->def_max_brightness = MT6370_BL_MAX_COMMON_BRIGHTNESS;
+ }
+
+ return 0;
+}
+
+static int mt6370_bl_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ };
+ struct device *dev = &pdev->dev;
+ struct mt6370_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+ ret = mt6370_check_vendor_info(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to check vendor info\n");
+
+ priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->enable_gpio),
+ "Failed to get 'enable' gpio\n");
+
+ ret = mt6370_init_backlight_properties(priv, &props);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to init backlight properties\n");
+
+ priv->bl = devm_backlight_device_register(dev, pdev->name, dev, priv,
+ &mt6370_bl_ops, &props);
+ if (IS_ERR(priv->bl))
+ return dev_err_probe(dev, PTR_ERR(priv->bl),
+ "Failed to register backlight\n");
+
+ backlight_update_status(priv->bl);
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int mt6370_bl_remove(struct platform_device *pdev)
+{
+ struct mt6370_priv *priv = platform_get_drvdata(pdev);
+ struct backlight_device *bl_dev = priv->bl;
+
+ bl_dev->props.brightness = 0;
+ backlight_update_status(priv->bl);
+
+ return 0;
+}
+
+static const struct of_device_id mt6370_bl_of_match[] = {
+ { .compatible = "mediatek,mt6370-backlight", .data = (void *)MT6370_VID_COMMON },
+ { .compatible = "mediatek,mt6372-backlight", .data = (void *)MT6370_VID_6372 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_bl_of_match);
+
+static struct platform_driver mt6370_bl_driver = {
+ .driver = {
+ .name = "mt6370-backlight",
+ .of_match_table = mt6370_bl_of_match,
+ },
+ .probe = mt6370_bl_probe,
+ .remove = mt6370_bl_remove,
+};
+module_platform_driver(mt6370_bl_driver);
+
+MODULE_AUTHOR("ChiaEn Wu <chiaen_wu@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 Backlight Driver");
+MODULE_LICENSE("GPL v2");
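The new MT6370 driver spreads the brightness code across two registers starting at MT6370_REG_BL_DIM2: the low bits go into the first register (masked by dim2_mask), the remaining high bits into the second (after dim2_shift). A hedged worked example of the split for the common 11-bit parts:

/* Illustration for MT6370/MT6371: 11-bit code, mask 0x7, shift 3. */
unsigned int brightness = 2048;			/* common maximum       */
unsigned int code = brightness - 1;		/* register code 0x7ff  */
u8 lo = code & MT6370_BL_DIM2_COMMON_MASK;	/* 0x7ff & 0x7  = 0x07  */
u8 hi = code >> MT6370_BL_DIM2_COMMON_SHIFT;	/* 0x7ff >> 3   = 0xff  */

/* mt6370_bl_get_brightness() reverses it: (hi << 3) + lo + 1 == 2048 */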
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 6df6fcd132e3..f55b3d616a87 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -121,12 +121,11 @@ err_reg:
return ret;
}
-static int tosa_bl_remove(struct i2c_client *client)
+static void tosa_bl_remove(struct i2c_client *client)
{
struct tosa_bl_data *data = i2c_get_clientdata(client);
data->bl = NULL;
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index bd4dc97d4d34..db568f67e4dc 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -290,7 +290,7 @@ static char default_sti_path[21] __read_mostly;
static int __init sti_setup(char *str)
{
if (str)
- strlcpy (default_sti_path, str, sizeof (default_sti_path));
+ strscpy(default_sti_path, str, sizeof(default_sti_path));
return 1;
}
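The strlcpy() to strscpy() conversions repeated through the fbdev hunks are mechanical, but strscpy() has friendlier semantics: it always NUL-terminates, never reads past the destination-sized window of the source, and returns the number of bytes copied or -E2BIG on truncation (strlcpy returned the would-be source length, which invited overreads). A hedged sketch of checking the result where truncation matters:

ssize_t n;

n = strscpy(dst, src, sizeof(dst));	/* dst must be a real array here */
if (n < 0)				/* -E2BIG: src did not fit */
	pr_warn("name truncated\n");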
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index a2a381631628..a317d9fe1d67 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -11,6 +11,7 @@
* Code is based on s3fb
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -956,6 +957,10 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
int rc;
u8 regval;
+ rc = aperture_remove_conflicting_pci_devices(dev, "arkfb");
+ if (rc < 0)
+ return rc;
+
/* Ignore secondary VGA device because there is no VGA arbitration */
if (! svga_primary_device(dev)) {
dev_info(&(dev->dev), "ignoring secondary device\n");
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index f8ef62542f7f..3818437a8f69 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -29,6 +29,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -545,6 +546,10 @@ static int asiliantfb_pci_init(struct pci_dev *dp,
struct fb_info *p;
int err;
+ err = aperture_remove_conflicting_pci_devices(dp, "asiliantfb");
+ if (err)
+ return err;
+
if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
return -ENODEV;
addr = pci_resource_start(dp, 0);
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index b26c81233b6b..57e398fe7a81 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -23,7 +23,7 @@
* - Convert to new framebuffer API,
* fix colormap setting at 16 bits/pixel (565)
*
- * Paul Mundt
+ * Paul Mundt
* - PCI hotplug
*
* Jon Smirl <jonsmirl@yahoo.com>
@@ -47,6 +47,7 @@
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -520,13 +521,13 @@ static const struct fb_ops aty128fb_ops = {
* - endian conversions may possibly be avoided by
* using the other register aperture. TODO.
*/
-static inline u32 _aty_ld_le32(volatile unsigned int regindex,
+static inline u32 _aty_ld_le32(volatile unsigned int regindex,
const struct aty128fb_par *par)
{
return readl (par->regbase + regindex);
}
-static inline void _aty_st_le32(volatile unsigned int regindex, u32 val,
+static inline void _aty_st_le32(volatile unsigned int regindex, u32 val,
const struct aty128fb_par *par)
{
writel (val, par->regbase + regindex);
@@ -559,12 +560,12 @@ static inline void _aty_st_8(unsigned int regindex, u8 val,
static u32 _aty_ld_pll(unsigned int pll_index,
const struct aty128fb_par *par)
-{
+{
aty_st_8(CLOCK_CNTL_INDEX, pll_index & 0x3F);
return aty_ld_le32(CLOCK_CNTL_DATA);
}
-
+
static void _aty_st_pll(unsigned int pll_index, u32 val,
const struct aty128fb_par *par)
{
@@ -619,7 +620,7 @@ static int register_test(const struct aty128fb_par *par)
aty_st_le32(BIOS_0_SCRATCH, 0xAAAAAAAA);
if (aty_ld_le32(BIOS_0_SCRATCH) == 0xAAAAAAAA)
- flag = 1;
+ flag = 1;
}
aty_st_le32(BIOS_0_SCRATCH, val); // restore value
@@ -901,7 +902,7 @@ static void aty128_get_pllinfo(struct aty128fb_par *par,
bios_hdr = BIOS_IN16(0x48);
bios_pll = BIOS_IN16(bios_hdr + 0x30);
-
+
par->constants.ppll_max = BIOS_IN32(bios_pll + 0x16);
par->constants.ppll_min = BIOS_IN32(bios_pll + 0x12);
par->constants.xclk = BIOS_IN16(bios_pll + 0x08);
@@ -913,7 +914,7 @@ static void aty128_get_pllinfo(struct aty128fb_par *par,
par->constants.xclk, par->constants.ref_divider,
par->constants.ref_clk);
-}
+}
#ifdef CONFIG_X86
static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par)
@@ -925,7 +926,7 @@ static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par)
*/
u32 segstart;
unsigned char __iomem *rom_base = NULL;
-
+
for (segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) {
rom_base = ioremap(segstart, 0x10000);
if (rom_base == NULL)
@@ -1118,12 +1119,12 @@ static int aty128_var_to_crtc(const struct fb_var_screeninfo *var,
v_sync_wid = 1;
else if (v_sync_wid > 0x1f) /* 0x1f = max vwidth */
v_sync_wid = 0x1f;
-
+
v_sync_strt = v_disp + lower;
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
+
c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
crtc->gen_cntl = 0x3000000L | c_sync | (dst << 8);
@@ -1301,11 +1302,11 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
aty_st_le32(LVDS_GEN_CNTL, reg);
#ifdef CONFIG_FB_ATY128_BACKLIGHT
aty128_bl_set_power(info, FB_BLANK_UNBLANK);
-#endif
+#endif
} else {
#ifdef CONFIG_FB_ATY128_BACKLIGHT
aty128_bl_set_power(info, FB_BLANK_POWERDOWN);
-#endif
+#endif
reg = aty_ld_le32(LVDS_GEN_CNTL);
reg |= LVDS_DISPLAY_DIS;
aty_st_le32(LVDS_GEN_CNTL, reg);
@@ -1481,7 +1482,7 @@ static int aty128_ddafifo(struct aty128_ddafifo *dsp,
* This actually sets the video mode.
*/
static int aty128fb_set_par(struct fb_info *info)
-{
+{
struct aty128fb_par *par = info->par;
u32 config;
int err;
@@ -1595,7 +1596,7 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
var->accel_flags = par->accel_flags;
return 0;
-}
+}
static int aty128fb_check_var(struct fb_var_screeninfo *var,
@@ -1979,12 +1980,12 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PowerBook Titanium */
if (of_machine_is_compatible("PowerBook3,2"))
default_vmode = VMODE_1152_768_60;
-
- if (default_cmode > 16)
+
+ if (default_cmode > 16)
default_cmode = CMODE_32;
- else if (default_cmode > 8)
+ else if (default_cmode > 8)
default_cmode = CMODE_16;
- else
+ else
default_cmode = CMODE_8;
if (mac_vmode_to_var(default_vmode, default_cmode, &var))
@@ -1994,7 +1995,7 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_PPC_PMAC */
{
if (mode_option)
- if (fb_find_mode(&var, info, mode_option, NULL,
+ if (fb_find_mode(&var, info, mode_option, NULL,
0, &defaultmode, 8) == 0)
var = default_var;
}
@@ -2055,6 +2056,10 @@ static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *bios = NULL;
#endif
+ err = aperture_remove_conflicting_pci_devices(pdev, "aty128fb");
+ if (err)
+ return err;
+
/* Enable device in PCI config */
if ((err = pci_enable_device(pdev))) {
printk(KERN_ERR "aty128fb: Cannot enable PCI device: %d\n",
@@ -2301,7 +2306,7 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
struct aty128fb_par *par = info->par;
u32 value;
int rc;
-
+
switch (cmd) {
case FBIO_ATY128_SET_MIRROR:
if (par->chip_gen != rage_M3)
@@ -2313,8 +2318,8 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
par->crt_on = (value & 0x02) != 0;
if (!par->crt_on && !par->lcd_on)
par->lcd_on = 1;
- aty128_set_crt_enable(par, par->crt_on);
- aty128_set_lcd_enable(par, par->lcd_on);
+ aty128_set_crt_enable(par, par->crt_on);
+ aty128_set_lcd_enable(par, par->lcd_on);
return 0;
case FBIO_ATY128_GET_MIRROR:
if (par->chip_gen != rage_M3)
@@ -2331,7 +2336,7 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
if (!par->pdev->pm_cap)
return;
-
+
/* Set the chip into the appropriate suspend mode (we use D2,
* D3 would require a complete re-initialisation of the chip,
* including PCI config registers, clocks, AGP configuration, ...)
@@ -2376,12 +2381,12 @@ static int aty128_pci_suspend_late(struct device *dev, pm_message_t state)
*/
return 0;
#endif /* CONFIG_PPC_PMAC */
-
+
if (state.event == pdev->dev.power.power_state.event)
return 0;
printk(KERN_DEBUG "aty128fb: suspending...\n");
-
+
console_lock();
fb_set_suspend(info, 1);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a3e6faed7745..b3463d137152 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -48,6 +48,7 @@
******************************************************************************/
+#include <linux/aperture.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -3533,7 +3534,11 @@ static int atyfb_pci_probe(struct pci_dev *pdev,
struct fb_info *info;
struct resource *rp;
struct atyfb_par *par;
- int rc = -ENOMEM;
+ int rc;
+
+ rc = aperture_remove_conflicting_pci_devices(pdev, "atyfb");
+ if (rc)
+ return rc;
/* Enable device in PCI config */
if (pci_enable_device(pdev)) {
@@ -3891,7 +3896,7 @@ static int __init atyfb_setup(char *options)
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
- strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
+ strscpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 6851f47613e1..8b28c9bddd97 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -7,7 +7,7 @@
* Copyright 2000 Ani Joshi <ajoshi@kernel.crashing.org>
*
* i2c bits from Luca Tettamanti <kronos@kronoz.cjb.net>
- *
+ *
* Special thanks to ATI DevRel team for their hardware donations.
*
* ...Insert GPL boilerplate here...
@@ -54,6 +54,7 @@
#include "radeonfb.h"
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -110,7 +111,7 @@ static const struct pci_device_id radeonfb_pci_table[] = {
/* Radeon IGP320M (U1) */
CHIP_DEF(PCI_CHIP_RS100_4336, RS100, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
/* Radeon IGP320 (A3) */
- CHIP_DEF(PCI_CHIP_RS100_4136, RS100, CHIP_HAS_CRTC2 | CHIP_IS_IGP),
+ CHIP_DEF(PCI_CHIP_RS100_4136, RS100, CHIP_HAS_CRTC2 | CHIP_IS_IGP),
/* IGP330M/340M/350M (U2) */
CHIP_DEF(PCI_CHIP_RS200_4337, RS200, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
/* IGP330/340/350 (A4) */
@@ -240,7 +241,7 @@ typedef struct {
* interfere with anything
*/
static reg_val common_regs[] = {
- { OVR_CLR, 0 },
+ { OVR_CLR, 0 },
{ OVR_WID_LEFT_RIGHT, 0 },
{ OVR_WID_TOP_BOTTOM, 0 },
{ OV0_SCALE_CNTL, 0 },
@@ -255,7 +256,7 @@ static reg_val common_regs[] = {
/*
* globals
*/
-
+
static char *mode_option;
static char *monitor_layout;
static bool noaccel = 0;
@@ -422,7 +423,7 @@ static int radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
* ROM somewhere in the first meg. We will just ignore the copy
* and use the ROM directly.
*/
-
+
/* Fix from ATI for problem with Radeon hardware not leaving ROM enabled */
unsigned int temp;
temp = INREG(MPP_TB_CONFIG);
@@ -430,14 +431,14 @@ static int radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
temp |= 0x04 << 24;
OUTREG(MPP_TB_CONFIG, temp);
temp = INREG(MPP_TB_CONFIG);
-
+
rom = pci_map_rom(dev, &rom_size);
if (!rom) {
printk(KERN_ERR "radeonfb (%s): ROM failed to map\n",
pci_name(rinfo->pdev));
return -ENOMEM;
}
-
+
rinfo->bios_seg = rom;
/* Very simple test to make sure it appeared */
@@ -515,7 +516,7 @@ static int radeon_find_mem_vbios(struct radeonfb_info *rinfo)
*/
u32 segstart;
void __iomem *rom_base = NULL;
-
+
for(segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) {
rom_base = ioremap(segstart, 0x10000);
if (rom_base == NULL)
@@ -605,16 +606,16 @@ static int radeon_probe_pll_params(struct radeonfb_info *rinfo)
for(i=0; i<1000000; i++)
if (((INREG(CRTC_VLINE_CRNT_VLINE) >> 16) & 0x3ff) == 0)
break;
-
+
stop_time = ktime_get();
-
+
local_irq_enable();
total_usecs = ktime_us_delta(stop_time, start_time);
if (total_usecs >= 10 * USEC_PER_SEC || total_usecs == 0)
return -1;
hz = USEC_PER_SEC/(u32)total_usecs;
-
+
hTotal = ((INREG(CRTC_H_TOTAL_DISP) & 0x1ff) + 1) * 8;
vTotal = ((INREG(CRTC_V_TOTAL_DISP) & 0x3ff) + 1);
vclk = (long long)hTotal * (long long)vTotal * hz;
@@ -662,7 +663,7 @@ static int radeon_probe_pll_params(struct radeonfb_info *rinfo)
denom *= 3;
break;
case 6:
- denom *= 6;
+ denom *= 6;
break;
case 7:
denom *= 12;
@@ -878,7 +879,7 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
v.green.length = 6;
v.blue.length = 5;
v.transp.offset = v.transp.length = 0;
- break;
+ break;
case 24:
nom = 4;
den = 1;
@@ -908,7 +909,7 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
v.yres_virtual = v.yres;
if (v.xres_virtual < v.xres)
v.xres_virtual = v.xres;
-
+
/* XXX I'm adjusting xres_virtual to the pitch, that may help XFree
* with some panels, though I don't quite like this solution
@@ -929,14 +930,14 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
if (v.xoffset > v.xres_virtual - v.xres)
v.xoffset = v.xres_virtual - v.xres - 1;
-
+
if (v.yoffset > v.yres_virtual - v.yres)
v.yoffset = v.yres_virtual - v.yres - 1;
-
+
v.red.msb_right = v.green.msb_right = v.blue.msb_right =
v.transp.offset = v.transp.length =
v.transp.msb_right = 0;
-
+
memcpy(var, &v, sizeof(v));
return 0;
@@ -951,7 +952,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
if ((var->xoffset + info->var.xres > info->var.xres_virtual)
|| (var->yoffset + info->var.yres > info->var.yres_virtual))
return -EINVAL;
-
+
if (rinfo->asleep)
return 0;
@@ -1151,7 +1152,7 @@ static int radeonfb_blank (int blank, struct fb_info *info)
if (rinfo->asleep)
return 0;
-
+
return radeon_screen_blank(rinfo, blank, 0);
}
@@ -1401,7 +1402,7 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
} else {
/* R300 uses ref_div_acc field as real ref divider */
OUTPLLP(PPLL_REF_DIV,
- (mode->ppll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+ (mode->ppll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
~R300_PPLL_REF_DIV_ACC_MASK);
}
} else
@@ -1423,7 +1424,7 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
workaround shouldn't have any effect on them. */
for (i = 0; (i < 10000 && INPLL(PPLL_REF_DIV) & PPLL_ATOMIC_UPDATE_R); i++)
;
-
+
OUTPLL(HTOTAL_CNTL, 0);
/* Clear reset & atomic update */
@@ -1510,7 +1511,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
radeon_fifo_wait(2);
OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
-
+
return;
}
@@ -1735,7 +1736,7 @@ static int radeonfb_set_par(struct fb_info *info)
/* Clear auto-center etc... */
newmode->crtc_more_cntl = rinfo->init_state.crtc_more_cntl;
newmode->crtc_more_cntl &= 0xfffffff0;
-
+
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
newmode->crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN;
if (mirror)
@@ -1793,7 +1794,7 @@ static int radeonfb_set_par(struct fb_info *info)
newmode->surface_cntl |= NONSURF_AP0_SWP_16BPP;
newmode->surface_cntl |= NONSURF_AP1_SWP_16BPP;
break;
- case 24:
+ case 24:
case 32:
newmode->surface_cntl |= NONSURF_AP0_SWP_32BPP;
newmode->surface_cntl |= NONSURF_AP1_SWP_32BPP;
@@ -1980,7 +1981,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
/* Fill fix common fields */
- strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
+ strscpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
info->fix.smem_len = rinfo->video_ram;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -2028,7 +2029,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
}
save_crtc_gen_cntl = INREG(CRTC_GEN_CNTL);
save_crtc_ext_cntl = INREG(CRTC_EXT_CNTL);
-
+
OUTREG(CRTC_EXT_CNTL, save_crtc_ext_cntl | CRTC_DISPLAY_DIS);
OUTREG(CRTC_GEN_CNTL, save_crtc_gen_cntl | CRTC_DISP_REQ_EN_B);
mdelay(100);
@@ -2038,7 +2039,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
#ifdef SET_MC_FB_FROM_APERTURE
/* Set framebuffer to be at the same address as set in PCI BAR */
- OUTREG(MC_FB_LOCATION,
+ OUTREG(MC_FB_LOCATION,
((aper_base + aper_size - 1) & 0xffff0000) | (aper_base >> 16));
rinfo->fb_local_base = aper_base;
#else
@@ -2079,7 +2080,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
OUTREG(CRTC_GEN_CNTL, save_crtc_gen_cntl);
OUTREG(CRTC_EXT_CNTL, save_crtc_ext_cntl);
if (rinfo->has_CRTC2)
- OUTREG(CRTC2_GEN_CNTL, save_crtc2_gen_cntl);
+ OUTREG(CRTC2_GEN_CNTL, save_crtc2_gen_cntl);
pr_debug("aper_base: %08x MC_FB_LOC to: %08x, MC_AGP_LOC to: %08x\n",
aper_base,
@@ -2094,34 +2095,34 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tmp;
/* framebuffer size */
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
(rinfo->family == CHIP_FAMILY_RS200) ||
(rinfo->family == CHIP_FAMILY_RS300) ||
(rinfo->family == CHIP_FAMILY_RC410) ||
(rinfo->family == CHIP_FAMILY_RS400) ||
(rinfo->family == CHIP_FAMILY_RS480) ) {
- u32 tom = INREG(NB_TOM);
- tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
-
- radeon_fifo_wait(6);
- OUTREG(MC_FB_LOCATION, tom);
- OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
-
- /* This is supposed to fix the crtc2 noise problem. */
- OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
-
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
- (rinfo->family == CHIP_FAMILY_RS200)) {
- /* This is to workaround the asic bug for RMX, some versions
- of BIOS doesn't have this register initialized correctly.
- */
- OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
- ~CRTC_H_CUTOFF_ACTIVE_EN);
- }
- } else {
- tmp = INREG(CNFG_MEMSIZE);
+ u32 tom = INREG(NB_TOM);
+
+ tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+ radeon_fifo_wait(6);
+ OUTREG(MC_FB_LOCATION, tom);
+ OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
+
+ /* This is supposed to fix the crtc2 noise problem. */
+ OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
+
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ (rinfo->family == CHIP_FAMILY_RS200)) {
+ /* This is to workaround the asic bug for RMX, some versions
+ * of BIOS doesn't have this register initialized correctly.
+ */
+ OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
+ ~CRTC_H_CUTOFF_ACTIVE_EN);
+ }
+ } else {
+ tmp = INREG(CNFG_MEMSIZE);
}
/* mem size is bits [28:0], mask off the rest */
@@ -2239,20 +2240,10 @@ static const struct bin_attribute edid2_attr = {
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
+ resource_size_t base = pci_resource_start(pdev, 0);
+ resource_size_t size = pci_resource_len(pdev, 0);
- remove_conflicting_framebuffers(ap, KBUILD_MODNAME, false);
-
- kfree(ap);
-
- return 0;
+ return aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
}
static int radeonfb_pci_register(struct pci_dev *pdev,
@@ -2265,7 +2256,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
int err = 0;
pr_debug("radeonfb_pci_register BEGIN\n");
-
+
/* Enable device in PCI config */
ret = pci_enable_device(pdev);
if (ret < 0) {
@@ -2280,9 +2271,9 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
goto err_disable;
}
rinfo = info->par;
- rinfo->info = info;
+ rinfo->info = info;
rinfo->pdev = pdev;
-
+
spin_lock_init(&rinfo->reg_lock);
timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0);
@@ -2521,7 +2512,7 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct radeonfb_info *rinfo = info->par;
-
+
if (!rinfo)
return;
@@ -2540,7 +2531,7 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
iounmap(rinfo->mmio_base);
iounmap(rinfo->fb_base);
-
+
pci_release_region(pdev, 2);
pci_release_region(pdev, 0);
@@ -2550,7 +2541,7 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
fb_destroy_modedb(rinfo->mon1_modedb);
#ifdef CONFIG_FB_RADEON_I2C
radeon_delete_i2c_busses(rinfo);
-#endif
+#endif
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index e7702fe1fe7d..6403ae07970d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -182,7 +182,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void bw2_init_fix(struct fb_info *info, int linebytes)
{
- strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
+ strscpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_MONO01;
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index 3a1c2e0739a1..4651b48a87f9 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -7,6 +7,7 @@
* - FB1 is display 1 with unique memory area
* - both display use 32 bit colors
*/
+#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fb.h>
@@ -614,6 +615,10 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
struct fb_info *info;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(dev, "carminefb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(dev);
if (ret)
return ret;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894af26f8..f1c1c95c1fdf 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -14,6 +14,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -122,7 +123,7 @@ static int chipsfb_set_par(struct fb_info *info)
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 5;
-
+
} else {
/* p->var.bits_per_pixel == 8 */
write_cr(0x13, 100); // Set line length (doublewords)
@@ -131,13 +132,13 @@ static int chipsfb_set_par(struct fb_info *info)
write_xr(0x20, 0x00); // 8 bit blitter mode
info->fix.line_length = 800;
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->var.red.offset = info->var.green.offset =
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 8;
-
+
}
return 0;
}
@@ -351,7 +352,11 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
struct fb_info *p;
unsigned long addr;
unsigned short cmd;
- int rc = -ENODEV;
+ int rc;
+
+ rc = aperture_remove_conflicting_pci_devices(dp, "chipsfb");
+ if (rc)
+ return rc;
if (pci_enable_device(dp) < 0) {
dev_err(&dp->dev, "Cannot enable PCI device\n");
@@ -430,6 +435,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index a41a75841e10..b08bee43779a 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -34,6 +34,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1999,7 +2000,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
}
/* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ strscpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
sizeof(info->fix.id));
/* monochrome: only 1 memory plane */
@@ -2085,6 +2086,10 @@ static int cirrusfb_pci_register(struct pci_dev *pdev,
unsigned long board_addr, board_size;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "cirrusfb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret < 0) {
printk(KERN_ERR "cirrusfb: Cannot enable PCI device\n");
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 771ce1f76951..a1061c2f1640 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -326,7 +326,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
info->var.vmode = FB_VMODE_NONINTERLACED;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.accel = FB_ACCEL_NONE;
- strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
fb_videomode_to_var(&info->var, &cfb->mode);
ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index cf9ac4da0a82..098b62f7b701 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -412,7 +412,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5)) {
- strlcpy(fontname, options + 5, sizeof(fontname));
+ strscpy(fontname, options + 5, sizeof(fontname));
continue;
}
@@ -2401,15 +2401,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
- int resize;
+ int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
+ old_userfont = p->userfont;
if ((p->userfont = userfont))
REFCOUNT(data)++;
+
+ old_width = vc->vc_font.width;
+ old_height = vc->vc_font.height;
+ old_charcount = vc->vc_font.charcount;
+
vc->vc_font.width = w;
vc->vc_font.height = h;
vc->vc_font.charcount = charcount;
@@ -2425,7 +2431,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
- vc_resize(vc, cols, rows);
+ ret = vc_resize(vc, cols, rows);
+ if (ret)
+ goto err_out;
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2435,6 +2443,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (old_data && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
+
+err_out:
+ p->fontdata = old_data;
+ vc->vc_font.data = (void *)old_data;
+
+ if (userfont) {
+ p->userfont = old_userfont;
+ REFCOUNT(data)--;
+ }
+
+ vc->vc_font.width = old_width;
+ vc->vc_font.height = old_height;
+ vc->vc_font.charcount = old_charcount;
+
+ return ret;
}
/*
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 02b0cf2cfafe..1e70d8c67653 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -13,13 +13,13 @@
#include <linux/module.h>
+#include <linux/aperture.h>
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
-#include <linux/sysfb.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
@@ -40,6 +40,7 @@
#include <asm/fb.h>
+#include <video/vga.h>
/*
* Frame buffer device initialization and setup routines
@@ -50,10 +51,10 @@
static DEFINE_MUTEX(registration_lock);
struct fb_info *registered_fb[FB_MAX] __read_mostly;
-EXPORT_SYMBOL(registered_fb);
-
int num_registered_fb __read_mostly;
-EXPORT_SYMBOL(num_registered_fb);
+#define for_each_registered_fb(i) \
+ for (i = 0; i < FB_MAX; i++) \
+ if (!registered_fb[i]) {} else
bool fb_center_logo __read_mostly;
@@ -1525,103 +1526,6 @@ static int fb_check_foreignness(struct fb_info *fi)
return 0;
}
-static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
-{
- /* is the generic aperture base the same as the HW one */
- if (gen->base == hw->base)
- return true;
- /* is the generic aperture base inside the hw base->hw base+size */
- if (gen->base > hw->base && gen->base < hw->base + hw->size)
- return true;
- return false;
-}
-
-static bool fb_do_apertures_overlap(struct apertures_struct *gena,
- struct apertures_struct *hwa)
-{
- int i, j;
- if (!hwa || !gena)
- return false;
-
- for (i = 0; i < hwa->count; ++i) {
- struct aperture *h = &hwa->ranges[i];
- for (j = 0; j < gena->count; ++j) {
- struct aperture *g = &gena->ranges[j];
- printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
- (unsigned long long)g->base,
- (unsigned long long)g->size,
- (unsigned long long)h->base,
- (unsigned long long)h->size);
- if (apertures_overlap(g, h))
- return true;
- }
- }
-
- return false;
-}
-
-static void do_unregister_framebuffer(struct fb_info *fb_info);
-
-#define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
-{
- int i;
-
-restart_removal:
- /* check all firmware fbs and kick off if the base addr overlaps */
- for_each_registered_fb(i) {
- struct apertures_struct *gen_aper;
- struct device *device;
-
- if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
- continue;
-
- gen_aper = registered_fb[i]->apertures;
- device = registered_fb[i]->device;
- if (fb_do_apertures_overlap(gen_aper, a) ||
- (primary && gen_aper && gen_aper->count &&
- gen_aper->ranges[0].base == VGA_FB_PHYS)) {
-
- printk(KERN_INFO "fb%d: switching to %s from %s\n",
- i, name, registered_fb[i]->fix.id);
-
- /*
- * If we kick-out a firmware driver, we also want to remove
- * the underlying platform device, such as simple-framebuffer,
- * VESA, EFI, etc. A native driver will then be able to
- * allocate the memory range.
- *
- * If it's not a platform device, at least print a warning. A
- * fix would add code to remove the device from the system. For
- * framebuffers without any Linux device, print a warning as
- * well.
- */
- if (!device) {
- pr_warn("fb%d: no device set\n", i);
- do_unregister_framebuffer(registered_fb[i]);
- } else if (dev_is_platform(device)) {
- /*
- * Drop the lock because if the device is unregistered, its
- * driver will call to unregister_framebuffer(), that takes
- * this lock.
- */
- mutex_unlock(&registration_lock);
- platform_device_unregister(to_platform_device(device));
- mutex_lock(&registration_lock);
- } else {
- pr_warn("fb%d: cannot remove device\n", i);
- do_unregister_framebuffer(registered_fb[i]);
- }
- /*
- * Restart the removal loop now that the device has been
- * unregistered and its associated framebuffer gone.
- */
- goto restart_removal;
- }
- }
-}
-
static int do_register_framebuffer(struct fb_info *fb_info)
{
int i;
@@ -1630,10 +1534,6 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- do_remove_conflicting_framebuffers(fb_info->apertures,
- fb_info->fix.id,
- fb_is_primary_device(fb_info));
-
if (num_registered_fb == FB_MAX)
return -ENXIO;
@@ -1752,100 +1652,31 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
put_fb_info(fb_info);
}
-/**
- * remove_conflicting_framebuffers - remove firmware-configured framebuffers
- * @a: memory range, users of which are to be removed
- * @name: requesting driver name
- * @primary: also kick vga16fb if present
- *
- * This function removes framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a is NULL all such devices are
- * removed.
- */
-int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+static int fb_aperture_acquire_for_platform_device(struct fb_info *fb_info)
{
- bool do_free = false;
-
- if (!a) {
- a = alloc_apertures(1);
- if (!a)
- return -ENOMEM;
-
- a->ranges[0].base = 0;
- a->ranges[0].size = ~0;
- do_free = true;
- }
-
- /*
- * If a driver asked to unregister a platform device registered by
- * sysfb, then can be assumed that this is a driver for a display
- * that is set up by the system firmware and has a generic driver.
- *
- * Drivers for devices that don't have a generic driver will never
- * ask for this, so let's assume that a real driver for the display
- * was already probed and prevent sysfb to register devices later.
- */
- sysfb_disable();
-
- mutex_lock(&registration_lock);
- do_remove_conflicting_framebuffers(a, name, primary);
- mutex_unlock(&registration_lock);
-
- if (do_free)
- kfree(a);
-
- return 0;
-}
-EXPORT_SYMBOL(remove_conflicting_framebuffers);
+ struct apertures_struct *ap = fb_info->apertures;
+ struct device *dev = fb_info->device;
+ struct platform_device *pdev;
+ unsigned int i;
+ int ret;
-/**
- * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
- * @pdev: PCI device
- * @name: requesting driver name
- *
- * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for any of @pdev's memory bars.
- *
- * The function assumes that PCI device with shadowed ROM drives a primary
- * display and so kicks out vga16fb.
- */
-int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
-{
- struct apertures_struct *ap;
- bool primary = false;
- int err, idx, bar;
+ if (!ap)
+ return 0;
- for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
- continue;
- idx++;
- }
+ if (!dev_is_platform(dev))
+ return 0;
- ap = alloc_apertures(idx);
- if (!ap)
- return -ENOMEM;
+ pdev = to_platform_device(dev);
- for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
- continue;
- ap->ranges[idx].base = pci_resource_start(pdev, bar);
- ap->ranges[idx].size = pci_resource_len(pdev, bar);
- pci_dbg(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
- (unsigned long)pci_resource_start(pdev, bar),
- (unsigned long)pci_resource_end(pdev, bar));
- idx++;
+ for (ret = 0, i = 0; i < ap->count; ++i) {
+ ret = devm_aperture_acquire_for_platform_device(pdev, ap->ranges[i].base,
+ ap->ranges[i].size);
+ if (ret)
+ break;
}
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW;
-#endif
- err = remove_conflicting_framebuffers(ap, name, primary);
- kfree(ap);
- return err;
+ return ret;
}
-EXPORT_SYMBOL(remove_conflicting_pci_framebuffers);
/**
* register_framebuffer - registers a frame buffer device
@@ -1861,6 +1692,12 @@ register_framebuffer(struct fb_info *fb_info)
{
int ret;
+ if (fb_info->flags & FBINFO_MISC_FIRMWARE) {
+ ret = fb_aperture_acquire_for_platform_device(fb_info);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&registration_lock);
ret = do_register_framebuffer(fb_info);
mutex_unlock(&registration_lock);
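With remove_conflicting_framebuffers() and remove_conflicting_pci_framebuffers() gone from fbmem.c, conflict handling moves to the aperture helpers: firmware framebuffers acquire their ranges via devm_aperture_acquire_for_platform_device() when they register, and native drivers kick out the generic ones themselves at the start of probe. A rough sketch of the probe ordering that the per-driver hunks below all follow ("examplefb" and the surrounding function are illustrative, not part of the patch):

	static int examplefb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		/* Remove firmware framebuffers (efifb, vesafb, simplefb, ...)
		 * sitting on this device's BARs before enabling the hardware. */
		ret = aperture_remove_conflicting_pci_devices(pdev, "examplefb");
		if (ret)
			return ret;

		ret = pci_enable_device(pdev);
		if (ret)
			return ret;

		/* ... map BARs, fill in struct fb_info, register_framebuffer() ... */
		return 0;
	}
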
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index c2a60b187467..4d7f63892dcc 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
if (WARN_ON(refcount_read(&info->count)))
return;
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ mutex_destroy(&info->bl_curve_mutex);
+#endif
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index d45355b9a58c..585af90a68a5 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -33,6 +33,7 @@
* (which, incidentally, is about the same saving as a 2.5in hard disk
* entering standby mode.)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1134,7 +1135,7 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx)
info->fb_size = int_cfb_info->fb.fix.smem_len;
info->info = int_cfb_info;
- strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
+ strscpy(info->dev_name, int_cfb_info->fb.fix.id,
sizeof(info->dev_name));
}
@@ -1229,7 +1230,7 @@ static int cyber2000fb_ddc_getsda(void *data)
static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
- strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
cfb->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1304,7 +1305,7 @@ static int cyber2000fb_i2c_getscl(void *data)
static int cyber2000fb_i2c_register(struct cfb_info *cfb)
{
- strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
cfb->i2c_adapter.owner = THIS_MODULE;
cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
@@ -1500,7 +1501,7 @@ static int cyber2000fb_setup(char *options)
if (strncmp(opt, "font:", 5) == 0) {
static char default_font_storage[40];
- strlcpy(default_font_storage, opt + 5,
+ strscpy(default_font_storage, opt + 5,
sizeof(default_font_storage));
default_font = default_font_storage;
continue;
@@ -1720,6 +1721,10 @@ static int cyberpro_pci_probe(struct pci_dev *dev,
sprintf(name, "CyberPro%4X", id->device);
+ err = aperture_remove_conflicting_pci_devices(dev, name);
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err)
return err;
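The strlcpy() to strscpy() conversions in this and the following files are behaviour-preserving for these fixed-size id strings; the practical differences are that strscpy() never reads the source beyond the destination size and returns -E2BIG on truncation rather than the would-be source length. The return value is still ignored here, as it was with strlcpy(), but truncation could be detected like this (illustrative):

	ssize_t n = strscpy(info->fix.id, name, sizeof(info->fix.id));
	if (n == -E2BIG)
		pr_warn("fb id truncated\n");
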
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index b3d580e57221..7cba3969a970 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -883,7 +883,7 @@ static void ffb_init_fix(struct fb_info *info)
} else
ffb_type_name = "Elite 3D";
- strlcpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 5d34d89fb665..1514c653a84f 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -6,6 +6,7 @@
* Copyright (C) 2005 Arcom Control Systems Ltd.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -320,6 +321,10 @@ static int gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_info *info;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "gx1fb");
+ if (ret)
+ return ret;
+
info = gx1fb_init_fbinfo(&pdev->dev);
if (!info)
return -ENOMEM;
@@ -410,13 +415,13 @@ static void __init gx1fb_setup(char *options)
continue;
if (!strncmp(this_opt, "mode:", 5))
- strlcpy(mode_option, this_opt + 5, sizeof(mode_option));
+ strscpy(mode_option, this_opt + 5, sizeof(mode_option));
else if (!strncmp(this_opt, "crt:", 4))
crt_option = !!simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "panel:", 6))
- strlcpy(panel_option, this_opt + 6, sizeof(panel_option));
+ strscpy(panel_option, this_opt + 6, sizeof(panel_option));
else
- strlcpy(mode_option, this_opt, sizeof(mode_option));
+ strscpy(mode_option, this_opt, sizeof(mode_option));
}
}
#endif
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 44089b331f91..2527bd80ec5f 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -15,6 +15,7 @@
*
* 16 MiB of framebuffer memory is assumed to be available.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -364,6 +365,10 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_videomode *modedb_ptr;
unsigned int modedb_size;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "gxfb");
+ if (ret)
+ return ret;
+
info = gxfb_init_fbinfo(&pdev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 66c81262d18f..9d26592dbfce 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -6,6 +6,7 @@
* Built from gxfb (which is Copyright (C) 2006 Arcom Control Systems Ltd.)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -484,6 +485,10 @@ static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_videomode *modedb_ptr;
unsigned int modedb_size;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "lxfb");
+ if (ret)
+ return ret;
+
info = lxfb_init_fbinfo(&pdev->dev);
if (info == NULL)
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index e5475ae1e158..0dcef4bec8d7 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Paul Mackerras, IBM Corp. <paulus@samba.org>
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fb.h>
@@ -621,6 +622,10 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct fb_var_screeninfo var;
enum gxt_cards cardtype;
+ err = aperture_remove_conflicting_pci_devices(pdev, "gxt4500fb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "gxt4500: cannot enable PCI device: %d\n",
@@ -650,7 +655,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
info->fix = gxt4500_fix;
- strlcpy(info->fix.id, cardinfo[cardtype].cardname,
+ strscpy(info->fix.id, cardinfo[cardtype].cardname,
sizeof(info->fix.id));
info->pseudo_palette = par->pseudo_palette;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 886c564787f1..072ce07ba9e0 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -45,6 +45,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
@@ -74,10 +75,6 @@
#define SYNTHVID_DEPTH_WIN8 32
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
enum pipe_msg_type {
PIPE_MSG_INVALID,
PIPE_MSG_DATA,
@@ -1074,8 +1071,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- remove_conflicting_framebuffers(info->apertures,
- KBUILD_MODNAME, false);
+ aperture_remove_conflicting_devices(info->apertures->ranges[0].base,
+ info->apertures->ranges[0].size,
+ false, KBUILD_MODNAME);
if (gen2vm) {
/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
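hyperv_fb keeps its apertures list only to feed the single range into aperture_remove_conflicting_devices(); a driver without an apertures_struct can pass a raw base/size pair directly (variable names below are illustrative):

	/* Release whatever generic driver owns this firmware-assigned range. */
	ret = aperture_remove_conflicting_devices(screen_base_phys, screen_size,
						  false, KBUILD_MODNAME);
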
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 7f09a0daaaa2..b795f6503cb6 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -12,6 +12,7 @@
* i740fb by Patrick LERDA, v0.9
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -159,7 +160,7 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1013,6 +1014,10 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
bool found = false;
u8 *edid;
+ ret = aperture_remove_conflicting_pci_devices(dev, "i740fb");
+ if (ret)
+ return ret;
+
info = framebuffer_alloc(sizeof(struct i740fb_par), &(dev->dev));
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index 13bbf7fe13bf..ff09f8c20bfc 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -2,12 +2,12 @@
* linux/drivers/video/i810_main.c -- Intel 810 frame buffer device
*
* Copyright (C) 2001 Antonino Daplas<adaplas@pol.net>
- * All Rights Reserved
+ * All Rights Reserved
*
* Contributors:
* Michael Vogt <mvogt@acm.org> - added support for Intel 815 chipsets
- * and enabling the power-on state of
- * external VGA connectors for
+ * and enabling the power-on state of
+ * external VGA connectors for
* secondary displays
*
* Fredrik Andersson <krueger@shell.linux.se> - alpha testing of
@@ -17,10 +17,10 @@
* timings support
*
* The code framework is a modification of vfb.c by Geert Uytterhoeven.
- * DotClock and PLL calculations are partly based on i810_driver.c
+ * DotClock and PLL calculations are partly based on i810_driver.c
* in xfree86 v4.0.3 by Precision Insight.
- * Watermark calculation and tables are based on i810_wmark.c
- * in xfre86 v4.0.3 by Precision Insight. Slight modifications
+ * Watermark calculation and tables are based on i810_wmark.c
+ * in xfre86 v4.0.3 by Precision Insight. Slight modifications
* only to allow for integer operations instead of floating point.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -28,6 +28,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -204,8 +205,8 @@ static void i810_dram_off(u8 __iomem *mmio, u8 mode)
* @mode: protect/unprotect
*
* DESCRIPTION:
- * The IBM VGA standard allows protection of certain VGA registers.
- * This will protect or unprotect them.
+ * The IBM VGA standard allows protection of certain VGA registers.
+ * This will protect or unprotect them.
*/
static void i810_protect_regs(u8 __iomem *mmio, int mode)
{
@@ -215,7 +216,7 @@ static void i810_protect_regs(u8 __iomem *mmio, int mode)
reg = i810_readb(CR_DATA_CGA, mmio);
reg = (mode == OFF) ? reg & ~0x80 :
reg | 0x80;
-
+
i810_writeb(CR_INDEX_CGA, mmio, CR11);
i810_writeb(CR_DATA_CGA, mmio, reg);
}
@@ -225,18 +226,18 @@ static void i810_protect_regs(u8 __iomem *mmio, int mode)
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
- * Loads the P, M, and N registers.
+ * Loads the P, M, and N registers.
*/
static void i810_load_pll(struct i810fb_par *par)
{
u32 tmp1, tmp2;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
tmp1 = par->regs.M | par->regs.N << 16;
tmp2 = i810_readl(DCLK_2D, mmio);
tmp2 &= ~MN_MASK;
i810_writel(DCLK_2D, mmio, tmp1 | tmp2);
-
+
tmp1 = par->regs.P;
tmp2 = i810_readl(DCLK_0DS, mmio);
tmp2 &= ~(P_OR << 16);
@@ -254,7 +255,7 @@ static void i810_load_pll(struct i810fb_par *par)
* Load values to VGA registers
*/
static void i810_load_vga(struct i810fb_par *par)
-{
+{
u8 __iomem *mmio = par->mmio_start_virtual;
/* interlace */
@@ -327,7 +328,7 @@ static void i810_load_2d(struct i810fb_par *par)
u8 tmp8;
u8 __iomem *mmio = par->mmio_start_virtual;
- i810_writel(FW_BLC, mmio, par->watermark);
+ i810_writel(FW_BLC, mmio, par->watermark);
tmp = i810_readl(PIXCONF, mmio);
tmp |= 1 | 1 << 20;
i810_writel(PIXCONF, mmio, tmp);
@@ -339,7 +340,7 @@ static void i810_load_2d(struct i810fb_par *par)
tmp8 |= 2;
i810_writeb(GR_INDEX, mmio, GR10);
i810_writeb(GR_DATA, mmio, tmp8);
-}
+}
/**
* i810_hires - enables high resolution mode
@@ -348,7 +349,7 @@ static void i810_load_2d(struct i810fb_par *par)
static void i810_hires(u8 __iomem *mmio)
{
u8 val;
-
+
i810_writeb(CR_INDEX_CGA, mmio, CR80);
val = i810_readb(CR_DATA_CGA, mmio);
i810_writeb(CR_INDEX_CGA, mmio, CR80);
@@ -363,13 +364,13 @@ static void i810_hires(u8 __iomem *mmio)
*
* DESCRIPTION:
* Loads the characters per line
- */
+ */
static void i810_load_pitch(struct i810fb_par *par)
{
u32 tmp, pitch;
u8 val;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
pitch = par->pitch >> 3;
i810_writeb(SR_INDEX, mmio, SR01);
val = i810_readb(SR_DATA, mmio);
@@ -381,7 +382,7 @@ static void i810_load_pitch(struct i810fb_par *par)
tmp = pitch & 0xFF;
i810_writeb(CR_INDEX_CGA, mmio, CR13);
i810_writeb(CR_DATA_CGA, mmio, (u8) tmp);
-
+
tmp = pitch >> 8;
i810_writeb(CR_INDEX_CGA, mmio, CR41);
val = i810_readb(CR_DATA_CGA, mmio) & ~0x0F;
@@ -414,7 +415,7 @@ static void i810_load_color(struct i810fb_par *par)
/**
* i810_load_regs - loads all registers for the mode
* @par: pointer to i810fb_par structure
- *
+ *
* DESCRIPTION:
* Loads registers
*/
@@ -428,7 +429,7 @@ static void i810_load_regs(struct i810fb_par *par)
i810_load_pll(par);
i810_load_vga(par);
i810_load_vgax(par);
- i810_dram_off(mmio, ON);
+ i810_dram_off(mmio, ON);
i810_load_2d(par);
i810_hires(mmio);
i810_screen_off(mmio, ON);
@@ -443,7 +444,7 @@ static void i810_write_dac(u8 regno, u8 red, u8 green, u8 blue,
i810_writeb(CLUT_INDEX_WRITE, mmio, regno);
i810_writeb(CLUT_DATA, mmio, red);
i810_writeb(CLUT_DATA, mmio, green);
- i810_writeb(CLUT_DATA, mmio, blue);
+ i810_writeb(CLUT_DATA, mmio, blue);
}
static void i810_read_dac(u8 regno, u8 *red, u8 *green, u8 *blue,
@@ -456,13 +457,13 @@ static void i810_read_dac(u8 regno, u8 *red, u8 *green, u8 *blue,
}
/************************************************************
- * VGA State Restore *
+ * VGA State Restore *
************************************************************/
static void i810_restore_pll(struct i810fb_par *par)
{
u32 tmp1, tmp2;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
tmp1 = par->hw_state.dclk_2d;
tmp2 = i810_readl(DCLK_2D, mmio);
tmp1 &= ~MN_MASK;
@@ -494,7 +495,7 @@ static void i810_restore_vgax(struct i810fb_par *par)
{
u8 i, j;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
for (i = 0; i < 4; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR30+i);
i810_writeb(CR_DATA_CGA, mmio, *(&(par->hw_state.cr30) + i));
@@ -528,7 +529,7 @@ static void i810_restore_vga(struct i810fb_par *par)
{
u8 i;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
for (i = 0; i < 10; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR00 + i);
i810_writeb(CR_DATA_CGA, mmio, *((&par->hw_state.cr00) + i));
@@ -559,10 +560,10 @@ static void i810_restore_2d(struct i810fb_par *par)
u8 __iomem *mmio = par->mmio_start_virtual;
tmp_word = i810_readw(BLTCNTL, mmio);
- tmp_word &= ~(3 << 4);
+ tmp_word &= ~(3 << 4);
tmp_word |= par->hw_state.bltcntl;
i810_writew(BLTCNTL, mmio, tmp_word);
-
+
i810_dram_off(mmio, OFF);
i810_writel(PIXCONF, mmio, par->hw_state.pixconf);
i810_dram_off(mmio, ON);
@@ -577,7 +578,7 @@ static void i810_restore_2d(struct i810fb_par *par)
tmp_long |= par->hw_state.fw_blc;
i810_writel(FW_BLC, mmio, tmp_long);
- i810_writel(HWS_PGA, mmio, par->hw_state.hws_pga);
+ i810_writel(HWS_PGA, mmio, par->hw_state.hws_pga);
i810_writew(IER, mmio, par->hw_state.ier);
i810_writew(IMR, mmio, par->hw_state.imr);
i810_writel(DPLYSTAS, mmio, par->hw_state.dplystas);
@@ -621,7 +622,7 @@ static void i810_save_vgax(struct i810fb_par *par)
i810_writeb(CR_INDEX_CGA, mmio, CR41);
par->hw_state.cr41 = i810_readb(CR_DATA_CGA, mmio);
i810_writeb(CR_INDEX_CGA, mmio, CR70);
- par->hw_state.cr70 = i810_readb(CR_DATA_CGA, mmio);
+ par->hw_state.cr70 = i810_readb(CR_DATA_CGA, mmio);
par->hw_state.msr = i810_readb(MSR_READ, mmio);
i810_writeb(CR_INDEX_CGA, mmio, CR80);
par->hw_state.cr80 = i810_readb(CR_DATA_CGA, mmio);
@@ -654,8 +655,8 @@ static void i810_save_2d(struct i810fb_par *par)
par->hw_state.pixconf = i810_readl(PIXCONF, mmio);
par->hw_state.fw_blc = i810_readl(FW_BLC, mmio);
par->hw_state.bltcntl = i810_readw(BLTCNTL, mmio);
- par->hw_state.hwstam = i810_readw(HWSTAM, mmio);
- par->hw_state.hws_pga = i810_readl(HWS_PGA, mmio);
+ par->hw_state.hwstam = i810_readw(HWSTAM, mmio);
+ par->hw_state.hws_pga = i810_readl(HWS_PGA, mmio);
par->hw_state.ier = i810_readw(IER, mmio);
par->hw_state.imr = i810_readw(IMR, mmio);
par->hw_state.dplystas = i810_readl(DPLYSTAS, mmio);
@@ -669,7 +670,7 @@ static void i810_save_vga_state(struct i810fb_par *par)
}
/************************************************************
- * Helpers *
+ * Helpers *
************************************************************/
/**
* get_line_length - calculates buffer pitch in bytes
@@ -678,12 +679,12 @@ static void i810_save_vga_state(struct i810fb_par *par)
* @bpp: bits per pixel
*
* DESCRIPTION:
- * Calculates buffer pitch in bytes.
+ * Calculates buffer pitch in bytes.
*/
static u32 get_line_length(struct i810fb_par *par, int xres_virtual, int bpp)
{
u32 length;
-
+
length = xres_virtual*bpp;
length = (length+31)&-32;
length >>= 3;
@@ -716,17 +717,17 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
n_target_max = 30;
/*
- * find P such that target freq is 16x reference freq (Hz).
+ * find P such that target freq is 16x reference freq (Hz).
*/
p_divisor = 1;
p_target = 0;
- while(!((1000000 * p_divisor)/(16 * 24 * target_freq)) &&
+ while(!((1000000 * p_divisor)/(16 * 24 * target_freq)) &&
p_divisor <= 32) {
p_divisor <<= 1;
p_target++;
}
- n_reg = m_reg = n_target = 3;
+ n_reg = m_reg = n_target = 3;
while (diff_min && mod_min && (n_target < n_target_max)) {
f_out = (p_divisor * n_reg * 1000000)/(4 * 24 * m_reg);
mod = (p_divisor * n_reg * 1000000) % (4 * 24 * m_reg);
@@ -744,14 +745,14 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
diff_min = diff;
n_best = n_target;
m_best = m_target;
- }
+ }
if (!diff && mod_min > mod) {
mod_min = mod;
n_best = n_target;
m_best = m_target;
}
- }
+ }
if (m) *m = (m_best - 2) & 0x3FF;
if (n) *n = (n_best - 2) & 0x3FF;
if (p) *p = (p_target << 4);
@@ -772,7 +773,7 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
static void i810_enable_cursor(u8 __iomem *mmio, int mode)
{
u32 temp;
-
+
temp = i810_readl(PIXCONF, mmio);
temp = (mode == ON) ? temp | CURSOR_ENABLE_MASK :
temp & ~CURSOR_ENABLE_MASK;
@@ -786,10 +787,10 @@ static void i810_reset_cursor_image(struct i810fb_par *par)
int i, j;
for (i = 64; i--; ) {
- for (j = 0; j < 8; j++) {
- i810_writeb(j, addr, 0xff);
- i810_writeb(j+8, addr, 0x00);
- }
+ for (j = 0; j < 8; j++) {
+ i810_writeb(j, addr, 0xff);
+ i810_writeb(j+8, addr, 0x00);
+ }
addr +=16;
}
}
@@ -800,9 +801,9 @@ static void i810_load_cursor_image(int width, int height, u8 *data,
u8 __iomem *addr = par->cursor_heap.virtual;
int i, j, w = width/8;
int mod = width % 8, t_mask, d_mask;
-
+
t_mask = 0xff >> mod;
- d_mask = ~(0xff >> mod);
+ d_mask = ~(0xff >> mod);
for (i = height; i--; ) {
for (j = 0; j < w; j++) {
i810_writeb(j+0, addr, 0x00);
@@ -854,7 +855,7 @@ static void i810_init_cursor(struct i810fb_par *par)
i810_enable_cursor(mmio, OFF);
i810_writel(CURBASE, mmio, par->cursor_heap.physical);
i810_writew(CURCNTR, mmio, COORD_ACTIVE | CURSOR_MODE_64_XOR);
-}
+}
/*********************************************************************
* Framebuffer hook helpers *
@@ -873,7 +874,7 @@ static void i810_round_off(struct fb_var_screeninfo *var)
u32 xres, yres, vxres, vyres;
/*
- * Presently supports only these configurations
+ * Presently supports only these configurations
*/
xres = var->xres;
@@ -883,20 +884,20 @@ static void i810_round_off(struct fb_var_screeninfo *var)
var->bits_per_pixel += 7;
var->bits_per_pixel &= ~7;
-
+
if (var->bits_per_pixel < 8)
var->bits_per_pixel = 8;
- if (var->bits_per_pixel > 32)
+ if (var->bits_per_pixel > 32)
var->bits_per_pixel = 32;
round_off_xres(&xres);
if (xres < 40)
xres = 40;
- if (xres > 2048)
+ if (xres > 2048)
xres = 2048;
xres = (xres + 7) & ~7;
- if (vxres < xres)
+ if (vxres < xres)
vxres = xres;
round_off_yres(&xres, &yres);
@@ -905,7 +906,7 @@ static void i810_round_off(struct fb_var_screeninfo *var)
if (yres >= 2048)
yres = 2048;
- if (vyres < yres)
+ if (vyres < yres)
vyres = yres;
if (var->bits_per_pixel == 32)
@@ -917,30 +918,30 @@ static void i810_round_off(struct fb_var_screeninfo *var)
var->hsync_len = (var->hsync_len + 4) & ~7;
if (var->vmode & FB_VMODE_INTERLACED) {
- if (!((yres + var->upper_margin + var->vsync_len +
+ if (!((yres + var->upper_margin + var->vsync_len +
var->lower_margin) & 1))
var->upper_margin++;
}
-
+
var->xres = xres;
var->yres = yres;
var->xres_virtual = vxres;
var->yres_virtual = vyres;
-}
+}
/**
* set_color_bitfields - sets rgba fields
* @var: pointer to fb_var_screeninfo
*
* DESCRIPTION:
- * The length, offset and ordering for each color field
- * (red, green, blue) will be set as specified
+ * The length, offset and ordering for each color field
+ * (red, green, blue) will be set as specified
* by the hardware
- */
+ */
static void set_color_bitfields(struct fb_var_screeninfo *var)
{
switch (var->bits_per_pixel) {
- case 8:
+ case 8:
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
@@ -984,11 +985,11 @@ static void set_color_bitfields(struct fb_var_screeninfo *var)
* @info: pointer to fb_info
*
* DESCRIPTION:
- * This will check if the framebuffer size is sufficient
- * for the current mode and if the user's monitor has the
+ * This will check if the framebuffer size is sufficient
+ * for the current mode and if the user's monitor has the
* required specifications to display the current mode.
*/
-static int i810_check_params(struct fb_var_screeninfo *var,
+static int i810_check_params(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
@@ -1007,14 +1008,14 @@ static int i810_check_params(struct fb_var_screeninfo *var,
vyres = info->var.yres;
vxres = par->fb.size/vyres;
vxres /= var->bits_per_pixel >> 3;
- line_length = get_line_length(par, vxres,
+ line_length = get_line_length(par, vxres,
var->bits_per_pixel);
vidmem = line_length * info->var.yres;
if (vxres < var->xres) {
printk("i810fb: required video memory, "
"%d bytes, for %dx%d-%d (virtual) "
- "is out of range\n",
- vidmem, vxres, vyres,
+ "is out of range\n",
+ vidmem, vxres, vyres,
var->bits_per_pixel);
return -ENOMEM;
}
@@ -1074,7 +1075,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
}
return retval;
-}
+}
/**
* encode_fix - fill up fb_fix_screeninfo structure
@@ -1131,9 +1132,9 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
*
* DESCRIPTION:
* Based on the contents of @var, @par will be dynamically filled up.
- * @par contains all information necessary to modify the hardware.
+ * @par contains all information necessary to modify the hardware.
*/
-static void decode_var(const struct fb_var_screeninfo *var,
+static void decode_var(const struct fb_var_screeninfo *var,
struct i810fb_par *par)
{
u32 xres, yres, vxres, vyres;
@@ -1175,13 +1176,13 @@ static void decode_var(const struct fb_var_screeninfo *var,
if (var->nonstd && var->bits_per_pixel != 8)
par->pixconf |= 1 << 27;
- i810_calc_dclk(var->pixclock, &par->regs.M,
+ i810_calc_dclk(var->pixclock, &par->regs.M,
&par->regs.N, &par->regs.P);
i810fb_encode_registers(var, par, xres, yres);
par->watermark = i810_get_watermark(var, par);
par->pitch = get_line_length(par, vxres, var->bits_per_pixel);
-}
+}
/**
* i810fb_getcolreg - gets red, green and blue values of the hardware DAC
@@ -1196,7 +1197,7 @@ static void decode_var(const struct fb_var_screeninfo *var,
* Gets the red, green and blue values of the hardware DAC as pointed by @regno
* and writes them to @red, @green and @blue respectively
*/
-static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
+static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
u8 *transp, struct fb_info *info)
{
struct i810fb_par *par = info->par;
@@ -1212,18 +1213,18 @@ static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
temp = i810_readb(PIXCONF1, mmio);
i810_writeb(PIXCONF1, mmio, temp & ~EXTENDED_PALETTE);
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
- info->var.green.length == 5)
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ info->var.green.length == 5)
i810_read_dac(regno * 8, red, green, blue, mmio);
- else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
info->var.green.length == 6) {
u8 tmp;
i810_read_dac(regno * 8, red, &tmp, blue, mmio);
i810_read_dac(regno * 4, &tmp, green, &tmp, mmio);
}
- else
+ else
i810_read_dac(regno, red, green, blue, mmio);
*transp = 0;
@@ -1232,7 +1233,7 @@ static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
return 0;
}
-/******************************************************************
+/******************************************************************
* Framebuffer device-specific hooks *
******************************************************************/
@@ -1252,7 +1253,7 @@ static int i810fb_open(struct fb_info *info, int user)
par->use_count++;
mutex_unlock(&par->open_lock);
-
+
return 0;
}
@@ -1273,13 +1274,13 @@ static int i810fb_release(struct fb_info *info, int user)
par->use_count--;
mutex_unlock(&par->open_lock);
-
+
return 0;
}
-static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
+static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
@@ -1302,24 +1303,24 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
temp = i810_readb(PIXCONF1, mmio);
i810_writeb(PIXCONF1, mmio, temp & ~EXTENDED_PALETTE);
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
info->var.green.length == 5) {
- for (i = 0; i < 8; i++)
- i810_write_dac((u8) (regno * 8) + i, (u8) red,
+ for (i = 0; i < 8; i++)
+ i810_write_dac((u8) (regno * 8) + i, (u8) red,
(u8) green, (u8) blue, mmio);
- } else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ } else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
info->var.green.length == 6) {
u8 r, g, b;
if (regno < 32) {
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++)
i810_write_dac((u8) (regno * 8) + i,
- (u8) red, (u8) green,
+ (u8) red, (u8) green,
(u8) blue, mmio);
}
i810_read_dac((u8) (regno*4), &r, &g, &b, mmio);
- for (i = 0; i < 4; i++)
- i810_write_dac((u8) (regno*4) + i, r, (u8) green,
+ for (i = 0; i < 4; i++)
+ i810_write_dac((u8) (regno*4) + i, r, (u8) green,
b, mmio);
} else if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) {
i810_write_dac((u8) regno, (u8) red, (u8) green,
@@ -1330,20 +1331,20 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno < 16) {
switch (info->var.bits_per_pixel) {
- case 16:
+ case 16:
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- if (info->var.green.length == 5)
- ((u32 *)info->pseudo_palette)[regno] =
+ if (info->var.green.length == 5)
+ ((u32 *)info->pseudo_palette)[regno] =
(regno << 10) | (regno << 5) |
regno;
else
- ((u32 *)info->pseudo_palette)[regno] =
+ ((u32 *)info->pseudo_palette)[regno] =
(regno << 11) | (regno << 5) |
regno;
} else {
if (info->var.green.length == 5) {
/* RGB 555 */
- ((u32 *)info->pseudo_palette)[regno] =
+ ((u32 *)info->pseudo_palette)[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
@@ -1358,12 +1359,12 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
break;
case 24: /* RGB 888 */
case 32: /* RGBA 8888 */
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- ((u32 *)info->pseudo_palette)[regno] =
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ ((u32 *)info->pseudo_palette)[regno] =
(regno << 16) | (regno << 8) |
regno;
- else
- ((u32 *)info->pseudo_palette)[regno] =
+ else
+ ((u32 *)info->pseudo_palette)[regno] =
((red & 0xff00) << 8) |
(green & 0xff00) |
((blue & 0xff00) >> 8);
@@ -1373,13 +1374,13 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-static int i810fb_pan_display(struct fb_var_screeninfo *var,
+static int i810fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
u32 total;
-
- total = var->xoffset * par->depth +
+
+ total = var->xoffset * par->depth +
var->yoffset * info->fix.line_length;
i810fb_load_front(total, info);
@@ -1391,7 +1392,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info)
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
int mode = 0, pwr, scr_off = 0;
-
+
pwr = i810_readl(PWR_CLKC, mmio);
switch (blank_mode) {
@@ -1421,7 +1422,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info)
scr_off = OFF;
break;
default:
- return -EINVAL;
+ return -EINVAL;
}
i810_screen_off(mmio, scr_off);
@@ -1452,7 +1453,7 @@ static int i810fb_set_par(struct fb_info *info)
return 0;
}
-static int i810fb_check_var(struct fb_var_screeninfo *var,
+static int i810fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int err;
@@ -1550,7 +1551,7 @@ static const struct fb_ops i810fb_ops = {
.fb_set_par = i810fb_set_par,
.fb_setcolreg = i810fb_setcolreg,
.fb_blank = i810fb_blank,
- .fb_pan_display = i810fb_pan_display,
+ .fb_pan_display = i810fb_pan_display,
.fb_fillrect = i810fb_fillrect,
.fb_copyarea = i810fb_copyarea,
.fb_imageblit = i810fb_imageblit,
@@ -1593,7 +1594,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
return 0;
}
-static int i810fb_resume(struct pci_dev *dev)
+static int i810fb_resume(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct i810fb_par *par = info->par;
@@ -1628,14 +1629,14 @@ fail:
/***********************************************************************
* AGP resource allocation *
***********************************************************************/
-
+
static void i810_fix_pointers(struct i810fb_par *par)
{
par->fb.physical = par->aperture.physical+(par->fb.offset << 12);
par->fb.virtual = par->aperture.virtual+(par->fb.offset << 12);
- par->iring.physical = par->aperture.physical +
+ par->iring.physical = par->aperture.physical +
(par->iring.offset << 12);
- par->iring.virtual = par->aperture.virtual +
+ par->iring.virtual = par->aperture.virtual +
(par->iring.offset << 12);
par->cursor_heap.virtual = par->aperture.virtual+
(par->cursor_heap.offset << 12);
@@ -1666,7 +1667,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
struct i810fb_par *par = info->par;
int size;
struct agp_bridge_data *bridge;
-
+
i810_fix_offsets(par);
size = par->fb.size + par->iring.size;
@@ -1674,7 +1675,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
printk("i810fb_alloc_fbmem: cannot acquire agpgart\n");
return -ENODEV;
}
- if (!(par->i810_gtt.i810_fb_memory =
+ if (!(par->i810_gtt.i810_fb_memory =
agp_allocate_memory(bridge, size >> 12, AGP_NORMAL_MEMORY))) {
printk("i810fb_alloc_fbmem: can't allocate framebuffer "
"memory\n");
@@ -1686,9 +1687,9 @@ static int i810_alloc_agp_mem(struct fb_info *info)
printk("i810fb_alloc_fbmem: can't bind framebuffer memory\n");
agp_backend_release(bridge);
return -EBUSY;
- }
-
- if (!(par->i810_gtt.i810_cursor_memory =
+ }
+
+ if (!(par->i810_gtt.i810_cursor_memory =
agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
AGP_PHYSICAL_MEMORY))) {
printk("i810fb_alloc_cursormem: can't allocate "
@@ -1701,7 +1702,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
printk("i810fb_alloc_cursormem: cannot bind cursor memory\n");
agp_backend_release(bridge);
return -EBUSY;
- }
+ }
par->cursor_heap.physical = par->i810_gtt.i810_cursor_memory->physical;
@@ -1712,8 +1713,8 @@ static int i810_alloc_agp_mem(struct fb_info *info)
return 0;
}
-/***************************************************************
- * Initialization *
+/***************************************************************
+ * Initialization *
***************************************************************/
/**
@@ -1728,7 +1729,7 @@ static void i810_init_monspecs(struct fb_info *info)
{
if (!hsync1)
hsync1 = HFMIN;
- if (!hsync2)
+ if (!hsync2)
hsync2 = HFMAX;
if (!info->monspecs.hfmax)
info->monspecs.hfmax = hsync2;
@@ -1739,7 +1740,7 @@ static void i810_init_monspecs(struct fb_info *info)
if (!vsync1)
vsync1 = VFMIN;
- if (!vsync2)
+ if (!vsync2)
vsync2 = VFMAX;
if (IS_DVT && vsync1 < 60)
vsync1 = 60;
@@ -1747,7 +1748,7 @@ static void i810_init_monspecs(struct fb_info *info)
info->monspecs.vfmax = vsync2;
if (!info->monspecs.vfmin)
info->monspecs.vfmin = vsync1;
- if (vsync2 < vsync1)
+ if (vsync2 < vsync1)
info->monspecs.vfmin = vsync2;
}
@@ -1760,27 +1761,27 @@ static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
{
mutex_init(&par->open_lock);
- if (voffset)
+ if (voffset)
v_offset_default = voffset;
else if (par->aperture.size > 32 * 1024 * 1024)
v_offset_default = 16;
else
v_offset_default = 8;
- if (!vram)
+ if (!vram)
vram = 1;
- if (accel)
+ if (accel)
par->dev_flags |= HAS_ACCELERATION;
- if (sync)
+ if (sync)
par->dev_flags |= ALWAYS_SYNC;
par->ddc_num = (ddc3 ? 3 : 2);
if (bpp < 8)
bpp = 8;
-
+
par->i810fb_ops = i810fb_ops;
if (xres)
@@ -1793,7 +1794,7 @@ static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
else
info->var.yres = 480;
- if (!vyres)
+ if (!vyres)
vyres = (vram << 20)/(info->var.xres*bpp >> 3);
info->var.yres_virtual = vyres;
@@ -1802,12 +1803,12 @@ static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
if (dcolor)
info->var.nonstd = 1;
- if (par->dev_flags & HAS_ACCELERATION)
+ if (par->dev_flags & HAS_ACCELERATION)
info->var.accel_flags = 1;
i810_init_monspecs(info);
}
-
+
/**
* i810_init_device - initialize device
* @par: pointer to i810fb_par structure
@@ -1840,9 +1841,9 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
{
int err;
- if ((err = pci_enable_device(par->dev))) {
+ if ((err = pci_enable_device(par->dev))) {
printk("i810fb_init: cannot enable device\n");
- return err;
+ return err;
}
par->res_flags |= PCI_DEVICE_ENABLED;
@@ -1860,8 +1861,8 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
return -ENOMEM;
}
- if (!request_mem_region(par->aperture.physical,
- par->aperture.size,
+ if (!request_mem_region(par->aperture.physical,
+ par->aperture.size,
i810_pci_list[entry->driver_data])) {
printk("i810fb_init: cannot request framebuffer region\n");
return -ENODEV;
@@ -1874,16 +1875,16 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
printk("i810fb_init: cannot remap framebuffer region\n");
return -ENODEV;
}
-
- if (!request_mem_region(par->mmio_start_phys,
- MMIO_SIZE,
+
+ if (!request_mem_region(par->mmio_start_phys,
+ MMIO_SIZE,
i810_pci_list[entry->driver_data])) {
printk("i810fb_init: cannot request mmio region\n");
return -ENODEV;
}
par->res_flags |= MMIO_REQ;
- par->mmio_start_virtual = ioremap(par->mmio_start_phys,
+ par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
printk("i810fb_init: cannot remap mmio region\n");
@@ -1963,7 +1964,7 @@ static int i810fb_setup(char *options)
if (!options || !*options)
return 0;
-
+
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "mtrr", 4))
mtrr = true;
@@ -1987,13 +1988,13 @@ static int i810fb_setup(char *options)
bpp = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "hsync1:", 7)) {
hsync1 = simple_strtoul(this_opt+7, &suffix, 0);
- if (strncmp(suffix, "H", 1))
+ if (strncmp(suffix, "H", 1))
hsync1 *= 1000;
} else if (!strncmp(this_opt, "hsync2:", 7)) {
hsync2 = simple_strtoul(this_opt+7, &suffix, 0);
- if (strncmp(suffix, "H", 1))
+ if (strncmp(suffix, "H", 1))
hsync2 *= 1000;
- } else if (!strncmp(this_opt, "vsync1:", 7))
+ } else if (!strncmp(this_opt, "vsync1:", 7))
vsync1 = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "vsync2:", 7))
vsync2 = simple_strtoul(this_opt+7, NULL, 0);
@@ -2016,6 +2017,10 @@ static int i810fb_init_pci(struct pci_dev *dev,
struct fb_videomode mode;
int err = -1, vfreq, hfreq, pixclock;
+ err = aperture_remove_conflicting_pci_devices(dev, "i810fb");
+ if (err)
+ return err;
+
info = framebuffer_alloc(sizeof(struct i810fb_par), &dev->dev);
if (!info)
return -ENOMEM;
@@ -2044,7 +2049,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
return err;
}
- i810_init_device(par);
+ i810_init_device(par);
info->screen_base = par->fb.virtual;
info->fbops = &par->i810fb_ops;
@@ -2064,21 +2069,21 @@ static int i810fb_init_pci(struct pci_dev *dev,
err = register_framebuffer(info);
if (err < 0) {
- i810fb_release_resource(info, par);
+ i810fb_release_resource(info, par);
printk("i810fb_init: cannot register framebuffer device\n");
- return err;
- }
+ return err;
+ }
pci_set_drvdata(dev, info);
pixclock = 1000000000/(info->var.pixclock);
pixclock *= 1000;
- hfreq = pixclock/(info->var.xres + info->var.left_margin +
+ hfreq = pixclock/(info->var.xres + info->var.left_margin +
info->var.hsync_len + info->var.right_margin);
vfreq = hfreq/(info->var.yres + info->var.upper_margin +
info->var.vsync_len + info->var.lower_margin);
printk("I810FB: fb%d : %s v%d.%d.%d%s\n"
- "I810FB: Video RAM : %dK\n"
+ "I810FB: Video RAM : %dK\n"
"I810FB: Monitor : H: %d-%d KHz V: %d-%d Hz\n"
"I810FB: Mode : %dx%d-%dbpp@%dHz\n",
info->node,
@@ -2086,7 +2091,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
VERSION_MAJOR, VERSION_MINOR, VERSION_TEENIE, BRANCH_VERSION,
(int) par->fb.size>>10, info->monspecs.hfmin/1000,
info->monspecs.hfmax/1000, info->monspecs.vfmin,
- info->monspecs.vfmax, info->var.xres,
+ info->monspecs.vfmax, info->var.xres,
info->var.yres, info->var.bits_per_pixel, vfreq);
return 0;
}
@@ -2095,7 +2100,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
* De-initialization *
***************************************************************/
-static void i810fb_release_resource(struct fb_info *info,
+static void i810fb_release_resource(struct fb_info *info,
struct i810fb_par *par)
{
struct gtt_data *gtt = &par->i810_gtt;
@@ -2128,10 +2133,10 @@ static void i810fb_remove_pci(struct pci_dev *dev)
struct fb_info *info = pci_get_drvdata(dev);
struct i810fb_par *par = info->par;
- unregister_framebuffer(info);
+ unregister_framebuffer(info);
i810fb_release_resource(info, par);
printk("cleanup_module: unloaded i810 framebuffer device\n");
-}
+}
#ifndef MODULE
static int i810fb_init(void)
@@ -2144,7 +2149,7 @@ static int i810fb_init(void)
return pci_register_driver(&i810fb_driver);
}
-#endif
+#endif
/*********************************************************************
* Modularization *
@@ -2161,7 +2166,7 @@ static int i810fb_init(void)
}
module_param(vram, int, 0);
-MODULE_PARM_DESC(vram, "System RAM to allocate to framebuffer in MiB"
+MODULE_PARM_DESC(vram, "System RAM to allocate to framebuffer in MiB"
" (default=4)");
module_param(voffset, int, 0);
MODULE_PARM_DESC(voffset, "at what offset to place start of framebuffer "
@@ -2186,7 +2191,7 @@ module_param(vsync1, int, 0);
MODULE_PARM_DESC(vsync1, "Minimum vertical frequency of monitor in Hz"
" (default = 50)");
module_param(vsync2, int, 0);
-MODULE_PARM_DESC(vsync2, "Maximum vertical frequency of monitor in Hz"
+MODULE_PARM_DESC(vsync2, "Maximum vertical frequency of monitor in Hz"
" (default = 60)");
module_param(accel, bool, 0);
MODULE_PARM_DESC(accel, "Use Acceleration (BLIT) engine (default = 0)");
@@ -2208,7 +2213,7 @@ MODULE_PARM_DESC(mode_option, "Specify initial video mode");
MODULE_AUTHOR("Tony A. Daplas");
MODULE_DESCRIPTION("Framebuffer device for the Intel 810/815 and"
" compatible cards");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL");
static void __exit i810fb_exit(void)
{
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 16f272a50811..d7edb9c5d3a3 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -16,6 +16,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -86,7 +87,7 @@ enum {
SSTATUS = 36, /* 0x90 */
PRC = 37, /* 0x94 */
-#if 0
+#if 0
/* PCI Registers */
DVID = 0x00000000L,
SC = 0x00000004L,
@@ -103,8 +104,8 @@ enum {
PDATA = 0x04,
PPMASK = 0x08,
PADDRR = 0x0c,
- PIDXLO = 0x10,
- PIDXHI = 0x14,
+ PIDXLO = 0x10,
+ PIDXHI = 0x14,
PIDXDATA= 0x18,
PIDXCTL = 0x1c
};
@@ -131,7 +132,7 @@ enum {
SYSCLKC = 0x18, /* () System Clock C */
/*
* Dot clock rate is 20MHz * (m + 1) / ((n + 1) * (p ? 2 * p : 1)
- * c is charge pump bias which depends on the VCO frequency
+ * c is charge pump bias which depends on the VCO frequency
*/
PIXM0 = 0x20, /* () Pixel M 0 */
PIXN0 = 0x21, /* () Pixel N 0 */
@@ -320,7 +321,7 @@ struct imstt_par {
__u32 ramdac;
__u32 palette[16];
};
-
+
enum {
IBM = 0,
TVP = 1
@@ -373,7 +374,7 @@ static struct imstt_regvals tvp_reg_init_17 = {
static struct imstt_regvals tvp_reg_init_18 = {
1152,
- 0x0009, 0x0011, 0x059, 0x5b, 0x0003, 0x0031, 0x0397, 0x039a, 0x0000,
+ 0x0009, 0x0011, 0x059, 0x5b, 0x0003, 0x0031, 0x0397, 0x039a, 0x0000,
0xfd, 0x3a, 0xf1,
{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};
@@ -856,10 +857,10 @@ imsttfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
static int
-imsttfb_set_par(struct fb_info *info)
+imsttfb_set_par(struct fb_info *info)
{
struct imstt_par *par = info->par;
-
+
if (!compute_imstt_regvals(par, info->var.xres, info->var.yres))
return -EINVAL;
@@ -930,7 +931,7 @@ imsttfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-static int
+static int
imsttfb_blank(int blank, struct fb_info *info)
{
struct imstt_par *par = info->par;
@@ -986,7 +987,7 @@ imsttfb_blank(int blank, struct fb_info *info)
static void
imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
+{
struct imstt_par *par = info->par;
__u32 Bpp, line_pitch, bgc, dx, dy, width, height;
@@ -1192,7 +1193,7 @@ imstt_set_cursor(struct imstt_par *par, struct fb_image *d, int on)
}
}
-static int
+static int
imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct imstt_par *par = info->par;
@@ -1200,7 +1201,7 @@ imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
if (cursor->dest == NULL && cursor->rop == ROP_XOR)
return 1;
-
+
imstt_set_cursor(info, cursor, 0);
if (flags & FB_CUR_SETPOS) {
@@ -1469,8 +1470,13 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct imstt_par *par;
struct fb_info *info;
struct device_node *dp;
- int ret = -ENOMEM;
-
+ int ret;
+
+ ret = aperture_remove_conflicting_pci_devices(pdev, "imsttfb");
+ if (ret)
+ return ret;
+ ret = -ENOMEM;
+
dp = pci_device_to_OF_node(pdev);
if(dp)
printk(KERN_INFO "%s: OF name %pOFn\n",__func__, dp);
@@ -1619,7 +1625,7 @@ static int __init imsttfb_init(void)
#endif
return pci_register_driver(&imsttfb_pci_driver);
}
-
+
static void __exit imsttfb_exit(void)
{
pci_unregister_driver(&imsttfb_pci_driver);
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index d97d7456d15a..94f3bc637fc8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -681,7 +681,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
fbi->devtype = pdev->id_entry->driver_data;
- strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index 5647fca8c49a..d4a2891a9a7a 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -107,6 +107,7 @@
* Add support for 945GME. (Phil Endecott <spam_from_intelfb@chezphil.org>)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -483,6 +484,10 @@ static int intelfb_pci_register(struct pci_dev *pdev,
DBG_MSG("intelfb_pci_register\n");
+ err = aperture_remove_conflicting_pci_devices(pdev, "intelfb");
+ if (err)
+ return err;
+
num_registered++;
if (num_registered != 1) {
ERR_MSG("Attempted to register %d devices "
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index d57772f96ad2..b4b93054c520 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -9,6 +9,7 @@
* for more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -676,6 +677,10 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long size;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "kyrofb");
+ if (err)
+ return err;
+
if ((err = pci_enable_device(pdev))) {
printk(KERN_WARNING "kyrofb: Can't enable pdev: %d\n", err);
return err;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 236521b19daf..775d34115e2d 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -100,6 +100,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/version.h>
#include "matroxfb_base.h"
@@ -2044,6 +2045,10 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
u_int32_t cmd;
DBG(__func__)
+ err = aperture_remove_conflicting_pci_devices(pdev, "matroxfb");
+ if (err)
+ return err;
+
svid = pdev->subsystem_vendor;
sid = pdev->subsystem_device;
for (b = dev_list; b->vendor; b++) {
@@ -2383,9 +2388,9 @@ static int __init matroxfb_setup(char *options) {
else if (!strncmp(this_opt, "mem:", 4))
mem = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "mode:", 5))
- strlcpy(videomode, this_opt+5, sizeof(videomode));
+ strscpy(videomode, this_opt + 5, sizeof(videomode));
else if (!strncmp(this_opt, "outputs:", 8))
- strlcpy(outputs, this_opt+8, sizeof(outputs));
+ strscpy(outputs, this_opt + 8, sizeof(outputs));
else if (!strncmp(this_opt, "dfp:", 4)) {
dfp_type = simple_strtoul(this_opt+4, NULL, 0);
dfp = 1;
@@ -2455,7 +2460,7 @@ static int __init matroxfb_setup(char *options) {
else if (!strcmp(this_opt, "dfp"))
dfp = value;
else {
- strlcpy(videomode, this_opt, sizeof(videomode));
+ strscpy(videomode, this_opt, sizeof(videomode));
}
}
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index 9a98c4a6ba33..f2e02958673d 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -1276,11 +1276,10 @@ ERROR0:;
return err;
}
-static int maven_remove(struct i2c_client *client)
+static void maven_remove(struct i2c_client *client)
{
maven_shutdown_client(client);
kfree(i2c_get_clientdata(client));
- return 0;
}
static const struct i2c_device_id maven_id[] = {
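maven_remove() is adapted to the i2c_driver API in which the remove() callback returns void; the I2C core ignored the returned value anyway, so the conversion only drops the trailing return 0. The resulting callback shape (driver data type is illustrative):

	static void example_remove(struct i2c_client *client)
	{
		struct example_data *data = i2c_get_clientdata(client);

		kfree(data);	/* nothing useful can be reported from here */
	}
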
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index a7508f5be343..96800c9c9cd9 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -10,6 +10,7 @@
#undef DEBUG
+#include <linux/aperture.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
@@ -999,6 +1000,10 @@ static int mb862xx_pci_probe(struct pci_dev *pdev,
struct device *dev = &pdev->dev;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "mb862xxfb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret < 0) {
dev_err(dev, "Cannot enable PCI device\n");
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 28d32cbf496b..93a2d2d1abe8 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -23,9 +23,9 @@
*
* 0.3.3
* - Porting over to new fbdev api. (jsimmons)
- *
+ *
* 0.3.2
- * - got rid of all floating point (dok)
+ * - got rid of all floating point (dok)
*
* 0.3.1
* - added module license (dok)
@@ -54,6 +54,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1154,14 +1155,14 @@ static int neofb_set_par(struct fb_info *info)
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
- case FB_ACCEL_NEOMAGIC_NM2380:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_accel_init(info, &info->var);
break;
default:
break;
- }
+ }
return 0;
}
@@ -1493,15 +1494,15 @@ neofb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_fillrect(info, rect);
break;
default:
cfb_fillrect(info, rect);
break;
- }
+ }
}
static void
@@ -1509,15 +1510,15 @@ neofb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
- case FB_ACCEL_NEOMAGIC_NM2380:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_copyarea(info, area);
break;
default:
cfb_copyarea(info, area);
break;
- }
+ }
}
static void
@@ -1536,20 +1537,20 @@ neofb_imageblit(struct fb_info *info, const struct fb_image *image)
}
}
-static int
+static int
neofb_sync(struct fb_info *info)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
- case FB_ACCEL_NEOMAGIC_NM2380:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_sync(info);
break;
default:
break;
}
- return 0;
+ return 0;
}
/*
@@ -2029,6 +2030,10 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
DBG("neofb_probe");
+ err = aperture_remove_conflicting_pci_devices(dev, "neofb");
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err)
return err;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index a372a183c1f0..329e2e8133c6 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1276,11 +1277,15 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
struct nvidia_par *par;
struct fb_info *info;
unsigned short cmd;
-
+ int ret;
NVTRACE_ENTER();
assert(pd != NULL);
+ ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
+ if (ret)
+ return ret;
+
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
if (!info)
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index dfb4ddc45701..17cda5765683 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1642,15 +1642,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
fbdev->int_irq = platform_get_irq(pdev, 0);
- if (!fbdev->int_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->int_irq < 0) {
r = ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
- if (!fbdev->ext_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->ext_irq < 0) {
r = ENXIO;
goto cleanup;
}
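The omapfb change above replaces the !irq checks with irq < 0, matching the platform_get_irq() contract: it returns the IRQ number on success and a negative errno on failure, so testing for zero lets failures (negative values) slip through. A hedged sketch of the idiom, with a hypothetical example_probe():

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate -ENXIO, -EPROBE_DEFER, ... */

	/* request the interrupt, map resources, ... */
	return 0;
}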
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index afa688e754b9..5ccddcfce722 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1331,7 +1331,7 @@ static void clear_fb_info(struct fb_info *fbi)
{
memset(&fbi->var, 0, sizeof(fbi->var));
memset(&fbi->fix, 0, sizeof(fbi->fix));
- strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+ strscpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
}
static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
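Several hunks in this series convert strlcpy() to strscpy(). Both NUL-terminate the destination, but strscpy() returns the number of characters copied, or -E2BIG on truncation, and avoids strlcpy()'s habit of walking the whole source string just to compute its return value. A small usage sketch; the id buffer, src_name and warning text are illustrative only:

	char id[16];

	if (strscpy(id, src_name, sizeof(id)) < 0)
		pr_warn("examplefb: name truncated\n");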
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index d3be2c64f1c0..7da715d31a93 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -27,6 +27,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -617,6 +618,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
@@ -1516,6 +1522,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int err;
int retval = -ENXIO;
+ err = aperture_remove_conflicting_pci_devices(pdev, "pm2fb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err) {
printk(KERN_WARNING "pm2fb: Can't enable pdev: %d\n", err);
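pm2fb_check_var() above gains an explicit rejection of a zero pixclock. PICOS2KHZ() in the fb headers is essentially 1000000000UL divided by the pixclock value, so a zero passed in from userspace through FBIOPUT_VSCREENINFO would otherwise trigger a divide-by-zero. A sketch of the guard in a generic check_var(); EXAMPLE_MAX_PIXCLOCK is a made-up limit standing in for the driver's real one:

static int example_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	if (!var->pixclock)
		return -EINVAL;	/* avoid division by zero in PICOS2KHZ() */

	if (PICOS2KHZ(var->pixclock) > EXAMPLE_MAX_PIXCLOCK)
		return -EINVAL;

	return 0;
}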
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index a8faf46adeb1..ba69846d444f 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1315,6 +1316,10 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
int err;
int retval = -ENXIO;
+ err = aperture_remove_conflicting_pci_devices(dev, "pm3fb");
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err) {
printk(KERN_WARNING "pm3fb: Can't enable PCI dev: %d\n", err);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index f4add36cb5f4..b73ad14efa20 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -45,6 +45,7 @@
#undef DEBUG
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -942,6 +943,10 @@ static int pvr2fb_pci_probe(struct pci_dev *pdev,
{
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "pvrfb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret) {
printk(KERN_ERR "pvr2fb: PCI enable failed\n");
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e943300d23e8..d5d0bbd39213 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -640,7 +640,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
info->node = -1;
- strlcpy(info->fix.id, mi->id, 16);
+ strscpy(info->fix.id, mi->id, 16);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
info->fix.xpanstep = 0;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 66cfc3e9d3cf..696ac5431180 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2042,7 +2042,7 @@ static int __init pxafb_setup_options(void)
return -ENODEV;
if (options)
- strlcpy(g_options, options, sizeof(g_options));
+ strscpy(g_options, options, sizeof(g_options));
return 0;
}
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 84d5e23ad7d3..0ea74e28f915 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -29,6 +29,7 @@
* doublescan modes are broken
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -474,7 +475,7 @@ static inline void reverse_order(u32 *l)
* DESCRIPTiON:
* Loads cursor image based on a monochrome source and mask bitmap. The
* image bits determines the color of the pixel, 0 for background, 1 for
- * foreground. Only the affected region (as determined by @w and @h
+ * foreground. Only the affected region (as determined by @w and @h
* parameters) will be updated.
*
* CALLED FROM:
@@ -494,7 +495,7 @@ static void rivafb_load_cursor_image(struct riva_par *par, u8 *data8,
for (i = 0; i < h; i++) {
b = *data++;
reverse_order(&b);
-
+
for (j = 0; j < w/2; j++) {
tmp = 0;
#if defined (__BIG_ENDIAN)
@@ -562,7 +563,7 @@ static void riva_rclut(RIVA_HW_INST *chip,
unsigned char regnum, unsigned char *red,
unsigned char *green, unsigned char *blue)
{
-
+
VGA_WR08(chip->PDIO, 0x3c7, regnum);
*red = VGA_RD08(chip->PDIO, 0x3c9);
*green = VGA_RD08(chip->PDIO, 0x3c9);
@@ -673,7 +674,7 @@ static int riva_load_video_mode(struct fb_info *info)
int rc;
struct riva_par *par = info->par;
struct riva_regs newmode;
-
+
NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(FB_BLANK_NORMAL, info);
@@ -717,7 +718,7 @@ static int riva_load_video_mode(struct fb_info *info)
hBlankEnd = hTotal + 4;
}
- newmode.crtc[0x0] = Set8Bits (hTotal);
+ newmode.crtc[0x0] = Set8Bits (hTotal);
newmode.crtc[0x1] = Set8Bits (hDisplay);
newmode.crtc[0x2] = Set8Bits (hBlankStart);
newmode.crtc[0x3] = SetBitField (hBlankEnd, 4: 0, 4:0) | SetBit (7);
@@ -748,20 +749,20 @@ static int riva_load_video_mode(struct fb_info *info)
| SetBitField(vStart,10:10,2:2)
| SetBitField(vDisplay,10:10,1:1)
| SetBitField(vTotal,10:10,0:0);
- newmode.ext.horiz = SetBitField(hTotal,8:8,0:0)
+ newmode.ext.horiz = SetBitField(hTotal,8:8,0:0)
| SetBitField(hDisplay,8:8,1:1)
| SetBitField(hBlankStart,8:8,2:2)
| SetBitField(hStart,8:8,3:3);
newmode.ext.extra = SetBitField(vTotal,11:11,0:0)
| SetBitField(vDisplay,11:11,2:2)
| SetBitField(vStart,11:11,4:4)
- | SetBitField(vBlankStart,11:11,6:6);
+ | SetBitField(vBlankStart,11:11,6:6);
if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
int tmp = (hTotal >> 1) & ~1;
newmode.ext.interlace = Set8Bits(tmp);
newmode.ext.horiz |= SetBitField(tmp, 8:8,4:4);
- } else
+ } else
newmode.ext.interlace = 0xff; /* interlace off */
if (par->riva.Architecture >= NV_ARCH_10)
@@ -774,7 +775,7 @@ static int riva_load_video_mode(struct fb_info *info)
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
newmode.misc_output &= ~0x80;
else
- newmode.misc_output |= 0x80;
+ newmode.misc_output |= 0x80;
rc = CalcStateExt(&par->riva, &newmode.ext, par->pdev, bpp, width,
hDisplaySize, height, dotClock);
@@ -841,7 +842,7 @@ static void riva_update_var(struct fb_var_screeninfo *var,
}
/**
- * rivafb_do_maximize -
+ * rivafb_do_maximize -
* @info: pointer to fb_info object containing info for current riva board
* @var: standard kernel fb changeable data
* @nom: nom
@@ -852,7 +853,7 @@ static void riva_update_var(struct fb_var_screeninfo *var,
*
* RETURNS:
* -EINVAL on failure, 0 on success
- *
+ *
*
* CALLED FROM:
* rivafb_check_var()
@@ -916,14 +917,14 @@ static int rivafb_do_maximize(struct fb_info *info,
return -EINVAL;
}
}
-
+
if (var->xres_virtual * nom / den >= 8192) {
printk(KERN_WARNING PFX
"virtual X resolution (%d) is too high, lowering to %d\n",
var->xres_virtual, 8192 * den / nom - 16);
var->xres_virtual = 8192 * den / nom - 16;
}
-
+
if (var->xres_virtual < var->xres) {
printk(KERN_ERR PFX
"virtual X resolution (%d) is smaller than real\n", var->xres_virtual);
@@ -1010,7 +1011,7 @@ static int riva_get_cmap_len(const struct fb_var_screeninfo *var)
break;
case 6:
rc = 64; /* 64 entries (2^6), 16 bpp, RGB565 */
- break;
+ break;
default:
/* should not occur */
break;
@@ -1042,7 +1043,7 @@ static int rivafb_open(struct fb_info *info, int user)
/* vgaHWunlock() + riva unlock (0x7F) */
CRTCout(par, 0x11, 0xFF);
par->riva.LockUnlock(&par->riva, 0);
-
+
riva_save_state(par, &par->initial_state);
}
par->ref_count++;
@@ -1082,7 +1083,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
struct riva_par *par = info->par;
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
-
+
NVTRACE_ENTER();
if (!var->pixclock)
return -EINVAL;
@@ -1176,7 +1177,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (var->yoffset > var->yres_virtual - var->yres)
var->yoffset = var->yres_virtual - var->yres - 1;
- var->red.msb_right =
+ var->red.msb_right =
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
@@ -1198,7 +1199,7 @@ static int rivafb_set_par(struct fb_info *info)
goto out;
if(!(info->flags & FBINFO_HWACCEL_DISABLED))
riva_setup_accel(info);
-
+
par->cursor_reset = 1;
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
@@ -1486,7 +1487,7 @@ static inline void convert_bgcolor_16(u32 *col)
* CALLED FROM:
* framebuffer hook
*/
-static void rivafb_imageblit(struct fb_info *info,
+static void rivafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct riva_par *par = info->par;
@@ -1515,7 +1516,7 @@ static void rivafb_imageblit(struct fb_info *info,
bgx = par->palette[image->bg_color];
}
if (info->var.green.length == 6)
- convert_bgcolor_16(&bgx);
+ convert_bgcolor_16(&bgx);
break;
}
@@ -1612,7 +1613,7 @@ static int rivafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
u8 *dat = (u8 *) cursor->image.data;
u8 *msk = (u8 *) cursor->mask;
u8 *src;
-
+
src = kmalloc_array(s_pitch, cursor->image.height, GFP_ATOMIC);
if (src) {
@@ -1683,7 +1684,7 @@ static const struct fb_ops riva_fb_ops = {
.fb_fillrect = rivafb_fillrect,
.fb_copyarea = rivafb_copyarea,
.fb_imageblit = rivafb_imageblit,
- .fb_cursor = rivafb_cursor,
+ .fb_cursor = rivafb_cursor,
.fb_sync = rivafb_sync,
};
@@ -1713,7 +1714,7 @@ static int riva_set_fbinfo(struct fb_info *info)
info->pseudo_palette = par->pseudo_palette;
cmap_len = riva_get_cmap_len(&info->var);
- fb_alloc_cmap(&info->cmap, cmap_len, 0);
+ fb_alloc_cmap(&info->cmap, cmap_len, 0);
info->pixmap.size = 8 * 1024;
info->pixmap.buf_align = 4;
@@ -1898,6 +1899,10 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
NVTRACE_ENTER();
assert(pd != NULL);
+ ret = aperture_remove_conflicting_pci_devices(pd, "rivafb");
+ if (ret)
+ return ret;
+
info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev);
if (!info) {
ret = -ENOMEM;
@@ -1929,7 +1934,7 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
default_par->Chipset = (pd->vendor << 16) | pd->device;
printk(KERN_INFO PFX "nVidia device/chipset %X\n",default_par->Chipset);
-
+
if(default_par->riva.Architecture == 0) {
printk(KERN_ERR PFX "unknown NV_ARCH\n");
ret=-ENODEV;
@@ -1947,7 +1952,7 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
if (flatpanel == 1)
printk(KERN_INFO PFX "flatpanel support enabled\n");
default_par->forceCRTC = forceCRTC;
-
+
rivafb_fix.mmio_len = pci_resource_len(pd, 0);
rivafb_fix.smem_len = pci_resource_len(pd, 1);
@@ -1959,7 +1964,7 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(pd, PCI_COMMAND, cmd);
}
-
+
rivafb_fix.mmio_start = pci_resource_start(pd, 0);
rivafb_fix.smem_start = pci_resource_start(pd, 1);
@@ -2058,7 +2063,7 @@ err_iounmap_screen_base:
#endif
iounmap(info->screen_base);
err_iounmap_pramin:
- if (default_par->riva.Architecture == NV_ARCH_03)
+ if (default_par->riva.Architecture == NV_ARCH_03)
iounmap(default_par->riva.PRAMIN);
err_iounmap_ctrl_base:
iounmap(default_par->ctrl_base);
@@ -2077,7 +2082,7 @@ static void rivafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = info->par;
-
+
NVTRACE_ENTER();
#ifdef CONFIG_FB_RIVA_I2C
@@ -2117,11 +2122,11 @@ static int rivafb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "forceCRTC", 9)) {
char *p;
-
+
p = this_opt + 9;
- if (!*p || !*(++p)) continue;
+ if (!*p || !*(++p)) continue;
forceCRTC = *p - '0';
- if (forceCRTC < 0 || forceCRTC > 1)
+ if (forceCRTC < 0 || forceCRTC > 1)
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 5069f6f67923..7713274bd04c 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -11,6 +11,7 @@
* which is based on the code of neofb.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -248,7 +249,7 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
{
struct s3fb_info *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1131,6 +1132,10 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ rc = aperture_remove_conflicting_pci_devices(dev, "s3fb");
+ if (rc)
+ return rc;
+
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct s3fb_info), &(dev->dev));
if (!info)
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 8114c921ceb8..b7818b652698 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -41,6 +41,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -2176,6 +2177,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
DBG("savagefb_probe");
+ err = aperture_remove_conflicting_pci_devices(dev, "savagefb");
+ if (err)
+ return err;
+
info = framebuffer_alloc(sizeof(struct savagefb_par), &dev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index cf2a90ecd64e..e770b4a356b5 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -355,7 +355,7 @@ static int simplefb_regulators_get(struct simplefb_par *par,
if (!p || p == prop->name)
continue;
- strlcpy(name, prop->name,
+ strscpy(name, prop->name,
strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1);
regulator = devm_regulator_get_optional(&pdev->dev, name);
if (IS_ERR(regulator)) {
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index f28fd69d5eb7..1c197c3f9538 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -19,6 +19,7 @@
* which is (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -649,37 +650,37 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
u16 xres=0, yres, myres;
#ifdef CONFIG_FB_SIS_300
- if(ivideo->sisvga_engine == SIS_300_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS300))
+ if (ivideo->sisvga_engine == SIS_300_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS300))
return -1 ;
}
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS315))
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS315))
return -1;
}
#endif
myres = sisbios_mode[myindex].yres;
- switch(vbflags & VB_DISPTYPE_DISP2) {
+ switch (vbflags & VB_DISPTYPE_DISP2) {
case CRT2_LCD:
xres = ivideo->lcdxres; yres = ivideo->lcdyres;
- if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
- (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
- if(sisbios_mode[myindex].xres > xres)
+ if ((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
+ (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
+ if (sisbios_mode[myindex].xres > xres)
return -1;
- if(myres > yres)
+ if (myres > yres)
return -1;
}
- if(ivideo->sisfb_fstn) {
- if(sisbios_mode[myindex].xres == 320) {
- if(myres == 240) {
- switch(sisbios_mode[myindex].mode_no[1]) {
+ if (ivideo->sisfb_fstn) {
+ if (sisbios_mode[myindex].xres == 320) {
+ if (myres == 240) {
+ switch (sisbios_mode[myindex].mode_no[1]) {
case 0x50: myindex = MODE_FSTN_8; break;
case 0x56: myindex = MODE_FSTN_16; break;
case 0x53: return -1;
@@ -688,7 +689,7 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
}
}
- if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
return -1;
@@ -696,14 +697,14 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
break;
case CRT2_TV:
- if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
break;
case CRT2_VGA:
- if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
@@ -1872,7 +1873,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
+ strscpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
@@ -2204,82 +2205,88 @@ static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
- bool mustwait = false;
- u8 sr1F, cr17;
+ bool mustwait = false;
+ u8 sr1F, cr17;
#ifdef CONFIG_FB_SIS_315
- u8 cr63=0;
+ u8 cr63 = 0;
#endif
- u16 temp = 0xffff;
- int i;
+ u16 temp = 0xffff;
+ int i;
+
+ sr1F = SiS_GetReg(SISSR, 0x1F);
+ SiS_SetRegOR(SISSR, 0x1F, 0x04);
+ SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- sr1F = SiS_GetReg(SISSR, 0x1F);
- SiS_SetRegOR(SISSR, 0x1F, 0x04);
- SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- if(sr1F & 0xc0) mustwait = true;
+ if (sr1F & 0xc0)
+ mustwait = true;
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
- cr63 &= 0x40;
- SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
+ cr63 &= 0x40;
+ SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
+ }
#endif
- cr17 = SiS_GetReg(SISCR, 0x17);
- cr17 &= 0x80;
- if(!cr17) {
- SiS_SetRegOR(SISCR, 0x17, 0x80);
- mustwait = true;
- SiS_SetReg(SISSR, 0x00, 0x01);
- SiS_SetReg(SISSR, 0x00, 0x03);
- }
+ cr17 = SiS_GetReg(SISCR, 0x17);
+ cr17 &= 0x80;
- if(mustwait) {
- for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
- }
+ if (!cr17) {
+ SiS_SetRegOR(SISCR, 0x17, 0x80);
+ mustwait = true;
+ SiS_SetReg(SISSR, 0x00, 0x01);
+ SiS_SetReg(SISSR, 0x00, 0x03);
+ }
+ if (mustwait) {
+ for (i = 0; i < 10; i++)
+ sisfbwaitretracecrt1(ivideo);
+ }
#ifdef CONFIG_FB_SIS_315
- if(ivideo->chip >= SIS_330) {
- SiS_SetRegAND(SISCR, 0x32, ~0x20);
- if(ivideo->chip >= SIS_340) {
- SiS_SetReg(SISCR, 0x57, 0x4a);
- } else {
- SiS_SetReg(SISCR, 0x57, 0x5f);
- }
- SiS_SetRegOR(SISCR, 0x53, 0x02);
- while ((SiS_GetRegByte(SISINPSTAT)) & 0x01) break;
- while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
- if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
- SiS_SetRegAND(SISCR, 0x53, 0xfd);
- SiS_SetRegAND(SISCR, 0x57, 0x00);
- }
+ if (ivideo->chip >= SIS_330) {
+ SiS_SetRegAND(SISCR, 0x32, ~0x20);
+ if (ivideo->chip >= SIS_340)
+ SiS_SetReg(SISCR, 0x57, 0x4a);
+ else
+ SiS_SetReg(SISCR, 0x57, 0x5f);
+
+ SiS_SetRegOR(SISCR, 0x53, 0x02);
+ while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)
+ break;
+ while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01))
+ break;
+ if ((SiS_GetRegByte(SISMISCW)) & 0x10)
+ temp = 1;
+
+ SiS_SetRegAND(SISCR, 0x53, 0xfd);
+ SiS_SetRegAND(SISCR, 0x57, 0x00);
+ }
#endif
- if(temp == 0xffff) {
- i = 3;
- do {
- temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
- ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
- } while(((temp == 0) || (temp == 0xffff)) && i--);
+ if (temp == 0xffff) {
+ i = 3;
- if((temp == 0) || (temp == 0xffff)) {
- if(sisfb_test_DDC1(ivideo)) temp = 1;
- }
- }
+ do {
+ temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
+ ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
+ } while (((temp == 0) || (temp == 0xffff)) && i--);
- if((temp) && (temp != 0xffff)) {
- SiS_SetRegOR(SISCR, 0x32, 0x20);
- }
+ if ((temp == 0) || (temp == 0xffff)) {
+ if (sisfb_test_DDC1(ivideo))
+ temp = 1;
+ }
+ }
+
+ if ((temp) && (temp != 0xffff))
+ SiS_SetRegOR(SISCR, 0x32, 0x20);
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA)
+ SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
#endif
- SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
-
- SiS_SetReg(SISSR, 0x1F, sr1F);
+ SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
+ SiS_SetReg(SISSR, 0x1F, sr1F);
}
/* Determine and detect attached devices on SiS30x */
@@ -2293,25 +2300,25 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
ivideo->SiS_Pr.PanelSelfDetected = false;
/* LCD detection only for TMDS bridges */
- if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
+ if (!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
return;
- if(ivideo->vbflags2 & VB2_30xBDH)
+ if (ivideo->vbflags2 & VB2_30xBDH)
return;
/* If LCD already set up by BIOS, skip it */
reg = SiS_GetReg(SISCR, 0x32);
- if(reg & 0x08)
+ if (reg & 0x08)
return;
realcrtno = 1;
- if(ivideo->SiS_Pr.DDCPortMixup)
+ if (ivideo->SiS_Pr.DDCPortMixup)
realcrtno = 0;
/* Check DDC capabilities */
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
realcrtno, 0, &buffer[0], ivideo->vbflags2);
- if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
+ if ((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
return;
/* Read DDC data */
@@ -2320,17 +2327,17 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
ivideo->sisvga_engine, realcrtno, 1,
&buffer[0], ivideo->vbflags2);
- } while((temp) && i--);
+ } while ((temp) && i--);
- if(temp)
+ if (temp)
return;
/* No digital device */
- if(!(buffer[0x14] & 0x80))
+ if (!(buffer[0x14] & 0x80))
return;
/* First detailed timing preferred timing? */
- if(!(buffer[0x18] & 0x02))
+ if (!(buffer[0x18] & 0x02))
return;
xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
@@ -2338,26 +2345,26 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
switch(xres) {
case 1024:
- if(yres == 768)
+ if (yres == 768)
paneltype = 0x02;
break;
case 1280:
- if(yres == 1024)
+ if (yres == 1024)
paneltype = 0x03;
break;
case 1600:
- if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
+ if ((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
paneltype = 0x0b;
break;
}
- if(!paneltype)
+ if (!paneltype)
return;
- if(buffer[0x23])
+ if (buffer[0x23])
cr37 |= 0x10;
- if((buffer[0x47] & 0x18) == 0x18)
+ if ((buffer[0x47] & 0x18) == 0x18)
cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
else
cr37 |= 0xc0;
@@ -2372,31 +2379,34 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
- int temp, mytest, result, i, j;
-
- for(j = 0; j < 10; j++) {
- result = 0;
- for(i = 0; i < 3; i++) {
- mytest = test;
- SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
- temp = (type >> 8) | (mytest & 0x00ff);
- SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
- mytest >>= 8;
- mytest &= 0x7f;
- temp = SiS_GetReg(SISPART4, 0x03);
- temp ^= 0x0e;
- temp &= mytest;
- if(temp == mytest) result++;
+ int temp, mytest, result, i, j;
+
+ for (j = 0; j < 10; j++) {
+ result = 0;
+ for (i = 0; i < 3; i++) {
+ mytest = test;
+ SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
+ temp = (type >> 8) | (mytest & 0x00ff);
+ SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
+ mytest >>= 8;
+ mytest &= 0x7f;
+ temp = SiS_GetReg(SISPART4, 0x03);
+ temp ^= 0x0e;
+ temp &= mytest;
+ if (temp == mytest)
+ result++;
#if 1
- SiS_SetReg(SISPART4, 0x11, 0x00);
- SiS_SetRegAND(SISPART4, 0x10, 0xe0);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
+ SiS_SetReg(SISPART4, 0x11, 0x00);
+ SiS_SetRegAND(SISPART4, 0x10, 0xe0);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
#endif
- }
- if((result == 0) || (result >= 2)) break;
- }
- return result;
+ }
+
+ if ((result == 0) || (result >= 2))
+ break;
+ }
+ return result;
}
static void SiS_Sense30x(struct sis_video_info *ivideo)
@@ -4262,18 +4272,17 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
-
+ for (k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
- if(RankCapacity != PseudoRankCapacity)
+ if (RankCapacity != PseudoRankCapacity)
continue;
- if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
+ if ((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
continue;
BankNumHigh = RankCapacity * 16 * iteration - 1;
- if(iteration == 3) { /* Rank No */
+ if (iteration == 3) { /* Rank No */
BankNumMid = RankCapacity * 16 - 1;
} else {
BankNumMid = RankCapacity * 16 * iteration / 2 - 1;
@@ -4287,18 +4296,22 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
SiS_SetRegOR(SISSR, 0x15, 0x04); /* Test */
sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
- if(buswidth == 4) sr14 |= 0x80;
- else if(buswidth == 2) sr14 |= 0x40;
+
+ if (buswidth == 4)
+ sr14 |= 0x80;
+ else if (buswidth == 2)
+ sr14 |= 0x40;
+
SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
SiS_SetReg(SISSR, 0x14, sr14);
BankNumHigh <<= 16;
BankNumMid <<= 16;
- if((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
- (BankNumMid + PhysicalAdrHigh >= mapsize) ||
- (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
- (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
+ if ((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
+ (BankNumMid + PhysicalAdrHigh >= mapsize) ||
+ (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
+ (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
continue;
/* Write data */
@@ -4312,7 +4325,7 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
(FBAddr + BankNumHigh + PhysicalAdrOtherPage));
/* Read data */
- if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
+ if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
return 1;
}
@@ -5849,6 +5862,10 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if(sisfb_off)
return -ENXIO;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "sisfb");
+ if (ret)
+ return ret;
+
sis_fb_info = framebuffer_alloc(sizeof(*ivideo), &pdev->dev);
if(!sis_fb_info)
return -ENOMEM;
@@ -5867,7 +5884,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->cardnumber++;
}
- strlcpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
+ strscpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
ivideo->warncount = 0;
ivideo->chip_id = pdev->device;
@@ -6150,24 +6167,20 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
int result = 1;
- /* if((ivideo->chip == SIS_315H) ||
- (ivideo->chip == SIS_315) ||
- (ivideo->chip == SIS_315PRO) ||
- (ivideo->chip == SIS_330)) {
- sisfb_post_sis315330(pdev);
- } else */ if(ivideo->chip == XGI_20) {
+
+ if (ivideo->chip == XGI_20) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
- } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
+ } else if ((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
} else {
printk(KERN_INFO "sisfb: Card is not "
"POSTed and sisfb can't do this either.\n");
}
- if(!result) {
+ if (!result) {
printk(KERN_ERR "sisfb: Failed to POST card\n");
ret = -ENODEV;
goto error_3;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 8ab9a3fbd281..a10f1057293b 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -10,38 +10,39 @@
* The primary goal is to remove the console code from fbdev and place it
* into fbcon.c. This reduces the code and makes writing a new fbdev driver
* easy since the author doesn't need to worry about console internals. It
- * also allows the ability to run fbdev without a console/tty system on top
- * of it.
+ * also allows the ability to run fbdev without a console/tty system on top
+ * of it.
*
* First the roles of struct fb_info and struct display have changed. Struct
* display will go away. The way the new framebuffer console code will
- * work is that it will act to translate data about the tty/console in
+ * work is that it will act to translate data about the tty/console in
* struct vc_data to data in a device independent way in struct fb_info. Then
- * various functions in struct fb_ops will be called to store the device
- * dependent state in the par field in struct fb_info and to change the
+ * various functions in struct fb_ops will be called to store the device
+ * dependent state in the par field in struct fb_info and to change the
* hardware to that state. This allows a very clean separation of the fbdev
* layer from the console layer. It also allows one to use fbdev on its own
- * which is a bounus for embedded devices. The reason this approach works is
+ * which is a bounus for embedded devices. The reason this approach works is
* for each framebuffer device when used as a tty/console device is allocated
- * a set of virtual terminals to it. Only one virtual terminal can be active
- * per framebuffer device. We already have all the data we need in struct
+ * a set of virtual terminals to it. Only one virtual terminal can be active
+ * per framebuffer device. We already have all the data we need in struct
* vc_data so why store a bunch of colormaps and other fbdev specific data
- * per virtual terminal.
+ * per virtual terminal.
*
* As you can see doing this makes the con parameter pretty much useless
- * for struct fb_ops functions, as it should be. Also having struct
- * fb_var_screeninfo and other data in fb_info pretty much eliminates the
+ * for struct fb_ops functions, as it should be. Also having struct
+ * fb_var_screeninfo and other data in fb_info pretty much eliminates the
* need for get_fix and get_var. Once all drivers use the fix, var, and cmap
* fbcon can be written around these fields. This will also eliminate the
* need to regenerate struct fb_var_screeninfo, struct fb_fix_screeninfo
* struct fb_cmap every time get_var, get_fix, get_cmap functions are called
- * as many drivers do now.
+ * as many drivers do now.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -66,68 +67,68 @@
static char *mode_option;
/*
- * If your driver supports multiple boards, you should make the
- * below data types arrays, or allocate them dynamically (using kmalloc()).
- */
+ * If your driver supports multiple boards, you should make the
+ * below data types arrays, or allocate them dynamically (using kmalloc()).
+ */
-/*
+/*
* This structure defines the hardware state of the graphics card. Normally
* you place this in a header file in linux/include/video. This file usually
* also includes register information. That allows other driver subsystems
- * and userland applications the ability to use the same header file to
- * avoid duplicate work and easy porting of software.
+ * and userland applications the ability to use the same header file to
+ * avoid duplicate work and easy porting of software.
*/
struct xxx_par;
/*
* Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo
* if we don't use modedb. If we do use modedb see xxxfb_init how to use it
- * to get a fb_var_screeninfo. Otherwise define a default var as well.
+ * to get a fb_var_screeninfo. Otherwise define a default var as well.
*/
static const struct fb_fix_screeninfo xxxfb_fix = {
- .id = "FB's name",
+ .id = "FB's name",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 1,
.ypanstep = 1,
- .ywrapstep = 1,
+ .ywrapstep = 1,
.accel = FB_ACCEL_NONE,
};
/*
- * Modern graphical hardware not only supports pipelines but some
+ * Modern graphical hardware not only supports pipelines but some
* also support multiple monitors where each display can have
- * its own unique data. In this case each display could be
- * represented by a separate framebuffer device thus a separate
+ * its own unique data. In this case each display could be
+ * represented by a separate framebuffer device thus a separate
* struct fb_info. Now the struct xxx_par represents the graphics
- * hardware state thus only one exist per card. In this case the
- * struct xxx_par for each graphics card would be shared between
- * every struct fb_info that represents a framebuffer on that card.
- * This allows when one display changes it video resolution (info->var)
+ * hardware state thus only one exist per card. In this case the
+ * struct xxx_par for each graphics card would be shared between
+ * every struct fb_info that represents a framebuffer on that card.
+ * This allows when one display changes it video resolution (info->var)
* the other displays know instantly. Each display can always be
* aware of the entire hardware state that affects it because they share
* the same xxx_par struct. The other side of the coin is multiple
* graphics cards that pass data around until it is finally displayed
* on one monitor. Such examples are the voodoo 1 cards and high end
* NUMA graphics servers. For this case we have a bunch of pars, each
- * one that represents a graphics state, that belong to one struct
+ * one that represents a graphics state, that belong to one struct
* fb_info. Their you would want to have *par point to a array of device
- * states and have each struct fb_ops function deal with all those
+ * states and have each struct fb_ops function deal with all those
* states. I hope this covers every possible hardware design. If not
- * feel free to send your ideas at jsimmons@users.sf.net
+ * feel free to send your ideas at jsimmons@users.sf.net
*/
/*
- * If your driver supports multiple boards or it supports multiple
- * framebuffers, you should make these arrays, or allocate them
+ * If your driver supports multiple boards or it supports multiple
+ * framebuffers, you should make these arrays, or allocate them
* dynamically using framebuffer_alloc() and free them with
* framebuffer_release().
- */
+ */
static struct fb_info info;
- /*
+ /*
* Each one represents the state of the hardware. Most hardware have
- * just one hardware state. These here represent the default state(s).
+ * just one hardware state. These here represent the default state(s).
*/
static struct xxx_par __initdata current_par;
@@ -136,12 +137,12 @@ static struct xxx_par __initdata current_par;
* first accessed.
* @info: frame buffer structure that represents a single frame buffer
* @user: tell us if the userland (value=1) or the console is accessing
- * the framebuffer.
+ * the framebuffer.
*
* This function is the first function called in the framebuffer api.
- * Usually you don't need to provide this function. The case where it
+ * Usually you don't need to provide this function. The case where it
* is used is to change from a text mode hardware state to a graphics
- * mode state.
+ * mode state.
*
* Returns negative errno on error, or zero on success.
*/
@@ -151,13 +152,13 @@ static int xxxfb_open(struct fb_info *info, int user)
}
/**
- * xxxfb_release - Optional function. Called when the framebuffer
- * device is closed.
+ * xxxfb_release - Optional function. Called when the framebuffer
+ * device is closed.
* @info: frame buffer structure that represents a single frame buffer
* @user: tell us if the userland (value=1) or the console is accessing
- * the framebuffer.
- *
- * Thus function is called when we close /dev/fb or the framebuffer
+ * the framebuffer.
+ *
+ * Thus function is called when we close /dev/fb or the framebuffer
* console system is released. Usually you don't need this function.
* The case where it is usually used is to go from a graphics state
* to a text mode state.
@@ -170,17 +171,17 @@ static int xxxfb_release(struct fb_info *info, int user)
}
/**
- * xxxfb_check_var - Optional function. Validates a var passed in.
+ * xxxfb_check_var - Optional function. Validates a var passed in.
* @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * @info: frame buffer structure that represents a single frame buffer
*
* Checks to see if the hardware supports the state requested by
- * var passed in. This function does not alter the hardware state!!!
- * This means the data stored in struct fb_info and struct xxx_par do
- * not change. This includes the var inside of struct fb_info.
+ * var passed in. This function does not alter the hardware state!!!
+ * This means the data stored in struct fb_info and struct xxx_par do
+ * not change. This includes the var inside of struct fb_info.
* Do NOT change these. This function can be called on its own if we
- * intent to only test a mode and not actually set it. The stuff in
- * modedb.c is a example of this. If the var passed in is slightly
+ * intent to only test a mode and not actually set it. The stuff in
+ * modedb.c is a example of this. If the var passed in is slightly
* off by what the hardware can support then we alter the var PASSED in
* to what we can do.
*
@@ -208,7 +209,7 @@ static int xxxfb_release(struct fb_info *info, int user)
static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
/* ... */
- return 0;
+ return 0;
}
/**
@@ -217,9 +218,9 @@ static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
*
* Using the fb_var_screeninfo in fb_info we set the resolution of the
* this particular framebuffer. This function alters the par AND the
- * fb_fix_screeninfo stored in fb_info. It doesn't not alter var in
+ * fb_fix_screeninfo stored in fb_info. It doesn't not alter var in
* fb_info since we are using that data. This means we depend on the
- * data in var inside fb_info to be supported by the hardware.
+ * data in var inside fb_info to be supported by the hardware.
*
* This function is also used to recover/restore the hardware to a
* known working state.
@@ -254,20 +255,20 @@ static int xxxfb_set_par(struct fb_info *info)
{
struct xxx_par *par = info->par;
/* ... */
- return 0;
+ return 0;
}
/**
* xxxfb_setcolreg - Optional function. Sets a color register.
- * @regno: Which register in the CLUT we are programming
- * @red: The red value which can be up to 16 bits wide
- * @green: The green value which can be up to 16 bits wide
+ * @regno: Which register in the CLUT we are programming
+ * @red: The red value which can be up to 16 bits wide
+ * @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported, the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
- *
+ *
* Set a single color register. The values supplied have a 16 bit
- * magnitude which needs to be scaled in this function for the hardware.
+ * magnitude which needs to be scaled in this function for the hardware.
* Things to take into consideration are how many color registers, if
* any, are supported with the current color visual. With truecolor mode
* no color palettes are supported. Here a pseudo palette is created
@@ -275,8 +276,8 @@ static int xxxfb_set_par(struct fb_info *info)
* pseudocolor mode we have a limited color palette. To deal with this
* we can program what color is displayed for a particular pixel value.
* DirectColor is similar in that we can program each color field. If
- * we have a static colormap we don't need to implement this function.
- *
+ * we have a static colormap we don't need to implement this function.
+ *
* Returns negative errno on error, or zero on success.
*/
static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -440,7 +441,7 @@ static int xxxfb_pan_display(struct fb_var_screeninfo *var,
/**
* xxxfb_blank - NOT a required function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*
* Blank the screen if blank_mode != FB_BLANK_UNBLANK, else unblank.
@@ -469,22 +470,22 @@ static int xxxfb_blank(int blank_mode, struct fb_info *info)
/*
* We provide our own functions if we have hardware acceleration
- * or non packed pixel format layouts. If we have no hardware
+ * or non packed pixel format layouts. If we have no hardware
* acceleration, we can use a generic unaccelerated function. If using
- * a pack pixel format just use the functions in cfb_*.c. Each file
+ * a pack pixel format just use the functions in cfb_*.c. Each file
* has one of the three different accel functions we support.
*/
/**
- * xxxfb_fillrect - REQUIRED function. Can use generic routines if
+ * xxxfb_fillrect - REQUIRED function. Can use generic routines if
* non acclerated hardware and packed pixel based.
- * Draws a rectangle on the screen.
+ * Draws a rectangle on the screen.
*
* @info: frame buffer structure that represents a single frame buffer
- * @region: The structure representing the rectangular region we
+ * @region: The structure representing the rectangular region we
* wish to draw to.
*
- * This drawing operation places/removes a retangle on the screen
+ * This drawing operation places/removes a retangle on the screen
* depending on the rastering operation with the value of color which
* is in the current color depth format.
*/
@@ -492,13 +493,13 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
{
/* Meaning of struct fb_fillrect
*
- * @dx: The x and y corrdinates of the upper left hand corner of the
- * @dy: area we want to draw to.
+ * @dx: The x and y corrdinates of the upper left hand corner of the
+ * @dy: area we want to draw to.
* @width: How wide the rectangle is we want to draw.
* @height: How tall the rectangle is we want to draw.
- * @color: The color to fill in the rectangle with.
+ * @color: The color to fill in the rectangle with.
* @rop: The raster operation. We can draw the rectangle with a COPY
- * of XOR which provides erasing effect.
+ * of XOR which provides erasing effect.
*/
}
@@ -514,7 +515,7 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
* This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
-void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
/*
* @dx: The x and y coordinates of the upper left hand corner of the
@@ -530,28 +531,28 @@ void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
/**
* xxxfb_imageblit - REQUIRED function. Can use generic routines if
* non acclerated hardware and packed pixel based.
- * Copies a image from system memory to the screen.
+ * Copies a image from system memory to the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @image: structure defining the image.
*
- * This drawing operation draws a image on the screen. It can be a
+ * This drawing operation draws a image on the screen. It can be a
* mono image (needed for font handling) or a color image (needed for
- * tux).
+ * tux).
*/
-void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
+void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
/*
* @dx: The x and y coordinates of the upper left hand corner of the
* @dy: destination area to place the image on the screen.
* @width: How wide the image is we want to copy.
* @height: How tall the image is we want to copy.
- * @fg_color: For mono bitmap images this is color data for
+ * @fg_color: For mono bitmap images this is color data for
* @bg_color: the foreground and background of the image to
* write directly to the frmaebuffer.
* @depth: How many bits represent a single pixel for this image.
* @data: The actual data used to construct the image on the display.
- * @cmap: The colormap used for color images.
+ * @cmap: The colormap used for color images.
*/
/*
@@ -578,13 +579,13 @@ void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
/*
- * @set: Which fields we are altering in struct fb_cursor
- * @enable: Disable or enable the cursor
- * @rop: The bit operation we want to do.
- * @mask: This is the cursor mask bitmap.
+ * @set: Which fields we are altering in struct fb_cursor
+ * @enable: Disable or enable the cursor
+ * @rop: The bit operation we want to do.
+ * @mask: This is the cursor mask bitmap.
* @dest: A image of the area we are going to display the cursor.
- * Used internally by the driver.
- * @hot: The hot spot.
+ * Used internally by the driver.
+ * @hot: The hot spot.
* @image: The actual data for the cursor image.
*
* NOTES ON FLAGS (cursor->set):
@@ -612,11 +613,11 @@ int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
}
/**
- * xxxfb_sync - NOT a required function. Normally the accel engine
+ * xxxfb_sync - NOT a required function. Normally the accel engine
* for a graphics card take a specific amount of time.
* Often we have to wait for the accelerator to finish
* its operation before we can write to the framebuffer
- * so we can have consistent display output.
+ * so we can have consistent display output.
*
* @info: frame buffer structure that represents a single frame buffer
*
@@ -664,8 +665,15 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
struct fb_info *info;
struct xxx_par *par;
struct device *device = &dev->dev; /* or &pdev->dev */
- int cmap_len, retval;
-
+ int cmap_len, retval;
+
+ /*
+ * Remove firmware-based drivers that create resource conflicts.
+ */
+ retval = aperture_remove_conflicting_pci_devices(dev, "xxxfb");
+ if (retval)
+ return retval;
+
/*
* Dynamically allocate info and par
*/
@@ -677,11 +685,11 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
par = info->par;
- /*
+ /*
* Here we set the screen_base to the virtual memory address
* for the framebuffer. Usually we obtain the resource address
* from the bus layer and then translate it to virtual memory
- * space via ioremap. Consult ioport.h.
+ * space via ioremap. Consult ioport.h.
*/
info->screen_base = framebuffer_virtual_memory;
info->fbops = &xxxfb_ops;
@@ -765,24 +773,24 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
/*
* This should give a reasonable default video mode. The following is
- * done when we can set a video mode.
+ * done when we can set a video mode.
*/
if (!mode_option)
- mode_option = "640x480@60";
+ mode_option = "640x480@60";
retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
-
+
if (!retval || retval == 4)
- return -EINVAL;
+ return -EINVAL;
/* This has to be done! */
if (fb_alloc_cmap(&info->cmap, cmap_len, 0))
return -ENOMEM;
-
- /*
- * The following is done in the case of having hardware with a static
- * mode. If we are setting the mode ourselves we don't call this.
- */
+
+ /*
+ * The following is done in the case of having hardware with a static
+ * mode. If we are setting the mode ourselves we don't call this.
+ */
info->var = xxxfb_var;
/*
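Condensing the skeleton's probe path above: remove conflicting firmware framebuffers, allocate the fb_info, pick a video mode, allocate a colormap, then register. The sketch below keeps only those steps and omits the error unwinding and hardware setup the full skeleton discusses; it is an outline under those assumptions, not a complete driver.

static int xxxfb_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fb_info *info;
	int retval;

	retval = aperture_remove_conflicting_pci_devices(dev, "xxxfb");
	if (retval)
		return retval;

	info = framebuffer_alloc(sizeof(struct xxx_par), &dev->dev);
	if (!info)
		return -ENOMEM;

	if (!mode_option)
		mode_option = "640x480@60";

	retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
	if (!retval || retval == 4)
		return -EINVAL;

	if (fb_alloc_cmap(&info->cmap, 256, 0))
		return -ENOMEM;

	if (register_framebuffer(info) < 0)
		return -EINVAL;

	return 0;
}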
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 6a52eba64559..fce6cfbadfd6 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1719,7 +1719,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
enable = 0;
}
- strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
+ strscpy(fb->fix.id, fbname, sizeof(fb->fix.id));
memcpy(&par->ops,
(head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 092a1caa1208..3baf33635e65 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -18,6 +18,7 @@
* Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
*/
+#include <linux/aperture.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -1502,6 +1503,10 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Silicon Motion display driver.\n");
+ err = aperture_remove_conflicting_pci_devices(pdev, "smtcfb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev); /* enable SMTC chip */
if (err)
return err;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 5c765655d000..5c891aa00d59 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -450,7 +450,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- /* Set Set Area Color Mode ON/OFF & Low Power Display Mode */
+ /* Set Area Color Mode ON/OFF & Low Power Display Mode */
if (par->area_color_enable || par->low_power) {
u32 mode;
@@ -817,7 +817,7 @@ fb_alloc_error:
return ret;
}
-static int ssd1307fb_remove(struct i2c_client *client)
+static void ssd1307fb_remove(struct i2c_client *client)
{
struct fb_info *info = i2c_get_clientdata(client);
struct ssd1307fb_par *par = info->par;
@@ -836,8 +836,6 @@ static int ssd1307fb_remove(struct i2c_client *client)
fb_deferred_io_cleanup(info);
__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
framebuffer_release(info);
-
- return 0;
}
static const struct i2c_device_id ssd1307fb_i2c_id[] = {
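The ssd1307fb hunk above adapts to the I2C core making the remove() callback return void: the driver simply performs its teardown and no longer reports a status. A minimal hedged sketch of the new shape; the names are placeholders and the probe callback and device table are omitted:

static void example_i2c_remove(struct i2c_client *client)
{
	struct fb_info *info = i2c_get_clientdata(client);

	unregister_framebuffer(info);
	framebuffer_release(info);
}

static struct i2c_driver example_i2c_driver = {
	.driver	= { .name = "examplefb" },
	.remove	= example_i2c_remove,
};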
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 27d4b0ace2d6..a56b24288566 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -80,6 +80,7 @@
* Includes
*/
+#include <linux/aperture.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -364,7 +365,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
var->pixclock = KHZ2PICOS(freq);
-
+
if (var->vmode & FB_VMODE_INTERLACED)
vBackPorch += (vBackPorch % 2);
if (var->vmode & FB_VMODE_DOUBLE) {
@@ -382,7 +383,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
printk(KERN_ERR "sstfb: Unsupported bpp %d\n", var->bits_per_pixel);
return -EINVAL;
}
-
+
/* validity tests */
if (var->xres <= 1 || yDim <= 0 || var->hsync_len <= 1 ||
hSyncOff <= 1 || var->left_margin <= 2 || vSyncOn <= 0 ||
@@ -392,7 +393,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
if (IS_VOODOO2(par)) {
/* Voodoo 2 limits */
- tiles_in_X = (var->xres + 63 ) / 64 * 2;
+ tiles_in_X = (var->xres + 63 ) / 64 * 2;
if (var->xres > POW2(11) || yDim >= POW2(11)) {
printk(KERN_ERR "sstfb: Unsupported resolution %dx%d\n",
@@ -631,7 +632,7 @@ static int sstfb_set_par(struct fb_info *info)
lfbmode |= ( LFB_WORD_SWIZZLE_WR | LFB_BYTE_SWIZZLE_WR |
LFB_WORD_SWIZZLE_RD | LFB_BYTE_SWIZZLE_RD );
#endif
-
+
if (clipping) {
sst_write(LFBMODE, lfbmode | EN_PXL_PIPELINE);
/*
@@ -684,7 +685,7 @@ static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
| (green << info->var.green.offset)
| (blue << info->var.blue.offset)
| (transp << info->var.transp.offset);
-
+
par->palette[regno] = col;
return 0;
@@ -773,7 +774,7 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct sstfb_par *par = info->par;
u32 stride = info->fix.line_length;
-
+
if (!IS_VOODOO2(par))
return;
@@ -795,17 +796,17 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
* FillRect 2D command (solidfill or invert (via ROP_XOR)) - Voodoo2 only
*/
#if 0
-static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct sstfb_par *par = info->par;
u32 stride = info->fix.line_length;
if (!IS_VOODOO2(par))
return;
-
+
sst_write(BLTCLIPX, info->var.xres);
sst_write(BLTCLIPY, info->var.yres);
-
+
sst_write(BLTDSTBASEADDR, 0);
sst_write(BLTCOLOR, rect->color);
sst_write(BLTROP, rect->rop == ROP_COPY ? BLTROP_COPY : BLTROP_XOR);
@@ -820,8 +821,8 @@ static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-/*
- * get lfb size
+/*
+ * get lfb size
*/
static int sst_get_memsize(struct fb_info *info, __u32 *memsize)
{
@@ -859,8 +860,8 @@ static int sst_get_memsize(struct fb_info *info, __u32 *memsize)
}
-/*
- * DAC detection routines
+/*
+ * DAC detection routines
*/
/* fbi should be idle, and fifo emty and mem disabled */
@@ -963,7 +964,7 @@ static int sst_detect_ics(struct fb_info *info)
* see detect_dac
*/
-static int sst_set_pll_att_ti(struct fb_info *info,
+static int sst_set_pll_att_ti(struct fb_info *info,
const struct pll_timing *t, const int clock)
{
struct sstfb_par *par = info->par;
@@ -1326,6 +1327,10 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct sst_spec *spec;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "sstfb");
+ if (err)
+ return err;
+
/* Enable device in PCI config. */
if ((err=pci_enable_device(pdev))) {
printk(KERN_ERR "cannot enable device\n");
@@ -1338,10 +1343,10 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
pci_set_drvdata(pdev, info);
-
+
par = info->par;
fix = &info->fix;
-
+
par->type = id->driver_data;
spec = &voodoo_spec[par->type];
f_ddprintk("found device : %s\n", spec->name);
@@ -1382,7 +1387,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
sst_get_memsize(info, &fix->smem_len);
- strlcpy(fix->id, spec->name, sizeof(fix->id));
+ strscpy(fix->id, spec->name, sizeof(fix->id));
printk(KERN_INFO "%s (revision %d) with %s dac\n",
fix->id, par->revision, par->dac_sw.name);
@@ -1407,7 +1412,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* fact dithered to 16bit).
*/
fix->line_length = 2048; /* default value, for 24 or 32bit: 4096 */
-
+
fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 16);
if (sstfb_check_var(&info->var, info)) {
@@ -1419,7 +1424,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_ERR "sstfb: can't set default video mode.\n");
goto fail;
}
-
+
if (fb_alloc_cmap(&info->cmap, 256, 0)) {
printk(KERN_ERR "sstfb: can't alloc cmap memory.\n");
goto fail;
@@ -1465,7 +1470,7 @@ static void sstfb_remove(struct pci_dev *pdev)
info = pci_get_drvdata(pdev);
par = info->par;
-
+
device_remove_file(info->dev, &device_attrs[0]);
sst_shutdown(info);
iounmap(info->screen_base);
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 15b079505a00..490bd9a14763 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -80,7 +80,7 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
info->pseudo_palette = gp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
+ strscpy(info->fix.id, "gfb", sizeof(info->fix.id));
info->fix.smem_start = gp->fb_base_phys;
info->fix.smem_len = gp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 1d3bacd9d5ac..f4059529c602 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -84,7 +85,7 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
info->pseudo_palette = sp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "s3d", sizeof(info->fix.id));
info->fix.smem_start = sp->fb_base_phys;
info->fix.smem_len = sp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -123,6 +124,10 @@ static int s3d_pci_register(struct pci_dev *pdev,
struct s3d_info *sp;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "s3dfb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err < 0) {
printk(KERN_ERR "s3d: Cannot enable PCI device %s\n",
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 9daf17b11106..b0c8cf0c535a 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -207,7 +208,7 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
info->pseudo_palette = ep->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "e3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "e3d", sizeof(info->fix.id));
info->fix.smem_start = ep->fb_base_phys;
info->fix.smem_len = ep->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -249,6 +250,10 @@ static int e3d_pci_register(struct pci_dev *pdev,
unsigned int line_length;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "e3dfb");
+ if (err)
+ return err;
+
of_node = pci_device_to_OF_node(pdev);
if (!of_node) {
printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 1638a40fed22..01d87f53324d 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -333,7 +333,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
else
tcx_name = "TCX24";
- strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tcx_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 67e37a62b07c..592a913d0718 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -64,6 +64,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1264,7 +1265,7 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = I2C_CLASS_DDC;
chan->adapter.algo_data = &chan->algo;
@@ -1293,7 +1294,7 @@ static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
@@ -1376,6 +1377,10 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_monspecs *specs;
bool found;
+ err = aperture_remove_conflicting_pci_devices(pdev, "tdfxfb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "tdfxfb: Can't enable pdev: %d\n", err);
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ae0cf5540636..251dbd282f5e 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -12,6 +12,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/bitrev.h>
#include <linux/compiler.h>
#include <linux/delay.h>
@@ -106,6 +107,12 @@ static struct pci_driver tgafb_pci_driver = {
static int tgafb_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ int ret;
+
+ ret = aperture_remove_conflicting_pci_devices(pdev, "tgafb");
+ if (ret)
+ return ret;
+
return tgafb_register(&pdev->dev);
}
@@ -729,7 +736,7 @@ tgafb_mono_imageblit(struct fb_info *info, const struct fb_image *image)
/* Handle another common case in which accel_putcs
generates a large bitmap, which happens to be aligned.
- Allow the tail to be misaligned. This case is
+ Allow the tail to be misaligned. This case is
interesting because we've not got to hold partial
bytes across the words being written. */
@@ -908,9 +915,9 @@ tgafb_imageblit(struct fb_info *info, const struct fb_image *image)
}
/**
- * tgafb_fillrect - REQUIRED function. Can use generic routines if
+ * tgafb_fillrect - REQUIRED function. Can use generic routines if
 * non accelerated hardware and packed pixel based.
- * Draws a rectangle on the screen.
+ * Draws a rectangle on the screen.
*
* @info: frame buffer structure that represents a single frame buffer
 * @rect: structure defining the rectangle and operation.
@@ -1044,7 +1051,7 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
/* Handle the special case of copying entire lines, e.g. during scrolling.
We can avoid a lot of needless computation in this case. In the 8bpp
- case we need to use the COPY64 registers instead of mask writes into
+ case we need to use the COPY64 registers instead of mask writes into
the frame buffer to achieve maximum performance. */
static inline void
@@ -1251,7 +1258,7 @@ copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
}
static void
-tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
unsigned long dx, dy, width, height, sx, sy, vxres, vyres;
unsigned long line_length, bpp;
@@ -1344,7 +1351,7 @@ tgafb_init_fix(struct fb_info *info)
memory_size = 16777216;
}
- strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 319131bd72cf..f9c3b1d38fc2 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -16,6 +16,7 @@
* timing value tweaking so it looks good on every monitor in every mode
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/init.h>
@@ -270,7 +271,7 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
{
struct tridentfb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1470,6 +1471,10 @@ static int trident_pci_probe(struct pci_dev *dev,
int chip_id;
bool found = false;
+ err = aperture_remove_conflicting_pci_devices(dev, "tridentfb");
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err)
return err;
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index ff61605b8764..82b36dbb5b1a 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -14,6 +14,7 @@
* Alan Hourihane <alanh-at-tungstengraphics-dot-com>
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -442,7 +443,11 @@ static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vml_info *vinfo;
struct fb_info *info;
struct vml_par *par;
- int err = 0;
+ int err;
+
+ err = aperture_remove_conflicting_pci_devices(dev, "vmlfb");
+ if (err)
+ return err;
par = kzalloc(sizeof(*par), GFP_KERNEL);
if (par == NULL)
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index d21f68f3ee44..35cf51ae3292 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1,13 +1,13 @@
/*
* linux/drivers/video/vga16.c -- VGA 16-color framebuffer driver
- *
+ *
* Copyright 1999 Ben Pfaff <pfaffben@debian.org> and Petr Vandrovec <VANDROVE@vc.cvut.cz>
* Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm
* Based on VESA framebuffer (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
- * archive for more details.
+ * archive for more details.
*/
#include <linux/module.h>
@@ -25,9 +25,6 @@
#include <asm/io.h>
#include <video/vga.h>
-#define VGA_FB_PHYS 0xA0000
-#define VGA_FB_PHYS_LEN 65536
-
#define MODE_SKIP4 1
#define MODE_8BPP 2
#define MODE_CFB 4
@@ -70,7 +67,7 @@ static struct fb_var_screeninfo vga16fb_defined = {
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
- .bits_per_pixel = 4,
+ .bits_per_pixel = 4,
.activate = FB_ACTIVATE_TEST,
.height = -1,
.width = -1,
@@ -87,8 +84,8 @@ static struct fb_var_screeninfo vga16fb_defined = {
/* name should not depend on EGA/VGA */
static const struct fb_fix_screeninfo vga16fb_fix = {
.id = "VGA16 VGA",
- .smem_start = VGA_FB_PHYS,
- .smem_len = VGA_FB_PHYS_LEN,
+ .smem_start = VGA_FB_PHYS_BASE,
+ .smem_len = VGA_FB_PHYS_SIZE,
.type = FB_TYPE_VGA_PLANES,
.type_aux = FB_AUX_VGA_PLANES_VGA4,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -120,7 +117,7 @@ static inline void rmw(volatile char __iomem *p)
static inline int setmode(int mode)
{
int oldmode;
-
+
oldmode = vga_io_rgfx(VGA_GFX_MODE);
vga_io_w(VGA_GFX_D, mode);
return oldmode;
@@ -139,19 +136,19 @@ static inline void setmask(int mask)
vga_io_w(VGA_GFX_D, mask);
}
-/* Set the Data Rotate Register and return its old value.
+/* Set the Data Rotate Register and return its old value.
Bits 0-2 are rotate count, bits 3-4 are logical operation
(0=NOP, 1=AND, 2=OR, 3=XOR). */
static inline int setop(int op)
{
int oldop;
-
+
oldop = vga_io_rgfx(VGA_GFX_DATA_ROTATE);
vga_io_w(VGA_GFX_D, op);
return oldop;
}
-/* Set the Enable Set/Reset Register and return its old value.
+/* Set the Enable Set/Reset Register and return its old value.
The code here always uses value 0xf for this register. */
static inline int setsr(int sr)
{
@@ -185,25 +182,25 @@ static inline void setindex(int index)
}
/* Check if the video mode is supported by the driver */
-static inline int check_mode_supported(void)
+static inline int check_mode_supported(const struct screen_info *si)
{
/* non-x86 architectures treat orig_video_isVGA as a boolean flag */
#if defined(CONFIG_X86)
/* only EGA and VGA in 16 color graphic mode are supported */
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
+ if (si->orig_video_isVGA != VIDEO_TYPE_EGAC &&
+ si->orig_video_isVGA != VIDEO_TYPE_VGAC)
return -ENODEV;
- if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
- screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */
- screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
- screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */
+ if (si->orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
+ si->orig_video_mode != 0x0E && /* 640x200/4 (EGA) */
+ si->orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
+ si->orig_video_mode != 0x12) /* 640x480/4 (VGA) */
return -ENODEV;
#endif
return 0;
}
-static void vga16fb_pan_var(struct fb_info *info,
+static void vga16fb_pan_var(struct fb_info *info,
struct fb_var_screeninfo *var)
{
struct vga16fb_par *par = info->par;
@@ -296,7 +293,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
par->clkdiv = best->seq_clock_mode;
*pixclock = (best->pixclock * div) / mul;
}
-
+
#define FAIL(X) return -EINVAL
static int vga16fb_open(struct fb_info *info, int user)
@@ -511,7 +508,7 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
par->misc &= ~0x40;
if (var->sync & FB_SYNC_VERT_HIGH_ACT)
par->misc &= ~0x80;
-
+
par->mode = mode;
if (mode & MODE_8BPP)
@@ -520,8 +517,8 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
else
/* pixel clock == vga clock */
vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
-
- var->red.offset = var->green.offset = var->blue.offset =
+
+ var->red.offset = var->green.offset = var->blue.offset =
var->transp.offset = 0;
var->red.length = var->green.length = var->blue.length =
(par->isVGA) ? 6 : 2;
@@ -588,10 +585,10 @@ static int vga16fb_set_par(struct fb_info *info)
else
atc[VGA_ATC_PEL] = info->var.xoffset & 7;
atc[VGA_ATC_COLOR_PAGE] = 0x00;
-
+
if (par->mode & MODE_TEXT) {
- fh = 16; // FIXME !!! Fudge font height.
- par->crtc[VGA_CRTC_MAX_SCAN] = (par->crtc[VGA_CRTC_MAX_SCAN]
+ fh = 16; // FIXME !!! Fudge font height.
+ par->crtc[VGA_CRTC_MAX_SCAN] = (par->crtc[VGA_CRTC_MAX_SCAN]
& ~0x1F) | (fh - 1);
}
@@ -602,10 +599,10 @@ static int vga16fb_set_par(struct fb_info *info)
vga_io_w(EGA_GFX_E0, 0x00);
vga_io_w(EGA_GFX_E1, 0x01);
}
-
+
/* update misc output register */
vga_io_w(VGA_MIS_W, par->misc);
-
+
/* synchronous reset on */
vga_io_wseq(0x00, 0x01);
@@ -617,7 +614,7 @@ static int vga16fb_set_par(struct fb_info *info)
for (i = 2; i < VGA_SEQ_C; i++) {
vga_io_wseq(i, seq[i]);
}
-
+
/* synchronous reset off */
vga_io_wseq(0x00, 0x03);
@@ -628,12 +625,12 @@ static int vga16fb_set_par(struct fb_info *info)
for (i = 0; i < VGA_CRTC_REGS; i++) {
vga_io_wcrt(i, par->crtc[i]);
}
-
+
/* write graphics controller registers */
for (i = 0; i < VGA_GFX_C; i++) {
vga_io_wgfx(i, gdc[i]);
}
-
+
/* write attribute controller registers */
for (i = 0; i < VGA_ATT_C; i++) {
vga_io_r(VGA_IS1_RC); /* reset flip-flop */
@@ -656,7 +653,7 @@ static void ega16_setpalette(int regno, unsigned red, unsigned green, unsigned b
{
static const unsigned char map[] = { 000, 001, 010, 011 };
int val;
-
+
if (regno >= 16)
return;
val = map[red>>14] | ((map[green>>14]) << 1) | ((map[blue>>14]) << 2);
@@ -687,17 +684,17 @@ static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (according to the entries in the `var' structure). Return
* != 0 for invalid regno.
*/
-
+
if (regno >= 256)
return 1;
gray = info->var.grayscale;
-
+
if (gray) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
}
- if (par->isVGA)
+ if (par->isVGA)
vga16_setpalette(regno,red,green,blue);
else
ega16_setpalette(regno,red,green,blue);
@@ -705,7 +702,7 @@ static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
}
static int vga16fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
+ struct fb_info *info)
{
vga16fb_pan_var(info, var);
return 0;
@@ -720,7 +717,7 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
{
unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
-
+
/* save original values of VGA controller registers */
if(!par->vesa_blanked) {
par->vga_state.CrtMiscIO = vga_io_r(VGA_MIS_R);
@@ -776,7 +773,7 @@ static void vga_vesa_unblank(struct vga16fb_par *par)
{
unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
-
+
/* restore original values of VGA controller registers */
vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO);
@@ -962,7 +959,7 @@ static void vga16fb_fillrect(struct fb_info *info, const struct fb_fillrect *rec
}
break;
}
- } else
+ } else
vga_8planes_fillrect(info, rect);
break;
case FB_TYPE_PACKED_PIXELS:
@@ -1029,7 +1026,7 @@ static void vga_8planes_copyarea(struct fb_info *info, const struct fb_copyarea
static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
+ u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
int x, x2, y2, old_dx, old_dy, vxres, vyres;
int height, width, line_ofs;
char __iomem *dst = NULL;
@@ -1094,9 +1091,9 @@ static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
dst += line_ofs;
}
} else {
- dst = info->screen_base + (dx/8) + width +
+ dst = info->screen_base + (dx/8) + width +
(dy + height - 1) * info->fix.line_length;
- src = info->screen_base + (sx/8) + width +
+ src = info->screen_base + (sx/8) + width +
(sy + height - 1) * info->fix.line_length;
while (height--) {
for (x = 0; x < width; x++) {
@@ -1109,7 +1106,7 @@ static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
dst -= line_ofs;
}
}
- } else
+ } else
vga_8planes_copyarea(info, area);
break;
case FB_TYPE_PACKED_PIXELS:
@@ -1182,7 +1179,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
setsr(0xf);
setcolor(image->fg_color);
selectmask();
-
+
setmask(0xff);
writeb(image->bg_color, where);
rmb();
@@ -1191,7 +1188,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
wmb();
for (y = 0; y < image->height; y++) {
dst = where;
- for (x = image->width/8; x--;)
+ for (x = image->width/8; x--;)
writeb(*cdat++, dst++);
where += info->fix.line_length;
}
@@ -1202,7 +1199,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
setsr(0xf);
setcolor(image->bg_color);
selectmask();
-
+
setmask(0xff);
for (y = 0; y < image->height; y++) {
dst = where;
@@ -1218,7 +1215,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
where += info->fix.line_length;
}
}
- } else
+ } else
vga_8planes_imageblit(info, image);
break;
case FB_TYPE_PACKED_PIXELS:
@@ -1231,7 +1228,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
static void vga_imageblit_color(struct fb_info *info, const struct fb_image *image)
{
/*
- * Draw logo
+ * Draw logo
*/
struct vga16fb_par *par = info->par;
char __iomem *where =
@@ -1248,7 +1245,7 @@ static void vga_imageblit_color(struct fb_info *info, const struct fb_image *ima
setsr(0xf);
setop(0);
setmode(0);
-
+
for (y = 0; y < image->height; y++) {
for (x = 0; x < image->width; x++) {
dst = where + x/8;
@@ -1272,7 +1269,7 @@ static void vga_imageblit_color(struct fb_info *info, const struct fb_image *ima
break;
}
}
-
+
static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
if (image->depth == 1)
@@ -1304,28 +1301,22 @@ static const struct fb_ops vga16fb_ops = {
.fb_imageblit = vga16fb_imageblit,
};
-#ifndef MODULE
-static int __init vga16fb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt) continue;
- }
- return 0;
-}
-#endif
-
static int vga16fb_probe(struct platform_device *dev)
{
+ struct screen_info *si;
struct fb_info *info;
struct vga16fb_par *par;
int i;
int ret = 0;
+ si = dev_get_platdata(&dev->dev);
+ if (!si)
+ return -ENODEV;
+
+ ret = check_mode_supported(si);
+ if (ret)
+ return ret;
+
printk(KERN_DEBUG "vga16fb: initializing\n");
info = framebuffer_alloc(sizeof(struct vga16fb_par), &dev->dev);
@@ -1339,8 +1330,8 @@ static int vga16fb_probe(struct platform_device *dev)
goto err_ioremap;
}
- /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
- info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);
+ /* XXX share VGA_FB_PHYS_BASE and I/O region with vgacon and others */
+ info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS_BASE, 0);
if (!info->screen_base) {
printk(KERN_ERR "vga16fb: unable to map device\n");
@@ -1352,19 +1343,19 @@ static int vga16fb_probe(struct platform_device *dev)
par = info->par;
#if defined(CONFIG_X86)
- par->isVGA = screen_info.orig_video_isVGA == VIDEO_TYPE_VGAC;
+ par->isVGA = si->orig_video_isVGA == VIDEO_TYPE_VGAC;
#else
/* non-x86 architectures treat orig_video_isVGA as a boolean flag */
- par->isVGA = screen_info.orig_video_isVGA;
+ par->isVGA = si->orig_video_isVGA;
#endif
par->palette_blanked = 0;
par->vesa_blanked = 0;
i = par->isVGA? 6 : 2;
-
+
vga16fb_defined.red.length = i;
vga16fb_defined.green.length = i;
- vga16fb_defined.blue.length = i;
+ vga16fb_defined.blue.length = i;
/* name should not depend on EGA/VGA */
info->fbops = &vga16fb_ops;
@@ -1391,8 +1382,8 @@ static int vga16fb_probe(struct platform_device *dev)
vga16fb_update_fix(info);
- info->apertures->ranges[0].base = VGA_FB_PHYS;
- info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;
+ info->apertures->ranges[0].base = VGA_FB_PHYS_BASE;
+ info->apertures->ranges[0].size = VGA_FB_PHYS_SIZE;
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
@@ -1425,58 +1416,22 @@ static int vga16fb_remove(struct platform_device *dev)
return 0;
}
+static const struct platform_device_id vga16fb_driver_id_table[] = {
+ {"ega-framebuffer", 0},
+ {"vga-framebuffer", 0},
+ { }
+};
+
static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
.remove = vga16fb_remove,
.driver = {
.name = "vga16fb",
},
+ .id_table = vga16fb_driver_id_table,
};
-static struct platform_device *vga16fb_device;
-
-static int __init vga16fb_init(void)
-{
- int ret;
-#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("vga16fb", &option))
- return -ENODEV;
-
- vga16fb_setup(option);
-#endif
-
- ret = check_mode_supported();
- if (ret)
- return ret;
-
- ret = platform_driver_register(&vga16fb_driver);
-
- if (!ret) {
- vga16fb_device = platform_device_alloc("vga16fb", 0);
-
- if (vga16fb_device)
- ret = platform_device_add(vga16fb_device);
- else
- ret = -ENOMEM;
-
- if (ret) {
- platform_device_put(vga16fb_device);
- platform_driver_unregister(&vga16fb_driver);
- }
- }
-
- return ret;
-}
-
-static void __exit vga16fb_exit(void)
-{
- platform_device_unregister(vga16fb_device);
- platform_driver_unregister(&vga16fb_driver);
-}
+module_platform_driver(vga16fb_driver);
MODULE_DESCRIPTION("Legacy VGA framebuffer device driver");
MODULE_LICENSE("GPL");
-module_init(vga16fb_init);
-module_exit(vga16fb_exit);
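With the hunks above, vga16fb no longer creates its own platform device or parses module options; it binds to "ega-framebuffer"/"vga-framebuffer" platform devices and reads the firmware screen_info from their platform data. The device itself is expected to be registered by platform code outside this diff; a hedged sketch of what such a registration could look like, where the helper below is purely illustrative:

#include <linux/platform_device.h>
#include <linux/screen_info.h>

/*
 * Illustrative sketch: register a "vga-framebuffer" device and hand the
 * firmware-provided screen_info over as platform data, which is what
 * vga16fb_probe() now expects to find via dev_get_platdata().
 */
static struct platform_device *
example_register_vga16_device(const struct screen_info *si)
{
        return platform_device_register_data(NULL, "vga-framebuffer",
                                             PLATFORM_DEVID_NONE,
                                             si, sizeof(*si));
}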
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index 89d75079b730..2ee8fcae08df 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -8,6 +8,7 @@
/*
* Core code for the Via multifunction framebuffer device.
*/
+#include <linux/aperture.h>
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
@@ -617,6 +618,10 @@ static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "viafb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret)
return ret;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 4274c6efb249..49b9f148d3a1 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -12,6 +12,7 @@
* (http://davesdomain.org.uk/viafb/)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -672,6 +673,10 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ rc = aperture_remove_conflicting_pci_devices(dev, "vt8623fb");
+ if (rc)
+ return rc;
+
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct vt8623fb_info), &(dev->dev));
if (!info)
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 947be761dfa4..03c7f27dde49 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/display/drm_dp.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
@@ -381,12 +382,34 @@ static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *fr
*
* Returns 0 on success or a negative error code on failure.
*/
-int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame)
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame)
{
return hdmi_audio_infoframe_check_only(frame);
}
EXPORT_SYMBOL(hdmi_audio_infoframe_check);
+static void
+hdmi_audio_infoframe_pack_payload(const struct hdmi_audio_infoframe *frame,
+ u8 *buffer)
+{
+ u8 channels;
+
+ if (frame->channels >= 2)
+ channels = frame->channels - 1;
+ else
+ channels = 0;
+
+ buffer[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
+ buffer[1] = ((frame->sample_frequency & 0x7) << 2) |
+ (frame->sample_size & 0x3);
+ buffer[2] = frame->coding_type_ext & 0x1f;
+ buffer[3] = frame->channel_allocation;
+ buffer[4] = (frame->level_shift_value & 0xf) << 3;
+
+ if (frame->downmix_inhibit)
+ buffer[4] |= BIT(7);
+}
+
/**
* hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
@@ -404,7 +427,6 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_check);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size)
{
- unsigned char channels;
u8 *ptr = buffer;
size_t length;
int ret;
@@ -420,28 +442,13 @@ ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
memset(buffer, 0, size);
- if (frame->channels >= 2)
- channels = frame->channels - 1;
- else
- channels = 0;
-
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
- /* start infoframe payload */
- ptr += HDMI_INFOFRAME_HEADER_SIZE;
-
- ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
- ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
- (frame->sample_size & 0x3);
- ptr[2] = frame->coding_type_ext & 0x1f;
- ptr[3] = frame->channel_allocation;
- ptr[4] = (frame->level_shift_value & 0xf) << 3;
-
- if (frame->downmix_inhibit)
- ptr[4] |= BIT(7);
+ hdmi_audio_infoframe_pack_payload(frame,
+ ptr + HDMI_INFOFRAME_HEADER_SIZE);
hdmi_infoframe_set_checksum(buffer, length);
@@ -480,6 +487,43 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
+ * hdmi_audio_infoframe_pack_for_dp - Pack a HDMI Audio infoframe for DisplayPort
+ *
+ * @frame: HDMI Audio infoframe
+ * @sdp: Secondary data packet for DisplayPort.
+ * @dp_version: DisplayPort version to be encoded in the header
+ *
+ * Packs a HDMI Audio Infoframe to be sent over DisplayPort. This function
+ * fills the secondary data packet to be used for DisplayPort.
+ *
+ * Return: Number of total written bytes or a negative errno on failure.
+ */
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version)
+{
+ int ret;
+
+ ret = hdmi_audio_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ memset(sdp->db, 0, sizeof(sdp->db));
+
+ /* Secondary-data packet header */
+ sdp->sdp_header.HB0 = 0;
+ sdp->sdp_header.HB1 = frame->type;
+ sdp->sdp_header.HB2 = DP_SDP_AUDIO_INFOFRAME_HB2;
+ sdp->sdp_header.HB3 = (dp_version & 0x3f) << 2;
+
+ hdmi_audio_infoframe_pack_payload(frame, sdp->db);
+
+ /* Return size = frame length + four HB for sdp_header */
+ return frame->length + 4;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_for_dp);
+
+/**
* hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
*
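The new hdmi_audio_infoframe_pack_for_dp() reuses the payload packing split out above, but writes it into a DisplayPort secondary data packet instead of a raw infoframe buffer. A hedged usage sketch; the channel setup and the dp_version value are only examples, not taken from this patch:

#include <drm/display/drm_dp.h>
#include <linux/hdmi.h>

/* Illustrative: fill a DisplayPort SDP from an audio infoframe. */
static ssize_t example_pack_audio_sdp(struct dp_sdp *sdp)
{
        struct hdmi_audio_infoframe frame;

        hdmi_audio_infoframe_init(&frame);
        frame.channels = 2;
        frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;

        /*
         * 0x14 stands for DP 1.4 and is only an example value; on success
         * the return value is frame.length plus the 4 SDP header bytes.
         */
        return hdmi_audio_infoframe_pack_for_dp(&frame, sdp, 0x14);
}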
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index ce91add81401..dc4d25c26256 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -17,7 +17,7 @@ config NITRO_ENCLAVES
config NITRO_ENCLAVES_MISC_DEV_TEST
bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
- depends on NITRO_ENCLAVES && KUNIT
+ depends on NITRO_ENCLAVES && KUNIT=y
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the misc device functionality of the Nitro
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index c492a57531c6..3ff746e3f24a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -360,7 +360,7 @@ static void vm_synchronize_cbs(struct virtio_device *vdev)
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
- const char *name, u32 size, bool ctx)
+ const char *name, bool ctx)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
struct virtio_mmio_vq_info *info;
@@ -395,11 +395,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
- if (!size || size > num)
- size = num;
-
/* Create the vring */
- vq = vring_create_virtqueue(index, size, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
true, true, ctx, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
@@ -477,7 +474,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -503,7 +499,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- sizes ? sizes[i] : 0,
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 00ad476a815d..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -174,7 +174,6 @@ error:
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec)
{
@@ -187,7 +186,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
if (!info)
return ERR_PTR(-ENOMEM);
- vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, size, ctx,
+ vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
msix_vec);
if (IS_ERR(vq))
goto out_info;
@@ -284,7 +283,7 @@ void vp_del_vqs(struct virtio_device *vdev)
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], bool per_vq_vectors,
+ const char * const names[], bool per_vq_vectors,
const bool *ctx,
struct irq_affinity *desc)
{
@@ -327,8 +326,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
else
msix_vec = VP_MSIX_VQ_VECTOR;
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- sizes ? sizes[i] : 0,
- ctx ? ctx[i] : false, msix_vec);
+ ctx ? ctx[i] : false,
+ msix_vec);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
@@ -358,7 +357,7 @@ error_find:
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], const bool *ctx)
+ const char * const names[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i, err, queue_idx = 0;
@@ -380,7 +379,6 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- sizes ? sizes[i] : 0,
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
@@ -398,21 +396,21 @@ out_del_vqs:
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, true, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, false, ctx, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
if (!err)
return 0;
/* Finally fall back to regular interrupts. */
- return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, sizes, ctx);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
const char *vp_bus_name(struct virtio_device *vdev)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index c0448378b698..23112d84218f 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -80,7 +80,6 @@ struct virtio_pci_device {
unsigned int idx,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec);
void (*del_vq)(struct virtio_pci_vq_info *info);
@@ -111,7 +110,7 @@ void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], u32 sizes[], const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index d75e5c4e637f..2257f1b3d8ae 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,7 +112,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec)
{
@@ -126,13 +125,10 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
return ERR_PTR(-ENOENT);
- if (!size || size > num)
- size = num;
-
info->msix_vector = msix_vec;
/* create the vring */
- vq = vring_create_virtqueue(index, size,
+ vq = vring_create_virtqueue(index, num,
VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
true, false, ctx,
vp_notify, callback, name);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f7965c5dd36b..c3b9f2761849 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,7 +293,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
- u32 size,
bool ctx,
u16 msix_vec)
{
@@ -311,18 +310,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
- if (!size || size > num)
- size = num;
-
- if (size & (size - 1)) {
- dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", size);
+ if (num & (num - 1)) {
+ dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
return ERR_PTR(-EINVAL);
}
info->msix_vector = msix_vec;
/* create the vring */
- vq = vring_create_virtqueue(index, size,
+ vq = vring_create_virtqueue(index, num,
SMP_CACHE_BYTES, &vp_dev->vdev,
true, true, ctx,
vp_notify, callback, name);
@@ -351,15 +347,12 @@ err:
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char * const names[],
- u32 sizes[],
- const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
- int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, sizes, ctx,
- desc);
+ int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
if (rc)
return rc;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d66c8e6d0ef3..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2426,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 9bc4d110b800..9670cc79371d 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -131,7 +131,7 @@ static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
- const char *name, u32 size, bool ctx)
+ const char *name, bool ctx)
{
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
@@ -168,17 +168,14 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
- if (!size || size > max_num)
- size = max_num;
-
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
- may_reduce_num = (size == min_num) ? false : true;
+ may_reduce_num = (max_num == min_num) ? false : true;
/* Create the vring */
align = ops->get_vq_align(vdpa);
- vq = vring_create_virtqueue(index, size, align, vdev,
+ vq = vring_create_virtqueue(index, max_num, align, vdev,
true, may_reduce_num, ctx,
virtio_vdpa_notify, callback, name);
if (!vq) {
@@ -272,7 +269,6 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- u32 sizes[],
const bool *ctx,
struct irq_affinity *desc)
{
@@ -288,9 +284,9 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
- vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, callbacks[i],
- names[i], sizes ? sizes[i] : 0,
- ctx ? ctx[i] : false);
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
+ callbacks[i], names[i], ctx ?
+ ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
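The virtio hunks above all revert the per-queue size hint: the u32 sizes[] parameter is dropped from the find_vqs()/setup_vq() paths across the mmio, PCI legacy, PCI modern and vDPA transports, and each vring is again created with the size the device reports. Driver-facing helpers keep their usual shape; a hedged sketch of a find_vqs call after this change, with placeholder queue names and callbacks:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Illustrative: set up two queues; no per-queue size array is passed. */
static int example_find_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2],
                            vq_callback_t *rx_cb, vq_callback_t *tx_cb)
{
        vq_callback_t *callbacks[] = { rx_cb, tx_cb };
        static const char * const names[] = { "rx", "tx" };

        return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}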
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 6c962e88501c..62c44616d8a9 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -525,7 +525,7 @@ exit:
return err;
}
-static int ds2482_remove(struct i2c_client *client)
+static void ds2482_remove(struct i2c_client *client)
{
struct ds2482_data *data = i2c_get_clientdata(client);
int idx;
@@ -538,7 +538,6 @@ static int ds2482_remove(struct i2c_client *client)
/* Free the memory */
kfree(data);
- return 0;
}
/*
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9295492d24f7..688922fc4edb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1799,7 +1799,7 @@ config BCM7038_WDT
tristate "BCM63xx/BCM7038 Watchdog"
select WATCHDOG_CORE
depends on HAS_IOMEM
- depends on ARCH_BCM4908 || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
+ depends on ARCH_BCMBCA || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
help
Watchdog driver for the built-in hardware in Broadcom 7038 and
later SoCs used in set-top boxes. BCM7038 was made public
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index c5a9b820d43a..d0e88875443a 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -708,13 +708,11 @@ static int ziirave_wdt_probe(struct i2c_client *client,
return ret;
}
-static int ziirave_wdt_remove(struct i2c_client *client)
+static void ziirave_wdt_remove(struct i2c_client *client)
{
struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
watchdog_unregister_device(&w_priv->wdd);
-
- return 0;
}
static const struct i2c_device_id ziirave_wdt_id[] = {
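The ds2482 and ziirave_wdt hunks are part of the tree-wide conversion of the I2C client remove() callback from int to void: the returned value was only ever logged, so drivers now just clean up and return nothing. A hedged sketch of the new callback shape, using a hypothetical driver:

#include <linux/i2c.h>
#include <linux/slab.h>

struct exampledev_data {
        int state;      /* placeholder driver state */
};

static void exampledev_remove(struct i2c_client *client)
{
        struct exampledev_data *data = i2c_get_clientdata(client);

        /* Tear down and free; there is no error code to propagate any more. */
        kfree(data);
}

static struct i2c_driver exampledev_driver = {
        .driver = { .name = "exampledev" },
        .remove = exampledev_remove,    /* now a void callback */
};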
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 738029de3c67..e1ec725c2819 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1047,6 +1047,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
size_t size;
int i, ret;
+ if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
+ return -ENOMEM;
+
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
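The new check in gnttab_dma_alloc_pages() guards the shift that follows it: nr_pages is a signed int, so nr_pages << PAGE_SHIFT is evaluated as an int before it is stored in the size_t size, and a large page count would overflow there. With 4 KiB pages, INT_MAX >> PAGE_SHIFT is 524287, so 524288 pages is the first value the guard rejects. A standalone sketch of the same arithmetic:

#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Illustrative only: convert a page count to bytes with the same guard. */
static int example_pages_to_bytes(int nr_pages, size_t *size)
{
        if (nr_pages < 0 || nr_pages > (INT_MAX >> PAGE_SHIFT))
                return -ENOMEM;

        *size = nr_pages << PAGE_SHIFT; /* cannot overflow after the check */
        return 0;
}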
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 3369734108af..e88e8f6f0a33 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -581,27 +581,30 @@ static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
- unsigned int i;
+ unsigned int i, off = 0;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < num; ) {
unsigned int requested;
int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
- PAGE_SIZE);
+ PAGE_SIZE) - off;
if (requested > nr_pages)
return -ENOSPC;
page_count = pin_user_pages_fast(
- (unsigned long) kbufs[i].uptr,
+ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
requested, FOLL_WRITE, pages);
- if (page_count < 0)
- return page_count;
+ if (page_count <= 0)
+ return page_count ? : -EFAULT;
*pinned += page_count;
nr_pages -= page_count;
pages += page_count;
+
+ off = (requested == page_count) ? 0 : off + page_count;
+ i += !off;
}
return 0;
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
}
rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
- if (rc < 0) {
- nr_pages = pinned;
+ if (rc < 0)
goto out;
- }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ unlock_pages(pages, pinned);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
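A hedged walk-through of the reworked lock_pages() loop above, with made-up numbers: if kbufs[0] covers 8 pages and pin_user_pages_fast() pins only 5 of them, the loop sets off = 5 and, because i += !off adds nothing, stays on the same buffer; the retry starts at kbufs[0].uptr + 5 * PAGE_SIZE and requests the remaining 3 pages, after which off drops back to 0 and i advances. Every partial success still bumps *pinned, and a zero result is converted to -EFAULT, so the error path in privcmd_ioctl_dm_op() can now call unlock_pages(pages, pinned) and release exactly the pages that were actually pinned.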
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7a0c93acc2c5..d3dcda344989 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1121,7 +1121,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
"%s: writing %s", __func__, state);
return;
}
- strlcpy(phy, val, VSCSI_NAMELEN);
+ strscpy(phy, val, VSCSI_NAMELEN);
kfree(val);
/* virtual SCSI device */
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d5f3f763717e..d4b251925796 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
grant_ref_t gref_head;
unsigned int i;
+ void *addr;
int ret;
- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
if (!*vaddr) {
ret = -ENOMEM;
goto err;
@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long gfn;
if (is_vmalloc_addr(*vaddr))
- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
+ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
else
- gfn = virt_to_gfn(vaddr[i]);
+ gfn = virt_to_gfn(addr);
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);
+
+ addr += XEN_PAGE_SIZE;
}
return 0;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 07b010a68fcf..f44d5a64351e 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -40,7 +40,7 @@ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
return -EINVAL;
}
- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+ strscpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
pr_warn("bus_id %s no slash\n", bus_id);
return -EINVAL;